repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
mix | mix-master/fairseq/data/legacy/block_pair_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import torch
from fairseq.data import FairseqDataset
class BlockPairDataset(FairseqDataset):
"""Break a Dataset of tokens into sentence pair blocks for next sentence
prediction as well as masked language model.
High-level logics are:
1. break input tensor to tensor blocks
2. pair the blocks with 50% next sentence and 50% random sentence
3. return paired blocks as well as related segment labels
Args:
dataset (~torch.utils.data.Dataset): dataset to break into blocks
sizes: array of sentence lengths
dictionary: dictionary for the task
block_size: maximum block size
        break_mode: mode for breaking corpus into block pairs. currently we support
            2 modes
            doc: respect document boundaries and each part of the pair should belong to one document
            none: don't respect any boundary and cut tokens evenly
short_seq_prob: probability for generating shorter block pairs
doc_break_size: Size for empty line separating documents. Typically 1 if
the sentences have eos, 0 otherwise.
"""
    def __init__(
        self,
        dataset,
        dictionary,
        sizes,
        block_size,
        break_mode="doc",
        short_seq_prob=0.1,
        doc_break_size=1,
    ):
        """Index ``dataset`` into sentence-pair blocks for NSP/MLM training.

        Args:
            dataset: token dataset; ``dataset[i]`` is a 1d tensor of token ids
            dictionary: dictionary providing pad/eos/cls/mask/sep special ids
            sizes: per-sentence lengths, parallel to ``dataset``
            block_size: maximum total block size (specials included)
            break_mode: "doc" to respect document boundaries, "none"/None to
                cut the token stream evenly
            short_seq_prob: probability of sampling a shorter target length
            doc_break_size: size of the blank-line document separator
        """
        super().__init__()
        self.dataset = dataset
        # Cache the special-token ids used when assembling blocks.
        self.pad = dictionary.pad()
        self.eos = dictionary.eos()
        self.cls = dictionary.cls()
        self.mask = dictionary.mask()
        self.sep = dictionary.sep()
        self.break_mode = break_mode
        self.dictionary = dictionary
        self.short_seq_prob = short_seq_prob
        self.block_indices = []
        assert len(dataset) == len(sizes)
        if break_mode == "doc":
            # Group sentence ids into documents; an entry whose size equals
            # doc_break_size (a blank line) closes the current document.
            cur_doc = []
            for sent_id, sz in enumerate(sizes):
                # NOTE(review): the two message fragments below concatenate
                # without a space ("...to be" + "separated...").
                assert doc_break_size == 0 or sz != 0, (
                    "when doc_break_size is non-zero, we expect documents to be"
                    "separated by a blank line with a single eos."
                )
                # empty line as document separator
                if sz == doc_break_size:
                    if len(cur_doc) == 0:
                        continue
                    self.block_indices.append(cur_doc)
                    cur_doc = []
                else:
                    cur_doc.append(sent_id)
            max_num_tokens = block_size - 3  # Account for [CLS], [SEP], [SEP]
            self.sent_pairs = []
            self.sizes = []
            for doc_id, doc in enumerate(self.block_indices):
                self._generate_sentence_pair(doc, doc_id, max_num_tokens, sizes)
        elif break_mode is None or break_mode == "none":
            # each block should have half of the block size since we are constructing block pair
            sent_length = (block_size - 3) // 2
            total_len = sum(dataset.sizes)
            length = math.ceil(total_len / sent_length)
            def block_at(i):
                # Half-open [start, end) token span of evenly cut block i.
                start = i * sent_length
                end = min(start + sent_length, total_len)
                return (start, end)
            sent_indices = np.array([block_at(i) for i in range(length)])
            sent_sizes = np.array([e - s for s, e in sent_indices])
            # Map each evenly cut block back onto dataset item indices/offsets.
            dataset_index = self._sent_to_dataset_index(sent_sizes)
            # pair sentences
            self._pair_sentences(dataset_index)
        else:
            raise ValueError("Invalid break_mode: " + break_mode)
    def _pair_sentences(self, dataset_index):
        """
        Give a list of evenly cut blocks/sentences, pair these sentences with 50%
        consecutive sentences and 50% random sentences.
        This is used for none break mode
        Args:
            dataset_index: list of (start_ds_idx, start_offset, end_ds_idx,
                length) tuples, one per evenly cut block
        """
        # pair sentences
        for sent_id, sent in enumerate(dataset_index):
            # Label is 1 (consecutive) with p=0.5, except the last block which
            # has no successor and always gets a random partner (label 0).
            next_sent_label = (
                1 if np.random.rand() > 0.5 and sent_id != len(dataset_index) - 1 else 0
            )
            if next_sent_label:
                next_sent = dataset_index[sent_id + 1]
            else:
                # Sample a random block, skipping this block and its successor.
                next_sent = dataset_index[
                    self._skip_sampling(len(dataset_index), [sent_id, sent_id + 1])
                ]
            self.sent_pairs.append((sent, next_sent, next_sent_label))
            # The current blocks don't include the special tokens but the
            # sizes already account for this
            self.sizes.append(3 + sent[3] + next_sent[3])
def _sent_to_dataset_index(self, sent_sizes):
"""
Build index mapping block indices to the underlying dataset indices
"""
dataset_index = []
ds_idx, ds_remaining = -1, 0
for to_consume in sent_sizes:
sent_size = to_consume
if ds_remaining == 0:
ds_idx += 1
ds_remaining = sent_sizes[ds_idx]
start_ds_idx = ds_idx
start_offset = sent_sizes[ds_idx] - ds_remaining
while to_consume > ds_remaining:
to_consume -= ds_remaining
ds_idx += 1
ds_remaining = sent_sizes[ds_idx]
ds_remaining -= to_consume
dataset_index.append(
(
start_ds_idx, # starting index in dataset
start_offset, # starting offset within starting index
ds_idx, # ending index in dataset
sent_size, # sentence length
)
)
assert ds_remaining == 0
assert ds_idx == len(self.dataset) - 1
return dataset_index
    def _generate_sentence_pair(self, doc, doc_id, max_num_tokens, sizes):
        """
        Go through a single document and generate sentence pairs from it.
        Appends to ``self.sent_pairs`` / ``self.sizes`` in place.
        Args:
            doc: list of sentence ids belonging to one document
            doc_id: index of this document in ``self.block_indices``
            max_num_tokens: hard cap on tokens per pair (specials excluded)
            sizes: per-sentence lengths for the whole corpus
        """
        current_chunk = []
        current_length = 0
        curr = 0
        # To provide more randomness, we decrease target seq length for parts of
        # samples (10% by default). Note that max_num_tokens is the hard threshold
        # for batching and will never be changed.
        target_seq_length = max_num_tokens
        if np.random.random() < self.short_seq_prob:
            target_seq_length = np.random.randint(2, max_num_tokens)
        # loop through all sentences in document
        while curr < len(doc):
            sent_id = doc[curr]
            current_chunk.append(sent_id)
            current_length = sum(sizes[current_chunk])
            # split chunk and generate pair when exceed target_seq_length or
            # finish the loop
            if curr == len(doc) - 1 or current_length >= target_seq_length:
                # split the chunk into 2 parts
                a_end = 1
                if len(current_chunk) > 2:
                    a_end = np.random.randint(1, len(current_chunk) - 1)
                sent_a = current_chunk[:a_end]
                len_a = sum(sizes[sent_a])
                # generate next sentence label, note that if there is only 1 sentence
                # in current chunk, label is always 0
                next_sent_label = (
                    1 if np.random.rand() > 0.5 and len(current_chunk) != 1 else 0
                )
                if not next_sent_label:
                    # if next sentence label is 0, sample sent_b from a random doc
                    target_b_length = target_seq_length - len_a
                    rand_doc_id = self._skip_sampling(len(self.block_indices), [doc_id])
                    random_doc = self.block_indices[rand_doc_id]
                    random_start = np.random.randint(0, len(random_doc))
                    sent_b = []
                    len_b = 0
                    for j in range(random_start, len(random_doc)):
                        sent_b.append(random_doc[j])
                        len_b = sum(sizes[sent_b])
                        if len_b >= target_b_length:
                            break
                    # return the second part of the chunk since it's not used
                    num_unused_segments = len(current_chunk) - a_end
                    curr -= num_unused_segments
                else:
                    # if next sentence label is 1, use the second part of chunk as sent_B
                    sent_b = current_chunk[a_end:]
                    len_b = sum(sizes[sent_b])
                # currently sent_a and sent_B may be longer than max_num_tokens,
                # truncate them and return block idx and offsets for them
                sent_a, sent_b = self._truncate_sentences(
                    sent_a, sent_b, max_num_tokens
                )
                self.sent_pairs.append((sent_a, sent_b, next_sent_label))
                # 3 accounts for one [CLS] and two [SEP] tokens
                self.sizes.append(3 + sent_a[3] + sent_b[3])
                current_chunk = []
            curr += 1
def _skip_sampling(self, total, skip_ids):
"""
Generate a random integer which is not in skip_ids. Sample range is [0, total)
TODO: ids in skip_ids should be consecutive, we can extend it to more generic version later
"""
rand_id = np.random.randint(total - len(skip_ids))
return rand_id if rand_id < min(skip_ids) else rand_id + len(skip_ids)
    def _truncate_sentences(self, sent_a, sent_b, max_num_tokens):
        """
        Truncate a pair of sentences to limit total length under max_num_tokens
        Logics:
        1. Truncate longer sentence
        2. Tokens to be truncated could be at the beginning or the end of the sentence
        Returns:
            Truncated sentences represented by dataset idx
        """
        len_a, len_b = sum(self.dataset.sizes[sent_a]), sum(self.dataset.sizes[sent_b])
        front_cut_a = front_cut_b = end_cut_a = end_cut_b = 0
        while True:
            total_length = (
                len_a + len_b - front_cut_a - front_cut_b - end_cut_a - end_cut_b
            )
            if total_length <= max_num_tokens:
                break
            # Shave one token at a time from whichever sentence is currently
            # longer, choosing front or back uniformly at random.
            if len_a - front_cut_a - end_cut_a > len_b - front_cut_b - end_cut_b:
                if np.random.rand() < 0.5:
                    front_cut_a += 1
                else:
                    end_cut_a += 1
            else:
                if np.random.rand() < 0.5:
                    front_cut_b += 1
                else:
                    end_cut_b += 1
        # calculate ds indices as well as offsets and return
        truncated_sent_a = self._cut_sentence(sent_a, front_cut_a, end_cut_a)
        truncated_sent_b = self._cut_sentence(sent_b, front_cut_b, end_cut_b)
        return truncated_sent_a, truncated_sent_b
def _cut_sentence(self, sent, front_cut, end_cut):
"""
Cut a sentence based on the numbers of tokens to be cut from beginning and end
Represent the sentence as dataset idx and return
"""
start_ds_idx, end_ds_idx, offset = sent[0], sent[-1], 0
target_len = sum(self.dataset.sizes[sent]) - front_cut - end_cut
while front_cut > 0:
if self.dataset.sizes[start_ds_idx] > front_cut:
offset += front_cut
break
else:
front_cut -= self.dataset.sizes[start_ds_idx]
start_ds_idx += 1
while end_cut > 0:
if self.dataset.sizes[end_ds_idx] > end_cut:
break
else:
end_cut -= self.dataset.sizes[end_ds_idx]
end_ds_idx -= 1
return start_ds_idx, offset, end_ds_idx, target_len
def _fetch_block(self, start_ds_idx, offset, end_ds_idx, length):
"""
Fetch a block of tokens based on its dataset idx
"""
buffer = torch.cat(
[self.dataset[idx] for idx in range(start_ds_idx, end_ds_idx + 1)]
)
s, e = offset, offset + length
return buffer[s:e]
def __getitem__(self, index):
block1, block2, next_sent_label = self.sent_pairs[index]
block1 = self._fetch_block(*block1)
block2 = self._fetch_block(*block2)
return block1, block2, next_sent_label
    def __len__(self):
        # One entry per generated sentence pair.
        return len(self.sizes)
    @property
    def supports_prefetch(self):
        # Prefetching works only if the wrapped dataset supports it.
        return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
prefetch_idx = set()
for index in indices:
for block1, block2, _ in [self.sent_pairs[index]]:
for ds_idx in range(block1[0], block1[2] + 1):
prefetch_idx.add(ds_idx)
for ds_idx in range(block2[0], block2[2] + 1):
prefetch_idx.add(ds_idx)
self.dataset.prefetch(prefetch_idx)
| 12,878 | 40.146965 | 99 | py |
mix | mix-master/fairseq/data/legacy/masked_lm_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import torch
from typing import Dict, List, Tuple
from fairseq.data import FairseqDataset, data_utils
from fairseq.data import Dictionary
from fairseq.data.legacy.block_pair_dataset import BlockPairDataset
from fairseq.data.token_block_dataset import TokenBlockDataset
from fairseq.data.concat_dataset import ConcatDataset
class MaskedLMDataset(FairseqDataset):
"""
A wrapper Dataset for masked language modelling. The dataset
    wraps around TokenBlockDataset or BlockPairDataset and creates a batch
where the input blocks are masked according to the specified masking
probability. Additionally the batch can also contain sentence level targets
if this is specified.
Args:
dataset: Dataset which generates blocks of data. Only BlockPairDataset
and TokenBlockDataset are supported.
sizes: Sentence lengths
vocab: Dictionary with the vocabulary and special tokens.
pad_idx: Id of padding token in dictionary
mask_idx: Id of mask token in dictionary
classif_token_idx: Id of classification token in dictionary. This is the
token associated with the sentence embedding (Eg: CLS for BERT)
sep_token_idx: Id of separator token in dictionary
(Eg: SEP in BERT)
seed: Seed for random number generator for reproducibility.
shuffle: Shuffle the elements before batching.
has_pairs: Specifies whether the underlying dataset
generates a pair of blocks along with a sentence_target or not.
Setting it to True assumes that the underlying dataset generates a
label for the pair of sentences which is surfaced as
sentence_target. The default value assumes a single block with no
sentence target.
segment_id: An optional segment id for filling in the segment labels
when we are in the single block setting (Eg: XLM). Default is 0.
masking_ratio: specifies what percentage of the blocks should be masked.
masking_prob: specifies the probability of a given token being
replaced with the "MASK" token.
random_token_prob: specifies the probability of a given token being
replaced by a random token from the vocabulary.
"""
    def __init__(
        self,
        dataset: FairseqDataset,
        sizes: np.ndarray,
        vocab: Dictionary,
        pad_idx: int,
        mask_idx: int,
        classif_token_idx: int,
        sep_token_idx: int,
        seed: int = 1,
        shuffle: bool = True,
        has_pairs: bool = True,
        segment_id: int = 0,
        masking_ratio: float = 0.15,
        masking_prob: float = 0.8,
        random_token_prob: float = 0.1
    ):
        """Wrap ``dataset`` for MLM batching; see class docstring for args."""
        # Make sure the input datasets are the ones supported
        assert (
            isinstance(dataset, TokenBlockDataset) or
            isinstance(dataset, BlockPairDataset) or
            isinstance(dataset, ConcatDataset)
        ), "MaskedLMDataset only wraps TokenBlockDataset or BlockPairDataset or " \
           "ConcatDataset"
        self.dataset = dataset
        self.sizes = np.array(sizes)
        self.vocab = vocab
        self.pad_idx = pad_idx
        self.mask_idx = mask_idx
        self.classif_token_idx = classif_token_idx
        self.sep_token_idx = sep_token_idx
        self.shuffle = shuffle
        self.seed = seed
        self.has_pairs = has_pairs
        self.segment_id = segment_id
        self.masking_ratio = masking_ratio
        self.masking_prob = masking_prob
        self.random_token_prob = random_token_prob
        # If we have only one block then sizes needs to be updated to include
        # the classification token
        if not has_pairs:
            self.sizes = self.sizes + 1
def __getitem__(
self,
index: int
):
# if has_pairs, then expect 2 blocks and a sentence target
if self.has_pairs:
(block_one, block_two, sentence_target) = self.dataset[index]
else:
block_one = self.dataset[index]
return {
"id": index,
"block_one": block_one,
"block_two": block_two if self.has_pairs else None,
"sentence_target": sentence_target if self.has_pairs else None,
}
    def __len__(self):
        # One example per element of the wrapped dataset.
        return len(self.dataset)
def _mask_block(
self,
sentence: np.ndarray,
mask_idx: int,
pad_idx: int,
dictionary_token_range: Tuple,
):
"""
Mask tokens for Masked Language Model training
Samples mask_ratio tokens that will be predicted by LM.
Note:This function may not be efficient enough since we had multiple
conversions between np and torch, we can replace them with torch
operators later.
Args:
sentence: 1d tensor to be masked
mask_idx: index to use for masking the sentence
pad_idx: index to use for masking the target for tokens we aren't
predicting
dictionary_token_range: range of indices in dictionary which can
be used for random word replacement
(e.g. without special characters)
Return:
masked_sent: masked sentence
target: target with words which we are not predicting replaced
by pad_idx
"""
masked_sent = np.copy(sentence)
sent_length = len(sentence)
mask_num = math.ceil(sent_length * self.masking_ratio)
mask = np.random.choice(sent_length, mask_num, replace=False)
target = np.copy(sentence)
for i in range(sent_length):
if i in mask:
rand = np.random.random()
# replace with mask if probability is less than masking_prob
# (Eg: 0.8)
if rand < self.masking_prob:
masked_sent[i] = mask_idx
# replace with random token if probability is less than
# masking_prob + random_token_prob (Eg: 0.9)
elif rand < (self.masking_prob + self.random_token_prob):
# sample random token from dictionary
masked_sent[i] = (
np.random.randint(
dictionary_token_range[0], dictionary_token_range[1]
)
)
else:
target[i] = pad_idx
return masked_sent, target
    def _collate(
        self,
        samples: List[Dict],
        pad_idx: int,
        eos_idx: int
    ):
        """
        Does the heavy lifting for creating a batch from the input list of
        examples. The logic is as follows:
        1. Mask the input blocks. In case has_pair is True then we have 2
           blocks to mask.
        2. Prepend the first masked block tensor with the special token
           used as sentence embedding. Eg: CLS in BERT. This happens
           irrespective of the value of has_pair.
        3. If has_pair is True, then append the first masked block with the
           special separator token (eg: SEP for BERT) and compute segment
           label accordingly. In this case, also append the second masked
           block with this special separator token and compute its segment
           label.
        4. For the targets tensor, prepend and append with padding index
           accordingly.
        5. Concatenate all tensors.
        """
        if len(samples) == 0:
            return {}
        # To ensure determinism, we reset the state of the PRNG after every
        # batch based on the seed and the first id of the batch. This ensures
        # that across epochs we get the same mask for the same example. This
        # is needed for reproducibility and is how BERT does masking
        # TODO: Can we add deteminism without this constraint?
        with data_utils.numpy_seed(self.seed + samples[0]["id"]):
            for s in samples:
                # token range is needed for replacing with random token during
                # masking
                token_range = (self.vocab.nspecial, len(self.vocab))
                # mask according to specified probabilities.
                masked_blk_one, masked_tgt_one = self._mask_block(
                    s["block_one"], self.mask_idx, self.pad_idx, token_range,
                )
                # Prepend [CLS]; its LM target is pad (never predicted).
                tokens = np.concatenate([
                    [self.classif_token_idx], masked_blk_one
                ])
                targets = np.concatenate([[self.pad_idx], masked_tgt_one])
                segments = np.ones(len(tokens)) * self.segment_id
                # if has_pairs is True then we need to add the SEP token to both
                # the blocks after masking and re-compute segments based on the new
                # lengths.
                if self.has_pairs:
                    tokens_one = np.concatenate([tokens, [self.sep_token_idx]])
                    targets_one = np.concatenate([targets, [self.pad_idx]])
                    masked_blk_two, masked_tgt_two = self._mask_block(
                        s["block_two"], self.mask_idx, self.pad_idx, token_range)
                    tokens_two = np.concatenate(
                        [masked_blk_two, [self.sep_token_idx]])
                    targets_two = np.concatenate([masked_tgt_two, [self.pad_idx]])
                    # block + 1 sep + 1 special (CLS)
                    segments_one = np.zeros(len(tokens_one))
                    # block + 1 sep
                    segments_two = np.ones(len(tokens_two))
                    tokens = np.concatenate([tokens_one, tokens_two])
                    targets = np.concatenate([targets_one, targets_two])
                    segments = np.concatenate([segments_one, segments_two])
                s["source"] = torch.LongTensor(tokens)
                s["segment_labels"] = torch.LongTensor(segments)
                s["lm_target"] = torch.LongTensor(targets)
        def merge(key):
            # Right-pad each sample's `key` tensor to the batch max length.
            return data_utils.collate_tokens(
                [s[key] for s in samples], pad_idx, eos_idx, left_pad=False
            )
        return {
            "id": torch.LongTensor([s["id"] for s in samples]),
            "ntokens": sum(len(s["source"]) for s in samples),
            "net_input": {
                "src_tokens": merge("source"),
                "segment_labels": merge("segment_labels"),
            },
            "lm_target": merge("lm_target"),
            "sentence_target": torch.LongTensor(
                [s["sentence_target"] for s in samples]
            ) if self.has_pairs else None,
            "nsentences": len(samples),
        }
def collater(
self,
samples: List[Dict]
):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch of data
"""
return self._collate(samples, self.vocab.pad(), self.vocab.eos())
    def num_tokens(
        self,
        index: int
    ):
        """
        Return the number of tokens in a sample. This value is used to
        enforce max-tokens during batching.
        """
        # sizes already include the extra [CLS] when has_pairs is False
        # (adjusted in __init__).
        return self.sizes[index]
    def size(
        self,
        index: int
    ):
        """
        Return an example's size as a float or tuple. This value is used when
        filtering a dataset with max-positions.
        """
        return self.sizes[index]
def ordered_indices(self):
"""
Return an ordered list of indices. Batches will be constructed based
on this order.
"""
if self.shuffle:
return np.random.permutation(len(self))
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)
    @property
    def supports_prefetch(self):
        # Prefetching works only if the wrapped dataset supports it.
        return getattr(self.dataset, "supports_prefetch", False)
    def prefetch(self, indices):
        # Delegate prefetching to the wrapped dataset.
        self.dataset.prefetch(indices)
| 12,468 | 37.603715 | 83 | py |
mix | mix-master/fairseq/tasks/translation_from_pretrained_bart.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.data import LanguagePairDataset
from .translation import load_langpair_dataset, TranslationTask
from . import register_task
@register_task('translation_from_pretrained_bart')
class TranslationFromPretrainedBARTTask(TranslationTask):
"""
Translate from source language to target language with a model initialized with a multilingual pretrain.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        # Inherit all standard translation-task arguments first.
        TranslationTask.add_args(parser)
        # --langs must list the pretraining languages in the same order used
        # at pretraining time, so the [lang] sentinel indices line up.
        parser.add_argument('--langs', required=True, metavar='LANG',
                            help='comma-separated list of monolingual language, for example, "en,de,fr"'
                                 'be careful these langs are what you used for pretraining (the same order),'
                                 'not for finetuning.'
                                 'you should always add all pretraining language idx during finetuning.')
        # fmt: on
    def __init__(self, args, src_dict, tgt_dict):
        super().__init__(args, src_dict, tgt_dict)
        self.langs = args.langs.split(',')
        # Extend both dictionaries with one '[lang]' sentinel per pretraining
        # language plus '<mask>', so token indices match the pretrained
        # multilingual model's vocabulary.
        for d in [src_dict, tgt_dict]:
            for l in self.langs:
                d.add_symbol('[{}]'.format(l))
            d.add_symbol('<mask>')
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = self.args.data.split(':')
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path, split, src, self.src_dict, tgt, self.tgt_dict,
combine=combine, dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=getattr(self.args, 'max_source_positions', 1024),
max_target_positions=getattr(self.args, 'max_target_positions', 1024),
load_alignments=self.args.load_alignments,
prepend_bos=getattr(self.args, 'preprend_bos', False),
append_source_id=True
)
def build_generator(self, args):
if getattr(args, 'score_reference', False):
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(
self.target_dictionary,
eos=self.tgt_dict.index('[{}]'.format(self.target_lang))
)
else:
from fairseq.sequence_generator import SequenceGenerator
return SequenceGenerator(
self.target_dictionary,
beam_size=getattr(args, 'beam', 5),
max_len_a=getattr(args, 'max_len_a', 0),
max_len_b=getattr(args, 'max_len_b', 200),
min_len=getattr(args, 'min_len', 1),
normalize_scores=(not getattr(args, 'unnormalized', False)),
len_penalty=getattr(args, 'lenpen', 1),
unk_penalty=getattr(args, 'unkpen', 0),
temperature=getattr(args, 'temperature', 1.),
match_source_len=getattr(args, 'match_source_len', False),
no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0),
eos=self.tgt_dict.index('[{}]'.format(self.args.target_lang))
)
def build_dataset_for_inference(self, src_tokens, src_lengths):
src_lang_id = self.source_dictionary.index('[{}]'.format(self.args.source_lang))
source_tokens = []
for s_t in src_tokens:
s_t = torch.cat([s_t, s_t.new(1).fill_(src_lang_id)])
source_tokens.append(s_t)
dataset = LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary)
return dataset
| 4,719 | 40.403509 | 109 | py |
mix | mix-master/fairseq/tasks/translation_self_distill.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import Namespace
import json
import itertools
import logging
import os
import torch
import numpy as np
from fairseq import metrics, options, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
ConcatSentencesDataset,
data_utils,
encoders,
indexed_dataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
)
from fairseq.tasks import FairseqTask, register_task
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
    data_path, split,
    src, src_dict,
    tgt, tgt_dict,
    combine, dataset_impl, upsample_primary,
    left_pad_source, left_pad_target, max_source_positions,
    max_target_positions, prepend_bos=False, load_alignments=False,
    truncate_source=False, append_source_id=False,
    concat_datasets=False,
    concat_sents_datasets=False,
):
    """Load (possibly sharded) source/target data and wrap it as a
    LanguagePairDataset.

    Shards are files named '<split>', '<split>1', '<split>2', ... under
    ``data_path``; when ``combine`` is True all shards are loaded and merged
    according to ``concat_datasets`` / ``concat_sents_datasets``.
    """
    def split_exists(split, src, tgt, lang, data_path):
        # A shard exists if the indexed file '<split>.<src>-<tgt>.<lang>' does.
        filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
        return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
    src_datasets = []
    tgt_datasets = []
    for k in itertools.count():
        split_k = split + (str(k) if k > 0 else '')
        # infer langcode
        if split_exists(split_k, src, tgt, src, data_path):
            prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
        elif split_exists(split_k, tgt, src, src, data_path):
            prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))
        else:
            if k > 0:
                break
            else:
                raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
        src_dataset = data_utils.load_indexed_dataset(prefix + src, src_dict, dataset_impl)
        if truncate_source:
            # Strip eos, clip to max_source_positions - 1, then re-append eos.
            src_dataset = AppendTokenDataset(
                TruncateDataset(
                    StripTokenDataset(src_dataset, src_dict.eos()),
                    max_source_positions - 1,
                ),
                src_dict.eos(),
            )
        src_datasets.append(src_dataset)
        tgt_dataset = data_utils.load_indexed_dataset(prefix + tgt, tgt_dict, dataset_impl)
        if tgt_dataset is not None:
            tgt_datasets.append(tgt_dataset)
        logger.info('{} {} {}-{} {} examples'.format(
            data_path, split_k, src, tgt, len(src_datasets[-1])
        ))
        if not combine:
            break
    assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
    if len(src_datasets) == 1:
        src_dataset = src_datasets[0]
        tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
    else:
        # Multiple shards: merge either example-wise (ConcatDataset, with the
        # primary shard optionally upsampled) or sentence-wise.
        if concat_datasets:
            sample_ratios = [1] * len(src_datasets)
            sample_ratios[0] = upsample_primary
            src_dataset = ConcatDataset(src_datasets, sample_ratios)
        elif concat_sents_datasets:
            src_dataset = ConcatSentencesDataset(src_datasets)
        else:
            raise ValueError('You forget to concatenate source datasets')
        if len(tgt_datasets) > 0:
            if concat_datasets:
                tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
            elif concat_sents_datasets:
                tgt_dataset = ConcatSentencesDataset(tgt_datasets)
            else:
                raise ValueError("You forget to concatenate tgt datasets")
        else:
            tgt_dataset = None
    if prepend_bos:
        assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
        src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
        if tgt_dataset is not None:
            tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
    eos = None
    if append_source_id:
        # Append '[src]' / '[tgt]' language sentinels; generation then stops
        # on the target-language sentinel instead of the default </s>.
        src_dataset = AppendTokenDataset(src_dataset, src_dict.index('[{}]'.format(src)))
        if tgt_dataset is not None:
            tgt_dataset = AppendTokenDataset(tgt_dataset, tgt_dict.index('[{}]'.format(tgt)))
        eos = tgt_dict.index('[{}]'.format(tgt))
    align_dataset = None
    if load_alignments:
        align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt))
        if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
            align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl)
    tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
    return LanguagePairDataset(
        src_dataset, src_dataset.sizes, src_dict,
        tgt_dataset, tgt_dataset_sizes, tgt_dict,
        left_pad_source=left_pad_source,
        left_pad_target=left_pad_target,
        max_source_positions=max_source_positions,
        max_target_positions=max_target_positions,
        align_dataset=align_dataset, eos=eos
    )
@register_task('translation_self_distill')
class TranslationSelfDistillTask(FairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('data', help='colon separated path to data directories list, \
                            will be iterated upon during epochs in round-robin manner')
        parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
                            help='source language')
        parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
                            help='target language')
        parser.add_argument('--load-alignments', action='store_true',
                            help='load the binarized alignments')
        parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
                            help='pad the source on the left')
        parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
                            help='pad the target on the left')
        parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the target sequence')
        parser.add_argument('--upsample-primary', default=1, type=int,
                            help='amount to upsample primary dataset')
        parser.add_argument('--truncate-source', action='store_true', default=False,
                            help='truncate source to max-source-positions')
        # Shard-merging strategy: exactly one of these must be set when
        # combining multiple shards (see load_langpair_dataset).
        parser.add_argument('--concat-datasets', action='store_true', default=False,
                            help='concatenate multiple datasets')
        parser.add_argument('--sentence-concat-datasets', action='store_true', default=False,
                            help='concatenate multiple sentences from different datasets')
        # options for reporting BLEU during validation
        parser.add_argument('--eval-bleu', action='store_true',
                            help='evaluation with BLEU scores')
        parser.add_argument('--eval-bleu-detok', type=str, default="space",
                            help='detokenizer before computing BLEU (e.g., "moses"); '
                                 'required if using --eval-bleu; use "space" to '
                                 'disable detokenization; see fairseq.data.encoders '
                                 'for other options')
        parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
                            help='args for building the tokenizer, if needed')
        parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
                            help='if setting, we compute tokenized BLEU instead of sacrebleu')
        parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
                            help='remove BPE before computing BLEU')
        parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
                            help='generation args for BLUE scoring, '
                                 'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
        parser.add_argument('--eval-bleu-print-samples', action='store_true',
                            help='print sample generations during validation')
        # fmt: on
    def __init__(self, args, src_dict, tgt_dict):
        super().__init__(args)
        # Dictionaries are loaded/validated in setup_task and passed in here.
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict
    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup the task (e.g., load dictionaries).
        Args:
            args (argparse.Namespace): parsed command-line arguments
        """
        # CLI passes these as 'True'/'False' strings; convert to bool.
        args.left_pad_source = options.eval_bool(args.left_pad_source)
        args.left_pad_target = options.eval_bool(args.left_pad_target)
        paths = utils.split_paths(args.data)
        assert len(paths) > 0
        # find language pair automatically
        if args.source_lang is None or args.target_lang is None:
            args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0])
        if args.source_lang is None or args.target_lang is None:
            raise Exception('Could not infer language pair, please provide it explicitly')
        # load dictionaries
        src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.source_lang)))
        tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.target_lang)))
        # Source and target dictionaries must agree on special-token ids.
        assert src_dict.pad() == tgt_dict.pad()
        assert src_dict.eos() == tgt_dict.eos()
        assert src_dict.unk() == tgt_dict.unk()
        logger.info('[{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
        logger.info('[{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))
        return cls(args, src_dict, tgt_dict)
    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a given dataset split.
        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        paths = utils.split_paths(self.args.data)
        assert len(paths) > 0
        # When several data shards are given, round-robin over them by epoch.
        data_path = paths[(epoch - 1) % len(paths)]
        # infer langcode
        src, tgt = self.args.source_lang, self.args.target_lang
        self.datasets[split] = load_langpair_dataset(
            data_path, split, src, self.src_dict, tgt, self.tgt_dict,
            combine=combine, dataset_impl=self.args.dataset_impl,
            upsample_primary=self.args.upsample_primary,
            left_pad_source=self.args.left_pad_source,
            left_pad_target=self.args.left_pad_target,
            max_source_positions=self.args.max_source_positions,
            max_target_positions=self.args.max_target_positions,
            load_alignments=self.args.load_alignments,
            truncate_source=self.args.truncate_source,
            # Fork-specific options controlling example concatenation.
            concat_datasets=self.args.concat_datasets,
            concat_sents_datasets=self.args.sentence_concat_datasets,
        )
def build_dataset_for_inference(self, src_tokens, src_lengths):
return LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary)
def begin_epoch(self, epoch, model):
if hasattr(model, 'set_epoch') and model != self:
model.set_epoch(epoch)
    def build_model(self, args):
        """Build the model; when --eval-bleu is set, also prepare the
        detokenizer and sequence generator used for validation BLEU."""
        if getattr(args, 'eval_bleu', False):
            assert getattr(args, 'eval_bleu_detok', None) is not None, (
                '--eval-bleu-detok is required if using --eval-bleu; '
                'try --eval-bleu-detok=moses (or --eval-bleu-detok=space '
                'to disable detokenization, e.g., when using sentencepiece)'
            )
            detok_args = json.loads(getattr(args, 'eval_bleu_detok_args', '{}') or '{}')
            self.tokenizer = encoders.build_tokenizer(Namespace(
                tokenizer=getattr(args, 'eval_bleu_detok', None),
                **detok_args
            ))
            # NOTE(review): this unconditionally clobbers any user-supplied
            # --eval-bleu-args with a fixed generation config, which makes the
            # getattr() fallback on the next line pointless — confirm intended.
            args.eval_bleu_args = '{"beam": 5, "max_len_a": 1.2, "max_len_b": 10}'
            gen_args = json.loads(getattr(args, 'eval_bleu_args', '{}') or '{}')
            self.sequence_generator = self.build_generator(Namespace(**gen_args))
        return super().build_model(args)
    def separate_batches(self, batch):
        """Split a batch whose target rows each hold two concatenated,
        equal-length sequences (real target followed by a distilled target)
        into separate tensors, storing the second half as 'distilled_target'.

        Assumes the pad index is 1 (hardcoded below) and that every sequence
        length is even, i.e. exactly two halves per row.
        """
        src_lengths = batch['net_input']['src_lengths']
        src_tokens = batch['net_input']['src_tokens']
        prev_output_tokens = batch['net_input']['prev_output_tokens']
        target = batch['target']
        # Recover target lengths by counting non-pad tokens (pad idx == 1).
        tgt_lengths = target.ne(1).sum(dim=1)
        def _get_length_tokens(lengths):
            # Halve the lengths and allocate a pad-filled buffer of halved width.
            max_len = (lengths.max().float() / 2).long()
            assert torch.all(lengths % 2 == 0), 'distilled targets are not of the same length as the original targets'
            real_lengths = (lengths.float() / 2).long()
            tokens = lengths.new(lengths.size(0), max_len).long().fill_(1)
            return real_lengths, tokens
        real_src_lengths, real_src_tokens = _get_length_tokens(src_lengths)
        for i, l in enumerate(real_src_lengths):
            # src tokens are left-padded, so the real sequence is the row's tail.
            # assumes source rows are likewise doubled — TODO confirm with caller
            real_src_tokens[i, -l:] = src_tokens[i, -l:]
        real_tgt_lengths, real_tgt_tokens = _get_length_tokens(tgt_lengths)
        distilled_tgt_tokens = real_tgt_tokens.clone().detach()
        for i, l in enumerate(real_tgt_lengths):
            # First half of each row is the real target, second half the distilled one.
            real_tgt_tokens[i, :l] = target[i, :l]
            distilled_tgt_tokens[i, :l] = target[i, l:2*l]
        real_prev_output_tokens = prev_output_tokens.new(tgt_lengths.size(0), (tgt_lengths.max().float() / 2).long()).fill_(1)
        for i, l in enumerate(real_tgt_lengths):
            real_prev_output_tokens[i, :l] = prev_output_tokens[i, :l]
        # Rewrite the batch in place with the halved tensors.
        batch['target'] = real_tgt_tokens
        batch['net_input']['src_tokens'] = real_src_tokens
        batch['net_input']['src_lengths'] = real_src_lengths
        batch['net_input']['prev_output_tokens'] = real_prev_output_tokens
        batch['distilled_target'] = distilled_tgt_tokens
        return batch
    def train_step(
        self, sample, model, criterion, optimizer, update_num, ignore_grad=False
    ):
        """
        Do forward and backward, and return the loss as computed by *criterion*
        for the given *model* and *sample*.
        Args:
            sample (dict): the mini-batch. The format is defined by the
                :class:`~fairseq.data.FairseqDataset`.
            model (~fairseq.models.BaseFairseqModel): the model
            criterion (~fairseq.criterions.FairseqCriterion): the criterion
            optimizer (~fairseq.optim.FairseqOptimizer): the optimizer
            update_num (int): the current update
            ignore_grad (bool): multiply loss by 0 if this is set to True
        Returns:
            tuple:
                - the loss
                - the sample size, which is used as the denominator for the
                  gradient
                - logging outputs to display while training
        """
        model.train()
        model.set_num_updates(update_num)
        # Split the combined (real + distilled) batch before computing the loss.
        sample = self.separate_batches(sample)
        loss, sample_size, logging_output = criterion(model, sample)
        if ignore_grad:
            # Zero the loss so backward() contributes no gradient (dummy batch).
            loss *= 0
        optimizer.backward(loss)
        return loss, sample_size, logging_output
    def valid_step(self, sample, model, criterion):
        """Run a validation step; when --eval-bleu is set, also accumulate
        per-order BLEU counts into the logging output."""
        loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
        if self.args.eval_bleu:
            bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
            logging_output['_bleu_sys_len'] = bleu.sys_len
            logging_output['_bleu_ref_len'] = bleu.ref_len
            # we split counts into separate entries so that they can be
            # summed efficiently across workers using fast-stat-sync
            assert len(bleu.counts) == EVAL_BLEU_ORDER
            for i in range(EVAL_BLEU_ORDER):
                logging_output['_bleu_counts_' + str(i)] = bleu.counts[i]
                logging_output['_bleu_totals_' + str(i)] = bleu.totals[i]
        return loss, sample_size, logging_output
    def reduce_metrics(self, logging_outputs, criterion):
        """Aggregate logging outputs across workers; when --eval-bleu is set,
        register a derived 'bleu' metric computed from the summed counts."""
        super().reduce_metrics(logging_outputs, criterion)
        if self.args.eval_bleu:
            def sum_logs(key):
                # Sum a key over all workers' logging outputs (missing -> 0).
                return sum(log.get(key, 0) for log in logging_outputs)
            counts, totals = [], []
            for i in range(EVAL_BLEU_ORDER):
                counts.append(sum_logs('_bleu_counts_' + str(i)))
                totals.append(sum_logs('_bleu_totals_' + str(i)))
            if max(totals) > 0:
                # log counts as numpy arrays -- log_scalar will sum them correctly
                metrics.log_scalar('_bleu_counts', np.array(counts))
                metrics.log_scalar('_bleu_totals', np.array(totals))
                metrics.log_scalar('_bleu_sys_len', sum_logs('_bleu_sys_len'))
                metrics.log_scalar('_bleu_ref_len', sum_logs('_bleu_ref_len'))
                def compute_bleu(meters):
                    import inspect
                    import sacrebleu
                    # Handle the smoothing-kwarg rename across sacrebleu versions.
                    fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
                    if 'smooth_method' in fn_sig:
                        smooth = {'smooth_method': 'exp'}
                    else:
                        smooth = {'smooth': 'exp'}
                    bleu = sacrebleu.compute_bleu(
                        correct=meters['_bleu_counts'].sum,
                        total=meters['_bleu_totals'].sum,
                        sys_len=meters['_bleu_sys_len'].sum,
                        ref_len=meters['_bleu_ref_len'].sum,
                        **smooth
                    )
                    return round(bleu.score, 2)
                metrics.log_derived('bleu', compute_bleu)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
    @property
    def source_dictionary(self):
        """Return the source :class:`~fairseq.data.Dictionary` (the *src_dict*
        passed at construction time)."""
        return self.src_dict
    @property
    def target_dictionary(self):
        """Return the target :class:`~fairseq.data.Dictionary` (the *tgt_dict*
        passed at construction time)."""
        return self.tgt_dict
    def _inference_with_bleu(self, generator, sample, model):
        """Generate hypotheses for *sample* and score them against the
        references with sacrebleu, returning the corpus BLEU object."""
        import sacrebleu
        def decode(toks, escape_unk=False):
            # Convert token ids into a detokenized string, optionally passing
            # it through the configured tokenizer's decode step.
            s = self.tgt_dict.string(
                toks.int().cpu(),
                self.args.eval_bleu_remove_bpe,
                escape_unk=escape_unk,
            )
            if self.tokenizer:
                s = self.tokenizer.decode(s)
            return s
        gen_out = self.inference_step(generator, [model], sample, None)
        hyps, refs = [], []
        for i in range(len(gen_out)):
            # Take the top-scoring hypothesis for each sentence.
            hyps.append(decode(gen_out[i][0]['tokens']))
            refs.append(decode(
                utils.strip_pad(sample['target'][i], self.tgt_dict.pad()),
                escape_unk=True,  # don't count <unk> as matches to the hypo
            ))
        if self.args.eval_bleu_print_samples:
            logger.info('example hypothesis: ' + hyps[0])
            logger.info('example reference: ' + refs[0])
        # NOTE(review): sacrebleu.DEFAULT_TOKENIZER was removed in sacrebleu 2.x;
        # this assumes a 1.x sacrebleu — confirm the pinned version.
        tokenize = sacrebleu.DEFAULT_TOKENIZER if not self.args.eval_tokenized_bleu else 'none'
        return sacrebleu.corpus_bleu(hyps, [refs], tokenize=tokenize)
| 20,267 | 42.493562 | 126 | py |
mix | mix-master/fairseq/tasks/language_modeling.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
import torch
from fairseq import utils
from fairseq.data import (
data_utils,
Dictionary,
IdDataset,
MonolingualDataset,
NestedDictionaryDataset,
NumelDataset,
PadDataset,
PrependTokenDataset,
StripTokenDataset,
TokenBlockDataset,
TransformEosDataset,
TruncateDataset,
TruncatedDictionary,
)
from fairseq.tasks import FairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("language_modeling")
class LanguageModelingTask(FairseqTask):
    """
    Train a language model.
    Args:
        dictionary (~fairseq.data.Dictionary): the dictionary for the input of
            the language model
        output_dictionary (~fairseq.data.Dictionary): the dictionary for the
            output of the language model. In most cases it will be the same as
            *dictionary*, but could possibly be a more limited version of the
            dictionary (if ``--output-dictionary-size`` is used).
        targets (List[str]): list of the target types that the language model
            should predict. Can be one of "self", "future", and "past".
            Defaults to "future".
    .. note::
        The language modeling task is compatible with :mod:`fairseq-train`,
        :mod:`fairseq-generate`, :mod:`fairseq-interactive` and
        :mod:`fairseq-eval-lm`.
    The language modeling task provides the following additional command-line
    arguments:
    .. argparse::
        :ref: fairseq.tasks.language_modeling_parser
        :prog:
    """
    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('data', help='path to data directory')
        parser.add_argument('--sample-break-mode', default='none',
                            choices=['none', 'complete', 'complete_doc', 'eos'],
                            help='If omitted or "none", fills each sample with tokens-per-sample '
                                 'tokens. If set to "complete", splits samples only at the end '
                                 'of sentence, but may include multiple sentences per sample. '
                                 '"complete_doc" is similar but respects doc boundaries. '
                                 'If set to "eos", includes only one sentence per sample.')
        parser.add_argument('--tokens-per-sample', default=1024, type=int,
                            help='max number of tokens per sample for LM dataset')
        parser.add_argument('--output-dictionary-size', default=-1, type=int,
                            help='limit the size of output dictionary')
        parser.add_argument('--self-target', action='store_true',
                            help='include self target')
        parser.add_argument('--future-target', action='store_true',
                            help='include future target')
        parser.add_argument('--past-target', action='store_true',
                            help='include past target')
        parser.add_argument('--add-bos-token', action='store_true',
                            help='prepend beginning of sentence token (<s>)')
        parser.add_argument('--max-target-positions', type=int, metavar='N',
                            help='max number of tokens in the target sequence')
        parser.add_argument('--truncate-sequence', action='store_true', default=False,
                            help='truncate sequences to --tokens-per-sample')
        # fmt: on
    def __init__(self, args, dictionary, output_dictionary=None, targets=None):
        super().__init__(args)
        self.dictionary = dictionary
        # Fall back to the input dictionary when no separate output one is given.
        self.output_dictionary = output_dictionary or dictionary
        if targets is None:
            targets = ["future"]
        self.targets = targets
    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup the task (e.g., load dictionaries).
        Args:
            args (argparse.Namespace): parsed command-line arguments
        """
        dictionary = None
        output_dictionary = None
        if args.data:
            paths = utils.split_paths(args.data)
            assert len(paths) > 0
            dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
            logger.info("dictionary: {} types".format(len(dictionary)))
            output_dictionary = dictionary
            if args.output_dictionary_size >= 0:
                # Restrict the output vocabulary to the first N symbols.
                output_dictionary = TruncatedDictionary(
                    dictionary, args.output_dictionary_size
                )
        # upgrade old checkpoints
        if hasattr(args, "exclude_self_target"):
            args.self_target = not args.exclude_self_target
        targets = []
        if getattr(args, "self_target", False):
            targets.append("self")
        if getattr(args, "future_target", False):
            targets.append("future")
        if getattr(args, "past_target", False):
            targets.append("past")
        if len(targets) == 0:
            # standard language modeling
            targets = ["future"]
        return cls(args, dictionary, output_dictionary, targets=targets)
    def build_model(self, args):
        model = super().build_model(args)
        # Ensure the model can produce every target type this task requires.
        for target in self.targets:
            if target not in model.supported_targets:
                raise ValueError(
                    "Unsupported language modeling target: {}".format(target)
                )
        return model
    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a given dataset split.
        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        paths = utils.split_paths(self.args.data)
        assert len(paths) > 0
        # Round-robin over data shards across epochs.
        data_path = paths[(epoch - 1) % len(paths)]
        split_path = os.path.join(data_path, split)
        dataset = data_utils.load_indexed_dataset(
            split_path, self.dictionary, self.args.dataset_impl, combine=combine
        )
        if dataset is None:
            raise FileNotFoundError(
                "Dataset not found: {} ({})".format(split, split_path)
            )
        if self.args.truncate_sequence:
            dataset = TruncateDataset(dataset, self.args.tokens_per_sample)
        # Chunk the corpus into token blocks; include_targets attaches the
        # shifted target sequences needed for LM training.
        dataset = TokenBlockDataset(
            dataset,
            dataset.sizes,
            self.args.tokens_per_sample,
            pad=self.dictionary.pad(),
            eos=self.dictionary.eos(),
            break_mode=self.args.sample_break_mode,
            include_targets=True,
        )
        add_eos_for_other_targets = (
            self.args.sample_break_mode is not None
            and self.args.sample_break_mode != "none"
        )
        self.datasets[split] = MonolingualDataset(
            dataset,
            dataset.sizes,
            self.dictionary,
            self.output_dictionary,
            add_eos_for_other_targets=add_eos_for_other_targets,
            shuffle=True,
            targets=self.targets,
            add_bos_token=self.args.add_bos_token,
        )
    def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
        """
        Generate batches for inference. We prepend an eos token to src_tokens
        (or bos if `--add-bos-token` is set) and we append an eos to target.
        This is convenient both for generation with a prefix and LM scoring.
        """
        tgt_dataset = TokenBlockDataset(
            src_tokens,
            src_lengths,
            block_size=None,  # ignored for "eos" break mode
            pad=self.source_dictionary.pad(),
            eos=self.source_dictionary.eos(),
            break_mode="eos",
        )
        src_dataset = PrependTokenDataset(
            StripTokenDataset(
                tgt_dataset,
                # remove eos from (end of) target sequence
                self.source_dictionary.eos(),
            ),
            token=(
                self.source_dictionary.bos()
                if getattr(self.args, "add_bos_token", False)
                else self.source_dictionary.eos()
            ),
        )
        return NestedDictionaryDataset(
            {
                "id": IdDataset(),
                "net_input": {
                    "src_tokens": PadDataset(src_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False),
                    "src_lengths": NumelDataset(src_dataset, reduce=False),
                },
                "target": PadDataset(tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False),
            },
            sizes=[np.array(src_lengths)],
        )
    def inference_step(self, generator, models, sample, prefix_tokens=None):
        with torch.no_grad():
            # Generation will always be conditioned on bos_token
            if getattr(self.args, "add_bos_token", False):
                bos_token = self.source_dictionary.bos()
            else:
                bos_token = self.source_dictionary.eos()
            # SequenceGenerator doesn't use src_tokens directly, we need to
            # pass the `prefix_tokens` argument instead
            if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
                prefix_tokens = sample["net_input"]["src_tokens"]
                if prefix_tokens[:, 0].eq(bos_token).all():
                    # Drop the leading bos; the generator re-adds it itself.
                    prefix_tokens = prefix_tokens[:, 1:]
            return generator.generate(
                models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token,
            )
    @property
    def source_dictionary(self):
        """Return the :class:`~fairseq.data.Dictionary` for the language
        model."""
        return self.dictionary
    @property
    def target_dictionary(self):
        """Return the :class:`~fairseq.data.Dictionary` for the language
        model."""
        return self.output_dictionary
| 10,106 | 36.712687 | 112 | py |
mix | mix-master/fairseq/tasks/translation_struct.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import itertools
import os
import numpy as np
import torch
from fairseq import bleu, utils
from fairseq.data import Dictionary, language_pair_dataset
from fairseq.sequence_generator import SequenceGenerator
from fairseq.tasks import register_task, translation
class BleuScorer(object):
    """Sequence-level cost scorer: cost = 1 - BLEU/100 per (ref, hypo) pair."""

    key = 'bleu'

    def __init__(self, tgt_dict, bpe_symbol='@@ '):
        self.tgt_dict = tgt_dict
        self.bpe_symbol = bpe_symbol
        self.scorer = bleu.Scorer(tgt_dict.pad(), tgt_dict.eos(), tgt_dict.unk())
        # Score against a fresh Dictionary so new (unseen) tokens can be added.
        self.scoring_dict = Dictionary()

    def _reencode(self, text):
        # Map a detokenized string into the scoring dictionary's id space.
        return self.scoring_dict.encode_line(text, add_if_not_exist=True)

    def preprocess_ref(self, ref):
        """Detokenize a reference tensor and re-encode it for scoring."""
        detok = self.tgt_dict.string(ref, bpe_symbol=self.bpe_symbol, escape_unk=True)
        return self._reencode(detok)

    def preprocess_hypo(self, hypo):
        """Detokenize a hypothesis dict ('tokens' key) and re-encode it."""
        detok = self.tgt_dict.string(hypo['tokens'].int().cpu(), bpe_symbol=self.bpe_symbol)
        return self._reencode(detok)

    def get_cost(self, ref, hypo):
        """Return 1 - BLEU/100 for a single preprocessed (ref, hypo) pair."""
        self.scorer.reset(one_init=True)
        self.scorer.add(ref, hypo)
        return 1. - (self.scorer.score() / 100.)

    def postprocess_costs(self, costs):
        """Identity hook; subclasses may rescale the cost matrix here."""
        return costs
@register_task('translation_struct')
class TranslationStructuredPredictionTask(translation.TranslationTask):
    """
    Translate from one (source) language to another (target) language.
    Compared to :class:`TranslationTask`, this version performs
    generation during training and computes sequence-level losses.
    Args:
        src_dict (Dictionary): dictionary for the source language
        tgt_dict (Dictionary): dictionary for the target language
    .. note::
        The translation task is compatible with :mod:`train.py <train>`,
        :mod:`generate.py <generate>` and :mod:`interactive.py <interactive>`.
    The translation task provides the following additional command-line
    arguments:
    .. argparse::
        :ref: fairseq.tasks.translation_parser
        :prog:
    """
    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        translation.TranslationTask.add_args(parser)
        parser.add_argument('--seq-beam', default=5, type=int, metavar='N',
                            help='beam size for sequence training')
        parser.add_argument('--seq-keep-reference', default=False, action='store_true',
                            help='retain the reference in the list of hypos')
        parser.add_argument('--seq-scorer', default='bleu', metavar='SCORER',
                            choices=['bleu'],
                            help='optimization metric for sequence level training')
        parser.add_argument('--seq-gen-with-dropout', default=False, action='store_true',
                            help='use dropout to generate hypos')
        parser.add_argument('--seq-max-len-a', default=0, type=float, metavar='N',
                            help='generate sequences of maximum length ax + b, '
                                 'where x is the source length')
        parser.add_argument('--seq-max-len-b', default=200, type=int, metavar='N',
                            help='generate sequences of maximum length ax + b, '
                                 'where x is the source length')
        parser.add_argument('--seq-remove-bpe', nargs='?', const='@@ ', default=None,
                            help='remove BPE tokens before scoring')
        parser.add_argument('--seq-sampling', default=False, action='store_true',
                            help='use sampling instead of beam search')
        parser.add_argument('--seq-unkpen', default=0, type=float,
                            help='unknown word penalty to be used in seq generation')
    def __init__(self, args, src_dict, tgt_dict):
        super().__init__(args, src_dict, tgt_dict)
        # Sequence generator and per-metric scorers are built lazily.
        self._generator = None
        self._scorers = {}
    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup the task (e.g., load dictionaries).
        Args:
            args (argparse.Namespace): parsed command-line arguments
        """
        return super(TranslationStructuredPredictionTask, cls).setup_task(args, **kwargs)
    def build_criterion(self, args):
        """
        Build the :class:`~fairseq.criterions.FairseqCriterion` instance for
        this task.
        Args:
            args (argparse.Namespace): parsed command-line arguments
        Returns:
            a :class:`~fairseq.criterions.FairseqCriterion` instance
        """
        from fairseq import criterions
        criterion = criterions.build_criterion(args, self)
        # Sequence-level training only works with sequence criterions.
        assert isinstance(criterion, criterions.FairseqSequenceCriterion)
        return criterion
    def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
        """
        Do forward and backward, and return the loss as computed by *criterion*
        for the given *model* and *sample*.
        Args:
            sample (dict): the mini-batch. The format is defined by the
                :class:`~fairseq.data.FairseqDataset`.
            model (~fairseq.models.BaseFairseqModel): the model
            criterion (~fairseq.criterions.FairseqCriterion): the criterion
            optimizer (~fairseq.optim.FairseqOptimizer): the optimizer
            ignore_grad (bool): multiply loss by 0 if this is set to True
        Returns:
            tuple:
                - the loss
                - the sample size, which is used as the denominator for the
                  gradient
                - logging outputs to display while training
        """
        # control dropout during generation
        model.train(self.args.seq_gen_with_dropout)
        model.set_num_updates(update_num)
        # generate hypotheses
        self._generate_hypotheses(model, sample)
        return super().train_step(
            sample=sample,
            model=model,
            criterion=criterion,
            optimizer=optimizer,
            update_num=update_num,
            ignore_grad=ignore_grad,
        )
    def valid_step(self, sample, model, criterion):
        """Generate hypotheses (no dropout) and run the criterion on them."""
        model.eval()
        self._generate_hypotheses(model, sample)
        return super().valid_step(sample=sample, model=model, criterion=criterion)
    def _generate_hypotheses(self, model, sample):
        """Populate ``sample['hypos']`` with generated hypotheses for *sample*."""
        # initialize generator
        if self._generator is None:
            self._generator = SequenceGenerator(
                self.target_dictionary,
                beam_size=self.args.seq_beam,
                max_len_a=self.args.seq_max_len_a,
                max_len_b=self.args.seq_max_len_b,
                unk_penalty=self.args.seq_unkpen,
                #sampling=self.args.seq_sampling,
            )
        # generate hypotheses
        sample['hypos'] = self._generator.generate(
            [model],
            sample,
        )
        # add reference to the set of hypotheses
        if self.args.seq_keep_reference:
            # Bug fix: the in-place helper is named with a trailing underscore.
            # The previous call (self.add_reference_to_hypotheses) targeted a
            # non-existent method and raised AttributeError whenever
            # --seq-keep-reference was set.
            self.add_reference_to_hypotheses_(sample)
    def add_reference_to_hypotheses_(self, sample):
        """
        Add the reference translation to the set of hypotheses. This can be
        called from the criterion's forward.
        """
        if 'includes_reference' in sample:
            # Already done for this sample; keep the call idempotent.
            return
        sample['includes_reference'] = True
        target = sample['target']
        pad_idx = self.target_dictionary.pad()
        for i, hypos_i in enumerate(sample['hypos']):
            # insert reference as first hypothesis
            ref = utils.strip_pad(target[i, :], pad_idx)
            hypos_i.insert(0, {
                'tokens': ref,
                'score': None,
            })
    def get_new_sample_for_hypotheses(self, orig_sample):
        """
        Extract hypotheses from *orig_sample* and return a new collated sample.
        """
        ids = orig_sample['id'].tolist()
        pad_idx = self.source_dictionary.pad()
        # One (source, hypothesis) pair per hypothesis of every sentence.
        samples = [
            {
                'id': ids[i],
                'source': utils.strip_pad(orig_sample['net_input']['src_tokens'][i, :], pad_idx),
                'target': hypo['tokens'],
            }
            for i, hypos_i in enumerate(orig_sample['hypos'])
            for hypo in hypos_i
        ]
        return language_pair_dataset.collate(
            samples, pad_idx=pad_idx, eos_idx=self.source_dictionary.eos(),
            left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target,
            #sort=False, always true
        )
    def get_sequence_scorer(self, scorer):
        """Return (building and caching if needed) the scorer named *scorer*."""
        if scorer not in self._scorers:
            tgt_dict = self.target_dictionary
            if scorer == 'bleu':
                self._scorers[scorer] = BleuScorer(
                    tgt_dict, bpe_symbol=self.args.seq_remove_bpe,
                )
            else:
                raise ValueError('Unknown sequence scorer {}'.format(scorer))
        return self._scorers[scorer]
    def get_costs(self, sample, scorer=None):
        """Get costs for hypotheses using the specified *scorer*."""
        if scorer is None:
            scorer = self.get_sequence_scorer(self.args.seq_scorer)
        bsz = len(sample['hypos'])
        nhypos = len(sample['hypos'][0])
        target = sample['target'].int()
        pad_idx = self.target_dictionary.pad()
        # costs[i, j] = cost of hypothesis j for sentence i.
        costs = torch.zeros(bsz, nhypos).to(sample['target'].device)
        for i, hypos_i in enumerate(sample['hypos']):
            ref = utils.strip_pad(target[i, :], pad_idx).cpu()
            ref = scorer.preprocess_ref(ref)
            for j, hypo in enumerate(hypos_i):
                costs[i, j] = scorer.get_cost(ref, scorer.preprocess_hypo(hypo))
        return scorer.postprocess_costs(costs)
| 10,110 | 39.606426 | 97 | py |
mix | mix-master/fairseq/tasks/multilingual_masked_lm.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
import torch
from fairseq.data import (
data_utils,
Dictionary,
encoders,
ConcatDataset,
IdDataset,
MaskTokensDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
PadDataset,
PrependTokenDataset,
RawLabelDataset,
ResamplingDataset,
SortDataset,
TokenBlockDataset,
)
from fairseq.tasks import FairseqTask, register_task
from fairseq import utils
logger = logging.getLogger(__name__)
@register_task('multilingual_masked_lm')
class MultiLingualMaskedLMTask(FairseqTask):
"""Task for training masked language models (e.g., BERT, RoBERTa)."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner')
parser.add_argument('--sample-break-mode', default='complete',
choices=['none', 'complete', 'complete_doc', 'eos'],
help='If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
'of sentence, but may include multiple sentences per sample. '
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.')
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of total tokens over all segments '
'per sample for BERT dataset')
parser.add_argument('--mask-prob', default=0.15, type=float,
help='probability of replacing a token with mask')
parser.add_argument('--leave-unmasked-prob', default=0.1, type=float,
help='probability that a masked token is unmasked')
parser.add_argument('--random-token-prob', default=0.1, type=float,
help='probability of replacing a token with a random token')
parser.add_argument('--freq-weighted-replacement', action='store_true',
help='sample random replacement words based on word frequencies')
parser.add_argument('--mask-whole-words', default=False, action='store_true',
help='mask whole words; you may also want to set --bpe')
parser.add_argument('--multilang-sampling-alpha', type=float, default=1.0,
help='smoothing alpha for sample rations across multiple datasets')
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = dictionary.add_symbol('<mask>')
@classmethod
def setup_task(cls, args, **kwargs):
paths = utils.split_paths(args.data)
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt'))
logger.info('dictionary: {} types'.format(len(dictionary)))
return cls(args, dictionary)
def _get_whole_word_mask(self):
# create masked input and targets
if self.args.mask_whole_words:
bpe = encoders.build_bpe(self.args)
if bpe is not None:
def is_beginning_of_word(i):
if i < self.source_dictionary.nspecial:
# special elements are always considered beginnings
return True
tok = self.source_dictionary[i]
if tok.startswith('madeupword'):
return True
try:
return bpe.is_beginning_of_word(tok)
except ValueError:
return True
mask_whole_words = torch.ByteTensor(list(
map(is_beginning_of_word, range(len(self.source_dictionary)))
))
else:
mask_whole_words = None
return mask_whole_words
def _get_sample_prob(self, dataset_lens):
"""
Get smoothed sampling porbability by languages. This helps low resource
languages by upsampling them.
"""
prob = dataset_lens / dataset_lens.sum()
smoothed_prob = prob ** self.args.multilang_sampling_alpha
smoothed_prob = smoothed_prob / smoothed_prob.sum()
return smoothed_prob
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
languages = sorted(
name for name in os.listdir(data_path)
if os.path.isdir(os.path.join(data_path, name))
)
logger.info("Training on {0} languages: {1}".format(len(languages), languages))
logger.info("Language to id mapping: ", {
lang: id for id, lang in enumerate(languages)
}
)
mask_whole_words = self._get_whole_word_mask()
lang_datasets = []
for lang_id, language in enumerate(languages):
split_path = os.path.join(data_path, language, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
# create continuous blocks of tokens
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample - 1, # one less for <s>
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode=self.args.sample_break_mode,
)
logger.info('loaded {} blocks from: {}'.format(len(dataset), split_path))
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
dataset,
self.source_dictionary,
pad_idx=self.source_dictionary.pad(),
mask_idx=self.mask_idx,
seed=self.args.seed,
mask_prob=self.args.mask_prob,
leave_unmasked_prob=self.args.leave_unmasked_prob,
random_token_prob=self.args.random_token_prob,
freq_weighted_replacement=self.args.freq_weighted_replacement,
mask_whole_words=mask_whole_words,
)
lang_dataset = NestedDictionaryDataset(
{
'net_input': {
'src_tokens': PadDataset(
src_dataset,
pad_idx=self.source_dictionary.pad(),
left_pad=False,
),
'src_lengths': NumelDataset(src_dataset, reduce=False),
},
'target': PadDataset(
tgt_dataset,
pad_idx=self.source_dictionary.pad(),
left_pad=False,
),
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(src_dataset, reduce=True),
'lang_id': RawLabelDataset([lang_id] * src_dataset.sizes.shape[0]),
},
sizes=[src_dataset.sizes],
)
lang_datasets.append(lang_dataset)
dataset_lengths = np.array(
[len(d) for d in lang_datasets],
dtype=float,
)
logger.info(
'loaded total {} blocks for all languages'.format(
dataset_lengths.sum(),
)
)
if split == self.args.train_subset:
# For train subset, additionally up or down sample languages.
sample_probs = self._get_sample_prob(dataset_lengths)
logger.info("Sample probability by language: ", {
lang: "{0:.4f}".format(sample_probs[id])
for id, lang in enumerate(languages)
}
)
size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
logger.info("Up/Down Sampling ratio by language: ", {
lang: "{0:.2f}".format(size_ratio[id])
for id, lang in enumerate(languages)
}
)
resampled_lang_datasets = [
ResamplingDataset(
lang_datasets[i],
size_ratio=size_ratio[i],
seed=self.args.seed,
epoch=epoch,
replace=size_ratio[i] >= 1.0,
)
for i, d in enumerate(lang_datasets)
]
dataset = ConcatDataset(resampled_lang_datasets)
else:
dataset = ConcatDataset(lang_datasets)
lang_splits = [split]
for lang_id, lang_dataset in enumerate(lang_datasets):
split_name = split + '_' + languages[lang_id]
lang_splits.append(split_name)
self.datasets[split_name] = lang_dataset
# [TODO]: This is hacky for now to print validation ppl for each
# language individually. Maybe need task API changes to allow it
# in more generic ways.
if split in self.args.valid_subset:
self.args.valid_subset = self.args.valid_subset.replace(
split, ','.join(lang_splits)
)
with data_utils.numpy_seed(self.args.seed + epoch):
shuffle = np.random.permutation(len(dataset))
self.datasets[split] = SortDataset(
dataset,
sort_order=[
shuffle,
dataset.sizes,
],
)
def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
src_dataset = PadDataset(
TokenBlockDataset(
src_tokens,
src_lengths,
self.args.tokens_per_sample - 1, # one less for <s>
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode='eos',
),
pad_idx=self.source_dictionary.pad(),
left_pad=False,
)
src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
src_dataset = NestedDictionaryDataset(
{
'id': IdDataset(),
'net_input': {
'src_tokens': src_dataset,
'src_lengths': NumelDataset(src_dataset, reduce=False),
},
},
sizes=src_lengths,
)
if sort:
src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
return src_dataset
    def get_batch_iterator(
        self, dataset, max_tokens=None, max_sentences=None, max_positions=None,
        ignore_invalid_inputs=False, required_batch_size_multiple=1,
        seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1,
    ):
        """Build a batch iterator without caching it across epochs.

        The parent class caches iterators in ``self.dataset_to_epoch_iter``.
        Because the underlying datasets here are resampled every epoch, the
        cache is cleared both *before* the super() call (so a fresh iterator
        is built) and *after* it (so that fresh iterator is not reused on the
        next epoch).
        """
        # Recreate epoch iterator every epoch cause the underlying
        # datasets are dynamic due to sampling.
        self.dataset_to_epoch_iter = {}
        epoch_iter = super().get_batch_iterator(
            dataset, max_tokens, max_sentences, max_positions,
            ignore_invalid_inputs, required_batch_size_multiple,
            seed, num_shards, shard_id, num_workers, epoch,
        )
        self.dataset_to_epoch_iter = {}
        return epoch_iter
    @property
    def source_dictionary(self):
        """Shared dictionary used for the source side."""
        return self.dictionary
    @property
    def target_dictionary(self):
        """Shared dictionary used for the target side (same as the source)."""
        return self.dictionary
# ---- file boundary: fairseq/tasks/multilingual_translation.py ----
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import logging
import os
import torch
from fairseq import metrics, options
from fairseq.data import (
Dictionary,
LanguagePairDataset,
RoundRobinZipDatasets,
TransformEosLangPairDataset,
)
from fairseq.models import FairseqMultiModel
from fairseq.tasks.translation import load_langpair_dataset
from . import FairseqTask, register_task
from fairseq import utils
logger = logging.getLogger(__name__)
def _lang_token(lang: str):
return '__{}__'.format(lang)
def _lang_token_index(dic: Dictionary, lang: str):
    """Return language token index."""
    idx = dic.index(_lang_token(lang))
    # Dictionary.index() returns the unk index for out-of-vocabulary symbols,
    # so hitting unk here means the language token was never added to `dic`.
    assert idx != dic.unk_index, \
        'cannot find language token for lang {}'.format(lang)
    return idx
@register_task('multilingual_translation')
class MultilingualTranslationTask(FairseqTask):
    """A task for training multiple translation models simultaneously.

    We iterate round-robin over batches from multiple language pairs, ordered
    according to the `--lang-pairs` argument.

    The training loop is roughly:

        for i in range(len(epoch)):
            for lang_pair in args.lang_pairs:
                batch = next_batch_for_lang_pair(lang_pair)
                loss = criterion(model_for_lang_pair(lang_pair), batch)
                loss.backward()
            optimizer.step()

    In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset
    (e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that
    implements the `FairseqMultiModel` interface.

    During inference it is required to specify a single `--source-lang` and
    `--target-lang`, which indicates the inference language direction.
    `--lang-pairs`, `--encoder-langtok`, `--decoder-langtok` have to be set to
    the same value as training.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('data', metavar='DIR', help='path to data directory')
        parser.add_argument('--lang-pairs', default=None, metavar='PAIRS',
                            help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr')
        parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
                            help='source language (only needed for inference)')
        parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
                            help='target language (only needed for inference)')
        parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
                            help='pad the source on the left (default: True)')
        parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
                            help='pad the target on the left (default: False)')
        parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the target sequence')
        parser.add_argument('--upsample-primary', default=1, type=int,
                            help='amount to upsample primary dataset')
        parser.add_argument('--encoder-langtok', default=None, type=str, choices=['src', 'tgt'],
                            metavar='SRCTGT',
                            help='replace beginning-of-sentence in source sentence with source or target '
                                 'language token. (src/tgt)')
        parser.add_argument('--decoder-langtok', action='store_true',
                            help='replace beginning-of-sentence in target sentence with target language token')
        # fmt: on

    def __init__(self, args, dicts, training):
        super().__init__(args)
        self.dicts = dicts
        self.training = training
        if training:
            self.lang_pairs = args.lang_pairs
        else:
            # inference: a single direction given by --source-lang/--target-lang
            self.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)]
        # eval_lang_pairs for multilingual translation is usually all of the
        # lang_pairs. However for other multitask settings or when we want to
        # optimize for certain languages we want to use a different subset. Thus
        # the eval_lang_pairs class variable is provided for classes that extend
        # this class.
        self.eval_lang_pairs = self.lang_pairs
        # model_lang_pairs will be used to build encoder-decoder model pairs in
        # models.build_model(). This allows multitask type of sub-class can
        # build models other than the input lang_pairs
        self.model_lang_pairs = self.lang_pairs
        self.langs = list(dicts.keys())

    @classmethod
    def setup_task(cls, args, **kwargs):
        dicts, training = cls.prepare(args, **kwargs)
        return cls(args, dicts, training)

    @classmethod
    def prepare(cls, args, **kargs):
        """Parse boolean flags, load per-language dictionaries and decide
        whether we are in training or inference mode.

        Returns:
            (dicts, training): an OrderedDict of ``lang -> Dictionary`` and a
            bool that is False iff an explicit inference direction was given.
        """
        args.left_pad_source = options.eval_bool(args.left_pad_source)
        args.left_pad_target = options.eval_bool(args.left_pad_target)
        if args.lang_pairs is None:
            raise ValueError('--lang-pairs is required. List all the language pairs in the training objective.')
        if isinstance(args.lang_pairs, str):
            args.lang_pairs = args.lang_pairs.split(',')
        sorted_langs = sorted(list({x for lang_pair in args.lang_pairs for x in lang_pair.split('-')}))
        if args.source_lang is not None or args.target_lang is not None:
            training = False
        else:
            training = True

        # load dictionaries
        dicts = OrderedDict()
        for lang in sorted_langs:
            paths = utils.split_paths(args.data)
            assert len(paths) > 0
            dicts[lang] = Dictionary.load(os.path.join(paths[0], 'dict.{}.txt'.format(lang)))
            if len(dicts) > 0:
                # all dictionaries must agree on the special symbols
                assert dicts[lang].pad() == dicts[sorted_langs[0]].pad()
                assert dicts[lang].eos() == dicts[sorted_langs[0]].eos()
                assert dicts[lang].unk() == dicts[sorted_langs[0]].unk()
            if args.encoder_langtok is not None or args.decoder_langtok:
                # reserve one token per language so langtok replacement works
                for lang_to_add in sorted_langs:
                    dicts[lang].add_symbol(_lang_token(lang_to_add))
            logger.info('[{}] dictionary: {} types'.format(lang, len(dicts[lang])))
        return dicts, training

    def get_encoder_langtok(self, src_lang, tgt_lang):
        """Token used in place of <eos> on the encoder side (or plain <eos>)."""
        if self.args.encoder_langtok is None:
            return self.dicts[src_lang].eos()
        if self.args.encoder_langtok == 'src':
            return _lang_token_index(self.dicts[src_lang], src_lang)
        else:
            return _lang_token_index(self.dicts[src_lang], tgt_lang)

    def get_decoder_langtok(self, tgt_lang):
        """Token used in place of <bos> on the decoder side (or plain <eos>)."""
        if not self.args.decoder_langtok:
            return self.dicts[tgt_lang].eos()
        return _lang_token_index(self.dicts[tgt_lang], tgt_lang)

    def alter_dataset_langtok(self, lang_pair_dataset,
                              src_eos=None, src_lang=None, tgt_eos=None, tgt_lang=None):
        """Wrap a dataset so that <eos>/<bos> are replaced by language tokens
        according to --encoder-langtok / --decoder-langtok; no-op otherwise."""
        if self.args.encoder_langtok is None and not self.args.decoder_langtok:
            return lang_pair_dataset

        new_src_eos = None
        if self.args.encoder_langtok is not None and src_eos is not None \
           and src_lang is not None and tgt_lang is not None:
            new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang)
        else:
            src_eos = None

        new_tgt_bos = None
        if self.args.decoder_langtok and tgt_eos is not None and tgt_lang is not None:
            new_tgt_bos = self.get_decoder_langtok(tgt_lang)
        else:
            tgt_eos = None

        return TransformEosLangPairDataset(
            lang_pair_dataset,
            src_eos=src_eos,
            new_src_eos=new_src_eos,
            tgt_bos=tgt_eos,
            new_tgt_bos=new_tgt_bos,
        )

    def load_dataset(self, split, epoch=1, **kwargs):
        """Load a dataset split."""
        paths = utils.split_paths(self.args.data)
        assert len(paths) > 0
        data_path = paths[(epoch - 1) % len(paths)]

        def language_pair_dataset(lang_pair):
            src, tgt = lang_pair.split('-')
            langpair_dataset = load_langpair_dataset(
                data_path, split, src, self.dicts[src], tgt, self.dicts[tgt],
                combine=True, dataset_impl=self.args.dataset_impl,
                upsample_primary=self.args.upsample_primary,
                left_pad_source=self.args.left_pad_source,
                left_pad_target=self.args.left_pad_target,
                max_source_positions=self.args.max_source_positions,
                max_target_positions=self.args.max_target_positions,
            )
            return self.alter_dataset_langtok(
                langpair_dataset,
                src_eos=self.dicts[src].eos(),
                src_lang=src,
                tgt_eos=self.dicts[tgt].eos(),
                tgt_lang=tgt,
            )

        self.datasets[split] = RoundRobinZipDatasets(
            OrderedDict([
                (lang_pair, language_pair_dataset(lang_pair))
                for lang_pair in self.lang_pairs
            ]),
            eval_key=None if self.training else "%s-%s" % (self.args.source_lang, self.args.target_lang),
        )

    def build_dataset_for_inference(self, src_tokens, src_lengths):
        lang_pair = "%s-%s" % (self.args.source_lang, self.args.target_lang)
        return RoundRobinZipDatasets(
            OrderedDict([(
                lang_pair,
                self.alter_dataset_langtok(
                    LanguagePairDataset(
                        src_tokens, src_lengths,
                        self.source_dictionary
                    ),
                    src_eos=self.source_dictionary.eos(),
                    src_lang=self.args.source_lang,
                    tgt_eos=self.target_dictionary.eos(),
                    tgt_lang=self.args.target_lang,
                ),
            )]),
            eval_key=lang_pair,
        )

    def build_model(self, args):
        def check_args():
            messages = []
            if len(set(self.args.lang_pairs).symmetric_difference(args.lang_pairs)) != 0:
                messages.append('--lang-pairs should include all the language pairs {}.'.format(args.lang_pairs))
            if self.args.encoder_langtok != args.encoder_langtok:
                messages.append('--encoder-langtok should be {}.'.format(args.encoder_langtok))
            if self.args.decoder_langtok != args.decoder_langtok:
                messages.append('--decoder-langtok should {} be set.'.format("" if args.decoder_langtok else "not"))

            if len(messages) > 0:
                raise ValueError(' '.join(messages))

        # Check if task args are consistant with model args
        check_args()

        from fairseq import models
        model = models.build_model(args, self)
        if not isinstance(model, FairseqMultiModel):
            raise ValueError('MultilingualTranslationTask requires a FairseqMultiModel architecture')
        return model

    def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
        """Forward/backward over every language pair present in *sample* and
        return the aggregated loss, sample size and logging outputs."""
        model.train()
        from collections import defaultdict
        agg_loss, agg_sample_size, agg_logging_output = 0., 0., defaultdict(float)
        for lang_pair in self.model_lang_pairs:
            # guard against pairs absent from this sample (mirrors valid_step);
            # previously a missing pair raised KeyError here
            if lang_pair not in sample or sample[lang_pair] is None or len(sample[lang_pair]) == 0:
                continue
            loss, sample_size, logging_output = criterion(model.models[lang_pair], sample[lang_pair])
            if ignore_grad:
                loss *= 0
            optimizer.backward(loss)
            agg_loss += loss.detach().item()
            # TODO make summing of the sample sizes configurable
            agg_sample_size += sample_size
            for k in logging_output:
                agg_logging_output[k] += logging_output[k]
                agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k]
        return agg_loss, agg_sample_size, agg_logging_output

    def valid_step(self, sample, model, criterion):
        model.eval()
        with torch.no_grad():
            from collections import defaultdict
            agg_loss, agg_sample_size, agg_logging_output = 0., 0., defaultdict(float)
            for lang_pair in self.eval_lang_pairs:
                if lang_pair not in sample or sample[lang_pair] is None or len(sample[lang_pair]) == 0:
                    continue
                loss, sample_size, logging_output = criterion(model.models[lang_pair], sample[lang_pair])
                agg_loss += loss.data.item()
                # TODO make summing of the sample sizes configurable
                agg_sample_size += sample_size
                for k in logging_output:
                    agg_logging_output[k] += logging_output[k]
                    agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k]
        return agg_loss, agg_sample_size, agg_logging_output

    def inference_step(self, generator, models, sample, prefix_tokens=None):
        with torch.no_grad():
            return generator.generate(
                models,
                sample,
                prefix_tokens=prefix_tokens,
                # seed decoding with the target language token when enabled
                bos_token=_lang_token_index(self.target_dictionary, self.args.target_lang)
                if self.args.decoder_langtok else self.target_dictionary.eos(),
            )

    def reduce_metrics(self, logging_outputs, criterion):
        with metrics.aggregate():
            # pass 'sample_size', 'nsentences', 'ntokens' stats to fairseq_task
            super().reduce_metrics(logging_outputs, criterion)
            for k in ['sample_size', 'nsentences', 'ntokens']:
                metrics.log_scalar(k, sum(l[k] for l in logging_outputs))

    @property
    def source_dictionary(self):
        if self.training:
            return next(iter(self.dicts.values()))
        else:
            return self.dicts[self.args.source_lang]

    @property
    def target_dictionary(self):
        if self.training:
            return next(iter(self.dicts.values()))
        else:
            return self.dicts[self.args.target_lang]

    def max_positions(self):
        """Return the max sentence length allowed by the task."""
        if len(self.datasets.values()) == 0:
            return {'%s-%s' % (self.args.source_lang, self.args.target_lang):
                    (self.args.max_source_positions, self.args.max_target_positions)}
        return OrderedDict([
            (key, (self.args.max_source_positions, self.args.max_target_positions))
            for split in self.datasets.keys()
            for key in self.datasets[split].datasets.keys()
        ])
# ---- file boundary: fairseq/tasks/translation_lev.py ----
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
from fairseq.utils import new_arange
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask, load_langpair_dataset
from fairseq import utils
@register_task('translation_lev')
class TranslationLevenshteinTask(TranslationTask):
    """
    Translation (Sequence Generation) task for Levenshtein Transformer
    See `"Levenshtein Transformer" <https://arxiv.org/abs/1905.11006>`_.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        TranslationTask.add_args(parser)
        parser.add_argument(
            '--noise',
            default='random_delete',
            choices=['random_delete', 'random_mask', 'no_noise', 'full_mask'])

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        paths = utils.split_paths(self.args.data)
        assert len(paths) > 0
        # cycle through data shards, one per epoch
        data_path = paths[(epoch - 1) % len(paths)]

        # infer langcode
        src, tgt = self.args.source_lang, self.args.target_lang

        self.datasets[split] = load_langpair_dataset(
            data_path, split, src, self.src_dict, tgt, self.tgt_dict,
            combine=combine, dataset_impl=self.args.dataset_impl,
            upsample_primary=self.args.upsample_primary,
            left_pad_source=self.args.left_pad_source,
            left_pad_target=self.args.left_pad_target,
            max_source_positions=self.args.max_source_positions,
            max_target_positions=self.args.max_target_positions,
            prepend_bos=True,  # Levenshtein-style models expect <bos>-prefixed targets
        )

    def inject_noise(self, target_tokens):
        """Corrupt *target_tokens* according to --noise to build the
        ``prev_target`` input that the model learns to refine."""

        def _random_delete(target_tokens):
            # Randomly delete a fraction of tokens per sentence (never the
            # <bos>/<eos> markers), then re-pack the survivors to the left and
            # trim columns that became all-padding.
            pad = self.tgt_dict.pad()
            bos = self.tgt_dict.bos()
            eos = self.tgt_dict.eos()

            max_len = target_tokens.size(1)
            target_mask = target_tokens.eq(pad)
            # random score per token decides the deletion order
            target_score = target_tokens.clone().float().uniform_()
            target_score.masked_fill_(
                target_tokens.eq(bos) | target_tokens.eq(eos), 0.0)
            target_score.masked_fill_(target_mask, 1)
            target_score, target_rank = target_score.sort(1)
            target_length = target_mask.size(1) - target_mask.float().sum(
                1, keepdim=True)

            # do not delete <bos> and <eos> (we assign 0 score for them)
            target_cutoff = 2 + ((target_length - 2) * target_score.new_zeros(
                target_score.size(0), 1).uniform_()).long()
            target_cutoff = target_score.sort(1)[1] >= target_cutoff

            # fill deleted positions with pad, then restore original ordering
            # of the surviving tokens
            prev_target_tokens = target_tokens.gather(
                1, target_rank).masked_fill_(target_cutoff, pad).gather(
                    1,
                    target_rank.masked_fill_(target_cutoff,
                                             max_len).sort(1)[1])
            # trim trailing all-padding columns
            prev_target_tokens = prev_target_tokens[:, :prev_target_tokens.
                                                    ne(pad).sum(1).max()]

            return prev_target_tokens

        def _random_mask(target_tokens):
            # Replace a random subset of real tokens with <unk>, masking at
            # least one token per sentence.
            pad = self.tgt_dict.pad()
            bos = self.tgt_dict.bos()
            eos = self.tgt_dict.eos()
            unk = self.tgt_dict.unk()

            target_masks = target_tokens.ne(pad) & \
                           target_tokens.ne(bos) & \
                           target_tokens.ne(eos)
            target_score = target_tokens.clone().float().uniform_()
            # special/pad tokens get score 2.0 so they are never selected
            target_score.masked_fill_(~target_masks, 2.0)
            target_length = target_masks.sum(1).float()
            target_length = target_length * target_length.clone().uniform_()
            target_length = target_length + 1  # make sure to mask at least one token.

            _, target_rank = target_score.sort(1)
            target_cutoff = new_arange(target_rank) < target_length[:, None].long()
            prev_target_tokens = target_tokens.masked_fill(
                target_cutoff.scatter(1, target_rank, target_cutoff), unk)
            return prev_target_tokens

        def _full_mask(target_tokens):
            # Replace every non-special token with <unk>.
            pad = self.tgt_dict.pad()
            bos = self.tgt_dict.bos()
            eos = self.tgt_dict.eos()
            unk = self.tgt_dict.unk()

            target_mask = target_tokens.eq(bos) | target_tokens.eq(
                eos) | target_tokens.eq(pad)
            return target_tokens.masked_fill(~target_mask, unk)

        if self.args.noise == 'random_delete':
            return _random_delete(target_tokens)
        elif self.args.noise == 'random_mask':
            return _random_mask(target_tokens)
        elif self.args.noise == 'full_mask':
            return _full_mask(target_tokens)
        elif self.args.noise == 'no_noise':
            return target_tokens
        else:
            raise NotImplementedError

    def build_generator(self, args):
        # Levenshtein-style models decode by iterative refinement rather than
        # left-to-right beam search.
        from fairseq.iterative_refinement_generator import IterativeRefinementGenerator
        return IterativeRefinementGenerator(
            self.target_dictionary,
            eos_penalty=getattr(args, 'iter_decode_eos_penalty', 0.0),
            max_iter=getattr(args, 'iter_decode_max_iter', 10),
            beam_size=getattr(args, 'iter_decode_with_beam', 1),
            reranking=getattr(args, 'iter_decode_with_external_reranker', False),
            decoding_format=getattr(args, 'decoding_format', None),
            adaptive=not getattr(args, 'iter_decode_force_max_iter', False),
            retain_history=getattr(args, 'retain_iter_history', False))

    def train_step(self,
                   sample,
                   model,
                   criterion,
                   optimizer,
                   update_num,
                   ignore_grad=False):
        model.train()
        # build the noised decoder input before computing the loss
        sample['prev_target'] = self.inject_noise(sample['target'])
        loss, sample_size, logging_output = criterion(model, sample)
        if ignore_grad:
            loss *= 0
        optimizer.backward(loss)
        return loss, sample_size, logging_output

    def valid_step(self, sample, model, criterion):
        model.eval()
        with torch.no_grad():
            sample['prev_target'] = self.inject_noise(sample['target'])
            loss, sample_size, logging_output = criterion(model, sample)
        return loss, sample_size, logging_output
# ---- file boundary: fairseq/tasks/fairseq_task.py ----
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import torch
from fairseq import metrics, search, tokenizer, utils
from fairseq.data import data_utils, FairseqDataset, iterators, Dictionary
class FairseqTask(object):
    """
    Tasks store dictionaries and provide helpers for loading/iterating over
    Datasets, initializing the Model/Criterion and calculating the loss.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        pass

    @staticmethod
    def logging_outputs_can_be_summed(criterion) -> bool:
        """
        Whether the logging outputs returned by `train_step` and `valid_step` can
        be summed across workers prior to calling `aggregate_logging_outputs`.
        Setting this to True will improves distributed training speed.
        """
        return criterion.logging_outputs_can_be_summed()

    def __init__(self, args):
        self.args = args
        self.datasets = {}  # split name -> loaded dataset
        self.dataset_to_epoch_iter = {}  # dataset -> cached epoch iterator

    @classmethod
    def load_dictionary(cls, filename):
        """Load the dictionary from the filename

        Args:
            filename (str): the filename
        """
        return Dictionary.load(filename)

    @classmethod
    def build_dictionary(
        cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
    ):
        """Build the dictionary

        Args:
            filenames (list): list of filenames
            workers (int): number of concurrent workers
            threshold (int): defines the minimum word count
            nwords (int): defines the total number of words in the final dictionary,
                including special symbols
            padding_factor (int): can be used to pad the dictionary size to be a
                multiple of 8, which is important on some hardware (e.g., Nvidia
                Tensor Cores).
        """
        d = Dictionary()
        for filename in filenames:
            Dictionary.add_file_to_dictionary(
                filename, d, tokenizer.tokenize_line, workers
            )
        d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
        return d

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup the task (e.g., load dictionaries).

        Args:
            args (argparse.Namespace): parsed command-line arguments
        """
        return cls(args, **kwargs)

    def load_dataset(self, split, combine=False, **kwargs):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        raise NotImplementedError

    def dataset(self, split):
        """
        Return a loaded dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)

        Returns:
            a :class:`~fairseq.data.FairseqDataset` corresponding to *split*
        """
        from fairseq.data import FairseqDataset

        if split not in self.datasets:
            raise KeyError("Dataset not loaded: " + split)
        if not isinstance(self.datasets[split], FairseqDataset):
            raise TypeError("Datasets are expected to be of type FairseqDataset")
        return self.datasets[split]

    def get_batch_iterator(
        self,
        dataset,
        max_tokens=None,
        max_sentences=None,
        max_positions=None,
        ignore_invalid_inputs=False,
        required_batch_size_multiple=1,
        seed=1,
        num_shards=1,
        shard_id=0,
        num_workers=0,
        epoch=1,
    ):
        """
        Get an iterator that yields batches of data from the given dataset.

        Args:
            dataset (~fairseq.data.FairseqDataset): dataset to batch
            max_tokens (int, optional): max number of tokens in each batch
                (default: None).
            max_sentences (int, optional): max number of sentences in each
                batch (default: None).
            max_positions (optional): max sentence length supported by the
                model (default: None).
            ignore_invalid_inputs (bool, optional): don't raise Exception for
                sentences that are too long (default: False).
            required_batch_size_multiple (int, optional): require batch size to
                be a multiple of N (default: 1).
            seed (int, optional): seed for random number generator for
                reproducibility (default: 1).
            num_shards (int, optional): shard the data iterator into N
                shards (default: 1).
            shard_id (int, optional): which shard of the data iterator to
                return (default: 0).
            num_workers (int, optional): how many subprocesses to use for data
                loading. 0 means the data will be loaded in the main process
                (default: 0).
            epoch (int, optional): the epoch to start the iterator from
                (default: 1).

        Returns:
            ~fairseq.iterators.EpochBatchIterator: a batched iterator over the
                given dataset split
        """
        # For default fairseq task, return same iterator across epochs
        # as datasets are not dynamic, can be overridden in task specific
        # setting.
        if dataset in self.dataset_to_epoch_iter:
            return self.dataset_to_epoch_iter[dataset]

        assert isinstance(dataset, FairseqDataset)

        # initialize the dataset with the correct starting epoch
        dataset.set_epoch(epoch)

        # get indices ordered by example size
        with data_utils.numpy_seed(seed):
            indices = dataset.ordered_indices()

        # filter examples that are too large
        if max_positions is not None:
            indices = data_utils.filter_by_size(
                indices,
                dataset,
                max_positions,
                raise_exception=(not ignore_invalid_inputs),
            )

        # create mini-batches with given size constraints
        batch_sampler = data_utils.batch_by_size(
            indices,
            dataset.num_tokens,
            max_tokens=max_tokens,
            max_sentences=max_sentences,
            required_batch_size_multiple=required_batch_size_multiple,
        )

        # return a reusable, sharded iterator
        epoch_iter = iterators.EpochBatchIterator(
            dataset=dataset,
            collate_fn=dataset.collater,
            batch_sampler=batch_sampler,
            seed=seed,
            num_shards=num_shards,
            shard_id=shard_id,
            num_workers=num_workers,
            epoch=epoch,
        )
        # cache the iterator so subsequent epochs reuse it
        self.dataset_to_epoch_iter[dataset] = epoch_iter
        return epoch_iter

    def build_model(self, args):
        """
        Build the :class:`~fairseq.models.BaseFairseqModel` instance for this
        task.

        Args:
            args (argparse.Namespace): parsed command-line arguments

        Returns:
            a :class:`~fairseq.models.BaseFairseqModel` instance
        """
        from fairseq import models

        return models.build_model(args, self)

    def build_criterion(self, args):
        """
        Build the :class:`~fairseq.criterions.FairseqCriterion` instance for
        this task.

        Args:
            args (argparse.Namespace): parsed command-line arguments

        Returns:
            a :class:`~fairseq.criterions.FairseqCriterion` instance
        """
        from fairseq import criterions

        return criterions.build_criterion(args, self)

    def build_generator(self, args):
        """Build a sequence generator (or scorer) from command-line args."""
        if getattr(args, "score_reference", False):
            from fairseq.sequence_scorer import SequenceScorer

            return SequenceScorer(
                self.target_dictionary,
                compute_alignment=getattr(args, "print_alignment", False),
            )

        from fairseq.sequence_generator import (
            SequenceGenerator,
            SequenceGeneratorWithAlignment,
        )

        # Choose search strategy. Defaults to Beam Search.
        sampling = getattr(args, "sampling", False)
        sampling_topk = getattr(args, "sampling_topk", -1)
        sampling_topp = getattr(args, "sampling_topp", -1.0)
        diverse_beam_groups = getattr(args, "diverse_beam_groups", -1)
        diverse_beam_strength = getattr(args, "diverse_beam_strength", 0.5)
        match_source_len = getattr(args, "match_source_len", False)
        match_target_len = getattr(args, "match_target_len", False)
        diversity_rate = getattr(args, "diversity_rate", -1)
        # NOTE(review): this exclusivity check uses diversity_rate > 0 and does
        # not include match_target_len, while the dispatch below triggers on
        # diversity_rate > -1 — confirm whether that asymmetry is intended.
        if (
            sum(
                int(cond)
                for cond in [
                    sampling,
                    diverse_beam_groups > 0,
                    match_source_len,
                    diversity_rate > 0,
                ]
            )
            > 1
        ):
            raise ValueError("Provided Search parameters are mutually exclusive.")
        assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling"
        assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling"

        if sampling:
            search_strategy = search.Sampling(
                self.target_dictionary, sampling_topk, sampling_topp
            )
        elif diverse_beam_groups > 0:
            search_strategy = search.DiverseBeamSearch(
                self.target_dictionary, diverse_beam_groups, diverse_beam_strength
            )
        elif match_source_len:
            # this is useful for tagging applications where the output
            # length should match the input length, so we hardcode the
            # length constraints for simplicity
            search_strategy = search.LengthConstrainedBeamSearch(
                self.target_dictionary,
                min_len_a=1,
                min_len_b=0,
                max_len_a=1,
                max_len_b=0,
            )
        elif match_target_len:
            # same fixed-length constraint, keyed off --match-target-len
            search_strategy = search.LengthConstrainedBeamSearch(
                self.target_dictionary,
                min_len_a=1,
                min_len_b=0,
                max_len_a=1,
                max_len_b=0,
            )
        elif diversity_rate > -1:
            search_strategy = search.DiverseSiblingsSearch(
                self.target_dictionary, diversity_rate
            )
        else:
            search_strategy = search.BeamSearch(self.target_dictionary)

        if getattr(args, "print_alignment", False):
            seq_gen_cls = SequenceGeneratorWithAlignment
        else:
            seq_gen_cls = SequenceGenerator

        return seq_gen_cls(
            self.target_dictionary,
            beam_size=getattr(args, "beam", 5),
            max_len_a=getattr(args, "max_len_a", 0),
            max_len_b=getattr(args, "max_len_b", 200),
            min_len=getattr(args, "min_len", 1),
            normalize_scores=(not getattr(args, "unnormalized", False)),
            len_penalty=getattr(args, "lenpen", 1),
            unk_penalty=getattr(args, "unkpen", 0),
            temperature=getattr(args, "temperature", 1.0),
            match_source_len=getattr(args, "match_source_len", False),
            match_target_len=getattr(args, "match_target_len", False),
            no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
            search_strategy=search_strategy,
        )

    def train_step(
        self, sample, model, criterion, optimizer, update_num, ignore_grad=False
    ):
        """
        Do forward and backward, and return the loss as computed by *criterion*
        for the given *model* and *sample*.

        Args:
            sample (dict): the mini-batch. The format is defined by the
                :class:`~fairseq.data.FairseqDataset`.
            model (~fairseq.models.BaseFairseqModel): the model
            criterion (~fairseq.criterions.FairseqCriterion): the criterion
            optimizer (~fairseq.optim.FairseqOptimizer): the optimizer
            update_num (int): the current update
            ignore_grad (bool): multiply loss by 0 if this is set to True

        Returns:
            tuple:
                - the loss
                - the sample size, which is used as the denominator for the
                  gradient
                - logging outputs to display while training
        """
        model.train()
        model.set_num_updates(update_num)
        loss, sample_size, logging_output = criterion(model, sample)
        if ignore_grad:
            loss *= 0
        optimizer.backward(loss)
        return loss, sample_size, logging_output

    def valid_step(self, sample, model, criterion):
        """Compute the loss for *sample* without updating parameters."""
        model.eval()
        with torch.no_grad():
            loss, sample_size, logging_output = criterion(model, sample)
        return loss, sample_size, logging_output

    def inference_step(self, generator, models, sample, prefix_tokens=None):
        """Generate hypotheses for *sample* with the given generator."""
        with torch.no_grad():
            return generator.generate(models, sample, prefix_tokens=prefix_tokens)

    def begin_epoch(self, epoch, model):
        """Hook function called before the start of each epoch."""
        pass

    def aggregate_logging_outputs(self, logging_outputs, criterion):
        """[deprecated] Aggregate logging outputs from data parallel training."""
        utils.deprecation_warning(
            "The aggregate_logging_outputs API is deprecated. "
            "Please use the reduce_metrics API instead."
        )
        with metrics.aggregate() as agg:
            self.reduce_metrics(logging_outputs, criterion)
            return agg.get_smoothed_values()

    def reduce_metrics(self, logging_outputs, criterion):
        """Aggregate logging outputs from data parallel training."""
        # backward compatibility for tasks that override aggregate_logging_outputs
        base_func = FairseqTask.aggregate_logging_outputs
        self_func = getattr(self, "aggregate_logging_outputs").__func__
        if self_func is not base_func:
            utils.deprecation_warning(
                "Tasks should implement the reduce_metrics API. "
                "Falling back to deprecated aggregate_logging_outputs API."
            )
            agg_logging_outputs = self.aggregate_logging_outputs(
                logging_outputs, criterion
            )
            for k, v in agg_logging_outputs.items():
                metrics.log_scalar(k, v)
            return

        if not any("ntokens" in log for log in logging_outputs):
            warnings.warn(
                "ntokens not found in Criterion logging outputs, cannot log wpb or wps"
            )
        else:
            ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
            metrics.log_scalar("wpb", ntokens, priority=180, round=1)
            metrics.log_speed("wps", ntokens, priority=90, round=1)

        if not any("nsentences" in log for log in logging_outputs):
            warnings.warn(
                "nsentences not found in Criterion logging outputs, cannot log bsz"
            )
        else:
            nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
            metrics.log_scalar("bsz", nsentences, priority=190, round=1)

        criterion.__class__.reduce_metrics(logging_outputs)

    def max_positions(self):
        """Return the max input length allowed by the task."""
        return None

    @property
    def source_dictionary(self):
        """Return the source :class:`~fairseq.data.Dictionary` (if applicable
        for this task)."""
        raise NotImplementedError

    @property
    def target_dictionary(self):
        """Return the target :class:`~fairseq.data.Dictionary` (if applicable
        for this task)."""
        raise NotImplementedError
# ---- file boundary: docs/conf.py ----
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# fairseq documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 17 21:45:30 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys

# source code directory, relative to this file, for sphinx-autobuild
sys.path.insert(0, os.path.abspath('..'))

# only reStructuredText sources are built
source_suffix = ['.rst']

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
    'sphinxarg.ext',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'fairseq'
copyright = '2019, Facebook AI Research (FAIR)'
author = 'Facebook AI Research (FAIR)'

github_doc_root = 'https://github.com/pytorch/fairseq/tree/master/docs/'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.0'
# The full version, including alpha/beta/rc tags.
release = '0.9.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'python'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_context = {
'css_files': [
'_static/theme_overrides.css', # override wide tables in RTD theme
],
}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
#html_sidebars = {
# '**': [
# 'about.html',
# 'navigation.html',
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# 'donate.html',
# ]
#}
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'python': ('https://docs.python.org/', None),
'torch': ('https://pytorch.org/docs/master/', None),
}
| 4,235 | 30.849624 | 80 | py |
mix | mix-master/fairseq_cli/generate.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate pre-processed data with a trained model.
"""
import logging
import math
import os
import sys
import torch
from fairseq import bleu, checkpoint_utils, options, tasks, utils
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.data import encoders
def main(args):
    """Validate generation arguments and dispatch to :func:`_main`.

    When ``--results-path`` is set, output is written to a per-subset file
    inside that directory; otherwise everything goes to stdout.
    """
    assert args.path is not None, '--path required for generation!'
    assert not args.sampling or args.nbest == args.beam, \
        '--sampling requires --nbest to be equal to --beam'
    assert args.replace_unk is None or args.dataset_impl == 'raw', \
        '--replace-unk requires a raw text dataset (--dataset-impl=raw)'
    if args.results_path is None:
        return _main(args, sys.stdout)
    os.makedirs(args.results_path, exist_ok=True)
    output_path = os.path.join(
        args.results_path, 'generate-{}.txt'.format(args.gen_subset)
    )
    # Line-buffered so partial results are on disk if generation dies.
    with open(output_path, 'w', buffering=1) as h:
        return _main(args, h)
def _main(args, output_file):
    """Generate hypotheses for ``args.gen_subset`` and write all output
    (S-/T-/H-/D-/P-/A-/I-/E- lines) to *output_file*.

    Returns the BLEU scorer accumulated over the top hypothesis of every
    sentence that has a target.
    """
    logging.basicConfig(
        format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        level=logging.INFO,
        stream=output_file,
    )
    logger = logging.getLogger('fairseq_cli.generate')
    utils.import_user_module(args)
    if args.max_tokens is None and args.max_sentences is None:
        args.max_tokens = 12000
    logger.info(args)
    use_cuda = torch.cuda.is_available() and not args.cpu
    # Load dataset splits
    task = tasks.setup_task(args)
    task.load_dataset(args.gen_subset)
    # Set dictionaries
    try:
        src_dict = getattr(task, 'source_dictionary', None)
    except NotImplementedError:
        src_dict = None
    tgt_dict = task.target_dictionary
    # Load ensemble
    logger.info('loading model(s) from {}'.format(args.path))
    # NOTE(review): eval() on --model-overrides executes arbitrary code;
    # acceptable for a trusted CLI argument, never for untrusted input.
    models, _model_args = checkpoint_utils.load_model_ensemble(
        utils.split_paths(args.path),
        arg_overrides=eval(args.model_overrides),
        task=task,
    )
    # Optimize ensemble for generation
    for model in models:
        model.make_generation_fast_(
            beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
            need_attn=args.print_alignment,
        )
        if args.fp16:
            model.half()
        if use_cuda:
            model.cuda()
    # Load alignment dictionary for unknown word replacement
    # (None if no unknown word replacement, empty if no path to align dictionary)
    align_dict = utils.load_align_dict(args.replace_unk)
    # Load dataset (possibly sharded)
    itr = task.get_batch_iterator(
        dataset=task.dataset(args.gen_subset),
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=utils.resolve_max_positions(
            task.max_positions(),
            *[model.max_positions() for model in models]
        ),
        ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=args.required_batch_size_multiple,
        num_shards=args.num_shards,
        shard_id=args.shard_id,
        num_workers=args.num_workers,
    ).next_epoch_itr(shuffle=False)
    progress = progress_bar.progress_bar(
        itr,
        log_format=args.log_format,
        log_interval=args.log_interval,
        default_log_format=('tqdm' if not args.no_progress_bar else 'none'),
    )
    # Initialize generator
    gen_timer = StopwatchMeter()
    generator = task.build_generator(args)
    # Handle tokenization and BPE
    tokenizer = encoders.build_tokenizer(args)
    bpe = encoders.build_bpe(args)
    def decode_fn(x):
        # Undo BPE first, then detokenize (reverse of the encode order).
        if bpe is not None:
            x = bpe.decode(x)
        if tokenizer is not None:
            x = tokenizer.decode(x)
        return x
    # Generate and compute BLEU score
    if args.sacrebleu:
        scorer = bleu.SacrebleuScorer()
    else:
        scorer = bleu.Scorer(tgt_dict.pad(), tgt_dict.eos(), tgt_dict.unk())
    num_sentences = 0
    has_target = True
    wps_meter = TimeMeter()
    for sample in progress:
        sample = utils.move_to_cuda(sample) if use_cuda else sample
        if 'net_input' not in sample:
            continue
        prefix_tokens = None
        if args.prefix_size > 0:
            # Force-decode the first --prefix-size target tokens.
            prefix_tokens = sample['target'][:, :args.prefix_size]
        gen_timer.start()
        hypos = task.inference_step(generator, models, sample, prefix_tokens)
        num_generated_tokens = sum(len(h[0]['tokens']) for h in hypos)
        gen_timer.stop(num_generated_tokens)
        for i, sample_id in enumerate(sample['id'].tolist()):
            has_target = sample['target'] is not None
            # Remove padding
            src_tokens = utils.strip_pad(sample['net_input']['src_tokens'][i, :], tgt_dict.pad())
            target_tokens = None
            if has_target:
                target_tokens = utils.strip_pad(sample['target'][i, :], tgt_dict.pad()).int().cpu()
            # Either retrieve the original sentences or regenerate them from tokens.
            if align_dict is not None:
                src_str = task.dataset(args.gen_subset).src.get_original_text(sample_id)
                target_str = task.dataset(args.gen_subset).tgt.get_original_text(sample_id)
            else:
                if src_dict is not None:
                    src_str = src_dict.string(src_tokens, args.remove_bpe)
                else:
                    src_str = ""
                if has_target:
                    target_str = tgt_dict.string(
                        target_tokens,
                        args.remove_bpe,
                        escape_unk=True,
                        extra_symbols_to_ignore={
                            generator.eos,
                        }
                    )
            src_str = decode_fn(src_str)
            if has_target:
                target_str = decode_fn(target_str)
            if not args.quiet:
                if src_dict is not None:
                    print('S-{}\t{}'.format(sample_id, src_str), file=output_file)
                if has_target:
                    print('T-{}\t{}'.format(sample_id, target_str), file=output_file)
            # Process top predictions
            for j, hypo in enumerate(hypos[i][:args.nbest]):
                hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
                    hypo_tokens=hypo['tokens'].int().cpu(),
                    src_str=src_str,
                    alignment=hypo['alignment'],
                    align_dict=align_dict,
                    tgt_dict=tgt_dict,
                    remove_bpe=args.remove_bpe,
                    extra_symbols_to_ignore={
                        generator.eos,
                    }
                )
                detok_hypo_str = decode_fn(hypo_str)
                if not args.quiet:
                    score = hypo['score'] / math.log(2) # convert to base 2
                    # original hypothesis (after tokenization and BPE)
                    print('H-{}\t{}\t{}'.format(sample_id, score, hypo_str), file=output_file)
                    # detokenized hypothesis
                    print('D-{}\t{}\t{}'.format(sample_id, score, detok_hypo_str), file=output_file)
                    print('P-{}\t{}'.format(
                        sample_id,
                        ' '.join(map(
                            lambda x: '{:.4f}'.format(x),
                            # convert from base e to base 2
                            hypo['positional_scores'].div_(math.log(2)).tolist(),
                        ))
                    ), file=output_file)
                    if args.print_alignment:
                        print('A-{}\t{}'.format(
                            sample_id,
                            ' '.join(['{}-{}'.format(src_idx, tgt_idx) for src_idx, tgt_idx in alignment])
                        ), file=output_file)
                    if args.print_step:
                        print('I-{}\t{}'.format(sample_id, hypo['steps']), file=output_file)
                    if getattr(args, 'retain_iter_history', False):
                        for step, h in enumerate(hypo['history']):
                            _, h_str, _ = utils.post_process_prediction(
                                hypo_tokens=h['tokens'].int().cpu(),
                                src_str=src_str,
                                alignment=None,
                                align_dict=None,
                                tgt_dict=tgt_dict,
                                remove_bpe=None,
                            )
                            print('E-{}_{}\t{}'.format(sample_id, step, h_str), file=output_file)
                # Score only the top hypothesis
                if has_target and j == 0:
                    if align_dict is not None or args.remove_bpe is not None:
                        # Convert back to tokens for evaluation with unk replacement and/or without BPE
                        target_tokens = tgt_dict.encode_line(target_str, add_if_not_exist=True)
                    if hasattr(scorer, 'add_string'):
                        scorer.add_string(target_str, hypo_str)
                    else:
                        scorer.add(target_tokens, hypo_tokens)
        wps_meter.update(num_generated_tokens)
        progress.log({'wps': round(wps_meter.avg)})
        num_sentences += sample['nsentences']
    logger.info('NOTE: hypothesis and token scores are output in base 2')
    logger.info('Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)'.format(
        num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1. / gen_timer.avg))
    if has_target:
        logger.info('Generate {} with beam={}: {}'.format(args.gen_subset, args.beam, scorer.result_string()))
    return scorer
def cli_main():
    """Console entry point: parse generation arguments and run :func:`main`."""
    main(options.parse_args_and_arch(options.get_generation_parser()))


if __name__ == '__main__':
    cli_main()
| 10,264 | 37.302239 | 110 | py |
mix | mix-master/fairseq_cli/validate.py | #!/usr/bin/env python3 -u
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import sys
import torch
from fairseq import checkpoint_utils, options, utils
from fairseq.logging import metrics, progress_bar
# Configure logging once at import time; all records go to stdout.
logging.basicConfig(
    format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=logging.INFO,
    stream=sys.stdout,
)
# Module-level logger used throughout this script.
logger = logging.getLogger('fairseq_cli.validate')
def main(args, override_args=None):
    """Run validation for each subset in ``args.valid_subset``.

    Loads the checkpoint at ``args.path``, runs ``task.valid_step`` over
    every batch, aggregates the logging outputs and prints per-subset stats.
    *override_args* (explicitly-given CLI options only) overrides the args
    stored in the checkpoint.
    """
    utils.import_user_module(args)
    assert args.max_tokens is not None or args.max_sentences is not None, \
        'Must specify batch size either with --max-tokens or --max-sentences'
    use_fp16 = args.fp16
    use_cuda = torch.cuda.is_available() and not args.cpu
    if override_args is not None:
        overrides = vars(override_args)
        # NOTE(review): eval() on --model-overrides executes arbitrary code;
        # trusted CLI input only.
        overrides.update(eval(getattr(override_args, 'model_overrides', '{}')))
    else:
        overrides = None
    # Load ensemble
    logger.info('loading model(s) from {}'.format(args.path))
    models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
        [args.path],
        arg_overrides=overrides,
    )
    # Only the first model is validated; the loop below still prepares all.
    model = models[0]
    # Move models to GPU
    for model in models:
        if use_fp16:
            model.half()
        if use_cuda:
            model.cuda()
    # Print args
    logger.info(model_args)
    # Build criterion
    criterion = task.build_criterion(model_args)
    criterion.eval()
    for subset in args.valid_subset.split(','):
        try:
            task.load_dataset(subset, combine=False, epoch=1)
            dataset = task.dataset(subset)
        except KeyError:
            raise Exception('Cannot find dataset: ' + subset)
        # Initialize data iterator
        itr = task.get_batch_iterator(
            dataset=dataset,
            max_tokens=args.max_tokens,
            max_sentences=args.max_sentences,
            max_positions=utils.resolve_max_positions(
                task.max_positions(),
                *[m.max_positions() for m in models],
            ),
            ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=args.required_batch_size_multiple,
            seed=args.seed,
            num_workers=args.num_workers,
        ).next_epoch_itr(shuffle=False)
        progress = progress_bar.progress_bar(
            itr,
            log_format=args.log_format,
            log_interval=args.log_interval,
            prefix=f"valid on '{subset}' subset",
            default_log_format=('tqdm' if not args.no_progress_bar else 'simple'),
        )
        log_outputs = []
        for i, sample in enumerate(progress):
            sample = utils.move_to_cuda(sample) if use_cuda else sample
            _loss, _sample_size, log_output = task.valid_step(sample, model, criterion)
            progress.log(log_output, step=i)
            log_outputs.append(log_output)
        with metrics.aggregate() as agg:
            task.reduce_metrics(log_outputs, criterion)
            log_output = agg.get_smoothed_values()
        # NOTE(review): 'i' is the last batch index; an empty subset would
        # raise NameError here.
        progress.print(log_output, tag=subset, step=i)
def cli_main():
    """Console entry point for validation.

    The command line is parsed twice: once normally, and once with
    suppressed defaults so that only options the user actually typed
    override the values stored in the checkpoint.
    """
    args = options.parse_args_and_arch(options.get_validation_parser())
    override_args = options.parse_args_and_arch(
        options.get_validation_parser(), suppress_defaults=True
    )
    main(args, override_args)


if __name__ == '__main__':
    cli_main()
| 3,706 | 30.415254 | 88 | py |
mix | mix-master/fairseq_cli/eval_lm.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluate the perplexity of a trained language model.
"""
import logging
import math
import os
import torch
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.data import LMContextWindowDataset
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.sequence_scorer import SequenceScorer
# Configure logging once at import time (default stream: stderr).
logging.basicConfig(
    format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=logging.INFO,
)
# Module-level logger used throughout this script.
logger = logging.getLogger('fairseq_cli.eval_lm')
class WordStat(object):
    """Accumulates log-probability statistics for a single word type."""

    def __init__(self, word, is_bpe):
        self.word = word
        self.is_bpe = is_bpe
        self.log_prob = 0
        self.next_word_prob = 0
        self.count = 0
        self.missing_next_words = 0

    def add(self, log_prob, next_word_prob):
        """Accumulate the log prob of this word and, when known, of the word
        that follows it (given the context ending at this word).

        ``next_word_prob`` may be ``None`` — e.g. the word ends the example,
        or the following token is a non-final subword unit — and such cases
        are tallied in ``missing_next_words`` instead.
        """
        self.log_prob += log_prob
        self.count += 1
        if next_word_prob is None:
            self.missing_next_words += 1
        else:
            self.next_word_prob += next_word_prob

    def __str__(self):
        observed_next = self.count - self.missing_next_words
        return '{}\t{}\t{}\t{}\t{}\t{}'.format(
            self.word, self.count, self.log_prob, self.is_bpe,
            self.next_word_prob, observed_next,
        )
def main(parsed_args):
    """Evaluate the perplexity of an ensemble of trained language models.

    Loads the checkpoint(s) at ``parsed_args.path``, scores the
    ``gen_subset`` split token by token with a :class:`SequenceScorer`,
    and logs base-2 loss/perplexity plus (optionally) per-word stats.
    """
    assert parsed_args.path is not None, '--path required for evaluation!'
    utils.import_user_module(parsed_args)
    logger.info(parsed_args)
    use_cuda = torch.cuda.is_available() and not parsed_args.cpu
    task = tasks.setup_task(parsed_args)
    # Load ensemble
    logger.info('loading model(s) from {}'.format(parsed_args.path))
    # NOTE(review): eval() on --model-overrides executes arbitrary code;
    # acceptable for a trusted CLI argument, never for untrusted input.
    models, args = checkpoint_utils.load_model_ensemble(
        parsed_args.path.split(os.pathsep),
        arg_overrides=eval(parsed_args.model_overrides),
        task=task,
    )
    # Copy CLI args onto the checkpoint args, except the data-layout
    # options the model was trained with.
    for arg in vars(parsed_args).keys():
        if arg not in {
            'self_target', 'future_target', 'past_target', 'tokens_per_sample',
            'output_size_dictionary', 'add_bos_token',
        }:
            setattr(args, arg, getattr(parsed_args, arg))
    # reduce tokens per sample by the required context window size
    args.tokens_per_sample -= args.context_window
    task = tasks.setup_task(args)
    # Load dataset splits
    task.load_dataset(args.gen_subset)
    dataset = task.dataset(args.gen_subset)
    if args.context_window > 0:
        dataset = LMContextWindowDataset(
            dataset=dataset,
            tokens_per_sample=args.tokens_per_sample,
            context_window=args.context_window,
            pad_idx=task.source_dictionary.pad(),
        )
    logger.info('{} {} {} examples'.format(args.data, args.gen_subset, len(dataset)))
    # Optimize ensemble for generation and set the source and dest dicts on the model (required by scorer)
    for model in models:
        model.make_generation_fast_()
        if args.fp16:
            model.half()
        if use_cuda:
            model.cuda()
    assert len(models) > 0
    logger.info('num. model params: {}'.format(sum(p.numel() for p in models[0].parameters())))
    itr = task.get_batch_iterator(
        dataset=dataset,
        max_tokens=args.max_tokens or 36000,
        max_sentences=args.max_sentences,
        max_positions=utils.resolve_max_positions(*[
            model.max_positions() for model in models
        ]),
        ignore_invalid_inputs=True,
        num_shards=args.num_shards,
        shard_id=args.shard_id,
        num_workers=args.num_workers,
    ).next_epoch_itr(shuffle=False)
    progress = progress_bar.progress_bar(
        itr,
        log_format=args.log_format,
        log_interval=args.log_interval,
        default_log_format=('tqdm' if not args.no_progress_bar else 'none'),
    )
    gen_timer = StopwatchMeter()
    scorer = SequenceScorer(task.target_dictionary, args.softmax_batch)
    score_sum = 0.
    count = 0
    if args.remove_bpe is not None:
        if args.remove_bpe == 'sentencepiece':
            raise NotImplementedError
        else:
            # Token ids whose symbol ends with the BPE continuation marker.
            bpe_cont = args.remove_bpe.rstrip()
            bpe_toks = {
                i
                for i in range(len(task.source_dictionary))
                if task.source_dictionary[i].endswith(bpe_cont)
            }
            bpe_len = len(bpe_cont)
    else:
        bpe_toks = None
        bpe_len = 0
    word_stats = dict()
    wps_meter = TimeMeter()
    for sample in progress:
        if 'net_input' not in sample:
            continue
        sample = utils.move_to_cuda(sample) if use_cuda else sample
        gen_timer.start()
        hypos = scorer.generate(models, sample)
        gen_timer.stop(sample['ntokens'])
        for i, hypos_i in enumerate(hypos):
            hypo = hypos_i[0]
            sample_id = sample['id'][i]
            tokens = hypo['tokens']
            tgt_len = tokens.numel()
            pos_scores = hypo['positional_scores'].float()
            if args.add_bos_token:
                assert hypo['tokens'][0].item() == task.target_dictionary.bos()
                tokens = tokens[1:]
                pos_scores = pos_scores[1:]
            skipped_toks = 0
            if bpe_toks is not None:
                # Fold the score of each BPE continuation unit into the next
                # subword so every full word is counted exactly once.
                # (renamed from 'i' to avoid shadowing the enumerate index)
                for tok_idx in range(tgt_len - 1):
                    if tokens[tok_idx].item() in bpe_toks:
                        skipped_toks += 1
                        pos_scores[tok_idx + 1] += pos_scores[tok_idx]
                        pos_scores[tok_idx] = 0
            inf_scores = pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf'))
            if inf_scores.any():
                # Fixed: route the token string through a %s placeholder.
                # The old call passed it as a stray positional argument to a
                # format string without placeholders, so logging raised a
                # formatting error and the tokens were never shown.
                logger.info(
                    'skipping tokens with inf scores: %s',
                    task.target_dictionary.string(tokens[inf_scores.nonzero()])
                )
                pos_scores = pos_scores[(~inf_scores).nonzero()]
            score_sum += pos_scores.sum().cpu()
            count += pos_scores.numel() - skipped_toks
            if args.output_word_probs or args.output_word_stats:
                w = ''
                word_prob = []
                is_bpe = False
                for tok_idx in range(len(tokens)):
                    w_ind = tokens[tok_idx].item()
                    w += task.source_dictionary[w_ind]
                    if bpe_toks is not None and w_ind in bpe_toks:
                        # Still inside a word: strip the continuation marker
                        # and keep accumulating subword units.
                        w = w[:-bpe_len]
                        is_bpe = True
                    else:
                        word_prob.append((w, pos_scores[tok_idx].item()))
                        # Find the score of the next *word* (first following
                        # position whose folded score is non-zero).
                        next_prob = None
                        ind = tok_idx + 1
                        while ind < len(tokens):
                            if pos_scores[ind].item() != 0:
                                next_prob = pos_scores[ind]
                                break
                            ind += 1
                        word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[tok_idx].item(), next_prob)
                        is_bpe = False
                        w = ''
                if args.output_word_probs:
                    logger.info(
                        str(int(sample_id)) + " "
                        + ('\t'.join('{} [{:2f}]'.format(x[0], x[1]) for x in word_prob))
                    )
        wps_meter.update(sample['ntokens'])
        progress.log({'wps': round(wps_meter.avg)})
    avg_nll_loss = -score_sum / count / math.log(2)  # convert to base 2
    logger.info('Evaluated {} tokens in {:.1f}s ({:.2f} tokens/s)'.format(
        gen_timer.n, gen_timer.sum, 1. / gen_timer.avg
    ))
    logger.info('Loss (base 2): {:.4f}, Perplexity: {:.2f}'.format(
        avg_nll_loss, 2**avg_nll_loss
    ))
    if args.output_word_stats:
        for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True):
            logger.info(ws)
def cli_main():
    """Console entry point: parse eval-LM arguments and run :func:`main`."""
    main(options.parse_args_and_arch(options.get_eval_lm_parser()))


if __name__ == '__main__':
    cli_main()
| 8,462 | 32.717131 | 112 | py |
mix | mix-master/fairseq_cli/interactive.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate raw text with a trained model. Batches data on-the-fly.
"""
from collections import namedtuple
import fileinput
import logging
import math
import sys
import os
import torch
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.data import encoders
# Configure logging once at import time; all records go to stdout.
logging.basicConfig(
    format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=logging.INFO,
    stream=sys.stdout,
)
# Module-level logger used throughout this script.
logger = logging.getLogger('fairseq_cli.interactive')
# Batch: one mini-batch of encoded inputs with their original ids/lengths.
Batch = namedtuple('Batch', 'ids src_tokens src_lengths')
# Translation: result record; not constructed in this module's visible code.
Translation = namedtuple('Translation', 'src_str hypos pos_scores alignments')
def buffered_read(input, buffer_size):
    """Read *input* (file path, or '-' for stdin via fileinput) and yield
    lists of stripped lines, at most *buffer_size* lines per chunk."""
    chunk = []
    with fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")) as h:
        for src_str in h:
            chunk.append(src_str.strip())
            if len(chunk) >= buffer_size:
                yield chunk
                chunk = []
    # Flush whatever is left over after the final full chunk.
    if chunk:
        yield chunk
def make_batches(lines, args, task, max_positions, encode_fn):
    """Encode raw input *lines* and yield :class:`Batch` tuples ready for
    inference, in the order produced by the task's batch iterator."""
    tokens = []
    for src_str in lines:
        encoded = task.source_dictionary.encode_line(
            encode_fn(src_str), add_if_not_exist=False
        )
        tokens.append(encoded.long())
    lengths = [t.numel() for t in tokens]
    dataset = task.build_dataset_for_inference(tokens, lengths)
    itr = task.get_batch_iterator(
        dataset=dataset,
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=max_positions,
        ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test
    ).next_epoch_itr(shuffle=False)
    for batch in itr:
        net_input = batch['net_input']
        yield Batch(
            ids=batch['id'],
            src_tokens=net_input['src_tokens'],
            src_lengths=net_input['src_lengths'],
        )
def main(args):
    """Interactively translate raw text read from ``args.input``.

    Reads input in chunks of ``args.buffer_size`` lines, batches them on the
    fly, runs the loaded ensemble, and prints S-/H-/D-/P-(/A-) lines to
    stdout in the original input order.
    """
    utils.import_user_module(args)
    if args.buffer_size < 1:
        args.buffer_size = 1
    if args.max_tokens is None and args.max_sentences is None:
        args.max_sentences = 1
    assert not args.sampling or args.nbest == args.beam, \
        '--sampling requires --nbest to be equal to --beam'
    assert not args.max_sentences or args.max_sentences <= args.buffer_size, \
        '--max-sentences/--batch-size cannot be larger than --buffer-size'
    logger.info(args)
    use_cuda = torch.cuda.is_available() and not args.cpu
    # Setup task, e.g., translation
    task = tasks.setup_task(args)
    # Load ensemble
    logger.info('loading model(s) from {}'.format(args.path))
    # NOTE(review): eval() on --model-overrides executes arbitrary code;
    # trusted CLI input only.
    models, _model_args = checkpoint_utils.load_model_ensemble(
        args.path.split(os.pathsep),
        arg_overrides=eval(args.model_overrides),
        task=task,
    )
    # Set dictionaries
    src_dict = task.source_dictionary
    tgt_dict = task.target_dictionary
    # Optimize ensemble for generation
    for model in models:
        model.make_generation_fast_(
            beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
            need_attn=args.print_alignment,
        )
        if args.fp16:
            model.half()
        if use_cuda:
            model.cuda()
    # Initialize generator
    generator = task.build_generator(args)
    # Handle tokenization and BPE
    tokenizer = encoders.build_tokenizer(args)
    bpe = encoders.build_bpe(args)
    def encode_fn(x):
        # Tokenize first, then apply BPE.
        if tokenizer is not None:
            x = tokenizer.encode(x)
        if bpe is not None:
            x = bpe.encode(x)
        return x
    def decode_fn(x):
        # Inverse of encode_fn: undo BPE, then detokenize.
        if bpe is not None:
            x = bpe.decode(x)
        if tokenizer is not None:
            x = tokenizer.decode(x)
        return x
    # Load alignment dictionary for unknown word replacement
    # (None if no unknown word replacement, empty if no path to align dictionary)
    align_dict = utils.load_align_dict(args.replace_unk)
    max_positions = utils.resolve_max_positions(
        task.max_positions(),
        *[model.max_positions() for model in models]
    )
    if args.buffer_size > 1:
        logger.info('Sentence buffer size: %s', args.buffer_size)
    logger.info('NOTE: hypothesis and token scores are output in base 2')
    logger.info('Type the input sentence and press return:')
    start_id = 0
    for inputs in buffered_read(args.input, args.buffer_size):
        results = []
        for batch in make_batches(inputs, args, task, max_positions, encode_fn):
            src_tokens = batch.src_tokens
            src_lengths = batch.src_lengths
            if use_cuda:
                src_tokens = src_tokens.cuda()
                src_lengths = src_lengths.cuda()
            sample = {
                'net_input': {
                    'src_tokens': src_tokens,
                    'src_lengths': src_lengths,
                },
            }
            translations = task.inference_step(generator, models, sample)
            for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)):
                src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad())
                results.append((start_id + id, src_tokens_i, hypos))
        # sort output to match input order
        for id, src_tokens, hypos in sorted(results, key=lambda x: x[0]):
            if src_dict is not None:
                src_str = src_dict.string(src_tokens, args.remove_bpe)
                print('S-{}\t{}'.format(id, src_str))
            # Process top predictions
            for hypo in hypos[:min(len(hypos), args.nbest)]:
                hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
                    hypo_tokens=hypo['tokens'].int().cpu(),
                    src_str=src_str,
                    alignment=hypo['alignment'],
                    align_dict=align_dict,
                    tgt_dict=tgt_dict,
                    remove_bpe=args.remove_bpe,
                )
                detok_hypo_str = decode_fn(hypo_str)
                score = hypo['score'] / math.log(2)  # convert to base 2
                # original hypothesis (after tokenization and BPE)
                print('H-{}\t{}\t{}'.format(id, score, hypo_str))
                # detokenized hypothesis
                print('D-{}\t{}\t{}'.format(id, score, detok_hypo_str))
                print('P-{}\t{}'.format(
                    id,
                    ' '.join(map(
                        lambda x: '{:.4f}'.format(x),
                        # convert from base e to base 2
                        hypo['positional_scores'].div_(math.log(2)).tolist(),
                    ))
                ))
                if args.print_alignment:
                    alignment_str = " ".join(["{}-{}".format(src, tgt) for src, tgt in alignment])
                    print('A-{}\t{}'.format(
                        id,
                        alignment_str
                    ))
        # update running id counter
        start_id += len(inputs)
def cli_main():
    """Console entry point: parse interactive-generation args and run main()."""
    main(options.parse_args_and_arch(options.get_generation_parser(interactive=True)))


if __name__ == '__main__':
    cli_main()
| 7,270 | 32.353211 | 103 | py |
mix | mix-master/fairseq_cli/train.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import logging
import math
import os
import random
import sys
import numpy as np
import torch
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from fairseq.data import iterators
from fairseq.logging import meters, metrics, progress_bar
from fairseq.trainer import Trainer
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
# Configure logging once at import time; all records go to stdout.
logging.basicConfig(
    format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=logging.INFO,
    stream=sys.stdout,
)
# Module-level logger used throughout this script.
logger = logging.getLogger('fairseq_cli.train')
def main(args, init_distributed=False):
    """Top-level training loop.

    Sets up the task, model, criterion and trainer, restores the latest
    checkpoint if one exists, then trains epoch by epoch until the learning
    rate drops below ``--min-lr``, ``--max-epoch``/``--max-update`` is
    reached, or early stopping triggers.
    """
    utils.import_user_module(args)
    assert args.max_tokens is not None or args.max_sentences is not None, \
        'Must specify batch size either with --max-tokens or --max-sentences'
    # Initialize CUDA and distributed training
    if torch.cuda.is_available() and not args.cpu:
        torch.cuda.set_device(args.device_id)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if init_distributed:
        args.distributed_rank = distributed_utils.distributed_init(args)
    if distributed_utils.is_master(args):
        checkpoint_utils.verify_checkpoint_directory(args.save_dir)
    # Print args
    logger.info(args)
    # Setup task, e.g., translation, language modeling, etc.
    task = tasks.setup_task(args)
    # Load valid dataset (we load training data below, based on the latest checkpoint)
    for valid_sub_split in args.valid_subset.split(','):
        task.load_dataset(valid_sub_split, combine=False, epoch=1)
    # Build model and criterion
    model = task.build_model(args)
    criterion = task.build_criterion(args)
    logger.info(model)
    logger.info('model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    logger.info('num. model params: {} (num. trained: {})'.format(
        sum(p.numel() for p in model.parameters()),
        sum(p.numel() for p in model.parameters() if p.requires_grad),
    ))
    # Build trainer
    if args.model_parallel_size == 1:
        trainer = Trainer(args, task, model, criterion)
    else:
        trainer = MegatronTrainer(args, task, model, criterion)
    logger.info('training on {} GPUs'.format(args.distributed_world_size))
    logger.info('max tokens per GPU = {} and max sentences per GPU = {}'.format(
        args.max_tokens,
        args.max_sentences,
    ))
    # Load the latest checkpoint if one is available and restore the
    # corresponding train iterator
    extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)
    # Train until the learning rate gets too small
    max_epoch = args.max_epoch or math.inf
    max_update = args.max_update or math.inf
    lr = trainer.get_lr()
    train_meter = meters.StopwatchMeter()
    train_meter.start()
    valid_subsets = args.valid_subset.split(',')
    while (
        lr > args.min_lr
        and epoch_itr.next_epoch_idx <= max_epoch
        and trainer.get_num_updates() < max_update
    ):
        # train for one epoch
        train(args, trainer, task, epoch_itr)
        if not args.disable_validation and epoch_itr.epoch % args.validate_interval == 0:
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
        else:
            valid_losses = [None]
        # only use first validation loss to update the learning rate
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
        # save checkpoint
        if epoch_itr.epoch % args.save_interval == 0:
            checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
        # early stop
        if should_stop_early(args, valid_losses[0]):
            logger.info('early stop since valid performance hasn\'t improved for last {} runs'.format(args.patience))
            break
        epoch_itr = trainer.get_train_iterator(
            epoch_itr.next_epoch_idx,
            # sharded data: get train iterator for next epoch
            load_dataset=(os.pathsep in getattr(args, 'data', '')),
        )
    train_meter.stop()
    logger.info('done training in {:.1f} seconds'.format(train_meter.sum))
def should_stop_early(args, valid_loss):
    """Return True when validation hasn't improved for ``args.patience`` runs.

    Progress is tracked on the function object itself (``best`` and
    ``num_runs`` attributes), so the decision persists across calls.
    Improvement direction follows ``args.maximize_best_checkpoint_metric``.
    """
    # Nothing to judge if no validation ran this epoch.
    if valid_loss is None:
        return False
    # Patience disabled.
    if args.patience <= 0:
        return False

    def is_better(a, b):
        if args.maximize_best_checkpoint_metric:
            return a > b
        return a < b

    prev_best = getattr(should_stop_early, 'best', None)
    if prev_best is not None and not is_better(valid_loss, prev_best):
        # No improvement: stop once patience is exhausted.
        should_stop_early.num_runs += 1
        return should_stop_early.num_runs >= args.patience
    # First observation, or a new best: record it and reset the counter.
    should_stop_early.best = valid_loss
    should_stop_early.num_runs = 0
    return False
@metrics.aggregate('train')
def train(args, trainer, task, epoch_itr):
    """Train the model for one epoch.

    Groups batches into ``--update-freq`` chunks for gradient accumulation,
    logs running stats every ``--log-interval`` updates, and optionally
    validates/saves every ``--save-interval-updates`` updates.
    """
    # Initialize data iterator
    itr = epoch_itr.next_epoch_itr(
        fix_batches_to_gpus=args.fix_batches_to_gpus,
        shuffle=(epoch_itr.next_epoch_idx > args.curriculum),
    )
    # update_freq may be specified per epoch; fall back to the last entry.
    update_freq = (
        args.update_freq[epoch_itr.epoch - 1]
        if epoch_itr.epoch <= len(args.update_freq)
        else args.update_freq[-1]
    )
    itr = iterators.GroupedIterator(itr, update_freq)
    progress = progress_bar.progress_bar(
        itr,
        log_format=args.log_format,
        log_interval=args.log_interval,
        epoch=epoch_itr.epoch,
        tensorboard_logdir=(
            args.tensorboard_logdir if distributed_utils.is_master(args) else None
        ),
        default_log_format=('tqdm' if not args.no_progress_bar else 'simple'),
    )
    # task specific setup per epoch
    task.begin_epoch(epoch_itr.epoch, trainer.get_model())
    valid_subsets = args.valid_subset.split(',')
    max_update = args.max_update or math.inf
    for samples in progress:
        with metrics.aggregate('train_inner'):
            log_output = trainer.train_step(samples)
            if log_output is None: # OOM, overflow, ...
                continue
        # log mid-epoch stats
        num_updates = trainer.get_num_updates()
        if num_updates % args.log_interval == 0:
            stats = get_training_stats(metrics.get_smoothed_values('train_inner'))
            progress.log(stats, tag='train_inner', step=num_updates)
            # reset mid-epoch stats after each log interval
            # the end-of-epoch stats will still be preserved
            metrics.reset_meters('train_inner')
        if (
            not args.disable_validation
            and args.save_interval_updates > 0
            and num_updates % args.save_interval_updates == 0
            and num_updates > 0
        ):
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
            checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
        if num_updates >= max_update:
            break
    # log end-of-epoch stats
    # NOTE(review): num_updates is unbound here if the epoch iterator yielded
    # no samples at all.
    stats = get_training_stats(metrics.get_smoothed_values('train'))
    progress.print(stats, tag='train', step=num_updates)
    # reset epoch-level meters
    metrics.reset_meters('train')
def get_training_stats(stats):
    """Augment a smoothed-stats dict with derived training metrics.

    Adds 'ppl' (from 'nll_loss', when present and not already set) and the
    rounded wall-clock time, then returns the same dict.
    """
    needs_ppl = 'nll_loss' in stats and 'ppl' not in stats
    if needs_ppl:
        stats['ppl'] = utils.get_perplexity(stats['nll_loss'])
    wall_meter = metrics.get_meter('default', 'wall')
    stats['wall'] = round(wall_meter.elapsed_time, 0)
    return stats
def validate(args, trainer, task, epoch_itr, subsets):
    """Evaluate the model on the validation set(s) and return the losses.

    Returns one value per subset (the ``args.best_checkpoint_metric`` entry
    of the smoothed validation stats), in the same order as ``subsets``.
    """
    if args.fixed_validation_seed is not None:
        # set fixed seed for every validation
        utils.set_torch_seed(args.fixed_validation_seed)
    valid_losses = []
    for subset in subsets:
        # Initialize data iterator
        itr = task.get_batch_iterator(
            dataset=task.dataset(subset),
            max_tokens=args.max_tokens_valid,
            max_sentences=args.max_sentences_valid,
            max_positions=utils.resolve_max_positions(
                task.max_positions(),
                trainer.get_model().max_positions(),
            ),
            ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=args.required_batch_size_multiple,
            seed=args.seed,
            num_shards=args.distributed_world_size,
            shard_id=args.distributed_rank,
            num_workers=args.num_workers,
        ).next_epoch_itr(shuffle=False)
        progress = progress_bar.progress_bar(
            itr,
            log_format=args.log_format,
            log_interval=args.log_interval,
            epoch=epoch_itr.epoch,
            prefix=f"valid on '{subset}' subset",
            tensorboard_logdir=(
                args.tensorboard_logdir if distributed_utils.is_master(args) else None
            ),
            default_log_format=('tqdm' if not args.no_progress_bar else 'simple'),
        )
        # create a new root metrics aggregator so validation metrics
        # don't pollute other aggregators (e.g., train meters)
        with metrics.aggregate(new_root=True) as agg:
            for sample in progress:
                trainer.valid_step(sample)
        # log validation stats
        stats = get_valid_stats(args, trainer, agg.get_smoothed_values())
        progress.print(stats, tag=subset, step=trainer.get_num_updates())
        valid_losses.append(stats[args.best_checkpoint_metric])
    return valid_losses
def get_valid_stats(args, trainer, stats):
    """Augment validation stats with 'ppl', the update count and, when a
    best checkpoint has been recorded, the running best metric value."""
    if 'nll_loss' in stats and 'ppl' not in stats:
        stats['ppl'] = utils.get_perplexity(stats['nll_loss'])
    stats['num_updates'] = trainer.get_num_updates()
    if hasattr(checkpoint_utils.save_checkpoint, 'best'):
        metric_name = args.best_checkpoint_metric
        pick = max if args.maximize_best_checkpoint_metric else min
        stats[f'best_{metric_name}'] = pick(
            checkpoint_utils.save_checkpoint.best,
            stats[metric_name],
        )
    return stats
def distributed_main(i, args, start_rank=0):
    """Entry point for one spawned distributed worker.

    Args:
        i: local device index assigned to this worker
        args: parsed training arguments (mutated in place)
        start_rank: rank offset of the first worker on this node
    """
    args.device_id = i
    # When spawned via torch.multiprocessing the rank is unset and is
    # derived from the device index.
    if args.distributed_rank is None:
        args.distributed_rank = start_rank + i
    main(args, init_distributed=True)
def cli_main(modify_parser=None):
    """Command-line entry point: parse args and dispatch to the appropriate
    training mode (distributed spawn, single distributed process, multi-GPU
    fallback on one node, or plain single-GPU training)."""
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
    if args.distributed_init_method is None:
        distributed_utils.infer_init_method(args)
    if args.distributed_init_method is not None:
        # distributed training
        if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:
            start_rank = args.distributed_rank
            args.distributed_rank = None  # assign automatically
            torch.multiprocessing.spawn(
                fn=distributed_main,
                args=(args, start_rank),
                nprocs=torch.cuda.device_count(),
            )
        else:
            distributed_main(args.device_id, args)
    elif args.distributed_world_size > 1:
        # fallback for single node with multiple GPUs
        assert args.distributed_world_size <= torch.cuda.device_count()
        # NOTE(review): the port is picked at random without checking
        # availability; a collision would make rendezvous fail.
        port = random.randint(10000, 20000)
        args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
        args.distributed_rank = None  # set based on device id
        torch.multiprocessing.spawn(
            fn=distributed_main,
            args=(args, ),
            nprocs=args.distributed_world_size,
        )
    else:
        # single GPU training
        main(args)
if __name__ == '__main__':
    # Script entry point.
    cli_main()
| 11,933 | 35.054381 | 117 | py |
transmatching | transmatching-main/setup.py | import setuptools
# Read the long description shown on PyPI from the repository README.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="transmatching",  # Replace with your own username
    version="0.0.1",
    author="Example Author",
    author_email="author@example.com",
    description="A small example package",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/pypa/sampleproject",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.6",
    # Runtime dependencies; several are pinned to exact versions for
    # reproducibility.
    install_requires=[
        "tqdm",
        "trimesh",
        "matplotlib",
        "plotly",
        "pytorch-lightning",
        "dvc[gdrive]==2.13.0",
        "meshio==5.3.4",
        "scipy==1.8.1",
        "scikit-learn==1.1.1",
        "python-dotenv==0.20.0",
        "hydra-core==1.1",
        "GitPython==3.1.27",
        "streamlit==1.10.0",
        "stqdm==0.0.4",
    ],
)
| 1,064 | 26.307692 | 59 | py |
transmatching | transmatching-main/evaluation/evaluate.py | import itertools
import json
from pathlib import Path
from typing import Dict, Optional, Sequence, Union
import hydra
import igl
import meshio
import numpy as np
import omegaconf
from matplotlib import pyplot as plt
from pytorch_lightning import seed_everything
from scipy import sparse
from scipy.sparse.csgraph import dijkstra
from scipy.spatial.distance import cdist
from tqdm import tqdm
from evaluation.competitors.eval_dataset import EvalDataset
from evaluation.utils import (
PROJECT_ROOT,
Mesh,
convert_colors,
get_point_colors,
plot_geo_errors,
plot_mesh,
plot_meshes,
)
def approximate_geodesic_distances(v: np.ndarray, f: np.ndarray) -> np.ndarray:
    """
    Approximate all-pairs geodesic distances on a mesh.

    Builds a sparse graph whose edges are the mesh edges weighted by their
    Euclidean length, then runs Dijkstra from every vertex.

    Args:
        v: the mesh points, shape (n, 3)
        f: the mesh faces, shape (m, 3)

    Returns:
        an (n, n) matrix of approximated geodesic distances
    """
    adjacency = igl.adjacency_matrix(f)
    euclidean = cdist(v, v)
    rows, cols = np.nonzero(adjacency)
    edge_lengths = euclidean[rows, cols]
    n = v.shape[0]
    graph = sparse.coo_matrix((edge_lengths, (rows, cols)), shape=(n, n))
    return dijkstra(graph, directed=False)
def get_geodesic_errors(
    pred_matching_A_to_B: np.ndarray,
    gt_matching_A_to_B: np.ndarray,
    points_B: np.ndarray,
    faces_B: np.ndarray,
    cache_path: Optional[Path] = None,
) -> np.ndarray:
    """
    Compute the matching geodesic errors on the shape B, given the predicted
    and ground truth matching A -> B. The geodesic distances on B are
    approximated with Dijkstra over edges weighted by Euclidean length.

    For both matchings the index `i` refers to a point of shape A and the
    value refers to a point of shape B.

    Args:
        pred_matching_A_to_B: the predicted matching
        gt_matching_A_to_B: the ground truth matching
        points_B: the points of the shape B
        faces_B: the faces of the shape B
        cache_path: optional folder used to cache B's (unnormalized)
            geodesic distance matrix between runs

    Returns:
        an array with the geodesic error of the prediction for each point
        of shape A, normalized by the geodesic diameter of B
    """
    # The geodesic distance matrix is expensive; reuse it from disk when a
    # cache path is given.
    cache_file = cache_path / "B_geo_dists.npy" if cache_path is not None else None
    geo_dists = None
    if cache_file is not None and cache_file.exists():
        geo_dists = np.load(str(cache_file))
    if geo_dists is None:
        geo_dists = approximate_geodesic_distances(points_B, faces_B)
        if cache_path is not None:
            cache_path.mkdir(parents=True, exist_ok=True)
            # The raw matrix is cached; normalization happens below on load.
            np.save(str(cache_file), geo_dists)
    # Normalize by the geodesic diameter so errors are comparable across shapes.
    geo_dists /= np.max(geo_dists)
    geo_errors = geo_dists[pred_matching_A_to_B, gt_matching_A_to_B]
    return geo_errors
def get_registration_errors(
    registration_A_to_B: np.ndarray,
    points_B: np.ndarray,
    gt_matching_A_to_B: np.ndarray,
) -> Dict[str, float]:
    """Euclidean registration metrics against the ground-truth correspondence.

    Returns a dict with the maximum and mean per-point Euclidean error of the
    registration, and the symmetric Chamfer distance to shape B.
    """
    target = points_B[gt_matching_A_to_B]
    per_point_error = np.linalg.norm(registration_A_to_B - target, axis=-1)
    pairwise = cdist(registration_A_to_B, points_B)
    chamfer = (pairwise.min(-2).mean(-1) + pairwise.min(-1).mean(-1)) / 2
    return {
        "max_eu": per_point_error.max(),
        "mean_eu": per_point_error.mean(),
        "chamfer": chamfer,
    }
def load_predictions(
    dataset_name: str, sample_idx: int, model_name: str
) -> Dict[str, np.ndarray]:
    """
    Load the pre-computed predictions for one sample.

    Args:
        dataset_name: the name of the dataset to consider
        sample_idx: the sample index inside the dataset to consider
        model_name: the name of the model to consider

    Returns:
        a dict with the predicted matching under "pred_matching_A_to_B" and,
        when the model wrote one, the registered mesh under
        "registration_A_to_B"
    """
    prediction_folder = (
        PROJECT_ROOT
        / "evaluation"
        / "predictions"
        / dataset_name
        / model_name
        / f"{sample_idx:03}"
    )
    assert (
        prediction_folder.exists()
    ), f"Prediction folder does not exists: <{prediction_folder}>"
    pred_matching_A_to_B = np.loadtxt(
        str(prediction_folder / f"pred_matching_A_to_B.txt"), dtype=np.int32
    )
    out = {"pred_matching_A_to_B": pred_matching_A_to_B}
    # The registration is optional: only registration-based methods emit it.
    registration_A_to_B_path = prediction_folder / "registration_A_to_B.off"
    if registration_A_to_B_path.exists():
        out["registration_A_to_B"] = meshio.read(registration_A_to_B_path)
    return out
def compute_and_store_pair_metrics(
    sample_path: Path,
    cache_path: Path,
    sample: Dict[str, np.ndarray],
    predictions: Dict[str, np.ndarray],
    model_name: str,
) -> Dict[str, np.ndarray]:
    """
    Given a pair sample and the corresponding prediction, compute the metrics
    to evaluate the performance, and store the per-pair artifacts (HTML
    visualizations, error files and `metrics.json`) under `sample_path`.

    Args:
        sample_path: output folder for this pair's artifacts
        cache_path: cache folder for B's geodesic distance matrix
        sample: the sample being evaluated
        predictions: the predictions over that sample
        model_name: name of the evaluated model (echoed in the result)

    Returns:
        a dictionary containing the computed pair-metrics plus the raw
        per-point geodesic errors.
    """
    points_A = sample["points_A"]
    points_B = sample["points_B"]
    gt_matching_A_to_B = sample["gt_matching_A_to_B"]
    pred_matching_A_to_B = predictions["pred_matching_A_to_B"]
    # Registration-specific artifacts and metrics (only when the model
    # produced a registration of A onto B).
    if "registration_A_to_B" in predictions:
        meshio.write(
            str(sample_path / "registration_A_to_B.off"),
            predictions["registration_A_to_B"],
        )
        color = get_point_colors(
            points_B,
            frequency=np.pi,
        )
        shape_A_color_transfer = plot_meshes(
            meshes=[
                Mesh(
                    v=points_A,
                    f=None,
                    color=convert_colors(color[pred_matching_A_to_B]),
                ),
                Mesh(
                    v=predictions["registration_A_to_B"].points,
                    f=None,
                    color=convert_colors(color),
                ),
                Mesh(
                    v=points_B,
                    f=None,
                    color=convert_colors(color),
                ),
            ],
            titles=["Shape A", "A registration to B", "Shape B"],
            showtriangles=[False, False, False],
            showscales=None,
            autoshow=False,
        )
        shape_A_color_transfer.write_html(str(sample_path / "registration_A_to_B.html"))
        registration_metrics = get_registration_errors(
            registration_A_to_B=predictions["registration_A_to_B"].points,
            points_B=points_B,
            gt_matching_A_to_B=gt_matching_A_to_B,
        )
    # pulled-back errors! The errors are defined over the shape A
    geo_errors = get_geodesic_errors(
        pred_matching_A_to_B=pred_matching_A_to_B,
        gt_matching_A_to_B=gt_matching_A_to_B,
        points_B=points_B,
        faces_B=sample["faces_B"],
        cache_path=cache_path,
    )
    np.savetxt(
        fname=str(sample_path / f"geo_errors.txt"),
        X=geo_errors,
    )
    # errors on B pull-back visualization on shape A
    shape_A_errors = plot_mesh(
        Mesh(
            v=points_A,
            f=None,
            color=geo_errors,
        ),
        colorscale="hot",
        reversescale=True,
        cmin=0,
        cmax=0.25,
    )
    shape_A_errors.write_html(str(sample_path / "geo_errors_on_A.html"))
    # color pull-back transfer from B to A
    color = get_point_colors(
        points_B,
        frequency=np.pi,
    )
    shape_A_color_transfer = plot_meshes(
        meshes=[
            Mesh(
                v=points_A,
                f=None,
                color=convert_colors(color[pred_matching_A_to_B]),
            ),
            Mesh(
                v=points_B,
                f=None,
                color=convert_colors(color),
            ),
        ],
        titles=["Shape A", "Shape B"],
        showtriangles=[False, False],
        showscales=None,
        autoshow=False,
    )
    shape_A_color_transfer.write_html(str(sample_path / "transfer_on_A.html"))
    # cumulative error plot
    error_plot = plot_geo_errors(geo_errors)
    error_plot.savefig(sample_path / "cumulative_geo_errors.png")
    plt.close(error_plot)
    # save pair metrics
    metrics = {
        "item": sample["item"],
        "metrics": {
            "mean_geo_error": geo_errors.mean(),
        },
    }
    if "registration_A_to_B" in predictions:
        metrics["metrics"].update(registration_metrics)
    with (sample_path / "metrics.json").open("w") as f:
        json.dump(metrics, f, indent=4, sort_keys=True)
    return {"model_name": model_name, "geo_errors": geo_errors, **metrics}
def aggregate_and_store_pair_metrics(
    global_metrics_path: Path, metrics: Sequence[Dict[str, np.ndarray]]
) -> Dict[str, Dict[str, Union[int, np.ndarray]]]:
    """
    Aggregate per-pair metrics into global metrics and store them on disk.

    Writes the concatenated geodesic errors, the mean cumulative-error plot
    and a `metrics.json` summary under `global_metrics_path`.

    Args:
        global_metrics_path: destination folder (must already exist)
        metrics: the list of computed pair-metrics; each entry must carry
            the keys "geo_errors", "metrics" and "model_name"

    Returns:
        the aggregated global metrics dictionary
    """
    errors_cat = np.concatenate([x["geo_errors"] for x in metrics])
    np.savetxt(
        fname=str(global_metrics_path / "global_geo_errors.txt"),
        X=errors_cat,
    )
    # cumulative error plot
    error_plot = plot_geo_errors(errors_cat)
    error_plot.savefig(global_metrics_path / "mean_cumulative_geo_errors.png")
    plt.close(error_plot)
    # Average each per-pair metric over all pairs.
    metric_names = list(metrics[0]["metrics"].keys())
    aggregate_metrics = {
        f"global_{metric_name}": np.mean(
            [sample_metric["metrics"][metric_name] for sample_metric in metrics]
        )
        for metric_name in metric_names
    }
    global_metrics = {
        "model_name": metrics[0]["model_name"],
        "number_sample": len(metrics),
        "metrics": aggregate_metrics,
    }
    with (global_metrics_path / "metrics.json").open("w") as f:
        json.dump(global_metrics, f, indent=4, sort_keys=True)
    # Bug fix: the function is annotated (and documented) to return the
    # aggregated metrics but previously returned None implicitly.
    return global_metrics
@hydra.main(
    config_path=str(PROJECT_ROOT / "evaluation" / "conf"), config_name="default"
)
def run(cfg: omegaconf.DictConfig):
    """Hydra entry point: evaluate a model's stored predictions on a dataset,
    writing per-pair artifacts and aggregated global metrics under
    `evaluation/performance/<dataset>/<model>`."""
    seed_everything(0)
    dataset = EvalDataset(cfg["dataset"])
    model = hydra.utils.instantiate(cfg.model)
    assert model.name, "The model must have a name!"
    assert dataset.name, "The dataset must have a name!"
    global_metrics_path = (
        PROJECT_ROOT / "evaluation" / "performance" / dataset.name / model.name
    )
    global_metrics_path.mkdir(parents=True, exist_ok=cfg.overwrite_predictions)
    pair_metrics = []
    desc = f"Evaluating <{model.name}> on <{dataset.name}>"
    # Optionally restrict evaluation to the first `limit` samples.
    if "limit" in cfg and cfg["limit"]:
        iter_dataset = itertools.islice(dataset, cfg["limit"])
    else:
        iter_dataset = dataset
    for sample in tqdm(iter_dataset, desc=desc):
        sample_idx = sample["item"]
        sample_path = global_metrics_path / "data" / f"{sample_idx:03}"
        sample_path.mkdir(parents=True, exist_ok=cfg.overwrite_predictions)
        predictions = load_predictions(
            dataset_name=dataset.name,
            model_name=model.name,
            sample_idx=sample_idx,
        )
        pair_metrics.append(
            compute_and_store_pair_metrics(
                sample_path=sample_path,
                cache_path=PROJECT_ROOT
                / "evaluation"
                / ".cache"
                / dataset.name
                / "data"
                / f"{sample_idx:03}",
                sample=sample,
                predictions=predictions,
                model_name=model.name,
            )
        )
    aggregate_and_store_pair_metrics(
        global_metrics_path=global_metrics_path, metrics=pair_metrics
    )
if __name__ == "__main__":
    # Hydra entry point for the evaluation script.
    run()
| 11,713 | 28.959079 | 89 | py |
transmatching | transmatching-main/evaluation/utils.py | import os
from pathlib import Path
from typing import Optional, Union
import dotenv
import git
import hydra
import numpy as np
import omegaconf
import torch
from hydra.core.global_hydra import GlobalHydra
from hydra.experimental import compose
from matplotlib import pyplot as plt
from plotly.graph_objs import Layout
from transmatching.Model.debug import Debug
try:
from pykeops.torch import LazyTensor
except ImportError:
Debug.keops = False
def cdist_argmin(input1, input2, argmin_axis: int):
    """
    Chunked equivalent of ``torch.cdist(input1, input2).argmin(argmin_axis)``.

    The full pairwise-distance matrix is never materialised: the first
    operand is processed in chunks (roughly 100 rows each).

    Args:
        input1: tensor of shape [n, d] to be chunked
        input2: tensor of shape [m, d]
        argmin_axis: 0 or 1, the axis of the argmin

    Returns:
        a 1-D tensor of argmin indices
    """
    assert len(input1.shape) == 2
    assert len(input2.shape) == 2
    assert argmin_axis in (0, 1)
    if argmin_axis == 0:
        input1, input2 = input2, input1
    rows = input1.shape[0]
    n_chunks = rows // 100 if rows > 100 else 1
    parts = [
        torch.cdist(chunk, input2).argmin(-1)
        for chunk in input1.chunk(n_chunks)
    ]
    return torch.cat(parts)
def keops_cdist_argmin(X, Y, argmin_axis: int = 0):
    """
    cdist + argmin that uses KeOps lazy tensors when available and falls
    back to the chunked torch implementation otherwise.

    Args:
        X: [n, d] tensor
        Y: [m, d] tensor
        argmin_axis: 0 or 1, the axis of the argmin

    Returns:
        a 1-D tensor of argmin indices
    """
    assert len(X.shape) == 2
    assert len(Y.shape) == 2
    if Debug.keops:
        if argmin_axis == 1:
            X, Y = Y, X
        # KeOps evaluates the pairwise squared distances lazily, so the full
        # [n, m] matrix is never materialised on device.
        lX = LazyTensor(X[None, None, :, :].contiguous())
        lY = LazyTensor(Y[None, :, None, :].contiguous())
        Ds: LazyTensor = ((lX - lY) ** 2).sum(-1)
        # argKmin with K=1 is the (squared-distance) argmin; squared vs.
        # plain distance does not change the argmin.
        return Ds.argKmin(K=1, axis=2).squeeze(-1)
    else:
        return cdist_argmin(X, Y, argmin_axis=argmin_axis)
def cdist_min(input1, input2, min_axis: int):
    """
    Chunked equivalent of ``torch.cdist(input1, input2).min(min_axis).values``.

    Args:
        input1: tensor of shape [n, d] to be chunked
        input2: tensor of shape [m, d]
        min_axis: 0 or 1, the axis of the min

    Returns:
        a 1-D tensor of minimum distances
    """
    assert min_axis in (0, 1)
    if min_axis == 0:
        input1, input2 = input2, input1
    rows = input1.shape[0]
    n_chunks = rows // 100 if rows > 100 else 1
    parts = [
        torch.cdist(chunk, input2).min(-1)[0]
        for chunk in input1.chunk(n_chunks)
    ]
    return torch.cat(parts)
def chamfer_chunked(y_hat, src):
    """Symmetric Chamfer distance between two point sets, computed with the
    memory-friendly chunked ``cdist_min`` helper.

    Args:
        y_hat: [n, d] tensor
        src: [m, d] tensor

    Returns:
        scalar tensor: sum of the mean nearest-neighbour distances in both
        directions
    """
    assert len(y_hat.shape) == 2, y_hat.shape
    assert len(src.shape) == 2, src.shape
    backward = cdist_min(y_hat, src, 0).mean(-1)
    forward = cdist_min(y_hat, src, 1).mean(-1)
    return backward + forward
def plot3d(
    x: Union[np.ndarray, torch.Tensor], c: Union[np.ndarray, torch.Tensor]
) -> None:
    """
    Plot the scalar function c over the point cloud x as a 3D scatter.

    NOTE(review): relies on `go` (plotly.graph_objects), which is imported
    further down in this module — it is resolved at call time.
    """
    fig = go.Figure(
        data=[
            go.Scatter3d(
                x=x[:, 0],
                y=x[:, 1],
                z=x[:, 2],
                mode="markers",
                marker=dict(color=c, colorscale="viridis", size=5, showscale=True),
            )
        ],
        # keep the axes to the data's natural aspect ratio
        layout=Layout(scene=dict(aspectmode="data")),
    )
    fig.show()
def calc_tri_areas(vert, triv):
    """Per-triangle areas of a mesh via the cross-product formula.

    Args:
        vert: (n, 3) vertex positions
        triv: (m, 3) triangle vertex indices

    Returns:
        (m,) array of triangle areas
    """
    corner0, corner1, corner2 = (vert[triv[:, k], :] for k in range(3))
    edge1 = corner0 - corner2
    edge2 = corner1 - corner2
    # |e1 x e2| is twice the triangle area
    return 0.5 * np.linalg.norm(np.cross(edge1, edge2), ord=2, axis=-1)
def get_env(env_name: str, default: Optional[str] = None) -> str:
    """
    Read an environment variable, failing loudly when it is absent or empty.

    :param env_name: the name of the environment variable
    :param default: fallback returned when the variable is missing or empty;
        with no default, a missing variable raises KeyError and an empty one
        raises ValueError
    :return: the value of the environment variable (or the default)
    """
    value = os.environ.get(env_name)
    if value is None:
        if default is None:
            raise KeyError(f"{env_name} not defined and no default value is present!")
        return default
    if not value:
        if default is None:
            raise ValueError(
                f"{env_name} has yet to be configured and no default value is present!"
            )
        return default
    return value
def load_envs(env_file: Optional[str] = None) -> None:
    """
    Load all the environment variables defined in the `env_file`.

    This is equivalent to `. env_file` in bash; existing variables are
    overridden (``override=True``).

    :param env_file: the file that defines the environment variables to use.
        If None it searches for a `.env` file in the project.
    """
    dotenv.load_dotenv(dotenv_path=env_file, override=True)
# Load environment variables from the project's .env file (module import
# side effect).
load_envs()
# Set the cwd to the project root: the enclosing git checkout when there is
# one, otherwise the current directory.
try:
    PROJECT_ROOT = Path(
        git.Repo(Path.cwd(), search_parent_directories=True).working_dir
    )
except git.exc.InvalidGitRepositoryError:
    PROJECT_ROOT = Path.cwd()
os.chdir(PROJECT_ROOT)
def invert_permutation(p: np.ndarray, num_elements=None) -> np.ndarray:
    """Invert a permutation.

    Given ``p``, a permutation of ``0 .. num_elements - 1``, return the
    array ``s`` with ``s[p[i]] == i``; i.e. ``s[j]`` gives the index of
    ``j`` in ``p``.

    Args:
        p: the permutation array
        num_elements: length of the permutation; defaults to ``len(p)``
            (kept as an explicit parameter for backward compatibility)

    Returns:
        the inverse permutation of ``p``
    """
    if num_elements is None:
        num_elements = len(p)
    s = np.empty_like(p)
    s[p] = np.arange(num_elements)
    return s
def get_dists() -> np.ndarray:
    """
    Load the ground-truth geodesic distance matrix from the project's
    auxiliary data folder and normalize it by its maximum.

    Returns:
        the global geodesic distance matrix, with values in [0, 1]
    """
    data_file = (
        PROJECT_ROOT / "evaluation" / "data_aux" / "gt_distances_plain" / "data.npy"
    )
    geo_dists = np.load(str(data_file))
    geo_dists /= np.max(geo_dists)
    return geo_dists
def get_hydra_cfg(config_name: str = "default") -> omegaconf.DictConfig:
    """
    Instantiate and return the hydra config -- streamlit and jupyter compatible.

    Side effect: clears any previously-initialized global hydra state before
    re-initializing from the project's evaluation config dir.

    Args:
        config_name: .yaml configuration name, without the extension

    Returns:
        The desired omegaconf.DictConfig
    """
    GlobalHydra.instance().clear()
    hydra.experimental.initialize_config_dir(
        config_dir=str(PROJECT_ROOT / "evaluation" / "conf")
    )
    return compose(config_name=config_name)
def get_point_colors(
    points: np.ndarray, frequency: float = np.pi, rgb_rescale: bool = True
) -> np.ndarray:
    """
    Create RGB colors for each point in points, using their coordinates.

    Each coordinate axis is normalized to [0, 1] independently, passed
    through a cosine of the given frequency and rescaled back to [0, 1]
    globally.

    Args:
        points: (n, d) array of point coordinates
        frequency: the frequency of oscillation of the colors
        rgb_rescale: whether the RGB should be rescaled from [0, 1] to [0, 255]

    Returns:
        an ndarray [n, d] containing one [0, 1] rgb color for each point. If
        rgb_rescale is True the color is in [0, 255]
    """
    # Bug fix: the original used `points.min(axis=0)[0]`, a leftover from the
    # torch API (where `.min(dim)` returns (values, indices)); with numpy
    # inputs that indexed a scalar (the first axis' extremum) instead of the
    # per-axis minima/maxima.
    mins = points.min(axis=0)
    maxs = points.max(axis=0)
    colors = (points - mins) / (maxs - mins)
    colors = np.cos(frequency * colors)
    colors = (colors - colors.min()) / (colors.max() - colors.min())
    if rgb_rescale:
        colors *= 255
    return colors
##### UGLY PLOTTER TO ORGANIZE
from pathlib import Path
from typing import Optional, Sequence, Tuple, Union
import meshio
import numpy as np
import plotly.graph_objects as go
import torch
from matplotlib import cm
from plotly.graph_objs import Figure
from plotly.subplots import make_subplots
class Data:
    # Empty placeholder type; `_as_mesh` checks `isinstance(data, Data)` and
    # reads `.pos` / `.face` from matching objects. Presumably this stands in
    # for torch_geometric's Data so that dependency isn't required — TODO
    # confirm.
    pass
class Mesh:
    """Lightweight container for a triangle mesh.

    Holds vertices, faces (normalized to 0-based indexing), an optional
    per-vertex color and an optional display name.
    """

    def __init__(
        self,
        *,
        v: np.ndarray,
        f: np.ndarray,
        color: Optional[np.ndarray] = None,
        name: str = None,
    ):
        """
        :param v: the vertices
        :param f: the faces (may be None for a bare point cloud)
        :param color: optional per-vertex color
        :param name: optional display name
        """
        self.v = v
        self.f = f
        # Heuristic 1-based -> 0-based conversion (MATLAB-style faces).
        # NOTE(review): a 0-based mesh that never references vertex 0 would
        # be shifted incorrectly by this heuristic.
        if f is not None and f.min() == 1:
            self.f = self.f - 1
        self.color = color
        self.name = name
def _as_mesh(data: Union[Mesh, Data]) -> Mesh:
    """
    Normalize the supported shape representations to the local Mesh class:
    Mesh (returned as-is), meshio.Mesh, a torch_geometric-style Data object
    (with `.pos` and `.face`), or a bare ndarray treated as a point cloud.

    Raises:
        ValueError: for any unrecognized input type.
    """
    if isinstance(data, Mesh):
        return data
    elif isinstance(data, meshio.Mesh):
        return Mesh(v=data.points, f=data.cells_dict["triangle"])
    elif isinstance(data, Data):
        return Mesh(
            v=np.asarray(data.pos),
            # Data stores faces as (3, m); transpose to the (m, 3) convention.
            f=np.asarray(data.face.T),
        )
    elif isinstance(data, np.ndarray):
        return Mesh(v=data, f=None)
    else:
        raise ValueError(f"Data type not understood: <{data.__class__}>")
def _default_layout(fig: go.Figure) -> Figure:
    """
    Set the default camera parameters for the plotly Mesh3D.

    The camera uses +y as up and looks at the origin from slightly in front
    of the shape; margins are removed.

    :param fig: the figure to adjust
    :return: the adjusted figure (modified in place)
    """
    fig.update_layout(
        scene_camera=dict(
            up=dict(x=0, y=1, z=0),
            center=dict(x=0, y=0, z=0),
            eye=dict(x=-0.25, y=0.25, z=2),
        ),
        margin=dict(l=0, r=0, b=0, t=0),
        # title='ex',
        scene_aspectmode="auto",
    )
    return fig
def _tri_indices(simplices):
# simplices is a numpy array defining the simplices of the triangularization
# returns the lists of indices i, j, k
return ([triplet[c] for triplet in simplices] for c in range(3))
def _map_z2color(zval, colormap, vmin, vmax):
# map the normalized value zval to a corresponding color in the colormap
if vmin > vmax:
raise ValueError("incorrect relation between vmin and vmax")
t = (zval - vmin) / float((vmax - vmin)) # normalize val
R, G, B, alpha = colormap(t)
return (
"rgb("
+ "{:d}".format(int(R * 255 + 0.5))
+ ","
+ "{:d}".format(int(G * 255 + 0.5))
+ ","
+ "{:d}".format(int(B * 255 + 0.5))
+ ")"
)
def _plotly_trisurf(vertices, faces, colormap=cm.RdBu):
    """Build a plotly Mesh3d with per-face colors derived from the mean z of
    each triangle's vertices (mapped through `colormap`).

    # x, y, z are lists of coordinates of the triangle vertices
    # faces is a numpy array of shape (no_triangles, 3) defining the
    # triangularization
    """
    # insert here the type check for input data
    x = vertices[:, 0]
    y = vertices[:, 1]
    z = vertices[:, 2]
    points3D = np.vstack((x, y, z)).T
    tri_vertices = map(
        lambda index: points3D[index], faces
    )  # vertices of the surface triangles
    zmean = [
        np.mean(tri[:, 2]) for tri in tri_vertices
    ]  # mean values of z-coordinates of
    # triangle vertices
    min_zmean = np.min(zmean)
    max_zmean = np.max(zmean)
    # one color string per face, normalized over the z-mean range
    facecolor = [_map_z2color(zz, colormap, min_zmean, max_zmean) for zz in zmean]
    I, J, K = _tri_indices(faces)
    triangles = go.Mesh3d(x=x, y=y, z=z, facecolor=facecolor, i=I, j=J, k=K, name="")
    return triangles
# Plot EDGE - not working
# # define the lists Xe, Ye, Ze, of x, y, resp z coordinates of edge end points for each triangle
# # None separates data corresponding to two consecutive triangles
# # lists_coord = [
# # [[T[k % 3][c] for k in range(4)] + [None] for T in tri_vertices]
# # for c in range(3)
# # ]
# # tri_points = tri_vertices
# Xe = []
# Ye = []
# Ze = []
# for T in tri_vertices:
# Xe.extend([T[k % 3][0] for k in range(4)] + [None])
# Ye.extend([T[k % 3][1] for k in range(4)] + [None])
# Ze.extend([T[k % 3][2] for k in range(4)] + [None])
#
# # define the trace for triangle sides
# lines = go.Scatter3d(
# x=Xe,
# y=Ye,
# z=Ze,
# mode="lines",
# name="",
# line=dict(color="rgb(70,70,70)", width=1),
# )
# return [triangles, lines]
def _plot_mesh(
    m: Union[Mesh, Data],
    showtriangles: bool = True,
    showscale=False,
    colorscale="Viridis",
    reversescale=False,
    cmax=None,
    cmin=None,
    **kwargs,
) -> Union[go.Mesh3d, go.Scatter3d]:
    """
    Plot the mesh in a plotly graph object.

    Dispatch: with faces and `showtriangles` a per-face-colored trisurf is
    built; with faces but no `showtriangles` a vertex-intensity Mesh3d;
    without faces a 3D scatter of the vertices.

    :param m: the mesh to plot
    :param kwargs: possibly additional parameters for the go.Mesh3D class
    :return: the plotted mesh
    """
    if colorscale is None:
        colorscale = "Viridis"
    m = _as_mesh(m)
    vertices = m.v.astype(np.float64)
    if m.f is not None:
        if showtriangles:
            # NOTE(review): the trisurf path ignores m.color and colors faces
            # by mean z instead.
            return _plotly_trisurf(vertices, m.f)
        else:
            faces = m.f.astype(np.uint32)
            return go.Mesh3d(
                x=vertices[:, 0],
                y=vertices[:, 1],
                z=vertices[:, 2],
                i=faces[:, 0],
                j=faces[:, 1],
                k=faces[:, 2],
                colorscale=colorscale,
                opacity=1,
                # fall back to coloring by x-coordinate when no color is given
                intensity=m.color if m.color is not None else vertices[:, 0],
                showscale=showscale,
                reversescale=reversescale,
                cmax=cmax,
                cmin=cmin,
                **kwargs,
            )
    else:
        return go.Scatter3d(
            x=vertices[:, 0],
            y=vertices[:, 1],
            z=vertices[:, 2],
            mode="markers",
            marker=dict(
                size=3,
                color=m.color
                if m.color is not None
                else vertices[:, 0],  # set color to an array/list of desired values
                colorscale=colorscale,  # choose a colorscale
                opacity=1,
                reversescale=reversescale,
                cmax=cmax,
                cmin=cmin,
            ),
            **kwargs,
        )
def plot_meshes(
    meshes: Sequence[Union[Mesh, Data]],
    titles: Sequence[str] = None,
    showtriangles: Sequence[bool] = None,
    showscales: Sequence[bool] = None,
    autoshow: bool = False,
    showlegend: bool = False,
    colorscales: Sequence[str] = None,
    reversescales: Sequence[bool] = None,
    cmax: Sequence[float] = None,
    cmin: Sequence[float] = None,
    **kwargs,
) -> Figure:
    """
    Plot multiple shapes side by side, one 3D subplot per shape.

    All the per-shape sequence arguments (titles, showtriangles, showscales,
    colorscales, reversescales, cmax, cmin) are indexed positionally and may
    be None to use defaults for every shape.

    :param meshes: a list of shapes to plot
    :param titles: a list of titles for each subplot
    :param showscales: whether to show the scale for each shape
    :param autoshow: if True show the Figure automatically
    :return: the Figure
    """
    # Shared camera/aspect settings applied to every subplot's scene.
    myscene = dict(
        camera=dict(
            up=dict(x=0, y=1, z=0),
            center=dict(x=0, y=0, z=0),
            eye=dict(x=-0.25, y=0.25, z=2.75),
        ),
        aspectmode="data",
    )
    fig = make_subplots(
        rows=1,
        cols=len(meshes),
        specs=[[{"is_3d": True}] * len(meshes)],
        subplot_titles=titles,
        horizontal_spacing=0,
        vertical_spacing=0,
    )
    for i, mesh in enumerate(meshes):
        mesh = _as_mesh(mesh)
        fig.add_trace(
            _plot_mesh(
                mesh,
                showtriangles=showtriangles[i] if showtriangles is not None else True,
                showscale=showscales[i] if showscales is not None else None,
                # plotly scenes are 1-indexed
                scene=f"scene{i+1}",
                colorscale=colorscales[i] if colorscales is not None else None,
                reversescale=reversescales[i] if reversescales is not None else None,
                cmax=cmax[i] if cmax is not None else None,
                cmin=cmin[i] if cmin is not None else None,
                **kwargs,
            ),
            row=1,
            col=i + 1,
        )
    for i in range(len(meshes)):
        fig["layout"][f"scene{i+1}"].update(myscene)
    fig.update_layout(margin=dict(l=0, r=0, b=0, t=30))
    if autoshow:
        fig.show()
    fig.update_layout(showlegend=showlegend)
    return fig
def plot_mesh(
    mesh: Union[Mesh, Data, str],
    showtriangles: bool = True,
    showscale: bool = False,
    autoshow: bool = False,
    colorscale="Viridis",
    reversescale=False,
    cmax: float = None,
    cmin: float = None,
    **kwargs,
) -> Figure:
    """
    Plot a single shape.

    :param mesh: the shape to plot; a str or Path is read from disk via meshio
    :param showtriangles: whether to render the triangulated surface
    :param showscale: whether to show the color scale
    :param autoshow: if True show the Figure automatically
    :return: the Figure
    """
    if isinstance(mesh, str) or isinstance(mesh, Path):
        mesh = meshio.read(mesh)
    # Bug fix: the original called `_as_mesh(mesh)` and discarded the result;
    # assign it so the conversion actually takes effect here (plot_meshes
    # would also convert, but the dead call was misleading).
    mesh = _as_mesh(mesh)
    fig = plot_meshes(
        meshes=[mesh],
        showtriangles=[showtriangles],
        showscales=[showscale],
        autoshow=autoshow,
        colorscales=[colorscale],
        reversescales=[reversescale],
        cmax=[cmax],
        cmin=[cmin],
        **kwargs,
    )
    return fig
def add_points(
    fig: Figure,
    vertices: Union[Tuple[np.ndarray], np.ndarray],
    color: Union[str, np.ndarray] = "black",
    size=10,
):
    """Overlay a 3D scatter of points on an existing figure.

    `vertices` is either an (x, y, z) tuple of 1-D arrays or a single
    array/tensor whose last dimension holds the coordinates.

    Side effects: the trace is always added to subplot (row=1, col=1) and
    the figure legend is disabled.
    """
    if isinstance(vertices, (np.ndarray, torch.Tensor)):
        # Bug fix: the original called `.squeeze().numpy()` unconditionally,
        # which fails for numpy inputs even though they pass the isinstance
        # check; convert tensors explicitly instead.
        if isinstance(vertices, torch.Tensor):
            coords = vertices.detach().cpu().numpy()
        else:
            coords = vertices
        vertices = (
            coords[..., 0].squeeze(),
            coords[..., 1].squeeze(),
            coords[..., 2].squeeze(),
        )
    x, y, z = vertices
    fig.add_trace(
        go.Scatter3d(
            x=x,
            y=y,
            z=z,
            mode="markers",
            marker=dict(
                size=size,
                color=color,
                colorscale="Viridis",
                opacity=1,
            ),
        ),
        row=1,
        col=1,
    )
    fig.update_layout(showlegend=False)
def convert_colors(colors):
    """Convert an (n, 3) RGB array into a list of plotly 'rgb(r, g, b)'
    strings; inputs whose last dimension is not 3 are returned unchanged."""
    if colors.shape[-1] != 3:
        return colors
    as_strings = []
    for row in colors:
        r, g, b = row.astype(np.int32).tolist()
        as_strings.append(f"rgb({r}, {g}, {b})")
    return as_strings
def get_point_colors(points, frequency=np.pi, rgb_rescale=True):
    """Create RGB colors from point coordinates: each axis normalized to
    [0, 1], passed through a cosine, then rescaled to [0, 1] (or [0, 255]
    when rgb_rescale is True).

    NOTE: this duplicates (and shadows) the earlier definition in this module.
    """
    # Bug fix: the original used `points.min(axis=0)[0]` — a torch-API
    # leftover that, with numpy inputs, picked the first axis' scalar
    # extremum instead of the per-axis minima/maxima.
    mins = points.min(axis=0)
    maxs = points.max(axis=0)
    colors = (points - mins) / (maxs - mins)
    colors = np.cos(frequency * colors)
    colors = (colors - colors.min()) / (colors.max() - colors.min())
    if rgb_rescale:
        colors *= 255
    return colors
def plot3dmesh(vertices: np.ndarray, faces: np.ndarray, color) -> None:
    """Display a single triangle mesh colored by a per-vertex intensity."""
    surface = go.Mesh3d(
        x=vertices[:, 0],
        y=vertices[:, 1],
        z=vertices[:, 2],
        i=faces[:, 0],
        j=faces[:, 1],
        k=faces[:, 2],
        opacity=1,
        intensity=color,
        showscale=False,
    )
    go.Figure(data=[surface]).show()
def get_cumulative_geo_errors(geo_errors):
    """Cumulative distribution of geodesic errors over [0, 1].

    Returns (x, y): 1000 evenly-spaced thresholds and, for each, the
    fraction of errors at or below it.
    """
    thresholds = np.linspace(0, 1, 1000)
    fraction_within = (geo_errors[:, None] <= thresholds[None, :]).mean(0)
    return thresholds, fraction_within
def plot_geo_errors(geo_errors):
    """
    Cumulative geodesic error plot.

    - On the x-axis the geodesic error threshold (0 to 1)
    - On the y-axis the fraction of points within that error

    Args:
        geo_errors: 1-D array of per-point geodesic errors

    Returns:
        the matplotlib Figure (caller is responsible for closing it)
    """
    x = np.linspace(0, 1, 1000)
    # fraction of errors at or below each threshold
    y = np.mean(geo_errors[:, None] <= x[None, :], 0)
    fig, ax = plt.subplots()
    ax.set_ylabel("% vertices within the error")
    ax.set_xlabel("geodesic error")
    ax.plot(x, y)
    ax.grid()
    return fig
if __name__ == "__main__":
    # Smoke test: exercise the KeOps / chunked-argmin code path.
    keops_cdist_argmin(torch.randn(1000, 2), torch.randn(2000, 2), 0)
| 19,194 | 26.539455 | 105 | py |
transmatching | transmatching-main/evaluation/predict.py | import itertools
from typing import Dict, Union
import hydra
import meshio
import numpy as np
import omegaconf
from plotly import graph_objects as go
from pytorch_lightning import seed_everything
from tqdm import tqdm
from evaluation.competitors.eval_dataset import EvalDataset
from evaluation.utils import PROJECT_ROOT
def plot3dmesh(vertices: np.ndarray, faces: np.ndarray, color) -> None:
    """Display a single triangle mesh colored by a per-vertex intensity.

    (Local copy of the helper also defined in evaluation/utils.py.)
    """
    fig = go.Figure(
        data=[
            go.Mesh3d(
                x=vertices[:, 0],
                y=vertices[:, 1],
                z=vertices[:, 2],
                i=faces[:, 0],
                j=faces[:, 1],
                k=faces[:, 2],
                opacity=1,
                intensity=color,
                showscale=False,
            )
        ]
    )
    fig.show()
def store_prediction(
    cfg: omegaconf.DictConfig,
    model_name: str,
    dataset_name: str,
    sample_idx: int,
    sample: Dict[str, np.ndarray],
    prediction: Dict[str, Union[np.ndarray, meshio.Mesh]],
) -> None:
    """Persist one model prediction to the predictions folder.

    In the default mode, writes the ground-truth and predicted matchings
    (0-based) plus the registered mesh under
    `evaluation/predictions/<dataset>/<model>/<idx>/`. With
    `cfg.export_shrec` enabled, writes a single 1-based matching file per
    pair instead (for the MATLAB SHREC tooling).

    Raises:
        ValueError: in the shape-to-template setting when the model did not
            produce a registration.
    """
    if "s2t" in sample["name"] and "registration_A_to_B" not in prediction:
        raise ValueError(
            f"Shape2Template setting but {model_name} did not return a registration!"
        )
    if "export_shrec" in cfg and cfg["export_shrec"]:
        prediction_folder = (
            PROJECT_ROOT
            / "evaluation"
            / "predictions"
            / dataset_name
            / model_name
            / f"export"
        )
        prediction_folder.mkdir(parents=True, exist_ok=cfg.overwrite_predictions)
        np.savetxt(
            fname=str(prediction_folder / f"{sample['id_A']}_{sample['id_B']}.txt"),
            X=prediction["pred_matching_A_to_B"] + 1,  # matlab wants one!
            fmt="%d",
        )
    else:
        prediction_folder = (
            PROJECT_ROOT
            / "evaluation"
            / "predictions"
            / dataset_name
            / model_name
            / f"{sample_idx:03}"
        )
        prediction_folder.mkdir(parents=True, exist_ok=cfg.overwrite_predictions)
        np.savetxt(
            fname=str(prediction_folder / f"gt_matching_A_to_B.txt"),
            X=sample["gt_matching_A_to_B"],
            fmt="%d",
        )
        np.savetxt(
            fname=str(prediction_folder / f"pred_matching_A_to_B.txt"),
            X=prediction["pred_matching_A_to_B"],
            fmt="%d",
        )
        # if "s2t" in sample["name"]:
        # NOTE(review): the registration is written unconditionally; a model
        # that returns no "registration_A_to_B" would raise KeyError here
        # outside the s2t setting.
        meshio.write(
            str(prediction_folder / "registration_A_to_B.off"),
            prediction["registration_A_to_B"],
        )
@hydra.main(
    config_path=str(PROJECT_ROOT / "evaluation" / "conf"), config_name="default"
)
def run(cfg: omegaconf.DictConfig):
    """Hydra entry point: run the configured model over every pair of the
    configured dataset and store its predictions on disk."""
    seed_everything(0)
    dataset = EvalDataset(cfg["dataset"])
    model = hydra.utils.instantiate(cfg.model, device=cfg.device)
    # Optionally restrict prediction to the first `limit` samples.
    if "limit" in cfg and cfg["limit"]:
        iter_dataset = itertools.islice(dataset, cfg["limit"])
    else:
        iter_dataset = dataset
    desc = f"Running <{model.name}> on <{dataset.name}>"
    for sample in tqdm(iter_dataset, desc=desc):
        sample_idx = sample["item"]
        prediction = model(sample)
        store_prediction(
            cfg=cfg,
            model_name=model.name,
            dataset_name=dataset.name,
            sample_idx=sample_idx,
            sample=sample,
            prediction=prediction,
        )
        # Test the pull-back color transfer, from B to A using a matching A -> B
        # plot3dmesh(
        #     sample["points_A"],
        #     sample["faces_A"],
        #     sample["points_B"][prediction["pred_matching_A_to_B"], 0],
        # )
        # plot3dmesh(
        #     sample["points_B"],
        #     sample["faces_B"],
        #     sample["points_B"][:, 0],
        # )
        # break
# Hydra CLI entry point: reads the config under evaluation/conf ("default").
if __name__ == "__main__":
    run()
| 3,844 | 27.69403 | 85 | py |
transmatching | transmatching-main/evaluation/competitors/eval_dataset.py | import json
from typing import Dict
import meshio
import numpy as np
from torch.utils.data import Dataset
from evaluation.utils import PROJECT_ROOT
class EvalDataset(Dataset):
    """Reader for any dataset laid out as `evaluation/datasets/<name>/data/<sample>/`."""

    def __init__(self, dataset_name: str):
        """Index the samples of the evaluation dataset called ``dataset_name``.

        Args:
            dataset_name: name of a folder under `evaluation/datasets`.

        Raises:
            ValueError: if no dataset with that name exists on disk.
        """
        super().__init__()
        self.name = dataset_name
        root = PROJECT_ROOT / "evaluation" / "datasets" / dataset_name
        self.dataset_path = root.absolute()
        if not self.dataset_path.exists():
            raise ValueError(f"The evaluation dataset <{dataset_name}> does not exist!")
        sample_dirs = (self.dataset_path / "data").iterdir()
        self.samples = sorted(d for d in sample_dirs if d.is_dir())

    def __len__(self) -> int:
        return len(self.samples)

    def __getitem__(
        self,
        item: int,
    ) -> Dict[str, np.ndarray]:
        """Load the ``item``-th sample: a pair of shapes plus metadata.

        Returns:
            A dict with both meshes (points and triangles), the sample ids,
            its on-disk path, and the ground-truth matching (None when the
            dataset ships no annotation file).
        """
        sample_dir = self.samples[item]
        mesh_A = meshio.read(sample_dir / "A.off")
        mesh_B = meshio.read(sample_dir / "B.off")
        try:
            gt_matching_A_to_B = np.loadtxt(
                sample_dir / "gt_matching_A_to_B.txt", dtype=np.int32
            )
        except (FileNotFoundError, OSError):
            # Some datasets (e.g. shrec19) have no ground-truth matching.
            gt_matching_A_to_B = None
        with (sample_dir / "meta.json").open() as fp:
            meta = json.load(fp)
        return {
            "name": self.name,
            "id_A": meta["id"]["A"],
            "id_B": meta["id"]["B"],
            "item": item,
            "path": sample_dir,
            "points_A": mesh_A.points,
            "points_B": mesh_B.points,
            "faces_A": mesh_A.cells_dict["triangle"],
            "faces_B": mesh_B.cells_dict["triangle"],
            "gt_matching_A_to_B": gt_matching_A_to_B,
        }

    def __repr__(self) -> str:
        return f"EvalDataset(dataset_name={self.name})"
# Manual smoke test: print the first sample of the "faust" evaluation dataset.
if __name__ == "__main__":
    print(EvalDataset("faust")[0])
| 2,406 | 28.353659 | 88 | py |
transmatching | transmatching-main/evaluation/competitors/our/our.py | from typing import Dict
import meshio
import numpy as np
import scipy.io
import torch
from transmatching.Model.model import Model
from transmatching.Utils.refine import refine, refine_hires
from evaluation.competitors.eval_dataset import EvalDataset
from evaluation.competitors.eval_model import ModelMatching
from evaluation.competitors.shape_normalization import (
area_weighted_denormalization,
area_weighted_normalization,
naive_normalization,
normalization_wrt_lowres_mesh,
)
from evaluation.utils import (
PROJECT_ROOT,
Mesh,
chamfer_chunked,
keops_cdist_argmin,
plot_meshes,
)
# --- Checkpoint selection -------------------------------------------------
# The commented names below are alternative checkpoints tried during
# development; only `checkpoint_file` is actually loaded by OurMatching.
# checkpoint = "data_aug_1_axis"
# checkpoint = "shape2template_area"
# checkpoint = "best_shape2template_area"
# checkpoint = "s2t_new"
# checkpoint = "s2t_new"
checkpoint_file = "s2s_weighted_bary"
# checkpoint_file = "s2s_noarea_2"
# checkpoint_file = "continue_best_s2s_bary"
# checkpoint_file = "best_fine_tune_best_s2s"
# checkpoint = "best_shape2shape_augRotation_Luca_weighted5"
# Folder holding the trained model weights.
CHECKPOINTS_ROOT = PROJECT_ROOT / "evaluation" / "competitors" / "our" / "checkpoints"
# Run inference on GPU when one is available.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
class OurMatching(ModelMatching):
    """Evaluation wrapper around the transmatching `Model`.

    Loads a pretrained checkpoint and produces, for an EvalDataset sample,
    a predicted point matching A -> B plus a registered mesh. The setting
    (shape-to-shape vs shape-to-template) is chosen from the sample's
    dataset name in `__call__`.
    """

    def __init__(
        self,
        checkpoint_name: str = checkpoint_file,
        refine: bool = False,
        area_normalization: bool = True,
        device=DEVICE,
        refine_steps: int = 100,
    ) -> None:
        """Build the network, load the checkpoint, and cache auxiliary data.

        Args:
            checkpoint_name: weight file name under CHECKPOINTS_ROOT.
            refine: run iterative refinement after the forward pass.
            area_normalization: normalize shapes by their area-weighted
                barycenter; when None, enabled only for the
                "s2s_weighted_bary" checkpoint.
            device: torch device string used for inference.
            refine_steps: number of refinement iterations when `refine` is on.
        """
        self.refine_steps = refine_steps
        self.checkpoint_name = checkpoint_name
        # Loading Models
        self.refine = refine
        self.area_normalization = (
            checkpoint_name == "s2s_weighted_bary"
            if area_normalization is None
            else area_normalization
        )
        super(OurMatching, self).__init__()
        self.device = device
        # Architecture hyper-parameters: these must match the configuration
        # the loaded checkpoint was trained with.
        self.model = Model(
            d_bottleneck=32,
            d_latent=64,
            d_channels=64,
            d_middle=512,
            N=8,
            heads=4,
            max_seq_len=100,
            d_origin=3,
            dropout=0,
        )
        self.model.load_state_dict(
            torch.load(CHECKPOINTS_ROOT / self.checkpoint_name, map_location=device)
        )
        self.model.eval()
        self.model = self.model.to(device)
        # Low-resolution (1k) FAUST vertices; used by the non-area branches to
        # normalize full-resolution FAUST meshes w.r.t. their low-res versions.
        self.faust_1k = scipy.io.loadmat(
            PROJECT_ROOT / "evaluation/data_aux/FAUST_noise_0.00.mat"
        )["vertices"]

    def get_name(self) -> str:
        """Full model name encoding checkpoint and option flags (used in output paths)."""
        if self.refine:
            return f"our(ckp={self.checkpoint_name},refine={self.refine},area_norm={self.area_normalization},refine_steps={self.refine_steps})"
        else:
            return f"our(ckp={self.checkpoint_name},refine={self.refine},area_norm={self.area_normalization})"

    def get_simple_name(self) -> str:
        """Short, option-free model name."""
        return "our"

    def shape2shape(self, sample: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Match two arbitrary shapes (no template involved).

        The model is run in both directions (A->B and B->A); the direction
        with the lower chamfer reconstruction error is used to extract the
        matching via nearest neighbors.
        """
        assert "s2t" not in sample["name"]
        points_A = sample["points_A"]
        faces_A = sample["faces_A"]
        points_B = sample["points_B"]
        faces_B = sample["faces_B"]
        points_A = torch.from_numpy(points_A).float()
        points_B = torch.from_numpy(points_B).float()
        if self.area_normalization:
            # Noisy datasets: skip the rescaling step of the normalization.
            if "noise" in sample["name"]:
                points_A = area_weighted_normalization(points_A, rescale=False)
                points_B = area_weighted_normalization(points_B, rescale=False)
            else:
                points_A = area_weighted_normalization(points_A)
                points_B = area_weighted_normalization(points_B)
        else:
            if sample["name"] in {"faust", "faust_permuted"}:
                # Assumes ids like "tr_reg_042.ply": chars [-7:-4] are the
                # shape index into the 1k FAUST array — TODO confirm.
                points_A_1k = self.faust_1k[int(sample["id_A"][-7:-4])]
                points_B_1k = self.faust_1k[int(sample["id_B"][-7:-4])]
                points_A = normalization_wrt_lowres_mesh(points_A, points_A_1k)
                points_B = normalization_wrt_lowres_mesh(points_B, points_B_1k)
            else:
                points_A = naive_normalization(points_A)
                points_B = naive_normalization(points_B)
        points_A = points_A[None, ...].to(self.device)
        points_B = points_B[None, ...].to(self.device)
        with torch.no_grad():
            A_hat = self.model(points_A, points_B)
            B_hat = self.model(points_B, points_A)
        # Pick the direction whose reconstruction is closer to its target.
        A_chamfer = chamfer_chunked(A_hat.squeeze(), points_A.squeeze())
        B_chamfer = chamfer_chunked(B_hat.squeeze(), points_B.squeeze())
        if A_chamfer < B_chamfer:
            if self.refine:
                A_hat, _ = refine_hires(
                    self.model, points_A, points_B, self.refine_steps
                )
                A_hat = A_hat[-1]
            A_hat = A_hat.to("cpu")
            # NOTE(review): points_A already has a batch dim (added above);
            # this adds a second one, removed again by .squeeze() below —
            # verify intended.
            points_A = points_A[None, ...].to("cpu")
            pred_matching_A_to_B = (
                keops_cdist_argmin(points_A.squeeze(), A_hat.squeeze(), argmin_axis=1)
                .cpu()
                .squeeze()
            )
            # NOTE(review): unlike the B-branch, A_hat is NOT denormalized
            # before being stored as the registration — confirm intended.
            registration = {
                "registration_A_to_B": meshio.Mesh(
                    points=A_hat.cpu().detach().squeeze().numpy(),
                    cells=[("triangle", faces_B)],
                )
            }
        else:
            if self.refine:
                B_hat, _ = refine_hires(
                    self.model, points_B, points_A, self.refine_steps
                )
                B_hat = B_hat[-1]
            B_hat = B_hat.to("cpu")
            points_B = points_B[None, ...].to("cpu")
            pred_matching_A_to_B = (
                keops_cdist_argmin(points_B.squeeze(), B_hat.squeeze(), argmin_axis=0)
                .cpu()
                .squeeze()
            )
            # Bring the reconstruction back to the original B coordinates.
            B_hat = B_hat.detach().cpu().squeeze()
            B_hat = area_weighted_denormalization(
                B_hat, torch.from_numpy(sample["points_B"])
            )
            registration = {
                "registration_A_to_B": meshio.Mesh(
                    points=B_hat.cpu().detach().squeeze().numpy(),
                    cells=[("triangle", faces_A)],
                )
            }
        return {"pred_matching_A_to_B": pred_matching_A_to_B, **registration}

    def shape2template(self, sample: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Match a shape (B) to the template (A) by reconstructing B from A.

        Only the B->A direction is run; the matching is the per-template-point
        nearest neighbor of the reconstruction in B.
        """
        assert "s2t" in sample["name"]
        points_A = sample["points_A"]
        faces_A = sample["faces_A"]
        points_B = sample["points_B"]
        # NOTE(review): faces_B is read but never used in this method.
        faces_B = sample["faces_B"]
        points_A = torch.from_numpy(points_A).float()
        points_B = torch.from_numpy(points_B).float()
        if self.area_normalization:
            if "noise" in sample["name"]:
                points_A = area_weighted_normalization(points_A, rescale=False)
                points_B = area_weighted_normalization(points_B, rescale=False)
            else:
                # The template (A) keeps its scale; only B is rescaled.
                points_A = area_weighted_normalization(points_A, rescale=False)
                points_B = area_weighted_normalization(points_B)
        else:
            points_A = naive_normalization(points_A, rescale=False)
            if sample["name"] in {"faust", "faust_permuted", "faust_s2t"}:
                # Assumes ids like "tr_reg_042.ply": chars [-7:-4] are the
                # shape index into the 1k FAUST array — TODO confirm.
                points_B_1k = self.faust_1k[int(sample["id_B"][-7:-4])]
                points_B = points_B.numpy()
                points_B = normalization_wrt_lowres_mesh(points_B, points_B_1k)
                points_B = torch.from_numpy(points_B).float()
            else:
                points_B = naive_normalization(points_B)
        points_A = points_A[None, ...].to(self.device)
        points_B = points_B[None, ...].to(self.device)
        with torch.no_grad():
            B_hat = self.model(points_B, points_A)
        if self.refine:
            # NOTE(review): refinement discards the no_grad forward above and
            # recomputes B_hat from scratch.
            B_hat, _ = refine(self.model, points_B, points_A, self.refine_steps)
            B_hat = B_hat[-1]
        B_hat = B_hat.to("cpu")
        points_B = points_B.to("cpu")
        # For each template point, its nearest point of B.
        pred_matching_A_to_B = torch.cdist(points_B, B_hat).squeeze().argmin(0)
        B_hat = B_hat.detach().cpu().squeeze()
        B_hat = area_weighted_denormalization(
            B_hat, torch.from_numpy(sample["points_B"])
        )
        return {
            "pred_matching_A_to_B": pred_matching_A_to_B,
            "registration_A_to_B": meshio.Mesh(
                points=B_hat.cpu().detach().numpy(),
                cells=[("triangle", faces_A)],
            ),
        }

    def __call__(self, sample: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Dispatch on the dataset name: "*s2t*" -> shape2template, else shape2shape."""
        if "s2t" in sample["name"]:
            return self.shape2template(sample)
        else:
            return self.shape2shape(sample)
# Manual smoke test: run the model on one outlier sample and visualize the
# two input shapes next to the registered output.
if __name__ == "__main__":
    dataset = EvalDataset("faust_1k_outliers")
    sample = dataset[0]
    print(sample["id_A"])
    print(sample["id_B"])
    print(sample["points_A"].shape[0] + sample["points_B"].shape[0])
    model = OurMatching()
    out = model(sample)
    plot_meshes(
        meshes=[
            Mesh(
                v=sample["points_A"],
                f=None,
            ),
            Mesh(
                v=sample["points_B"],
                f=None,
            ),
            Mesh(
                v=out["registration_A_to_B"].points,
                f=None,
            ),
        ],
        titles=["A", "B", "REG"],
        showtriangles=[False, False, False],
        showscales=None,
        autoshow=False,
    ).show()
    print("OK")
# dataset = EvalDataset("faust_1k_noise")
# model = OurMatching()
# for i in dataset:
# model(i)
# break
# print("OK")
# #
# plot_meshes(
# meshes=[
# Mesh(
# v=points_A.detach().squeeze().numpy(),
# f=None,
# color=points_B[0, pred_matching_A_to_B, 0]
# ),
# Mesh(
# v=points_B.detach().squeeze().numpy(),
# f=None,
# color=points_B[0, :, 0]
# ),
# ],
# titles=["A", "B"],
# showtriangles=[False, False],
# showscales=None,
# autoshow=False,
# ).show()
| 9,955 | 31.75 | 143 | py |
transmatching | transmatching-main/evaluation/competitors/our_refined/our_refined.py | from pytorch_lightning import seed_everything
from evaluation.competitors.eval_dataset import EvalDataset
from evaluation.competitors.our.our import OurMatching
class OurMatchingRefined(OurMatching):
    """OurMatching with iterative refinement always enabled."""

    def __init__(self, **kwargs) -> None:
        # Identical to OurMatching, except refine is forced on.
        super().__init__(refine=True, **kwargs)

    def get_simple_name(self) -> str:
        return "our_refined"
# Manual smoke test: run the refined model on a single FAUST sample.
if __name__ == "__main__":
    seed_everything(0)
    dataset = EvalDataset("faust")
    sample = dataset[10]
    print(sample["id_A"])
    print(sample["id_B"])
    print(sample["points_A"].shape[0] + sample["points_B"].shape[0])
    model = OurMatchingRefined()
    model(sample)
    print("OK")
# max_points = 0
# max_sample_idx = None
# for i, sample in tqdm(enumerate(dataset)):
# n_points = sample["points_A"].shape[0] + sample["points_B"].shape[0]
# if n_points > max_points:
# max_points = n_points
# max_sample_idx = i
#
# print(f"biggest sample: {max_sample_idx}, with {max_points} total points")
# plot_meshes(
# meshes=[
# Mesh(
# v=y_hat.detach().squeeze().numpy(),
# f=None,
# ),
# Mesh(
# v=points_A.detach().squeeze().numpy(),
# f=None,
# ),
# Mesh(
# v=points_B.detach().squeeze().numpy(),
# f=None,
# ),
# ],
# titles=["y hat",'A', 'B'],
# showtriangles=[False, False, False],
# showscales=None,
# autoshow=False,
# ).show()
| 1,691 | 28.684211 | 80 | py |
transmatching | transmatching-main/evaluation/datasets/faust_1k_s2t/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
from pytorch_lightning import seed_everything
from scipy import io
from tqdm import tqdm
from evaluation.utils import PROJECT_ROOT, Mesh, plot_meshes
# Generator for the faust_1k_s2t evaluation dataset: every sample pairs the
# 12k template (A) with one remeshed FAUST shape (B).
# NOTE(review): N_PAIRS is unused in this script (all shapes are exported).
N_PAIRS = 100
FAUST_REM = Path(PROJECT_ROOT / "evaluation/datasets/faust_1k/FAUSTS_rem.mat")
TEMPLATE_PATH = Path(PROJECT_ROOT / "evaluation/datasets/faust_1k_s2t/12ktemplate.ply")
# Shape A of every sample is always the template mesh.
template = meshio.read(TEMPLATE_PATH)
seed_everything(0)
shapes = io.loadmat(str(FAUST_REM))
n_shapes = shapes["vertices"].shape[0]
for i in tqdm(range(n_shapes)):
    sample_path = PROJECT_ROOT / "evaluation/datasets/faust_1k_s2t" / f"data/{i:03}"
    assert (
        not sample_path.exists()
    ), "Folder already exists! Inconsistency risk, delete <data> and regenerate from scratch."
    sample_path.mkdir(parents=True, exist_ok=True)
    shape_A = template
    # MATLAB faces are 1-based; shift to 0-based for meshio.
    shape_B = meshio.Mesh(
        points=shapes["vertices"][i], cells=[("triangle", shapes["f"] - 1)]
    )
    # Identity ground truth: assumes the template and the remeshed shapes are
    # in point-to-point correspondence — consistent with the other generators.
    gt_matching_A_to_B = np.arange(shape_A.points.shape[0], dtype=np.int64)
    meshio.write(filename=str(sample_path / f"A.off"), mesh=shape_A)
    meshio.write(filename=str(sample_path / f"B.off"), mesh=shape_B)
    np.savetxt(
        fname=str(sample_path / f"gt_matching_A_to_B.txt"),
        X=gt_matching_A_to_B,
        fmt="%d",
    )
    with (sample_path / "meta.json").open("w") as f:
        json.dump({"id": {"A": "12ktemplate.ply", "B": i}}, f, indent=4)
# plot_meshes(
# meshes=[
# Mesh(v=template.points, f=None, color=shape_B.points[:, 0]),
# Mesh(v=shape_B.points, f=None, color=shape_B.points[:, 0]),
# ],
# titles=["A", "B"],
# showtriangles=[False, False],
# showscales=None,
# autoshow=False,
# ).show()
| 1,753 | 29.241379 | 94 | py |
transmatching | transmatching-main/evaluation/datasets/faust/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
from pytorch_lightning import seed_everything
from tqdm import tqdm
# Generator for the faust evaluation dataset: N_PAIRS random pairs of
# full-resolution FAUST registrations (machine-local source path).
FAUST_PATH = Path("/run/media/luca/LocalDisk/Datasets/MPI-FAUST/training/registrations")
assert FAUST_PATH.exists(), "Do not regenerate! Download from Drive or DVC."
N_PAIRS = 100
seed_everything(0)
shapes_paths = list(FAUST_PATH.glob("*.ply"))
n_shapes = len(shapes_paths)
for i in tqdm(range(N_PAIRS)):
    shape_A_idx = np.random.randint(n_shapes)
    shape_B_idx = np.random.randint(n_shapes)
    while shape_A_idx == shape_B_idx:  # avoid taking A exactly equal to B
        shape_B_idx = np.random.randint(n_shapes)
    shape_A_path = shapes_paths[shape_A_idx]
    shape_B_path = shapes_paths[shape_B_idx]
    sample_path = Path(f"data/{i:03}")
    assert (
        not sample_path.exists()
    ), "Folder already exists! Inconsistency risk, delete <data> and regenerate from scratch."
    sample_path.mkdir(parents=True, exist_ok=True)
    shape_A = meshio.read(shape_A_path)
    shape_B = meshio.read(shape_B_path)
    # FAUST registrations share the template topology, so the ground-truth
    # matching is the identity.
    gt_matching_A_to_B = np.arange(shape_B.points.shape[0], dtype=np.int64)
    meshio.write(filename=str(sample_path / f"A.off"), mesh=shape_A)
    meshio.write(filename=str(sample_path / f"B.off"), mesh=shape_B)
    np.savetxt(
        fname=str(sample_path / f"gt_matching_A_to_B.txt"),
        X=gt_matching_A_to_B,
        fmt="%d",
    )
    with (sample_path / "meta.json").open("w") as f:
        json.dump({"id": {"A": shape_A_path.name, "B": shape_B_path.name}}, f, indent=4)
| 1,559 | 30.836735 | 94 | py |
transmatching | transmatching-main/evaluation/datasets/faust_1k/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
from meshio import Mesh
from pytorch_lightning import seed_everything
from scipy import io
from tqdm import tqdm
from evaluation.utils import PROJECT_ROOT
# Generator for the faust_1k evaluation dataset: N_PAIRS random pairs of
# remeshed (1k) FAUST shapes (machine-local source .mat).
N_PAIRS = 100
FAUST_REM = Path("/home/luca/Desktop/FAUSTS_rem.mat")
seed_everything(0)
shapes = io.loadmat(str(FAUST_REM))
n_shapes = shapes["vertices"].shape[0]
for i in tqdm(range(N_PAIRS)):
    shape_A_idx = np.random.randint(n_shapes)
    shape_B_idx = np.random.randint(n_shapes)
    while shape_A_idx == shape_B_idx:  # avoid taking A exactly equal to B
        shape_B_idx = np.random.randint(n_shapes)
    sample_path = PROJECT_ROOT / "evaluation/datasets/faust_1k" / f"data/{i:03}"
    assert (
        not sample_path.exists()
    ), "Folder already exists! Inconsistency risk, delete <data> and regenerate from scratch."
    sample_path.mkdir(parents=True, exist_ok=True)
    # MATLAB faces are 1-based; shift to 0-based for meshio.
    shape_A = Mesh(
        points=shapes["vertices"][shape_A_idx], cells=[("triangle", shapes["f"] - 1)]
    )
    shape_B = Mesh(
        points=shapes["vertices"][shape_B_idx], cells=[("triangle", shapes["f"] - 1)]
    )
    # Remeshed shapes share the same topology: identity ground truth.
    gt_matching_A_to_B = np.arange(
        shapes["vertices"][shape_A_idx].shape[0], dtype=np.int64
    )
    meshio.write(filename=str(sample_path / f"A.off"), mesh=shape_A)
    meshio.write(filename=str(sample_path / f"B.off"), mesh=shape_B)
    np.savetxt(
        fname=str(sample_path / f"gt_matching_A_to_B.txt"),
        X=gt_matching_A_to_B,
        fmt="%d",
    )
    with (sample_path / "meta.json").open("w") as f:
        json.dump({"id": {"A": shape_A_idx, "B": shape_B_idx}}, f, indent=4)
| 1,649 | 29 | 94 | py |
transmatching | transmatching-main/evaluation/datasets/faust_1k_outliers/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
from pytorch_lightning import seed_everything
from scipy import io
from scipy.spatial.transform import Rotation as R
from tqdm import tqdm
from evaluation.utils import PROJECT_ROOT, Mesh, plot_meshes
# Generator for the faust_1k_outliers evaluation dataset: random pairs of
# outlier-corrupted 1k FAUST shapes, rotated -90° around z; topology (faces)
# is taken from the clean 0-noise dataset.
N_PAIRS = 100
FAUST_0NOISE = Path(
    PROJECT_ROOT / "evaluation/datasets/faust_1k_0noise/FAUST_noise_0.00.mat"
)
FAUST_OUTLIERS = Path(
    PROJECT_ROOT / "evaluation/datasets/faust_1k_outliers/FAUST_outliers_30_0.03.mat"
)
seed_everything(0)
old_shapes = io.loadmat(str(FAUST_0NOISE))
shapes = io.loadmat(str(FAUST_OUTLIERS))
n_shapes = shapes["vertices"].shape[0]
# Rotation applied to both shapes of every pair.
rotation_matrix = R.from_euler("z", -90, degrees=True).as_matrix()
for i in tqdm(range(N_PAIRS)):
    shape_A_idx = np.random.randint(n_shapes)
    shape_B_idx = np.random.randint(n_shapes)
    while shape_A_idx == shape_B_idx:  # avoid taking A exactly equal to B
        shape_B_idx = np.random.randint(n_shapes)
    sample_path = (
        PROJECT_ROOT / "evaluation/datasets/faust_1k_outliers" / f"data/{i:03}"
    )
    assert (
        not sample_path.exists()
    ), "Folder already exists! Inconsistency risk, delete <data> and regenerate from scratch."
    sample_path.mkdir(parents=True, exist_ok=True)
    # MATLAB faces are 1-based; shift to 0-based for meshio.
    shape_A = meshio.Mesh(
        points=shapes["vertices"][shape_A_idx] @ rotation_matrix,
        cells=[("triangle", old_shapes["faces"] - 1)],
    )
    shape_B = meshio.Mesh(
        points=shapes["vertices"][shape_B_idx] @ rotation_matrix,
        cells=[("triangle", old_shapes["faces"] - 1)],
    )
    # Shapes share the same topology: identity ground truth.
    gt_matching_A_to_B = np.arange(
        shapes["vertices"][shape_A_idx].shape[0], dtype=np.int64
    )
    meshio.write(filename=str(sample_path / f"A.off"), mesh=shape_A)
    meshio.write(filename=str(sample_path / f"B.off"), mesh=shape_B)
    np.savetxt(
        fname=str(sample_path / f"gt_matching_A_to_B.txt"),
        X=gt_matching_A_to_B,
        fmt="%d",
    )
    with (sample_path / "meta.json").open("w") as f:
        json.dump({"id": {"A": shape_A_idx, "B": shape_B_idx}}, f, indent=4)
# plot_meshes(
# meshes=[
# Mesh(
# v=shapes["vertices"][0] @ rotation_matrix,
# f=old_shapes["faces"] - 1,
# ),
# Mesh(
# v=old_shapes["vertices"][0],
# f=old_shapes["faces"] - 1,
# ),
# ],
# titles=["A", "B"],
# showtriangles=[False, False],
# showscales=None,
# autoshow=False,
# ).show()
| 2,476 | 27.802326 | 94 | py |
transmatching | transmatching-main/evaluation/datasets/faust_1k_noise/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
from meshio import Mesh
from pytorch_lightning import seed_everything
from scipy import io
from tqdm import tqdm
from evaluation.utils import PROJECT_ROOT
# Generator for the faust_1k_noise evaluation dataset: random pairs of
# noise-corrupted (sigma 0.01) 1k FAUST shapes.
N_PAIRS = 100
FAUST_NOISE = Path(
    PROJECT_ROOT / "evaluation/datasets/faust_1k_noise/FAUST_noise_0.01.mat"
)
seed_everything(0)
shapes = io.loadmat(str(FAUST_NOISE))
n_shapes = shapes["vertices"].shape[0]
for i in tqdm(range(N_PAIRS)):
    shape_A_idx = np.random.randint(n_shapes)
    shape_B_idx = np.random.randint(n_shapes)
    while shape_A_idx == shape_B_idx:  # avoid taking A exactly equal to B
        shape_B_idx = np.random.randint(n_shapes)
    sample_path = PROJECT_ROOT / "evaluation/datasets/faust_1k_noise" / f"data/{i:03}"
    assert (
        not sample_path.exists()
    ), "Folder already exists! Inconsistency risk, delete <data> and regenerate from scratch."
    sample_path.mkdir(parents=True, exist_ok=True)
    # MATLAB faces are 1-based; shift to 0-based for meshio.
    shape_A = Mesh(
        points=shapes["vertices"][shape_A_idx],
        cells=[("triangle", shapes["faces"] - 1)],
    )
    shape_B = Mesh(
        points=shapes["vertices"][shape_B_idx],
        cells=[("triangle", shapes["faces"] - 1)],
    )
    # Shapes share the same topology: identity ground truth.
    gt_matching_A_to_B = np.arange(
        shapes["vertices"][shape_A_idx].shape[0], dtype=np.int64
    )
    meshio.write(filename=str(sample_path / f"A.off"), mesh=shape_A)
    meshio.write(filename=str(sample_path / f"B.off"), mesh=shape_B)
    np.savetxt(
        fname=str(sample_path / f"gt_matching_A_to_B.txt"),
        X=gt_matching_A_to_B,
        fmt="%d",
    )
    with (sample_path / "meta.json").open("w") as f:
        json.dump({"id": {"A": shape_A_idx, "B": shape_B_idx}}, f, indent=4)
| 1,729 | 27.833333 | 94 | py |
transmatching | transmatching-main/evaluation/datasets/faust_permuted/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
from meshio import Mesh
from plotly import graph_objects as go
from pytorch_lightning import seed_everything
from tqdm import tqdm
from evaluation.utils import PROJECT_ROOT
# Source FAUST registrations (machine-local path; do not regenerate elsewhere).
FAUST_PATH = Path("/run/media/luca/LocalDisk/Datasets/MPI-FAUST/training/registrations")
assert FAUST_PATH.exists(), "Do not regenerate! Download from Drive or DVC."
N_PAIRS = 100
seed_everything(0)
shapes_paths = list(FAUST_PATH.glob("*.ply"))
n_shapes = len(shapes_paths)
def invert_permutation(p: np.ndarray) -> np.ndarray:
    """Invert a permutation of 0, 1, ..., len(p)-1.

    Returns an array ``inv`` where ``inv[i]`` is the index of ``i`` inside
    ``p`` (equivalently, ``inv[p] == arange(p.size)``).
    """
    positions = np.arange(p.size)
    inv = np.empty_like(p)
    inv[p] = positions
    return inv
def plot3dmesh(vertices: np.ndarray, faces: np.ndarray) -> None:
    """Show a triangle mesh in plotly, colored by the x coordinate of each vertex."""
    trace = go.Mesh3d(
        x=vertices[:, 0],
        y=vertices[:, 1],
        z=vertices[:, 2],
        i=faces[:, 0],
        j=faces[:, 1],
        k=faces[:, 2],
        opacity=1,
        intensity=vertices[:, 0],
        showscale=False,
    )
    go.Figure(data=[trace]).show()
# Reuse the pair list of the plain "faust" dataset, but store B with a random
# vertex permutation so the ground truth is no longer the identity.
for i in tqdm(range(N_PAIRS)):
    with (
        PROJECT_ROOT
        / "evaluation"
        / "datasets"
        / "faust"
        / "data"
        / f"{i:03d}"
        / "meta.json"
    ).open() as f:
        sample_meta = json.load(f)
    shape_A_path = FAUST_PATH / sample_meta["id"]["A"]
    shape_B_path = FAUST_PATH / sample_meta["id"]["B"]
    sample_path = Path(__file__).parent / f"data/{i:03d}"
    assert (
        not sample_path.exists()
    ), "Folder already exists! Inconsistency risk, delete <data> and regenerate from scratch."
    sample_path.mkdir(parents=True, exist_ok=True)
    shape_A = meshio.read(shape_A_path)
    shape_B = meshio.read(shape_B_path)
    # NOTE(review): one_to_n is unused below.
    one_to_n = np.arange(shape_A.points.shape[0], dtype=np.int32)
    permutation = np.random.permutation(shape_A.points.shape[0])
    # gt[i] = where vertex i of A ended up in the permuted B.
    gt_matching_A_to_B = invert_permutation(permutation)
    # apply permutation to mesh, points and faces
    shape_B_points = shape_B.points[permutation, :]
    shape_B_faces = invert_permutation(permutation)[shape_B.cells_dict["triangle"]]
    meshio.write(filename=str(sample_path / f"A.off"), mesh=shape_A)
    meshio.write(
        filename=str(sample_path / f"B.off"),
        mesh=Mesh(shape_B_points, [("triangle", shape_B_faces)]),
    )
    np.savetxt(
        fname=str(sample_path / f"gt_matching_A_to_B.txt"),
        X=gt_matching_A_to_B,
        fmt="%d",
    )
    with (sample_path / "meta.json").open("w") as f:
        json.dump({"id": {"A": shape_A_path.name, "B": shape_B_path.name}}, f, indent=4)
| 2,803 | 27.612245 | 94 | py |
transmatching | transmatching-main/evaluation/datasets/faust_s2t/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
from pytorch_lightning import seed_everything
from scipy.io import loadmat
from tqdm import tqdm
from evaluation.utils import PROJECT_ROOT, Mesh, invert_permutation, plot_meshes
FAUST_PATH = Path("/run/media/luca/LocalDisk/Datasets/MPI-FAUST/training/registrations")
assert FAUST_PATH.exists(), "Do not regenerate! Download from Drive or DVC."
TEMPLATE_PATH = Path(PROJECT_ROOT / "evaluation/datasets/faust_1k_s2t/12ktemplate.ply")
template = meshio.read(TEMPLATE_PATH)
template_to_faust_matchings = (
PROJECT_ROOT / "evaluation/datasets/faust_s2t/1k_to_faust_matchings.mat"
)
template_idxs = loadmat(template_to_faust_matchings)["idx_rem"].squeeze() - 1
seed_everything(0)
shapes_paths = list(FAUST_PATH.glob("*.ply"))
n_shapes = len(shapes_paths)
for i in tqdm(range(n_shapes)):
sample_path = PROJECT_ROOT / "evaluation/datasets/faust_s2t" / f"data/{i:03}"
assert (
not sample_path.exists()
), "Folder already exists! Inconsistency risk, delete <data> and regenerate from scratch."
sample_path.mkdir(parents=True, exist_ok=True)
shape_A = template
shape_B_path = shapes_paths[i]
shape_B = meshio.read(shape_B_path)
gt_matching_A_to_B = template_idxs
meshio.write(filename=str(sample_path / f"A.off"), mesh=shape_A)
meshio.write(filename=str(sample_path / f"B.off"), mesh=shape_B)
np.savetxt(
fname=str(sample_path / f"gt_matching_A_to_B.txt"),
X=gt_matching_A_to_B,
fmt="%d",
)
with (sample_path / "meta.json").open("w") as f:
json.dump({"id": {"A": "12ktemplate.ply", "B": shape_B_path.name}}, f, indent=4)
# plot_meshes(
# meshes=[
# Mesh(v=shape_A.points, f=None, color=shape_A.points[:, 0]),
# Mesh(v=shape_B.points, f=None, color=shape_A.points[template_idxs, 0]),
# ],
# titles=["A", "B"],
# showtriangles=[False, False],
# showscales=None,
# autoshow=False,
# ).show()
| 1,995 | 32.266667 | 94 | py |
transmatching | transmatching-main/evaluation/datasets/shrec19/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
import scipy
from meshio import Mesh
from plotly import graph_objects as go
from pytorch_lightning import seed_everything
from scipy import io
from tqdm import tqdm
from evaluation.utils import PROJECT_ROOT
# Generator for the shrec19 evaluation dataset: converts the pairs listed in
# the SHREC19 connectivity file from .mat meshes to the A/B .off layout.
# No ground-truth matching file is written for this dataset.
seed_everything(0)
SHREC_PATH = Path(PROJECT_ROOT / "evaluation/datasets/shrec19/origin_shrec/mat")
SHREC_PAIRS = (
    PROJECT_ROOT
    / "evaluation/datasets/shrec19/SHREC19_matching_humans/PAIRS_list_SHREC19_connectivity.txt"
)
with SHREC_PAIRS.open("r") as f:
    lines = f.read().splitlines()
# Each line is "idx1,idx2".
couples = [list(map(int, line.split(","))) for line in lines]
for i, (shape_idx1, shape_idx2) in tqdm(enumerate(couples)):
    sample_path = Path(__file__).parent / f"data/{i:03d}"
    assert (
        not sample_path.exists()
    ), "Folder already exists! Inconsistency risk, delete <data> and regenerate from scratch."
    sample_path.mkdir(parents=True, exist_ok=True)
    shape_1_mat = scipy.io.loadmat(str(SHREC_PATH / f"{shape_idx1}.mat"))
    shape_2_mat = scipy.io.loadmat(str(SHREC_PATH / f"{shape_idx2}.mat"))
    # MATLAB struct "M": field [0] holds vertices, field [1] holds 1-based faces
    # — presumably; verify against the original SHREC19 .mat layout.
    shape1 = shape_1_mat["M"][0][0][0]
    shape2 = shape_2_mat["M"][0][0][0]
    faces_1 = shape_1_mat["M"][0][0][1].astype("long") - 1
    faces_2 = shape_2_mat["M"][0][0][1].astype("long") - 1
    meshio.write(
        filename=str(sample_path / f"A.off"),
        mesh=Mesh(shape1, [("triangle", faces_1)]),
    )
    meshio.write(
        filename=str(sample_path / f"B.off"),
        mesh=Mesh(shape2, [("triangle", faces_2)]),
    )
    with (sample_path / "meta.json").open("w") as f:
        json.dump({"id": {"A": shape_idx1, "B": shape_idx2}}, f, indent=4)
| 1,681 | 29.035714 | 95 | py |
transmatching | transmatching-main/evaluation/datasets/faust_1k_0noise/generate.py | import json
from pathlib import Path
import meshio
import numpy as np
from meshio import Mesh
from pytorch_lightning import seed_everything
from scipy import io
from tqdm import tqdm
from evaluation.utils import PROJECT_ROOT
# Generator for the faust_1k_0noise evaluation dataset: random pairs of the
# clean (sigma 0.00) 1k FAUST shapes.
N_PAIRS = 100
FAUST_0NOISE = Path(
    PROJECT_ROOT / "evaluation/datasets/faust_1k_0noise/FAUST_noise_0.00.mat"
)
seed_everything(0)
shapes = io.loadmat(str(FAUST_0NOISE))
n_shapes = shapes["vertices"].shape[0]
for i in tqdm(range(N_PAIRS)):
    shape_A_idx = np.random.randint(n_shapes)
    shape_B_idx = np.random.randint(n_shapes)
    while shape_A_idx == shape_B_idx:  # avoid taking A exactly equal to B
        shape_B_idx = np.random.randint(n_shapes)
    sample_path = PROJECT_ROOT / "evaluation/datasets/faust_1k_0noise" / f"data/{i:03}"
    assert (
        not sample_path.exists()
    ), "Folder already exists! Inconsistency risk, delete <data> and regenerate from scratch."
    sample_path.mkdir(parents=True, exist_ok=True)
    # MATLAB faces are 1-based; shift to 0-based for meshio.
    shape_A = Mesh(
        points=shapes["vertices"][shape_A_idx],
        cells=[("triangle", shapes["faces"] - 1)],
    )
    shape_B = Mesh(
        points=shapes["vertices"][shape_B_idx],
        cells=[("triangle", shapes["faces"] - 1)],
    )
    # Shapes share the same topology: identity ground truth.
    gt_matching_A_to_B = np.arange(
        shapes["vertices"][shape_A_idx].shape[0], dtype=np.int64
    )
    meshio.write(filename=str(sample_path / f"A.off"), mesh=shape_A)
    meshio.write(filename=str(sample_path / f"B.off"), mesh=shape_B)
    np.savetxt(
        fname=str(sample_path / f"gt_matching_A_to_B.txt"),
        X=gt_matching_A_to_B,
        fmt="%d",
    )
    with (sample_path / "meta.json").open("w") as f:
        json.dump({"id": {"A": shape_A_idx, "B": shape_B_idx}}, f, indent=4)
| 1,733 | 27.9 | 94 | py |
transmatching | transmatching-main/evaluation/ui/generate_point_colors.py | import numpy as np
import streamlit as st
from pytorch_lightning import seed_everything
from stqdm import stqdm
from evaluation.competitors.eval_dataset import EvalDataset
from evaluation.utils import (
PROJECT_ROOT,
Mesh,
convert_colors,
get_dists,
get_hydra_cfg,
get_point_colors,
plot_meshes,
)
# Streamlit app: preview a smooth point-wise coloring for a chosen evaluation
# sample and export it, either once per dataset or once per shape.
st.markdown("App to generate a nice point-wise color for each evaluation dataset.")
seed_everything(0)
datasets = [
    x.name for x in (PROJECT_ROOT / "evaluation" / "datasets").iterdir() if x.is_dir()
]
dataset_name = st.selectbox("Select dataset to consider:", datasets)
dataset = EvalDataset(dataset_name)
sample_idx = st.number_input("Select sample index", min_value=0, max_value=len(dataset))
sample = dataset[sample_idx]
# NOTE(review): geo_dists and cfg are computed but never used below.
geo_dists = get_dists()
cfg = get_hydra_cfg()
points_A = sample["points_A"]
frequency = st.number_input("Select color frequency:", value=np.pi)
color = get_point_colors(
    sample["points_A"],
    frequency=frequency,
)
# Preview the coloring on shape A of the selected sample.
f = plot_meshes(
    meshes=[
        Mesh(
            v=points_A,
            f=None,
            color=convert_colors(color),
        ),
    ],
    titles=["Shape A"],
    showtriangles=[False],
    showscales=None,
    autoshow=False,
)
st.plotly_chart(f, use_container_width=True)
col1, col2 = st.columns(2)
with col1:
    st.subheader("Dataset wise color")
    st.markdown(
        "Export color for the whole dataset. "
        "The **shapes in the dataset must be in correspondence**!"
    )
    # One shared colors.npy for all samples (valid only if shapes correspond).
    if st.button("Export global color"):
        colorsfile = (
            PROJECT_ROOT
            / "evaluation"
            / "datasets"
            / dataset_name
            / "data"
            / "colors.npy"
        )
        np.save(colorsfile, color)
        st.info(f"Saved: `{colorsfile}`")
with col2:
    st.subheader("Shape wise color")
    st.markdown(
        "Export color for each shape in each sample of the dataset. "
        "The **coloring may change between shapes**!"
    )
    # One colors_A.npy / colors_B.npy per sample folder.
    if st.button("Export all colors"):
        for sample in stqdm(dataset):
            path = sample["path"]
            for shape, file in [
                ("points_A", "colors_A.npy"),
                ("points_B", "colors_B.npy"),
            ]:
                colorsfile = path / file
                color = get_point_colors(
                    sample[shape],
                    frequency=frequency,
                )
                np.save(colorsfile, color)
        st.info(
            f"Saved `{len(dataset)* 2}` colors under <`{path.parent}/*/color_*.npy`>"
        )
# dataset = get_dataset(cfg)
#
# colorsfile = Path(get_env("SURREAL_COLOR"))
#
# set_dataset_augmentations(dataset)
#
#
# st.button("Rerun")
# sampledidx = st.number_input("Sample idx", min_value=0, max_value=len(dataset), value=6)
# sample = dataset[sampledidx]
#
# shape_A = sample["shape_A"]
# shape_B = sample["shape_B"]
# permuted_shape_A = sample["permuted_shape_A"]
# permuted_shape_B = sample["permuted_shape_B"]
# permutations_from_A_to_B = sample["permutation_from_A_to_B"]
#
#
# s = permuted_shape_A
#
# # color = get_point_colors(shape_A, frequency=st.number_input("Freq:", value=np.pi))
# color = np.load(Path(get_env("SURREAL_COLOR")))
#
# plot_color = color[sample["permutation_A"]]
# f = plot_meshes(
# meshes=[
# Mesh(
# v=s,
# f=None,
# color=convert_colors(plot_color),
# ),
# ],
# titles=["Shape A"],
# showtriangles=[False],
# showscales=None,
# autoshow=False,
# )
# st.plotly_chart(f, use_container_width=True)
#
# st.write(color)
#
#
| 3,605 | 24.394366 | 90 | py |
transmatching | transmatching-main/transmatching/Data/dataset_faust.py | import numpy as np
import os
import torch
import trimesh
from torch.utils.data import Dataset
from scipy.io import loadmat
from transmatching.Utils.utils import RandomRotateCustom, est_area
class FaustDataset(Dataset):
    """Remeshed FAUST shapes paired with the 12k template, centered for matching."""

    def __init__(self, in_path, area=True):
        """Load the FAUST shapes and the reference template mesh.

        Args:
            in_path: dataset root; must contain `Fausts/FAUSTS_rem.mat`
                and `12ktemplate.ply`.
            area: when True, center each shape on its area-weighted
                barycenter instead of the plain vertex mean.
        """
        self.in_path = in_path
        self.area = area
        self.mat = loadmat(self.in_path + "Fausts/FAUSTS_rem.mat")
        self.data = torch.from_numpy(self.mat["vertices"]).float()
        template = trimesh.load_mesh(
            os.path.join(self.in_path, "12ktemplate.ply"), process=False
        )
        self.reference = torch.from_numpy(template.vertices).float()

    def __len__(self):
        return self.data.shape[0]

    @staticmethod
    def _center_by_area(points):
        # Subtract the area-weighted barycenter (per-vertex weights from est_area).
        weights = est_area(points[None, ...])[0]
        weights = weights / weights.sum(-1, keepdims=True)
        barycenter = (points * weights[..., None]).sum(-2, keepdims=True)
        return points - barycenter

    def __getitem__(self, index):
        """Return {'x': centered FAUST shape, 'y': centered template}."""
        # Fixed rescaling applied to the FAUST vertices (value presumably
        # chosen to match the template's scale — confirm against training).
        shape = self.data[index] * 0.7535
        if self.area:
            ref = self._center_by_area(self.reference)
            shape = self._center_by_area(shape)
        else:
            shape = shape - torch.mean(shape, dim=(-2))
            ref = self.reference - torch.mean(self.reference, dim=-2)
        return {"x": shape, "y": ref}
if __name__ == '__main__':
    # Ad-hoc manual check: load the raw .mat file and inspect its contents.
    a = loadmat("../../test/dataset/Fausts/FAUSTS_rem.mat")
    print(a)
    # d = FaustDataset("../../test/dataset/")
    # print(d.mat.keys())
    # faces = trimesh.load_mesh((os.path.join("../../test/dataset/", '12ktemplate.ply')), process=False).faces
    # mesh = trimesh.Trimesh(vertices=d[0]["x"], faces=d.mat["faces"] - 1, process=False)
    # mesh.show()
# print(len(d)) | 1,705 | 31.188679 | 141 | py |
transmatching | transmatching-main/transmatching/Data/dataset_smpl.py | import numpy as np
import os
import torch
import trimesh
from torch.utils.data import Dataset
from transmatching.Utils.utils import RandomRotateCustom, est_area
class SMPLDataset(Dataset):
    """SMPL 12k point-cloud dataset (train/test splits) plus a fixed template.

    Items are dicts ``{'x': shape, 'y': ref}``; training shapes additionally
    receive a random rotation augmentation, and both clouds are centred
    (area-weighted when ``area=True``).
    """
    def __init__(self, in_path, train=True, area=True):
        # in_path: directory with 12k_shapes_{train,test}.npy and 12ktemplate.ply.
        self.in_path = in_path
        self.train = train
        self.area = area
        self.train_data = torch.from_numpy(np.load(os.path.join(self.in_path, '12k_shapes_train.npy'))).float()
        self.test_data = torch.from_numpy(np.load(os.path.join(self.in_path, '12k_shapes_test.npy'))).float()
        # process=False preserves the template's vertex ordering.
        self.reference = torch.from_numpy(trimesh.load_mesh((os.path.join(self.in_path, '12ktemplate.ply')),
                                                            process=False).vertices).float()
    def __len__(self):
        if self.train:
            return self.train_data.shape[0]
        return self.test_data.shape[0]
    def __getitem__(self, index):
        if self.train:
            shape = self.train_data[index]
            # Augmentation: random rotation up to +/-180 degrees about axis 1.
            shape = RandomRotateCustom(shape, 180, 1)
        else:
            shape = self.test_data[index]
        ref = self.reference
        if self.area:
            # Centre both clouds at their area-weighted barycentre.
            A = est_area(ref[None,...])[0]
            ref = ref - (ref*(A/A.sum(-1,keepdims=True))[...,None]).sum(-2,keepdims=True)
            A = est_area(shape[None,...])[0]
            shape = shape - (shape*(A/A.sum(-1,keepdims=True))[...,None]).sum(-2,keepdims=True)
        else:
            shape = shape - torch.mean(shape, dim=(-2))
            ref = self.reference - torch.mean(self.reference, dim=-2)
        return {'x': shape, 'y': ref}
if __name__ == '__main__':
    # No standalone demo for this module.
    pass
| 1,668 | 31.72549 | 111 | py |
transmatching | transmatching-main/transmatching/Model/feedforward.py | from torch import nn
import torch.nn.functional as F
class FeedForward(nn.Module):
    """Position-wise two-layer MLP: Linear -> ReLU -> Dropout -> Linear.

    Maps (..., d_model) to (..., d_model) through a d_ff-wide hidden layer.
    """
    def __init__(self, d_model, d_ff=32, dropout=0.05):
        super().__init__()
        # Attribute names are kept unchanged so existing checkpoints still load.
        self.linear_1 = nn.Linear(d_model, d_ff)
        self.dropout = nn.Dropout(dropout)
        self.linear_2 = nn.Linear(d_ff, d_model)
    def forward(self, x):
        hidden = F.relu(self.linear_1(x))
        return self.linear_2(self.dropout(hidden))
| 435 | 23.222222 | 55 | py |
transmatching | transmatching-main/transmatching/Model/norm.py | import torch
from torch import nn
class Norm(nn.Module):
    """Layer normalisation over the last dimension with a learnable gain/bias.

    Uses the (unbiased) sample standard deviation, unlike nn.LayerNorm.
    """
    def __init__(self, d_model, eps=1e-06):
        super().__init__()
        self.size = d_model
        # Per-feature gain and shift; names fixed for checkpoint compatibility.
        self.alpha = nn.Parameter(torch.ones(self.size))
        self.bias = nn.Parameter(torch.zeros(self.size))
        self.eps = eps
    def forward(self, x):
        centered = x - x.mean(dim=-1, keepdim=True)
        scale = x.std(dim=-1, keepdim=True) + self.eps
        return self.alpha * centered / scale + self.bias
| 467 | 23.631579 | 121 | py |
transmatching | transmatching-main/transmatching/Model/layernorm.py | from torch import nn
class AddNorm(nn.Module):
    """Residual connection followed by LayerNorm: LN(dropout(Y) + X)."""
    def __init__(self, normalized_shape, dropout):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.ln = nn.LayerNorm(normalized_shape)
    def forward(self, X, Y):
        residual = self.dropout(Y) + X
        return self.ln(residual)
| 296 | 18.8 | 50 | py |
transmatching | transmatching-main/transmatching/Model/model.py | import torch
from torch import nn
from transmatching.Model.decoder import Decoder
from transmatching.Model.encoder import Encoder
from transmatching.Model.attention import MultiHeadAttention
from transmatching.Model.feedforward import FeedForward
from transmatching.Model.layernorm import AddNorm
from transmatching.Model.norm import Norm
from transmatching.Model.pos_enc import PositionalEncoderLearnt
import torch
from transmatching.Utils.utils import get_clones, est_area
from transmatching.Model.debug import Debug
class Model(nn.Module):
    """Encoder/decoder point-cloud matching model.

    The source cloud is encoded into ``d_bottleneck`` learnt latent tokens,
    the target cloud is decoded against that latent code, and ``self.out``
    maps decoder features back to ``d_origin`` coordinates.
    """
    def __init__(self, d_bottleneck, d_latent, d_channels, d_middle, N, heads, max_seq_len, d_origin, dropout=0, estimate_area=True):
        super().__init__()
        self.encoder = Encoder(d_latent, d_channels, d_middle, N, heads, max_seq_len, d_origin, dropout)
        self.decoder = Decoder(d_channels, d_latent, d_middle, N, heads, max_seq_len, d_origin, dropout)
        # Deep MLP head mapping decoder features back to raw coordinates.
        self.out = nn.Sequential(nn.Linear(d_channels, d_origin * 16), nn.ReLU(),
                                 nn.Linear(d_origin * 16, d_origin * 8), nn.ReLU(),
                                 nn.Linear(d_origin * 8, d_origin * 4), nn.ReLU(),
                                 nn.Linear(d_origin * 4, d_origin * 2), nn.ReLU(),
                                 nn.Linear(d_origin * 2, d_origin))
        # Learnt latent bottleneck tokens, shared across all inputs.
        self.tokens = nn.Parameter(torch.randn((d_bottleneck, d_latent)))
        self.estimate_area = estimate_area
    def forward(self, src, trg):
        Ds = Dt = None
        if self.estimate_area:
            # Per-point area estimates used as attention weights; computed
            # without gradients on purpose.
            with torch.no_grad():
                Ds = est_area(src)
                Dt = est_area(trg)
            if Debug.debug:
                # Keep CPU copies around for inspection when debugging.
                self.Ds=Ds.cpu()
                self.Dt=Dt.cpu()
        # Broadcast the shared tokens across the batch.
        x = self.tokens.expand(src.size(0), self.tokens.size(0), self.tokens.size(1))
        e_out = self.encoder(x, src, Ds)
        d_out = self.decoder(trg, e_out, Dt)
        out = self.out(d_out)
        return out
if __name__ == '__main__':
    # Smoke test. Fixes two defects in the original: the class is `Model`
    # (there is no `Model2` in this module) and forward() requires BOTH a
    # source and a target point cloud.
    src = torch.ones(1, 100, 3)
    trg = torch.ones(1, 100, 3)
    model = Model(100, 32, 32, 128, 4, 1, 100, 3)
    print(model(src, trg).shape)
| 2,138 | 37.890909 | 133 | py |
transmatching | transmatching-main/transmatching/Model/encoder.py | from torch import nn
from transmatching.Model.attention import MultiHeadAttention
from transmatching.Model.feedforward import FeedForward
from transmatching.Model.layernorm import AddNorm
from transmatching.Model.norm import Norm
from transmatching.Model.pos_enc import PositionalEncoderLearnt
import torch
from transmatching.Utils.utils import get_clones
class EncoderLayer(nn.Module):
    """One encoder block: cross-attention from the latent tokens to the input
    cloud, then self-attention among the tokens, each with residual Add&Norm."""
    def __init__(self, d_latent, d_channels, heads, d_middle, d_origin, dropout=0):
        super().__init__()
        self.ln1 = AddNorm(d_latent, dropout=dropout)
        self.ln2 = AddNorm(d_latent, dropout=dropout)
        # Projects the d_channels cross-attention output back to token width.
        self.linear1 = nn.Linear(d_channels, d_latent)
        self.attn1 = MultiHeadAttention(heads, d_latent, d_channels, dropout=dropout)
        self.attn2 = MultiHeadAttention(heads, d_latent, d_latent, dropout=dropout)
        self.ff = FeedForward(d_latent, d_middle, dropout=dropout)
        # Lifts raw d_origin coordinates (e.g. xyz) to d_channels features;
        # note d_channels must be >= 8 for the first Linear to be non-empty.
        self.embedder = nn.Sequential(nn.Linear(d_origin, d_channels // 8), nn.ReLU(),
                                      nn.Linear(d_channels // 8, d_channels // 4), nn.ReLU(),
                                      nn.Linear(d_channels // 4, d_channels // 2), nn.ReLU(),
                                      nn.Linear(d_channels // 2, d_channels))
    def forward(self, x, src, weights1=None, weights2=None):
        # weights1/weights2: optional per-point weights (estimated areas) for
        # the cross- and self-attention respectively.
        src = self.embedder(src)
        latent = self.attn1(x, src, src, weights=weights1)
        x = self.ln1(x, self.linear1(latent))
        latent2 = self.attn2(x, x, x, weights=weights2)
        latent2 = self.ff(latent2)
        x = self.ln2(x, latent2)
        return x
class Encoder(nn.Module):
    """Stack of N EncoderLayers acting on a set of latent tokens."""
    def __init__(self, d_latent, d_channels, d_middle, N, heads, max_seq_len, d_origin, dropout=0):
        super().__init__()
        self.N = N
        self.pe = PositionalEncoderLearnt(d_latent, max_seq_len)
        self.norm = Norm(d_latent)
        # N independent deep copies of the same layer (weights NOT shared).
        self.encoder_layer = get_clones(EncoderLayer(d_latent, d_channels, heads, d_middle, d_origin, dropout), self.N)
    def forward(self, x, src, weights=None):
        # x: latent tokens; src: input point cloud; weights: per-point areas.
        x = self.pe(x)
        for i in range(self.N):
            x = self.encoder_layer[i](x, src, weights1=weights)
        return self.norm(x)
if __name__ == '__main__':
    # Smoke test. The original passed only 6 of Encoder's 7 required
    # positional arguments (d_latent, d_channels, d_middle, N, heads,
    # max_seq_len, d_origin) and used d_channels=3, which makes the
    # embedder's first Linear(d_origin, d_channels // 8) zero-width.
    x = torch.zeros(10, 10, 5)
    src = torch.ones(10, 100, 3)
    enc = Encoder(5, 32, 128, 4, 1, 1000, 3)
    print(enc(x, src).shape)
| 2,357 | 35.84375 | 119 | py |
transmatching | transmatching-main/transmatching/Model/decoder.py | import torch
from torch import nn
from transmatching.Model.attention import MultiHeadAttention
from transmatching.Model.feedforward import FeedForward
from transmatching.Model.layernorm import AddNorm
from transmatching.Model.norm import Norm
from transmatching.Model.pos_enc import PositionalEncoderLearnt
from transmatching.Utils.utils import get_clones
class DecoderLayer(nn.Module):
    """One decoder block: cross-attention from target points to the latent
    code, then self-attention among target points.

    Residual/FF dropout is hard-wired to 0 here; only the attention modules
    receive the `dropout` argument.
    """
    def __init__(self, d_channels, heads, d_middle, dropout=0):
        super().__init__()
        self.ln1 = AddNorm(d_channels, dropout=0)
        self.ln2 = AddNorm(d_channels, dropout=0)
        self.linear1 = nn.Linear(d_channels, d_channels)
        self.attn1 = MultiHeadAttention(heads, d_channels, d_channels, dropout=dropout)
        self.attn2 = MultiHeadAttention(heads, d_channels, d_channels, dropout=dropout)
        self.ff = FeedForward(d_channels, d_middle, dropout=0)
    def forward(self, x, src, weights1=None, weights2=None):
        # weights1/weights2: optional per-point weights for each attention.
        latent = self.attn1(x, src, src, weights=weights1)
        x = self.ln1(x, self.linear1(latent))
        latent2 = self.attn2(x, x, x, weights=weights2)
        latent2 = self.ff(latent2)
        x = self.ln2(x, latent2)
        return x
class Decoder(nn.Module):
    """Stack of N DecoderLayers decoding target coordinates against a latent code.

    NOTE(review): `d_latent` is accepted but never used -- every submodule is
    sized by `d_channels`; verify the intended parameter order before relying
    on it (Model passes (d_channels, d_latent, ...) here).
    """
    def __init__(self, d_latent, d_channels, d_middle, N, heads, max_seq_len, d_origin, dropout=0):
        super().__init__()
        self.N = N
        # Positional encoding is applied to the latent code (src), not to x.
        self.pe = PositionalEncoderLearnt(d_channels, max_seq_len)
        self.norm = Norm(d_channels)
        # N independent deep copies (weights NOT shared); dropout fixed to 0.
        self.decoder_layer = get_clones(DecoderLayer(d_channels, heads, d_middle, 0), self.N)
        # self.decoder_layer[0] = DecoderLayer(d_channels, heads, d_middle, 0)
        # Lifts raw d_origin coordinates to d_channels features; requires
        # d_channels >= 8 so the first Linear is non-empty.
        self.embedder = nn.Sequential(nn.Linear(d_origin, d_channels // 8), nn.ReLU(),
                                      nn.Linear(d_channels // 8, d_channels // 4), nn.ReLU(),
                                      nn.Linear(d_channels // 4, d_channels // 2), nn.ReLU(),
                                      nn.Linear(d_channels // 2, d_channels))
    def forward(self, x, src, weights=None):
        # x: target coordinates; src: latent code from the encoder.
        src = self.pe(src)
        x = self.embedder(x)
        for i in range(self.N):
            x = self.decoder_layer[i](x, src, weights2=weights)
        return self.norm(x)
if __name__ == '__main__':
    # Smoke test. The original passed only 6 of Decoder's 7 required
    # positional arguments and sized the latent `src` at 5 features, which
    # breaks the d_channels-wide layers and makes the embedder's first
    # Linear(d_origin, d_channels // 8) zero-width. The decoder operates at
    # d_channels width, so `src` must carry d_channels (=32) features.
    src = torch.zeros(10, 20, 32)
    x = torch.ones(10, 100, 3)
    dec = Decoder(3, 32, 128, 4, 1, 1000, 3)
    print(dec(x, src).shape)
| 2,412 | 35.014925 | 99 | py |
transmatching | transmatching-main/transmatching/Model/attention.py | import math
import torch
from torch import nn
import torch.nn.functional as F
from transmatching.Model.debug import Debug
try:
from pykeops.torch import LazyTensor
except ImportError:
Debug.keops=False
def attention(q, k, v, d_k, mask=None, dropout=None, weights=None, w=1):
    """Scaled dot-product attention with optional per-key weighting.

    q, k, v: (batch, heads, n, d_k). `weights`, when given, rescales the
    attention distribution per key (area weighting). Returns (output, scores);
    scores is None on the KeOps path. `w` is unused (kept for compatibility).
    """
    if Debug.keops:
        # Lazy (KeOps) evaluation: never materialises the n_q x n_k matrix.
        bs = q.shape[0] # b x h x nq x d
        lq = LazyTensor(q.reshape(-1,q.shape[2],q.shape[3])[:,None,:,:].contiguous()) #(b x h) x 1 x nq x d
        lk = LazyTensor(k.reshape(-1,k.shape[2],k.shape[3])[:,:,None,:].contiguous()) #(b x h) x nk x 1 x d
        lv = v.reshape(-1,v.shape[2],v.shape[3]).contiguous()
        scores = (lq*lk).sum(-1)/ math.sqrt(d_k) # b x nk x nq
        scores = scores.exp()
        if weights is not None:
            lw = LazyTensor(weights[:,None,:].repeat(1,q.shape[1],1).reshape(-1,weights.shape[-1])[:,:,None,None].contiguous())
            scores = scores*lw
        # Manual softmax normalisation (exp / sum) fused into the reduction.
        output = scores.t()@lv
        output = output/scores.sum(1)
        # Bug fix: test `dropout is None` FIRST. The original wrote
        # `assert(dropout.p==0 or dropout is None)`, which dereferences
        # dropout.p before the None check and raises AttributeError whenever
        # no dropout module is supplied.
        assert dropout is None or dropout.p == 0
        output = output.reshape(q.shape[0],q.shape[1],output.shape[1],output.shape[2])
        #hotfix [:bs]: backprob through lazytensors fail if bs=1
        return output[:bs], None
    scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        mask = mask.unsqueeze(1)
        scores = scores.masked_fill(mask == 0, -1e9)
    scores = F.softmax(scores, dim=(-1))
    wscores = scores
    if weights is not None:
        # Reweight per key, then renormalise so rows still sum to 1.
        wscores = wscores*weights[:,None,None,:]
        wscores = wscores/wscores.sum(-1,keepdims=True)
    if dropout is not None:
        # NOTE(review): the dropped-out tensor is assigned to `scores` (the
        # returned attention map) but the matmul below uses `wscores`, so
        # dropout has no effect on the output -- confirm intent before changing.
        scores = dropout(wscores)
    output = torch.matmul(wscores, v)
    return output, scores
class MultiHeadAttention(nn.Module):
    """Multi-head attention; queries of width d_latent are projected to d_model.

    Supports optional per-key weights which are forwarded to `attention`.
    """
    # Class-level flag; not read anywhere in this file.
    weighted=False
    def __init__(self, heads, d_latent, d_model, dropout=0.0):
        super().__init__()
        self.d_model = d_model
        # Per-head width; assumes d_model is divisible by heads.
        self.d_k = d_model // heads
        self.h = heads
        self.q_linear = nn.Linear(d_latent, d_model)
        self.v_linear = nn.Linear(d_model, d_model)
        self.k_linear = nn.Linear(d_model, d_model)
        self.dropout = nn.Dropout(dropout)
        self.out = nn.Linear(d_model, d_model)
    def forward(self, q, k, v, mask=None, weights=None):
        bs = q.size(0)
        # Project and split into heads: (bs, n, h, d_k) -> (bs, h, n, d_k).
        k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
        q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
        v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
        k = k.transpose(1, 2)
        q = q.transpose(1, 2)
        v = v.transpose(1, 2)
        scores, _p = attention(q, k, v, self.d_k, mask, self.dropout, weights)
        if Debug.debug:
            # NOTE(review): _p is None on the KeOps path, so this line would
            # raise there; only enable Debug.debug with KeOps disabled.
            self.scores = _p.detach().cpu()
        # Merge heads back together and apply the output projection.
        concat = scores.transpose(1, 2).reshape(bs, -1, self.d_model)
        output = self.out(concat)
        return output
| 2,928 | 28.887755 | 127 | py |
transmatching | transmatching-main/transmatching/Model/pos_enc.py | import torch
from torch import nn
class PositionalEncoderLearnt(nn.Module):
    """Adds a learnt positional embedding (initialised to zero) to the input.

    Only the first seq_len rows of the embedding table are used, where
    seq_len is the size of the input's second-to-last dimension.
    """
    def __init__(self, d_model, max_seq_len):
        super().__init__()
        self.pos = nn.Parameter(torch.zeros(max_seq_len, d_model))
    def forward(self, x):
        length = x.size(-2)
        return x + self.pos[:length]
| 328 | 19.5625 | 66 | py |
transmatching | transmatching-main/transmatching/Utils/refine.py | import torch
# from transmatching.Model.model import Model
import matplotlib.pyplot as plt
import time
import gc
from transmatching.Utils.utils import get_clones, est_area, chamfer_loss
from transmatching.Model.debug import Debug
def chamfer(y_hat, src):
    """Symmetric chamfer distance between batched point clouds.

    Returns one value per batch element: mean nearest-neighbour distance from
    each cloud to the other, summed over both directions.
    """
    pairwise = torch.cdist(y_hat, src)
    to_src = pairwise.min(-1)[0].mean(-1)
    to_y_hat = pairwise.min(-2)[0].mean(-1)
    return to_src + to_y_hat
def subsamp(X, samp=10000):
    """Randomly keep at most `samp` points along dim 1.

    Returns X itself (no copy) when it already has fewer than `samp` points.
    """
    n_points = X.shape[1]
    if n_points < samp:
        return X
    keep = torch.randperm(n_points)[:samp]
    return X[:, keep, :]
def get_splits(Y, maxvert=15000):
    """Partition the row indices of Y into random chunks of at most ~maxvert.

    Returns a list of index lists that together cover every row exactly once.
    """
    n_rows = Y.shape[0]
    n_chunks = (n_rows + 1) // maxvert + 1
    perm = torch.randperm(n_rows).numpy()
    chunk = n_rows // n_chunks + 1
    return [list(perm[i * chunk:(i + 1) * chunk]) for i in range(n_chunks)]
def register(model,X,Y,maxvert=350000):
    """Run model(X, Y), splitting Y into chunks when it is too large.

    For oversized targets (only batch size 1 supported) the target rows are
    processed in random chunks and reassembled into a single prediction.
    """
    if Y.shape[1]<maxvert:
        return model(X,Y)
    assert(X.shape[0]==1)
    X = X[0]
    Y = Y[0]
    selected = get_splits(Y,maxvert)
    # Accumulator with Y's shape/device; filled chunk by chunk below.
    y_hats = Y*0
    for sel in selected:
        with torch.no_grad():
            y_hat = model(X[None,...],Y[None,sel,:].cuda())
            y_hats[sel,:] = y_hat[0]
            del y_hat
            gc.collect()
    return y_hats[None,...]
def bidirectional_match(model,shape1,shape2, dorefine=False,extra_data=None):
    """Match two shapes in both directions and keep the better direction.

    Runs the model shape1->shape2 and shape2->shape1 (optionally with latent
    refinement) and returns (src, trg, prediction[, extra_data]) ordered so
    that the direction with the lower chamfer error comes first.
    NOTE(review): returns None implicitly when batch size > 1 and the two
    shapes have different shapes -- confirm callers never hit that case.
    """
    ref_steps = 10
    if dorefine:
        y_hats,loss1 = refine(model,shape1,shape2,max_iter=ref_steps)
        y_hat1=y_hats[-1].to(shape1.device)
        y_hats,loss2 = refine(model,shape2,shape1,max_iter=ref_steps)
        y_hat2=y_hats[-1].to(shape1.device)
    else:
        y_hat1 = register(model,subsamp(shape1), shape2)
        loss1 = chamfer(subsamp(y_hat1),subsamp(shape1))
        y_hat2 = register(model,subsamp(shape2), shape1)
        loss2 = chamfer(subsamp(y_hat2),subsamp(shape2))
    # print('%.2e - %.2e' % (loss1,loss2))
    # swap shapes if the error decreases
    better = torch.stack([loss1, loss2],-1).argmin(-1)
    if shape1.shape[0]==1:
        if extra_data is not None and len(extra_data)==2:
            return [shape1,shape2][better],\
                   [shape2,shape1][better],\
                   [y_hat1,y_hat2][better],\
                   (extra_data[better],extra_data[1-better])
        return [shape1,shape2][better],\
               [shape2,shape1][better],\
               [y_hat1,y_hat2][better]
    if shape1.shape==shape2.shape: #handles batches
        # Per-batch-element selection via 0/1 blending with `better`.
        a1 = shape1*(1-better)[:,None,None] + shape2*better[:,None,None]
        return shape1*(1-better)[:,None,None] + shape2*better[:,None,None],\
               shape2*(1-better)[:,None,None] + shape1*better[:,None,None],\
               y_hat1*(1-better)[:,None,None] + y_hat2*better[:,None,None]
def refine(model,src,trg,max_iter=50, samp=1000, lr=5e-3, saveall=False):
    """Test-time refinement: optimise the latent code to lower chamfer loss.

    Encodes `src` once, then runs Adam on the (detached) latent code for
    `max_iter` steps, decoding a random `samp`-point subset of `trg` each
    step. Returns (y_hats, losses): decoded predictions (first, last, and --
    when saveall -- every iteration; the last entry is re-decoded on the full
    target) and the final detached per-batch losses.
    """
    src = subsamp(src)
    with torch.no_grad():
        # D = torch.cdist(src,src)
        # D = 1/(-50*D).exp().sum(-1).to(trg.device).detach()
        D = est_area(src)
        x = model.tokens.expand(src.size(0), model.tokens.size(0), model.tokens.size(1))
        e_out = model.encoder(x, src, D)
        # Pre-embed the full target once; subsets are sliced per iteration.
        x_d_pe = model.decoder.embedder(trg)
        # D = torch.cdist(trg,trg)
        # D = 1/(-50*D).exp().sum(-1).to(trg.device).detach()
        D = est_area(trg)
    # The latent code itself becomes the optimisation variable.
    e_opt = torch.autograd.Variable(e_out.detach(), requires_grad=True)
    opt = torch.optim.Adam([e_opt], lr=lr)
    y_hats=[]
    for it in range(max_iter):
        #decoding
        src_d = model.decoder.pe(e_opt)
        ssamp = torch.randperm(x_d_pe.shape[1])[:samp]
        x_d = x_d_pe[:,ssamp,:].clone()
        for i in range(model.decoder.N):
            x_d = model.decoder.decoder_layer[i](x_d, src_d, weights2=D[:,ssamp])
        d_out = model.decoder.norm(x_d)
        y_hat = model.out(d_out)
        if it==0 or it==max_iter-1 or saveall:
            y_hats.append(y_hat.detach().cpu())
        # dist = torch.cdist(y_hat,src)
        # losses = dist.min(-1)[0].mean(-1)+dist.min(-2)[0].mean(-1)
        losses = chamfer_loss(y_hat,src)
        loss = losses.mean()
        opt.zero_grad()
        loss.backward()
        opt.step()
    losses = losses.detach()
    del loss, src_d, x_d, d_out, y_hat, opt
    gc.collect()
    # Final decode over the FULL target with the optimised latent code.
    with torch.no_grad():
        src_d = model.decoder.pe(e_opt)
        x_d = x_d_pe[:,:,:].clone()
        for i in range(model.decoder.N):
            x_d = model.decoder.decoder_layer[i](x_d, src_d, weights2=D[:,:])
        d_out = model.decoder.norm(x_d)
        y_hat = model.out(d_out)
        y_hats[-1]=y_hat.detach().cpu()
    return y_hats, losses
def refine_hires(model,src,trg,max_iter=50, samp=3000, lr=5e-3, saveall=False):
    """High-resolution variant of `refine` for very large target clouds.

    Same latent-code optimisation, but recomputes area weights per sampled
    subset and performs the final full decode in chunks (batch size 1 only).
    Returns (y_hats, losses) as in `refine`.
    """
    src = subsamp(src,30000)
    with torch.no_grad():
        # D = torch.cdist(src,src)
        # D = 1/(-50*D).exp().sum(-1).to(trg.device).detach()
        D = est_area(src)
        x = model.tokens.expand(src.size(0), model.tokens.size(0), model.tokens.size(1))
        e_out = model.encoder(x, src, D)
        # Pre-embed the full target once; subsets are sliced per iteration.
        x_d_pe = model.decoder.embedder(trg)
    e_opt = torch.autograd.Variable(e_out.detach(), requires_grad=True)
    opt = torch.optim.Adam([e_opt], lr=lr)
    y_hats=[]
    for it in range(max_iter):
        #decoding
        src_d = model.decoder.pe(e_opt)
        ssamp = torch.randperm(x_d_pe.shape[1])[:samp]
        x_d = x_d_pe[:,ssamp,:].clone().contiguous()
        # D = torch.cdist(trg[:,ssamp,:],trg[:,ssamp,:])
        # D = 1/(-50*D).exp().sum(-1).to(trg.device).detach()
        # Area weights are recomputed on the sampled subset each step.
        D = est_area(trg[:,ssamp,:]).detach()
        for i in range(model.decoder.N):
            x_d = model.decoder.decoder_layer[i](x_d, src_d, weights2=D)
        d_out = model.decoder.norm(x_d)
        y_hat = model.out(d_out)
        if it==0 or it==max_iter-1 or saveall:
            y_hats.append(y_hat.detach().cpu())
        dist = torch.cdist(y_hat,src)
        losses = dist.min(-1)[0].mean(-1)+dist.min(-2)[0].mean(-1)
        loss = losses.mean()
        opt.zero_grad()
        loss.backward()
        opt.step()
    losses = losses.detach()
    del dist, loss, src_d, x_d, d_out, y_hat, opt
    gc.collect()
    assert(trg.shape[0]==1)
    # Final decode over the FULL target, processed in random chunks to keep
    # peak memory bounded; results are reassembled into a CPU tensor.
    with torch.no_grad():
        src_d = model.decoder.pe(e_opt)
        selected = get_splits(trg[0],300000)
        y_hat = trg.cpu()*0
        for sel in selected:
            # D = torch.cdist(trg[:,sel,:],trg[:,sel,:])
            # D = 1/(-50*D).exp().sum(-1).to(trg.device).detach()
            D = est_area(trg[:,sel,:])
            x_d = x_d_pe[:,sel,:].clone()
            for i in range(model.decoder.N):
                x_d = model.decoder.decoder_layer[i](x_d, src_d, weights2=D)
            d_out = model.decoder.norm(x_d)
            _y_hat = model.out(d_out)
            y_hat[0,sel,:] = _y_hat.cpu()
        y_hats[-1]=y_hat.detach().cpu()
    return y_hats, losses
| 7,206 | 31.463964 | 88 | py |
transmatching | transmatching-main/transmatching/Utils/utils.py | import igl
import torch
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import numpy as np
from plotly.subplots import make_subplots
from torch import nn
import copy
from transmatching.Model.debug import Debug
from scipy import sparse
from scipy.sparse.csgraph import dijkstra
from scipy.spatial.distance import cdist
try:
from pykeops.torch import LazyTensor
except ImportError:
Debug.keops=False
def est_area(X,sigma=1e3):
    """Estimate a per-point 'area' weight as the inverse local point density.

    Counts neighbours within radius 0.05 (comparing squared distances on the
    KeOps path, plain distances on the dense path) and returns 1/count per
    point. `sigma` is only used by the commented-out soft variants.
    """
    if Debug.keops and 4*X.shape[0]*X.shape[1]**2 > 2e9:
        # Lazy pairwise distances: avoids materialising an n^2 matrix.
        lX = LazyTensor(X[:,None,:,:].contiguous())
        lXt = LazyTensor(X[:,:,None,:].contiguous())
        Ds = ((lX-lXt)**2).sum(-1)
        # Ds = 1/(-sigma*Ds).exp().sum(dim=2).squeeze(-1)
        Ds = 1/(-Ds+0.05**2).step().sum(dim=2).squeeze(-1)
    else:
        Ds = torch.cdist(X,X)
        Ds = 1/(Ds<0.05).float().sum(-1)
    # Ds = 1/(-sigma*Ds).exp().sum(-1) #wrong formula, should be Ds^2
    return Ds
def chamfer_loss(X, Y):
    """Symmetric chamfer distance between batched point clouds X and Y.

    Returns one value per batch element: mean nearest-neighbour distance in
    each direction, summed.
    """
    pairwise = torch.cdist(X, Y)
    x_to_y = pairwise.min(-1)[0].mean(-1)
    y_to_x = pairwise.min(-2)[0].mean(-1)
    return x_to_y + y_to_x
def get_clones(module, N):
    """Return a ModuleList holding N independent deep copies of `module`."""
    clones = [copy.deepcopy(module) for _ in range(N)]
    return nn.ModuleList(clones)
def get_errors(d, gt_mat):
    """Per-point geodesic error given a distance matrix and ground truth.

    For every row i of `d`, the predicted match is argmin_j d[i, j]; the
    returned error is gt_mat[pred, i].
    """
    matches = torch.argmin(d, dim=-1).cpu()
    err = np.empty(matches.shape[0])
    for i, pred in enumerate(matches):
        err[i] = gt_mat[pred, i]
    return err
def get_errors_2s(d1, d2, gt_mat):
    """Two-step geodesic error: map each point through d1's argmin, then d2's.

    err[i] = gt_mat[argmin(d2)[argmin(d1)[i]], i].
    """
    first = torch.argmin(d1, dim=-1)
    second = torch.argmin(d2, dim=-1)
    n = d1.shape[0]
    err = np.empty(n)
    for i in range(n):
        err[i] = gt_mat[second[first[i]], i]
    return err
def geo_plot(err):
    """Plot the cumulative error curve: fraction of points with error below
    each threshold in [0, 1]. Shows the figure as a side effect."""
    err = np.array(err)
    x = np.linspace(0, 1, 1000)
    y = np.mean(err[:, None] < x[None, :], 0)
    plt.plot(x, y)
    plt.grid()
    plt.show()
def plot3d(x):
    """Show an interactive plotly scatter of the (n, 3) point array `x`."""
    fig = go.Figure(data=[go.Scatter3d(x=x[:, 0], y=x[:, 1], z=x[:, 2], mode='markers')])
    fig.show()
def plot3d_col(x, c):
    """Show an interactive plotly scatter of `x` coloured by per-point values `c`."""
    fig = go.Figure(data=[go.Scatter3d(x=x[:, 0], y=x[:, 1], z=x[:, 2],
                                       mode='markers',
                                       marker=dict(color=c, colorscale="viridis", size=5, showscale=True),
                                       )])
    fig.show()
def save3d_col(x, c, path):
    """Render the coloured scatter of `x` and save it as a PNG at `path`."""
    fig = go.Figure(data=[go.Scatter3d(x=x[:, 0], y=x[:, 1], z=x[:, 2],
                                       mode='markers',
                                       marker=dict(color=c, colorscale="viridis", size=5, showscale=True),
                                       )])
    fig.write_image(path, format="png")
def plot_colormap(verts, trivs, cols, colorscale=[[0, 'rgb(0,0,255)'], [0.5, 'rgb(255,255,255)'], [1, 'rgb(255,0,0)']]):
    # "Draw multiple triangle meshes side by side"
    # verts/trivs/cols may each be a single array or a list of arrays; a
    # subplot is drawn for each (vert, triv, col) triple. Returns the figure
    # and shows it as a side effect. NOTE: the mutable default `colorscale`
    # list is shared across calls but never mutated here, so it is harmless.
    if type(verts) is not list:
        verts = [verts]
    if type(trivs) is not list:
        trivs = [trivs]
    if type(cols) is not list:
        cols = [cols]
    nshapes = min([len(verts), len(cols), len(trivs)])
    fig = make_subplots(rows=1, cols=nshapes, specs=[[{'type': 'surface'} for i in range(nshapes)]])
    for i, [vert, triv, col] in enumerate(zip(verts, trivs, cols)):
        if col is not None:
            # y/z axes are swapped so the figure is displayed upright.
            mesh = go.Mesh3d(x=vert[:, 0], z=vert[:, 1], y=vert[:, 2],
                             i=triv[:, 0], j=triv[:, 1], k=triv[:, 2],
                             intensity=col,
                             colorscale=colorscale,
                             color='lightpink', opacity=1)
        else:
            mesh = go.Mesh3d(x=vert[:, 0], z=vert[:, 1], y=vert[:, 2],
                             i=triv[:, 0], j=triv[:, 1], k=triv[:, 2])
        fig.add_trace(mesh, row=1, col=i + 1)
        fig.get_subplot(1, i + 1).aspectmode = "data"
        camera = dict(
            up=dict(x=0, y=0, z=1),
            center=dict(x=0, y=0, z=0),
            eye=dict(x=0, y=4, z=-1)
        )
        fig.get_subplot(1, i + 1).camera = camera
    # fig = go.Figure(data=[mesh], layout=layout)
    fig.update_layout(
        # autosize=True,
        margin=dict(l=10, r=10, t=10, b=10),
        paper_bgcolor="LightSteelBlue")
    fig.show()
    return fig
def RandomRotateCustom(shape, degree, axis):
    """Rotate `shape` about one axis by a uniform random angle.

    The angle is drawn uniformly from [-degree, degree] (in degrees);
    axis is 0, 1, or anything else for the third axis. Points are row
    vectors, so rotation is applied as shape @ R.
    """
    device = shape.device
    angle = np.pi * np.random.uniform(low=-np.abs(degree), high=np.abs(degree)) / 180.0
    sin, cos = np.sin(angle), np.cos(angle)
    if axis == 0:
        rot = [[1, 0, 0], [0, cos, sin], [0, -sin, cos]]
    elif axis == 1:
        rot = [[cos, 0, -sin], [0, 1, 0], [sin, 0, cos]]
    else:
        rot = [[cos, sin, 0], [-sin, cos, 0], [0, 0, 1]]
    return torch.matmul(shape, torch.Tensor(rot).to(device))
def RandomRotateCustomAllAxis(shape, degree):
    """Rotate `shape` about each of the three axes in turn, each by an
    independent uniform random angle in [-degree, degree] (degrees).

    Bug fix: the original overwrote `degree` with the sampled angle converted
    to radians, so the 2nd and 3rd rotations were drawn from a drastically
    smaller range than requested. All three angles now use the caller's bound.
    """
    device = shape.device
    bound = np.abs(degree)
    for axis in range(3):
        angle = np.pi * np.random.uniform(low=-bound, high=bound) / 180.0
        sin, cos = np.sin(angle), np.cos(angle)
        if axis == 0:
            rot = [[1, 0, 0], [0, cos, sin], [0, -sin, cos]]
        elif axis == 1:
            rot = [[cos, 0, -sin], [0, 1, 0], [sin, 0, cos]]
        else:
            rot = [[cos, sin, 0], [-sin, cos, 0], [0, 0, 1]]
        shape = torch.matmul(shape, torch.Tensor(rot).to(device))
    return shape
def split_shape(shape, threshold=10000):
    """Split a point set into interleaved parts of roughly `threshold` points.

    Part k receives every row i with i % n_parts == k (indexing along the
    first dimension). Returns (parts_dict, n_parts).
    """
    n_parts = shape.shape[-2] // threshold
    parts = {}
    for k in range(n_parts):
        idx = [i for i in range(shape.shape[-2]) if i % n_parts == k]
        parts[k] = shape[idx]
    return parts, n_parts
def put_back_together(d, n_v):
    """Reassemble `n_v` interleaved points from a dict of batched parts.

    Expects d[k] of shape (1, m, 3); point i comes from part i % n_parts at
    offset i // n_parts. Returns an (n_v, 3) tensor.
    """
    n_parts = len(d.keys())
    out = torch.empty(n_v, 3)
    for i in range(n_v):
        part, offset = i % n_parts, i // n_parts
        out[i] = d[part][:, offset, :]
    return out
def approximate_geodesic_distances(v: np.ndarray, f: np.ndarray) -> np.ndarray:
    """
    Compute the geodesic distances approximated by the dijkstra method weighted by
    euclidean edge length
    Args:
        v: the mesh points
        f: the mesh faces
    Returns:
        an nxn matrix which contains the approximated distances
    """
    a = igl.adjacency_matrix(f)
    # Keep only Euclidean distances along mesh edges and use them as graph
    # edge weights for Dijkstra.
    dist = cdist(v, v)
    values = dist[np.nonzero(a)]
    matrix = sparse.coo_matrix((values, np.nonzero(a)), shape=(v.shape[0], v.shape[0]))
    d = dijkstra(matrix, directed=False)
    return d
def area_weighted_normalization(shape, rescale: bool = True):
    """Centre `shape` at its area-weighted barycentre, optionally rescaling first.

    The 0.741 factor presumably matches the scale of the training data --
    TODO confirm (cf. the 0.7535 factor used by the FAUST loader).
    """
    if rescale:
        shape = shape * 0.741
    shape_area = est_area(shape[None, ...])[0]
    shape = shape - (
        shape * (shape_area / shape_area.sum(-1, keepdims=True))[..., None]
    ).sum(-2, keepdims=True)
    return shape
| 7,124 | 27.846154 | 120 | py |
transmatching | transmatching-main/test/test.py | import torch
from tqdm import tqdm
from transmatching.Model.model import Model
from argparse import ArgumentParser
from transmatching.Utils.utils import get_errors, area_weighted_normalization, chamfer_loss, approximate_geodesic_distances
import numpy as np
from pytorch_lightning import seed_everything
from scipy.io import loadmat
def main(args):
    """Evaluate a trained model on random FAUST pairs and print the mean
    geodesic matching error. Requires a CUDA device."""
    # ------------------------------------------------------------------------------------------------------------------
    # BEGIN SETUP -----------------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------
    seed_everything(0)
    faust = loadmat(args.path_data)
    shapes = faust["vertices"]
    # .mat faces are 1-indexed; convert to 0-indexed.
    faces = faust["faces"] - 1
    n_pairs = 100
    n = shapes.shape[0]
    # INITIALIZE MODEL
    model = Model(args.d_bottleneck, args.d_latent, args.d_channels, args.d_middle, args.N,
                  args.heads, args.max_seq_len, args.d_origin, args.dropout).cuda()
    model.load_state_dict(torch.load("models/"+args.run_name))
    print("MODEL RESUMED ---------------------------------------------------------------------------------------\n")
    # ------------------------------------------------------------------------------------------------------------------
    # END SETUP -------------------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------
    model.eval()
    with torch.no_grad():
        err = []
        for _ in tqdm(range(n_pairs)):
            # Draw a random pair of distinct shapes (A, B).
            shape_A_idx = np.random.randint(n)
            shape_B_idx = np.random.randint(n)
            while shape_A_idx == shape_B_idx: # avoid taking A exactly equal to B
                shape_B_idx = np.random.randint(n)
            shape_A = shapes[shape_A_idx]
            shape_B = shapes[shape_B_idx]
            # Ground-truth geodesics on B, normalised to [0, 1].
            geod = approximate_geodesic_distances(shape_B, faces.astype("int"))
            geod /= np.max(geod)
            points_A = area_weighted_normalization(torch.from_numpy(shape_A), rescale=False)
            points_B = area_weighted_normalization(torch.from_numpy(shape_B), rescale=False)
            # Match in both directions; keep whichever has lower chamfer loss.
            y_hat_1 = model(points_A.unsqueeze(0).float().cuda(), points_B.unsqueeze(0).float().cuda())
            y_hat_2 = model(points_B.unsqueeze(0).float().cuda(), points_A.unsqueeze(0).float().cuda())
            d12 = chamfer_loss(points_A.float().cuda(), y_hat_1)
            d21 = chamfer_loss(points_B.float().cuda(), y_hat_2)
            if d12 < d21:
                d = torch.cdist(points_A.float().cuda(), y_hat_1).squeeze(0).cpu()
                err.extend(get_errors(d, geod))
            else:
                d = torch.cdist(points_B.float().cuda(), y_hat_2).squeeze(0).cpu()
                err.extend(get_errors(d.transpose(1, 0), geod))
        print("ERROR: ", np.mean(np.array(err)))
if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--run_name", default="trained_model")
    # Numeric hyper-parameters declare an explicit `type`: without it,
    # argparse passes command-line overrides through as strings, which breaks
    # model construction even though the in-code defaults happen to be ints.
    parser.add_argument("--d_bottleneck", type=int, default=32)
    parser.add_argument("--d_latent", type=int, default=64)
    parser.add_argument("--d_channels", type=int, default=64)
    parser.add_argument("--d_origin", type=int, default=3)
    parser.add_argument("--d_middle", type=int, default=512)
    parser.add_argument("--N", type=int, default=8)
    parser.add_argument("--heads", type=int, default=4)
    parser.add_argument("--max_seq_len", type=int, default=100)
    parser.add_argument("--dropout", type=float, default=0.00)
    parser.add_argument("--num_workers", type=int, default=0)
    parser.add_argument("--path_data", default="dataset/FAUST_noise_0.00.mat")
    args = parser.parse_args()
    main(args)
| 3,814 | 29.766129 | 123 | py |
transmatching | transmatching-main/test/train.py | import os
import time
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transmatching.Data.dataset_smpl import SMPLDataset
from transmatching.Model.model import Model
from argparse import ArgumentParser
def main(args):
    """Train the matching model on SMPL shapes; checkpoint after every epoch.
    Requires a CUDA device."""
    # ------------------------------------------------------------------------------------------------------------------
    # BEGIN SETUP -----------------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------
    # DATASET
    data_train = SMPLDataset(args.path_data, train=True)
    # DATALOADERS
    dataloader_train = DataLoader(data_train, batch_size=args.batch_size, shuffle=True,
                                  num_workers=args.num_workers, drop_last=True)
    # INITIALIZE MODEL
    model = Model(args.d_bottleneck, args.d_latent, args.d_channels, args.d_middle,
                  args.N, args.heads, args.max_seq_len, args.d_origin, args.dropout).cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999), eps=1e-9)
    # ------------------------------------------------------------------------------------------------------------------
    # END SETUP -------------------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------
    # BEGIN TRAINING ---------------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------
    print("TRAINING --------------------------------------------------------------------------------------------------")
    start = time.time()
    for epoch in range(args.n_epoch):
        model = model.train()
        ep_loss = 0
        for item in tqdm(dataloader_train):
            # Split each batch in half: first half sources, second half targets.
            shapes = item["x"].cuda()
            shape1 = shapes[:args.batch_size // 2, :, :]
            shape2 = shapes[args.batch_size // 2:, :, :]
            y_hat = model(shape1, shape2)
            # Reconstruction loss: decoded points should coincide with shape1.
            loss = ((y_hat - shape1) ** 2).sum()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            ep_loss += loss.item()
        print(f"EPOCH: {epoch} HAS FINISHED, in {time.time() - start} SECONDS! ---------------------------------------")
        start = time.time()
        print(f"LOSS: {ep_loss} --------------------------------------------------------------------------------------")
        # Checkpoint (overwrite) after every epoch.
        os.makedirs("models", exist_ok=True)
        torch.save(model.state_dict(), "models/"+args.run_name)
# ------------------------------------------------------------------------------------------------------------------
# END TRAINING -----------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--run_name", default="custom_train")
    # Numeric hyper-parameters declare an explicit `type`: without it,
    # argparse passes command-line overrides through as strings, which breaks
    # model construction and the optimizer's lr even though the in-code
    # defaults happen to be correctly typed.
    parser.add_argument("--d_bottleneck", type=int, default=32)
    parser.add_argument("--d_latent", type=int, default=64)
    parser.add_argument("--d_channels", type=int, default=64)
    parser.add_argument("--d_origin", type=int, default=3)
    parser.add_argument("--d_middle", type=int, default=512)
    parser.add_argument("--N", type=int, default=8)
    parser.add_argument("--heads", type=int, default=4)
    parser.add_argument("--max_seq_len", type=int, default=100)
    parser.add_argument("--dropout", type=float, default=0.01)
    parser.add_argument("--lr", type=float, default=0.0001)
    parser.add_argument("--n_epoch", type=int, default=1000000)
    parser.add_argument("--batch_size", type=int, default=16)
    parser.add_argument("--num_workers", type=int, default=8)
    parser.add_argument("--path_data", default="dataset/")
    args = parser.parse_args()
    main(args)
| 4,167 | 31.310078 | 120 | py |
BeatNet | BeatNet-main/setup.py | """
Created 07-01-21 by Mojtaba Heydari
"""
# Local imports
# None.
# Third party imports
# None.
# Python standard library imports
import setuptools
from setuptools import find_packages
import distutils.cmd
# Required packages
# This single list feeds both `install_requires` in setup() below and the
# `make_reqs` custom command that exports requirements.txt.
REQUIRED_PACKAGES = [
    'numpy',
    'cython',
    'librosa>=0.8.0',
    'numba==0.54.1',  # Manually specified here as librosa incorrectly states that it is compatible with the latest version of numba although 0.50.0 is not compatible.
    'scipy',
    'mido>=1.2.6',
    'pytest',
    #'pyaudio',
    ##'pyfftw',
    'madmom',
    'torch',
    'Matplotlib',
]
class MakeReqsCommand(distutils.cmd.Command):
    """Custom setup.py command that dumps REQUIRED_PACKAGES to requirements.txt."""
    description = 'Export requirements to a requirements.txt file.'
    user_options = []

    def initialize_options(self):
        """Set default values for options (this command defines none)."""
        pass

    def finalize_options(self):
        """Post-process options (this command defines none)."""
        pass

    def run(self):
        """Write one requirement specifier per line to ./requirements.txt."""
        with open('./requirements.txt', 'w') as out:
            for spec in REQUIRED_PACKAGES:
                out.write(spec + '\n')
# Package definition: registers the `make_reqs` command, declares the src/
# layout and the PyPI metadata.
setuptools.setup(
    cmdclass={
        'make_reqs': MakeReqsCommand
    },
    # Package details
    name="BeatNet",
    version="1.1.0",
    package_dir={"": "src"},
    packages=find_packages(where="src"),
    # packages=find_packages(),
    include_package_data=True,
    install_requires=REQUIRED_PACKAGES,
    # Metadata to display on PyPI
    author="Mojtaba Heydari",
    author_email="mhydari@ur.rochester.edu",
    description="A package for online and offline music beat, downbeat tempo and meter tracking using BeatNet AI",
    keywords="Beat tracking, Downbeat tracking, meter detection, tempo tracking, particle filtering, real-time beat, real-time tempo",
    url="https://github.com/mjhydri/BeatNet"
    # NOTE(review): no trailing comma after `url` — this is only valid while the
    # entry_points block below remains commented out; add a comma before
    # uncommenting it.
    # CLI - not developed yet
    #entry_points = {
    #    'console_scripts': ['beatnet=beatnet.cli:main']
    #}
)
| 1,983 | 22.619048 | 167 | py |
BeatNet | BeatNet-main/src/BeatNet/BeatNet.py | # This is the script handler of the BeatNet. First, it extracts the input embeddings of the current frame or the whole song, depending on the working mode.
# Then by feeding them into the selected pre-trained model, it calculates the beat/downbeat activation probabilities.
# Finally, it infers beats and downbeats of the current frame/song based on one of the four performance modes and selected inference method.
import os
import torch
import numpy as np
from madmom.features import DBNDownBeatTrackingProcessor
from BeatNet.particle_filtering_cascade import particle_filter_cascade
from BeatNet.log_spect import LOG_SPECT
import librosa
import sys
from BeatNet.model import BDA
import pyaudio
import matplotlib.pyplot as plt
import time
import threading
class BeatNet:
    '''
    The main BeatNet handler class including different trained models, different modes for extracting the activation and causal and non-causal inferences

        Parameters
        ----------
        Inputs:
            model: A scalar in the range [1,3] to select which pre-trained CRNN model to utilize.
            mode: A string to determine the working mode, i.e. 'stream', 'realtime', 'online' and 'offline'.
                'stream' mode: Uses the system microphone to capture sound and does the process in real-time. Due to training the model on standard mastered songs, it is highly recommended to make sure the microphone sound is as loud as possible. Less reverberation leads to better results.
                'realtime' mode: Reads an audio file chunk by chunk, and processes each chunk at the time.
                'online' mode: Reads the whole audio and feeds it into the BeatNet CRNN at the same time and then infers the parameters of interest using particle filtering.
                'offline' mode: Reads the whole audio and feeds it into the BeatNet CRNN at the same time and then infers the parameters of interest using madmom's dynamic Bayesian network. This method is quicker than madmom beat/downbeat tracking.
            inference_model: A string to choose the inference approach, i.e. 'PF' standing for Particle Filtering for causal inferences and 'DBN' standing for Dynamic Bayesian Network for non-causal usages.
            plot: A list of strings to plot, out of 'activations', 'beat_particles' and 'downbeat_particles'.
                Note that to speed up plotting, rather than new plots per frame, the previous plots get updated. However, to secure realtime results, it is recommended to not plot or have as small a number of plots as possible at the time.
            thread: Whether to run the inference in a separate thread instead of the main thread.
            device: type of device, 'cpu' or 'cuda:i'.
        Outputs:
            A vector including beat times and downbeat identifier columns, respectively, with the following shape: numpy_array(num_beats, 2).
    '''

    def __init__(self, model, mode='online', inference_model='PF', plot=[], thread=False, device='cpu'):
        self.mode = mode
        self.inference_model = inference_model
        self.plot = plot
        self.thread = thread
        self.device = device
        if plot and thread:
            raise RuntimeError('Plotting cannot be accomplished in the threading mode')
        self.sample_rate = 22050
        self.log_spec_sample_rate = self.sample_rate
        self.log_spec_hop_length = int(20 * 0.001 * self.log_spec_sample_rate)  # 20 ms hop
        self.log_spec_win_length = int(64 * 0.001 * self.log_spec_sample_rate)  # 64 ms analysis window
        self.proc = LOG_SPECT(sample_rate=self.log_spec_sample_rate, win_length=self.log_spec_win_length,
                              hop_size=self.log_spec_hop_length, n_bands=[24], mode=self.mode)
        if self.inference_model == "PF":  # instantiating a Particle Filter decoder - chosen for online inference
            self.estimator = particle_filter_cascade(beats_per_bar=[], fps=50, plot=self.plot, mode=self.mode)
        elif self.inference_model == "DBN":  # instantiating an HMM decoder - chosen for offline inference
            self.estimator = DBNDownBeatTrackingProcessor(beats_per_bar=[2, 3, 4], fps=50)
        else:
            raise RuntimeError('inference_model can be either "PF" or "DBN"')
        script_dir = os.path.dirname(__file__)
        # BeatNet CRNN instance that extracts joint beat and downbeat activations
        # (replaces the incoming integer `model` selector; the original also
        # briefly stored the selector in self.model, which was dead code)
        self.model = BDA(272, 150, 2, self.device)  # Beat Downbeat Activation detector
        # loading the pre-trained BeatNet CRNN weights
        if model == 1:  # GTZAN out trained model
            self.model.load_state_dict(torch.load(os.path.join(script_dir, 'models/model_1_weights.pt')), strict=False)
        elif model == 2:  # Ballroom out trained model
            self.model.load_state_dict(torch.load(os.path.join(script_dir, 'models/model_2_weights.pt')), strict=False)
        elif model == 3:  # Rock_corpus out trained model
            self.model.load_state_dict(torch.load(os.path.join(script_dir, 'models/model_3_weights.pt')), strict=False)
        else:
            raise RuntimeError(f'Failed to open the trained model: {model}')
        self.model.eval()
        if self.mode == 'stream':
            # rolling audio buffer: one analysis window plus two hops of context
            self.stream_window = np.zeros(self.log_spec_win_length + 2 * self.log_spec_hop_length, dtype=np.float32)
            self.stream = pyaudio.PyAudio().open(format=pyaudio.paFloat32,
                                                 channels=1,
                                                 rate=self.sample_rate,
                                                 input=True,
                                                 frames_per_buffer=self.log_spec_hop_length,)

    def process(self, audio_path=None):
        """Run beat/downbeat tracking according to self.mode.

        Args:
            audio_path: file path or raw audio array (unused in 'stream' mode).
        Returns:
            numpy_array(num_beats, 2) of beat times and downbeat identifiers
            (except in 'stream' mode, which runs until the stream closes).
        Raises:
            RuntimeError: on a mode/inference_model mismatch or missing audio.
        """
        if self.mode == "stream":
            if self.inference_model != "PF":
                raise RuntimeError('The inference model should be set to "PF" for the streaming mode!')
            self.counter = 0
            while self.stream.is_active():
                self.activation_extractor_stream()  # BeatNet causal neural network streaming mode
                if self.thread:
                    # args must be a one-element tuple; the original passed
                    # `(self.pred)`, i.e. the bare array, which Thread would
                    # unpack row by row as separate arguments
                    x = threading.Thread(target=self.estimator.process, args=(self.pred,), daemon=True)
                    x.start()
                    x.join()
                else:
                    output = self.estimator.process(self.pred)
                self.counter += 1
        elif self.mode == "realtime":
            self.counter = 0
            self.completed = 0
            if self.inference_model != "PF":
                raise RuntimeError('The inference model for the realtime mode should be set to "PF".')
            # fixed: the original tested `audio_path.all() != None`, which raised
            # AttributeError instead of the intended RuntimeError when None
            if audio_path is not None:
                while self.completed == 0:
                    self.activation_extractor_realtime(audio_path)  # BeatNet causal neural network realtime mode
                    if self.thread:
                        x = threading.Thread(target=self.estimator.process, args=(self.pred,), daemon=True)
                        x.start()
                        x.join()
                    else:
                        output = self.estimator.process(self.pred)  # particle filtering online inference
                    self.counter += 1
                return output
            else:
                raise RuntimeError('An audio object or file directory is required for the realtime usage!')
        elif self.mode == "online":
            if audio_path is not None:
                preds = self.activation_extractor_online(audio_path)  # extract activations for the whole file
            else:
                raise RuntimeError('An audio object or file directory is required for the online usage!')
            if self.inference_model == "PF":  # particle filtering inference (causal)
                output = self.estimator.process(preds)
                return output
            elif self.inference_model == "DBN":  # dynamic Bayesian network inference (non-causal)
                output = self.estimator(preds)
                return output
        elif self.mode == "offline":
            if self.inference_model != "DBN":
                raise RuntimeError('The inference model should be set to "DBN" for the offline mode!')
            if audio_path is not None:
                preds = self.activation_extractor_online(audio_path)
                output = self.estimator(preds)  # DBN offline inference
                return output
            else:
                raise RuntimeError('An audio object or file directory is required for the offline usage!')

    def activation_extractor_stream(self):
        """Read one hop from the microphone and update self.pred with the
        beat/downbeat activations of the newest frame."""
        # TODO:
        ''' Streaming window
        Given the training input window's origin set to center, this streaming data formation causes 0.084 (s) delay compared to the trained model that needs to be fixed.
        '''
        with torch.no_grad():
            hop = self.stream.read(self.log_spec_hop_length)
            hop = np.frombuffer(hop, dtype=np.float32)
            self.stream_window = np.append(self.stream_window[self.log_spec_hop_length:], hop)
            if self.counter < 5:
                # not enough context yet: emit zero activations
                self.pred = np.zeros([1, 2])
            else:
                feats = self.proc.process_audio(self.stream_window).T[-1]
                feats = torch.from_numpy(feats)
                feats = feats.unsqueeze(0).unsqueeze(0).to(self.device)
                pred = self.model(feats)[0]
                pred = self.model.final_pred(pred)
                pred = pred.cpu().detach().numpy()
                self.pred = np.transpose(pred[:2, :])

    def activation_extractor_realtime(self, audio_path):
        """Process the audio chunk by chunk; sets self.pred per frame and
        self.completed = 1 when the whole file has been consumed."""
        with torch.no_grad():
            if self.counter == 0:  # loading the audio on the first call
                if isinstance(audio_path, str):
                    self.audio, _ = librosa.load(audio_path, sr=self.sample_rate)  # reading the data
                elif len(np.shape(audio_path)) > 1:
                    self.audio = np.mean(audio_path, axis=1)  # downmix to mono
                else:
                    self.audio = audio_path
            if self.counter < (round(len(self.audio) / self.log_spec_hop_length)):
                if self.counter < 2:
                    self.pred = np.zeros([1, 2])
                else:
                    feats = self.proc.process_audio(self.audio[self.log_spec_hop_length * (self.counter-2):self.log_spec_hop_length * (self.counter) + self.log_spec_win_length]).T[-1]
                    feats = torch.from_numpy(feats)
                    feats = feats.unsqueeze(0).unsqueeze(0).to(self.device)
                    pred = self.model(feats)[0]
                    pred = self.model.final_pred(pred)
                    pred = pred.cpu().detach().numpy()
                    self.pred = np.transpose(pred[:2, :])
            else:
                self.completed = 1

    def activation_extractor_online(self, audio_path):
        """Extract beat/downbeat activations for the entire audio in one pass.

        Returns numpy_array(num_frames, 2) with beat and downbeat activations.
        """
        with torch.no_grad():
            if isinstance(audio_path, str):
                audio, _ = librosa.load(audio_path, sr=self.sample_rate)  # reading the data
            elif len(np.shape(audio_path)) > 1:
                audio = np.mean(audio_path, axis=1)  # downmix to mono
            else:
                audio = audio_path
            feats = self.proc.process_audio(audio).T
            feats = torch.from_numpy(feats)
            feats = feats.unsqueeze(0).to(self.device)
            preds = self.model(feats)[0]  # extracting the activations by passing the feature through the NN
            preds = self.model.final_pred(preds)
            preds = preds.cpu().detach().numpy()
            preds = np.transpose(preds[:2, :])
        return preds
| 12,681 | 58.539906 | 296 | py |
BeatNet | BeatNet-main/src/BeatNet/model.py | import torch.nn as nn
import torch
import torch.nn.functional as F
import numpy as np
class BDA(nn.Module):  # beat_downbeat_activation
    """Beat/downbeat activation estimator: a Conv1d front-end over each frame
    followed by a stateful (streaming) LSTM and a 3-way linear read-out.
    """

    def __init__(self, dim_in, num_cells, num_layers, device):
        super(BDA, self).__init__()
        self.dim_in = dim_in
        self.dim_hd = num_cells
        self.num_layers = num_layers
        self.device = device
        self.conv_out = 150
        self.kernelsize = 10
        self.conv1 = nn.Conv1d(1, 2, self.kernelsize)
        # After the convolution the frame length is dim_in - kernelsize + 1;
        # max-pooling with kernel 2 halves it, and 2 channels remain.
        pooled_len = int((self.dim_in - self.kernelsize + 1) / 2)
        self.linear0 = nn.Linear(2 * pooled_len, self.conv_out)
        self.lstm = nn.LSTM(input_size=self.conv_out,
                            hidden_size=self.dim_hd,
                            num_layers=self.num_layers,
                            batch_first=True,
                            bidirectional=False,
                            )
        self.linear = nn.Linear(in_features=self.dim_hd,
                                out_features=3)
        self.softmax = nn.Softmax(dim=0)
        # Persistent LSTM state, carried across forward() calls for streaming.
        # NOTE(review): the leading dim is hard-coded to 2 (layers) and the
        # batch dim to 1 — this matches num_layers only when num_layers == 2;
        # confirm before changing num_layers.
        self.hidden = torch.zeros(2, 1, self.dim_hd).to(device)
        self.cell = torch.zeros(2, 1, self.dim_hd).to(device)
        self.to(device)

    def forward(self, data):
        """Map (batch, steps, dim_in) features to (batch, 3, steps) logits,
        updating the stored LSTM state as a side effect."""
        batch, steps = data.shape[0], data.shape[1]
        # fold batch and time together and add a channel axis for Conv1d
        frames = data.reshape(-1, self.dim_in).unsqueeze(1)
        frames = F.max_pool1d(F.relu(self.conv1(frames)), 2)
        frames = frames.view(-1, self.num_flat_features(frames))
        frames = self.linear0(frames)
        # restore the (batch, steps, features) layout for the LSTM
        frames = frames.reshape(batch, steps, self.conv_out)
        frames, (self.hidden, self.cell) = self.lstm(frames, (self.hidden, self.cell))
        logits = self.linear(frames)
        return logits.transpose(1, 2)

    def final_pred(self, input):
        """Normalize raw logits with softmax over dim 0."""
        return self.softmax(input)

    def num_flat_features(self, x):
        """Number of features per sample, i.e. the product of all dims but the batch."""
        total = 1
        for dim in x.size()[1:]:  # all dimensions except the batch dimension
            total *= dim
        return total
| 2,088 | 34.40678 | 132 | py |
PaperRobot | PaperRobot-master/New paper writing/test.py | import gc
import os
import time
import torch
import pickle
import argparse
import torch.nn as nn
from eval_final import Evaluate
from loader.preprocessing import prepare_mapping, AssembleMem, printcand, filter_stopwords
from loader.loader import load_file_with_terms
from memory_generator.seq2seq import Seq2seq
from memory_generator.Encoder import EncoderRNN
from memory_generator.Encoder import TermEncoder
from memory_generator.predictor import Predictor
from memory_generator.Decoder import DecoderRNN
# --- Evaluation script: loads a trained memory model, decodes the test split
# --- with beam search and reports BLEU/ROUGE scores.
# Read parameters from command line
parser = argparse.ArgumentParser()
parser.add_argument(
    "--model", default="models/memory/best_dev_model.pth.tar",
    help="Model location"
)
parser.add_argument(
    "--gpu", default="1",
    type=int, help="default is 1. set 0 to disable use gpu."
)
parser.add_argument(
    "--max_len", default="150",
    type=int, help="Max length."
)
parser.add_argument(
    "--data_path", default="data",
    help="data directory path"
)
parser.add_argument(
    "--output", default="data.txt",
    help="data directory path"
)
parser.add_argument(
    "--stop_words", default="1",
    type=int, help="default is 1. set 0 to disable use stopwords."
)
args = parser.parse_args()
# Load the checkpoint on GPU or remap it onto CPU storage.
print('loading model from:', args.model)
if args.gpu:
    state = torch.load(args.model)
else:
    state = torch.load(args.model, map_location=lambda storage, loc: storage)
parameters = state['parameters']
# Data parameters
lower = parameters['lower']
parameters['gpu'] = args.gpu == 1
data = pickle.load(open(args.data_path + '/dataset.pth', 'rb'))
words = data['words']
_, t_dataset = load_file_with_terms(args.data_path + '/test.txt')
# Older checkpoints may not carry the vocabulary mappings; rebuild them from
# the corpus in that case.
# NOTE(review): the bare `except:` also swallows unrelated errors — this
# presumably intends `except KeyError:`; confirm.
try:
    mappings = state['mappings']
except:
    mappings, words_freq = prepare_mapping(words, lower, parameters['freq'])
state_dict = state['state_dict']
# print model parameters
print('Model parameters:')
for k, v in parameters.items():
    print('%s=%s' % (k, v))
# Index data (batch size 1 for evaluation)
t_dataset = AssembleMem(t_dataset, mappings['word2id'], lower, 1, args.max_len, parameters['gpu'])
print("Vocabulary size", t_dataset.vocab_size)
print("%i sentences in test." % (t_dataset.len))
word2id = mappings['word2id']
id2word = mappings['id2word']
vocab_size = len(mappings['id2word'])
device = torch.device("cuda:0" if torch.cuda.is_available() and parameters['gpu'] else "cpu")
# Rebuild the Seq2seq network with the checkpointed hyper-parameters and load
# the trained weights.
embedding = nn.Embedding(t_dataset.vocab_size, parameters['word_dim'], padding_idx=0)
ref_encoder = EncoderRNN(vocab_size, embedding, parameters['word_dim'], parameters['input_dropout_p'])
term_encoder = TermEncoder(embedding, parameters['input_dropout_p'])
decoder = DecoderRNN(vocab_size, embedding, **parameters)
model = Seq2seq(ref_encoder, term_encoder, decoder)
model.load_state_dict(state_dict)
model = model.to(device)
stopwords = filter_stopwords(word2id)
#
# training starts
#
since = time.time()
best_dev = 0.0
epoch_examples_total = t_dataset.len
train_loader = t_dataset.corpus
len_batch_t = len(train_loader)
# Beam-search decode the test set, then score the candidates against the
# references and dump the candidates to --output.
predictor = Predictor(model, id2word, vocab_size)
eval_f = Evaluate()
print("Start computing")
cands, refs, titles, terms = predictor.preeval_batch_beam(t_dataset, False, stopwords, args.stop_words)
print("Start Evaluating")
final_scores = eval_f.evaluate(live=True, cand=cands, ref=refs)
printcand(args.output, cands)
| 3,280 | 28.558559 | 103 | py |
PaperRobot | PaperRobot-master/New paper writing/train.py | import gc
import os
import sys
import time
import torch
import pickle
import argparse
import torch.nn as nn
from collections import OrderedDict
from eval import Evaluate
from loader.logger import Tee
from loader.loader import load_file_with_terms
from loader.preprocessing import prepare_mapping, AssembleMem
from utils.optim import get_optimizer
from memory_generator.seq2seq import Seq2seq
from memory_generator.Encoder import EncoderRNN
from memory_generator.Encoder import TermEncoder
from memory_generator.predictor import Predictor
from memory_generator.Decoder import DecoderRNN
# --- Training script: builds the vocabulary, assembles the memory datasets,
# --- trains the Seq2seq model and checkpoints the best model on dev.
# Read parameters from command line
parser = argparse.ArgumentParser()
parser.add_argument(
    "--lower", default='0',
    type=int, help="Lowercase words (this will not affect character inputs)"
)
parser.add_argument(
    "--word_dim", default="128",
    type=int, help="Token embedding dimension"
)
parser.add_argument(
    "--h", default="8",
    type=int, help="No of attention heads"
)
parser.add_argument(
    "--hop", default="3",
    type=int, help="No of Memory layers"
)
parser.add_argument(
    "--dropout", default="0.2",
    type=float, help="Dropout on the embeddings (0 = no dropout)"
)
parser.add_argument(
    "--layer_dropout", default="0.2",
    type=float, help="Dropout on the layer (0 = no dropout)"
)
parser.add_argument(
    "--lr_method", default="adam",
    help="Learning method (SGD, Adadelta, Adam..)"
)
parser.add_argument(
    "--lr_rate", default="0.001",
    type=float, help="Learning method (SGD, Adadelta, Adam..)"
)
parser.add_argument(
    "--model_dp", default="models/",
    help="model directory path"
)
parser.add_argument(
    "--pre_emb", default="",
    help="Location of pretrained embeddings"
)
parser.add_argument(
    "--gpu", default="1",
    type=int, help="default is 1. set 0 to disable use gpu."
)
parser.add_argument(
    "--num_epochs", default="100",
    type=int, help="Number of training epochs"
)
parser.add_argument(
    "--batch_size", default="2",
    type=int, help="Batch size."
)
parser.add_argument(
    "--max_len", default="150",
    type=int, help="Max length."
)
parser.add_argument(
    "--freq", default="5",
    type=int, help="Min freq."
)
parser.add_argument(
    "--cont", action='store_true', help="Continue training."
)
parser.add_argument(
    "--model", default="models/memory/best_dev_model.pth.tar",
    help="Model location"
)
parser.add_argument(
    "--load", action='store_true', help="Load dataset."
)
parser.add_argument(
    "--data_path", default="data",
    help="data directory path"
)
args = parser.parse_args()
# Parse parameters
parameters = OrderedDict()
parameters['lower'] = args.lower == 1
parameters['freq'] = args.freq
parameters['word_dim'] = args.word_dim
parameters['h'] = args.h
parameters['hop'] = args.hop
parameters['pre_emb'] = args.pre_emb
parameters['input_dropout_p'] = args.dropout
parameters['layer_dropout'] = args.layer_dropout
# NOTE(review): parameters['gpu'] is assigned twice (here and three lines
# below) with the same value — the duplicate is harmless but redundant.
parameters['gpu'] = args.gpu == 1
parameters['batch_size'] = args.batch_size
parameters['max_len'] = args.max_len
parameters['gpu'] = args.gpu == 1
parameters['lr_method'] = args.lr_method
parameters['lr_rate'] = args.lr_rate
parameters['data_path'] = args.data_path
# Check parameters validity
assert os.path.isdir(args.data_path)
assert 0. <= parameters['input_dropout_p'] < 1.0
assert 0. <= parameters['layer_dropout'] < 1.0
assert not parameters['pre_emb'] or parameters['word_dim'] > 0
assert not parameters['pre_emb'] or os.path.isfile(parameters['pre_emb'])
# Build a model directory name out of the hyper-parameter settings so
# different runs checkpoint to different folders.
model_dir = args.model_dp
model_name = ['memory']
for k, v in parameters.items():
    if v == "":
        continue
    if k == 'pre_emb':
        v = os.path.basename(v)
    model_name.append('='.join((k, str(v))))
model_dir = os.path.join(model_dir, ','.join(model_name[:-1]))
os.makedirs(model_dir, exist_ok=True)
# register logger to save print(messages to both stdout and disk)
training_log_path = os.path.join(model_dir, 'training_log.txt')
if os.path.exists(training_log_path):
    os.remove(training_log_path)
f = open(training_log_path, 'w')
sys.stdout = Tee(sys.stdout, f)
# print model parameters
print("Model location: %s" % model_dir)
print('Model parameters:')
for k, v in parameters.items():
    print('%s=%s' % (k, v))
# Data parameters
lower = parameters['lower']
# load previously saved data (--load) or parse the raw text splits and cache
# them to dataset.pth for the next run
if args.load:
    state = pickle.load(open(args.data_path + '/dataset.pth', 'rb'))
    words = state['words']
    r_dataset = state['r_dataset']
    v_dataset = state['v_dataset']
    t_dataset = state['t_dataset']
else:
    words = []
    r_words, r_dataset = load_file_with_terms(args.data_path + '/train.txt')
    words.extend(r_words)
    v_words, v_dataset = load_file_with_terms(args.data_path + '/valid.txt')
    t_words, t_dataset = load_file_with_terms(args.data_path + '/test.txt')
    state = {
        'words': words,
        'r_dataset': r_dataset,
        'v_dataset': v_dataset,
        't_dataset': t_dataset
    }
    pickle.dump(state, open(args.data_path + '/dataset.pth', "wb"))
# Vocabulary is built from the training words only.
mappings, words_freq = prepare_mapping(words, lower, args.freq)
parameters['unk_id'] = mappings['word2id']['<unk>']
parameters['sos_id'] = mappings['word2id']['<sos>']
parameters['eos_id'] = mappings['word2id']['<eos>']
# Index data
r_dataset = AssembleMem(r_dataset, mappings['word2id'], lower, args.batch_size, args.max_len, parameters['gpu'])
v_dataset = AssembleMem(v_dataset, mappings['word2id'], lower, args.batch_size, args.max_len, parameters['gpu'])
print("%i / %i pairs in train / dev." % (r_dataset.len, v_dataset.len))
word2id = mappings['word2id']
id2word = mappings['id2word']
vocab_size = len(mappings['id2word'])
device = torch.device("cuda:0" if torch.cuda.is_available() and parameters['gpu'] else "cpu")
# model initialization
embedding = nn.Embedding(r_dataset.vocab_size, args.word_dim, padding_idx=0)
ref_encoder = EncoderRNN(vocab_size, embedding, parameters['word_dim'], parameters['input_dropout_p'])
term_encoder = TermEncoder(embedding, parameters['input_dropout_p'])
decoder = DecoderRNN(vocab_size, embedding, **parameters)
model = Seq2seq(ref_encoder, term_encoder, decoder)
model = model.to(device)
optimizer = get_optimizer(model, parameters['lr_method'], parameters['lr_rate'])
#
# training starts
#
since = time.time()
best_dev = 0.0
num_epochs = args.num_epochs
epoch_examples_total = r_dataset.len
train_loader = r_dataset.corpus
len_batch_t = len(train_loader)
print('train batches', len_batch_t)
start_epoch = 0
# continue training: restore model/optimizer state from an earlier checkpoint
if args.cont:
    print('loading model from:', args.model)
    if args.gpu:
        state = torch.load(args.model)
    else:
        state = torch.load(args.model, map_location=lambda storage, loc: storage)
    state_dict = state['state_dict']
    model.load_state_dict(state_dict)
    state_dict = state['optimizer']
    optimizer.load_state_dict(state_dict)
    start_epoch = state['epoch']
    best_dev = state['best_prec1']
for epoch in range(num_epochs):
    print('-' * 10)
    print('Epoch {}/{}'.format(epoch + start_epoch, num_epochs - 1))
    # epoch start time
    time_epoch_start = time.time()
    # train
    model.train(True)
    torch.set_grad_enabled(True)
    epoch_loss = 0
    for batch_idx in range(len_batch_t):
        batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, batch_t, \
            batch_o_t = r_dataset.get_batch(batch_idx)
        # full teacher forcing during training
        losses = model(batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, batch_t,
                       batch_o_t, teacher_forcing_ratio=1)
        batch_loss = losses.mean()
        # print(losses)
        model.zero_grad()
        batch_loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
        optimizer.step()
        num_examples = batch_s.size(0)
        loss = batch_loss.item()
        # accumulate the per-example loss for the epoch average below
        epoch_loss += num_examples * loss
        sys.stdout.write(
            '%d batches processed. current batch loss: %f\r' %
            (batch_idx, loss)
        )
        sys.stdout.flush()
        del batch_s, batch_o_s, batch_t, batch_o_t, source_len, batch_term, batch_o_term
        gc.collect()
        # torch.cuda.empty_cache()
    epoch_loss_avg = epoch_loss / float(epoch_examples_total)
    log_msg = "Finished epoch %d: Train %s: %.4f" % (epoch + start_epoch, "Avg NLLLoss", epoch_loss_avg)
    print()
    print(log_msg)
    # Evaluate on dev: harmonic mean of ROUGE-L and (scaled) BLEU-4 decides
    # whether to checkpoint.
    predictor = Predictor(model, id2word, vocab_size)
    eval_f = Evaluate()
    print("Start Evaluating")
    cand, ref, titles, terms = predictor.preeval_batch(v_dataset)
    final_scores = eval_f.evaluate(live=True, cand=cand, ref=ref)
    final_scores['Bleu_4'] *= 10.0
    epoch_score = 2*final_scores['ROUGE_L']*final_scores['Bleu_4']/(final_scores['Bleu_4']+ final_scores['ROUGE_L'])
    if epoch_score > best_dev:
        best_dev = epoch_score
        print('new best score on dev: %.4f' % best_dev)
        print('saving the current model to disk...')
        state = {
            'epoch': epoch + 1,
            'parameters': parameters,
            'state_dict': model.state_dict(),
            'best_prec1': best_dev,
            'optimizer': optimizer.state_dict(),
            'mappings': mappings
        }
        torch.save(state, os.path.join(model_dir, 'best_dev_model.pth.tar'))
    print("Examples")
    print("Output:", cand[1])
    print("Refer:", ref[1])
    # epoch end time
    time_epoch_end = time.time()
    # torch.cuda.empty_cache()
    print('epoch training time: %f seconds' % round(
        (time_epoch_end - time_epoch_start), 2))
print('best dev: ', best_dev)
| 9,621 | 32.065292 | 116 | py |
PaperRobot | PaperRobot-master/New paper writing/input.py | import torch
import pickle
import argparse
import torch.nn as nn
from loader.preprocessing import prepare_mapping, filter_stopwords
from memory_generator.seq2seq import Seq2seq
from memory_generator.Encoder import EncoderRNN
from memory_generator.Encoder import TermEncoder
from memory_generator.predictor import Predictor
from memory_generator.Decoder import DecoderRNN
# --- Interactive demo: loads a trained memory model and generates output for
# --- a source sentence and term list typed on stdin, in an endless loop.
# Read parameters from command line
parser = argparse.ArgumentParser()
parser.add_argument(
    "--model", default="models/memory/best_dev_model.pth.tar",
    help="Model location"
)
parser.add_argument(
    "--gpu", default="1",
    type=int, help="default is 1. set 0 to disable use gpu."
)
parser.add_argument(
    "--batch_size", default="100",
    type=int, help="Batch size."
)
parser.add_argument(
    "--max_len", default="150",
    type=int, help="Max length."
)
parser.add_argument(
    "--stop_words", default="1",
    type=int, help="default is 1. set 0 to disable use stopwords."
)
parser.add_argument(
    "--data_path", default="data",
    help="data directory path"
)
args = parser.parse_args()
# input()
print('loading model from:', args.model)
if args.gpu:
    state = torch.load(args.model)
else:
    state = torch.load(args.model, map_location=lambda storage, loc: storage)
parameters = state['parameters']
# Data parameters
lower = parameters['lower']
parameters['gpu'] = args.gpu == 1
data = pickle.load(open(args.data_path + '/dataset.pth', 'rb'))
words = data['words']
# Older checkpoints may not carry the vocabulary mappings; rebuild them from
# the corpus in that case.
# NOTE(review): the bare `except:` also hides unrelated errors — presumably
# `except KeyError:` is meant; confirm.
try:
    mappings = state['mappings']
except:
    mappings, words_freq = prepare_mapping(words, lower, parameters['freq'])
state_dict = state['state_dict']
# print model parameters
print('Model parameters:')
for k, v in parameters.items():
    print('%s=%s' % (k, v))
# Index data
word2id = mappings['word2id']
id2word = mappings['id2word']
vocab_size = len(mappings['id2word'])
device = torch.device("cuda:0" if torch.cuda.is_available() and parameters['gpu'] else "cpu")
# Rebuild the Seq2seq network with the checkpointed hyper-parameters and load
# the trained weights.
embedding = nn.Embedding(vocab_size, parameters['word_dim'], padding_idx=0)
ref_encoder = EncoderRNN(vocab_size, embedding, parameters['word_dim'], parameters['input_dropout_p'])
term_encoder = TermEncoder(embedding, parameters['input_dropout_p'])
decoder = DecoderRNN(vocab_size, embedding, **parameters)
model = Seq2seq(ref_encoder, term_encoder, decoder)
model.load_state_dict(state_dict)
model = model.to(device)
stopwords = filter_stopwords(word2id)
# print(stopwords)
#
# training starts
#
while (1):
    batch_s, batch_o_s, source_len, batch_term, batch_o_term, term_len = [], [], [], [], [], []
    seq_str = input("Source:\n")
    seq = seq_str.strip().split(' ')
    _source, source_oov,_o_source, list_oovs = [],[],[],[]
    # Map each source token to its id; out-of-vocabulary tokens become <unk>
    # in `_source` and get extended (vocab_size + oov index) ids in `_o_source`
    # so the copy mechanism can still point at them.
    for _s in seq:
        try:
            _source.append(word2id[_s])
        except KeyError:
            _source.append(word2id['<unk>'])
            if _s not in source_oov:
                _o_source.append(len(source_oov) + vocab_size)
                source_oov.append(_s)
            else:
                _o_source.append(source_oov.index(_s) + vocab_size)
        else:
            _o_source.append(word2id[_s])
    terms = input("terms:\n")
    terms = terms.strip().split(' ')
    _term, _o_term = [], []
    # Same OOV handling for the term list; OOV terms share the source's
    # extended-id space.
    for _t in terms:
        try:
            _term.append(word2id[_t])
        except KeyError:
            _term.append(word2id['<unk>'])
            if _t not in source_oov:
                _o_term.append(len(source_oov) + vocab_size)
                source_oov.append(_t)
            else:
                _o_term.append(source_oov.index(_t) + vocab_size)
        else:
            _o_term.append(word2id[_t])
    max_source_oov = len(source_oov)
    source_len.append(len(_o_source))
    oovidx2word = {idx: word for idx, word in enumerate(source_oov)}
    list_oovs.append(oovidx2word)
    # NOTE(review): tensors are moved with .cuda() unconditionally, ignoring
    # the --gpu flag and the `device` computed above — this crashes on
    # CPU-only machines; confirm intended behavior.
    batch_s = torch.LongTensor([_source]).cuda()
    batch_o_s = torch.LongTensor([_o_source]).cuda()
    batch_term = torch.LongTensor([_term]).cuda()
    batch_o_term = torch.LongTensor([_o_term]).cuda()
    source_len = torch.LongTensor(source_len).cuda()
    predictor = Predictor(model, id2word, vocab_size)
    print("Output:")
    predictor.predict_beam(batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, list_oovs,
                           stopwords, args.stop_words)
    print()
| 4,304 | 30.888889 | 111 | py |
PaperRobot | PaperRobot-master/New paper writing/loader/preprocessing.py | from collections import Counter
import torch
import json
import string
# Mask variable
def _mask(prev_generated_seq, device, eos_id):
prev_mask = torch.eq(prev_generated_seq, eos_id)
lengths = torch.argmax(prev_mask, dim=1)
max_len = prev_generated_seq.size(1)
mask = []
for i in range(prev_generated_seq.size(0)):
if lengths[i] == 0:
mask_line = [0] * max_len
else:
mask_line = [0] * lengths[i].item()
mask_line.extend([1] * (max_len - lengths[i].item()))
mask.append(mask_line)
mask = torch.ByteTensor(mask)
mask = mask.to(device)
return prev_generated_seq.data.masked_fill_(mask, 0)
def create_mapping(freq, min_freq=0, max_vocab=50000):
    """Build item<->id lookup tables from a frequency Counter.

    Ids are assigned in decreasing-frequency order after four reserved
    special tokens.  Items occurring at most ``min_freq`` times are skipped
    and only the ``max_vocab`` most common items are considered.

    Returns:
        (item2id, id2item) dictionaries.
    """
    item2id = {'<pad>': 0, '<unk>': 1, '<sos>': 2, '<eos>': 3}
    specials = len(item2id)
    for rank, (token, count) in enumerate(freq.most_common(max_vocab)):
        if count > min_freq:
            item2id[token] = rank + specials
    id2item = dict((idx, token) for token, idx in item2id.items())
    return item2id, id2item
def create_dict(item_list):
    """Count occurrences of each item in a flat list and return the Counter."""
    assert type(item_list) is list
    return Counter(item_list)
def prepare_mapping(words, lower, min_freq):
    """
    prepare word2id
    :param words: flat list of word tokens from the corpus
    :param lower: whether to lowercase tokens before counting
    :param min_freq: tokens with frequency <= min_freq are dropped
    :return mappings dict with 'word2id'/'id2word' plus the word-frequency Counter
    """
    if lower:
        words = [w.lower() for w in words]
    words_freq = create_dict(words)
    word2id, id2word = create_mapping(words_freq, min_freq)
    # Report unique vocabulary size vs. total token count.  (The original
    # summed len(x) over words, i.e. counted characters, not tokens.)
    print("Found %i unique words (%i in total)" % (
        len(word2id), len(words)
    ))
    mappings = {
        'word2id': word2id,
        'id2word': id2word
    }
    return mappings, words_freq
class AssembleMem:
    """Vectorized, batched container for (title, terms, abstract) records.

    Each record is converted to parallel id sequences: an in-vocabulary
    version (OOV words become <unk>) and an "extended" version in which
    source-side OOV words get temporary ids >= vocab_size, pointer-network
    style, so the decoder can copy them.
    """
    def __init__(self, dataset, word2id, lower=True, batch_size=64, max_len=30,
                 cuda=torch.cuda.is_available(), pmid=False):
        # dataset is (sources, terms, targets[, pmids]); zip into per-record tuples.
        if pmid:
            sources, terms, targets, pmids = dataset
            data = list(zip(sources, terms, targets, pmids))
        else:
            sources, terms, targets = dataset
            data = list(zip(sources, terms, targets))
        self.pmid = pmid
        self.batch_size = batch_size
        self.len = len(sources)
        self.word2id = word2id
        self.lower = lower
        self.max_len = max_len
        self.device = torch.device("cuda:0" if cuda and torch.cuda.is_available() else "cpu")
        self.vocab_size = len(word2id)
        # Pre-vectorize the whole corpus into ready-to-use batches.
        self.corpus = self.batchfy(data)
    def batchfy(self, data):
        """Split records into batch_size chunks and vectorize each chunk."""
        self.len = len(data)
        data = [data[i:i+self.batch_size] for i in range(0, len(data), self.batch_size)]
        corpus = [self.vectorize(sample) for sample in data]
        return corpus
    def add_start_end(self, vector):
        """Wrap an id sequence as <sos> ... <eos> (mutates *vector* in place)."""
        vector.append(self.word2id['<eos>'])
        return [self.word2id['<sos>']] + vector
    def pad_vector(self, vector, max_len):
        """Right-pad an id sequence with 0 (<pad>) up to max_len (in place)."""
        padding = max_len - len(vector)
        vector.extend([0] * padding)
        return vector
    def vectorize(self, sample):
        """Turn one batch of records into padded LongTensors plus metadata.

        Returns source/term/target id tensors (plain and OOV-extended),
        source lengths, the max OOV count in the batch, per-record
        OOV-id->word maps, and the raw token lists.
        """
        # Sort by source length (descending), as required by packed RNNs.
        sample.sort(key=lambda x: len(x[0]), reverse = True)
        list_oovs, targets, sources, terms = [], [], [], []
        batch_s, batch_o_s, source_len, batch_term, batch_o_term, term_len = [], [], [], [], [], []
        batch_t, batch_o_t, target_len = [], [], []
        max_source_oov = 0
        if self.pmid:
            pmids = []
        for data in sample:
            # title
            source_oov = []
            _o_source, _source = [], []
            sources.append(data[0])
            if self.pmid:
                pmids.append(data[3])
            for _s in data[0]:
                if self.lower:
                    _s = _s.lower()
                try:
                    _source.append(self.word2id[_s])
                except KeyError:
                    # OOV: <unk> in the plain sequence, a fresh extended id
                    # (vocab_size + position in source_oov) in the other.
                    _source.append(self.word2id['<unk>'])
                    if _s not in source_oov:
                        _o_source.append(len(source_oov) + self.vocab_size)
                        source_oov.append(_s)
                    else:
                        _o_source.append(source_oov.index(_s) + self.vocab_size)
                else:
                    _o_source.append(self.word2id[_s])
            # terms
            _o_term, _term = [], []
            terms.append(data[1])
            for _t in data[1]:
                if self.lower:
                    _t = _t.lower()
                try:
                    _term.append(self.word2id[_t])
                except KeyError:
                    # Term OOVs share the same source_oov pool as the title.
                    _term.append(self.word2id['<unk>'])
                    if _t not in source_oov:
                        _o_term.append(len(source_oov) + self.vocab_size)
                        source_oov.append(_t)
                    else:
                        _o_term.append(source_oov.index(_t) + self.vocab_size)
                else:
                    _o_term.append(self.word2id[_t])
            if max_source_oov < len(source_oov):
                max_source_oov = len(source_oov)
            batch_s.append(_source)
            batch_o_s.append(_o_source)
            source_len.append(len(_o_source))
            oovidx2word = {idx: word for idx, word in enumerate(source_oov)}
            list_oovs.append(oovidx2word)
            batch_term.append(_term)
            batch_o_term.append(_o_term)
            term_len.append(len(_o_term))
            _o_target, _target = [], []
            targets.append(data[2])
            for _t in data[2]:
                if self.lower:
                    _t = _t.lower()
                try:
                    _target.append(self.word2id[_t])
                except KeyError:
                    # Target OOVs may only *reuse* source-side extended ids;
                    # they never mint new ones.
                    _target.append(self.word2id['<unk>'])
                    if _t in source_oov:
                        _o_target.append(source_oov.index(_t) + self.vocab_size)
                    else:
                        _o_target.append(self.word2id['<unk>'])
                else:
                    _o_target.append(self.word2id[_t])
            _target = self.add_start_end(_target)
            batch_t.append(_target)
            batch_o_t.append(self.add_start_end(_o_target))
            target_len.append(len(_target))
        if len(source_len) == 1:
            # Single-record batch: no padding needed.
            batch_s = torch.LongTensor(batch_s)
            batch_o_s = torch.LongTensor(batch_o_s)
            batch_term = torch.LongTensor(batch_term)
            batch_o_term = torch.LongTensor(batch_o_term)
            batch_t = torch.LongTensor(batch_t)
            batch_o_t = torch.LongTensor(batch_o_t)
        else:
            batch_s = [torch.LongTensor(self.pad_vector(i, max(source_len))) for i in batch_s]
            batch_o_s = [torch.LongTensor(self.pad_vector(i, max(source_len))) for i in batch_o_s]
            batch_s = torch.stack(batch_s, dim=0)
            batch_o_s = torch.stack(batch_o_s, dim=0)
            batch_term = [torch.LongTensor(self.pad_vector(i, max(term_len))) for i in batch_term]
            batch_o_term = [torch.LongTensor(self.pad_vector(i, max(term_len))) for i in batch_o_term]
            batch_term = torch.stack(batch_term, dim=0)
            batch_o_term = torch.stack(batch_o_term, dim=0)
            batch_t = [torch.LongTensor(self.pad_vector(i, max(target_len))) for i in batch_t]
            batch_o_t = [torch.LongTensor(self.pad_vector(i, max(target_len))) for i in batch_o_t]
            batch_t = torch.stack(batch_t, dim=0)
            batch_o_t = torch.stack(batch_o_t, dim=0)
        source_len = torch.LongTensor(source_len)
        if self.pmid:
            return batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, batch_t, batch_o_t, list_oovs, \
                targets, sources, terms, pmids
        else:
            return batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, batch_t, batch_o_t, list_oovs, \
                targets, sources, terms
    def get_batch(self, i, train=True):
        """Return batch *i*, moving tensors to self.device.

        Training mode returns target tensors; eval mode returns the OOV
        maps and raw token lists instead (plus pmids when enabled).
        """
        if self.pmid:
            batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, batch_t, batch_o_t, list_oovs, \
                targets, sources, terms, pmids = self.corpus[i]
        else:
            batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, batch_t, batch_o_t, list_oovs, \
                targets, sources, terms = self.corpus[i]
        if train:
            return batch_s.to(self.device), batch_o_s.to(self.device), source_len.to(self.device),\
                max_source_oov, batch_term.to(self.device), batch_o_term.to(self.device), batch_t.to(self.device),\
                batch_o_t.to(self.device)
        else:
            if self.pmid:
                return batch_s.to(self.device), batch_o_s.to(self.device), source_len.to(self.device),\
                    max_source_oov, batch_term.to(self.device), batch_o_term.to(self.device), list_oovs, targets,\
                    sources, terms, pmids
            else:
                return batch_s.to(self.device), batch_o_s.to(self.device), source_len.to(self.device),\
                    max_source_oov, batch_term.to(self.device), batch_o_term.to(self.device), list_oovs, targets,\
                    sources, terms
def filter_stopwords(word2id):
    """Collect vocabulary ids of English stopwords and punctuation.

    For each candidate token, both its given form and its title-cased
    variant are looked up in ``word2id``; every hit contributes its id.
    """
    nltk_stopwords = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've", "you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn', "mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", 'won', "won't", 'wouldn', "wouldn't"]
    stop_ids = set()
    for token in set(nltk_stopwords) | set(string.punctuation):
        for variant in (token, token.title()):
            if variant in word2id:
                stop_ids.add(word2id[variant])
    return stop_ids
def printcand(path, cand):
    """Write one candidate sentence per line to *path*.

    Args:
        path: output file path.
        cand: mapping from index to candidate string; values are written
            in the mapping's iteration order.
    """
    # `with` guarantees the handle is flushed/closed even if a write fails
    # (the original left the file open on any exception).
    with open(path, 'w') as f_output:
        for i in cand:
            f_output.write(cand[i] + "\n")
| 11,373 | 42.746154 | 1,494 | py |
PaperRobot | PaperRobot-master/New paper writing/loader/loader.py | import os
import json
import gzip
import lzma
import torch
import torch.nn as nn
from loader.preprocessing import create_mapping
def load_files(path):
    """Load (title, abstract) pairs from a JSON-lines file.

    Each line is a JSON object with 'title', 'abs' and 'words' fields.

    Returns:
        words: concatenated word list across all records.
        [sources, targets]: parallel lists of titles and abstracts.
    """
    sources = []
    targets = []
    words = []
    # `with` closes the handle; the original relied on GC to do so.
    with open(path, 'r') as f:
        for line in f:
            file = json.loads(line.strip())
            sources.append(file['title'])
            targets.append(file['abs'])
            words.extend(file['words'])
    return words, [sources, targets]
def load_file_with_terms(path):
    """Load (title, terms, abstract) triples from a JSON-lines file.

    Each line is a JSON object with 'title', 'abs', 'terms' and 'words'.

    Returns:
        words: concatenated word list across all records.
        [sources, terms, targets]: parallel lists.
    """
    sources = []
    targets = []
    terms = []
    words = []
    # `with` closes the handle; the original relied on GC to do so.
    with open(path, 'r') as f:
        for line in f:
            file = json.loads(line.strip())
            sources.append(file['title'])
            targets.append(file['abs'])
            terms.append(file['terms'])
            words.extend(file['words'])
    return words, [sources, terms, targets]
def load_file_with_pmid(path):
    """Load records from a JSON-lines file, keeping only heart-related papers.

    A record is kept when any keyword appears (case-insensitively) in its
    title, terms, or abstract.

    Returns:
        words: concatenated word list of the kept records.
        [sources, terms, targets, pmids]: parallel lists for kept records.
    """
    heart = ['heart', 'cardio', 'cardiac', 'annulus', 'arrhyth', 'atrium', 'cardi', 'coronary', 'pulmonary', 'valve']
    # heart = ['DNA', 'Atomic', 'Genome', 'Monolayer', 'Molecular', 'Polymer', 'Self-assembly', 'Quantum', 'Ontological',
    #          'Autogeny', 'MEMS', 'NEMS', 'Plasmonics', 'Biomimetic', 'nano', 'Molecular', 'Electrospinning']
    pmids = []
    sources = []
    targets = []
    terms = []
    words = []
    # `with` closes the handle; the original relied on GC to do so.
    with open(path, 'r') as f:
        for line in f:
            file = json.loads(line.strip())
            # Keywords contain no spaces, so joining the three fields with
            # spaces cannot create spurious cross-field matches; this is
            # equivalent to the original per-field checks.
            haystack = " ".join(file['title'] + file['terms'] + file['abs']).lower()
            if any(h.lower() in haystack for h in heart):
                sources.append(file['title'])
                targets.append(file['abs'])
                terms.append(file['terms'])
                words.extend(file['words'])
                pmids.append(file['pmid'])
    return words, [sources, terms, targets, pmids]
def load_file_with_pmid_no_filter(path):
    """Load all records (title, terms, abstract, pmid) from a JSON-lines file.

    Same format as load_file_with_pmid but without the keyword filter.

    Returns:
        words: concatenated word list across all records.
        [sources, terms, targets, pmids]: parallel lists.
    """
    pmids = []
    sources = []
    targets = []
    terms = []
    words = []
    # `with` closes the handle; the original relied on GC to do so.
    with open(path, 'r') as f:
        for line in f:
            file = json.loads(line.strip())
            sources.append(file['title'])
            targets.append(file['abs'])
            terms.append(file['terms'])
            words.extend(file['words'])
            pmids.append(file['pmid'])
    return words, [sources, terms, targets, pmids]
def load_test_data(path, t_dataset):
    """Align system outputs in *path* with reference data in *t_dataset*.

    Args:
        path: JSON-lines file whose records carry 'Output', 'Terms', 'pmid'.
        t_dataset: (sources, terms, targets, pmids) reference tuple.

    Returns:
        [sources, terms, targets, pmids] restricted to pmids present in both
        the prediction file and the reference set.
    """
    osources, oterms, otargets, opmids = t_dataset
    # pmid -> [title, terms, abstract] reference lookup ('gturth' typo fixed).
    ground_truth = {p: [s, t, a] for s, t, a, p in zip(osources, oterms, otargets, opmids)}
    pmids = []
    sources = []
    targets = []
    terms = []
    # `with` closes the handle; the original relied on GC to do so.
    with open(path, 'r') as f:
        for line in f:
            file = json.loads(line.strip())
            if file['pmid'] in ground_truth:
                sources.append(file['Output'])
                targets.append(ground_truth[file['pmid']][2])
                terms.append(file['Terms'])
                pmids.append(file['pmid'])
    return [sources, terms, targets, pmids]
def load_test_new_data(path):
    """Load generated outputs and their references from a JSON-lines file.

    Each line must contain 'Output', 'Ref', 'Terms' and 'pmid' fields.

    Returns:
        [sources, terms, targets, pmids] parallel lists.
    """
    pmids = []
    sources = []
    targets = []
    terms = []
    # `with` closes the handle; also removed the unused `gturth` dict.
    with open(path, 'r') as f:
        for line in f:
            file = json.loads(line.strip())
            sources.append(file['Output'])
            targets.append(file['Ref'])
            terms.append(file['Terms'])
            pmids.append(file['pmid'])
    return [sources, terms, targets, pmids]
def load_pretrained(word_emb, id2word, pre_emb):
    """Overwrite rows of *word_emb* with vectors read from *pre_emb*.

    Words are matched exactly first, then by lowercase form. Mutates the
    embedding in place and returns it.

    NOTE(review): returns None when pre_emb is falsy but returns word_emb
    otherwise — callers should not rely on the return value alone.
    """
    if not pre_emb:
        return
    word_dim = word_emb.weight.size(1)
    # Initialize with pretrained embeddings
    new_weights = word_emb.weight.data
    print('Loading pretrained embeddings from %s...' % pre_emb)
    pretrained = {}
    emb_invalid = 0
    for i, line in enumerate(load_embedding(pre_emb)):
        # Compressed files yield bytes; decode, skipping undecodable lines.
        if type(line) == bytes:
            try:
                line = str(line, 'utf-8')
            except UnicodeDecodeError:
                continue
        line = line.rstrip().split()
        # Expect "<word> <word_dim floats>"; anything else is invalid.
        if len(line) == word_dim + 1:
            pretrained[line[0]] = torch.Tensor(
                [float(x) for x in line[1:]])
        else:
            emb_invalid += 1
    if emb_invalid > 0:
        print('WARNING: %i invalid lines' % emb_invalid)
    c_found = 0
    c_lower = 0
    # Lookup table initialization
    for i in range(len(id2word)):
        word = id2word[i]
        if word in pretrained:
            new_weights[i] = pretrained[word]
            c_found += 1
        elif word.lower() in pretrained:
            new_weights[i] = pretrained[word.lower()]
            c_lower += 1
    word_emb.weight = nn.Parameter(new_weights)
    print('Loaded %i pretrained embeddings.' % len(pretrained))
    print('%i / %i (%.4f%%) words have been initialized with '
          'pretrained embeddings.' % (
            c_found + c_lower , len(id2word),
            100. * (c_found + c_lower) / len(id2word)
        ))
    print('%i found directly, %i after lowercasing, ' % (
        c_found, c_lower))
    return word_emb
def load_embedding(pre_emb):
    """Open an embedding file, transparently handling .xz/.gz compression.

    Compressed files are opened in binary mode (lines come back as bytes);
    plain files are opened as text with undecodable bytes replaced.
    """
    name = os.path.basename(pre_emb)
    if name.endswith('.xz'):
        return lzma.open(pre_emb)
    elif name.endswith('.gz'):
        return gzip.open(pre_emb, 'rb')
    return open(pre_emb, 'r', errors='replace')
def augment_with_pretrained(words_freq, ext_emb_path):
    """
    Augment the dictionary with words that have a pretrained embedding.

    Every word found in the embedding file gets +10 frequency, then the
    word<->id mappings are rebuilt. Returns {'word2id', 'id2word'}.
    """
    print(
        'Augmenting words by pretrained embeddings from %s...' % ext_emb_path
    )
    assert os.path.isfile(ext_emb_path)
    # Load pretrained embeddings from file
    pretrained = []
    if len(ext_emb_path) > 0:
        for line in load_embedding(ext_emb_path):
            if not line.strip():
                continue
            # Compressed files yield bytes; decode, skipping bad lines.
            if type(line) == bytes:
                try:
                    pretrained.append(str(line, 'utf-8').rstrip().split()[0].strip())
                except UnicodeDecodeError:
                    continue
            else:
                pretrained.append(line.rstrip().split()[0].strip())
    pretrained = set(pretrained)
    # We add every word in the pretrained file
    # NOTE(review): += 10 assumes words_freq is a Counter/defaultdict so
    # unseen words start at 0 — confirm with callers.
    for word in pretrained:
        words_freq[word] += 10
    word2id, id2word = create_mapping(words_freq)
    mappings = {
        'word2id': word2id,
        'id2word': id2word
    }
    return mappings
# old
def load_file_with_pred(path_title, base_dir='/data/m1/wangq16/End-end_title_generation/data/'):
    """Load (source, preds, target) records from a JSON-lines file.

    Args:
        path_title: file name, resolved relative to *base_dir*.
        base_dir: directory prefix for the data file; defaults to the
            historical hard-coded location, so existing callers keep working.

    Returns:
        words: concatenated word list across all records.
        [sources, preds, targets]: parallel lists.
    """
    sources = []
    targets = []
    preds = []
    words = []
    path = base_dir + path_title
    # `with` closes the handle; the original relied on GC to do so.
    with open(os.path.abspath(path), 'r') as f:
        for line in f:
            file = json.loads(line.strip())
            sources.append(file['source'])
            targets.append(file['target'])
            preds.append(file['preds'])
            words.extend(file['words'])
    return words, [sources, preds, targets]
| 6,934 | 27.539095 | 121 | py |
PaperRobot | PaperRobot-master/New paper writing/utils/optim.py | import torch.optim as optim
def get_optimizer(model, lr_method, lr_rate):
    """
    parse optimization method parameters, and initialize optimizer function
    """
    # Dispatch table instead of an if/elif chain; same optimizers and
    # hyper-parameters as before (SGD keeps its 0.9 momentum).
    factories = {
        'sgd': lambda params: optim.SGD(params, lr=lr_rate, momentum=0.9),
        'adagrad': lambda params: optim.Adagrad(params, lr=lr_rate),
        'adam': lambda params: optim.Adam(params, lr=lr_rate),
    }
    if lr_method not in factories:
        raise Exception('unknown optimization method.')
    return factories[lr_method](model.parameters())
| 901 | 36.583333 | 85 | py |
PaperRobot | PaperRobot-master/New paper writing/memory_generator/Decoder.py | import sys
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from .baseRNN import BaseRNN
from .utils import MemoryComponent
class DecoderRNN(BaseRNN):
    """Pointer-generator decoder with reference attention and a term memory.

    At each step the decoder mixes three distributions: the vocabulary
    softmax, a copy distribution over the source (reference) tokens, and a
    copy distribution over the retrieved term memory, gated by p_gen/p_ref.
    Coverage vectors over both attentions discourage repetition.
    """
    def __init__(self, vocab_size, embedding, word_dim, sos_id, eos_id, unk_id,
                 max_len=150, input_dropout_p=0, layer_dropout=0, n_layers=1, lmbda=1,
                 gpu=torch.cuda.is_available(), rnn_type='gru', h=8, hop=3,
                 beam_size=4, **kwargs):
        hidden_size = word_dim
        super(DecoderRNN, self).__init__(vocab_size, hidden_size, input_dropout_p, n_layers, rnn_type)
        self.rnn = self.rnn_cell(word_dim, hidden_size, n_layers, batch_first=True)
        self.output_size = vocab_size
        self.hidden_size = hidden_size
        self.max_length = max_len
        self.eos_id = eos_id
        self.sos_id = sos_id
        self.unk_id = unk_id
        self.lmbda = lmbda  # weight of the coverage loss term
        self.embedding = embedding
        self.device = torch.device("cuda:0" if gpu and torch.cuda.is_available() else "cpu")
        self.beam_size = beam_size
        # initialization
        self.memory_init = MemoryComponent(hop, h, hidden_size, layer_dropout)
        self.Wh = nn.Linear(hidden_size * 2, hidden_size)
        # params for ref attention
        self.Wih = nn.Linear(hidden_size * 2, hidden_size)  # for obtaining projection from encoder hidden states
        self.Ws = nn.Linear(hidden_size, hidden_size)  # for obtaining e from current state
        self.Wc = nn.Linear(1, hidden_size)  # for obtaining e from context vector
        self.Wr = nn.Linear(hidden_size * 2, hidden_size)
        self.vt = nn.Linear(hidden_size, 1)
        # params for memory
        self.memory = MemoryComponent(hop, h, hidden_size, layer_dropout)
        # output
        self.V = nn.Linear(hidden_size * 3, self.output_size)
        # parameters for p_gen
        # for changing refcontext vector into a scalar
        self.w_p = nn.Linear(hidden_size * 2, 1)
        # for changing context vector into a scalar
        self.w_h = nn.Linear(hidden_size * 2, 1)
    def decode_step(self, sources_ids, _h, enc_proj, batch_size, cov_ref, cov_mem, max_enc_len, enc_mask,
                    encoder_outputs, embed_target, max_source_oov, term_output, term_id, term_mask): #
        """One decoding step: attend over references and terms, mix the
        generation/copy distributions, and return the combined vocabulary
        distribution plus both attention weights.
        """
        # reference attention
        dec_proj = self.Ws(_h).unsqueeze(1).expand_as(enc_proj)
        # print('decoder proj', dec_proj.size())
        cov_proj = self.Wc(cov_ref.view(-1, 1)).view(batch_size, max_enc_len, -1)
        # print('cov proj', cov_proj.size())
        e_t = self.vt(torch.tanh(enc_proj + dec_proj + cov_proj).view(batch_size*max_enc_len, -1))
        # mask to -INF before applying softmax
        ref_attn = e_t.view(batch_size, max_enc_len)
        del e_t
        ref_attn.data.masked_fill_(enc_mask.data.byte(), -float('inf'))  # float('-inf')
        ref_attn = F.softmax(ref_attn, dim=1)
        ref_context = self.Wr(ref_attn.unsqueeze(1).bmm(encoder_outputs).squeeze(1))
        # terms attention
        term_context, term_attn = self.memory(_h.unsqueeze(0), term_output, term_mask, cov_mem)
        term_context = term_context.squeeze(0)
        # output proj calculation
        p_vocab = F.softmax(self.V(torch.cat((_h, ref_context, term_context), 1)), dim=1)
        # pgen
        # print(embed_target.size())
        # p_gen gates generation vs copying; p_ref splits copy mass
        # between reference attention and term-memory attention.
        p_gen = torch.sigmoid(self.w_p(torch.cat((_h, embed_target), 1)))
        p_ref = torch.sigmoid(self.w_h(torch.cat((ref_context, term_context), 1)))
        weighted_Pvocab = p_vocab * p_gen
        weighted_ref_attn = (1 - p_gen) * p_ref * ref_attn
        weighted_term_attn = (1 - p_gen) * (1 - p_ref) * term_attn
        if max_source_oov > 0:
            # create OOV (but in-article) zero vectors
            ext_vocab = torch.zeros(batch_size, max_source_oov)
            ext_vocab=ext_vocab.to(self.device)
            combined_vocab = torch.cat((weighted_Pvocab, ext_vocab), 1)
            del ext_vocab
        else:
            combined_vocab = weighted_Pvocab
            del weighted_Pvocab
        # scatter article word probs to combined vocab prob.
        combined_vocab = combined_vocab.scatter_add(1, sources_ids, weighted_ref_attn)
        combined_vocab = combined_vocab.scatter_add(1, term_id, weighted_term_attn)
        return combined_vocab, ref_attn, term_attn
    def forward(self, max_source_oov=0, targets=None, targets_id=None,
                sources_ids=None, enc_mask=None, encoder_hidden=None,
                encoder_outputs=None, term_id=None, term_mask=None,
                term_output=None, teacher_forcing_ratio=None, beam=False,
                stopwords=None, sflag=False):
        """Train (teacher forcing -> masked NLL + coverage loss), greedy
        evaluate, or delegate to beam-search decode when beam=True.
        """
        if beam:
            return self.decode(max_source_oov, sources_ids, enc_mask, encoder_hidden, encoder_outputs,
                               term_id, term_mask, term_output, stopwords, sflag)
        # initialization
        targets, batch_size, max_length, max_enc_len = self._validate_args(targets, encoder_hidden, encoder_outputs,
                                                                           teacher_forcing_ratio)
        decoder_hidden = self._init_state(encoder_hidden)
        cov_ref = torch.zeros(batch_size, max_enc_len)
        cov_ref = cov_ref.to(self.device)
        cov_mem = torch.zeros_like(term_mask, dtype=torch.float)
        cov_mem = cov_mem.to(self.device)
        # memory initialization
        decoder_hidden, _ = self.memory_init(decoder_hidden, term_output, term_mask)
        # print(encoder_outputs.size())
        enc_proj = self.Wih(encoder_outputs.contiguous().view(batch_size * max_enc_len, -1)).view(batch_size,
                                                                                                  max_enc_len, -1)
        if teacher_forcing_ratio:
            embedded = self.embedding(targets)
            embed_targets = self.input_dropout(embedded)
            dec_lens = (targets > 0).float().sum(1)
            lm_loss, cov_loss = [], []  # , cov_loss_mem , []
            hidden, _ = self.rnn(embed_targets, decoder_hidden)
            # step through decoder hidden states
            for _step in range(max_length):
                _h = hidden[:, _step, :]
                target_id = targets_id[:, _step+1].unsqueeze(1)
                embed_target = embed_targets[:, _step, :]
                combined_vocab, ref_attn, term_attn = self.decode_step(sources_ids, _h, enc_proj, batch_size,
                                                                       cov_ref, cov_mem, max_enc_len, enc_mask,
                                                                       encoder_outputs, embed_target,
                                                                       max_source_oov, term_output, term_id, term_mask)
                # mask the output to account for PAD , cov_ref
                target_mask_0 = target_id.ne(0).detach()
                output = combined_vocab.gather(1, target_id).add_(sys.float_info.epsilon)
                lm_loss.append(output.log().mul(-1) * target_mask_0.float())
                cov_ref = cov_ref + ref_attn
                cov_mem = cov_mem + term_attn
                # Coverage Loss
                # take minimum across both attn_scores and coverage
                _cov_loss_ref, _ = torch.stack((cov_ref, ref_attn), 2).min(2)
                _cov_loss_mem, _ = torch.stack((cov_mem, term_attn), 2).min(2)
                cov_loss.append(_cov_loss_ref.sum(1) + _cov_loss_mem.sum(1))
                # print(cov_loss_ref[-1].size())
                # cov_loss_mem.append(_)
            total_masked_loss = torch.cat(lm_loss, 1).sum(1).div(dec_lens) + self.lmbda * torch.stack(cov_loss, 1)\
                .sum(1).div(dec_lens) / 2.0
            return total_masked_loss
        else:
            return self.evaluate(targets, batch_size, max_length, max_source_oov, encoder_outputs, decoder_hidden,
                                 enc_mask, sources_ids, enc_proj, max_enc_len, term_output, term_id, term_mask,
                                 cov_ref, cov_mem)
    def evaluate(self, targets, batch_size, max_length, max_source_oov, encoder_outputs, decoder_hidden, enc_mask,
                 sources_ids, enc_proj, max_enc_len, term_output, term_id, term_mask, cov_ref, cov_mem):
        """Greedy decoding; returns (token ids [B, T], per-example lengths)."""
        lengths = np.array([max_length] * batch_size)
        decoded_outputs = []
        embed_target = self.embedding(targets)
        # step through decoder hidden states
        for _step in range(max_length):
            _h, decoder_hidden = self.rnn(embed_target, decoder_hidden)
            combined_vocab, ref_attn, term_attn = self.decode_step(sources_ids, _h.squeeze(1), enc_proj, batch_size,
                                                                   cov_ref, cov_mem, max_enc_len, enc_mask,
                                                                   encoder_outputs, embed_target.squeeze(1),
                                                                   max_source_oov, term_output, term_id, term_mask)
            # not allow decoder to output UNK
            combined_vocab[:, self.unk_id] = 0
            symbols = combined_vocab.topk(1)[1]
            decoded_outputs.append(symbols.clone())
            eos_batches = symbols.data.eq(self.eos_id)
            if eos_batches.dim() > 0:
                eos_batches = eos_batches.cpu().view(-1).numpy()
                update_idx = ((lengths > _step) & eos_batches) != 0
                lengths[update_idx] = len(decoded_outputs)
            # extended (copied-OOV) ids have no embedding row: feed <unk> back.
            symbols.masked_fill_((symbols > self.vocab_size - 1), self.unk_id)
            embed_target = self.embedding(symbols)
            cov_ref = cov_ref + ref_attn
            cov_mem = cov_mem + term_attn
        return torch.stack(decoded_outputs, 1).squeeze(2), lengths.tolist()
    def _init_state(self, encoder_hidden):
        """ Initialize the encoder hidden state. """
        if encoder_hidden is None:
            return None
        else:
            if isinstance(encoder_hidden, tuple):
                encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])
            else:
                encoder_hidden = self._cat_directions(encoder_hidden)
            encoder_hidden = self.Wh(encoder_hidden)
        return encoder_hidden
    def _cat_directions(self, h):
        """Concatenate the forward/backward states of a bidirectional RNN."""
        h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)
        return h
    def _validate_args(self, targets, encoder_hidden, encoder_outputs, teacher_forcing_ratio):
        """Resolve batch size, default <sos> targets, and max decode length."""
        if encoder_outputs is None:
            raise ValueError("Argument encoder_outputs cannot be None when attention is used.")
        else:
            max_enc_len = encoder_outputs.size(1)
        # inference batch size
        if targets is None and encoder_hidden is None:
            batch_size = 1
        else:
            if targets is not None:
                batch_size = targets.size(0)
            else:
                batch_size = encoder_hidden.size(1)
        # set default targets and max decoding length
        if targets is None:
            if teacher_forcing_ratio > 0:
                raise ValueError("Teacher forcing has to be disabled (set 0) when no targets is provided.")
            # torch.set_grad_enabled(False)
            targets = torch.LongTensor([self.sos_id] * batch_size).view(batch_size, 1)
            targets = targets.to(self.device)
            max_length = self.max_length
        else:
            max_length = targets.size(1) - 1  # minus the start of sequence symbol
        return targets, batch_size, max_length, max_enc_len
    def getOverallTopk(self, vocab_probs, ref_attn, term_attn, cov_ref, cov_mem,
                       all_hyps, results, decoder_hidden, stopwords):
        """Expand every live hypothesis by its top-k tokens, then prune back
        to beam_size; finished (<eos>, length>=5) hypotheses move to results.
        """
        new_decoder_hidden, new_cov_ref, new_cov_mem = [], [], []
        new_vocab_probs = []
        for i, hypo in enumerate(all_hyps):
            curr_vocab_probs = vocab_probs[i]
            # zero out non-stopwords this hypothesis already emitted
            curr_vocab_probs[hypo.used_words] = 0
            # print(hypo.used_words)
            new_vocab_probs.append(curr_vocab_probs.unsqueeze(0))
        vocab_probs = torch.cat(new_vocab_probs, 0)
        cov_ref += ref_attn
        cov_mem += term_attn
        # return top-k values i.e. top-k over all beams i.e. next step input ids
        # return hidden, cell states corresponding to topk
        probs, inds = vocab_probs.topk(k=self.beam_size, dim=1)
        probs = probs.log()
        candidates = []
        assert len(all_hyps) == probs.size(0), '# Hypothesis and log-prob size dont match'
        # cycle through all hypothesis in full beam
        for i, hypo in enumerate(probs.tolist()):
            for j, _ in enumerate(hypo):
                new_cand = all_hyps[i].extend(token_id=inds[i,j].item(),
                                              hidden_state=decoder_hidden[i].unsqueeze(0),
                                              cov_ref=cov_ref[i].unsqueeze(0),
                                              cov_mem=cov_mem[i].unsqueeze(0),
                                              log_prob= probs[i,j],
                                              stopwords=stopwords)
                candidates.append(new_cand)
        # sort in descending order
        candidates = sorted(candidates, key=lambda x:x.survivability, reverse=True)
        new_beam, next_inp = [], []
        # prune hypotheses and generate new beam
        for h in candidates:
            if h.full_prediction[-1] == self.eos_id:
                # weed out small sentences that likely have no meaning
                if len(h.full_prediction) >= 5:
                    results.append(h)
            else:
                new_beam.append(h)
                next_inp.append(h.full_prediction[-1])
                new_decoder_hidden.append(h.hidden_state)
                new_cov_ref.append(h.cov_ref)
                new_cov_mem.append(h.cov_mem)
            if len(new_beam) >= self.beam_size:
                break
        assert len(new_beam) >= 1, 'Non-existent beam'
        # print(next_inp)
        return new_beam, torch.LongTensor(next_inp).to(self.device).view(-1, 1), results, \
            torch.cat(new_decoder_hidden, 0).unsqueeze(0), torch.cat(new_cov_ref, 0), torch.cat(new_cov_mem, 0)
    # Beam Search Decoding
    def decode(self, max_source_oov=0, sources_ids=None, enc_mask=None, encoder_hidden=None, encoder_outputs=None,
               term_id=None, term_mask=None, term_output=None, stopwords=None, sflag=False):
        """Beam-search decode a single example; returns the best token-id list.

        When sflag is False, *every* id is treated as a stopword, which
        disables the repeated-word blocking in getOverallTopk.
        """
        # print(encoder_outputs.size())
        # print(term_output.size())
        max_length = self.max_length
        if not sflag:
            stopwords = set(range(max_source_oov + self.vocab_size))
        targets = torch.LongTensor([[self.sos_id]]).to(self.device)
        decoder_hidden = self._init_state(encoder_hidden)
        max_enc_len = encoder_outputs.size(1)
        max_term_len = term_id.size(1)
        cov_ref = torch.zeros(1, max_enc_len)
        cov_ref = cov_ref.to(self.device)
        cov_mem = torch.zeros(1, max_term_len)
        cov_mem = cov_mem.to(self.device)
        # memory initialization
        decoder_hidden, _ = self.memory_init(decoder_hidden, term_output, term_mask)
        decoded_outputs = []
        # all_hyps --> list of current beam hypothesis. start with base initial hypothesis
        all_hyps = [Hypothesis([self.sos_id], decoder_hidden, cov_ref, cov_mem, 0, stopwords)]
        # start decoding
        enc_proj = self.Wih(encoder_outputs.contiguous().view(max_enc_len, -1)).view(1, max_enc_len, -1)
        # print(enc_proj.size())
        embed_target = self.embedding(targets)
        # print(embed_target.size())
        for _step in range(max_length):
            # print(_step)
            # after first step, input is of batch_size=curr_beam_size
            # curr_beam_size <= self.beam_size due to pruning of beams that have terminated
            # adjust enc_states and init_state accordingly
            _h, decoder_hidden = self.rnn(embed_target, decoder_hidden)
            # print(decoder_hidden.size())
            curr_beam_size = embed_target.size(0)
            # print('curr_beam_size', curr_beam_size)
            combined_vocab, ref_attn, term_attn = self.decode_step(sources_ids, _h.squeeze(1), enc_proj, curr_beam_size,
                                                                   cov_ref, cov_mem, max_enc_len, enc_mask,
                                                                   encoder_outputs, embed_target.squeeze(1),
                                                                   max_source_oov, term_output, term_id, term_mask)
            combined_vocab[:, self.unk_id] = 0
            # does bulk of the beam search
            # decoded_outputs --> list of all ouputs terminated with stop tokens and of minimal length
            all_hyps, symbols, decoded_outputs, decoder_hidden, cov_ref, cov_mem = self.getOverallTopk(combined_vocab,
                                                            ref_attn, term_attn, cov_ref, cov_mem,
                                                            all_hyps, decoded_outputs,
                                                            decoder_hidden.squeeze(0), stopwords)
            symbols.masked_fill_((symbols > self.vocab_size - 1), self.unk_id)
            embed_target = self.embedding(symbols)
            # print('embed_target', embed_target.size())
            curr_beam_size = embed_target.size(0)
            # print('curr_beam_size', curr_beam_size)
            if embed_target.size(0) > encoder_outputs.size(0):
                # beam widened: tile encoder-side tensors to the new width
                encoder_outputs = encoder_outputs.expand(curr_beam_size, -1, -1).contiguous()
                enc_mask = enc_mask.expand(curr_beam_size, -1).contiguous()
                sources_ids = sources_ids.expand(curr_beam_size, -1).contiguous()
                term_id = term_id.expand(curr_beam_size, -1).contiguous()
                term_mask = term_mask.expand(curr_beam_size, -1).contiguous()
                term_output = term_output.expand(curr_beam_size, -1, -1).contiguous()
                enc_proj = self.Wih(encoder_outputs.contiguous().view(curr_beam_size * max_enc_len, -1))\
                    .view(curr_beam_size, max_enc_len, -1)
                # print('encoder proj', enc_proj.size())
        if len(decoded_outputs) > 0:
            candidates = decoded_outputs
        else:
            candidates = all_hyps
        all_outputs = sorted(candidates, key=lambda x:x.survivability, reverse=True)
        return all_outputs[0].full_prediction  #
class Hypothesis(object):
    """A single beam-search hypothesis: a token prefix plus decoder state."""

    def __init__(self, token_id, hidden_state, cov_ref, cov_mem, log_prob, stopwords):
        # Keep both aliases for the decoder state; callers read either one.
        self._h = hidden_state
        self.hidden_state = hidden_state
        self.log_prob = log_prob
        # Detach coverage so a stored hypothesis holds no autograd graph.
        self.cov_ref = cov_ref.detach()
        self.cov_mem = cov_mem.detach()
        self.full_prediction = token_id  # list of token ids decoded so far
        # Non-stopword tokens already emitted (used to block repetition).
        self.used_words = list(set(token_id) - stopwords)
        # Length-normalised score used to rank hypotheses.
        self.survivability = self.log_prob / float(len(self.full_prediction))

    def extend(self, token_id, hidden_state, cov_ref, cov_mem, log_prob, stopwords):
        """Return a new hypothesis with *token_id* appended."""
        return Hypothesis(
            token_id=self.full_prediction + [token_id],
            hidden_state=hidden_state,
            cov_ref=cov_ref.detach(),
            cov_mem=cov_mem.detach(),
            log_prob=self.log_prob + log_prob,
            stopwords=stopwords,
        )
| 18,992 | 50.332432 | 120 | py |
PaperRobot | PaperRobot-master/New paper writing/memory_generator/predictor.py | import gc
from itertools import groupby
import torch
import statistics
def filter_duplicate(sents):
    """Clean generated text: drop repeated clauses/words across sentences.

    Splits on '.', then on ',' within each sentence; clauses seen before
    (verbatim or after in-clause word dedup) are dropped, surviving
    sentences are capitalised and re-joined with ' . ' separators.
    """
    seen_clauses = []
    seen_joined = []
    result = ""
    for segment in sents.split('.'):
        rebuilt = ''
        for clause in segment.split(','):
            if clause in seen_clauses:
                continue
            if len(clause) < 2:
                continue
            seen_clauses.append(clause)
            # collapse immediate word repetitions inside the clause
            deduped = ' '.join(word for word, _ in groupby(clause.split()))
            if deduped in seen_joined:
                continue
            seen_joined.append(deduped)
            if clause[-1] == ',':
                rebuilt += deduped + ' '
            else:
                rebuilt += deduped + ' , '
        if not rebuilt:
            continue
        rebuilt = rebuilt[0].upper() + rebuilt[1:]
        if rebuilt[-1] == '.':
            result += rebuilt + ' '
        else:
            if rebuilt[-2:] == ', ':
                result += rebuilt[:-2]
            else:
                result += rebuilt
            result += ' . '
    return result
class Predictor(object):
def __init__(self, model, id2word, vocab_size):
self.model = model
self.model.eval()
self.id2word = id2word
self.vocab_size = vocab_size
def predict(self, batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, list_oovs):
torch.set_grad_enabled(False)
decoded_outputs, lengths = self.model(batch_s, batch_o_s, source_len, max_source_oov, batch_term,
batch_o_term)
length = lengths[0]
output = []
# print(decoded_outputs)
for i in range(length):
symbol = decoded_outputs[0][i].item()
if symbol < self.vocab_size:
output.append(self.id2word[symbol])
else:
output.append(list_oovs[0][symbol-self.vocab_size])
return self.prepare_for_bleu(output, True)[0]
def predict_beam(self, batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, list_oovs,
stopwords, sflag=False):
torch.set_grad_enabled(False)
decoded_outputs = self.model(batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term,
beam=True, stopwords=stopwords, sflag=sflag)
outputs = []
for symbol in decoded_outputs:
if symbol < self.vocab_size:
outputs.append(self.id2word[symbol])
else:
outputs.append(list_oovs[0][symbol - self.vocab_size])
outputs = self.prepare_for_bleu(outputs, True)[0]
print(outputs)
return outputs
def preeval_batch(self, dataset, pmid=False):
torch.set_grad_enabled(False)
refs = {}
cands = {}
titles = {}
new_terms = {}
new_pmids = {}
avg_len_ref = []
avg_len_out = []
i = 0
for batch_idx in range(len(dataset.corpus)):
if pmid:
batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, list_oovs, targets, \
sources, terms, pmids = dataset.get_batch(batch_idx, False)
else:
batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, list_oovs, targets, \
sources, terms = dataset.get_batch(batch_idx, False)
decoded_outputs, lengths = self.model(batch_s, batch_o_s, source_len, max_source_oov, batch_term,
batch_o_term)
for j in range(len(lengths)):
i += 1
ref, lref = self.prepare_for_bleu(targets[j])
if pmid:
refs[i] = ref.split()
titles[i] = sources[j]
new_terms[i] = terms[j]
else:
avg_len_ref.append(lref)
refs[i] = [ref]
titles[i] = " ".join(sources[j])
new_terms[i] = " ".join(terms[j])
out_seq = []
for k in range(lengths[j]):
symbol = decoded_outputs[j][k].item()
if symbol < self.vocab_size:
out_seq.append(self.id2word[symbol])
else:
out_seq.append(list_oovs[j][symbol-self.vocab_size])
out, lout = self.prepare_for_bleu(out_seq, True)
if pmid:
new_pmids[i] = pmids[j]
cands[i] = out.split()
else:
avg_len_out.append(lout)
cands[i] = out
if i % 500 == 0:
print("Percentages: %.4f" % (i/float(dataset.len)))
# del batch_s, batch_o_s, source_len, batch_term, batch_o_term
# gc.collect()
# torch.cuda.empty_cache()
if pmid:
return cands, refs, titles, new_terms, new_pmids
else:
print("Reference length ", statistics.mean(avg_len_ref))
print("Output length ", statistics.mean(avg_len_out))
return cands, refs, titles, new_terms
    def preeval_batch_beam(self, dataset, pmid=False, stopwords=None, sflag=True):
        """Beam-search variant of ``preeval_batch``.

        Beam decoding returns a single best sequence per call, so each
        batch contributes exactly one example (index 0 of every field).
        """
        torch.set_grad_enabled(False)
        refs = {}
        cands = {}
        titles = {}
        new_terms = {}
        new_pmids = {}
        avg_len_ref = []
        avg_len_out = []
        i = 0
        for batch_idx in range(len(dataset.corpus)): #
            if pmid:
                batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, list_oovs, targets, \
                    sources, terms, pmids = dataset.get_batch(batch_idx, False)
            else:
                batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, list_oovs, targets, \
                    sources, terms = dataset.get_batch(batch_idx, False)
            # beam=True: model returns one decoded id sequence, no lengths.
            decoded_outputs = self.model(batch_s, batch_o_s, source_len, max_source_oov, batch_term,
                                         batch_o_term, beam=True, stopwords=stopwords, sflag=sflag)
            i += 1
            ref, lref = self.prepare_for_bleu(targets[0])
            if pmid:
                refs[i] = ref.split()
                titles[i] = sources[0]
                new_terms[i] = terms[0]
            else:
                avg_len_ref.append(lref)
                refs[i] = [ref]
                titles[i] = " ".join(sources[0])
                new_terms[i] = " ".join(terms[0])
            out_seq = []
            # ids >= vocab_size index into this example's OOV list (copy mechanism).
            for symbol in decoded_outputs:
                if symbol < self.vocab_size:
                    out_seq.append(self.id2word[symbol])
                else:
                    out_seq.append(list_oovs[0][symbol-self.vocab_size])
            out, lout = self.prepare_for_bleu(out_seq, True)
            if pmid:
                new_pmids[i] = pmids[0]
                cands[i] = out.split()
            else:
                avg_len_out.append(lout)
                cands[i] = out
            if i % 10 == 0:
                print("Percentages: %.4f" % (i/float(dataset.len)))
            # del batch_s, batch_o_s, source_len, batch_term, batch_o_term
            # gc.collect()
            # torch.cuda.empty_cache()
        if pmid:
            return cands, refs, titles, new_terms, new_pmids
        else:
            print("Reference length ", statistics.mean(avg_len_ref))
            print("Output length ", statistics.mean(avg_len_out))
            return cands, refs, titles, new_terms
def prepare_for_bleu(self, sentence, train=False):
sent = [x for x in sentence if x != '<pad>' and x != '<eos>' and x != '<sos>']
l = len(sent)
sent = ' '.join(sent)
if train:
sent = filter_duplicate(sent)
return sent, l
| 7,888 | 37.862069 | 111 | py |
PaperRobot | PaperRobot-master/New paper writing/memory_generator/seq2seq.py | import torch.nn as nn
class Seq2seq(nn.Module):
    """Glue module wiring a reference encoder, a term encoder and a decoder.

    The two encoders run independently; the decoder consumes both encodings
    together with the OOV-extended token ids for the copy mechanism.
    """

    def __init__(self, ref_encoder, term_encoder, decoder):
        super().__init__()
        self.ref_encoder = ref_encoder
        self.term_encoder = term_encoder
        self.decoder = decoder

    def forward(self, batch_s, batch_o_s, source_len, max_source_oov, batch_term, batch_o_term, batch_t=None,
                batch_o_t=None, teacher_forcing_ratio=0, beam=False, stopwords=None, sflag=False):
        # Encode the reference text and the term list separately.
        ref_states, ref_hidden, ref_mask = self.ref_encoder(batch_s, source_len)
        term_states, term_pad_mask = self.term_encoder(batch_term)
        # Hand everything to the decoder; its return shape depends on `beam`.
        return self.decoder(max_source_oov, batch_t, batch_o_t, batch_o_s, ref_mask, ref_hidden,
                            ref_states, batch_o_term, term_pad_mask, term_states, teacher_forcing_ratio,
                            beam, stopwords, sflag)
| 924 | 45.25 | 113 | py |
PaperRobot | PaperRobot-master/New paper writing/memory_generator/baseRNN.py | """ A base class for RNN. """
import torch.nn as nn
class BaseRNN(nn.Module):
    """Shared plumbing for recurrent encoder/decoder modules.

    Stores the common hyper-parameters, an input dropout layer, and the
    RNN cell *class* (``nn.LSTM`` or ``nn.GRU``) selected by name.
    Subclasses instantiate ``self.rnn_cell`` themselves and must override
    ``forward``.
    """

    # Mapping from lower-cased cell name to the torch RNN class.
    _CELLS = {'lstm': nn.LSTM, 'gru': nn.GRU}

    def __init__(self, vocab_size, hidden_size, input_dropout_p, n_layers, rnn_cell):
        super().__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.input_dropout_p = input_dropout_p
        self.input_dropout = nn.Dropout(input_dropout_p)
        try:
            self.rnn_cell = self._CELLS[rnn_cell.lower()]
        except KeyError:
            raise ValueError("Unsupported RNN Cell: {0}".format(rnn_cell))

    def forward(self, *args, **kwargs):
        raise NotImplementedError()
| 735 | 32.454545 | 85 | py |
PaperRobot | PaperRobot-master/New paper writing/memory_generator/utils.py | import torch
import copy
import math
import torch.nn as nn
import torch.nn.functional as F
def clones(module, N):
    """Return an ``nn.ModuleList`` of N independent deep copies of *module*."""
    bank = nn.ModuleList()
    for _ in range(N):
        bank.append(copy.deepcopy(module))
    return bank
class MemoryComponent(nn.Module):
    """Multi-hop memory attention over an encoded source.

    Each hop computes additive (Bahdanau-style) attention between the
    running query ``u`` and the memory ``src``, then adds the attended
    context back into the query before the next hop. Per-hop weights are
    independent deep copies (see ``clones``).
    """
    def __init__(self, hop, h, d_model, dropout_p):
        # hop: number of attention hops; h and dropout_p are stored/accepted
        # but only `hop` shapes the layer stacks below.
        super(MemoryComponent, self).__init__()
        self.max_hops = hop
        self.h = h
        # v^T scoring vector, one per hop.
        vt = nn.Linear(d_model, 1)
        self.vt_layers = clones(vt, hop)
        # Projection of the memory (encoder) states, one per hop.
        Wih = nn.Linear(d_model, d_model)
        self.Wih_layers = clones(Wih, hop)
        # Projection of the query state, one per hop.
        Ws = nn.Linear(d_model, d_model)
        self.Ws_layers = clones(Ws, hop)
        # Projection of the scalar coverage value (shared across hops).
        self.Wc = nn.Linear(1, d_model)
    def forward(self, query, src, src_mask, cov_mem=None):
        """One forward pass of all hops.

        query: appears to be (len, batch, d_model) given the transpose —
        TODO confirm against the caller. src: (batch, max_enc_len, d_model).
        src_mask: (batch, max_enc_len), nonzero at padding positions.
        cov_mem: optional per-position coverage scalars.
        Returns (context transposed back to the query layout, last hop's
        attention weights).
        """
        u = query.transpose(0, 1)
        batch_size, max_enc_len = src_mask.size()
        for i in range(self.max_hops ):
            enc_proj = self.Wih_layers[i](src.view(batch_size * max_enc_len, -1)).view(batch_size, max_enc_len, -1)
            # Broadcast the projected query over every memory slot.
            dec_proj = self.Ws_layers[i](u).expand_as(enc_proj)
            if cov_mem is not None:
                cov_proj = self.Wc(cov_mem.view(-1, 1)).view(batch_size, max_enc_len, -1)
                e_t = self.vt_layers[i](torch.tanh(enc_proj + dec_proj + cov_proj).view(batch_size * max_enc_len, -1))
            else:
                e_t = self.vt_layers[i](torch.tanh(enc_proj + dec_proj).view(batch_size * max_enc_len, -1))
            term_attn = e_t.view(batch_size, max_enc_len)
            del e_t
            # In-place: padding positions get -inf so softmax zeroes them.
            term_attn.data.masked_fill_(src_mask.data.byte(), -float('inf'))
            term_attn = F.softmax(term_attn, dim=1)
            # Weighted sum of memory slots: (batch, 1, d_model).
            term_context = term_attn.unsqueeze(1).bmm(src)
            # Feed the context back into the query for the next hop.
            u = u + term_context
        return term_context.transpose(0, 1), term_attn
| 1,774 | 38.444444 | 118 | py |
PaperRobot | PaperRobot-master/New paper writing/memory_generator/Encoder.py | import torch.nn as nn
from .baseRNN import BaseRNN
class EncoderRNN(BaseRNN):
    """Recurrent encoder over token ids with length-aware packing.

    ``rnn_type`` selects the cell class via BaseRNN ('gru' or 'lstm').
    The embedding layer is injected so it can be shared with other modules.
    """
    def __init__(self, vocab_size, embedding, hidden_size, input_dropout_p,
                 n_layers=1, bidirectional=True, rnn_type='gru'):
        super(EncoderRNN, self).__init__(vocab_size, hidden_size, input_dropout_p, n_layers, rnn_type)
        self.embedding = embedding
        self.rnn = self.rnn_cell(hidden_size, hidden_size, n_layers,
                                 batch_first=True, bidirectional=bidirectional)
    def forward(self, source, input_lengths=None):
        """Encode a padded batch; returns (outputs, final hidden state, pad mask).

        source: (batch, max_len) token ids, 0 = PAD; input_lengths must be
        sorted descending for pack_padded_sequence — presumably guaranteed
        by the collate function (see adjust_single_sent_order).
        """
        # get mask for location of PAD
        mask = source.eq(0).detach()
        embedded = self.embedding(source)
        embedded = self.input_dropout(embedded)
        # Pack so the RNN skips padding positions entirely.
        embedded = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths, batch_first=True)
        output, hidden = self.rnn(embedded)
        output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
        # print('o', output.size())
        return output, hidden, mask
class TermEncoder(nn.Module):
    """Embed term ids and report which positions are padding.

    Unlike EncoderRNN there is no recurrence: the output is simply the
    (dropout-regularized) embedding of each id, plus a boolean mask that is
    True where the input id is 0 (PAD).
    """

    def __init__(self, embedding, input_dropout_p):
        super().__init__()
        self.embedding = embedding
        self.input_dropout = nn.Dropout(input_dropout_p)

    def forward(self, term):
        # id 0 marks padding; detach so the mask carries no gradient.
        pad_mask = term.eq(0).detach()
        vectors = self.input_dropout(self.embedding(term))
        return vectors, pad_mask
| 1,429 | 31.5 | 102 | py |
PaperRobot | PaperRobot-master/Existing paper reading/test.py | from __future__ import division
from __future__ import print_function
import time
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils import data
import os, sys, math, pickle, gc
from utils.utils import convert_index, get_subgraph, adjust_sent_order, load_dict, mean_rank, convert_idx2name, write_triples
from utils.data_loader import LinkPredictionDataset, LinkTestTotal, LinkTestDataset
from model.GATA import GATA
from collections import OrderedDict
# Command-line interface for the link-prediction evaluation script.
# (A second, identical ArgumentParser() was constructed here and immediately
# discarded — removed.)
parser = argparse.ArgumentParser()
parser.add_argument(
    '--data_dir', type=str, default='paper_reading/'
)
parser.add_argument(
    '--gpu', default='1', type=int, help='default is 1. set 0 to disable use gpu.'
)
parser.add_argument(
    '--batch_size', type=int, default=400, help='Size of a single batch'
)
parser.add_argument(
    "--model", default="models/GATA/best_dev_model.pth.tar",
    help="Model location"
)
args = parser.parse_args()
# Select device; --gpu 0 forces CPU even when CUDA is available.
device = torch.device('cuda:0' if torch.cuda.is_available() and args.gpu == 1 else 'cpu')
# Initialize: load the dataset snapshot produced by train.py.
# (with-blocks close the handles the original pickle.load(open(...)) leaked;
# a duplicated `num_ent = state['num_ent']` line was also removed.)
with open('dataset.pth', 'rb') as ds_f:
    state = pickle.load(ds_f)
parameters = state['parameters']
graph = state['graph']
text_f = state['text_f']
mappings = state['mappings']
num_ent = state['num_ent']
num_rel = state['num_rel']
id2ent = state['id2ent']
print("finish load")
rel_f = os.path.join(args.data_dir, 'relation2id.txt')
id2rel, _ = load_dict(rel_f)
name_f = os.path.join(args.data_dir, 'term.pth')
with open(name_f, "rb") as name_file:
    ent2name = pickle.load(name_file)
# Load Positive and Negative Examples
params = {'batch_size': args.batch_size, 'shuffle': True, 'collate_fn': adjust_sent_order}
train_set = LinkPredictionDataset(os.path.join(args.data_dir, 'train2id.txt'), text_f, id2ent, num_ent)
train_triple_dict = train_set.get_triple_dict()
train_generator = data.DataLoader(train_set, **params)
print('Finish loading train')
params_test = {'batch_size': args.batch_size, 'shuffle': False, 'collate_fn': adjust_sent_order}
test_set = LinkTestTotal(os.path.join(args.data_dir, 'test2id.txt'), num_ent)
print('Finish loading test')
# Margin-ranking target: positive triples must score lower than negatives.
y = torch.FloatTensor([-1])
y = y.to(device)
# Initialize Model
model = GATA(**parameters)
model.to(device)
def test():
    """Rank every entity as candidate head/tail for each test triple.

    Writes the top-10 predicted triples (named) to test_top10.txt and
    prints mean/median rank and hit@10. Relies on module-level globals:
    model, test_set, text_f, id2ent, ent2name, id2rel, params_test,
    train_triple_dict, graph, device, num_ent.
    """
    print('Testing...')
    # model.cpu()
    model.eval()
    output_file = open('test_top10.txt', 'w')
    hitk_all = 0
    mean_rank_head = []
    mean_rank_tail = []
    # NOTE(review): never used below.
    all_named_triples = set()
    for batch_idx, (new_head_triple, new_tail_triple, correct) in enumerate(test_set):
        t_current = time.time()
        print("Current: ", batch_idx, "Total: ", len(test_set))
        # One dataset per gold triple: all head-corrupted and tail-corrupted
        # variants (num_ent of each). Local `test` shadows this function name.
        test = LinkTestDataset(new_head_triple, new_tail_triple, text_f, id2ent)
        test_generator = data.DataLoader(test, **params_test)
        scores_heads = []
        scores_tails = []
        for current_idx, instance in enumerate(test_generator):
            head, tail, hht_bef, htt_bef, tht_bef, ttt_bef = instance
            head = head.to(device)
            tail = tail.to(device)
            # text information
            hht = list(map(lambda x:x.to(device),hht_bef[0:3]))
            htt = list(map(lambda x:x.to(device),htt_bef[0:3]))
            tht = list(map(lambda x:x.to(device),tht_bef[0:3]))
            ttt = list(map(lambda x:x.to(device),ttt_bef[0:3]))
            # 1-hop subgraph around the head-corrupted batch.
            batch_nodes, batch_adj = get_subgraph(head, train_triple_dict, graph)
            # get relative location according to the batch_nodes
            shifted_head = convert_index([head], batch_nodes)
            batch_nodes = torch.LongTensor(batch_nodes.tolist()).to(device)
            batch_adj = torch.from_numpy(batch_adj).to(device)
            shifted_head = torch.LongTensor(shifted_head[0]).to(device)
            score_head = model(batch_nodes, batch_adj, head, shifted_head, hht[0], hht[1], hht[2],
                               htt[0], htt[1], htt[2])
            scores_heads.append(score_head.detach())
            del batch_nodes, batch_adj
            # Same procedure for the tail-corrupted batch.
            batch_nodes, batch_adj = get_subgraph(tail, train_triple_dict, graph)
            # get relative location according to the batch_nodes
            shifted_tail = convert_index([tail], batch_nodes)
            shifted_tail = torch.LongTensor(shifted_tail[0]).to(device)
            batch_nodes = torch.LongTensor(batch_nodes.tolist()).to(device)
            batch_adj = torch.from_numpy(batch_adj).to(device)
            score_tail = model(batch_nodes, batch_adj, tail, shifted_tail, tht[0], tht[1], tht[2],
                               ttt[0], ttt[1], ttt[2])
            scores_tails.append(score_tail.detach())
            del batch_nodes, batch_adj, head, shifted_head, hht, htt, tail, shifted_tail, tht, ttt
            sys.stdout.write(
                '%d batches processed.\r' %
                (current_idx)
            )
        # get head scores: lower score = better rank (argsort ascending).
        scores_head = torch.cat(scores_heads, 0)
        scores_head = torch.sum(scores_head, 1).squeeze()
        assert scores_head.size(0) == num_ent
        sorted_head_idx = np.argsort(scores_head.tolist())
        topk_head = new_head_triple[sorted_head_idx][:10]
        # get tail scores
        scores_tail = torch.cat(scores_tails, 0)
        scores_tail = torch.sum(scores_tail, 1).squeeze()
        sorted_tail_idx = np.argsort(scores_tail.tolist())
        topk_tail = new_tail_triple[sorted_tail_idx][:10]
        # predict and output top 10 triples
        named_triples_head = convert_idx2name(topk_head, id2ent, ent2name, id2rel)
        named_triples_tail = convert_idx2name(topk_tail, id2ent, ent2name, id2rel)
        write_triples(named_triples_head, output_file)
        write_triples(named_triples_tail, output_file)
        # 0-based rank of the gold head/tail among all candidates.
        mean_rank_result_head = mean_rank(new_head_triple, sorted_head_idx, correct, 0)
        mean_rank_result_tail = mean_rank(new_tail_triple, sorted_tail_idx, correct, 1)
        if mean_rank_result_head <= 10:
            hitk_all += 1
        if mean_rank_result_tail <= 10:
            hitk_all += 1
        mean_rank_head.append(mean_rank_result_head)
        mean_rank_tail.append(mean_rank_result_tail)
        del test
        gc.collect()
    output_file.close()
    print('Final mean rank for head is %f'%(np.mean(mean_rank_head)))
    print('Final median rank for head is %f' % np.median(mean_rank_head))
    print('Final mean rank for tail is %f' % (np.mean(mean_rank_tail)))
    print('Final median rank for tail is %f' % np.median(mean_rank_tail))
    # NOTE(review): denominator uses len+1 — presumably to avoid division by
    # zero, but it slightly deflates hit@10; confirm intended formula.
    print('Final hit10 is %f'%(hitk_all/(len(mean_rank_tail)+1)/2))
    return hitk_all, mean_rank_head, mean_rank_tail
t_total = time.time()
print('loading model from:', args.model)
# Map GPU-saved tensors onto CPU when running without a GPU.
if args.gpu:
    state = torch.load(args.model)
else:
    state = torch.load(args.model, map_location=lambda storage, loc: storage)
state_dict = state['state_dict']
model.load_state_dict(state_dict)
# Checkpoint metadata (epoch / best dev loss) — loaded but unused here.
start_epoch = state['epoch']
best_dev = state['best_prec1']
test()
print('Test Finished!')
print('Total time elapsed: {:.4f}s'.format(time.time() - t_total))
| 7,048 | 38.161111 | 125 | py |
PaperRobot | PaperRobot-master/Existing paper reading/train.py | from __future__ import division
from __future__ import print_function
import time
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils import data
import os, sys, math, pickle, gc
from utils.utils import convert_index, get_subgraph, adjust_sent_order, load_dict, load_graph, load_text, mean_rank
from utils.data_loader import LinkPredictionDataset, LinkTestTotal, LinkTestDataset
from model.GATA import GATA
from collections import OrderedDict
# Command-line interface for training the GATA link-prediction model.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--data_dir', type=str, default='paper_reading/'
)
parser.add_argument(
    '--epochs', type=int, default=100, help='Number of epochs to train.'
)
parser.add_argument(
    '--lr', type=float, default=0.001, help='Initial learning rate.'
)
parser.add_argument(
    '--gpu', default='1', type=int, help='default is 1. set 0 to disable use gpu.'
)
parser.add_argument(
    '--weight_decay', type=float, default=5e-4, help='Weight decay (L2 loss on parameters).'
)
parser.add_argument(
    '--batch_size', type=int, default=50, help='Size of a single batch'
)
parser.add_argument(
    '--hidden', type=int, default=8, help='Number of hidden units.'
)
parser.add_argument(
    '--nb_heads', type=int, default=8, help='Number of head attentions.'
)
parser.add_argument(
    '--dropout', type=float, default=0.6, help='Dropout rate (1 - keep probability).'
)
parser.add_argument(
    '--alpha', type=float, default=0.2, help='Alpha for the leaky_relu.'
)
parser.add_argument(
    '--freq', default='3', type=int, help='Min freq.'
)
parser.add_argument(
    '--max_len', default='100', type=int, help='Max length of context text'
)
parser.add_argument(
    '--margin', type=int, default=1, help='Margin Value'
)
parser.add_argument(
    '--patience', type=int, default=30, help='Patience'
)
parser.add_argument(
    '--load', action='store_true', help='Load dataset.'
)
parser.add_argument(
    '--cont', action='store_true', help='Continue training.'
)
parser.add_argument(
    "--model", default="models/GATA/best_dev_model.pth.tar",
    help="Model location"
)
parser.add_argument(
    "--model_dp", default="models/",
    help="model directory path"
)
args = parser.parse_args()
device = torch.device('cuda:0' if torch.cuda.is_available() and args.gpu == 1 else 'cpu')
# Initialize
# load previously saved data
if args.load:
    # Fast path: reuse the preprocessed snapshot from a previous run.
    # NOTE(review): these open() handles are never closed explicitly.
    state = pickle.load(open('dataset.pth', 'rb'))
    parameters = state['parameters']
    graph = state['graph']
    text_f = state['text_f']
    mappings = state['mappings']
    num_ent = state['num_ent']
    num_rel = state['num_rel']
    id2ent = state['id2ent']
    print("finish load")
else:
    # Build everything from the raw data files, then snapshot to dataset.pth.
    embedding_dim = args.nb_heads * args.hidden
    num_rel = int(open(os.path.join(args.data_dir, 'relation2id.txt')).readline())
    text_file = os.path.join(args.data_dir, 'entity_text_title_tokenized.json')
    mappings, text_f = load_text(text_file, args.freq, args.max_len)
    ent_f = os.path.join(args.data_dir, 'entity2id.txt')
    id2ent, num_ent = load_dict(ent_f)
    # Load Graph Data
    graph, _ = load_graph(os.path.join(args.data_dir, 'train2id.txt'), num_ent)
    # Parse parameters
    parameters = OrderedDict()
    parameters['emb_dim'] = embedding_dim
    parameters['hid_dim'] = args.hidden
    parameters['out_dim'] = args.hidden*args.nb_heads
    parameters['num_voc'] = len(mappings['idx2word'])
    parameters['num_heads'] = args.nb_heads
    parameters['num_ent'] = num_ent
    parameters['num_rel'] = num_rel
    parameters['dropout'] = args.dropout
    parameters['alpha'] = args.alpha
    parameters['margin'] = args.margin
    state = {
        'parameters': parameters,
        'graph': graph,
        'text_f': text_f,
        'id2ent': id2ent,
        'mappings': mappings,
        'num_ent': num_ent,
        'num_rel': num_rel
    }
    pickle.dump(state, open('dataset.pth', "wb"))
    print("finish_dump")
# Model directory name encodes all hyper-parameters except the last one.
model_dir = args.model_dp
model_name = ['GATA']
for k, v in parameters.items():
    if v == "":
        continue
    model_name.append('='.join((k, str(v))))
model_dir = os.path.join(model_dir, ','.join(model_name[:-1]))
os.makedirs(model_dir, exist_ok=True)
# Load Positive and Negative Examples
params = {'batch_size': args.batch_size, 'shuffle': True, 'collate_fn': adjust_sent_order}
train_set = LinkPredictionDataset(os.path.join(args.data_dir, 'train2id.txt'), text_f, id2ent, num_ent)
train_triple_dict = train_set.get_triple_dict()
train_generator = data.DataLoader(train_set, **params)
print('Finish loading train')
valid_set = LinkPredictionDataset(os.path.join(args.data_dir, 'valid2id.txt'), text_f, id2ent, num_ent)
valid_generator = data.DataLoader(valid_set, **params)
print('Finish loading valid')
params_test = {'batch_size': args.batch_size, 'shuffle': False, 'collate_fn': adjust_sent_order}
test_set = LinkTestTotal(os.path.join(args.data_dir, 'test2id.txt'), num_ent)
print('Finish loading test')
# Margin-ranking target: positive triples must score lower than negatives.
y = torch.FloatTensor([-1])
y = y.to(device)
# Initialize Model
model = GATA(**parameters)
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.weight_decay)
model.to(device)
# Train
def train(epoch):
    """Run one training epoch over train_generator; returns summed batch loss.

    Uses module-level globals: model, optimizer, train_generator, device,
    train_triple_dict, graph, y, args.

    Fix: the original never called ``optimizer.zero_grad()``, so gradients
    from every previous batch accumulated into each ``optimizer.step()``.
    """
    print("Epoch", epoch)
    t = time.time()
    model.train(True)
    torch.set_grad_enabled(True)
    eloss = 0
    for batch_idx, instance in enumerate(train_generator):
        pos, neg, pht_bef, ptt_bef, nht_bef, ntt_bef = instance
        pos = pos.to(device)
        neg = neg.to(device)
        # text information
        pht = list(map(lambda x:x.to(device),pht_bef[0:3]))
        ptt = list(map(lambda x:x.to(device),ptt_bef[0:3]))
        nht = list(map(lambda x:x.to(device),nht_bef[0:3]))
        ntt = list(map(lambda x:x.to(device),ntt_bef[0:3]))
        # 1-hop subgraph around the positive batch (shared with negatives).
        batch_nodes, batch_adj = get_subgraph(pos, train_triple_dict, graph)
        # get relative location according to the batch_nodes
        shifted_pos, shifted_neg = convert_index([pos, neg], batch_nodes)
        batch_nodes = torch.LongTensor(batch_nodes.tolist()).to(device)
        batch_adj = torch.from_numpy(batch_adj).to(device)
        shifted_pos = torch.LongTensor(shifted_pos).to(device)
        shifted_neg = torch.LongTensor(shifted_neg).to(device)
        score_pos = model(batch_nodes, batch_adj, pos, shifted_pos, pht[0], pht[1], pht[2],
                          ptt[0], ptt[1], ptt[2])
        score_neg = model(batch_nodes, batch_adj, neg, shifted_neg, nht[0], nht[1], nht[2],
                          ntt[0], ntt[1], ntt[2])
        # y = -1: drive score_pos below score_neg by at least `margin`.
        loss_train = F.margin_ranking_loss(score_pos, score_neg, y, margin=args.margin)
        sys.stdout.write(
            '%d batches processed. current train batch loss: %f\r' %
            (batch_idx, loss_train.item())
        )
        eloss += loss_train.item()
        # Clear stale gradients before back-propagating this batch.
        optimizer.zero_grad()
        loss_train.backward()
        del batch_nodes, batch_adj, shifted_pos, shifted_neg, pos, neg, pht_bef, ptt_bef, nht_bef, ntt_bef
        optimizer.step()
        if batch_idx%500==0:
            gc.collect()
    print('\n')
    print('Epoch: {:04d}'.format(epoch+1),
          'loss_train: {:.4f}'.format(eloss/(batch_idx+1)),
          'time: {:.4f}s'.format(time.time() - t))
    return eloss
# Valid
def validate(epoch):
    """Compute the summed margin-ranking loss over the validation set.

    Mirrors ``train`` without backprop; caller (the main loop) assumes the
    model is re-entered in eval mode here and uses the returned loss for
    early stopping.
    """
    t = time.time()
    model.eval()
    torch.set_grad_enabled(False)
    eloss = 0
    for batch_idx, instance in enumerate(valid_generator):
        pos, neg, pht_bef, ptt_bef, nht_bef, ntt_bef = instance
        pos = pos.to(device)
        neg = neg.to(device)
        # text information
        pht = list(map(lambda x:x.to(device),pht_bef[0:3]))
        ptt = list(map(lambda x:x.to(device),ptt_bef[0:3]))
        nht = list(map(lambda x:x.to(device),nht_bef[0:3]))
        ntt = list(map(lambda x:x.to(device),ntt_bef[0:3]))
        batch_nodes, batch_adj = get_subgraph(pos, train_triple_dict, graph)
        # get relative location according to the batch_nodes
        shifted_pos, shifted_neg = convert_index([pos, neg], batch_nodes)
        batch_nodes = torch.LongTensor(batch_nodes.tolist()).to(device)
        batch_adj = torch.from_numpy(batch_adj).to(device)
        shifted_pos = torch.LongTensor(shifted_pos).to(device)
        shifted_neg = torch.LongTensor(shifted_neg).to(device)
        score_pos = model(batch_nodes, batch_adj, pos, shifted_pos, pht[0], pht[1], pht[2],
                          ptt[0], ptt[1], ptt[2])
        score_neg = model(batch_nodes, batch_adj, neg, shifted_neg, nht[0], nht[1], nht[2],
                          ntt[0], ntt[1], ntt[2])
        # Name kept from train() for symmetry; this is the validation loss.
        loss_train = F.margin_ranking_loss(score_pos, score_neg, y, margin=args.margin)
        sys.stdout.write(
            '%d batches processed. current valid batch loss: %f\r' %
            (batch_idx, loss_train.item())
        )
        eloss += loss_train.item()
        del batch_nodes, batch_adj, shifted_pos, shifted_neg, pos, neg, pht_bef, ptt_bef, nht_bef, ntt_bef
        if batch_idx%500==0:
            gc.collect()
    print('Epoch: {:04d}'.format(epoch+1),
          'loss_valid: {:.4f}'.format(eloss/(batch_idx+1)),
          'time: {:.4f}s'.format(time.time() - t))
    return eloss
# Main training loop with early stopping on validation loss.
t_total = time.time()
best_dev = math.inf
bad_counter = 0
start_epoch = 0
if args.cont:
    # Resume model + optimizer state from a previous checkpoint.
    print('loading model from:', args.model)
    if args.gpu:
        state = torch.load(args.model)
    else:
        state = torch.load(args.model, map_location=lambda storage, loc: storage)
    state_dict = state['state_dict']
    model.load_state_dict(state_dict)
    state_dict = state['optimizer']
    optimizer.load_state_dict(state_dict)
    start_epoch = state['epoch']
    best_dev = state['best_prec1']
for epoch in range(args.epochs):
    train(start_epoch+epoch)
    torch.cuda.empty_cache()
    current_valid = validate(start_epoch+epoch)
    torch.cuda.empty_cache()
    if current_valid < best_dev:
        best_dev=current_valid
        print('new best score on dev: %.4f' % best_dev)
        print('saving the current model to disk...')
        # Fix: the original dict listed 'optimizer' twice, serializing the
        # optimizer state_dict a second time only for it to be discarded.
        state = {
            'epoch': start_epoch+epoch + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_dev,
            'optimizer': optimizer.state_dict(),
            'parameters': parameters,
            'mappings': mappings
        }
        torch.save(state, os.path.join(model_dir, 'best_dev_model.pth.tar'))
        bad_counter = 0
    else:
        # Stop after `patience` consecutive epochs without improvement.
        bad_counter += 1
        if bad_counter == args.patience:
            break
print('Optimization Finished!')
print('Total time elapsed: {:.4f}s'.format(time.time() - t_total))
PaperRobot | PaperRobot-master/Existing paper reading/utils/data_loader.py | import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
from .utils import generate_corrupt_triples, load_triple_dict
from torch.utils import data
class LinkPredictionDataset(Dataset):
    """Gold/corrupted triple pairs plus vectorized entity text.

    Each item is ``(pos, neg, pos_head_text, pos_tail_text, neg_head_text,
    neg_tail_text)`` where the text fields come from ``txt_file`` keyed by
    the entity names in ``id2ent``.
    """

    def __init__(self, kg_file, txt_file, id2ent, num_ent):
        triples, fwd, rev = load_triple_dict(kg_file)
        self.triple_dict = fwd
        self.triple_dict_rev = rev
        self.texts = txt_file
        self.id2ent = id2ent
        self.num_ent = num_ent
        # One Bernoulli-sampled corrupted triple per gold triple.
        negatives = generate_corrupt_triples(triples, num_ent, fwd, rev)
        self.num_neg = len(negatives)
        self.triples = np.array(triples)
        self.negative = np.array(negatives)

    def __len__(self):
        return len(self.triples)

    def _text_of(self, ent_id):
        # Vectorized description for a single entity id.
        return self.texts[self.id2ent[ent_id.item()]]

    def __getitem__(self, idx):
        pos_row = self.triples[idx]
        neg_row = self.negative[idx]
        return (pos_row, neg_row,
                self._text_of(pos_row[0]), self._text_of(pos_row[1]),
                self._text_of(neg_row[0]), self._text_of(neg_row[1]))

    def get_triples(self):
        return self.triples

    def get_num_ent(self):
        return self.num_ent

    def get_triple_dict(self):
        return self.triple_dict

    def get_triple_dict_rev(self):
        return self.triple_dict_rev
class LinkTestTotal:
    """For each gold test triple, yield all head- and tail-corrupted variants.

    Item ``idx`` returns ``(head_corrupted, tail_corrupted, gold)`` where the
    corrupted arrays have one row per entity id in ``[0, num_ent)``.
    """

    def __init__(self, kg_file, num_ent):
        self.triples, _, _ = load_triple_dict(kg_file)
        self.num_ent = num_ent
        self.triples = np.array(self.triples)
        # Column vector [0, 1, ..., num_ent-1] reused for every item.
        self.new_head = np.expand_dims(np.array(list(range(self.num_ent))), axis=1)

    def __len__(self):
        return len(self.triples)

    def __getitem__(self, idx):
        gold = self.triples[idx, :]
        tiled = np.repeat([gold], self.num_ent, axis=0)
        # Substitute every entity id for the head...
        head_corrupted = np.concatenate([self.new_head, tiled[:, 1:]], axis=1)
        # ...and for the tail (in place on the tiled copy).
        tail_corrupted = tiled
        tail_corrupted[:, 1] = self.new_head.squeeze()
        return head_corrupted, tail_corrupted, np.array(gold)
class LinkTestDataset(Dataset):
    """Pairs rows of head- and tail-corrupted triples with their entity text.

    Item ``idx`` returns the two triple rows followed by the vectorized text
    of (head-corrupted head, head-corrupted tail, tail-corrupted head,
    tail-corrupted tail).
    """

    def __init__(self, head_triple, tail_triple, txt_file, id2ent):
        self.head_triple = head_triple
        self.tail_triple = tail_triple
        self.texts = txt_file
        self.id2ent = id2ent

    def __len__(self):
        return len(self.head_triple)

    def __getitem__(self, idx):
        h_row = self.head_triple[idx]
        t_row = self.tail_triple[idx]

        def text_of(ent_id):
            return self.texts[self.id2ent[ent_id.item()]]

        return (h_row, t_row,
                text_of(h_row[0]), text_of(h_row[1]),
                text_of(t_row[0]), text_of(t_row[1]))
| 2,949 | 34.119048 | 116 | py |
PaperRobot | PaperRobot-master/Existing paper reading/utils/utils.py | import numpy as np
from sys import getsizeof
import torch
import math
import networkx as nx
import json
import pickle
import codecs
from collections import defaultdict, Counter
class KnowledgeGraph:
    """Thin wrapper around a networkx DiGraph built from (head, tail, rel) triples.

    Triples are accumulated in ``self.triples`` by one of the ``load_*``
    methods and materialized into ``self.G`` by one of the ``triple2graph*``
    methods. Note the two load paths store different third elements:
    ``load_file`` keeps the raw weight string (for add_weighted_edges_from),
    the ``*_noweight`` loaders store an attribute dict {'label': ...}
    (for add_edges_from).
    """
    def __init__(self):
        self.G = nx.DiGraph()
        self.triples = []
    def load_file(self, fn, delimiter, threshold):
        # Keep only edges whose integer weight is at least `threshold`.
        # NOTE(review): the handle is never closed.
        fo = open(fn)
        for line in fo:
            line = line.strip()
            if line:
                ent1, ent2, weight = line.split(delimiter)
                if int(weight) >= threshold:
                    self.triples.append((ent1, ent2, weight))
    def load_triple_noweight(self, triples):
        # Ingest pre-parsed triples; third element becomes the edge label.
        for t in triples:
            ent1, ent2, weight = t
            self.triples.append((ent1, ent2, {'label':weight}))
    def load_file_noweight(self, fn, delimiter, threshold):
        # Same as load_file but skips the count header line, converts ids to
        # int, and ignores `threshold`.
        fo = open(fn)
        _ = fo.readline()
        for line in fo:
            line = line.strip()
            if line:
                ent1, ent2, weight = line.split(delimiter)
                self.triples.append((int(ent1), int(ent2), {'label': weight}))
    def filter_small_nodes(self, threshold):
        # this will filter out all the nodes that have less outgoing edges
        to_delete = []
        for node in self.G.nodes():
            if self.G.degree()[node] < threshold:
                to_delete.append(node)
        for n in to_delete:
            self.G.remove_node(n)
    def node_info(self, s):
        # Split "label<type>" into (label, type).
        seg1 = s.find('<')
        label1 = s[:seg1]
        type1 = s[seg1 + 1:-1]
        return label1, type1
    def triple2graph(self):
        self.G.add_weighted_edges_from(self.triples)
    def triple2graph_noweight(self):
        self.G.add_edges_from(self.triples)
def new_KG(fn):
    """Build a KnowledgeGraph from the headerless-id triple file *fn*."""
    kg = KnowledgeGraph()
    kg.load_file_noweight(fn, '\t', 0)
    kg.triple2graph_noweight()
    return kg
def load_triples(kg_f):
    """Read tab-separated integer triples from *kg_f*.

    Lines that do not split into exactly three fields are skipped.
    Returns a list of ``[head, tail, relation]`` int lists.

    Fixes: the original built the list but never returned it (callers always
    got None), and leaked the file handle.
    """
    triples = []
    with open(kg_f) as fo:
        for line in fo:
            ele = line.strip().split('\t')
            if len(ele) == 3:
                triples.append(list(map(int, ele)))
    return triples
def load_dict(f):
    """Load an id->name mapping file.

    The first line holds the entry count; each following non-empty line is
    ``name<TAB>id``. Returns ``(mapping, count)``.

    Fixes: close the file via ``with`` (handle was leaked) and skip blank
    lines (a trailing newline previously raised ValueError on unpacking).
    """
    d = {}
    with open(f) as fo:
        num = int(fo.readline().strip())
        for line in fo:
            line = line.strip()
            if not line:
                continue
            name, idd = line.split('\t')
            d[int(idd)] = name
    return d, num
def load_graph(kbf, num_ent):
    """Build the full KnowledgeGraph from *kbf* plus its dense adjacency tensor.

    Rows/columns of the adjacency follow entity ids 0..num_ent-1 — assumes
    every id in that range appears as a node; TODO confirm.
    """
    graph = new_KG(kbf)
    adj = torch.FloatTensor(nx.adjacency_matrix(graph.G, nodelist=range(num_ent)).todense())
    return graph, adj
def load_kg_embeddings(f):
    """Load pretrained KG embeddings stored as JSON in file *f*.

    Fix: the original leaked the file handle (`open` without close) and
    round-tripped through read()+loads; `with` + json.load does both cleanly.
    """
    with open(f) as fo:
        return json.load(fo)
def load_triple_dict(f):
    """Read integer triples and index them by head and by tail.

    Returns ``(triples, triple_dict, triple_dict_rev)`` where
    ``triple_dict[h]`` is a set of ``(t, r)`` and ``triple_dict_rev[t]``
    a set of ``(h, r)``. Malformed lines are skipped.

    Fix: the original leaked the file handle; closed via ``with``.
    """
    triples = []
    triple_dict = defaultdict(set)
    triple_dict_rev = defaultdict(set)
    with open(f) as fo:
        for line in fo:
            ele = line.strip().split('\t')
            if len(ele) == 3:
                h, t, r = map(int, ele)
                triples.append([h, t, r])
                triple_dict[h].add((t, r))
                triple_dict_rev[t].add((h, r))
    return triples, triple_dict, triple_dict_rev
def create_mapping(freq, min_freq=0, max_vocab=50000):
    """Turn a Counter into item<->id mappings with reserved pad/unk slots.

    Items must occur strictly more than *min_freq* times; at most
    *max_vocab* items are considered, in descending-frequency order.
    Ids for kept items are ``rank + 2`` (ranks of skipped items are not
    reused, matching the original behavior).
    """
    ranked = freq.most_common(max_vocab)
    item2id = {
        '<pad>': 0,
        '<unk>': 1
    }
    offset = len(item2id)
    for rank, (item, count) in enumerate(ranked):
        if count > min_freq:
            item2id[item] = rank + offset
    id2item = {idx: item for item, idx in item2id.items()}
    return item2id, id2item
def create_dict(item_list):
    """Return a Counter of *item_list*; asserts the input is exactly a list."""
    assert type(item_list) is list
    return Counter(item_list)
def prepare_mapping(words, min_freq):
    """Lower-case *words* and build word<->id vocabulary mappings.

    Prints a summary where the second figure is the total character count
    over the lower-cased words (original behavior, preserved).
    """
    lowered = [w.lower() for w in words]
    word2id, id2word = create_mapping(create_dict(lowered), min_freq)
    print("Found %i unique words (%i in total)" % (
        len(word2id), sum(len(x) for x in lowered)
    ))
    return {
        'word2idx': word2id,
        'idx2word': id2word
    }
def load_text(f, min_freq, max_len):
    """Load tokenized entity text from JSON file *f* and vectorize it.

    Each entity's sentences are flattened with '<eos>' markers, truncated to
    *max_len* tokens, and mapped to ids via a vocabulary built from all
    truncated texts. Returns ``(mappings, vectorized_text)``.

    Fix: the bare ``except:`` around the vocabulary lookup swallowed every
    exception (including KeyboardInterrupt); only a missing key should fall
    back to '<unk>'.

    NOTE(review): the vocabulary is built from lower-cased words but lookup
    uses the original casing, so capitalized tokens map to '<unk>' —
    confirm whether this is intended.
    """
    with open(f) as jsf:
        txt = json.load(jsf)
    words = []
    new_txt = {}
    for key in txt:
        tmp = []
        for sent in txt[key]:
            tmp.extend(sent)
            tmp.append('<eos>')
        # Truncate after flattening, so '<eos>' markers count toward max_len.
        tmp = tmp[:max_len]
        new_txt[key] = tmp
        words.extend(tmp)
    mappings = prepare_mapping(words, min_freq)
    word2idx = mappings["word2idx"]
    vectorize_txt = defaultdict(list)
    for key in new_txt:
        for w in new_txt[key]:
            try:
                vectorize_txt[key].append(word2idx[w])
            except KeyError:
                vectorize_txt[key].append(word2idx['<unk>'])
    return mappings, vectorize_txt
def bern(triple_dict, triple_dict_rev, tri):
    """Bernoulli corruption probabilities for triple *tri* (Wang et al. TransH).

    Returns ``(tph_ratio, hpt_ratio)``: tails-per-head and heads-per-tail
    shares used to decide whether to corrupt the head or the tail.
    """
    tails_per_head = len(triple_dict[tri[0]])
    heads_per_tail = len(triple_dict_rev[tri[1]])
    total = tails_per_head + heads_per_tail
    return tails_per_head / float(total), heads_per_tail / float(total)
def adjust_single_sent_order(t):
    """Sort a batch of id sequences by length (descending) and zero-pad them.

    Returns ``(padded, orig_positions, lengths)`` — exactly what
    pack_padded_sequence expects, plus the permutation needed to restore the
    original order afterwards.
    """
    decorated = sorted(((len(seq), pos, seq) for pos, seq in enumerate(t)), reverse=True)
    lengths, orig_pos, ordered = zip(*decorated)
    padded = torch.zeros(len(t), lengths[0]).long()
    for row, seq in enumerate(ordered):
        padded[row, 0:len(seq)] = torch.LongTensor(seq)
    return padded, torch.LongTensor(list(orig_pos)), torch.LongTensor(list(lengths))
def adjust_sent_order(l):
    """DataLoader collate_fn: batch positive/negative triples and their texts.

    Returns ``(pos, neg, pos_head_batch, pos_tail_batch, neg_head_batch,
    neg_tail_batch)`` where each text batch is the
    ``adjust_single_sent_order`` triple (padded ids, order, lengths).
    """
    pos, neg, ph_txt, pt_txt, nh_txt, nt_txt = zip(*l)
    text_batches = tuple(
        adjust_single_sent_order(batch) for batch in (ph_txt, pt_txt, nh_txt, nt_txt)
    )
    return (torch.LongTensor(pos), torch.LongTensor(neg)) + text_batches
def convert_index(blist, nodeslist):
    """Remap head/tail entity ids to their positions inside *nodeslist*.

    Each element of *blist* is a tensor of ``[head, tail, rel]`` rows; the
    relation column is carried through unchanged. The sort of *nodeslist*
    is computed once (it does not change between batches).
    """
    order = np.argsort(nodeslist)
    sorted_nodes = nodeslist[order]
    remapped = []
    for batch in blist:
        rows = batch.cpu().numpy()
        slot = np.searchsorted(sorted_nodes, rows[:, 0:2])
        local = np.take(order, slot, mode='clip')
        remapped.append(np.hstack((local, rows[:, 2][:, np.newaxis])))
    return remapped
def generate_corrupt_triples(pos, num_ent, triple_dict, triple_dict_rev):
    """Sample one corrupted triple per positive triple (Bernoulli strategy).

    For each triple, a random substitute entity is drawn, then the
    tph/hpt ratios from ``bern`` decide whether it replaces the head
    (probability tph) or the tail (probability hpt). Non-deterministic:
    uses the global numpy RNG.
    """
    neg = []
    for p in pos:
        sub = np.random.randint(num_ent)
        tph, hpt = bern(triple_dict, triple_dict_rev, p)
        # Default: corrupt the head.
        n = [sub, p[1], p[2]]
        chose = np.random.choice(2,1,p=[tph, hpt])
        if chose[0] == 1:
            # Corrupt the tail instead.
            n = [p[0], sub, p[2]]
        neg.append(n)
    return neg
def get_subgraph(triples, triple_dict, whole_graph):
    # Only handle 1-hop for now
    # Data Types for Nodes are INT
    """Collect the 1-hop neighborhood of every triple in the batch.

    *triples* rows appear to be tensors (``.item()`` is used on heads/tails
    — TODO confirm). Returns the node ids of the induced subgraph and the
    dense adjacency of those nodes taken from *whole_graph*.
    """
    in_graph = set()
    for triple in triples:
        head = triple[0]
        tail = triple[1]
        in_graph.add(tuple(triple))
        # All outgoing edges of the head and of the tail.
        for tri in triple_dict[head.item()]:
            single1 = (head, tri[0], tri[1])
            in_graph.add(single1)
        for tri in triple_dict[tail.item()]:
            single2 = (tail, tri[0], tri[1])
            in_graph.add(single2)
    # Build a temporary graph only to obtain the set/order of involved nodes.
    in_kg = KnowledgeGraph()
    in_kg.load_triple_noweight(in_graph)
    in_kg.triple2graph_noweight()
    included_nodes = list(in_kg.G)
    # Adjacency comes from the FULL graph, restricted to these nodes.
    adj_ingraph = nx.adjacency_matrix(whole_graph.G, nodelist=included_nodes).todense()
    return np.array(included_nodes), adj_ingraph
def mean_rank(triples, sorted_idx, correct, loc):
    """0-based rank of the gold entity after reordering *triples* by score.

    *loc* selects the column compared against ``correct`` (0 = head,
    1 = tail). The first matching row's position is the rank.
    """
    ranked = triples[sorted_idx]
    # print("rank is",rank)
    matches = np.argwhere(ranked[:, loc] == correct[loc])
    return matches[0][0]
def convert_idx2name(triples, id2ent, ent2name, id2rel):
    """Translate id triples into human-readable ``[head, tail, relation]`` lists.

    Entity ids go through two hops (id -> entity key -> display name);
    relation ids map directly to names.
    """
    return [
        [
            ent2name[id2ent[row[0].item()]],
            ent2name[id2ent[row[1].item()]],
            id2rel[row[2].item()],
        ]
        for row in triples
    ]
def write_triples(triple_list, wf):
    """Write each triple as one tab-separated line to the open file *wf*."""
    for triple in triple_list:
        wf.write('\t'.join(str(field) for field in triple) + '\n')
| 7,972 | 27.783394 | 104 | py |
PaperRobot | PaperRobot-master/Existing paper reading/model/TAT.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class TAT(nn.Module):
    """
    A Bi-LSTM text encoder with entity-conditioned attention pooling.

    The sentence is encoded by a bidirectional LSTM (hidden halves concatenate
    back to embedding_dim); attention scores against a projected entity
    embedding pool the step outputs into a single vector per example.
    """
    def __init__(self, embedding_dim, voc_size):
        super(TAT, self).__init__()
        self.hidden_dim = embedding_dim
        self.word_embeddings = nn.Embedding(voc_size, embedding_dim)
        # hidden_dim//2 per direction so the bi-directional output is hidden_dim.
        self.lstm = nn.LSTM(embedding_dim, self.hidden_dim//2, bidirectional=True)
        self.attF = nn.Linear(self.hidden_dim, self.hidden_dim)
    def forward(self, sentence, orders, lengths, ent_emb):
        """Encode *sentence* and attention-pool it with respect to *ent_emb*.

        sentence/lengths must be pre-sorted by descending length (see
        adjust_single_sent_order); *orders* restores the original row order
        after unpacking.
        """
        embedded = self.word_embeddings(sentence)
        padded_sent = pack_padded_sequence(embedded, lengths, batch_first=True)
        output = padded_sent
        output, hidden = self.lstm(output)
        output, _ = pad_packed_sequence(output, batch_first=True)
        # Undo the length-sort so rows line up with the entity embeddings.
        output = output[orders]
        # (batch, hidden) -> (batch, hidden, 1) attention query.
        att = torch.unsqueeze(self.attF(ent_emb), 2)
        att_score = F.softmax(torch.bmm(output, att), dim=1)
        # NOTE(review): squeeze() without a dim also drops the batch axis
        # when batch size is 1 — confirm callers never pass a single example.
        o = torch.squeeze(torch.bmm(output.transpose(1,2), att_score))
        return o
| 1,173 | 36.870968 | 82 | py |
PaperRobot | PaperRobot-master/Existing paper reading/model/graph_attention.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class GraphAttentionLayer(nn.Module):
    """
    Simple GAT layer, similar to https://arxiv.org/abs/1710.10903

    Projects node features with ``W``, builds pairwise attention logits from
    two learned vectors (``a1`` for the source node, ``a2`` for the target),
    masks them with the adjacency matrix, softmax-normalizes, and mixes the
    projected features. With ``concat=True`` the output is squashed through a
    sigmoid (intermediate heads); with ``concat=False`` it is returned raw.
    """
    def __init__(self, in_features, out_features, dropout, alpha, concat=True):
        super(GraphAttentionLayer, self).__init__()
        self.dropout = dropout
        self.in_features = in_features
        self.out_features = out_features
        self.alpha = alpha  # negative slope of the LeakyReLU on the logits
        self.concat = concat
        self.W = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor(in_features, out_features).type(torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True)
        self.a1 = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor(out_features, 1).type(torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True)
        self.a2 = nn.Parameter(nn.init.xavier_uniform_(torch.FloatTensor(out_features, 1).type(torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor), gain=np.sqrt(2.0)),requires_grad=True)
        self.leakyrelu = nn.LeakyReLU(self.alpha)
    def forward(self, input, adj):
        """input: (N, in_features) node features; adj: (N, N) adjacency mask."""
        h = torch.mm(input, self.W)
        N = h.size()[0]
        # Broadcasted sum gives e[i, j] = a1·h_i + a2·h_j for all pairs.
        f_1 = h @ self.a1
        f_2 = h @ self.a2
        e = self.leakyrelu(f_1 + f_2.transpose(0, 1)) #node_num * node_num
        # Mask non-edges with a large negative so softmax sends them to ~0.
        zero_vec = -9e15*torch.ones_like(e)
        attention = torch.where(adj > 0, e, zero_vec)
        attention = F.softmax(attention, dim=1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        h_prime = torch.matmul(attention, h)
        if self.concat:
            # FIX: F.sigmoid is deprecated; torch.sigmoid is the exact
            # numerical drop-in replacement.
            return torch.sigmoid(h_prime)
        else:
            return h_prime
    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
| 1,938 | 40.255319 | 220 | py |
PaperRobot | PaperRobot-master/Existing paper reading/model/GAT.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from .graph_attention import GraphAttentionLayer
class GAT(nn.Module):
    """Multi-head graph attention: runs ``nheads`` GraphAttentionLayer heads
    in parallel and concatenates their outputs along the feature dimension,
    with dropout applied before and after."""
    def __init__(self, nfeat, nhid, dropout, alpha, nheads):
        super(GAT, self).__init__()
        self.dropout = dropout
        self.attentions = []
        for head_idx in range(nheads):
            head = GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True)
            # Register under the same name scheme ('attention_<i>') the
            # original used, so existing checkpoints keep loading.
            self.add_module('attention_{}'.format(head_idx), head)
            self.attentions.append(head)
    def forward(self, x, adj):
        dropped = F.dropout(x, self.dropout, training=self.training)
        concatenated = torch.cat([head(dropped, adj) for head in self.attentions], dim=1)
        return F.dropout(concatenated, self.dropout, training=self.training)
| 763 | 35.380952 | 126 | py |
PaperRobot | PaperRobot-master/Existing paper reading/model/GATA.py | # --------- Link Prediction Model with both TAT and GAT contained -----------
import torch.nn as nn
import torch
from .GAT import GAT
from .TAT import TAT
class GATA(nn.Module):
    """Link-prediction model combining graph (GAT) and text (TAT) views.

    Each entity gets a graph-attention embedding and a text-attention
    embedding; a learned per-entity gate mixes the two. The score is the
    squared TransE-style residual (head + relation - tail)**2.
    """
    def __init__(self, emb_dim, hid_dim, out_dim, num_voc, num_heads, num_ent, num_rel, dropout, alpha, **kwargs):
        super(GATA, self).__init__()
        self.ent_embedding = nn.Embedding(num_ent, emb_dim)
        self.rel_embedding = nn.Embedding(num_rel, emb_dim)
        self.graph = GAT(nfeat=emb_dim, nhid=hid_dim, dropout=dropout, nheads=num_heads, alpha=alpha)
        self.text = TAT(emb_dim, num_voc)
        # Per-entity gating logits between graph and text representations.
        self.gate = nn.Embedding(num_ent, out_dim)
    def forward(self, nodes, adj, pos, shifted_pos, h_sents, h_order, h_lengths, t_sents, t_order, t_lengths):
        # `pos` indexes the global entity/relation tables; `shifted_pos`
        # presumably indexes positions within the subgraph's node list —
        # TODO confirm against the data pipeline.
        node_features = self.ent_embedding(nodes)
        graph = self.graph(node_features, adj)
        head_graph = graph[[shifted_pos[:, 0].squeeze()]]
        tail_graph = graph[[shifted_pos[:, 1].squeeze()]]
        head_text = self.text(h_sents, h_order, h_lengths, node_features[[shifted_pos[:, 0].squeeze()]])
        tail_text = self.text(t_sents, t_order, t_lengths, node_features[[shifted_pos[:, 1].squeeze()]])
        r_pos = self.rel_embedding(pos[:, 2].squeeze())
        gate_head = self.gate(pos[:, 0].squeeze())
        gate_tail = self.gate(pos[:, 1].squeeze())
        score_pos = self._score(head_graph, head_text, tail_graph, tail_text, r_pos, gate_head, gate_tail)
        return score_pos
    def _score(self, hg, ht, tg, tt, r, gh, gt):
        # Sigmoid gates blend graph (hg/tg) and text (ht/tt) embeddings.
        gate_h = torch.sigmoid(gh)
        gate_t = torch.sigmoid(gt)
        head = gate_h * hg + (1-gate_h) * ht
        tail = gate_t * tg + (1-gate_t) * tt
        s = (head + r - tail)** 2
        return s
| 1,737 | 41.390244 | 114 | py |
speech-resynthesis | speech-resynthesis-main/inference.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/jik876/hifi-gan
import argparse
import glob
import json
import os
import random
import sys
import time
from multiprocessing import Manager, Pool
from pathlib import Path
import librosa
import numpy as np
import torch
from scipy.io.wavfile import write
from dataset import CodeDataset, parse_manifest, mel_spectrogram, \
MAX_WAV_VALUE
from utils import AttrDict
from models import CodeGenerator
h = None
device = None
def stream(message):
    """Overwrite the current terminal line with *message* (no newline)."""
    sys.stdout.write('\r%s' % message)
def progbar(i, n, size=16):
    """Return a textual progress bar of *size* cells for step *i* of *n*.

    Cells up to and including the completed fraction render as a full block,
    the rest as a light shade.
    """
    done = (i * size) // n
    # FIX: the original reused `i` as the loop variable, shadowing the
    # parameter — harmless today only because `i` was no longer needed,
    # but a latent bug for any future edit. Use a distinct name and join.
    return ''.join('█' if cell <= done else '░' for cell in range(size))
def load_checkpoint(filepath):
    """Load a torch checkpoint from *filepath* onto the CPU and return it."""
    assert os.path.isfile(filepath)
    print("Loading '{}'".format(filepath))
    state = torch.load(filepath, map_location='cpu')
    print("Complete.")
    return state
def get_mel(x):
    """Mel-spectrogram of waveform *x* using the module-global config ``h``
    (populated by init_worker/main before any call)."""
    return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax)
def scan_checkpoint(cp_dir, prefix):
    """Return the lexicographically newest '<prefix>*' file in *cp_dir*, or ''."""
    candidates = glob.glob(os.path.join(cp_dir, prefix + '*'))
    # max() of a non-empty list equals sorted(...)[-1].
    return max(candidates) if candidates else ''
def generate(h, generator, code):
    """Run the vocoder on one batch of codes.

    Returns (audio, rtf): int16 numpy audio scaled to full range, and the
    real-time factor (synthesis wall-time / generated-audio duration).
    """
    start = time.time()
    y_g_hat = generator(**code)
    # Some generators return (audio, aux_losses); keep only the audio.
    if type(y_g_hat) is tuple:
        y_g_hat = y_g_hat[0]
    rtf = (time.time() - start) / (y_g_hat.shape[-1] / h.sampling_rate)
    audio = y_g_hat.squeeze()
    audio = audio * MAX_WAV_VALUE
    audio = audio.cpu().numpy().astype('int16')
    return audio, rtf
def init_worker(queue, arguments):
    """Per-process initializer for the inference Pool.

    Pops a worker/GPU id off *queue*, loads the config + generator checkpoint,
    builds the dataset (either a plain code file or a CodeDataset manifest),
    and stashes everything in module globals that `inference()` reads.
    """
    import logging
    logging.getLogger().handlers = []
    global generator
    global f0_stats
    global spkrs_emb
    global dataset
    global spkr_dataset
    global idx
    global device
    global a
    global h
    global spkrs
    a = arguments
    # Worker id doubles as the CUDA device index.
    idx = queue.get()
    device = idx
    if os.path.isdir(a.checkpoint_file):
        config_file = os.path.join(a.checkpoint_file, 'config.json')
    else:
        config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json')
    with open(config_file) as f:
        data = f.read()
    json_config = json.loads(data)
    h = AttrDict(json_config)
    generator = CodeGenerator(h).to(idx)
    if os.path.isdir(a.checkpoint_file):
        cp_g = scan_checkpoint(a.checkpoint_file, 'g_')
    else:
        cp_g = a.checkpoint_file
    state_dict_g = load_checkpoint(cp_g)
    generator.load_state_dict(state_dict_g['generator'])
    if a.code_file is not None:
        # Plain '<name>|<codes>' lines: mimic the dataset's item layout
        # (code_dict, gt_audio, filename, mel) with None placeholders.
        dataset = [x.strip().split('|') for x in open(a.code_file).readlines()]
        def parse_code(c):
            c = [int(v) for v in c.split(" ")]
            return [torch.LongTensor(c).numpy()]
        dataset = [(parse_code(x[1]), None, x[0], None) for x in dataset]
    else:
        file_list = parse_manifest(a.input_code_file)
        dataset = CodeDataset(file_list, -1, h.code_hop_size, h.n_fft, h.num_mels, h.hop_size, h.win_size,
                              h.sampling_rate, h.fmin, h.fmax, n_cache_reuse=0,
                              fmax_loss=h.fmax_for_loss, device=device,
                              f0=h.get('f0', None), multispkr=h.get('multispkr', None),
                              f0_stats=h.get('f0_stats', None), f0_normalize=h.get('f0_normalize', False),
                              f0_feats=h.get('f0_feats', False), f0_median=h.get('f0_median', False),
                              f0_interp=h.get('f0_interp', False), vqvae=h.get('code_vq_params', False),
                              pad=a.pad)
    if a.unseen_f0:
        dataset.f0_stats = torch.load(a.unseen_f0)
    os.makedirs(a.output_dir, exist_ok=True)
    if h.get('multispkr', None):
        # Shared pool of up to 5 target speakers for voice conversion.
        spkrs = random.sample(range(len(dataset.id_to_spkr)), k=min(5, len(dataset.id_to_spkr)))
    if a.f0_stats and h.get('f0', None) is not None:
        f0_stats = torch.load(a.f0_stats)
    generator.eval()
    generator.remove_weight_norm()
    # fix seed (per worker, so each worker is deterministic but distinct)
    seed = 52 + idx
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
@torch.no_grad()
def inference(item_index):
    """Synthesize one dataset item into a.output_dir.

    Writes '<name>_gen.wav'; when multi-speaker voice conversion is enabled,
    also '<name>_<spkr>_gen.wav' per target speaker; and '<name>_gt.wav' when
    ground-truth audio is available. Relies on globals set by init_worker.
    """
    code, gt_audio, filename, _ = dataset[item_index]
    code = {k: torch.from_numpy(v).to(device).unsqueeze(0) for k, v in code.items()}
    if a.parts:
        # Use the last 3 path components as a collision-free output name.
        parts = Path(filename).parts
        fname_out_name = '_'.join(parts[-3:])[:-4]
    else:
        fname_out_name = Path(filename).stem
    if h.get('f0_vq_params', None) or h.get('f0_quantizer', None):
        # Trim so the length is divisible by the F0 quantizer's stride.
        to_remove = gt_audio.shape[-1] % (16 * 80)
        assert to_remove % h['code_hop_size'] == 0
        if to_remove != 0:
            to_remove_code = to_remove // h['code_hop_size']
            to_remove_f0 = to_remove // 80
            gt_audio = gt_audio[:-to_remove]
            code['code'] = code['code'][..., :-to_remove_code]
            code['f0'] = code['f0'][..., :-to_remove_f0]
    # Re-insert 'f0' so it becomes the last key of the kwargs dict —
    # presumably required by the generator's argument handling; TODO confirm.
    new_code = dict(code)
    if 'f0' in new_code:
        del new_code['f0']
        new_code['f0'] = code['f0']
    audio, rtf = generate(h, generator, new_code)
    output_file = os.path.join(a.output_dir, fname_out_name + '_gen.wav')
    audio = librosa.util.normalize(audio.astype(np.float32))
    write(output_file, h.sampling_rate, audio)
    if h.get('multispkr', None) and a.vc:
        if a.random_speakers:
            local_spkrs = random.sample(range(len(dataset.id_to_spkr)), k=min(5, len(dataset.id_to_spkr)))
        else:
            local_spkrs = spkrs
        for spkr_i, k in enumerate(local_spkrs):
            code['spkr'].fill_(k)
            if a.f0_stats and h.get('f0', None) is not None and not h.get('f0_normalize', False):
                # Re-standardize voiced F0 to the target speaker's statistics.
                spkr = k
                f0 = code['f0'].clone()
                ii = (f0 != 0)
                mean_, std_ = f0[ii].mean(), f0[ii].std()
                if spkr not in f0_stats:
                    new_mean_, new_std_ = f0_stats['f0_mean'], f0_stats['f0_std']
                else:
                    new_mean_, new_std_ = f0_stats[spkr]['f0_mean'], f0_stats[spkr]['f0_std']
                f0[ii] -= mean_
                f0[ii] /= std_
                f0[ii] *= new_std_
                f0[ii] += new_mean_
                code['f0'] = f0
            if h.get('f0_feats', False):
                f0_stats_ = torch.load(h["f0_stats"])
                if k not in f0_stats_:
                    mean = f0_stats_['f0_mean']
                    std = f0_stats_['f0_std']
                else:
                    mean = f0_stats_[k]['f0_mean']
                    std = f0_stats_[k]['f0_std']
                code['f0_stats'] = torch.FloatTensor([mean, std]).view(1, -1).to(device)
            audio, rtf = generate(h, generator, code)
            output_file = os.path.join(a.output_dir, fname_out_name + f'_{k}_gen.wav')
            audio = librosa.util.normalize(audio.astype(np.float32))
            write(output_file, h.sampling_rate, audio)
    if gt_audio is not None:
        output_file = os.path.join(a.output_dir, fname_out_name + '_gt.wav')
        gt_audio = librosa.util.normalize(gt_audio.squeeze().numpy().astype(np.float32))
        write(output_file, h.sampling_rate, gt_audio)
def main():
    """CLI entry point: parse args and run inference.

    In --debug mode everything runs in-process; otherwise a Pool of 8
    workers (each pinned to a device id from idQueue) processes a shuffled
    item order, stopping after -n items (-1 = no limit).
    """
    print('Initializing Inference Process..')
    parser = argparse.ArgumentParser()
    parser.add_argument('--code_file', default=None)
    parser.add_argument('--input_code_file', default='./datasets/LJSpeech/cpc100/test.txt')
    parser.add_argument('--output_dir', default='generated_files')
    parser.add_argument('--checkpoint_file', required=True)
    parser.add_argument('--f0-stats', type=Path)
    parser.add_argument('--vc', action='store_true')
    parser.add_argument('--random-speakers', action='store_true')
    parser.add_argument('--pad', default=None, type=int)
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--parts', action='store_true')
    parser.add_argument('--unseen-f0', type=Path)
    parser.add_argument('-n', type=int, default=10)
    a = parser.parse_args()
    seed = 52
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Device ids handed out to pool workers via a shared queue.
    ids = list(range(8))
    manager = Manager()
    idQueue = manager.Queue()
    for i in ids:
        idQueue.put(i)
    if os.path.isdir(a.checkpoint_file):
        config_file = os.path.join(a.checkpoint_file, 'config.json')
    else:
        config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json')
    with open(config_file) as f:
        data = f.read()
    json_config = json.loads(data)
    h = AttrDict(json_config)
    if os.path.isdir(a.checkpoint_file):
        cp_g = scan_checkpoint(a.checkpoint_file, 'g_')
    else:
        cp_g = a.checkpoint_file
    if not os.path.isfile(cp_g) or not os.path.exists(cp_g):
        print(f"Didn't find checkpoints for {cp_g}")
        return
    # Dataset is built here only to know its length for sharding; each
    # worker rebuilds its own copy in init_worker.
    if a.code_file is not None:
        dataset = [x.strip().split('|') for x in open(a.code_file).readlines()]
        def parse_code(c):
            c = [int(v) for v in c.split(" ")]
            return [torch.LongTensor(c).numpy()]
        dataset = [(parse_code(x[1]), None, x[0], None) for x in dataset]
    else:
        file_list = parse_manifest(a.input_code_file)
        dataset = CodeDataset(file_list, -1, h.code_hop_size, h.n_fft, h.num_mels, h.hop_size, h.win_size,
                              h.sampling_rate, h.fmin, h.fmax, n_cache_reuse=0, fmax_loss=h.fmax_for_loss, device=device,
                              f0=h.get('f0', None), multispkr=h.get('multispkr', None),
                              f0_stats=h.get('f0_stats', None), f0_normalize=h.get('f0_normalize', False),
                              f0_feats=h.get('f0_feats', False), f0_median=h.get('f0_median', False),
                              f0_interp=h.get('f0_interp', False), vqvae=h.get('code_vq_params', False),
                              pad=a.pad)
    if a.debug:
        # Single-process path: call init_worker/inference directly.
        ids = list(range(1))
        import queue
        idQueue = queue.Queue()
        for i in ids:
            idQueue.put(i)
        init_worker(idQueue, a)
        for i in range(0, len(dataset)):
            inference(i)
            bar = progbar(i, len(dataset))
            message = f'{bar} {i}/{len(dataset)} '
            stream(message)
            if a.n != -1 and i > a.n:
                break
    else:
        idx = list(range(len(dataset)))
        random.shuffle(idx)
        with Pool(8, init_worker, (idQueue, a)) as pool:
            for i, _ in enumerate(pool.imap(inference, idx), 1):
                bar = progbar(i, len(idx))
                message = f'{bar} {i}/{len(idx)} '
                stream(message)
                if a.n != -1 and i > a.n:
                    break
# Script entry point.
if __name__ == '__main__':
    main()
| 10,958 | 32.411585 | 121 | py |
speech-resynthesis | speech-resynthesis-main/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/jik876/hifi-gan
import glob
import os
import shutil
import matplotlib
import torch
from torch.nn.utils import weight_norm
matplotlib.use("Agg")
import matplotlib.pylab as plt
def plot_spectrogram(spectrogram):
    """Render *spectrogram* as a matplotlib figure (e.g. for TensorBoard)."""
    figure, axis = plt.subplots(figsize=(10, 2))
    image = axis.imshow(spectrogram, aspect="auto", origin="lower",
                        interpolation='none')
    plt.colorbar(image, ax=axis)
    # Draw now so the figure is usable after plt.close releases the window.
    figure.canvas.draw()
    plt.close()
    return figure
def init_weights(m, mean=0.0, std=0.01):
    """Normal-initialize conv-layer weights in place; other modules untouched."""
    if "Conv" in m.__class__.__name__:
        m.weight.data.normal_(mean, std)
def apply_weight_norm(m):
    """Wrap conv layers of *m* with weight normalization, in place."""
    if "Conv" in m.__class__.__name__:
        weight_norm(m)
def get_padding(kernel_size, dilation=1):
    """Padding that keeps output length equal to input for a dilated conv.

    Equals dilation * (kernel_size - 1) / 2, exact for odd kernel sizes.
    """
    # Integer floor-division replaces int(float-division): same result for
    # the non-negative ints used here, without the float round-trip.
    return (kernel_size * dilation - dilation) // 2
def load_checkpoint(filepath, device):
    """Load a torch checkpoint from *filepath*, mapping tensors to *device*."""
    assert os.path.isfile(filepath)
    print("Loading '{}'".format(filepath))
    state = torch.load(filepath, map_location=device)
    print("Complete.")
    return state
def save_checkpoint(filepath, obj):
    """Serialize *obj* to *filepath* with torch.save, logging progress."""
    print("Saving checkpoint to {}".format(filepath))
    torch.save(obj, filepath)
    print("Complete.")
def scan_checkpoint(cp_dir, prefix):
    """Return the newest '<prefix>XXXXXXXX' (8-char suffix) checkpoint in
    *cp_dir*, or None when no checkpoint exists."""
    candidates = glob.glob(os.path.join(cp_dir, prefix + '????????'))
    # max() of a non-empty list equals sorted(...)[-1].
    return max(candidates) if candidates else None
def build_env(config, config_name, path):
    """Copy *config* into *path*/<config_name> unless it is already there."""
    destination = os.path.join(path, config_name)
    if config != destination:
        os.makedirs(path, exist_ok=True)
        shutil.copyfile(config, destination)
class AttrDict(dict):
    """Dict whose keys are also attributes: ``d.key`` is ``d['key']``."""
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        # Alias the attribute namespace to the mapping itself so attribute
        # access and item access share a single store.
        self.__dict__ = self
speech-resynthesis | speech-resynthesis-main/dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/jik876/hifi-gan
import random
from pathlib import Path
import amfm_decompy.basic_tools as basic
import amfm_decompy.pYAAPT as pYAAPT
import numpy as np
import soundfile as sf
import torch
import torch.utils.data
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn
from librosa.util import normalize
MAX_WAV_VALUE = 32768.0
def get_yaapt_f0(audio, rate=16000, interp=False):
    """Extract F0 tracks for a batch of waveforms with YAAPT.

    audio is float-convertible of shape (B, 1, T); each item is zero-padded
    by half a frame on both sides before pitch tracking. Returns the stacked
    per-item tracks; *interp* selects interpolated vs raw pitch samples.
    """
    frame_length = 20.0  # ms
    pad_amount = int(frame_length / 1000 * rate) // 2
    tracks = []
    for waveform in audio.astype(np.float64):
        padded = np.pad(waveform.squeeze(), (pad_amount, pad_amount), "constant", constant_values=0)
        signal = basic.SignalObj(padded, rate)
        pitch = pYAAPT.yaapt(signal, frame_length=frame_length, frame_space=5.0,
                             nccf_thresh1=0.25, tda_frame_length=25.0)
        samples = pitch.samp_interp if interp else pitch.samp_values
        tracks.append(samples[None, None, :])
    return np.vstack(tracks)
def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
    """Log-mel spectrogram of a waveform batch *y* of shape (B, T) in [-1, 1].

    Mel filterbanks are cached per (fmax, device); Hann windows per device.
    NOTE(review): the window cache assumes win_size is constant per process,
    which holds for this config-driven code — confirm if reusing elsewhere.
    """
    if torch.min(y) < -1.:
        print('min value is ', torch.min(y))
    if torch.max(y) > 1.:
        print('max value is ', torch.max(y))
    global mel_basis, hann_window
    # BUG FIX: the cache was stored under str(fmax)+'_'+device but probed
    # with the bare fmax value, so the membership test never hit and the
    # filterbank + window were rebuilt on every call. Probe the same key
    # that is stored.
    mel_key = str(fmax) + '_' + str(y.device)
    if mel_key not in mel_basis:
        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
        mel_basis[mel_key] = torch.from_numpy(mel).float().to(y.device)
    if str(y.device) not in hann_window:
        hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
    # Reflect-pad so frame centers line up with center=False framing.
    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
    y = y.squeeze(1)
    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],
                      center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
    # Magnitude with a small epsilon for numerical stability.
    spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
    spec = torch.matmul(mel_basis[mel_key], spec)
    spec = spectral_normalize_torch(spec)
    return spec
def load_audio(full_path):
    """Read an audio file as int16 samples; returns (data, sampling_rate)."""
    samples, sample_rate = sf.read(full_path, dtype='int16')
    return samples, sample_rate
def dynamic_range_compression(x, C=1, clip_val=1e-5):
    """Log-compress numpy array *x*, clipping at *clip_val* to avoid log(0)."""
    return np.log(C * np.clip(x, clip_val, None))
def dynamic_range_decompression(x, C=1):
    """Inverse of dynamic_range_compression for numpy arrays: exp(x) / C."""
    return np.exp(x) / C
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
    """Log-compress tensor *x*, clamping at *clip_val* to avoid log(0)."""
    return torch.log(C * torch.clamp(x, min=clip_val))
def dynamic_range_decompression_torch(x, C=1):
    """Inverse of dynamic_range_compression_torch: exp(x) / C."""
    return torch.exp(x) / C
def spectral_normalize_torch(magnitudes):
    """Apply log dynamic-range compression to spectrogram magnitudes."""
    return dynamic_range_compression_torch(magnitudes)
def spectral_de_normalize_torch(magnitudes):
    """Undo log dynamic-range compression on spectrogram magnitudes."""
    return dynamic_range_decompression_torch(magnitudes)
# Per-process caches shared by mel_spectrogram: mel filterbanks stored under
# str(fmax)+'_'+device keys, Hann windows under device keys.
mel_basis = {}
hann_window = {}
def parse_manifest(manifest):
    """Parse a dataset manifest file.

    Each line is either a bare audio path, or a Python dict literal with an
    "audio" key plus one unit-code field ('cpc_km100', 'vqvae256', or
    'hubert') whose value is a space-separated string of integer indices.

    Returns (audio_files, codes): a list of Paths and a list of int arrays.
    """
    import ast  # local import keeps this fix self-contained

    audio_files = []
    codes = []
    with open(manifest) as info:
        for line in info.readlines():
            if line[0] == '{':
                # SECURITY FIX: the manifest is external input; literal_eval
                # accepts only Python literals, whereas eval() executed
                # arbitrary code embedded in the file.
                sample = ast.literal_eval(line.strip())
                if 'cpc_km100' in sample:
                    k = 'cpc_km100'
                elif 'vqvae256' in sample:
                    k = 'vqvae256'
                else:
                    k = 'hubert'
                codes += [torch.LongTensor(
                    [int(x) for x in sample[k].split(' ')]
                ).numpy()]
                audio_files += [Path(sample["audio"])]
            else:
                audio_files += [Path(line.strip())]
    return audio_files, codes
def get_dataset_filelist(h):
    """Return ((train_files, train_codes), (val_files, val_codes)) parsed
    from the manifests named in config *h*."""
    train_split = parse_manifest(h.input_training_file)
    valid_split = parse_manifest(h.input_validation_file)
    return train_split, valid_split
def parse_speaker(path, method):
    """Derive a speaker label from *path* according to *method*.

    method: 'parent_name' (directory name), 'parent_parent_name',
    '_' (filename prefix before the first underscore), 'single' (constant
    'A'), or a callable applied to the Path.
    """
    p = Path(path) if isinstance(path, str) else path
    if callable(method):
        return method(p)
    if method == 'parent_name':
        return p.parent.name
    if method == 'parent_parent_name':
        return p.parent.parent.name
    if method == '_':
        return p.name.split('_')[0]
    if method == 'single':
        return 'A'
    raise NotImplementedError()
class CodeDataset(torch.utils.data.Dataset):
    """Dataset pairing audio files with discrete unit codes for vocoder
    training/inference.

    Items are (feats, audio, filename, mel_loss): feats holds 'code' (the
    unit sequence, or raw audio in VQ-VAE mode) plus optional 'f0', 'spkr',
    and 'f0_stats' entries; audio is a segment_size waveform; mel_loss is
    the mel-spectrogram used as reconstruction target.
    """
    def __init__(self, training_files, segment_size, code_hop_size, n_fft, num_mels,
                 hop_size, win_size, sampling_rate, fmin, fmax, split=True, n_cache_reuse=1,
                 device=None, fmax_loss=None, f0=None, multispkr=False, pad=None,
                 f0_stats=None, f0_normalize=False, f0_feats=False, f0_median=False,
                 f0_interp=False, vqvae=False):
        self.audio_files, self.codes = training_files
        random.seed(1234)
        self.segment_size = segment_size
        self.code_hop_size = code_hop_size
        self.sampling_rate = sampling_rate
        self.split = split
        self.n_fft = n_fft
        self.num_mels = num_mels
        self.hop_size = hop_size
        self.win_size = win_size
        self.fmin = fmin
        self.fmax = fmax
        self.fmax_loss = fmax_loss
        self.cached_wav = None
        self.n_cache_reuse = n_cache_reuse
        self._cache_ref_count = 0
        self.device = device
        self.vqvae = vqvae
        self.f0 = f0
        self.f0_normalize = f0_normalize
        self.f0_feats = f0_feats
        self.f0_stats = None
        self.f0_interp = f0_interp
        self.f0_median = f0_median
        if f0_stats:
            self.f0_stats = torch.load(f0_stats)
        self.multispkr = multispkr
        self.pad = pad
        if self.multispkr:
            # Build a deterministic speaker-id mapping from the file paths.
            spkrs = [parse_speaker(f, self.multispkr) for f in self.audio_files]
            spkrs = list(set(spkrs))
            spkrs.sort()
            self.id_to_spkr = spkrs
            self.spkr_to_id = {k: v for v, k in enumerate(self.id_to_spkr)}
    def _sample_interval(self, seqs, seq_len=None):
        """Crop the same random interval from sequences at different hop
        rates, aligning the cut to the least common multiple of their hops."""
        N = max([v.shape[-1] for v in seqs])
        if seq_len is None:
            seq_len = self.segment_size if self.segment_size > 0 else N
        hops = [N // v.shape[-1] for v in seqs]
        lcm = np.lcm.reduce(hops)
        # Randomly pickup with the batch_max_steps length of the part
        interval_start = 0
        interval_end = N // lcm - seq_len // lcm
        start_step = random.randint(interval_start, interval_end)
        new_seqs = []
        for i, v in enumerate(seqs):
            start = start_step * (lcm // hops[i])
            end = (start_step + seq_len // lcm) * (lcm // hops[i])
            new_seqs += [v[..., start:end]]
        return new_seqs
    def __getitem__(self, index):
        filename = self.audio_files[index]
        if self._cache_ref_count == 0:
            audio, sampling_rate = load_audio(filename)
            if sampling_rate != self.sampling_rate:
                # raise ValueError("{} SR doesn't match target {} SR".format(
                #     sampling_rate, self.sampling_rate))
                import resampy
                audio = resampy.resample(audio, sampling_rate, self.sampling_rate)
            if self.pad:
                padding = self.pad - (audio.shape[-1] % self.pad)
                audio = np.pad(audio, (0, padding), "constant", constant_values=0)
            audio = audio / MAX_WAV_VALUE
            audio = normalize(audio) * 0.95
            self.cached_wav = audio
            self._cache_ref_count = self.n_cache_reuse
        else:
            audio = self.cached_wav
            self._cache_ref_count -= 1
        # Trim audio ending
        if self.vqvae:
            code_length = audio.shape[0] // self.code_hop_size
        else:
            code_length = min(audio.shape[0] // self.code_hop_size, self.codes[index].shape[0])
            code = self.codes[index][:code_length]
        audio = audio[:code_length * self.code_hop_size]
        assert self.vqvae or audio.shape[0] // self.code_hop_size == code.shape[0], "Code audio mismatch"
        # Tile short clips until they cover one training segment.
        while audio.shape[0] < self.segment_size:
            audio = np.hstack([audio, audio])
            if not self.vqvae:
                code = np.hstack([code, code])
        audio = torch.FloatTensor(audio)
        audio = audio.unsqueeze(0)
        assert audio.size(1) >= self.segment_size, "Padding not supported!!"
        if self.vqvae:
            audio = self._sample_interval([audio])[0]
        else:
            audio, code = self._sample_interval([audio, code])
        mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels,
                                   self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax_loss,
                                   center=False)
        if self.vqvae:
            # In VQ-VAE mode the "code" is the raw waveform itself.
            feats = {
                "code": audio.view(1, -1).numpy()
            }
        else:
            feats = {"code": code.squeeze()}
        if self.f0:
            try:
                f0 = get_yaapt_f0(audio.numpy(), rate=self.sampling_rate, interp=self.f0_interp)
            except:
                # Best-effort: fall back to an all-zero (unvoiced) track when
                # YAAPT fails on this segment.
                f0 = np.zeros((1, 1, audio.shape[-1] // 80))
            f0 = f0.astype(np.float32)
            feats['f0'] = f0.squeeze(0)
        if self.multispkr:
            feats['spkr'] = self._get_spkr(index)
        if self.f0_normalize:
            spkr_id = self._get_spkr(index).item()
            # Fall back to global F0 stats for speakers without entries.
            if spkr_id not in self.f0_stats:
                mean = self.f0_stats['f0_mean']
                std = self.f0_stats['f0_std']
            else:
                mean = self.f0_stats[spkr_id]['f0_mean']
                std = self.f0_stats[spkr_id]['f0_std']
            ii = feats['f0'] != 0
            if self.f0_median:
                # Fill unvoiced frames with the voiced median, then normalize.
                med = np.median(feats['f0'][ii])
                feats['f0'][~ii] = med
                feats['f0'][~ii] = (feats['f0'][~ii] - mean) / std
            feats['f0'][ii] = (feats['f0'][ii] - mean) / std
            if self.f0_feats:
                feats['f0_stats'] = torch.FloatTensor([mean, std]).view(-1).numpy()
        return feats, audio.squeeze(0), str(filename), mel_loss.squeeze()
    def _get_spkr(self, idx):
        # Speaker id as a shape-(1,) int array, ready for collation.
        spkr_name = parse_speaker(self.audio_files[idx], self.multispkr)
        spkr_id = torch.LongTensor([self.spkr_to_id[spkr_name]]).view(1).numpy()
        return spkr_id
    def __len__(self):
        return len(self.audio_files)
class F0Dataset(torch.utils.data.Dataset):
    """Dataset yielding YAAPT F0 tracks for training the F0 quantizer.

    Items are (feats, f0, filename): feats holds 'f0' plus optional 'spkr'
    and 'f0_stats'; the second element aliases feats['f0'] as the target.
    """
    def __init__(self, training_files, segment_size, sampling_rate,
                 split=True, n_cache_reuse=1, device=None, multispkr=False,
                 pad=None, f0_stats=None, f0_normalize=False, f0_feats=False,
                 f0_median=False, f0_interp=False, vqvae=False):
        self.audio_files, _ = training_files
        random.seed(1234)
        self.segment_size = segment_size
        self.sampling_rate = sampling_rate
        self.split = split
        self.cached_wav = None
        self.n_cache_reuse = n_cache_reuse
        self._cache_ref_count = 0
        self.device = device
        self.vqvae = vqvae
        self.f0_normalize = f0_normalize
        self.f0_feats = f0_feats
        self.f0_stats = None
        self.f0_interp = f0_interp
        self.f0_median = f0_median
        if f0_stats:
            self.f0_stats = torch.load(f0_stats)
        self.pad = pad
        self.multispkr = multispkr
        if self.multispkr:
            # Deterministic speaker-id mapping derived from file paths.
            spkrs = [parse_speaker(f, self.multispkr) for f in self.audio_files]
            spkrs = list(set(spkrs))
            spkrs.sort()
            self.id_to_spkr = spkrs
            self.spkr_to_id = {k: v for v, k in enumerate(self.id_to_spkr)}
    def _sample_interval(self, seqs, seq_len=None):
        """Crop the same random interval from sequences at different hop
        rates, aligning the cut to the least common multiple of their hops."""
        N = max([v.shape[-1] for v in seqs])
        if seq_len is None:
            seq_len = self.segment_size if self.segment_size > 0 else N
        hops = [N // v.shape[-1] for v in seqs]
        lcm = np.lcm.reduce(hops)
        # Randomly pickup with the batch_max_steps length of the part
        interval_start = 0
        interval_end = N // lcm - seq_len // lcm
        start_step = random.randint(interval_start, interval_end)
        new_seqs = []
        for i, v in enumerate(seqs):
            start = start_step * (lcm // hops[i])
            end = (start_step + seq_len // lcm) * (lcm // hops[i])
            new_seqs += [v[..., start:end]]
        return new_seqs
    def __getitem__(self, index):
        filename = self.audio_files[index]
        if self._cache_ref_count == 0:
            audio, sampling_rate = load_audio(filename)
            if self.pad:
                padding = self.pad - (audio.shape[-1] % self.pad)
                audio = np.pad(audio, (0, padding), "constant", constant_values=0)
            audio = audio / MAX_WAV_VALUE
            audio = normalize(audio) * 0.95
            self.cached_wav = audio
            if sampling_rate != self.sampling_rate:
                raise ValueError("{} SR doesn't match target {} SR".format(
                    sampling_rate, self.sampling_rate))
            self._cache_ref_count = self.n_cache_reuse
        else:
            audio = self.cached_wav
            self._cache_ref_count -= 1
        # Tile short clips until they cover one training segment.
        while audio.shape[0] < self.segment_size:
            audio = np.hstack([audio, audio])
        audio = torch.FloatTensor(audio)
        audio = audio.unsqueeze(0)
        assert audio.size(1) >= self.segment_size, "Padding not supported!!"
        audio = self._sample_interval([audio])[0]
        feats = {}
        try:
            f0 = get_yaapt_f0(audio.numpy(), rate=self.sampling_rate, interp=self.f0_interp)
        except:
            # Best-effort: all-zero (unvoiced) track when YAAPT fails.
            f0 = np.zeros((1, 1, audio.shape[-1] // 80))
        f0 = f0.astype(np.float32)
        feats['f0'] = f0.squeeze(0)
        if self.multispkr:
            feats['spkr'] = self._get_spkr(index)
        if self.f0_normalize:
            spkr_id = self._get_spkr(index).item()
            # Fall back to global F0 stats for speakers without entries.
            if spkr_id not in self.f0_stats:
                mean = self.f0_stats['f0_mean']
                std = self.f0_stats['f0_std']
            else:
                mean = self.f0_stats[spkr_id]['f0_mean']
                std = self.f0_stats[spkr_id]['f0_std']
            ii = feats['f0'] != 0
            if self.f0_median:
                # Fill unvoiced frames with the voiced median, then normalize.
                med = np.median(feats['f0'][ii])
                feats['f0'][~ii] = med
                feats['f0'][~ii] = (feats['f0'][~ii] - mean) / std
            feats['f0'][ii] = (feats['f0'][ii] - mean) / std
            if self.f0_feats:
                feats['f0_stats'] = torch.FloatTensor([mean, std]).view(-1).numpy()
        return feats, feats['f0'], str(filename)
    def _get_spkr(self, idx):
        # Speaker id as a shape-(1,) int array, ready for collation.
        spkr_name = parse_speaker(self.audio_files[idx], self.multispkr)
        spkr_id = torch.LongTensor([self.spkr_to_id[spkr_name]]).view(1).numpy()
        return spkr_id
    def __len__(self):
        return len(self.audio_files)
| 15,169 | 33.555809 | 115 | py |
speech-resynthesis | speech-resynthesis-main/train_f0_vq.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/jik876/hifi-gan
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import os
import time
import argparse
import json
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DistributedSampler, DataLoader
from torch.distributed import init_process_group
from torch.nn.parallel import DistributedDataParallel
from dataset import F0Dataset, get_dataset_filelist
from models import Quantizer
from utils import scan_checkpoint, load_checkpoint, save_checkpoint, build_env, \
AttrDict
torch.backends.cudnn.benchmark = True
def train(rank, a, h):
    """Train the F0 VQ quantizer.

    rank: process rank (also the CUDA device index); a: CLI args; h: config.
    Resumes from the newest 'g_' checkpoint when one exists. Rank 0 handles
    logging, checkpointing, TensorBoard summaries, and validation.
    """
    if h.num_gpus > 1:
        init_process_group(backend=h.dist_config['dist_backend'], init_method=h.dist_config['dist_url'], rank=rank)
    torch.cuda.manual_seed(h.seed)
    device = torch.device('cuda:{:d}'.format(rank))
    generator = Quantizer(h).to(device)
    if rank == 0:
        print(generator)
        os.makedirs(a.checkpoint_path, exist_ok=True)
        print("checkpoints directory : ", a.checkpoint_path)
    cp_g = None
    if os.path.isdir(a.checkpoint_path):
        cp_g = scan_checkpoint(a.checkpoint_path, 'g_')
    steps = 0
    if cp_g is None:
        last_epoch = -1
        state_dict_g = None
    else:
        state_dict_g = load_checkpoint(cp_g, device)
        generator.load_state_dict(state_dict_g['generator'])
        steps = state_dict_g['steps'] + 1
        last_epoch = state_dict_g['epoch']
    if h.num_gpus > 1:
        generator = DistributedDataParallel(generator, device_ids=[rank]).to(device)
    optim_g = torch.optim.AdamW(generator.parameters(), h.learning_rate, betas=[h.adam_b1, h.adam_b2])
    if state_dict_g is not None:
        optim_g.load_state_dict(state_dict_g['optim_g'])
    scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=h.lr_decay, last_epoch=last_epoch)
    training_filelist, validation_filelist = get_dataset_filelist(h)
    trainset = F0Dataset(training_filelist, h.segment_size, h.sampling_rate, n_cache_reuse=0, device=device,
                         multispkr=h.get('multispkr', None), f0_stats=h.get('f0_stats', None),
                         f0_normalize=h.get('f0_normalize', False), f0_feats=h.get('f0_feats', False),
                         f0_median=h.get('f0_median', False), f0_interp=h.get('f0_interp', False),
                         vqvae=h.get('code_vq_params', False))
    train_sampler = DistributedSampler(trainset) if h.num_gpus > 1 else None
    train_loader = DataLoader(trainset, num_workers=h.num_workers, shuffle=False, sampler=train_sampler,
                              batch_size=h.batch_size, pin_memory=True, drop_last=True)
    if rank == 0:
        validset = F0Dataset(validation_filelist, h.segment_size, h.sampling_rate, False, n_cache_reuse=0,
                             device=device, multispkr=h.get('multispkr', None), f0_stats=h.get('f0_stats', None),
                             f0_normalize=h.get('f0_normalize', False), f0_feats=h.get('f0_feats', False),
                             f0_median=h.get('f0_median', False), f0_interp=h.get('f0_interp', False),
                             vqvae=h.get('code_vq_params', False))
        validation_loader = DataLoader(validset, num_workers=h.num_workers, shuffle=False, sampler=None,
                                       batch_size=h.batch_size, pin_memory=True, drop_last=True)
        sw = SummaryWriter(os.path.join(a.checkpoint_path, 'logs'))
    generator.train()
    for epoch in range(max(0, last_epoch), a.training_epochs):
        if rank == 0:
            start = time.time()
            print("Epoch: {}".format(epoch + 1))
        if h.num_gpus > 1:
            train_sampler.set_epoch(epoch)
        for i, batch in enumerate(train_loader):
            if rank == 0:
                start_b = time.time()
            x, y, _ = batch
            y = torch.autograd.Variable(y.to(device, non_blocking=False))
            x = {k: torch.autograd.Variable(v.to(device, non_blocking=False)) for k, v in x.items()}
            y_g_hat, commit_loss, metrics = generator(**x)
            f0_commit_loss = commit_loss[0]
            f0_metrics = metrics[0]
            # Generator
            optim_g.zero_grad()
            # L2 Reconstruction Loss
            loss_recon = F.mse_loss(y_g_hat, y)
            # VQ commitment loss weighted by lambda_commit from the config.
            loss_recon += f0_commit_loss * h.get('lambda_commit', None)
            loss_recon.backward()
            optim_g.step()
            if rank == 0:
                # STDOUT logging
                if steps % a.stdout_interval == 0:
                    print('Steps : {:d}, Gen Loss Total : {:4.3f}, s/b : {:4.3f}'.format(steps, loss_recon,
                                                                                        time.time() - start_b))
                # checkpointing
                if steps % a.checkpoint_interval == 0 and steps != 0:
                    checkpoint_path = "{}/g_{:08d}".format(a.checkpoint_path, steps)
                    save_checkpoint(checkpoint_path,
                                    {'generator': (generator.module if h.num_gpus > 1 else generator).state_dict(),
                                     'optim_g': optim_g.state_dict(), 'steps': steps, 'epoch': epoch})
                # Tensorboard summary logging
                if steps % a.summary_interval == 0:
                    sw.add_scalar("training/gen_loss_total", loss_recon, steps)
                    sw.add_scalar("training/commit_error", f0_commit_loss, steps)
                    sw.add_scalar("training/used_curr", f0_metrics['used_curr'].item(), steps)
                    sw.add_scalar("training/entropy", f0_metrics['entropy'].item(), steps)
                    sw.add_scalar("training/usage", f0_metrics['usage'].item(), steps)
                # Validation
                if steps % a.validation_interval == 0: # and steps != 0:
                    generator.eval()
                    torch.cuda.empty_cache()
                    val_err_tot = 0
                    with torch.no_grad():
                        for j, batch in enumerate(validation_loader):
                            x, y, _ = batch
                            x = {k: v.to(device, non_blocking=False) for k, v in x.items()}
                            y = torch.autograd.Variable(y.to(device, non_blocking=False))
                            y_g_hat, commit_loss, _ = generator(**x)
                            f0_commit_loss = commit_loss[0]
                            val_err_tot += f0_commit_loss * h.get('lambda_commit', None)
                            val_err_tot += F.mse_loss(y_g_hat, y).item()
                        val_err = val_err_tot / (j + 1)
                        sw.add_scalar("validation/mel_spec_error", val_err, steps)
                        sw.add_scalar("validation/commit_error", f0_commit_loss, steps)
                    generator.train()
            steps += 1
            if steps >= a.training_steps:
                break
        scheduler_g.step()
        if rank == 0:
            print('Time taken for epoch {} is {} sec\n'.format(epoch + 1, int(time.time() - start)))
    if rank == 0:
        print('Finished training')
def main():
    """Entry point: parse CLI flags, load the JSON config, then launch training."""
    print('Initializing Training Process..')
    parser = argparse.ArgumentParser()
    parser.add_argument('--group_name', default=None)
    parser.add_argument('--checkpoint_path', default='cp_hifigan')
    parser.add_argument('--config', default='')
    # Integer-valued options share one declaration loop.
    for flag, default in (('--training_epochs', 10000),
                          ('--training_steps', 400000),
                          ('--stdout_interval', 5),
                          ('--checkpoint_interval', 10000),
                          ('--summary_interval', 100),
                          ('--validation_interval', 1000)):
        parser.add_argument(flag, default=default, type=int)
    # NOTE: type=bool parses any non-empty string (even "False") as True.
    parser.add_argument('--fine_tuning', default=False, type=bool)
    parser.add_argument('--local_rank', default=0, type=int)
    a = parser.parse_args()
    with open(a.config) as config_file:
        h = AttrDict(json.loads(config_file.read()))
    build_env(a.config, 'config.json', a.checkpoint_path)
    torch.manual_seed(h.seed)
    if torch.cuda.is_available() and 'WORLD_SIZE' in os.environ:
        # Distributed launch: derive the per-GPU batch size from WORLD_SIZE.
        torch.cuda.manual_seed(h.seed)
        h.num_gpus = int(os.environ['WORLD_SIZE'])
        h.batch_size = int(h.batch_size / h.num_gpus)
        print('Batch size per GPU :', h.batch_size)
    train(a.local_rank, a, h)
if __name__ == '__main__':
    main()
| 8,872 | 39.701835 | 115 | py |
speech-resynthesis | speech-resynthesis-main/models.py | # adapted from https://github.com/jik876/hifi-gan
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from modules.jukebox import Encoder, Decoder
from utils import init_weights, get_padding, AttrDict
from modules.vq import Bottleneck
LRELU_SLOPE = 0.1
class ResBlock1(torch.nn.Module):
    """Residual block: three (dilated conv -> plain conv) pairs, each wrapped
    in leaky-ReLU activations with an additive skip connection."""

    def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock1, self).__init__()
        self.h = h
        # First stack: one dilated conv per entry of `dilation`.
        self.convs1 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1,
                               dilation=dilation[k],
                               padding=get_padding(kernel_size, dilation[k])))
            for k in range(3)
        ])
        self.convs1.apply(init_weights)
        # Second stack: matching undilated convs.
        self.convs2 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1)))
            for _ in range(3)
        ])
        self.convs2.apply(init_weights)

    def forward(self, x):
        for dilated_conv, plain_conv in zip(self.convs1, self.convs2):
            residual = x
            x = F.leaky_relu(x, LRELU_SLOPE)
            x = dilated_conv(x)
            x = F.leaky_relu(x, LRELU_SLOPE)
            x = plain_conv(x)
            x = x + residual
        return x

    def remove_weight_norm(self):
        """Strip weight normalisation from every convolution (for inference)."""
        for layer in self.convs1:
            remove_weight_norm(layer)
        for layer in self.convs2:
            remove_weight_norm(layer)
class ResBlock2(torch.nn.Module):
    """Lighter residual block: two dilated convolutions, each preceded by a
    leaky ReLU, with an additive skip connection."""

    def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2, self).__init__()
        self.h = h
        self.convs = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1,
                               dilation=dilation[k],
                               padding=get_padding(kernel_size, dilation[k])))
            for k in range(2)
        ])
        self.convs.apply(init_weights)

    def forward(self, x):
        for conv in self.convs:
            residual = x
            x = F.leaky_relu(x, LRELU_SLOPE)
            x = conv(x)
            x = x + residual
        return x

    def remove_weight_norm(self):
        """Strip weight normalisation from every convolution (for inference)."""
        for layer in self.convs:
            remove_weight_norm(layer)
class Generator(torch.nn.Module):
    """HiFi-GAN-style generator: a pre-conv, a stack of transposed-conv
    upsampling stages (each followed by a bank of residual blocks whose
    outputs are averaged) and a post-conv producing a tanh waveform."""
    def __init__(self, h):
        super(Generator, self).__init__()
        self.h = h
        self.num_kernels = len(h.resblock_kernel_sizes)
        self.num_upsamples = len(h.upsample_rates)
        self.conv_pre = weight_norm(
            Conv1d(getattr(h, "model_in_dim", 128), h.upsample_initial_channel, 7, 1, padding=3))
        resblock = ResBlock1 if h.resblock == '1' else ResBlock2
        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
            # Channel count halves at each upsampling stage.
            self.ups.append(weight_norm(
                ConvTranspose1d(h.upsample_initial_channel // (2 ** i), h.upsample_initial_channel // (2 ** (i + 1)), k,
                                u, padding=(k - u) // 2)))
        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = h.upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
                self.resblocks.append(resblock(h, ch, k, d))
        self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
        self.ups.apply(init_weights)
        self.conv_post.apply(init_weights)
    def forward(self, x):
        x = self.conv_pre(x)
        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, LRELU_SLOPE)
            x = self.ups[i](x)
            # Average the num_kernels parallel residual blocks of this stage.
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)
        return x
    def remove_weight_norm(self):
        # Strip weight normalisation from all layers (inference/export).
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()
        remove_weight_norm(self.conv_pre)
        remove_weight_norm(self.conv_post)
class CodeGenerator(Generator):
    """Generator conditioned on discrete unit codes, with optional F0 and
    speaker-embedding conditioning and optional learned VQ bottlenecks."""
    def __init__(self, h):
        super().__init__(h)
        self.dict = nn.Embedding(h.num_embeddings, h.embedding_dim)
        self.f0 = h.get('f0', None)
        self.multispkr = h.get('multispkr', None)
        if self.multispkr:
            # Fixed-size speaker table; assumes speaker ids < 200 — TODO confirm.
            self.spkr = nn.Embedding(200, h.embedding_dim)
        # Optional learned F0 VQ, trained jointly when lambda_commit is set.
        self.encoder = None
        self.vq = None
        if h.get("lambda_commit", None):
            assert self.f0, "Requires F0 set"
            self.encoder = Encoder(**h.f0_encoder_params)
            self.vq = Bottleneck(**h.f0_vq_params)
        # Optional learned code VQ; replaces the embedding table self.dict.
        self.code_encoder = None
        self.code_vq = None
        if h.get('lambda_commit_code', None):
            self.code_encoder = Encoder(**h.code_encoder_params)
            self.code_vq = Bottleneck(**h.code_vq_params)
            self.dict = None
        # Optional frozen, pre-trained F0 quantizer loaded from disk.
        self.quantizer = None
        if h.get('f0_quantizer_path', None):
            assert self.f0, "Requires F0 set"
            self.quantizer = Quantizer(AttrDict(h.f0_quantizer))
            quantizer_state = torch.load(h.f0_quantizer_path, map_location='cpu')
            self.quantizer.load_state_dict(quantizer_state['generator'])
            self.quantizer.eval()
            self.f0_dict = nn.Embedding(h.f0_quantizer['f0_vq_params']['l_bins'], h.embedding_dim)
    @staticmethod
    def _upsample(signal, max_frames):
        """Repeat `signal` along time so its length reaches `max_frames`.

        Accepts (B, C, T), (B, T) or flat input; raises NotImplementedError
        when max_frames is not an integer multiple of the condition length.
        """
        if signal.dim() == 3:
            bsz, channels, cond_length = signal.size()
        elif signal.dim() == 2:
            signal = signal.unsqueeze(2)
            bsz, channels, cond_length = signal.size()
        else:
            signal = signal.view(-1, 1, 1)
            bsz, channels, cond_length = signal.size()
        signal = signal.unsqueeze(3).repeat(1, 1, 1, max_frames // cond_length)
        # pad zeros as needed (if signal's shape does not divide completely with max_frames)
        reminder = (max_frames - signal.shape[2] * signal.shape[3]) // signal.shape[3]
        if reminder > 0:
            raise NotImplementedError('Padding condition signal - misalignment between condition features.')
        signal = signal.view(bsz, channels, max_frames)
        return signal
    def forward(self, **kwargs):
        """Expects kwargs with 'code' and optionally 'f0', 'spkr' and extra
        features; all conditions are upsampled to a common length and
        concatenated on the channel axis before the parent generator runs."""
        code_commit_losses = None
        code_metrics = None
        # int64 codes -> direct codebook lookup; otherwise encode + quantise.
        if self.code_vq and kwargs['code'].dtype is torch.int64:
            x = self.code_vq.level_blocks[0].k[kwargs['code']].transpose(1, 2)
        elif self.code_vq:
            code_h = self.code_encoder(kwargs['code'])
            _, code_h_q, code_commit_losses, code_metrics = self.code_vq(code_h)
            x = code_h_q[0]
        else:
            x = self.dict(kwargs['code']).transpose(1, 2)
        f0_commit_losses = None
        f0_metrics = None
        if self.vq:
            f0_h = self.encoder(kwargs['f0'])
            _, f0_h_q, f0_commit_losses, f0_metrics = self.vq(f0_h)
            kwargs['f0'] = f0_h_q[0]
        elif self.quantizer:
            # Frozen quantizer: detach everything so no gradients reach it.
            self.quantizer.eval()
            assert not self.quantizer.training, "VQ is in training status!!!"
            f0_h = self.quantizer.encoder(kwargs['f0'])
            f0_h = [x.detach() for x in f0_h]
            zs, _, _, _ = self.quantizer.vq(f0_h)
            zs = [x.detach() for x in zs]
            f0_h_q = self.f0_dict(zs[0].detach()).transpose(1, 2)
            kwargs['f0'] = f0_h_q
        if self.f0:
            # Align code and F0 streams to the longer of the two lengths.
            if x.shape[-1] < kwargs['f0'].shape[-1]:
                x = self._upsample(x, kwargs['f0'].shape[-1])
            else:
                kwargs['f0'] = self._upsample(kwargs['f0'], x.shape[-1])
            x = torch.cat([x, kwargs['f0']], dim=1)
        if self.multispkr:
            spkr = self.spkr(kwargs['spkr']).transpose(1, 2)
            spkr = self._upsample(spkr, x.shape[-1])
            x = torch.cat([x, spkr], dim=1)
        # Any remaining keyword features are upsampled and concatenated too.
        for k, feat in kwargs.items():
            if k in ['spkr', 'code', 'f0']:
                continue
            feat = self._upsample(feat, x.shape[-1])
            x = torch.cat([x, feat], dim=1)
        if self.vq or self.code_vq:
            return super().forward(x), (code_commit_losses, f0_commit_losses), (code_metrics, f0_metrics)
        else:
            return super().forward(x)
class DiscriminatorP(torch.nn.Module):
    """Period discriminator: folds the waveform into a 2-D (time/period,
    period) grid and applies strided 2-D convolutions over it."""
    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList(
            [norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
             norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
             norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
             norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
             norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), ])
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
    def forward(self, x):
        """Return (flattened logits, list of intermediate feature maps)."""
        fmap = []
        # 1d to 2d: reflect-pad so the length divides by the period, then fold.
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)
        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)
        return x, fmap
class MultiPeriodDiscriminator(torch.nn.Module):
    """Ensemble of period discriminators with co-prime periods 2/3/5/7/11."""

    def __init__(self):
        super(MultiPeriodDiscriminator, self).__init__()
        periods = (2, 3, 5, 7, 11)
        self.discriminators = nn.ModuleList(DiscriminatorP(p) for p in periods)

    def forward(self, y, y_hat):
        """Return (real scores, fake scores, real feature maps, fake feature maps)."""
        y_d_rs, y_d_gs, fmap_rs, fmap_gs = [], [], [], []
        for disc in self.discriminators:
            score_real, feats_real = disc(y)
            score_fake, feats_fake = disc(y_hat)
            y_d_rs.append(score_real)
            fmap_rs.append(feats_real)
            y_d_gs.append(score_fake)
            fmap_gs.append(feats_fake)
        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorS(torch.nn.Module):
    """Scale discriminator: a stack of grouped, strided 1-D convolutions
    applied directly to the waveform."""
    def __init__(self, use_spectral_norm=False):
        super(DiscriminatorS, self).__init__()
        # All convs share one normalisation: weight norm or spectral norm.
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList(
            [norm_f(Conv1d(1, 128, 15, 1, padding=7)), norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
             norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),
             norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),
             norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
             norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), ])
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
    def forward(self, x):
        """Return (flattened logits, list of intermediate feature maps)."""
        fmap = []
        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)
        return x, fmap
class MultiScaleDiscriminator(torch.nn.Module):
    """Ensemble of three waveform discriminators at 1x, 2x and 4x
    average-pooled time scales; the full-resolution copy uses spectral norm."""

    def __init__(self):
        super(MultiScaleDiscriminator, self).__init__()
        self.discriminators = nn.ModuleList([
            DiscriminatorS(use_spectral_norm=True),
            DiscriminatorS(),
            DiscriminatorS(),
        ])
        self.meanpools = nn.ModuleList([AvgPool1d(4, 2, padding=2) for _ in range(2)])

    def forward(self, y, y_hat):
        """Return (real scores, fake scores, real feature maps, fake feature maps)."""
        y_d_rs, y_d_gs, fmap_rs, fmap_gs = [], [], [], []
        for idx, disc in enumerate(self.discriminators):
            if idx > 0:
                # Downsample both signals before each coarser discriminator.
                pool = self.meanpools[idx - 1]
                y = pool(y)
                y_hat = pool(y_hat)
            score_real, feats_real = disc(y)
            score_fake, feats_fake = disc(y_hat)
            y_d_rs.append(score_real)
            fmap_rs.append(feats_real)
            y_d_gs.append(score_fake)
            fmap_gs.append(feats_fake)
        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class Quantizer(nn.Module):
    """Standalone F0 quantizer: Encoder -> VQ bottleneck -> Decoder.

    Loaded frozen by CodeGenerator when `f0_quantizer_path` is configured.
    """
    def __init__(self, h):
        super().__init__()
        self.encoder = Encoder(**h.f0_encoder_params)
        self.vq = Bottleneck(**h.f0_vq_params)
        self.decoder = Decoder(**h.f0_decoder_params)
    def forward(self, **kwargs):
        # Expects kwargs['f0']; returns (reconstructed f0, commit losses, VQ metrics).
        f0_h = self.encoder(kwargs['f0'])
        _, f0_h_q, f0_commit_losses, f0_metrics = self.vq(f0_h)
        f0 = self.decoder(f0_h_q)
        return f0, f0_commit_losses, f0_metrics
def feature_loss(fmap_r, fmap_g):
    """L1 feature-matching loss over paired discriminator feature maps,
    scaled by 2.

    `fmap_r` / `fmap_g` are lists (one per discriminator) of lists of
    feature tensors for real and generated audio respectively.
    """
    pairs = (
        (real_feat, fake_feat)
        for real_maps, fake_maps in zip(fmap_r, fmap_g)
        for real_feat, fake_feat in zip(real_maps, fake_maps)
    )
    loss = sum(torch.mean(torch.abs(r - g)) for r, g in pairs)
    return 2 * loss
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
    """LSGAN discriminator loss: real scores pushed toward 1, fakes toward 0.

    Returns (total loss tensor, per-discriminator real losses as floats,
    per-discriminator fake losses as floats).
    """
    total = 0
    r_losses = []
    g_losses = []
    for real_score, fake_score in zip(disc_real_outputs, disc_generated_outputs):
        real_term = torch.mean((1 - real_score) ** 2)
        fake_term = torch.mean(fake_score ** 2)
        total = total + real_term + fake_term
        r_losses.append(real_term.item())
        g_losses.append(fake_term.item())
    return total, r_losses, g_losses
def generator_loss(disc_outputs):
    """LSGAN generator loss: mean((1 - D(G(z)))^2) summed over discriminators.

    Returns (total loss, list of per-discriminator loss tensors).
    """
    gen_losses = [torch.mean((1 - score) ** 2) for score in disc_outputs]
    return sum(gen_losses), gen_losses
| 14,478 | 36.31701 | 120 | py |
speech-resynthesis | speech-resynthesis-main/infer_vqvae_codes.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import random
import sys
from multiprocessing import Manager, Pool
from pathlib import Path
import numpy as np
import soundfile as sf
import torch
from utils import AttrDict
from inference import load_checkpoint, scan_checkpoint
from models import CodeGenerator
h = None
device = None
def stream(message):
    """Rewrite the current terminal line in place with *message*."""
    line = "\r{}".format(message)
    sys.stdout.write(line)
def progbar(i, n, size=16):
    """Render a textual progress bar of `size` cells for step `i` of `n`.

    Cells up to and including position (i * size) // n are drawn filled.
    Fix: the loop variable no longer shadows the `i` parameter, and the bar
    is assembled with str.join instead of repeated concatenation.
    """
    done = (i * size) // n
    return ''.join('█' if pos <= done else '░' for pos in range(size))
def init_worker(queue, arguments):
    """Pool initializer: claim a device id from `queue`, load the
    CodeGenerator checkpoint on it and expose the code encoder / VQ
    bottleneck via module-level globals used by `inference`."""
    import logging
    # Drop logging handlers inherited from the parent process.
    logging.getLogger().handlers = []
    global encoder
    global vq
    global dataset
    global idx
    global device
    global a
    global h
    a = arguments
    idx = queue.get()
    device = idx
    # The config.json is expected next to (or inside) the checkpoint path.
    if os.path.isdir(a.checkpoint_file):
        config_file = os.path.join(a.checkpoint_file, 'config.json')
    else:
        config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json')
    with open(config_file) as f:
        data = f.read()
    json_config = json.loads(data)
    h = AttrDict(json_config)
    generator = CodeGenerator(h).to(idx)
    if os.path.isdir(a.checkpoint_file):
        cp_g = scan_checkpoint(a.checkpoint_file, 'g_')
    else:
        cp_g = a.checkpoint_file
    state_dict_g = load_checkpoint(cp_g)
    generator.load_state_dict(state_dict_g['generator'])
    # Only the code encoder + VQ bottleneck are needed for extraction.
    encoder = generator.code_encoder
    encoder.eval()
    vq = generator.code_vq
    vq.eval()
    # fix seed
    seed = 52
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
@torch.no_grad()
def inference(path):
    """Encode one audio file into a comma-separated VQ code string.

    Uses the per-process globals (`encoder`, `vq`, `device`) installed by
    `init_worker`. Returns (path string, "c0,c1,..." code string).
    """
    # total_rtf = 0.0
    audio, sr = sf.read(path)
    audio = torch.from_numpy(audio).view(1, 1, -1)  # (batch=1, channel=1, T)
    audio = audio.to(device).float()
    h = encoder(audio)  # NOTE: local `h` shadows the global config `h` here
    code, _, _, _ = vq(h)
    code = code[0].cpu().squeeze()
    code = ",".join([str(x.item()) for x in code])
    return str(path), code
def main():
    """Extract VQ-VAE unit codes for every `--ext` file under --input_dir.

    Runs one worker per GPU (inline in this process when --gpus <= 1) and
    writes one "<path>\\t<c0,c1,...>" line per file to
    <output_dir>/vqvae_output.txt.
    """
    print('Initializing VQVAE Extraction Process..')
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_dir', type=Path, required=True)
    parser.add_argument('--output_dir', type=Path, required=True)
    parser.add_argument('--checkpoint_file', required=True)
    parser.add_argument('--gpus', type=int, default=8)
    parser.add_argument('-n', type=int, default=-1)
    parser.add_argument('--ext', type=str, default="wav")
    a = parser.parse_args()
    # One device id per requested worker.  (Was hard-coded to 8 ids, which
    # deadlocked the extra workers whenever --gpus > 8.)
    manager = Manager()
    idQueue = manager.Queue()
    for device_id in range(a.gpus):
        idQueue.put(device_id)
    files = list(a.input_dir.glob(f'**/*{a.ext}'))
    lines = []
    if a.gpus > 1:
        with Pool(a.gpus, init_worker, (idQueue, a)) as pool:
            for i, l in enumerate(pool.imap(inference, files), 1):
                stream(f'{progbar(i, len(files))} {i}/{len(files)} ')
                lines.append(l)
                if a.n != -1 and i > a.n:
                    break
    else:
        # Single-process fallback: run inference inline on device 0.
        import queue
        idQueue = queue.Queue()
        idQueue.put(0)
        init_worker(idQueue, a)
        for i, p in enumerate(files):
            lines.append(inference(p))
            stream(f'{progbar(i, len(files))} {i}/{len(files)} ')
            if a.n != -1 and i > a.n:
                break
    # parents=True so a nested output path does not abort the run at the end.
    a.output_dir.mkdir(parents=True, exist_ok=True)
    with open(a.output_dir / 'vqvae_output.txt', 'w') as f:
        f.write("\n".join("\t".join(l) for l in lines))
if __name__ == '__main__':
    main()
| 3,868 | 24.123377 | 86 | py |
speech-resynthesis | speech-resynthesis-main/train.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/jik876/hifi-gan
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings(action='ignore', message='.*kernel_size exceeds volume extent.*')
import itertools
import os
import time
import argparse
import json
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DistributedSampler, DataLoader
from torch.distributed import init_process_group
from torch.nn.parallel import DistributedDataParallel
from dataset import CodeDataset, mel_spectrogram, get_dataset_filelist
from models import CodeGenerator, MultiPeriodDiscriminator, MultiScaleDiscriminator, feature_loss, generator_loss, \
discriminator_loss
from utils import plot_spectrogram, scan_checkpoint, load_checkpoint, \
save_checkpoint, build_env, AttrDict
torch.backends.cudnn.benchmark = True
def train(rank, local_rank, a, h):
    """Full GAN training loop for the code-conditioned vocoder.

    Args:
        rank: global process rank; rank 0 also logs, validates, checkpoints.
        local_rank: CUDA device index for this process.
        a: parsed command-line options (paths, intervals, epoch/step caps).
        h: AttrDict hyper-parameter config loaded from the JSON config file.
    """
    if h.num_gpus > 1:
        init_process_group(
            backend=h.dist_config['dist_backend'],
            init_method=h.dist_config['dist_url'],
            rank=rank,
            world_size=h.num_gpus,
        )
    torch.cuda.manual_seed(h.seed)
    device = torch.device('cuda:{:d}'.format(local_rank))
    generator = CodeGenerator(h).to(device)
    mpd = MultiPeriodDiscriminator().to(device)
    msd = MultiScaleDiscriminator().to(device)
    if rank == 0:
        print(generator)
        os.makedirs(a.checkpoint_path, exist_ok=True)
        print("checkpoints directory : ", a.checkpoint_path)
    # Resume from the newest generator ('g_*') / discriminator ('do_*')
    # checkpoints.  NOTE(review): cp_g/cp_do are only bound when
    # checkpoint_path is a directory — non-rank-0 workers rely on it
    # already existing; verify launch ordering.
    if os.path.isdir(a.checkpoint_path):
        cp_g = scan_checkpoint(a.checkpoint_path, 'g_')
        cp_do = scan_checkpoint(a.checkpoint_path, 'do_')
    steps = 0
    if cp_g is None or cp_do is None:
        state_dict_do = None
        last_epoch = -1
    else:
        state_dict_g = load_checkpoint(cp_g, device)
        state_dict_do = load_checkpoint(cp_do, device)
        generator.load_state_dict(state_dict_g['generator'])
        mpd.load_state_dict(state_dict_do['mpd'])
        msd.load_state_dict(state_dict_do['msd'])
        steps = state_dict_do['steps'] + 1
        last_epoch = state_dict_do['epoch']
    if h.num_gpus > 1:
        generator = DistributedDataParallel(
            generator,
            device_ids=[local_rank],
            find_unused_parameters=('f0_quantizer' in h),
        ).to(device)
        mpd = DistributedDataParallel(mpd, device_ids=[local_rank]).to(device)
        msd = DistributedDataParallel(msd, device_ids=[local_rank]).to(device)
    optim_g = torch.optim.AdamW(generator.parameters(), h.learning_rate, betas=[h.adam_b1, h.adam_b2])
    optim_d = torch.optim.AdamW(itertools.chain(msd.parameters(), mpd.parameters()), h.learning_rate,
                                betas=[h.adam_b1, h.adam_b2])
    if state_dict_do is not None:
        optim_g.load_state_dict(state_dict_do['optim_g'])
        optim_d.load_state_dict(state_dict_do['optim_d'])
    scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=h.lr_decay, last_epoch=last_epoch)
    scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=h.lr_decay, last_epoch=last_epoch)
    training_filelist, validation_filelist = get_dataset_filelist(h)
    trainset = CodeDataset(training_filelist, h.segment_size, h.code_hop_size, h.n_fft, h.num_mels, h.hop_size,
                           h.win_size, h.sampling_rate, h.fmin, h.fmax, n_cache_reuse=0, fmax_loss=h.fmax_for_loss,
                           device=device, f0=h.get('f0', None), multispkr=h.get('multispkr', None),
                           f0_stats=h.get('f0_stats', None),
                           f0_normalize=h.get('f0_normalize', False), f0_feats=h.get('f0_feats', False),
                           f0_median=h.get('f0_median', False), f0_interp=h.get('f0_interp', False),
                           vqvae=h.get('code_vq_params', False))
    train_sampler = DistributedSampler(trainset) if h.num_gpus > 1 else None
    train_loader = DataLoader(trainset, num_workers=0, shuffle=False, sampler=train_sampler,
                              batch_size=h.batch_size, pin_memory=True, drop_last=True)
    if rank == 0:
        # Validation data and the TensorBoard writer live on rank 0 only.
        validset = CodeDataset(validation_filelist, h.segment_size, h.code_hop_size, h.n_fft, h.num_mels, h.hop_size,
                               h.win_size, h.sampling_rate, h.fmin, h.fmax, False, n_cache_reuse=0,
                               fmax_loss=h.fmax_for_loss, device=device, f0=h.get('f0', None),
                               multispkr=h.get('multispkr', None),
                               f0_stats=h.get('f0_stats', None), f0_normalize=h.get('f0_normalize', False),
                               f0_feats=h.get('f0_feats', False), f0_median=h.get('f0_median', False),
                               f0_interp=h.get('f0_interp', False), vqvae=h.get('code_vq_params', False))
        validation_loader = DataLoader(validset, num_workers=0, shuffle=False, sampler=None,
                                       batch_size=h.batch_size, pin_memory=True, drop_last=True)
        sw = SummaryWriter(os.path.join(a.checkpoint_path, 'logs'))
    generator.train()
    mpd.train()
    msd.train()
    for epoch in range(max(0, last_epoch), a.training_epochs):
        if rank == 0:
            start = time.time()
            print("Epoch: {}".format(epoch + 1))
        if h.num_gpus > 1:
            train_sampler.set_epoch(epoch)
        for i, batch in enumerate(train_loader):
            if rank == 0:
                start_b = time.time()
            x, y, _, y_mel = batch
            y = torch.autograd.Variable(y.to(device, non_blocking=False))
            y_mel = torch.autograd.Variable(y_mel.to(device, non_blocking=False))
            y = y.unsqueeze(1)
            x = {k: torch.autograd.Variable(v.to(device, non_blocking=False)) for k, v in x.items()}
            y_g_hat = generator(**x)
            # With VQ bottlenecks enabled the generator also returns commit
            # losses and codebook metrics.
            if h.get('f0_vq_params', None) or h.get('code_vq_params', None):
                y_g_hat, commit_losses, metrics = y_g_hat
                assert y_g_hat.shape == y.shape, f"Mismatch in vocoder output shape - {y_g_hat.shape} != {y.shape}"
                if h.get('f0_vq_params', None):
                    f0_commit_loss = commit_losses[1][0]
                    f0_metrics = metrics[1][0]
                if h.get('code_vq_params', None):
                    code_commit_loss = commit_losses[0][0]
                    code_metrics = metrics[0][0]
            y_g_hat_mel = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate, h.hop_size,
                                          h.win_size, h.fmin, h.fmax_for_loss)
            # --- Discriminator update (on detached generator output) ---
            optim_d.zero_grad()
            # MPD
            y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach())
            loss_disc_f, losses_disc_f_r, losses_disc_f_g = discriminator_loss(y_df_hat_r, y_df_hat_g)
            # MSD
            y_ds_hat_r, y_ds_hat_g, _, _ = msd(y, y_g_hat.detach())
            loss_disc_s, losses_disc_s_r, losses_disc_s_g = discriminator_loss(y_ds_hat_r, y_ds_hat_g)
            loss_disc_all = loss_disc_s + loss_disc_f
            loss_disc_all.backward()
            optim_d.step()
            # --- Generator update ---
            optim_g.zero_grad()
            # L1 Mel-Spectrogram Loss
            loss_mel = F.l1_loss(y_mel, y_g_hat_mel) * 45
            y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat)
            y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd(y, y_g_hat)
            loss_fm_f = feature_loss(fmap_f_r, fmap_f_g)
            loss_fm_s = feature_loss(fmap_s_r, fmap_s_g)
            loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g)
            loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g)
            loss_gen_all = loss_gen_s + loss_gen_f + loss_fm_s + loss_fm_f + loss_mel
            # Commitment losses keep encoder outputs close to the codebooks.
            if h.get('f0_vq_params', None):
                loss_gen_all += f0_commit_loss * h.get('lambda_commit', None)
            if h.get('code_vq_params', None):
                loss_gen_all += code_commit_loss * h.get('lambda_commit_code', None)
            loss_gen_all.backward()
            optim_g.step()
            if rank == 0:
                # STDOUT logging
                if steps % a.stdout_interval == 0:
                    with torch.no_grad():
                        mel_error = F.l1_loss(y_mel, y_g_hat_mel).item()
                    print(
                        'Steps : {:d}, Gen Loss Total : {:4.3f}, Mel-Spec. Error : {:4.3f}, s/b : {:4.3f}'.format(steps,
                                                                                                                  loss_gen_all,
                                                                                                                  mel_error,
                                                                                                                  time.time() - start_b))
                # checkpointing
                if steps % a.checkpoint_interval == 0 and steps != 0:
                    checkpoint_path = "{}/g_{:08d}".format(a.checkpoint_path, steps)
                    save_checkpoint(checkpoint_path,
                                    {'generator': (generator.module if h.num_gpus > 1 else generator).state_dict()})
                    checkpoint_path = "{}/do_{:08d}".format(a.checkpoint_path, steps)
                    save_checkpoint(checkpoint_path, {'mpd': (mpd.module if h.num_gpus > 1 else mpd).state_dict(),
                                                      'msd': (msd.module if h.num_gpus > 1 else msd).state_dict(),
                                                      'optim_g': optim_g.state_dict(), 'optim_d': optim_d.state_dict(),
                                                      'steps': steps, 'epoch': epoch})
                # Tensorboard summary logging
                if steps % a.summary_interval == 0:
                    sw.add_scalar("training/gen_loss_total", loss_gen_all, steps)
                    sw.add_scalar("training/mel_spec_error", mel_error, steps)
                    if h.get('f0_vq_params', None):
                        sw.add_scalar("training/commit_error", f0_commit_loss, steps)
                        sw.add_scalar("training/used_curr", f0_metrics['used_curr'].item(), steps)
                        sw.add_scalar("training/entropy", f0_metrics['entropy'].item(), steps)
                        sw.add_scalar("training/usage", f0_metrics['usage'].item(), steps)
                    if h.get('code_vq_params', None):
                        sw.add_scalar("training/code_commit_error", code_commit_loss, steps)
                        sw.add_scalar("training/code_used_curr", code_metrics['used_curr'].item(), steps)
                        sw.add_scalar("training/code_entropy", code_metrics['entropy'].item(), steps)
                        sw.add_scalar("training/code_usage", code_metrics['usage'].item(), steps)
                # Validation
                if steps % a.validation_interval == 0:  # and steps != 0:
                    generator.eval()
                    torch.cuda.empty_cache()
                    val_err_tot = 0
                    with torch.no_grad():
                        for j, batch in enumerate(validation_loader):
                            x, y, _, y_mel = batch
                            x = {k: v.to(device, non_blocking=False) for k, v in x.items()}
                            y_g_hat = generator(**x)
                            if h.get('f0_vq_params', None) or h.get('code_vq_params', None):
                                y_g_hat, commit_losses, _ = y_g_hat
                                if h.get('f0_vq_params', None):
                                    f0_commit_loss = commit_losses[1][0]
                                    val_err_tot += f0_commit_loss * h.get('lambda_commit', None)
                                if h.get('code_vq_params', None):
                                    code_commit_loss = commit_losses[0][0]
                                    val_err_tot += code_commit_loss * h.get('lambda_commit_code', None)
                            y_mel = torch.autograd.Variable(y_mel.to(device, non_blocking=False))
                            y_g_hat_mel = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate,
                                                          h.hop_size, h.win_size, h.fmin, h.fmax_for_loss)
                            val_err_tot += F.l1_loss(y_mel, y_g_hat_mel).item()
                            # Log audio/spectrograms for the first few batches.
                            if j <= 4:
                                if steps == 0:
                                    sw.add_audio('gt/y_{}'.format(j), y[0], steps, h.sampling_rate)
                                    sw.add_figure('gt/y_spec_{}'.format(j), plot_spectrogram(y_mel[0].cpu()), steps)
                                sw.add_audio('generated/y_hat_{}'.format(j), y_g_hat[0], steps, h.sampling_rate)
                                y_hat_spec = mel_spectrogram(y_g_hat[:1].squeeze(1), h.n_fft, h.num_mels,
                                                             h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax)
                                sw.add_figure('generated/y_hat_spec_{}'.format(j),
                                              plot_spectrogram(y_hat_spec[:1].squeeze(0).cpu().numpy()), steps)
                        val_err = val_err_tot / (j + 1)
                        sw.add_scalar("validation/mel_spec_error", val_err, steps)
                        if h.get('f0_vq_params', None):
                            sw.add_scalar("validation/commit_error", f0_commit_loss, steps)
                        if h.get('code_vq_params', None):
                            sw.add_scalar("validation/code_commit_error", code_commit_loss, steps)
                    generator.train()
            steps += 1
            if steps >= a.training_steps:
                break
        scheduler_g.step()
        scheduler_d.step()
        if rank == 0:
            print('Time taken for epoch {} is {} sec\n'.format(epoch + 1, int(time.time() - start)))
    if rank == 0:
        print('Finished training')
def main():
    """Parse CLI flags, load the JSON config, launch single- or multi-GPU training."""
    print('Initializing Training Process..')
    parser = argparse.ArgumentParser()
    parser.add_argument('--group_name', default=None)
    parser.add_argument('--checkpoint_path', default='cp_hifigan')
    parser.add_argument('--config', default='')
    # Integer-valued options share one declaration loop.
    for flag, default in (('--training_epochs', 2000),
                          ('--training_steps', 400000),
                          ('--stdout_interval', 5),
                          ('--checkpoint_interval', 10000),
                          ('--summary_interval', 100),
                          ('--validation_interval', 1000)):
        parser.add_argument(flag, default=default, type=int)
    # NOTE: type=bool parses any non-empty string (even "False") as True.
    parser.add_argument('--fine_tuning', default=False, type=bool)
    parser.add_argument('--local_rank', default=0, type=int)
    parser.add_argument('--distributed-world-size', type=int)
    parser.add_argument('--distributed-port', type=int)
    a = parser.parse_args()
    with open(a.config) as config_file:
        h = AttrDict(json.loads(config_file.read()))
    build_env(a.config, 'config.json', a.checkpoint_path)
    torch.manual_seed(h.seed)
    distributed = torch.cuda.is_available() and 'WORLD_SIZE' in os.environ
    if distributed:
        # Distributed launch: derive the per-GPU batch size from WORLD_SIZE.
        torch.cuda.manual_seed(h.seed)
        h.num_gpus = int(os.environ['WORLD_SIZE'])
        h.batch_size = int(h.batch_size / h.num_gpus)
        print('Batch size per GPU :', h.batch_size)
    rank = local_rank = a.local_rank if distributed else 0
    train(rank, local_rank, a, h)
if __name__ == '__main__':
    main()
| 15,924 | 46.966867 | 137 | py |
speech-resynthesis | speech-resynthesis-main/modules/vq.py | # Adapted from https://github.com/openai/jukebox
import numpy as np
import torch as t
import torch.nn as nn
import torch.nn.functional as F
import modules.dist as dist
class BottleneckBlock(nn.Module):
def __init__(self, k_bins, emb_width, mu):
super().__init__()
self.k_bins = k_bins
self.emb_width = emb_width
self.mu = mu
self.reset_k()
self.threshold = 1.0
def reset_k(self):
self.init = False
self.k_sum = None
self.k_elem = None
self.register_buffer('k', t.zeros(self.k_bins, self.emb_width).cuda())
def _tile(self, x):
d, ew = x.shape
if d < self.k_bins:
n_repeats = (self.k_bins + d - 1) // d
std = 0.01 / np.sqrt(ew)
x = x.repeat(n_repeats, 1)
x = x + t.randn_like(x) * std
return x
def init_k(self, x):
mu, emb_width, k_bins = self.mu, self.emb_width, self.k_bins
self.init = True
# init k_w using random vectors from x
y = self._tile(x)
_k_rand = y[t.randperm(y.shape[0])][:k_bins]
dist.broadcast(_k_rand, 0)
self.k = _k_rand
assert self.k.shape == (k_bins, emb_width)
self.k_sum = self.k
self.k_elem = t.ones(k_bins, device=self.k.device)
def restore_k(self, num_tokens=None, threshold=1.0):
mu, emb_width, k_bins = self.mu, self.emb_width, self.k_bins
self.init = True
assert self.k.shape == (k_bins, emb_width)
self.k_sum = self.k.clone()
self.k_elem = t.ones(k_bins, device=self.k.device)
if num_tokens is not None:
expected_usage = num_tokens / k_bins
self.k_elem.data.mul_(expected_usage)
self.k_sum.data.mul_(expected_usage)
self.threshold = threshold
def update_k(self, x, x_l):
mu, emb_width, k_bins = self.mu, self.emb_width, self.k_bins
with t.no_grad():
# Calculate new centres
x_l_onehot = t.zeros(k_bins, x.shape[0], device=x.device) # k_bins, N * L
x_l_onehot.scatter_(0, x_l.view(1, x.shape[0]), 1)
_k_sum = t.matmul(x_l_onehot, x) # k_bins, w
_k_elem = x_l_onehot.sum(dim=-1) # k_bins
y = self._tile(x)
_k_rand = y[t.randperm(y.shape[0])][:k_bins]
dist.broadcast(_k_rand, 0)
dist.all_reduce(_k_sum)
dist.all_reduce(_k_elem)
# Update centres
old_k = self.k
self.k_sum = mu * self.k_sum + (1. - mu) * _k_sum # w, k_bins
self.k_elem = mu * self.k_elem + (1. - mu) * _k_elem # k_bins
usage = (self.k_elem.view(k_bins, 1) >= self.threshold).float()
self.k = usage * (self.k_sum.view(k_bins, emb_width) / self.k_elem.view(k_bins, 1)) \
+ (1 - usage) * _k_rand
_k_prob = _k_elem / t.sum(_k_elem) # x_l_onehot.mean(dim=-1) # prob of each bin
entropy = -t.sum(_k_prob * t.log(_k_prob + 1e-8)) # entropy ie how diverse
used_curr = (_k_elem >= self.threshold).sum()
usage = t.sum(usage)
dk = t.norm(self.k - old_k) / np.sqrt(np.prod(old_k.shape))
return dict(entropy=entropy,
used_curr=used_curr,
usage=usage,
dk=dk)
def preprocess(self, x):
# NCT -> NTC -> [NT, C]
x = x.permute(0, 2, 1).contiguous()
x = x.view(-1, x.shape[-1]) # x_en = (N * L, w), k_j = (w, k_bins)
if x.shape[-1] == self.emb_width:
prenorm = t.norm(x - t.mean(x)) / np.sqrt(np.prod(x.shape))
elif x.shape[-1] == 2 * self.emb_width:
x1, x2 = x[..., :self.emb_width], x[..., self.emb_width:]
prenorm = (t.norm(x1 - t.mean(x1)) / np.sqrt(np.prod(x1.shape))) + (
t.norm(x2 - t.mean(x2)) / np.sqrt(np.prod(x2.shape)))
# Normalise
x = x1 + x2
else:
assert False, f"Expected {x.shape[-1]} to be (1 or 2) * {self.emb_width}"
return x, prenorm
def postprocess(self, x_l, x_d, x_shape):
# [NT, C] -> NTC -> NCT
N, T = x_shape
x_d = x_d.view(N, T, -1).permute(0, 2, 1).contiguous()
x_l = x_l.view(N, T)
return x_l, x_d
def quantise(self, x):
# Calculate latent code x_l
k_w = self.k.t()
distance = t.sum(x ** 2, dim=-1, keepdim=True) - 2 * t.matmul(x, k_w) + t.sum(k_w ** 2, dim=0,
keepdim=True) # (N * L, b)
min_distance, x_l = t.min(distance, dim=-1)
fit = t.mean(min_distance)
return x_l, fit
def dequantise(self, x_l):
x = F.embedding(x_l, self.k)
return x
def encode(self, x):
N, width, T = x.shape
# Preprocess.
x, prenorm = self.preprocess(x)
# Quantise
x_l, fit = self.quantise(x)
# Postprocess.
x_l = x_l.view(N, T)
return x_l
def decode(self, x_l):
N, T = x_l.shape
width = self.emb_width
# Dequantise
x_d = self.dequantise(x_l)
# Postprocess
x_d = x_d.view(N, T, width).permute(0, 2, 1).contiguous()
return x_d
    def forward(self, x, update_k=True):
        """Quantise ``x`` through the bottleneck with a straight-through pass.

        Returns (codes, dequantised features, commitment loss, metrics).
        Codebook updates (EMA) only happen in training mode.
        """
        N, width, T = x.shape
        # Preprocess
        x, prenorm = self.preprocess(x)
        # Init k if not inited
        if update_k and not self.init:
            self.init_k(x)
        # Quantise and dequantise through bottleneck
        x_l, fit = self.quantise(x)
        x_d = self.dequantise(x_l)
        # Update embeddings
        if update_k and self.training:
            update_metrics = self.update_k(x, x_l)
        else:
            update_metrics = {}
        # Loss: pull the encoder output towards the (frozen) codebook entries.
        commit_loss = t.norm(x_d.detach() - x) ** 2 / np.prod(x.shape)
        # Passthrough: identity in the forward pass, gradient flows to x only
        # (straight-through estimator).
        x_d = x + (x_d - x).detach()
        # Postprocess
        x_l, x_d = self.postprocess(x_l, x_d, (N, T))
        return x_l, x_d, commit_loss, dict(fit=fit,
                                           pn=prenorm,
                                           **update_metrics)
class Bottleneck(nn.Module):
    """A stack of per-level BottleneckBlock quantisers sharing one interface."""

    def __init__(self, l_bins, emb_width, mu, levels):
        super().__init__()
        self.levels = levels
        self.level_blocks = nn.ModuleList(
            BottleneckBlock(l_bins, emb_width, mu) for _ in range(levels)
        )

    def encode(self, xs):
        """Quantise each level's features into discrete codes."""
        return [block.encode(x) for block, x in zip(self.level_blocks, xs)]

    def decode(self, zs, start_level=0, end_level=None):
        """Dequantise codes for levels in [start_level, end_level)."""
        if end_level is None:
            end_level = self.levels
        chosen = self.level_blocks[start_level:end_level]
        return [block.decode(z) for block, z in zip(chosen, zs)]

    def forward(self, xs):
        zs, xs_quantised, commit_losses, metrics = [], [], [], []
        for level in range(self.levels):
            block = self.level_blocks[level]
            z, x_quantised, commit_loss, metric = block(xs[level], update_k=self.training)
            zs.append(z)
            if not self.training:
                # Be extra paranoid and make sure the encoder weights can't
                # change from straight-through estimator
                x_quantised = x_quantised.detach()
            xs_quantised.append(x_quantised)
            commit_losses.append(commit_loss)
            if self.training:
                metrics.append(metric)
        return zs, xs_quantised, commit_losses, metrics
class NoBottleneckBlock(nn.Module):
    """Placeholder level block used when quantisation is disabled."""

    def restore_k(self):
        # There is no codebook, so nothing to restore.
        pass
class NoBottleneck(nn.Module):
    """Identity bottleneck: passes features through without quantisation.

    Mirrors the Bottleneck interface (encode/decode/forward) so callers can
    swap it in; commit losses and metrics are all-zero placeholders.
    """
    def __init__(self, levels):
        super().__init__()
        self.level_blocks = nn.ModuleList()
        self.levels = levels
        for level in range(levels):
            self.level_blocks.append(NoBottleneckBlock())

    def encode(self, xs):
        return xs

    def decode(self, zs, start_level=0, end_level=None):
        if end_level is None:
            end_level = self.levels
        return zs

    def forward(self, xs):
        # Create the placeholder scalar on the same device as the inputs.
        # The previous hard-coded `.cuda()` crashed on CPU-only runs; on GPU
        # inputs the behaviour is unchanged.
        device = xs[0].device if len(xs) > 0 else None
        zero = t.zeros((), device=device)
        commit_losses = [zero for _ in range(self.levels)]
        metrics = [dict(entropy=zero, usage=zero, used_curr=zero, pn=zero, dk=zero) for _ in range(self.levels)]
        return xs, xs, commit_losses, metrics
| 8,566 | 33.268 | 120 | py |
speech-resynthesis | speech-resynthesis-main/modules/resnet.py | # Adapted from https://github.com/openai/jukebox
import math
import torch.nn as nn
import modules.dist as dist
class ResConvBlock(nn.Module):
    """2-D residual unit: ReLU -> 3x3 conv -> ReLU -> 1x1 conv, plus skip."""

    def __init__(self, n_in, n_state):
        super().__init__()
        layers = [
            nn.ReLU(),
            nn.Conv2d(n_in, n_state, 3, 1, 1),
            nn.ReLU(),
            nn.Conv2d(n_state, n_in, 1, 1, 0),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        residual = self.model(x)
        return x + residual
class Resnet(nn.Module):
    """A sequential stack of ``n_depth`` ResConvBlock residual units."""

    def __init__(self, n_in, n_depth, m_conv=1.0):
        super().__init__()
        # Hidden width of each unit is scaled by m_conv.
        units = [ResConvBlock(n_in, int(m_conv * n_in)) for _ in range(n_depth)]
        self.model = nn.Sequential(*units)

    def forward(self, x):
        return self.model(x)
class ResConv1DBlock(nn.Module):
    """1-D residual unit with a dilated 3-tap conv and a scaled skip path."""

    def __init__(self, n_in, n_state, dilation=1, zero_out=False, res_scale=1.0):
        super().__init__()
        # padding == dilation gives 'same' output length for kernel size 3.
        self.model = nn.Sequential(
            nn.ReLU(),
            nn.Conv1d(n_in, n_state, 3, 1, dilation, dilation),
            nn.ReLU(),
            nn.Conv1d(n_state, n_in, 1, 1, 0),
        )
        if zero_out:
            # Initialise as an identity mapping: the residual branch emits 0.
            final = self.model[-1]
            nn.init.zeros_(final.weight)
            nn.init.zeros_(final.bias)
        self.res_scale = res_scale

    def forward(self, x):
        return x + self.res_scale * self.model(x)
class Resnet1D(nn.Module):
    """Stack of ResConv1DBlock units with (optionally cyclic) growing dilation."""

    def __init__(self, n_in, n_depth, m_conv=1.0, dilation_growth_rate=1, dilation_cycle=None, zero_out=False,
                 res_scale=False, reverse_dilation=False, checkpoint_res=False):
        super().__init__()

        def _cycle(depth):
            # Restart the dilation schedule every `dilation_cycle` layers.
            return depth if dilation_cycle is None else depth % dilation_cycle

        scale = 1.0 if not res_scale else 1.0 / math.sqrt(n_depth)
        blocks = [
            ResConv1DBlock(n_in, int(m_conv * n_in),
                           dilation=dilation_growth_rate ** _cycle(depth),
                           zero_out=zero_out,
                           res_scale=scale)
            for depth in range(n_depth)
        ]
        if reverse_dilation:
            blocks.reverse()
        self.checkpoint_res = checkpoint_res
        if self.checkpoint_res == 1:
            if dist.get_rank() == 0:
                print("Checkpointing convs")
            self.blocks = nn.ModuleList(blocks)
        else:
            self.model = nn.Sequential(*blocks)

    def forward(self, x):
        if self.checkpoint_res == 1:
            raise NotImplementedError("Checkpoint not implemented")
        return self.model(x)
| 2,603 | 30.373494 | 110 | py |
speech-resynthesis | speech-resynthesis-main/modules/dist.py | # Adapted from https://github.com/openai/jukebox
from enum import Enum
import torch.distributed as dist
class ReduceOp(Enum):
    """Backend-agnostic reduction op, mapped onto torch.distributed.ReduceOp."""
    # Plain int values: the original trailing commas made SUM/PRODUCT/MIN
    # one-element tuples ((0,), (1,), (2,)) while MAX stayed an int — an
    # accidental inconsistency; members (not values) are what ToDistOp keys on.
    SUM = 0
    PRODUCT = 1
    MIN = 2
    MAX = 3

    def ToDistOp(self):
        """Translate this member to the equivalent torch.distributed.ReduceOp."""
        return {
            self.SUM: dist.ReduceOp.SUM,
            self.PRODUCT: dist.ReduceOp.PRODUCT,
            self.MIN: dist.ReduceOp.MIN,
            self.MAX: dist.ReduceOp.MAX
        }[self]
def is_available():
    """True when a torch.distributed process group has been initialised."""
    return dist.is_initialized()


def get_rank():
    """Rank of this process, or 0 when running without distributed."""
    if not is_available():
        return 0
    return _get_rank()


def get_world_size():
    """Number of processes, or 1 when running without distributed."""
    if not is_available():
        return 1
    return _get_world_size()


def barrier():
    """Synchronise all processes; no-op without distributed."""
    if is_available():
        return _barrier()


def all_gather(tensor_list, tensor):
    """Gather tensors from every rank; single-process fallback copies in place."""
    if not is_available():
        tensor_list[0] = tensor
        return
    return _all_gather(tensor_list, tensor)


def all_reduce(tensor, op=ReduceOp.SUM):
    """Reduce `tensor` across ranks in place; no-op without distributed."""
    if is_available():
        return _all_reduce(tensor, op)


def reduce(tensor, dst, op=ReduceOp.SUM):
    """Reduce `tensor` onto rank `dst`; no-op without distributed."""
    if is_available():
        return _reduce(tensor, dst, op)


def broadcast(tensor, src):
    """Broadcast `tensor` from rank `src`; no-op without distributed."""
    if is_available():
        return _broadcast(tensor, src)


def init_process_group(backend, init_method):
    """Initialise the default process group; no-op without distributed."""
    if is_available():
        return _init_process_group(backend, init_method)
# Thin pass-throughs to torch.distributed. These are only reached once a
# process group exists (the public wrappers guard on is_available()), so
# they would raise if called in a non-distributed run.
def _get_rank():
    return dist.get_rank()
def _barrier():
    return dist.barrier()
def _get_world_size():
    return dist.get_world_size()
def _all_gather(tensor_list, tensor):
    return dist.all_gather(tensor_list, tensor)
def _all_reduce(tensor, op):
    # `op` is the local ReduceOp enum; translate before delegating.
    return dist.all_reduce(tensor, op.ToDistOp())
def _reduce(tensor, dst, op):
    return dist.reduce(tensor, dst, op.ToDistOp())
def _broadcast(tensor, src):
    return dist.broadcast(tensor, src)
def _init_process_group(backend, init_method):
    return dist.init_process_group(backend, init_method)
| 2,013 | 17.477064 | 56 | py |
speech-resynthesis | speech-resynthesis-main/modules/jukebox.py | # Adapted from https://github.com/openai/jukebox
import numpy as np
import torch.nn as nn
from modules.resnet import Resnet1D
def assert_shape(x, exp_shape):
    """Fail loudly when a tensor's shape differs from the expected one."""
    actual = x.shape
    assert actual == exp_shape, f"Expected {exp_shape} got {actual}"
class EncoderConvBlock(nn.Module):
    """One encoder stage: strided Conv1d + Resnet1D downsampling layers,
    ending in a kernel-3 projection to ``output_emb_width``.

    ``stride_t``/``down_t`` may be scalars or equal-length sequences; in the
    sequence form each (stride, depth) pair contributes its own sub-chain.
    NOTE(review): layer order defines the state_dict layout — do not reorder.
    """
    def __init__(self, input_emb_width, output_emb_width, down_t, stride_t, width, depth, m_conv,
                 dilation_growth_rate=1, dilation_cycle=None, zero_out=False, res_scale=False):
        super().__init__()
        blocks = []
        if type(stride_t) is tuple or type(stride_t) is list:
            start = True
            for s_t, d_t in zip(stride_t, down_t):
                # Filter/padding picked per stride parity so the conv
                # downsamples by exactly s_t.
                if s_t % 2 == 0:
                    filter_t, pad_t = s_t * 2, s_t // 2
                else:
                    filter_t, pad_t = s_t * 2 + 1, s_t // 2 + 1
                if d_t > 0:
                    for i in range(d_t):
                        # Only the very first conv of the whole chain reads
                        # the input embedding width.
                        block = nn.Sequential(
                            nn.Conv1d(input_emb_width if i == 0 and start else width, width, filter_t, s_t, pad_t),
                            Resnet1D(width, depth, m_conv, dilation_growth_rate, dilation_cycle, zero_out, res_scale), )
                        blocks.append(block)
                    start = False
            block = nn.Conv1d(width, output_emb_width, 3, 1, 1)
            blocks.append(block)
        else:
            filter_t, pad_t = stride_t * 2, stride_t // 2
            if down_t > 0:
                for i in range(down_t):
                    block = nn.Sequential(
                        nn.Conv1d(input_emb_width if i == 0 else width, width, filter_t, stride_t, pad_t),
                        Resnet1D(width, depth, m_conv, dilation_growth_rate, dilation_cycle, zero_out, res_scale), )
                    blocks.append(block)
                # NOTE(review): unlike the sequence branch, the final
                # projection is only added when down_t > 0 — confirm intended.
                block = nn.Conv1d(width, output_emb_width, 3, 1, 1)
                blocks.append(block)
        self.model = nn.Sequential(*blocks)
    def forward(self, x):
        return self.model(x)
class DecoderConvBock(nn.Module):
    """Mirror of EncoderConvBlock: Resnet1D + ConvTranspose1d upsampling.

    (Class name keeps the original "Bock" spelling — referenced elsewhere.)
    NOTE(review): layer order defines the state_dict layout — do not reorder.
    """
    def __init__(self, input_emb_width, output_emb_width, down_t, stride_t, width, depth, m_conv,
                 dilation_growth_rate=1, dilation_cycle=None, zero_out=False, res_scale=False,
                 reverse_decoder_dilation=False, checkpoint_res=False):
        super().__init__()
        blocks = []
        if type(stride_t) is tuple or type(stride_t) is list:
            # Entry projection from the embedding width to the working width.
            block = nn.Conv1d(output_emb_width, width, 3, 1, 1)
            blocks.append(block)
            for k, (s_t, d_t) in enumerate(zip(stride_t, down_t)):
                if d_t > 0:
                    if s_t % 2 == 0:
                        filter_t, pad_t = s_t * 2, s_t // 2
                    else:
                        filter_t, pad_t = s_t * 2 + 1, s_t // 2 + 1
                    end = k == len(stride_t) - 1
                    for i in range(d_t):
                        # Only the very last transpose-conv maps back to the
                        # input embedding width.
                        block = nn.Sequential(
                            Resnet1D(width, depth, m_conv, dilation_growth_rate, dilation_cycle, zero_out=zero_out,
                                     res_scale=res_scale, reverse_dilation=reverse_decoder_dilation,
                                     checkpoint_res=checkpoint_res),
                            nn.ConvTranspose1d(width, input_emb_width if i == (d_t - 1) and end else width, filter_t,
                                               s_t, pad_t))
                        blocks.append(block)
        else:
            if down_t > 0:
                filter_t, pad_t = stride_t * 2, stride_t // 2
                block = nn.Conv1d(output_emb_width, width, 3, 1, 1)
                blocks.append(block)
                for i in range(down_t):
                    block = nn.Sequential(
                        Resnet1D(width, depth, m_conv, dilation_growth_rate, dilation_cycle, zero_out=zero_out,
                                 res_scale=res_scale, reverse_dilation=reverse_decoder_dilation,
                                 checkpoint_res=checkpoint_res),
                        nn.ConvTranspose1d(width, input_emb_width if i == (down_t - 1) else width, filter_t, stride_t,
                                           pad_t))
                    blocks.append(block)
        self.model = nn.Sequential(*blocks)
    def forward(self, x):
        return self.model(x)
class Encoder(nn.Module):
    """Multi-level convolutional encoder; returns one feature map per level,
    each further downsampled by its (stride ** depth) factor.
    """
    def __init__(self, input_emb_width, output_emb_width, levels, downs_t, strides_t, **block_kwargs):
        super().__init__()
        self.input_emb_width = input_emb_width
        self.output_emb_width = output_emb_width
        self.levels = levels
        self.downs_t = downs_t
        self.strides_t = strides_t
        # Shared kwargs are reused by the Decoder; strip the decoder-only one.
        block_kwargs_copy = dict(**block_kwargs)
        if 'reverse_decoder_dilation' in block_kwargs_copy:
            del block_kwargs_copy['reverse_decoder_dilation']
        level_block = lambda level, down_t, stride_t: EncoderConvBlock(
            input_emb_width if level == 0 else output_emb_width, output_emb_width, down_t, stride_t,
            **block_kwargs_copy)
        self.level_blocks = nn.ModuleList()
        iterator = zip(list(range(self.levels)), downs_t, strides_t)
        for level, down_t, stride_t in iterator:
            self.level_blocks.append(level_block(level, down_t, stride_t))
    def forward(self, x):
        N, T = x.shape[0], x.shape[-1]
        emb = self.input_emb_width
        assert_shape(x, (N, emb, T))
        xs = []
        # 64, 32, ...
        iterator = zip(list(range(self.levels)), self.downs_t, self.strides_t)
        for level, down_t, stride_t in iterator:
            level_block = self.level_blocks[level]
            x = level_block(x)
            # Track the expected downsampled length for the shape check below.
            if type(stride_t) is tuple or type(stride_t) is list:
                emb, T = self.output_emb_width, T // np.prod([s ** d for s, d in zip(stride_t, down_t)])
            else:
                emb, T = self.output_emb_width, T // (stride_t ** down_t)
            assert_shape(x, (N, emb, T))
            xs.append(x)
        return xs
class Decoder(nn.Module):
    """Multi-level convolutional decoder; upsamples from the coarsest level,
    optionally adding each finer level's features on the way up.
    """
    def __init__(self, input_emb_width, output_emb_width, levels, downs_t, strides_t, **block_kwargs):
        super().__init__()
        self.input_emb_width = input_emb_width
        self.output_emb_width = output_emb_width
        self.levels = levels
        self.downs_t = downs_t
        self.strides_t = strides_t
        level_block = lambda level, down_t, stride_t: DecoderConvBock(output_emb_width, output_emb_width, down_t,
                                                                      stride_t, **block_kwargs)
        self.level_blocks = nn.ModuleList()
        iterator = zip(list(range(self.levels)), downs_t, strides_t)
        for level, down_t, stride_t in iterator:
            self.level_blocks.append(level_block(level, down_t, stride_t))
        # Final projection back to the raw input embedding width.
        self.out = nn.Conv1d(output_emb_width, input_emb_width, 3, 1, 1)
    def forward(self, xs, all_levels=True):
        if all_levels:
            assert len(xs) == self.levels
        else:
            assert len(xs) == 1
        # Start from the coarsest (last) level.
        x = xs[-1]
        N, T = x.shape[0], x.shape[-1]
        emb = self.output_emb_width
        assert_shape(x, (N, emb, T))
        # 32, 64 ...
        iterator = reversed(list(zip(list(range(self.levels)), self.downs_t, self.strides_t)))
        for level, down_t, stride_t in iterator:
            level_block = self.level_blocks[level]
            x = level_block(x)
            # Track the expected upsampled length for the shape check below.
            if type(stride_t) is tuple or type(stride_t) is list:
                emb, T = self.output_emb_width, T * np.prod([s ** d for s, d in zip(stride_t, down_t)])
            else:
                emb, T = self.output_emb_width, T * (stride_t ** down_t)
            assert_shape(x, (N, emb, T))
            if level != 0 and all_levels:
                x = x + xs[level - 1]
        x = self.out(x)
        return x
| 7,855 | 42.888268 | 120 | py |
speech-resynthesis | speech-resynthesis-main/examples/speech_to_speech_translation/inference.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/jik876/hifi-gan
import argparse
import glob
import json
import os
import random
import sys
import time
from multiprocessing import Manager, Pool
from pathlib import Path
import librosa
import numpy as np
import torch
from scipy.io.wavfile import write
from dataset import CodeDataset, parse_manifest, mel_spectrogram, \
MAX_WAV_VALUE
from utils import AttrDict
from examples.speech_to_speech_translation.models import DurationCodeGenerator
h = None
device = None
def stream(message):
    """Rewrite the current terminal line in place (carriage return, no newline)."""
    sys.stdout.write(f"\r{message}")
def progbar(i, n, size=16):
    """Render a `size`-cell textual progress bar for step ``i`` of ``n``.

    Cells up to and including the proportional position are filled.
    """
    done = (i * size) // n
    bar = ''
    # The original loop reused `i` as the loop variable, shadowing the
    # step parameter; renamed for clarity (output is unchanged because
    # `done` is computed before the loop).
    for cell in range(size):
        bar += '█' if cell <= done else '░'
    return bar
def load_checkpoint(filepath):
    """Load a torch checkpoint from ``filepath``, mapping tensors to CPU."""
    assert os.path.isfile(filepath)
    print("Loading '{}'".format(filepath))
    state = torch.load(filepath, map_location='cpu')
    print("Complete.")
    return state
def get_mel(x):
    # Compute a mel-spectrogram of waveform `x` using the module-global
    # config `h` (populated by init_worker/main); `mel_spectrogram` is
    # imported from dataset.py.
    return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax)
def scan_checkpoint(cp_dir, prefix):
    """Return the lexicographically-last checkpoint matching ``prefix*``, or ''."""
    matches = glob.glob(os.path.join(cp_dir, prefix + '*'))
    if not matches:
        return ''
    # Zero-padded step numbers make lexicographic order == numeric order.
    return sorted(matches)[-1]
def generate(h, generator, code):
    """Run the vocoder on one batch of codes.

    Returns int16 audio samples and the real-time factor of the synthesis.
    """
    start = time.time()
    output = generator(**code)
    if type(output) is tuple:
        # Some generators also return auxiliary losses; keep the waveform.
        output = output[0]
    rtf = (time.time() - start) / (output.shape[-1] / h.sampling_rate)
    audio = output.squeeze()
    audio = audio * MAX_WAV_VALUE
    audio = audio.cpu().numpy().astype('int16')
    return audio, rtf
def init_worker(queue, arguments):
    """Per-process initialiser for the inference Pool.

    Pulls a worker id from `queue` (it doubles as the CUDA device index),
    loads the config and generator checkpoint, and builds the dataset — all
    stored in module globals so `inference()` can run with just an item index.
    """
    import logging
    logging.getLogger().handlers = []
    global generator
    global spkrs_emb
    global dataset
    global idx
    global device
    global a
    global h
    global spkrs
    a = arguments
    idx = queue.get()
    device = idx
    # config.json lives next to (or inside) the checkpoint path.
    if os.path.isdir(a.checkpoint_file):
        config_file = os.path.join(a.checkpoint_file, 'config.json')
    else:
        config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json')
    with open(config_file) as f:
        data = f.read()
    json_config = json.loads(data)
    h = AttrDict(json_config)
    generator = DurationCodeGenerator(h).to(idx)
    if os.path.isdir(a.checkpoint_file):
        cp_g = scan_checkpoint(a.checkpoint_file, 'g_')
    else:
        cp_g = a.checkpoint_file
    state_dict_g = load_checkpoint(cp_g)
    generator.load_state_dict(state_dict_g['generator'])
    if a.code_file is not None:
        # Codes supplied directly as "filename|code code code ..." lines.
        dataset = [x.strip().split('|') for x in open(a.code_file).readlines()]
        def parse_code(c):
            c = [int(v) for v in c.split(" ")]
            return [torch.LongTensor(c).numpy()]
        dataset = [(parse_code(x[1]), None, x[0], None) for x in dataset]
    else:
        file_list = parse_manifest(a.input_code_file)
        dataset = CodeDataset(file_list, -1, h.code_hop_size, h.n_fft, h.num_mels, h.hop_size, h.win_size,
                              h.sampling_rate, h.fmin, h.fmax, n_cache_reuse=0,
                              fmax_loss=h.fmax_for_loss, device=device,
                              f0=h.get('f0', None), multispkr=h.get('multispkr', None),
                              f0_stats=h.get('f0_stats', None), f0_normalize=h.get('f0_normalize', False),
                              f0_feats=h.get('f0_feats', False), f0_median=h.get('f0_median', False),
                              f0_interp=h.get('f0_interp', False), vqvae=h.get('code_vq_params', False),
                              pad=a.pad)
    os.makedirs(a.output_dir, exist_ok=True)
    if h.get('multispkr', None):
        # Fixed subset of speakers shared by all items (unless --random-speakers).
        spkrs = random.sample(range(len(dataset.id_to_spkr)), k=min(5, len(dataset.id_to_spkr)))
    generator.eval()
    generator.remove_weight_norm()
    # fix seed
    seed = 52 + idx
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
@torch.no_grad()
def inference(item_index):
    """Synthesise one dataset item (plus speaker variants) and write WAVs."""
    code, gt_audio, filename, _ = dataset[item_index]
    # NOTE(review): with --code_file the dataset stores a list, not a dict —
    # this .items() call looks like it only works on the manifest path; confirm.
    code = {k: torch.from_numpy(v).to(device).unsqueeze(0) for k, v in code.items()}
    if a.parts:
        parts = Path(filename).parts
        fname_out_name = '_'.join(parts[-3:])[:-4]
    else:
        fname_out_name = Path(filename).stem
    new_code = dict(code)
    if a.dur_prediction:
        # Duration prediction expects a deduplicated code sequence.
        new_code['code'] = torch.unique_consecutive(new_code['code']).unsqueeze(0)
        new_code['dur_prediction'] = True
    audio, rtf = generate(h, generator, new_code)
    output_file = os.path.join(a.output_dir, fname_out_name + '_gen.wav')
    audio = librosa.util.normalize(audio.astype(np.float32))
    write(output_file, h.sampling_rate, audio)
    if h.get('multispkr', None):
        if a.random_speakers:
            local_spkrs = random.sample(range(len(dataset.id_to_spkr)), k=min(5, len(dataset.id_to_spkr)))
        else:
            local_spkrs = spkrs
        for spkr_i, k in enumerate(local_spkrs):
            # Re-synthesise the same content conditioned on another speaker.
            code['spkr'].fill_(k)
            audio, rtf = generate(h, generator, code)
            output_file = os.path.join(a.output_dir, fname_out_name + f'_{k}_gen.wav')
            audio = librosa.util.normalize(audio.astype(np.float32))
            write(output_file, h.sampling_rate, audio)
    if gt_audio is not None:
        # Also dump the ground-truth audio for side-by-side comparison.
        output_file = os.path.join(a.output_dir, fname_out_name + '_gt.wav')
        gt_audio = librosa.util.normalize(gt_audio.squeeze().numpy().astype(np.float32))
        write(output_file, h.sampling_rate, gt_audio)
def main():
    """CLI entry point: parse args, sanity-check the checkpoint/config, then
    run inference either serially (--debug) or via a multiprocessing Pool
    where each worker claims a GPU id from a shared queue.
    """
    print('Initializing Inference Process..')
    parser = argparse.ArgumentParser()
    parser.add_argument('--code_file', default=None)
    parser.add_argument('--input_code_file', default='./datasets/LJSpeech/cpc100/test.txt')
    parser.add_argument('--output_dir', default='generated_files')
    parser.add_argument('--checkpoint_file', required=True)
    parser.add_argument('--random-speakers', action='store_true')
    parser.add_argument('--pad', default=None, type=int)
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--parts', action='store_true')
    parser.add_argument('-n', type=int, default=10)
    parser.add_argument('--num-gpu', type=int, default=2)
    parser.add_argument('--dur-prediction', action='store_true')
    a = parser.parse_args()
    seed = 52
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Worker ids 0..7; each pool worker pops one and uses it as its device.
    ids = list(range(8))
    manager = Manager()
    idQueue = manager.Queue()
    for i in ids:
        idQueue.put(i)
    if os.path.isdir(a.checkpoint_file):
        config_file = os.path.join(a.checkpoint_file, 'config.json')
    else:
        config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json')
    with open(config_file) as f:
        data = f.read()
    json_config = json.loads(data)
    h = AttrDict(json_config)
    if os.path.isdir(a.checkpoint_file):
        cp_g = scan_checkpoint(a.checkpoint_file, 'g_')
    else:
        cp_g = a.checkpoint_file
    if not os.path.isfile(cp_g) or not os.path.exists(cp_g):
        print(f"Didn't find checkpoints for {cp_g}")
        return
    # The dataset is built here only to know how many items to distribute;
    # each worker rebuilds its own copy in init_worker().
    # NOTE(review): the module-global `device` is still None at this point —
    # confirm CodeDataset tolerates device=None in the parent process.
    if a.code_file is not None:
        dataset = [x.strip().split('|') for x in open(a.code_file).readlines()]
        def parse_code(c):
            c = [int(v) for v in c.split(" ")]
            return [torch.LongTensor(c).numpy()]
        dataset = [(parse_code(x[1]), None, x[0], None) for x in dataset]
    else:
        file_list = parse_manifest(a.input_code_file)
        dataset = CodeDataset(file_list, -1, h.code_hop_size, h.n_fft, h.num_mels, h.hop_size, h.win_size,
                              h.sampling_rate, h.fmin, h.fmax, n_cache_reuse=0, fmax_loss=h.fmax_for_loss, device=device,
                              f0=h.get('f0', None), multispkr=h.get('multispkr', None),
                              f0_stats=h.get('f0_stats', None), f0_normalize=h.get('f0_normalize', False),
                              f0_feats=h.get('f0_feats', False), f0_median=h.get('f0_median', False),
                              f0_interp=h.get('f0_interp', False), vqvae=h.get('code_vq_params', False),
                              pad=a.pad)
    if a.debug:
        # Single-process path: run everything inline on worker id 0.
        ids = list(range(1))
        import queue
        idQueue = queue.Queue()
        for i in ids:
            idQueue.put(i)
        init_worker(idQueue, a)
        for i in range(0, len(dataset)):
            inference(i)
            bar = progbar(i, len(dataset))
            message = f'{bar} {i}/{len(dataset)} '
            stream(message)
            if a.n != -1 and i > a.n:
                break
    else:
        idx = list(range(len(dataset)))
        random.shuffle(idx)
        with Pool(a.num_gpu, init_worker, (idQueue, a)) as pool:
            for i, _ in enumerate(pool.imap(inference, idx), 1):
                bar = progbar(i, len(idx))
                message = f'{bar} {i}/{len(idx)} '
                stream(message)
                if a.n != -1 and i > a.n:
                    break
| 9,266 | 31.861702 | 121 | py |
speech-resynthesis | speech-resynthesis-main/examples/speech_to_speech_translation/models.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/jik876/hifi-gan
import torch
import torch.nn as nn
import torch.nn.functional as F
from models import CodeGenerator, Generator
class VariancePredictor(nn.Module):
    """Duration-predictor head.

    Two Conv1d+ReLU stages, each followed by LayerNorm and dropout, then a
    linear projection to one scalar (log-duration) per frame.
    """

    def __init__(
            self,
            encoder_embed_dim,
            var_pred_hidden_dim,
            var_pred_kernel_size,
            var_pred_dropout
    ):
        super().__init__()
        # Stage 1: project encoder features to the hidden width ('same' pad).
        self.conv1 = nn.Sequential(
            nn.Conv1d(
                encoder_embed_dim, var_pred_hidden_dim,
                kernel_size=var_pred_kernel_size,
                padding=(var_pred_kernel_size - 1) // 2
            ),
            nn.ReLU()
        )
        self.ln1 = nn.LayerNorm(var_pred_hidden_dim)
        self.dropout = var_pred_dropout
        # Stage 2: hidden-to-hidden refinement.
        self.conv2 = nn.Sequential(
            nn.Conv1d(
                var_pred_hidden_dim, var_pred_hidden_dim,
                kernel_size=var_pred_kernel_size, padding=1
            ),
            nn.ReLU()
        )
        self.ln2 = nn.LayerNorm(var_pred_hidden_dim)
        self.proj = nn.Linear(var_pred_hidden_dim, 1)

    def forward(self, x):
        # Input: B x T x C; Output: B x T
        for conv, ln in ((self.conv1, self.ln1), (self.conv2, self.ln2)):
            # Conv1d wants B x C x T; transpose around each conv stage.
            x = conv(x.transpose(1, 2)).transpose(1, 2)
            x = F.dropout(ln(x), p=self.dropout, training=self.training)
        return self.proj(x).squeeze(dim=2)
def process_duration(code, code_feat):
    """Collapse runs of identical codes into (features, mask, durations).

    For each batch item, consecutive duplicate codes are merged; one feature
    vector (taken at the end of each run) represents the run and the run
    length becomes its duration target. When a sample has more than two runs
    the first and last are dropped, since random segment sampling may have
    truncated them.

    Returns:
        out:  (B, L, C) zero-padded features of the unique codes
        mask: (B, L) bool mask of valid positions
        dur:  flat float tensor of run lengths across the batch
    """
    counts, feats = [], []
    for b in range(code.size(0)):
        _, run_lens = torch.unique_consecutive(code[b, :], return_counts=True)
        if len(run_lens) > 2:
            # remove first and last code as segment sampling may cause
            # incomplete segment length
            counts.append(run_lens[1:-1])
            pick = run_lens.cumsum(dim=0)[:-2]
        else:
            counts.append(run_lens)
            pick = run_lens.cumsum(dim=0) - 1
        feats.append(code_feat[b, pick, :].view(-1, code_feat.size(2)))
    dur = torch.cat(counts)
    # collate feat
    max_len = max(f.size(0) for f in feats)
    out = feats[0].new_zeros((len(feats), max_len, feats[0].size(1)))
    mask = torch.arange(max_len).repeat(len(feats), 1)
    for b, f in enumerate(feats):
        out[b, : f.size(0)] = f
        mask[b, :] = mask[b, :] < f.size(0)
    return out, mask.bool(), dur.float()
class DurationCodeGenerator(Generator):
    """
    Discrete unit-based HiFi-GAN vocoder with duration prediction
    (used in https://arxiv.org/abs/2107.05604)
    The current implementation only supports unit and speaker ID input and
    does not support F0 input.
    """
    def __init__(self, h):
        super().__init__(h)
        # Embedding table for the discrete unit codes.
        self.dict = nn.Embedding(h.num_embeddings, h.embedding_dim)
        self.f0 = h.get('f0', None)
        self.multispkr = h.get('multispkr', None)
        if self.multispkr:
            # NOTE(review): speaker table is hard-capped at 200 ids — confirm
            # this bound against the datasets in use.
            self.spkr = nn.Embedding(200, h.embedding_dim)
        self.dur_predictor = None
        if h.get('dur_prediction_weight', None):
            self.dur_predictor = VariancePredictor(**h.dur_predictor_params)
    def forward(self, **kwargs):
        # Unit embeddings as B x C x T for the conv backbone.
        x = self.dict(kwargs['code']).transpose(1, 2)
        dur_losses = 0.0
        if self.dur_predictor:
            if self.training:
                # assume input code is always full sequence
                uniq_code_feat, uniq_code_mask, dur = process_duration(
                    kwargs['code'], x.transpose(1, 2))
                log_dur_pred = self.dur_predictor(uniq_code_feat)
                log_dur_pred = log_dur_pred[uniq_code_mask]
                # Regress against log(duration + 1) targets.
                log_dur = torch.log(dur + 1)
                dur_losses = F.mse_loss(log_dur_pred, log_dur, reduction="mean")
            elif kwargs.get('dur_prediction', False):
                # assume input code can be unique sequence only in eval mode
                assert x.size(0) == 1, "only support single sample batch in inference"
                log_dur_pred = self.dur_predictor(x.transpose(1, 2))
                # Invert the log(d+1) transform; every unit lasts >= 1 frame.
                dur_out = torch.clamp(
                    torch.round((torch.exp(log_dur_pred) - 1)).long(), min=1
                )
                # B x C x T
                x = torch.repeat_interleave(x, dur_out.view(-1), dim=2)
        if self.multispkr:
            # Append the (time-upsampled) speaker embedding along channels.
            spkr = self.spkr(kwargs['spkr']).transpose(1, 2)
            spkr = self._upsample(spkr, x.shape[-1])
            x = torch.cat([x, spkr], dim=1)
        return super().forward(x), dur_losses
| 4,834 | 35.908397 | 96 | py |
speech-resynthesis | speech-resynthesis-main/examples/speech_to_speech_translation/train.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Adapted from https://github.com/jik876/hifi-gan
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings(action='ignore', message='.*kernel_size exceeds volume extent.*')
import itertools
import os
import time
import argparse
import json
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DistributedSampler, DataLoader
from torch.distributed import init_process_group
from torch.nn.parallel import DistributedDataParallel
from dataset import CodeDataset, mel_spectrogram, get_dataset_filelist
from examples.speech_to_speech_translation.models import DurationCodeGenerator
from models import MultiPeriodDiscriminator, MultiScaleDiscriminator, feature_loss, generator_loss, \
discriminator_loss
from utils import plot_spectrogram, scan_checkpoint, load_checkpoint, \
save_checkpoint, build_env, AttrDict
torch.backends.cudnn.benchmark = True
def train(rank, local_rank, a, h):
    """HiFi-GAN adversarial training loop with duration-prediction loss.

    rank/local_rank select the DDP process and its CUDA device; `a` carries
    CLI args, `h` the JSON config. Rank 0 additionally handles printing,
    checkpointing, TensorBoard summaries, and validation.
    """
    if h.num_gpus > 1:
        init_process_group(
            backend=h.dist_config['dist_backend'],
            init_method=h.dist_config['dist_url'],
            rank=rank,
            world_size=h.num_gpus,
        )
    torch.cuda.manual_seed(h.seed)
    device = torch.device('cuda:{:d}'.format(local_rank))
    generator = DurationCodeGenerator(h).to(device)
    mpd = MultiPeriodDiscriminator().to(device)
    msd = MultiScaleDiscriminator().to(device)
    if rank == 0:
        print(generator)
        os.makedirs(a.checkpoint_path, exist_ok=True)
        print("checkpoints directory : ", a.checkpoint_path)
    # Resume from the newest generator/discriminator checkpoints, if any.
    # NOTE(review): cp_g/cp_do are unbound when checkpoint_path is not a
    # directory — confirm callers always create it first.
    if os.path.isdir(a.checkpoint_path):
        cp_g = scan_checkpoint(a.checkpoint_path, 'g_')
        cp_do = scan_checkpoint(a.checkpoint_path, 'do_')
    steps = 0
    if cp_g is None or cp_do is None:
        state_dict_do = None
        last_epoch = -1
    else:
        state_dict_g = load_checkpoint(cp_g, device)
        state_dict_do = load_checkpoint(cp_do, device)
        generator.load_state_dict(state_dict_g['generator'])
        mpd.load_state_dict(state_dict_do['mpd'])
        msd.load_state_dict(state_dict_do['msd'])
        steps = state_dict_do['steps'] + 1
        last_epoch = state_dict_do['epoch']
    if h.num_gpus > 1:
        generator = DistributedDataParallel(
            generator,
            device_ids=[local_rank],
            find_unused_parameters=('f0_quantizer' in h),
        ).to(device)
        mpd = DistributedDataParallel(mpd, device_ids=[local_rank]).to(device)
        msd = DistributedDataParallel(msd, device_ids=[local_rank]).to(device)
    optim_g = torch.optim.AdamW(generator.parameters(), h.learning_rate, betas=[h.adam_b1, h.adam_b2])
    optim_d = torch.optim.AdamW(itertools.chain(msd.parameters(), mpd.parameters()), h.learning_rate,
                                betas=[h.adam_b1, h.adam_b2])
    if state_dict_do is not None:
        optim_g.load_state_dict(state_dict_do['optim_g'])
        optim_d.load_state_dict(state_dict_do['optim_d'])
    scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=h.lr_decay, last_epoch=last_epoch)
    scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=h.lr_decay, last_epoch=last_epoch)
    training_filelist, validation_filelist = get_dataset_filelist(h)
    trainset = CodeDataset(training_filelist, h.segment_size, h.code_hop_size, h.n_fft, h.num_mels, h.hop_size,
                           h.win_size, h.sampling_rate, h.fmin, h.fmax, n_cache_reuse=0, fmax_loss=h.fmax_for_loss,
                           device=device, f0=h.get('f0', None), multispkr=h.get('multispkr', None),
                           f0_stats=h.get('f0_stats', None),
                           f0_normalize=h.get('f0_normalize', False), f0_feats=h.get('f0_feats', False),
                           f0_median=h.get('f0_median', False), f0_interp=h.get('f0_interp', False),
                           vqvae=h.get('code_vq_params', False))
    train_sampler = DistributedSampler(trainset) if h.num_gpus > 1 else None
    train_loader = DataLoader(trainset, num_workers=0, shuffle=False, sampler=train_sampler,
                              batch_size=h.batch_size, pin_memory=True, drop_last=True)
    if rank == 0:
        validset = CodeDataset(validation_filelist, h.segment_size, h.code_hop_size, h.n_fft, h.num_mels, h.hop_size,
                               h.win_size, h.sampling_rate, h.fmin, h.fmax, False, n_cache_reuse=0,
                               fmax_loss=h.fmax_for_loss, device=device, f0=h.get('f0', None),
                               multispkr=h.get('multispkr', None),
                               f0_stats=h.get('f0_stats', None), f0_normalize=h.get('f0_normalize', False),
                               f0_feats=h.get('f0_feats', False), f0_median=h.get('f0_median', False),
                               f0_interp=h.get('f0_interp', False), vqvae=h.get('code_vq_params', False))
        validation_loader = DataLoader(validset, num_workers=0, shuffle=False, sampler=None,
                                       batch_size=h.batch_size, pin_memory=True, drop_last=True)
        sw = SummaryWriter(os.path.join(a.checkpoint_path, 'logs'))
    generator.train()
    mpd.train()
    msd.train()
    for epoch in range(max(0, last_epoch), a.training_epochs):
        if rank == 0:
            start = time.time()
            print("Epoch: {}".format(epoch + 1))
        if h.num_gpus > 1:
            train_sampler.set_epoch(epoch)
        for i, batch in enumerate(train_loader):
            if rank == 0:
                start_b = time.time()
            x, y, _, y_mel = batch
            y = torch.autograd.Variable(y.to(device, non_blocking=False))
            y_mel = torch.autograd.Variable(y_mel.to(device, non_blocking=False))
            y = y.unsqueeze(1)
            x = {k: torch.autograd.Variable(v.to(device, non_blocking=False)) for k, v in x.items()}
            y_g_hat, dur_losses = generator(**x)
            assert y_g_hat.shape == y.shape, f"Mismatch in vocoder output shape - {y_g_hat.shape} != {y.shape}"
            y_g_hat_mel = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate, h.hop_size,
                                          h.win_size, h.fmin, h.fmax_for_loss)
            # ---- Discriminator step (generated audio detached) ----
            optim_d.zero_grad()
            # MPD
            y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach())
            loss_disc_f, losses_disc_f_r, losses_disc_f_g = discriminator_loss(y_df_hat_r, y_df_hat_g)
            # MSD
            y_ds_hat_r, y_ds_hat_g, _, _ = msd(y, y_g_hat.detach())
            loss_disc_s, losses_disc_s_r, losses_disc_s_g = discriminator_loss(y_ds_hat_r, y_ds_hat_g)
            loss_disc_all = loss_disc_s + loss_disc_f
            loss_disc_all.backward()
            optim_d.step()
            # ---- Generator step ----
            optim_g.zero_grad()
            # L1 Mel-Spectrogram Loss (the 45x weight follows HiFi-GAN).
            loss_mel = F.l1_loss(y_mel, y_g_hat_mel) * 45
            y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat)
            y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd(y, y_g_hat)
            loss_fm_f = feature_loss(fmap_f_r, fmap_f_g)
            loss_fm_s = feature_loss(fmap_s_r, fmap_s_g)
            loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g)
            loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g)
            loss_gen_all = loss_gen_s + loss_gen_f + loss_fm_s + loss_fm_f + loss_mel
            if h.get('dur_prediction_weight', None):
                loss_gen_all += dur_losses * h.get('dur_prediction_weight', None)
            loss_gen_all.backward()
            optim_g.step()
            if rank == 0:
                # STDOUT logging
                if steps % a.stdout_interval == 0:
                    with torch.no_grad():
                        mel_error = F.l1_loss(y_mel, y_g_hat_mel).item()
                    print(
                        'Steps : {:d}, Gen Loss Total : {:4.3f}, Mel-Spec. Error : {:4.3f}, s/b : {:4.3f}'.format(steps,
                                                                                                                  loss_gen_all,
                                                                                                                  mel_error,
                                                                                                                  time.time() - start_b))
                # checkpointing
                if steps % a.checkpoint_interval == 0 and steps != 0:
                    checkpoint_path = "{}/g_{:08d}".format(a.checkpoint_path, steps)
                    save_checkpoint(checkpoint_path,
                                    {'generator': (generator.module if h.num_gpus > 1 else generator).state_dict()})
                    checkpoint_path = "{}/do_{:08d}".format(a.checkpoint_path, steps)
                    save_checkpoint(checkpoint_path, {'mpd': (mpd.module if h.num_gpus > 1 else mpd).state_dict(),
                                                      'msd': (msd.module if h.num_gpus > 1 else msd).state_dict(),
                                                      'optim_g': optim_g.state_dict(), 'optim_d': optim_d.state_dict(),
                                                      'steps': steps, 'epoch': epoch})
                # Tensorboard summary logging
                if steps % a.summary_interval == 0:
                    sw.add_scalar("training/gen_loss_total", loss_gen_all, steps)
                    sw.add_scalar("training/mel_spec_error", mel_error, steps)
                    # NOTE(review): f0_commit_loss/f0_metrics and
                    # code_commit_loss/code_metrics are never assigned in this
                    # function — these branches would raise NameError if the
                    # f0/code VQ configs are enabled; confirm against the
                    # original repo's train.py.
                    if h.get('f0_vq_params', None):
                        sw.add_scalar("training/commit_error", f0_commit_loss, steps)
                        sw.add_scalar("training/used_curr", f0_metrics['used_curr'].item(), steps)
                        sw.add_scalar("training/entropy", f0_metrics['entropy'].item(), steps)
                        sw.add_scalar("training/usage", f0_metrics['usage'].item(), steps)
                    if h.get('code_vq_params', None):
                        sw.add_scalar("training/code_commit_error", code_commit_loss, steps)
                        sw.add_scalar("training/code_used_curr", code_metrics['used_curr'].item(), steps)
                        sw.add_scalar("training/code_entropy", code_metrics['entropy'].item(), steps)
                        sw.add_scalar("training/code_usage", code_metrics['usage'].item(), steps)
                # Validation
                if steps % a.validation_interval == 0:  # and steps != 0:
                    generator.eval()
                    torch.cuda.empty_cache()
                    val_err_tot = 0
                    with torch.no_grad():
                        for j, batch in enumerate(validation_loader):
                            x, y, _, y_mel = batch
                            x = {k: v.to(device, non_blocking=False) for k, v in x.items()}
                            y_g_hat, dur_losses = generator(**x)
                            y_mel = torch.autograd.Variable(y_mel.to(device, non_blocking=False))
                            y_g_hat_mel = mel_spectrogram(y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate,
                                                          h.hop_size, h.win_size, h.fmin, h.fmax_for_loss)
                            val_err_tot += F.l1_loss(y_mel, y_g_hat_mel).item()
                            if j <= 4:
                                # Log a few fixed audio/spectrogram examples.
                                if steps == 0:
                                    sw.add_audio('gt/y_{}'.format(j), y[0], steps, h.sampling_rate)
                                    sw.add_figure('gt/y_spec_{}'.format(j), plot_spectrogram(y_mel[0].cpu()), steps)
                                sw.add_audio('generated/y_hat_{}'.format(j), y_g_hat[0], steps, h.sampling_rate)
                                y_hat_spec = mel_spectrogram(y_g_hat[:1].squeeze(1), h.n_fft, h.num_mels,
                                                             h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax)
                                sw.add_figure('generated/y_hat_spec_{}'.format(j),
                                              plot_spectrogram(y_hat_spec[:1].squeeze(0).cpu().numpy()), steps)
                        val_err = val_err_tot / (j + 1)
                        sw.add_scalar("validation/mel_spec_error", val_err, steps)
                    generator.train()
            steps += 1
            if steps >= a.training_steps:
                break
        scheduler_g.step()
        scheduler_d.step()
        if rank == 0:
            print('Time taken for epoch {} is {} sec\n'.format(epoch + 1, int(time.time() - start)))
    if rank == 0:
        print('Finished training')
def main():
    """Entry point: parse CLI arguments, load the JSON config, launch training.

    Reads the JSON file given by --config into an AttrDict, seeds torch,
    and — when launched under a distributed launcher (WORLD_SIZE set) —
    splits the global batch size across GPUs before calling train().
    """
    print('Initializing Training Process..')

    def _str2bool(v):
        # BUGFIX: argparse's `type=bool` treats ANY non-empty string
        # (including "False") as True. Parse boolean spellings explicitly.
        if isinstance(v, bool):
            return v
        return v.lower() in ('true', '1', 'yes', 'y', 't')

    parser = argparse.ArgumentParser()
    parser.add_argument('--group_name', default=None)
    parser.add_argument('--checkpoint_path', default='cp_hifigan')
    parser.add_argument('--config', default='')
    parser.add_argument('--training_epochs', default=2000, type=int)
    parser.add_argument('--training_steps', default=500000, type=int)
    parser.add_argument('--stdout_interval', default=5, type=int)
    parser.add_argument('--checkpoint_interval', default=50000, type=int)
    parser.add_argument('--summary_interval', default=100, type=int)
    parser.add_argument('--validation_interval', default=5000, type=int)
    parser.add_argument('--fine_tuning', default=False, type=_str2bool)
    parser.add_argument('--local_rank', default=0, type=int)
    parser.add_argument('--distributed-world-size', type=int)
    parser.add_argument('--distributed-port', type=int)
    a = parser.parse_args()

    # Load the training configuration and expose it with attribute access.
    with open(a.config) as f:
        data = f.read()
    json_config = json.loads(data)
    h = AttrDict(json_config)
    build_env(a.config, 'config.json', a.checkpoint_path)

    torch.manual_seed(h.seed)
    if torch.cuda.is_available() and 'WORLD_SIZE' in os.environ:
        # Distributed run: seed CUDA and divide the batch across processes.
        torch.cuda.manual_seed(h.seed)
        h.num_gpus = int(os.environ['WORLD_SIZE'])
        h.batch_size = int(h.batch_size / h.num_gpus)
        local_rank = a.local_rank
        rank = a.local_rank
        print('Batch size per GPU :', h.batch_size)
    else:
        rank = 0
        local_rank = 0

    train(rank, local_rank, a, h)


if __name__ == '__main__':
    main()
| 14,559 | 45.967742 | 137 | py |
speech-resynthesis | speech-resynthesis-main/scripts/parse_vqvae_codes.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import random
from pathlib import Path
import torchaudio
from tqdm import tqdm
def parse_manifest(manifest):
    """Read a manifest file and return the audio paths it lists.

    Each line is either a plain file path, or a dict-literal record
    (a line starting with '{') carrying an "audio" key.
    """
    paths = []
    with open(manifest) as fp:
        for raw in fp:
            if raw[:1] == '{':
                # NOTE(review): eval on file contents is unsafe if the
                # manifest comes from an untrusted source; kept as-is for
                # compatibility with the existing manifest format.
                record = eval(raw.strip())
                paths.append(Path(record["audio"]))
            else:
                paths.append(Path(raw.strip()))
    return paths
def split(args, samples):
    """Split samples into train (tr), validation (cv) and test (tt) sets.

    When reference manifests are supplied via args.ref_train/ref_val/ref_test,
    the split is replicated from them by matching file names; otherwise the
    samples are shuffled and cut according to the args.tt / args.cv fractions.
    """
    if args.ref_train is not None:
        # Replicate an existing split, matching samples by file name.
        train_names = [x.name for x in parse_manifest(args.ref_train)]
        val_names = [x.name for x in parse_manifest(args.ref_val)]
        test_names = [x.name for x in parse_manifest(args.ref_test)]

        tr, cv, tt = [], [], []
        for sample in samples:
            name = Path(sample['audio']).name
            if name in val_names:
                cv.append(sample)
            elif name in test_names:
                tt.append(sample)
            else:
                tr.append(sample)
                # Anything not in val/test must appear in the train manifest.
                assert name in train_names
    else:
        # Random split driven by the configured fractions.
        total = len(samples)
        random.shuffle(samples)
        n_tt = int(total * args.tt)
        n_cv = int(total * args.tt + total * args.cv)
        tt = samples[:n_tt]
        cv = samples[n_tt:n_cv]
        tr = samples[n_cv:]
    return tr, cv, tt
def save(outdir, tr, cv, tt):
    """Write the three splits as newline-separated records under outdir."""
    outdir.mkdir(exist_ok=True, parents=True)
    for fname, records in (('train.txt', tr), ('val.txt', cv), ('test.txt', tt)):
        with open(outdir / fname, 'w') as fp:
            fp.write('\n'.join(str(record) for record in records))
def main():
    """Parse a `fname<TAB>codes` manifest into train/val/test sample lists.

    Each output record holds the audio path, the space-joined VQ-VAE codes
    and the clip duration; clips shorter than --min-dur are dropped.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--manifest', type=Path, required=True)
    parser.add_argument('--outdir', type=Path, required=True)
    parser.add_argument('--cv', type=float, default=0.05)
    parser.add_argument('--tt', type=float, default=0.05)
    parser.add_argument('--min-dur', type=float, default=None)
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--ref-train', type=Path, default=None)
    parser.add_argument('--ref-val', type=Path, default=None)
    parser.add_argument('--ref-test', type=Path, default=None)
    args = parser.parse_args()

    random.seed(args.seed)

    with open(args.manifest) as fp:
        lines = [line.strip() for line in fp.readlines()]

    # Build one record per manifest line.
    samples = []
    for line in tqdm(lines):
        fname, code = line.split('\t')
        waveform, sample_rate = torchaudio.load(fname)
        duration = waveform.shape[1] / sample_rate
        if args.min_dur and duration < args.min_dur:
            continue
        samples.append({
            'audio': str(fname),
            'vqvae256': ' '.join(code.split(',')),
            'duration': duration,
        })

    tr, cv, tt = split(args, samples)
    save(args.outdir, tr, cv, tt)


if __name__ == '__main__':
    main()
| 3,321 | 28.39823 | 70 | py |
t-leap | t-leap-main/test.py | import os
from datetime import datetime
# scipy imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Pytorch imports
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from datasets.seq_pose_dataset import SequentialPoseDataset
from torch.optim import Adam, lr_scheduler, SGD
# Package imports
import core.config
from models.tleap import TLEAP
from utils.plotting_utils import show_heatmaps, show_keypoints
from utils.data_utils import get_keypoints, get_keypoints_batch, dataset_split
from utils.train_utils import save_model, load_model, seed_all, seed_worker
from core.evaluate import euclidian_distance_error, PCKh
def test(model, criterion, data_loader, config, show=False, save=False, PCK=False, save_path="."):
    """
    Evaluate the model on unseen data.

    :param model: the model to evaluate
    :param criterion: the loss function
    :param data_loader: validation or test loader
    :param config: configuration file
    :param show: show plots
    :param save: save plots
    :param PCK: also compute PCKh at thresholds 0.0 .. 1.0
    :param save_path: directory where plots are written when save=True
    :return: per-batch RMSE list, per-batch loss list, PCKh-per-threshold dict
    """
    losses = []
    test_rmse = []
    test_PCKh = {k: [] for k in range(11)}

    model.eval()
    with torch.no_grad():
        for i, data in enumerate(data_loader):
            inputs = data['seq'].to(config.device, dtype=torch.float)
            # Only the last frame of each sequence is supervised.
            targets = data['heatmaps'][:, -1].to(config.device, dtype=torch.float)
            keypoints = data['keypoints'][:, -1]

            outputs = model(inputs)

            test_rmse.append(euclidian_distance_error(keypoints, outputs).item())
            if PCK:
                for thr in test_PCKh:
                    # dict keys are ints; PCKh expects a fractional threshold
                    _, pckh, _ = PCKh(keypoints, outputs, thr=thr / 10)
                    test_PCKh[thr].append(pckh.item())
            losses.append(criterion(outputs, targets).item())

            if show or save:
                predicted = get_keypoints(outputs[0])
                _ = show_keypoints(data['seq'][0][-1], predicted.cpu(),
                                   save=save,
                                   save_fname=os.path.join(save_path, 'test_' + str(i) + '.png'),
                                   cmap='gray', tb=(not show))
    return test_rmse, losses, test_PCKh
def main():
    """
    Main function, where the magic happens.

    Loads a trained checkpoint (mandatory), rebuilds the test dataset from
    the checkpoint's config, runs evaluation and writes per-sample metrics
    to <checkpoint_dir>/test/test_metrics.csv.
    :return: None.
    """
    config, _ = core.config.parse_args("Train Sequential Cowpose")
    # Set the seeds
    seed_all(config.seed)
    # Tensorboard summaries (current_time / tb_comment are computed for
    # naming parity with training but are not used further in this script)
    current_time = datetime.now().strftime('%b%d_%H-%M-%S')
    # Appendix to file names for saved models.
    tb_comment = 'LR_%.6f_BATCH_%d_EPOCH_%d_SEQ_%d' % (config.lr, config.batch_size, config.epochs, config.seq_length)
    ###########################
    # DATASET #
    ###########################
    val_transform = None
    test_dataset = SequentialPoseDataset(video_list=config.dataset_test,
                                         video_dir=config.data_folder,
                                         labels_dir=os.path.join(config.data_folder, 'labels_csv'),
                                         seq_length=config.seq_length,
                                         transform=val_transform,
                                         n_keypoints=len(config.keypoints),
                                         file_format=config.file_format
                                         )
    # batch_size=1 so per-sample metrics line up with dataset indices.
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, drop_last=False, worker_init_fn=seed_worker)
    ###########################
    # MODEL and TRAINING INIT #
    ###########################
    model = TLEAP(in_channels=3, out_channels=len(config.keypoints), seq_length=config.seq_length, depth=config.depth).to(config.device)
    # Optimizer/scheduler are created only so their state can be restored
    # from the checkpoint below; they are not stepped during testing.
    optimizer = Adam(model.parameters(), lr=config.lr, amsgrad=True, weight_decay=0.01)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=(config.epochs // 10), gamma=0.1)
    criterion = nn.MSELoss(reduction='sum')
    # LOAD MODEL
    # Keep the CLI checkpoint path before `config` is replaced below.
    checkpoint_path = config.load_checkpoint
    if config.load_checkpoint:
        checkpoint = load_model(config.load_checkpoint)
        # NOTE: the CLI config is replaced wholesale by the config stored
        # in the checkpoint from this point on.
        config = checkpoint['config']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        criterion = checkpoint['loss']
    else:
        print("Please specify a checkpoint for testing.")
        exit(1)
    # Output directory: <checkpoint path without '.model'>/test
    checkpoint_path = checkpoint_path.split('.model')[0]
    fig_save_path = os.path.join(checkpoint_path, 'test')
    if not os.path.exists(fig_save_path):
        os.mkdir(fig_save_path)
    ###########
    # TESTING #
    ###########
    # TESTING
    # Perform the evaluation on the whole test set
    print("Saving test images to %s" % fig_save_path)
    test_RSME, test_loss, test_PCK = test(model, criterion, test_loader, config, show=False, save=True, PCK=True, save_path=fig_save_path)
    # One row per test sample plus a final 'mean' row.
    results_dict = {"Test_ID": [*range(len(test_dataset)), 'mean']}
    results_dict["Test_RMSE"] = [*test_RSME, np.mean(test_RSME)]
    for thr in test_PCK.keys():
        results_dict[f"Test_PCKh@{thr/10}"] = [*test_PCK[thr], np.mean(test_PCK[thr])]
    results_df = pd.DataFrame.from_dict(results_dict)
    results_df.to_csv(os.path.join(fig_save_path, 'test_metrics.csv'), index=False)
    plt.close('all')


if __name__ == '__main__':
    main()
| 5,451 | 34.633987 | 138 | py |
t-leap | t-leap-main/train_seq.py | # #############################################################################
# Copyright 2022 Helena Russello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# #############################################################################
import os
from datetime import datetime
# scipy imports
import numpy as np
import matplotlib.pyplot as plt
# Pytorch imports
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from datasets.seq_pose_dataset import SequentialPoseDataset
from torch.optim import Adam, lr_scheduler, SGD
# Package imports
import core.config
from models.tleap import TLEAP
from utils.plotting_utils import show_heatmaps, show_keypoints
from utils.data_utils import get_keypoints, get_keypoints_batch, dataset_split
from utils.train_utils import save_model, load_model, seed_all, seed_worker
from core.evaluate import euclidian_distance_error, PCKh
def validate(model, criterion, data_loader, config, show=False, save=False, PCK=False, save_path="."):
    """
    Evaluate the model on unseen data.

    :param model: the model to evaluate
    :param criterion: the loss function
    :param data_loader: validation or test loader
    :param config: configuration file
    :param show: show plots
    :param save: save plots
    :param PCK: also compute PCKh at thresholds 0.0 .. 1.0
    :param save_path: directory where figures are written when save=True
    :return: (mean RMSE, mean loss, list of wandb figures, PCKh-per-threshold dict)
    """
    if config.wandb:
        # BUGFIX: wandb was only imported locally inside main(), so the
        # `wandb.Image(...)` reference below raised a NameError whenever
        # config.wandb was enabled. Import it in this scope as well.
        import wandb

    losses = []
    test_accuracies = []
    test_PCKh = {k: [] for k in range(11)}
    figures = []

    model.eval()
    with torch.no_grad():
        for i, data in enumerate(data_loader):
            inputs = data['seq'].to(config.device, dtype=torch.float)
            # Only the last frame of each sequence is supervised.
            targets = data['heatmaps'][:, -1].to(config.device, dtype=torch.float)
            keypoints = data['keypoints'][:, -1]

            test_outputs = model(inputs)
            test_accuracies.append(euclidian_distance_error(keypoints, test_outputs))
            if PCK:
                for thr in test_PCKh.keys():
                    ckh, pckh, nckh = PCKh(keypoints, test_outputs, thr=thr / 10)  # dict keys are ints
                    test_PCKh[thr].append(pckh)
            losses.append(criterion(test_outputs, targets))

            if show or save:
                keypoints_pred = get_keypoints(test_outputs[0])
                figure = show_keypoints(data['seq'][0][-1], keypoints_pred.cpu(),
                                        save=save, save_fname=os.path.join(save_path, 'test_' + str(i) + '.png'),
                                        cmap='gray', tb=(not show))
                if config.wandb:
                    figures.append(
                        wandb.Image(figure))
    if PCK:
        # Reduce the per-batch PCKh lists to their means.
        for thr in test_PCKh.keys():
            test_PCKh[thr] = torch.mean(torch.tensor(test_PCKh[thr]))
    return torch.mean(torch.tensor(test_accuracies)), torch.mean(torch.tensor(losses)), figures, test_PCKh
def main():
    """
    Main function, where the magic happens.

    Builds the train/test datasets, trains T-LEAP for config.epochs epochs
    (validating and checkpointing every epoch), then evaluates on the test
    set. Metrics are optionally logged to Weights & Biases.
    :return: None.
    """
    config, _ = core.config.parse_args("Train Sequential Cowpose")
    # Set the seeds
    seed_all(config.seed)
    # Tensorboard summaries
    current_time = datetime.now().strftime('%b%d_%H-%M-%S')
    #WandB (Weights and Biases) init
    if config.wandb:
        import wandb
        run = wandb.init(project="cowpose", group=config.group)
        # WandB – Config is a variable that holds and saves hyperparameters and inputs
        wconfig = wandb.config  # Initialize config
        wconfig.batch_size = config.batch_size  # input batch size for training (default: 64)
        wconfig.test_batch_size = 1  # input batch size for testing (default: 1000)
        wconfig.epochs = config.epochs  # number of epochs to train (default: 10)
        wconfig.lr = config.lr  # learning rate (default: 0.01)
        wconfig.no_cuda = config.device  # disables CUDA training
        wconfig.seed = config.seed  # random seed (default: 42)
        wconfig.log_interval = config.frequent  # how many batches to wait before logging training status
        wconfig.seq_length = config.seq_length
        wconfig.optimizer = config.optimizer
        wconfig.depth = config.depth
        tb_comment = run.id
    else:
        # Appendix to file names for saved models.
        tb_comment = 'LR_%.6f_BATCH_%d_EPOCH_%d_SEQ_%d' % (config.lr, config.batch_size, config.epochs, config.seq_length)
    ###########################
    # DATASET #
    ###########################
    # TRAIN SET: light augmentation (rotation + brightness/contrast jitter).
    train_transform = [
        SequentialPoseDataset.RandomRotate(10),
        SequentialPoseDataset.BrightnessContrast(brightness=(-100, 100), contrast=(-3, 3)),
    ]
    val_transform = None
    train_dataset = SequentialPoseDataset(video_list=config.dataset_csv,
                                          video_dir=config.data_folder,
                                          labels_dir=os.path.join(config.data_folder, 'labels_csv'),
                                          seq_length=config.seq_length,
                                          n_keypoints=len(config.keypoints),
                                          transform=train_transform,
                                          file_format=config.file_format
                                          )
    test_dataset = SequentialPoseDataset(video_list=config.dataset_test,
                                         video_dir=config.data_folder,
                                         labels_dir=os.path.join(config.data_folder, 'labels_csv'),
                                         seq_length=config.seq_length,
                                         transform=val_transform,
                                         n_keypoints=len(config.keypoints),
                                         file_format=config.file_format
                                         )
    train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True, drop_last=True, worker_init_fn=seed_worker)
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, drop_last=False, worker_init_fn=seed_worker)
    ###########################
    # MODEL and TRAINING INIT #
    ###########################
    model = TLEAP(in_channels=3, out_channels=len(config.keypoints), seq_length=config.seq_length, depth=config.depth).to(config.device)
    if config.wandb:
        wandb.watch(model, log="all")
    # default optimizer = 'amsgrad'
    if config.optimizer == 'sgd':
        optimizer = SGD(model.parameters(), lr=config.lr, momentum=0.9, weight_decay=1e-4)
    elif config.optimizer == 'adam':
        optimizer = Adam(model.parameters(), lr=config.lr, amsgrad=False, weight_decay=0.01)
    else: # amsgrad
        optimizer = Adam(model.parameters(), lr=config.lr, amsgrad=True, weight_decay=0.01)
    # NOTE(review): the scheduler is restored/saved but never stepped in
    # this loop — confirm whether scheduler.step() is intentional to omit.
    scheduler = lr_scheduler.StepLR(optimizer, step_size=(config.epochs // 10), gamma=0.1)
    criterion = nn.MSELoss(reduction='sum')
    # LOAD MODEL (optional resume from checkpoint)
    if config.load_checkpoint:
        checkpoint = load_model(config.load_checkpoint)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        criterion = checkpoint['loss']
    fig_save_path = os.path.join(config.save_checkpoint, tb_comment + '_' + current_time)
    if not os.path.exists(fig_save_path):
        os.mkdir(fig_save_path)
    iterations = 0
    epoch = 0
    epoch_fig_save = False
    train_fig_save_path = ""
    while epoch < config.epochs:
        model.train()
        for step, batch in enumerate(train_loader):
            iterations += 1
            optimizer.zero_grad()
            batch_inputs = batch['seq'].to(config.device, dtype=torch.float)
            batch_targets = batch['heatmaps'].to(config.device, dtype=torch.float)
            batch_keypoints = batch['keypoints']
            # Forward step; only the last frame of the sequence is supervised.
            batch_preds = model(batch_inputs)
            loss = criterion(batch_preds, batch_targets[:, -1]) / config.batch_size
            loss.backward()
            optimizer.step()
            if step % config.frequent == 0:
                accuracy = euclidian_distance_error(batch_keypoints[:, -1], batch_preds)
                print("Loss at step %d/%d: %.6f, RMSE: %.2f" % (epoch, step, loss.item(), accuracy))
        # RMSE of the last training batch, logged alongside validation.
        accuracy = euclidian_distance_error(batch_keypoints[:, -1], batch_preds)
        # Save val figures every [frequent] epoch
        if config.frequent > 0 and (epoch % config.frequent == 0):
            train_fig_save_path = os.path.join(fig_save_path, 'train_%d' % epoch)
            if not os.path.exists(train_fig_save_path):
                os.mkdir(train_fig_save_path)
            epoch_fig_save = True
            print("Saving validation images to %s" % train_fig_save_path)
        else:
            epoch_fig_save = False
        val_accuracy, val_loss, val_figures, val_PCK = validate(model, criterion, test_loader, config, show=False, PCK=True, save=epoch_fig_save, save_path=train_fig_save_path)
        print("Validation loss at epoch %d: %.6f, RMSE: %.2f, PCKh@0.5: %.2f" % (epoch, val_loss, val_accuracy, val_PCK[5]))
        if config.wandb:
            # Plot progress in wandb
            wandb.log({
                "Examples": val_figures,
                "Train Accuracy": accuracy,
                "Train Loss": loss,
                "Val Accuracy": val_accuracy,
                "Val Loss": val_loss,
                "PCKh@0.5": val_PCK[5]
            })
        plt.close('all')
        # Checkpoint every epoch, overwriting the same file.
        if config.save_checkpoint:
            model_save_path = os.path.join(config.save_checkpoint, tb_comment + '_' + current_time + '.model')
            model_saved = save_model(config, model, epoch, optimizer, scheduler, criterion, model_save_path )
            print(f"Saved model at {model_save_path}")
        epoch += 1
    # end epochs
    ###########
    # TESTING #
    ###########
    # TESTING
    # Perform the evaluation on the whole test set
    test_fig_save_path = os.path.join(fig_save_path, 'test')
    if not os.path.exists(test_fig_save_path):
        os.mkdir(test_fig_save_path)
    print("Saving test images to %s" % fig_save_path)
    test_RSME, test_loss, test_figures, test_PCK = validate(model, criterion, test_loader, config, show=False, save=True, PCK=True, save_path=test_fig_save_path)
    print("Test RMSE: %.2f" %(test_RSME))
    print("Test PCKh@[thr]:")
    for thr in test_PCK.keys():
        print("PCKh@%.1f : %.2f" % (thr / 10, test_PCK[thr] * 100, ))
    if config.wandb:
        # Log results into wandb
        wandb.log({
            "Examples": test_figures,
            "Test RMSE": test_RSME,
            "Test Loss": test_loss})
        for thr in test_PCK.keys():
            wandb.log({"PCKh": test_PCK[thr], "thr": thr/10})
    plt.close('all')


if __name__ == '__main__':
    main()
| 11,265 | 39.235714 | 176 | py |
t-leap | t-leap-main/core/evaluate.py | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Source: https://github.com/microsoft/human-pose-estimation.pytorch/
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Adapted by Helena Russello (helena@russello.dev)
# ------------------------------------------------------------------------------
import numpy as np
from utils.data_utils import get_keypoints, get_keypoints_batch
import torch
def calc_dists(preds, target, normalize):
    """Per-(image, keypoint) normalized distances; -1 marks missing targets.

    :param preds: predicted keypoints, shape (batch, n_keypoints, 2)
    :param target: ground-truth keypoints, same shape
    :param normalize: per-keypoint (x, y) normalization factors
    :return: tensor (batch, n_keypoints) of distances, -1 where unlabeled
    """
    batch, n_kp = preds.shape[0], preds.shape[1]
    dists = torch.zeros((batch, n_kp))
    for b in range(batch):
        for k in range(n_kp):
            # Targets with a coordinate <= 1 are treated as unlabeled.
            if target[b, k, 0] > 1 and target[b, k, 1] > 1:
                scale = normalize[k]
                dists[b, k] = torch.norm(preds[b, k, :] / scale - target[b, k, :] / scale)
            else:
                dists[b, k] = -1
    return dists
def dist_acc(dists, thr=0.5):
    ''' Return percentage below threshold while ignoring values with a -1 '''
    valid = torch.ne(dists, -1)
    n_valid = valid.sum()
    # With no valid entries the accuracy is undefined; signal with -1.
    if n_valid > 0:
        return torch.lt(dists[valid], thr).sum() * 1.0 / n_valid
    return -1
def PCK(targets, predictions, thr=0.5, norm=10):
    """
    Percentage of Correct Keypoints, normalized by heatmap size / norm.

    :param targets: ground-truth keypoints, shape (batch, n_keypoints, 2)
    :param predictions: the predicted HEATMAPS (batch, n_keypoints, h, w)
    :param thr: threshold under which a keypoint is considered correct
    :param norm: divisor applied to the heatmap height/width for normalization
    :return: the PCK per batch image, the mean PCK, and the number of correct
        keypoints per batch image
    """
    height, width = predictions.shape[2], predictions.shape[3]
    n_keypoints = targets.shape[1]

    # Decode the heatmaps into (x, y) keypoint coordinates.
    decoded = get_keypoints_batch(predictions)

    normalize = torch.ones((n_keypoints, 2))
    normalize[:, 0] *= height / norm
    normalize[:, 1] *= width / norm
    return _accuracy(targets, decoded, thr, normalize)
def _accuracy(targets, predictions, thr, normalize):
    """
    Shared PCK computation over already-decoded keypoints.

    :param targets: ground-truth keypoints (batch, n_keypoints, 2)
    :param predictions: predicted keypoints (batch, n_keypoints, 2)
    :param thr: correctness threshold on the normalized distance
    :param normalize: per-keypoint (x, y) normalization factors
    :return: per-image accuracies, their mean, and per-image correct counts
    """
    dists = calc_dists(predictions, targets, normalize)
    batch_size, n_keypoints = targets.shape[0], targets.shape[1]

    acc = torch.zeros((batch_size, n_keypoints))
    cnt = [0] * batch_size
    for b in range(batch_size):
        for k in range(n_keypoints):
            score = dist_acc(dists[b, k], thr)
            # dist_acc returns -1 when the keypoint is unlabeled.
            if score > 0:
                acc[b, k] = score
                cnt[b] += 1
    return acc, torch.mean(acc), cnt
def PCKh(targets, predictions, thr=0.5, head_index=(12, 13)):
    """
    Calculates the PCK-h metric (Percentage of Correct Keypoints with respect to head size).

    :param targets: the target keypoints, shape (batch, n_keypoints, 2)
    :param predictions: the predicted HEATMAPS
    :param thr: fraction of the head size used as the correctness threshold
    :param head_index: indices of the two head keypoints defining head size.
        BUGFIX: default changed from a mutable list to a tuple so the shared
        default cannot be mutated across calls; indexing is unchanged.
    :return: the PCK per batch image, the mean PCK, and the number of correct
        keypoints per batch image
    """
    n_keypoints = targets.shape[1]
    predictions = get_keypoints_batch(predictions)
    normalize = torch.ones((n_keypoints, 2))
    # for simplicity we only take the head size of the first image in the batch
    # Note that for testing, the batch size is always 1, so evaluation results remain correct
    head_size = torch.norm(targets[0, head_index[0]] - targets[0, head_index[1]])  # euclidian norm
    thr = head_size * thr
    return _accuracy(targets, predictions, thr, normalize)
def euclidian_distance_error(targets, predictions, mean=True):
    """Euclidean distance between ground-truth keypoints and predictions.

    :param targets: ground-truth KEYPOINTS (batch, n_keypoints, 2); a negative
        coordinate marks a missing annotation and contributes 0.
    :param predictions: the predicted HEATMAPS, decoded internally
    :param mean: if True return the batch mean, otherwise the full matrix
    """
    decoded = get_keypoints_batch(predictions)
    eucl_dist = torch.zeros((targets.size(0), targets.size(1)))
    for b in range(targets.size(0)):
        for j in range(targets.size(1)):
            if targets[b, j, 0] < 0 or targets[b, j, 1] < 0:
                continue  # missing keypoint: leave its distance at 0
            diff = targets[b, j] - decoded[b, j]
            eucl_dist[b, j] = torch.sqrt(torch.sum(diff ** 2))
    if mean:
        return torch.mean(eucl_dist)
    return eucl_dist
t-leap | t-leap-main/models/tleap.py | # #############################################################################
# Copyright 2022 Helena Russello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# #############################################################################
import torch
from torch import nn
#from torchsummary import summary
class TLEAP(nn.Module):
    """(T-)LEAP pose-estimation network: an encoder/decoder CNN that maps an
    image (or a short frame sequence) to one heatmap per keypoint. With
    seq_length == 1 all layers are 2D (LEAP); with seq_length > 1 the
    convolutions become 3D over (time, height, width) (T-LEAP)."""

    def __init__(self, in_channels, out_channels, n_filters=64, depth=3, seq_length=1):
        """
        Implementation of the LEAP model by Pereira et al.
        The Keras implementation can be found at: https://github.com/talmo/leap/
        :param in_channels: input channels (3 for RGB)
        :param out_channels: output channels (corresponds to the number of joints).
        :param n_filters: initial number of filters of the model. It doubles at each new layer in the encoder.
        :param depth: depth of the model. I tested with depth of 3 and 4.
        :param seq_length: length of the sequence; 1 is static (LEAP), 2 is two consecutive frame (T-LEAP), etc.
        With seq_length=1, the model uses 2D convolutions, with seq_length>=2, the model uses 3D convolutions
        """
        super().__init__()
        self.input_channels = in_channels
        self.output_channels = out_channels
        self.n_filters = n_filters
        self.seq_length = seq_length
        # 3D (spatio-temporal) layers are only used for real sequences.
        self.is_3D = seq_length > 1
        self._make_layers(depth)
        self.myparams = self.parameters()

    def _conv_block(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=1):
        """
        Creates a convolutional module followed by a ReLU
        (and batch norm); 3D or 2D depending on self.is_3D.
        """
        if self.is_3D:
            # Dilation is applied spatially only, never on the time axis.
            return nn.Sequential(
                nn.Conv3d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=(1, dilation, dilation)),
                nn.ReLU(),
                nn.BatchNorm3d(out_channels)
            )
        else:
            return nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation),
                nn.ReLU(),
                nn.BatchNorm2d(out_channels)
            )

    def _conv_transpose(self, in_channels, out_channels, weight_init=True):
        """
        Creates a transpose convolution module and initialize the weights with Xavier intitialization.
        Spatial dims are upsampled x2; the time axis (3D case) is untouched.
        """
        if self.is_3D:
            convT = nn.ConvTranspose3d(in_channels, out_channels, kernel_size=(1, 3, 3), stride=(1, 2, 2),
                                       padding=(0, 1, 1), output_padding=(0, 1, 1))
        else:
            convT = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=(3, 3), stride=(2, 2),
                                       padding=(1, 1), output_padding=(1, 1))
        if weight_init:
            convT.weight.data = nn.init.xavier_normal_(convT.weight.data)
        return convT

    def _max_pool(self, kernel_size=2, padding=0):
        # 3D or 2D max-pooling depending on the model variant.
        if self.is_3D:
            return nn.MaxPool3d(kernel_size=kernel_size, padding=padding)
        else:
            return nn.MaxPool2d(kernel_size=kernel_size, padding=padding)

    def _make_layers(self, depth):
        """Build self.encoder, self.decoder, self.fc6 and self.softmax."""
        # Encoder: channel sizes in_channels -> n_filters * 2**i
        enc_sizes = [self.input_channels]
        for i in range(depth):
            enc_sizes.append(self.n_filters * 2**i)
        self.encoder = nn.Sequential()
        for i in range(depth):
            self.encoder.add_module("enc_conv_%d_1" % (i+1), self._conv_block(enc_sizes[i], enc_sizes[i+1], dilation=1))
            self.encoder.add_module("enc_conv_%d_2" % (i+1), self._conv_block(enc_sizes[i+1], enc_sizes[i+1], dilation=1))
            self.encoder.add_module("enc_conv_%d_3" % (i+1), self._conv_block(enc_sizes[i+1], enc_sizes[i+1], dilation=1))
            if i < depth-1:
                if self.is_3D and i == 0:
                    # First 3D pool also halves the temporal dimension.
                    self.encoder.add_module("enc_max_%d" % (i + 1),
                                            self._max_pool(kernel_size=(self.seq_length // 2, 2, 2)))
                elif self.is_3D and i > 1:
                    # Deeper 3D pools shrink spatial dims only.
                    self.encoder.add_module("enc_max_%d" % (i + 1), self._max_pool(kernel_size=(1, 2, 2)))
                else:
                    self.encoder.add_module("enc_max_%d" % (i+1), self._max_pool())
        # Decoder: mirror of the encoder channel progression.
        dec_sizes = []
        for i in range(depth-1, 0, -1):
            dec_sizes.append(self.n_filters * 2 ** i)
        self.decoder = nn.Sequential()
        for i in range(len(dec_sizes)-1):
            # Deconv_4
            self.decoder.add_module("dec_convT_%d" % (i+1),
                                    nn.Sequential(self._conv_transpose(dec_sizes[i], dec_sizes[i+1]),
                                                  nn.ReLU()))
            # Conv_5
            self.decoder.add_module("dec_conv_%d_1" % (i+1), self._conv_block(dec_sizes[i+1], dec_sizes[i+1]))
            self.decoder.add_module("dec_conv_%d_2" % (i+1), self._conv_block(dec_sizes[i+1], dec_sizes[i+1]))
        # Deconv_6
        self.decoder.add_module("dec_convT_%d_1" % (depth-1), self._conv_transpose(dec_sizes[-1], dec_sizes[-1]))
        # self.decoder.add_module("dec_conv_%d_2" % (depth-1),
        #                         self._conv_block(dec_sizes[-1], self.output_channels,
        #                                          kernel_size=1, stride=1, padding=0))
        # Per-pixel linear projection to one channel per keypoint.
        self.fc6 = nn.Linear(dec_sizes[-1], self.output_channels)
        self.softmax = nn.Softmax(dim=3)

    def forward(self, input):
        """Map an input batch to per-keypoint heatmaps.

        :param input: tensor of shape [batch, seq_length, channels, h, w]
            (5D) or an already channel-first 4D batch.
        :return: heatmaps of shape [batch, out_channels, h, w]
        """
        if len(input.size()) == 5:
            # Input is of size [batch, seq_length, channels, height, width]
            # we want [batch, channels, seq_length, height, width]
            input = input.permute(0, 2, 1, 3, 4).contiguous()
            if not self.is_3D:
                # Get rid of the seq_length dimension when using static leap
                input = input[:, :, -1, :, :]
        out = self.encoder(input)
        out = self.decoder(out)
        if self.is_3D:
            out = out[:, :, -1, :, :]  # Get rid of the seq_length dimension
        out = out.permute([0, 2, 3, 1]).contiguous()  # [batch_size, height, width, channels]
        out = self.fc6(out)
        out = self.softmax(out)  # Softmax on the channel dimension (dim=3)
        # Back to normal dimensions
        out = out.permute([0, 3, 1, 2]).contiguous()  # [batch_size, channels, height, width]
        # If the original size could not be recovered from the deconvolutions
        # Then upsample to original size.
        original_h, original_w = input.size()[-2], input.size()[-1]
        out_h, out_w = out.size()[-2], out.size()[-1]
        if out_h != original_h or out_w != original_w:
            up = nn.Upsample(size=[original_h, original_w], mode='nearest')
            out = up(out)
        return out
class Permute(nn.Module):
    """Module wrapper around Tensor.permute with a fixed dimension order."""

    def __init__(self, shape):
        super().__init__()
        # Target dimension order, e.g. (0, 2, 1).
        self.shape = shape

    def forward(self, x):
        # Reorder the tensor dimensions according to the stored order.
        return x.permute(*self.shape)
if __name__ == '__main__':
    # For debugging only — running this module directly does nothing.
    # Uncomment (and restore the torchsummary import) to inspect the model:
    # model = TLEAP(in_channels=3, out_channels=17, seq_length=4, depth=4)
    # summary(model, input_data=(4,3,200,200))
    pass
| 7,665 | 43.312139 | 144 | py |
t-leap | t-leap-main/datasets/seq_pose_dataset.py | # #############################################################################
# Copyright 2022 Helena Russello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# #############################################################################
import os
import pandas as pd
import numpy as np
import torchvision
from skimage import io, transform
import matplotlib.pyplot as plt
import cv2
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from utils.plotting_utils import show_keypoints
from utils.data_utils import get_heatmaps_likelihood
# noinspection PyShadowingNames
class SequentialPoseDataset(Dataset):
def __init__(self,
video_list,
video_dir,
labels_dir,
seq_length,
n_keypoints=17,
transform=None,
normalize=None,
crop_body=True,
is_test=False,
file_format="%d.png"):
"""
Constructs a sequential dataset. Each item is a sequence of frames from videos
:param video_list: csv file listing videos, start and end of labelled frames (video name, start, end)
:param video_dir: directory where the videos are stored
:param labels_dir: directory where the csv annotations are stored
:param seq_length: lenght of the sequences
:param transform: Optional.
:param normalize: mean and stdev values for normalization
"""
self.videos = np.loadtxt(video_list, delimiter=',', dtype=np.unicode, skiprows=1) # video_name, start, end
self.video_dir = video_dir
self.labels_dir = labels_dir
self.seq_length = seq_length
self.transform = transform
self.normalize = normalize
self.n_keypoints = n_keypoints
self.crop_body = crop_body
self.dataset = []
self.isTest = is_test
# self.MAX_SEQ = 4
self.file_format = file_format
# self.STEP = 4
if is_test:
self.crop_widths = {}
self.videos = self.videos.reshape(-1, 3)
for video, start, end in self.videos:
self.dataset.extend([[video, i] for i in range(int(start), int(end) - seq_length + 2)])
# self.dataset.extend([[video, i] for i in range(int(start), int(end) - self.MAX_SEQ + 2)]) # same number of samples for any seq length
# self.dataset.extend([[video, i + self.MAX_SEQ - 1] for i in range(int(start), int(end) - self.MAX_SEQ + 2, self.STEP)]) # same number of samples for any seq length
    def __getitem__(self, item):
        """
        Build one sample: a sequence of frames plus keypoints, likelihoods and
        per-frame heatmaps, after applying the configured transforms.
        :param item: index into self.dataset ([video, start_frame] pairs)
        :return: dict with keys 'seq', 'keypoints', 'likelihood', 'heatmaps',
                 'video', 'frame' (index of the last frame of the sequence)
        """
        video, start = self.dataset[item][0], self.dataset[item][1]
        # Frames are read from extracted image files, not the raw video.
        frames = self._get_video_frames(video, start, self.file_format, opencv=False)
        csvfilenmae = video.split('.')[0]+'.csv'
        labels = pd.read_csv(os.path.join(self.labels_dir,csvfilenmae ))
        # Rows for this sequence; the first two csv columns are not keypoint data.
        seq_labels = labels[(labels['frame'] >= start) & (labels['frame'] < (start + self.seq_length))].iloc[:, 2:]
        # in csv file, keypoints for a part are in format: part1_x, part1_y, part1_likelihood, part2_x, part2_y, ...
        keypoints_idx = [True, True, False] * self.n_keypoints
        seq_keypoints = np.array(seq_labels.iloc[:, keypoints_idx]).astype('float')
        if seq_keypoints.shape[0] == 0:
            # NOTE(review): leftover debug print — no labels matched this sequence.
            print("here")
        seq_keypoints = seq_keypoints.reshape([seq_keypoints.shape[0], -1, 2])  # shape (seq_length, n_keypoints, 2)
        likelihood_idx = [False, False, True] * self.n_keypoints
        seq_likelihood = np.array(seq_labels.iloc[:, likelihood_idx]).astype('float')
        sample = {'seq': frames, 'keypoints': seq_keypoints, 'likelihood': seq_likelihood}
        if self.transform:
            for t in self.transform:
                # CropBody is stateful: in test mode reuse the cached crop width
                # for this video, otherwise let it derive one (-1 = recompute).
                if isinstance(t, self.CropBody):
                    if self.isTest and (video in self.crop_widths):
                        t.set_crop_width(self.crop_widths[video])
                    else:
                        t.set_crop_width(-1)
                sample = t(sample)
                if isinstance(t, self.CropBody) and self.isTest:
                    self.crop_widths[video] = t.get_crop_width()
        # Generate heatmaps for each frame (after spatial transforms, so the
        # heatmaps match the transformed keypoint positions).
        seq_heatmaps = []
        for i, image in enumerate(sample['seq']):
            i_sample = {'image': image, 'keypoints': sample['keypoints'][i], 'likelihood': sample['likelihood'][i]}
            heatmaps = get_heatmaps_likelihood(i_sample)
            seq_heatmaps.append(heatmaps)
        seq_heatmaps = np.stack(seq_heatmaps)
        sample['heatmaps'] = seq_heatmaps
        # Replace missing keypoints by -1 values
        sample['keypoints'] = np.nan_to_num(sample['keypoints'], nan=-1)
        if self.normalize:
            tensor = transforms.Compose(
                [self.ToTensor(), self.Normalize(self.normalize['mean'], self.normalize['std'])])
        else:
            tensor = transforms.Compose([self.ToTensor()])
        sample = tensor(sample)
        sample['video'] = video
        # 'frame' is the index of the LAST frame of the sequence.
        sample['frame'] = start + self.seq_length - 1
        # sample['frame'] = start
        return sample
def __len__(self):
return len(self.dataset)
def _get_video_frames(self, video, start_frame, file_format, opencv=True):
"""
Retrieves a sequence of frames from a video
:param video: the video to read the frames from
:param start_frame: the start frame of the sequence
:param opencv: whether to extract the frames from the video with opencv
:return: a list of frames of length seq_length or None if an error occured
"""
if opencv:
return self._get_video_frames_opencv(video, start_frame)
video_name = video.split('.')[0]
video_path = os.path.join(self.video_dir, video_name)
frames = []
# for i in range(self.seq_length):
# s = self.seq_length - i
# frame_path = os.path.join(video_path, file_format % (start_frame - s + 1))
for i in range(start_frame, start_frame + self.seq_length):
frame_path = os.path.join(video_path, file_format % i)
frame = cv2.imread(frame_path, cv2.IMREAD_COLOR)
if frame is None:
print("here FRAME NONE")
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frames.append(frame)
return frames
def _get_video_frames_opencv(self, video, start_frame):
"""
Reads a sequence of frames from a video
:param video: the video to read the frames from
:param start_frame: the start frame of the sequence
:return: a list of frames of length seq_length or None if an error occured
"""
video_path = os.path.join(self.video_dir, video)
cap = cv2.VideoCapture(video_path)
# Seek to start of sequence
cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame + 1) # frame 0 = thumbnail of video
i = 0
frames = []
while cap.isOpened() and i < self.seq_length:
ret, frame = cap.read()
if not ret:
print("Error while reading video", video)
return None
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Change to RGB
frames.append(frame)
i += 1
cap.release()
return frames
# noinspection PyShadowingNames
class ToTensor(object):
"""
Convert nd arrays to tensor
source: https://pytorch.org/tutorials/beginner/data_loading_tutorial.html
"""
def __call__(self, sample):
frames, seq_keypoints, seq_heatmaps = sample['seq'], sample['keypoints'], sample['heatmaps']
# Swap color axis
# numpy image H x W x C
# torch image C x H x W
for i, image in enumerate(frames):
image = image.transpose(2, 0, 1)
frames[i] = torch.from_numpy(image)
seq_keypoints[i] = torch.from_numpy(seq_keypoints[i])
seq_heatmaps[i] = torch.from_numpy(seq_heatmaps[i])
frames = torch.stack(frames)
sample['seq'], sample['keypoints'], sample['heatmaps'] = frames, seq_keypoints, seq_heatmaps
return sample
# noinspection PyShadowingNames
class Normalize(object):
"""
Normalize a tensor image with mean and standard deviation.
Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform
will normalize each channel of the input ``torch.*Tensor`` i.e.
``input[channel] = (input[channel] - mean[channel]) / std[channel]``
.. note::
This transform acts out of place, i.e., it does not mutates the input tensor.
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
inplace(bool,optional): Bool to make this operation in-place.
source: https://pytorch.org/docs/stable/_modules/torchvision/transforms/transforms.html#Normalize
"""
def __init__(self, mean, std, inplace=False):
self.normalize = torchvision.transforms.Normalize(mean, std, inplace)
def __call__(self, sample):
frames = sample['seq']
for i, image in enumerate(frames):
frames[i] = self.normalize(image)
sample['seq'] = frames
return sample
# noinspection PyShadowingNames
    class Rescale(object):
        """
        Rescale the image in a sample to a given size.
        Args:
            output_size (tuple or int): Desired output size. If tuple, output is
                matched to output_size. If int, smallest of image edges is matched
                to output_size keeping aspect ratio the same.
        source: https://pytorch.org/tutorials/beginner/data_loading_tutorial.html
        """
        def __init__(self, output_size):
            assert isinstance(output_size, (int, tuple))
            self.output_size = output_size
        def __call__(self, sample):
            frames, seq_keypoints = sample['seq'], sample['keypoints']
            for i, image in enumerate(frames):
                h, w = image.shape[:2]
                if isinstance(self.output_size, int):
                    # int target: match the SMALLEST edge, keep aspect ratio.
                    if h > w:
                        new_h, new_w = self.output_size * h / w, self.output_size
                    else:
                        new_h, new_w = self.output_size, self.output_size * w / h
                else:
                    new_h, new_w = self.output_size
                new_h, new_w = int(new_h), int(new_w)
                frames[i] = transform.resize(image, (new_h, new_w))
                # For keypoints we swap h and w because x and y axis are inverted in the image
                seq_keypoints[i] = seq_keypoints[i] * [new_w / w, new_h / h]
            sample['seq'], sample['keypoints'] = frames, seq_keypoints
            return sample
    class CropBody(object):
        def __init__(self, margin=50, crop_width=-1, random=False, square=False, keep_orig=False):
            """
            Crops a sequence of images by placing a bounding box around the keypoints,
            and returns the cropped image and keypoints in a sample
            :param margin: the margin to place around the left, right, top and bottom keypoints. Default=50
            :param crop_width: the width of the bounding box. Leave to -1.
            :param random: whether to add a random margin around the bbox. Default=False.
            :param square: whether the cropping bbox is squared.
                If True, the height and width of the bbox will be set by the
                left-most keypoint and the right-most keypoint + margin. Default=False.
            :param keep_orig: Whether to keep the non-cropped image and corresponding keypoints in the sample.
                Default=False.
            :return: a sample dictionary containing the sequence of cropped images, and the keypoints.
                If `keep_orig` is set to True, the sample also contains the bboxes, the original (no-cropped) frames, and
                the location of the keypoints in the non-cropped frames.
            """
            self.margin = margin
            self.random = random
            self.square = square
            self.crop_width = crop_width
            self.keep_orig = keep_orig
        def set_crop_width(self, crop_width):
            # -1 means "recompute the width from the first frame's keypoints".
            self.crop_width = crop_width
        def get_crop_width(self):
            return self.crop_width
        def __call__(self, sample):
            frames, seq_keypoints = sample['seq'], sample['keypoints']
            x_min = x_max = y_min = y_max = -1
            crop_height = crop_width = self.crop_width
            bboxes = []
            originals = []
            rnd = None
            # Loop through each frame in the sequence
            for i, image in enumerate(frames):
                if self.keep_orig:
                    originals.append(image)
                keypoints = seq_keypoints[i]
                # NOTE(review): nanmin/nanmax raise if a frame has ALL keypoints NaN.
                x_min, y_min = np.nanmin(keypoints, axis=0).astype(int)  # Left-most and bottom most keypoints
                x_max, y_max = np.nanmax(keypoints, axis=0).astype(int)  # Right-most and top most keypoints
                h, w = image.shape[:2]
                if i == 0 and crop_width == -1:
                    # If it's the first frame in the sequence, and we don't have a defined crop width,
                    # set the width to the distance between the left- and right-most keypoint + twice the margin.
                    # If it's square, the height will be the same as the width
                    crop_height = crop_width = (x_max - x_min) + self.margin * 2
                    if not self.square:
                        # If not square, the height is the distance between the top- and bottom-most keypoint
                        # + twice the margin
                        crop_height = (y_max - y_min) + self.margin * 2
                if i == 0:
                    # define the bbox for the first frame of the sequence,
                    # the other frames in the sequence will have the same bbox.
                    x_mid = x_min + (x_max - x_min) // 2  # horizontal center of bbox
                    x_left = x_mid - crop_width // 2  # left side of bbox
                    if self.random and rnd is None:
                        # add random noise to the bbox
                        rnd = np.random.randint(-self.margin, self.margin)
                        x_left -= rnd
                    # if the bbox is outside of the image to the left, place it at x = 0
                    if x_left < 0:
                        x_left = 0
                    # if the bbox is outside of the image to the right, place it at the border of the image
                    if x_left + crop_width > w:
                        x_left -= (x_left + crop_width) - w
                    x_right = x_left + crop_width  # right side of the bbox
                    y_mid = y_min + (y_max - y_min) // 2  # vertical center
                    y_top = y_mid - crop_height // 2  # top of the bbox
                    # NOTE(review): rnd was already set in the x-branch when
                    # random=True, so this y-jitter branch never fires — confirm
                    # whether a shared or a second random offset was intended.
                    if self.random and rnd is None:
                        rnd = np.random.randint(-self.margin, self.margin)
                        y_top -= rnd
                    # if the bbox is outside of the image to the top, place it at y = 0
                    if y_top < 0:
                        y_top = 0
                    # if the bbox is outside of the image to the bottom, place it at the border of the image
                    if y_top + crop_height > h:
                        y_top -= (y_top + crop_height) - h
                    y_bottom = y_top + crop_height  # bottom side of the bbox
                image = image[y_top:y_bottom, x_left:x_right]  # the cropped image
                keypoints = keypoints - [x_left, y_top]  # adjust the keypoints to the cropped image
                frames[i] = image
                seq_keypoints[i] = keypoints
                bboxes.append([y_top, y_bottom, x_left, x_right])
            sample['seq'], sample['keypoints'] = frames, seq_keypoints
            if self.keep_orig:
                sample['bboxes'] = bboxes
                sample['orig_seq'] = originals
                sample['crop_width'] = crop_width
            # Persist the width so test-time callers can cache and reuse it.
            self.set_crop_width(crop_width)
            return sample
# noinspection PyShadowingNames
class RandomCrop(object):
"""
Randomly crop the sample image
Args:
output_size (tuple or int): Desired output size. If int, square crop
is made.
source: https://pytorch.org/tutorials/beginner/data_loading_tutorial.html
"""
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, sample):
frames, seq_keypoints = sample['seq'], sample['keypoints']
left, top = -1, -1
for i, image in enumerate(frames):
keypoints = seq_keypoints[i]
h, w = image.shape[:2]
new_h, new_w = self.output_size
if new_h < h:
if top < 0:
top = np.random.randint(0, h - new_h)
image = image[top: top + new_h, :]
keypoints = keypoints - [0, top]
keypoints[keypoints[:, 1] > new_h, ] = None
keypoints[keypoints[:, 1] < 0, ] = None
if new_w < w:
if left < 0:
left = np.random.randint(0, w - new_w)
image = image[:, left: left + new_w]
keypoints = keypoints - [left, 0]
keypoints[keypoints[:, 0] > new_w, ] = None
keypoints[keypoints[:, 0] < 0, ] = None
frames[i] = image
seq_keypoints[i] = keypoints
sample['seq'], sample['keypoints'] = frames, seq_keypoints
return sample
# noinspection PyShadowingNames
class RandomHorizontalFlip(object):
"""Horizontally flip the given image randomly with a given probability.
Args:
p (float): probability of the image being flipped. Default value is 0.5
source; https://pytorch.org/docs/stable/_modules/torchvision/transforms/transforms.html
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, sample):
frames, seq_keypoints, seq_likelihood = sample['seq'], sample['keypoints'], sample['likelihood']
if np.random.random() < self.p:
for i, image in enumerate(frames):
frames[i] = np.fliplr(image)
w = np.shape(image)[1]
seq_keypoints[:, :, 0] = w - seq_keypoints[:, :, 0]
# Swap left and right leg
left_idx = [0, 1, 2, 6, 7, 8]
right_idx = [3, 4, 5, 9, 10, 11]
new_right = seq_keypoints[:, left_idx, :]
seq_keypoints[:, left_idx, :] = seq_keypoints[:, right_idx, :]
seq_keypoints[:, right_idx, :] = new_right
sample['seq'], sample['keypoints'] = frames, seq_keypoints
return sample
# noinspection PyShadowingNames
    class RandomRotate(object):
        """
        Rotate the image in a random angle between -theta, +theta.
        The same angle is applied to every frame and keypoint of the sequence.
        Args:
            theta (int, tuple): The range of the rotation angle
        """
        def __init__(self, theta):
            assert isinstance(theta, (int, tuple))
            self.theta = theta
        def __call__(self, sample):
            frames, seq_keypoints = sample['seq'], sample['keypoints']
            # Get a random rotation angle within the configured range
            if isinstance(self.theta, int):
                angle = np.random.randint(-self.theta, self.theta)
            else:
                angle = np.random.randint(self.theta[0], self.theta[1])
            # Calculate the rotation matrix around the image centre (assumes
            # every frame has the same size as the first one)
            h, w = np.shape(frames[0])[:2]
            center = (w / 2, h / 2)
            rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1)
            for i, image in enumerate(frames):
                # Rotate the image
                frames[i] = cv2.warpAffine(image, rotation_matrix, (w, h))
                # Rotate the keypoints coordinates: (k,2) @ R^T + t applies the
                # same affine transform to each (x, y) row
                keypoints_rotate = np.transpose(rotation_matrix[:, 0:2])
                keypoints_offset = rotation_matrix[:, 2]
                seq_keypoints[i] = np.matmul(seq_keypoints[i], keypoints_rotate) + keypoints_offset
            sample['seq'], sample['keypoints'] = frames, seq_keypoints
            return sample
# noinspection PyShadowingNames
    class BrightnessContrast(object):
        """Randomly change the brightness and contrast of an image.
        The same factors are applied to every frame of the sequence.
        Args:
            brightness (tuple of float (min, max)): How much to jitter brightness.
                Should be between -100 and 100
            contrast (tuple of float (min, max)): How much to jitter contrast.
                Should be between -3.0 and 3.0
        source: https://pytorch.org/docs/stable/_modules/torchvision/transforms/transforms.html#ColorJitter
        """
        def __init__(self, brightness=(0, 0), contrast=(1, 1)):
            self.brightness = self._check_value(brightness, bound=(-100, 100))
            self.contrast = self._check_value(contrast, bound=(-3.0, 3.0))
        def __call__(self, sample):
            frames = sample['seq']
            # Draw one factor pair for the whole sequence so frames stay consistent.
            brightness_factor = np.random.uniform(self.brightness[0], self.brightness[1])
            contrast_factor = np.random.uniform(self.contrast[0], self.contrast[1])
            for i, image in enumerate(frames):
                new_image = np.zeros_like(image)
                # out = |alpha * image + beta|, saturated to uint8 range
                cv2.convertScaleAbs(image, new_image, alpha=contrast_factor, beta=brightness_factor)
                frames[i] = new_image
            sample['seq'] = frames
            return sample
        def _check_value(self, value, bound):
            # Validate that value is an ordered (min, max) tuple within bound.
            assert isinstance(value, tuple)
            assert (bound[0] <= value[0] <= value[1] <= bound[1])
            return value
if __name__ == '__main__':
    # No CLI entry point; this module is meant to be imported.
    pass
| 23,293 | 40.155477 | 178 | py |
t-leap | t-leap-main/utils/data_utils.py | # #############################################################################
# Copyright 2022 Helena Russello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# #############################################################################
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from utils.train_utils import seed_worker
def get_mean_std(dataloader):
    """
    Calculate the mean and stddev of the dataset.
    This assumes that the whole data can't be loaded in RAM.
    Instead of calculating the mean and stddev over the whole dataset, we average
    the per-batch mean and stddev (an approximation of the global values).
    :param dataloader: the pytorch dataloader yielding (batch, channels, ...) tensors
    :return: (tuple) mean, std — one value per channel
    source: https://discuss.pytorch.org/t/about-normalization-using-pre-trained-vgg16-networks/23560/6
    """
    mean = 0.
    std = 0.
    nb_samples = 0.
    for data in dataloader:
        batch_samples = data.size(0)
        # Flatten all spatial dims so stats are computed per channel.
        data = data.view(batch_samples, data.size(1), -1)
        mean += data.mean(2).sum(0)
        std += data.std(2).sum(0)
        nb_samples += batch_samples
    mean /= nb_samples
    std /= nb_samples
    # The docstring always promised (mean, std) but the original function
    # returned None — the missing return statement is restored here.
    return mean, std
def get_keypoints_batch(batch_heatmaps, thr=0):
    """
    Gets the keypoints from the heatmaps for the whole batch
    :param batch_heatmaps: The heatmaps of the batch, shape (batch, n_keypoints, h, w)
    :param thr: Threshold for the location of the keypoints.
        Default =0. So if the keypoint is located a (0,0), it set as non-existing.
    :return: tensor of shape (batch, n_keypoints, 2) with one (x, y) per keypoint
    """
    # NOTE(review): the last two dims are unpacked as w, h but only b and k are used.
    b, k, w, h = batch_heatmaps.size()
    batch_keypoints = torch.zeros([b, k, 2])
    for i, heatmaps in enumerate(batch_heatmaps):
        keypoints = get_keypoints(heatmaps, thr)
        batch_keypoints[i, :, :] = keypoints
    return batch_keypoints
def get_keypoints(heatmaps, thr=0):
    """
    Extract one (x, y) keypoint per heatmap by taking the location of its peak.
    :param heatmaps: The heatmaps of shape (n_keypoints, height, width)
    :param thr: Threshold on the peak value; peaks at or below it are reported
        as (-1, -1) (so an all-zero heatmap yields a non-existing keypoint).
    :return: tensor of shape (n_keypoints, 2) with (x, y) coordinates
    """
    num_maps, _, width = heatmaps.size()
    flattened = heatmaps.view(num_maps, -1)
    peak_vals, peak_idx = flattened.max(dim=1)
    # Recover 2-D coordinates from the flat argmax (column vectors).
    cols = (peak_idx % width).view(-1, 1)
    rows = (peak_idx // width).view(-1, 1)
    # Mask out peaks that do not clear the threshold.
    missing = peak_vals <= thr
    cols[missing] = -1
    rows[missing] = -1
    return torch.cat((cols, rows), dim=1)
def get_heatmaps(sample, sigma=5, normalize=True):
    """
    Generate heatmaps from a sample that carries no likelihood information.
    A likelihood of 1 is injected for every keypoint, then the work is
    delegated to get_heatmaps_likelihood.
    :param sample: Dictionary containing an image and keypoints.
    :param sigma: The standard deviation of the gaussian. Default = 5 pixels.
    :param normalize: Whether to normalize the heatmaps
    :return: The heatmaps of the keypoints
    """
    n_keypoints = np.shape(sample['keypoints'])[0]
    sample['likelihood'] = np.ones(n_keypoints)  # full confidence for every keypoint
    return get_heatmaps_likelihood(sample, sigma, normalize)
def get_heatmaps_likelihood(sample, sigma=5, normalize=True):
    """
    Generate one gaussian heatmap per keypoint of a sample.
    Keypoints with a negative (or NaN) coordinate produce an all-zero map.
    :param sample: dict with 'image' (H x W x C), 'keypoints' (n x 2) and 'likelihood'
    :param sigma: standard deviation of the gaussian, in pixels
    :param normalize: if True the peak is 1; if False the map is scaled to a pdf
    :return: heatmaps of shape (n_keypoints, H, W)
    """
    image, keypoints, likelihood = sample['image'], sample['keypoints'], sample['likelihood']
    height = np.shape(image)[0]
    width = np.shape(image)[1]
    # Pixel coordinate grids shared by every heatmap.
    grid_x, grid_y = np.meshgrid(np.arange(width), np.arange(height))
    # Missing keypoints (NaN) become -1 and are skipped below.
    keypoints = np.nan_to_num(keypoints, nan=-1)
    heatmaps = np.zeros([np.shape(keypoints)[0], height, width])
    for idx, keypoint in enumerate(keypoints):
        kx, ky = keypoint[0], keypoint[1]
        if kx < 0 or ky < 0:
            continue
        # Gaussian with its peak at the keypoint annotation. The likelihood is
        # currently unused (per-keypoint sigma scaling is disabled).
        heatmaps[idx] = np.exp(-((grid_y - ky) ** 2 + (grid_x - kx) ** 2) / (2 * sigma ** 2))
        if not normalize:
            heatmaps[idx] /= sigma * np.sqrt(2 * np.pi)
    return heatmaps
def get_keypoints_original_size(keypoints, bbox, scaling_factor=1):
    """
    Scales back keypoints from a resized bounding box to the original image.
    Keypoints with a non-positive coordinate are treated as missing and mapped
    to (0, 0).
    :param keypoints: the predicted keypoints (n x 2 tensor)
    :param bbox: the bounding box [y_top, y_bottom, x_left, x_right]
    :param scaling_factor: the scaling factor from the resized bbox to the original bbox
    :return: the coordinates of the keypoints in the original image (CPU tensor)
    """
    restored = torch.full_like(keypoints, -1, dtype=torch.float32).cpu()
    y_offset, x_offset = bbox[0], bbox[2]
    for idx, kp in enumerate(keypoints):
        if kp[0] > 0 and kp[1] > 0:
            # Undo the resize, then translate by the bbox origin.
            restored[idx][0] = kp[0].cpu() * scaling_factor + x_offset
            restored[idx][1] = kp[1].cpu() * scaling_factor + y_offset
        else:
            restored[idx][0] = 0
            restored[idx][1] = 0
    return restored
def dataset_split(train_dataset, val_dataset, config, k_fold=1):
    """
    Split the train and validation set according to the settings from config.
    If k_fold is specified, divides the dataset into k parts and provide k train and validation loaders
    :param train_dataset: dataset for training
    :param val_dataset: dataset for validation (same indices as train_dataset; may differ in transforms)
    :param config: configuration list (uses config.val_size and config.batch_size)
    :param k_fold: number of folds. Default: 1
    :return: the train loaders and the validation loaders (lists of length k_fold)
    """
    # Shuffle the dataset
    indices = list(range(len(train_dataset)))
    np.random.shuffle(indices)
    # Divide the dataset into k equal sets
    len_split = len(train_dataset) // k_fold
    # NOTE(review): despite its name, train_split is the NUMBER OF VALIDATION
    # samples per fold (val_size fraction of the fold).
    train_split = int(np.floor(config.val_size * len_split))
    train_loaders = []
    val_loaders = []
    # for each fold make a train and validation set
    for split in range(k_fold):
        start_split = split * len_split
        split_indices = indices[start_split:start_split+len_split]
        # First train_split shuffled indices go to validation, the rest to training.
        train_idx, val_idx = split_indices[train_split:], split_indices[: train_split]
        train_sampler = SubsetRandomSampler(train_idx)
        val_sampler = SubsetRandomSampler(val_idx)
        train_loaders.append(DataLoader(train_dataset, batch_size=config.batch_size, sampler=train_sampler, drop_last=False, worker_init_fn=seed_worker))
        val_loaders.append(DataLoader(val_dataset, batch_size=1, sampler=val_sampler, drop_last=False, worker_init_fn=seed_worker))
    return train_loaders, val_loaders
| 7,253 | 37.585106 | 171 | py |
t-leap | t-leap-main/utils/plotting_utils.py | # #############################################################################
# Copyright 2022 Helena Russello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# #############################################################################
import numpy as np
import matplotlib.pyplot as plt
import torch
def show_heatmaps(image, heatmaps, concat=True, save_fname=None):
    """
    Shows the heatmaps on top of the image with Pyplot.
    :param image: The image to display (tensor C x H x W or array H x W x C).
    :param heatmaps: The heatmaps of the image.
    :param concat: Whether to show all the heatmaps in one image, or separately. Default = True.
    :param save_fname: Filename to save the image at. Default = None.
    :return: None.
    """
    if type(image) == torch.Tensor:
        # C x H x W tensor -> H x W x C for imshow
        image = image.permute(1,2,0).to('cpu')
    h, w, _ = np.shape(image)
    heatmaps_plt = torch.zeros([h, w])
    heatmaps = heatmaps.to("cpu")
    heatmaps = heatmaps.detach()
    if image.max() > 1:
        # Values above 1 imply a 0-255 image; imshow needs integer dtype then.
        image = image.int()
    if concat:
        # Sum all heatmaps into a single overlay.
        for i, heatmap in enumerate(heatmaps):
            heatmaps_plt += heatmap
        fig = plt.figure()
        img1 = plt.imshow(image, interpolation='none', cmap='gray')
        img2 = plt.imshow(heatmaps_plt, interpolation='none', cmap='jet', alpha=0.5)
        if save_fname:
            plt.savefig(save_fname)
            plt.close()
    else:
        # One figure per heatmap; files are suffixed with the keypoint index.
        for i, heatmap in enumerate(heatmaps):
            heatmaps_plt = heatmap
            fig = plt.figure()
            img1 = plt.imshow(image, interpolation='none', cmap='gray')
            img2 = plt.imshow(heatmaps_plt, interpolation='none', cmap='jet', alpha=0.5)
            if save_fname:
                plt.savefig(save_fname+'_%d.png' % i)
                plt.close()
def show_keypoints(image, keypoints, save=False, save_fname='keypoints_plot.png', cmap='viridis', tb=True, colormap=None, figsize=(5,5)):
    """
    Shows the keypoints on top of the image with Pyplot.
    :param image: The image to display.
    :param keypoints: The keypoints of the image
    :param save: Whether to save the image or to simply display it. Default = False.
    :param save_fname: Filename to save the image. Default = 'keypoints_plot.png'
    :param cmap: Deprecated, use the colormap param instead. Cmap of the keypoints. Default = 'viridis'.
    :param tb: For use with Tensorboard. Default = True.
    :param colormap: colormap for the keypoints. If None, default colors are selected (only works with <=17 keypoints. If using more, add your own colormap). Default = None.
    :param figsize: Size of the plt figure.
    :return: The figure.
    """
    if type(image) == torch.Tensor and (image.size()[0] == 3 or image.size()[0] == 1):
        # C x H x W tensor -> H x W x C for imshow
        image = image.permute(1, 2, 0).to('cpu')
    if len(np.shape(image)) == 3 and np.shape(image)[2] == 1:
        # Single-channel images are displayed as 2-D grayscale.
        image = np.reshape(image, np.shape(image)[0:2])
    if type(keypoints) == torch.Tensor:
        # np.float was removed in NumPy 1.24; the builtin float is the
        # supported equivalent (float64).
        keypoints = keypoints.cpu().detach().numpy().astype(float)
    fig = plt.figure(figsize=figsize)
    if torch.is_tensor(image):
        image = image.squeeze().detach().numpy()
    if image.max() > 1:
        # np.int was removed in NumPy 1.24; the builtin int is equivalent.
        image = image.astype(int)
    plt.axis('off')
    plt.imshow(image, cmap=cmap)
    if colormap is None:
        colormap = ['navy', 'mediumblue', 'blue',
                    'dodgerblue', 'lightskyblue', 'deepskyblue',
                    'turquoise', 'aquamarine', 'palegreen',
                    'khaki', 'yellow', 'gold',
                    'orange', 'darkorange',
                    'orangered', 'red', 'darkred'
                    ]
        if keypoints.shape[0] != 17:
            # The default palette has exactly 17 entries; fall back otherwise.
            colormap = "red"
    # Don't display keypoints that are out of bounds (e.g., keypoints at (-1,-1))
    h, w = image.shape[:2]
    # NOTE(review): this mutates the caller's keypoints array when it is numpy.
    keypoints[keypoints[:, 1] > h, ] = np.nan
    keypoints[keypoints[:, 1] < 0, ] = np.nan
    keypoints[keypoints[:, 0] > w, ] = np.nan
    keypoints[keypoints[:, 0] < 0, ] = np.nan
    plt.scatter(keypoints[:, 0], keypoints[:, 1], s=30, marker='o', c=colormap)
    plt.tight_layout()
    if save:
        plt.savefig(save_fname, transparent=True, bbox_inches='tight', pad_inches=0)
        plt.close()
    elif not tb:
        plt.pause(0.001)
    return fig
| 4,876 | 38.016 | 173 | py |
t-leap | t-leap-main/utils/train_utils.py | # #############################################################################
# Copyright 2022 Helena Russello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# #############################################################################
import argparse
import core.config
import torch
import os
import numpy
import random
def seed_all(seed):
    """
    Sets the random seed everywhere. See: https://discuss.pytorch.org/t/reproducibility-with-all-the-bells-and-whistles/81097
    :param seed: The random seed; when None, defaults to 10.
    :return: None.
    """
    if seed is None:
        # The old `if not seed` check also replaced the perfectly valid seed 0
        # with 10; only a missing seed should fall back to the default.
        seed = 10
    print("[ Using Seed : ", seed, " ]")
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.cuda.manual_seed(seed)
    numpy.random.seed(seed)
    random.seed(seed)
    # Trade speed for reproducibility in cuDNN convolutions.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def seed_worker(worker_id):
    """
    DataLoader worker init: derive a 32-bit seed from torch's initial seed and
    apply it to numpy and random so workers are reproducible.
    https://discuss.pytorch.org/t/reproducibility-with-all-the-bells-and-whistles/81097
    """
    derived_seed = torch.initial_seed() % 2 ** 32
    numpy.random.seed(derived_seed)
    random.seed(derived_seed)
def save_model(config, model, epoch, optimizer, scheduler, loss, path):
    """
    Save the model weights and the status of training to a pickle file.
    :param config: run configuration stored alongside the weights
    :param model: the model to save
    :param epoch: the epoch we were at
    :param optimizer: the optimizer
    :param scheduler: the learning-rate scheduler
    :param loss: the loss function
    :param path: the path where to save
    :return: The path where the state was saved
    """
    checkpoint = {
        'config': config,
        'epoch': epoch,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
        'loss': loss,
    }
    torch.save(checkpoint, path)
    return path
def load_model(path):
    """
    Loads a checkpoint of a trained model.
    :param path: the path of the pickle file
    :return: the checkpoint dictionary (config, epoch, model, optimizer, scheduler, loss)
    """
    checkpoint = torch.load(path)
    return checkpoint
| 2,444 | 29.949367 | 125 | py |
ditto | ditto-master/train_ditto.py | import os
import argparse
import json
import sys
import torch
import numpy as np
import random
sys.path.insert(0, "Snippext_public")
from ditto_light.dataset import DittoDataset
from ditto_light.summarize import Summarizer
from ditto_light.knowledge import *
from ditto_light.ditto import train
if __name__=="__main__":
    # CLI entry point: parse hyperparameters, seed RNGs, optionally summarize
    # and inject domain knowledge into the datasets, then train and evaluate.
    parser = argparse.ArgumentParser()
    parser.add_argument("--task", type=str, default="Structured/Beer")
    parser.add_argument("--run_id", type=int, default=0)
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--max_len", type=int, default=256)
    parser.add_argument("--lr", type=float, default=3e-5)
    parser.add_argument("--n_epochs", type=int, default=20)
    parser.add_argument("--finetuning", dest="finetuning", action="store_true")
    parser.add_argument("--save_model", dest="save_model", action="store_true")
    parser.add_argument("--logdir", type=str, default="checkpoints/")
    parser.add_argument("--lm", type=str, default='distilbert')
    parser.add_argument("--fp16", dest="fp16", action="store_true")
    parser.add_argument("--da", type=str, default=None)
    parser.add_argument("--alpha_aug", type=float, default=0.8)
    parser.add_argument("--dk", type=str, default=None)
    parser.add_argument("--summarize", dest="summarize", action="store_true")
    parser.add_argument("--size", type=int, default=None)
    hp = parser.parse_args()
    # set seeds (run_id doubles as the random seed so reruns are reproducible)
    seed = hp.run_id
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    # only a single task for baseline
    task = hp.task
    # create the tag of the run (used to name checkpoints/logs)
    run_tag = '%s_lm=%s_da=%s_dk=%s_su=%s_size=%s_id=%d' % (task, hp.lm, hp.da,
            hp.dk, hp.summarize, str(hp.size), hp.run_id)
    run_tag = run_tag.replace('/', '_')
    # load task configuration (dataset paths keyed by task name)
    configs = json.load(open('configs.json'))
    configs = {conf['name'] : conf for conf in configs}
    config = configs[task]
    trainset = config['trainset']
    validset = config['validset']
    testset = config['testset']
    # summarize the sequences up to the max sequence length
    if hp.summarize:
        summarizer = Summarizer(config, lm=hp.lm)
        trainset = summarizer.transform_file(trainset, max_len=hp.max_len)
        validset = summarizer.transform_file(validset, max_len=hp.max_len)
        testset = summarizer.transform_file(testset, max_len=hp.max_len)
    # optional domain-knowledge injection ('product' gets a specialized injector)
    if hp.dk is not None:
        if hp.dk == 'product':
            injector = ProductDKInjector(config, hp.dk)
        else:
            injector = GeneralDKInjector(config, hp.dk)
        trainset = injector.transform_file(trainset)
        validset = injector.transform_file(validset)
        testset = injector.transform_file(testset)
    # load train/dev/test sets (data augmentation and size cap only on train)
    train_dataset = DittoDataset(trainset,
                                 lm=hp.lm,
                                 max_len=hp.max_len,
                                 size=hp.size,
                                 da=hp.da)
    valid_dataset = DittoDataset(validset, lm=hp.lm)
    test_dataset = DittoDataset(testset, lm=hp.lm)
    # train and evaluate the model
    train(train_dataset,
          valid_dataset,
          test_dataset,
          run_tag, hp)
| 3,355 | 35.086022 | 79 | py |
ditto | ditto-master/matcher.py | import torch
import torch.nn as nn
import os
import numpy as np
import random
import json
import jsonlines
import csv
import re
import time
import argparse
import sys
import sklearn
import traceback
from torch.utils import data
from tqdm import tqdm
from apex import amp
from scipy.special import softmax
from ditto_light.ditto import evaluate, DittoModel
from ditto_light.exceptions import ModelNotFoundError
from ditto_light.dataset import DittoDataset
from ditto_light.summarize import Summarizer
from ditto_light.knowledge import *
def set_seed(seed: int):
    """Seed every RNG used by the pipeline (``random``, ``numpy``, ``torch``,
    and all CUDA devices) so that runs are reproducible."""
    for seeder in (random.seed,
                   np.random.seed,
                   torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
def to_str(ent1, ent2, summarizer=None, max_len=256, dk_injector=None):
    """Serialize a pair of data entries into Ditto's tab-separated format.

    Args:
        ent1 (Dictionary): the 1st data entry
        ent2 (Dictionary): the 2nd data entry
        summarizer (Summarizer, optional): the summarization module
        max_len (int, optional): the max sequence length
        dk_injector (DKInjector, optional): the domain-knowledge injector

    Returns:
        string: the serialized version
    """
    def serialize(ent):
        # a string entry is taken verbatim; a dict is flattened as COL/VAL pairs
        if isinstance(ent, str):
            return ent
        return ''.join('COL %s VAL %s ' % (attr, ent[attr]) for attr in ent.keys())

    content = serialize(ent1) + '\t' + serialize(ent2) + '\t' + '0'
    if summarizer is not None:
        content = summarizer.transform(content, max_len=max_len)

    new_ent1, new_ent2, _ = content.split('\t')
    if dk_injector is not None:
        new_ent1 = dk_injector.transform(new_ent1)
        new_ent2 = dk_injector.transform(new_ent2)

    return new_ent1 + '\t' + new_ent2 + '\t0'
def classify(sentence_pairs, model,
             lm='distilbert',
             max_len=256,
             threshold=None):
    """Apply the matcher model to a list of serialized pairs.

    Args:
        sentence_pairs (list of str): the sequence pairs
        model (MultiTaskNet): the model in pytorch
        lm (str, optional): the language model name
        max_len (int, optional): the max sequence length
        threshold (float, optional): the threshold of the 0's class

    Returns:
        list of int: binary predictions of the pairs
        list of list of float: raw logits of the pairs
    """
    dataset = DittoDataset(sentence_pairs,
                           max_len=max_len,
                           lm=lm)
    # one single batch holding the whole input
    loader = data.DataLoader(dataset=dataset,
                             batch_size=len(dataset),
                             shuffle=False,
                             num_workers=0,
                             collate_fn=DittoDataset.pad)

    all_probs, all_logits = [], []
    with torch.no_grad():
        for batch in loader:
            x, _ = batch
            logits = model(x)
            all_probs += logits.softmax(dim=1)[:, 1].cpu().numpy().tolist()
            all_logits += logits.cpu().numpy().tolist()

    cutoff = 0.5 if threshold is None else threshold
    predictions = [1 if p > cutoff else 0 for p in all_probs]
    return predictions, all_logits
def predict(input_path, output_path, config,
            model,
            batch_size=1024,
            summarizer=None,
            lm='distilbert',
            max_len=256,
            dk_injector=None,
            threshold=None):
    """Run the model over the input file containing the candidate entry pairs.

    Args:
        input_path (str): the input file path
        output_path (str): the output file path
        config (Dictionary): task configuration
        model (DittoModel): the model for prediction
        batch_size (int): the batch size
        summarizer (Summarizer, optional): the summarization module
        lm (str, optional): the language model name
        max_len (int, optional): the max sequence length
        dk_injector (DKInjector, optional): the domain-knowledge injector
        threshold (float, optional): the threshold of the 0's class

    Returns:
        None
    """
    def flush(rows, pairs, writer):
        # classify one accumulated batch and stream the results out
        predictions, logits = classify(pairs, model, lm=lm,
                                       max_len=max_len,
                                       threshold=threshold)
        scores = softmax(logits, axis=1)
        for row, pred, score in zip(rows, predictions, scores):
            writer.write({'left': row[0], 'right': row[1],
                          'match': pred,
                          'match_confidence': score[int(pred)]})

    # input_path can also be train/valid/test.txt; convert to jsonlines first
    if '.txt' in input_path:
        with jsonlines.open(input_path + '.jsonl', mode='w') as writer:
            for line in open(input_path):
                writer.write(line.split('\t')[:2])
        input_path += '.jsonl'

    # batch processing
    start_time = time.time()
    with jsonlines.open(input_path) as reader,\
         jsonlines.open(output_path, mode='w') as writer:
        rows, pairs = [], []
        for idx, row in tqdm(enumerate(reader)):
            pairs.append(to_str(row[0], row[1], summarizer, max_len, dk_injector))
            rows.append(row)
            if len(pairs) == batch_size:
                flush(rows, pairs, writer)
                pairs.clear()
                rows.clear()
        if pairs:
            flush(rows, pairs, writer)

    # append the wall-clock runtime of this run to log.txt
    run_time = time.time() - start_time
    run_tag = '%s_lm=%s_dk=%s_su=%s' % (config['name'], lm, str(dk_injector != None), str(summarizer != None))
    os.system('echo %s %f >> log.txt' % (run_tag, run_time))
def tune_threshold(config, model, hp):
    """Tune the prediction threshold for a given model on a validation set.

    The threshold maximizing F1 on the validation set is selected via
    ``evaluate``; the choice is then sanity-checked by re-running the full
    ``predict`` pipeline on the same set and recomputing F1.

    Args:
        config (Dictionary): the task configuration
        model (DittoModel): the trained matcher
        hp (Namespace): hyper-parameters (lm, max_len, summarize, dk, ...)

    Returns:
        float: the selected threshold on the match class
    """
    validset = config['validset']
    # summarize the sequences up to the max sequence length
    set_seed(123)
    summarizer = injector = None
    if hp.summarize:
        summarizer = Summarizer(config, lm=hp.lm)
        validset = summarizer.transform_file(validset, max_len=hp.max_len, overwrite=True)
    if hp.dk is not None:
        if hp.dk == 'product':
            injector = ProductDKInjector(config, hp.dk)
        else:
            injector = GeneralDKInjector(config, hp.dk)
        validset = injector.transform_file(validset)
    # load dev sets
    valid_dataset = DittoDataset(validset,
                                 max_len=hp.max_len,
                                 lm=hp.lm)
    valid_iter = data.DataLoader(dataset=valid_dataset,
                                 batch_size=64,
                                 shuffle=False,
                                 num_workers=0,
                                 collate_fn=DittoDataset.pad)
    # search the threshold that maximizes F1 on the validation set
    f1, th = evaluate(model, valid_iter, threshold=None)
    # verify F1 by replaying the full prediction pipeline
    set_seed(123)  # reseed so predict() sees the same randomized state
    predict(validset, "tmp.jsonl", config, model,
            summarizer=summarizer,
            max_len=hp.max_len,
            lm=hp.lm,
            dk_injector=injector,
            threshold=th)
    predicts = []
    with jsonlines.open("tmp.jsonl", mode="r") as reader:
        for line in reader:
            predicts.append(int(line['match']))
    os.system("rm tmp.jsonl")
    # gold labels are the last tab-separated field of each line
    labels = []
    with open(validset) as fin:
        for line in fin:
            labels.append(int(line.split('\t')[-1]))
    real_f1 = sklearn.metrics.f1_score(labels, predicts)
    print("load_f1 =", f1)
    print("real_f1 =", real_f1)
    return th
def load_model(task, path, lm, use_gpu, fp16=True):
    """Load a trained model checkpoint for a specific task.

    Args:
        task (str): the task name
        path (str): the path of the checkpoint directory
        lm (str): the language model
        use_gpu (boolean): whether to use gpu
        fp16 (boolean, optional): whether to use fp16

    Returns:
        Dictionary: the task config
        DittoModel: the model

    Raises:
        ModelNotFoundError: if the checkpoint file does not exist
    """
    # locate the checkpoint
    checkpoint = os.path.join(path, task, 'model.pt')
    if not os.path.exists(checkpoint):
        raise ModelNotFoundError(checkpoint)

    # load the task configuration (close the file handle deterministically)
    with open('configs.json') as fin:
        configs = json.load(fin)
    configs = {conf['name'] : conf for conf in configs}
    config = configs[task]

    # fall back to CPU when no GPU is available or not requested
    if use_gpu:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
    else:
        device = 'cpu'

    # restore the model weights on the selected device
    model = DittoModel(device=device, lm=lm)
    saved_state = torch.load(checkpoint, map_location=lambda storage, loc: storage)
    model.load_state_dict(saved_state['model'])
    model = model.to(device)

    # optional mixed-precision inference
    if fp16 and 'cuda' in device:
        model = amp.initialize(model, opt_level='O2')

    return config, model
if __name__ == "__main__":
    # Command-line interface for running the matcher end-to-end:
    # load a checkpoint, tune the threshold, then predict on candidates.
    parser = argparse.ArgumentParser()
    parser.add_argument("--task", type=str, default='Structured/Beer')
    parser.add_argument("--input_path", type=str, default='input/candidates_small.jsonl')
    parser.add_argument("--output_path", type=str, default='output/matched_small.jsonl')
    parser.add_argument("--lm", type=str, default='distilbert')
    parser.add_argument("--use_gpu", dest="use_gpu", action="store_true")
    parser.add_argument("--fp16", dest="fp16", action="store_true")
    parser.add_argument("--checkpoint_path", type=str, default='checkpoints/')
    parser.add_argument("--dk", type=str, default=None)
    parser.add_argument("--summarize", dest="summarize", action="store_true")
    parser.add_argument("--max_len", type=int, default=256)
    hp = parser.parse_args()
    # load the models
    set_seed(123)
    config, model = load_model(hp.task, hp.checkpoint_path,
                               hp.lm, hp.use_gpu, hp.fp16)
    # optional preprocessing modules (summarization / domain knowledge)
    summarizer = dk_injector = None
    if hp.summarize:
        summarizer = Summarizer(config, hp.lm)
    if hp.dk is not None:
        if 'product' in hp.dk:
            dk_injector = ProductDKInjector(config, hp.dk)
        else:
            dk_injector = GeneralDKInjector(config, hp.dk)
    # tune the classification threshold on the validation set
    threshold = tune_threshold(config, model, hp)
    # run prediction
    predict(hp.input_path, hp.output_path, config, model,
            summarizer=summarizer,
            max_len=hp.max_len,
            lm=hp.lm,
            dk_injector=dk_injector,
            threshold=threshold)
| 10,842 | 31.175074 | 110 | py |
ditto | ditto-master/ditto_light/dataset.py | import torch
from torch.utils import data
from transformers import AutoTokenizer
from .augment import Augmenter
# map lm name to huggingface's pre-trained model names
lm_mp = {'roberta': 'roberta-base',
'distilbert': 'distilbert-base-uncased'}
def get_tokenizer(lm):
    """Return a huggingface tokenizer for *lm*.

    ``lm`` may be a short alias listed in ``lm_mp`` or a full model name.
    """
    model_name = lm_mp.get(lm, lm)
    return AutoTokenizer.from_pretrained(model_name)
class DittoDataset(data.Dataset):
    """EM dataset: pairs of serialized entities with binary match labels."""

    def __init__(self,
                 path,
                 max_len=256,
                 size=None,
                 lm='roberta',
                 da=None):
        """Initialize the dataset.

        Args:
            path (str or list of str): a dataset file, or a list of raw
                lines; each line is "left \\t right \\t label"
            max_len (int, optional): maximum sequence length for truncation
            size (int, optional): if set, keep only the first ``size`` pairs
            lm (str, optional): the language model name
            da (str, optional): the data augmentation operator (None = off)
        """
        self.tokenizer = get_tokenizer(lm)
        self.pairs = []
        self.labels = []
        self.max_len = max_len
        self.size = size

        if isinstance(path, list):
            lines = path
        else:
            # read the file eagerly so the handle is not leaked
            # (previously ``open(path)`` was iterated without being closed)
            with open(path) as fin:
                lines = fin.readlines()

        for line in lines:
            s1, s2, label = line.strip().split('\t')
            self.pairs.append((s1, s2))
            self.labels.append(int(label))

        # optional truncation of the dataset (no-op when size is None)
        self.pairs = self.pairs[:size]
        self.labels = self.labels[:size]

        self.da = da
        if da is not None:
            self.augmenter = Augmenter()
        else:
            self.augmenter = None

    def __len__(self):
        """Return the size of the dataset."""
        return len(self.pairs)

    def __getitem__(self, idx):
        """Return a tokenized item of the dataset.

        Args:
            idx (int): the index of the item

        Returns:
            List of int: token ID's of the two entities
            List of int: token ID's of the two entities augmented (if da is set)
            int: the label of the pair (0: unmatch, 1: match)
        """
        left = self.pairs[idx][0]
        right = self.pairs[idx][1]

        # left + right encoded as a single sequence pair
        x = self.tokenizer.encode(text=left,
                                  text_pair=right,
                                  max_length=self.max_len,
                                  truncation=True)

        # augment if da is set
        if self.da is not None:
            combined = self.augmenter.augment_sent(left + ' [SEP] ' + right, self.da)
            left, right = combined.split(' [SEP] ')
            x_aug = self.tokenizer.encode(text=left,
                                          text_pair=right,
                                          max_length=self.max_len,
                                          truncation=True)
            return x, x_aug, self.labels[idx]
        else:
            return x, self.labels[idx]

    @staticmethod
    def pad(batch):
        """Merge a list of dataset items into a train/test batch.

        Args:
            batch (list of tuple): a list of dataset items

        Returns:
            LongTensor: x1 of shape (batch_size, seq_len)
            LongTensor: x2 of shape (batch_size, seq_len).
                Elements of x1 and x2 are padded to the same length
            LongTensor: a batch of labels, (batch_size,)
        """
        if len(batch[0]) == 3:
            # augmented batch: pad x1 and x2 jointly to a common length
            x1, x2, y = zip(*batch)
            maxlen = max([len(x) for x in x1+x2])
            x1 = [xi + [0]*(maxlen - len(xi)) for xi in x1]
            x2 = [xi + [0]*(maxlen - len(xi)) for xi in x2]
            return torch.LongTensor(x1), \
                   torch.LongTensor(x2), \
                   torch.LongTensor(y)
        else:
            x12, y = zip(*batch)
            maxlen = max([len(x) for x in x12])
            x12 = [xi + [0]*(maxlen - len(xi)) for xi in x12]
            return torch.LongTensor(x12), \
                   torch.LongTensor(y)
| 3,634 | 29.805085 | 85 | py |
ditto | ditto-master/ditto_light/ditto.py | import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
import numpy as np
import sklearn.metrics as metrics
import argparse
from .dataset import DittoDataset
from torch.utils import data
from transformers import AutoModel, AdamW, get_linear_schedule_with_warmup
from tensorboardX import SummaryWriter
from apex import amp
# map a language model alias to huggingface's pre-trained model name
lm_mp = {'roberta': 'roberta-base',
         'distilbert': 'distilbert-base-uncased'}
class DittoModel(nn.Module):
    """A baseline model for EM."""

    def __init__(self, device='cuda', lm='roberta', alpha_aug=0.8):
        super().__init__()
        # resolve the alias to a huggingface model name if needed
        model_name = lm_mp[lm] if lm in lm_mp else lm
        self.bert = AutoModel.from_pretrained(model_name)
        self.device = device
        self.alpha_aug = alpha_aug
        # binary classification head on top of the [CLS] embedding
        hidden_size = self.bert.config.hidden_size
        self.fc = torch.nn.Linear(hidden_size, 2)

    def forward(self, x1, x2=None):
        """Encode the left, right, and the concatenation of left+right.

        Args:
            x1 (LongTensor): a batch of ID's
            x2 (LongTensor, optional): a batch of ID's (augmented)

        Returns:
            Tensor: binary prediction (logits over the two classes)
        """
        x1 = x1.to(self.device)  # (batch_size, seq_len)
        if x2 is None:
            # plain encoding: the [CLS] embedding of each sequence
            enc = self.bert(x1)[0][:, 0, :]
        else:
            # MixDA: interpolate original and augmented [CLS] encodings
            x2 = x2.to(self.device)  # (batch_size, seq_len)
            enc = self.bert(torch.cat((x1, x2)))[0][:, 0, :]
            batch_size = len(x1)
            enc1 = enc[:batch_size]  # (batch_size, emb_size)
            enc2 = enc[batch_size:]  # (batch_size, emb_size)
            aug_lam = np.random.beta(self.alpha_aug, self.alpha_aug)
            enc = enc1 * aug_lam + enc2 * (1.0 - aug_lam)
        return self.fc(enc)
def evaluate(model, iterator, threshold=None):
    """Evaluate a model on a validation/test dataset.

    Args:
        model (DMModel): the EM model
        iterator (Iterator): the valid/test dataset iterator
        threshold (float, optional): the threshold on the 0-class

    Returns:
        float: the F1 score
        float (optional): if threshold is not provided, the threshold
            value that gives the optimal F1
    """
    all_y = []
    all_probs = []
    # collect the match-class probabilities over the whole dataset
    with torch.no_grad():
        for batch in iterator:
            x, y = batch
            logits = model(x)
            probs = logits.softmax(dim=1)[:, 1]
            all_probs += probs.cpu().numpy().tolist()
            all_y += y.cpu().numpy().tolist()

    if threshold is not None:
        # fixed threshold: just report F1
        pred = [1 if p > threshold else 0 for p in all_probs]
        f1 = metrics.f1_score(all_y, pred)
        return f1
    else:
        # sweep thresholds on a 0.05 grid and keep the best F1
        best_th = 0.5
        f1 = 0.0
        for th in np.arange(0.0, 1.0, 0.05):
            pred = [1 if p > th else 0 for p in all_probs]
            new_f1 = metrics.f1_score(all_y, pred)
            if new_f1 > f1:
                f1 = new_f1
                best_th = th

        return f1, best_th
def train_step(train_iter, model, optimizer, scheduler, hp):
    """Run one optimization pass over the entire training loader.

    Args:
        train_iter (Iterator): the train data loader
        model (DMModel): the model
        optimizer (Optimizer): the optimizer (Adam or AdamW)
        scheduler (LRScheduler): learning rate scheduler
        hp (Namespace): other hyper-parameters (e.g., fp16)

    Returns:
        None
    """
    criterion = nn.CrossEntropyLoss()
    # criterion = nn.MSELoss()
    for i, batch in enumerate(train_iter):
        optimizer.zero_grad()
        # a batch is (x, y) or, when MixDA augmentation is on, (x1, x2, y)
        if len(batch) == 2:
            x, y = batch
            prediction = model(x)
        else:
            x1, x2, y = batch
            prediction = model(x1, x2)
        loss = criterion(prediction, y.to(model.device))
        if hp.fp16:
            # scale the loss to avoid gradient underflow in fp16
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        optimizer.step()
        scheduler.step()
        if i % 10 == 0: # monitoring
            print(f"step: {i}, loss: {loss.item()}")
        del loss  # release the graph before the next batch
def train(trainset, validset, testset, run_tag, hp):
    """Train and evaluate the model.

    Args:
        trainset (DittoDataset): the training set
        validset (DittoDataset): the validation set
        testset (DittoDataset): the test set
        run_tag (str): the tag of the run
        hp (Namespace): Hyper-parameters (e.g., batch_size,
            learning rate, fp16)

    Returns:
        None
    """
    padder = trainset.pad
    # create the DataLoaders
    train_iter = data.DataLoader(dataset=trainset,
                                 batch_size=hp.batch_size,
                                 shuffle=True,
                                 num_workers=0,
                                 collate_fn=padder)
    valid_iter = data.DataLoader(dataset=validset,
                                 batch_size=hp.batch_size*16,
                                 shuffle=False,
                                 num_workers=0,
                                 collate_fn=padder)
    test_iter = data.DataLoader(dataset=testset,
                                batch_size=hp.batch_size*16,
                                shuffle=False,
                                num_workers=0,
                                collate_fn=padder)

    # initialize model, optimizer, and LR scheduler
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = DittoModel(device=device,
                       lm=hp.lm,
                       alpha_aug=hp.alpha_aug)
    # bugfix: move to the selected device instead of unconditionally calling
    # .cuda(), which crashed on CPU-only machines despite computing `device`
    model = model.to(device)
    optimizer = AdamW(model.parameters(), lr=hp.lr)
    if hp.fp16:
        model, optimizer = amp.initialize(model, optimizer, opt_level='O2')

    # linear LR decay over the total number of optimization steps
    num_steps = (len(trainset) // hp.batch_size) * hp.n_epochs
    scheduler = get_linear_schedule_with_warmup(optimizer,
                                                num_warmup_steps=0,
                                                num_training_steps=num_steps)

    # logging with tensorboardX
    writer = SummaryWriter(log_dir=hp.logdir)

    best_dev_f1 = best_test_f1 = 0.0
    for epoch in range(1, hp.n_epochs+1):
        # train
        model.train()
        train_step(train_iter, model, optimizer, scheduler, hp)

        # eval: pick the threshold on dev and reuse it on test
        model.eval()
        dev_f1, th = evaluate(model, valid_iter)
        test_f1 = evaluate(model, test_iter, threshold=th)

        # checkpoint only when the dev F1 improves
        if dev_f1 > best_dev_f1:
            best_dev_f1 = dev_f1
            best_test_f1 = test_f1
            if hp.save_model:
                # create the directory if not exist
                directory = os.path.join(hp.logdir, hp.task)
                if not os.path.exists(directory):
                    os.makedirs(directory)

                # save the checkpoints for each component
                ckpt_path = os.path.join(hp.logdir, hp.task, 'model.pt')
                ckpt = {'model': model.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        'scheduler': scheduler.state_dict(),
                        'epoch': epoch}
                torch.save(ckpt, ckpt_path)

        print(f"epoch {epoch}: dev_f1={dev_f1}, f1={test_f1}, best_f1={best_test_f1}")

        # logging
        scalars = {'f1': dev_f1,
                   't_f1': test_f1}
        writer.add_scalars(run_tag, scalars, epoch)

    writer.close()
| 7,593 | 31.592275 | 86 | py |
ditto | ditto-master/blocking/train_blocker.py | import os
import argparse
import json
import sys
import math
sys.path.insert(0, "sentence-transformers")
from sentence_transformers.readers import InputExample
from sentence_transformers import models, losses
from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from torch.utils.data import DataLoader
class Reader:
    """A simple reader class for the matching datasets."""

    def __init__(self):
        # running counter used as a globally unique example guid
        self.guid = 0

    def get_examples(self, fn):
        """Read a tab-separated dataset file into ``InputExample`` objects.

        Args:
            fn (str): path of a file whose lines are "sent1 \\t sent2 \\t label"

        Returns:
            list of InputExample: the parsed examples
        """
        examples = []
        # context manager closes the file handle deterministically
        # (previously ``open(fn)`` was iterated without being closed)
        with open(fn) as fin:
            for line in fin:
                sent1, sent2, label = line.strip().split('\t')
                examples.append(InputExample(guid=self.guid,
                                             texts=[sent1, sent2],
                                             label=int(label)))
                self.guid += 1
        return examples
def train(hp):
    """Train the advanced blocking model.

    Fine-tunes a mean-pooled sentence-transformer on the labeled pairs with a
    softmax classification loss, and stores the trained model in hp.model_fn.

    Args:
        hp (Namespace): the hyperparameters

    Returns:
        None
    """
    # define model
    model_names = {'distilbert': 'distilbert-base-uncased',
                   'bert': 'bert-base-uncased',
                   'albert': 'albert-base-v2' }
    word_embedding_model = models.Transformer(model_names[hp.lm])
    # mean pooling over token embeddings produces the sentence embedding
    pooling_model = models.Pooling(word_embedding_model\
                                   .get_word_embedding_dimension(),
                                   pooling_mode_mean_tokens=True,
                                   pooling_mode_cls_token=False,
                                   pooling_mode_max_tokens=False)
    model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
    # load the training and validation data
    reader = Reader()
    trainset = SentencesDataset(examples=reader.get_examples(hp.train_fn),
                                model=model)
    train_dataloader = DataLoader(trainset,
                                  shuffle=True,
                                  batch_size=hp.batch_size)
    train_loss = losses.SoftmaxLoss(model=model,
                                    sentence_embedding_dimension=model\
                                    .get_sentence_embedding_dimension(),
                                    num_labels=2)
    dev_data = SentencesDataset(examples=reader\
                                .get_examples(hp.valid_fn),
                                model=model)
    dev_dataloader = DataLoader(dev_data,
                                shuffle=False,
                                batch_size=hp.batch_size)
    evaluator = EmbeddingSimilarityEvaluator(dev_dataloader)
    # NOTE(review): len(train_dataloader) is already measured in batches, so
    # dividing by batch_size again makes the warm-up much shorter than 10% of
    # the training steps — confirm this is the intended warm-up length.
    warmup_steps = math.ceil(len(train_dataloader) \
                   * hp.n_epochs / hp.batch_size * 0.1) #10% of train data for warm-up
    # remove any previous checkpoint so model.fit writes a fresh one
    if os.path.exists(hp.model_fn):
        import shutil
        shutil.rmtree(hp.model_fn)
    # Train the model
    model.fit(train_objectives=[(train_dataloader, train_loss)],
              evaluator=evaluator,
              epochs=hp.n_epochs,
              evaluation_steps=1000,
              warmup_steps=warmup_steps,
              output_path=hp.model_fn,
              fp16=hp.fp16,
              fp16_opt_level='O2')
if __name__=="__main__":
    # command-line entry point: dataset paths, model output, and training knobs
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_fn", type=str, default="../data/er_magellan/Structured/Beer/train.txt")
    parser.add_argument("--valid_fn", type=str, default="../data/er_magellan/Structured/Beer/valid.txt")
    parser.add_argument("--model_fn", type=str, default="model.pth")
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--n_epochs", type=int, default=20)
    parser.add_argument("--logdir", type=str, default="checkpoints/")
    parser.add_argument("--lm", type=str, default='distilbert')
    parser.add_argument("--fp16", dest="fp16", action="store_true")
    hp = parser.parse_args()
    train(hp)
| 3,755 | 34.433962 | 104 | py |
LiteFlowNet2 | LiteFlowNet2-master/models/testing/test_iter.py | #!/usr/bin/env python
import os, sys
import subprocess
from math import ceil
caffe_bin = 'bin/caffe.bin'
img_size_bin = 'bin/get_image_size'
template = './deploy_MODEL.prototxt' # MODEL = LiteFlowNet2-ft-sintel or LiteFlowNet2-ft-kitti
cnn_model = 'MODEL'
# =========================================================
def get_image_size(filename):
    """Return [width, height] of *filename* via the external helper binary.

    The helper prints the dimensions as "w,h"; the program exits if the
    output cannot be parsed into exactly two integers.
    """
    global img_size_bin
    raw = str(subprocess.check_output([img_size_bin, filename]))
    dim_list = [int(dimstr) for dimstr in raw.split(',')]
    if len(dim_list) != 2:
        print('Could not determine size of image %s' % filename)
        sys.exit(1)
    return dim_list
def sizes_equal(size1, size2):
    """Return True when both sizes agree in width and height."""
    return (size1[0], size1[1]) == (size2[0], size2[1])
def check_image_lists(lists):
    """Validate two image-list files: equal length, all files exist, and
    every image has the same size as the first one.

    Returns (width, height, number_of_pairs); exits the program on any
    violation.
    """
    def read_list(fn):
        with open(fn, 'r') as f:
            return [line.strip() for line in f.readlines() if len(line.strip()) > 0]

    def require_file(fn):
        if not os.path.isfile(fn):
            print('Image %s not found' % fn)
            sys.exit(1)

    images = [read_list(lists[0]), read_list(lists[1])]

    if len(images[0]) != len(images[1]):
        print("Unequal amount of images in the given lists (%d vs. %d)" % (len(images[0]), len(images[1])))
        sys.exit(1)

    require_file(images[0][0])
    base_size = get_image_size(images[0][0])

    for idx, (img1, img2) in enumerate(zip(images[0], images[1])):
        print("Checking image pair %d of %d" % (idx+1, len(images[0])))
        require_file(img1)
        require_file(img2)
        img1_size = get_image_size(img1)
        img2_size = get_image_size(img2)
        if not (sizes_equal(base_size, img1_size) and sizes_equal(base_size, img2_size)):
            print('The images do not all have the same size. (Images: %s or %s vs. %s)\n Please use the pair-mode.' % (img1, img2, images[0][idx]))
            sys.exit(1)

    return base_size[0], base_size[1], len(images[0])
# Change to the script's directory so relative paths (binaries, templates,
# caffemodel) resolve correctly.
my_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(my_dir)

if not (os.path.isfile(caffe_bin) and os.path.isfile(img_size_bin)):
    print('Caffe tool binaries not found. Did you compile caffe with tools (make all tools)?')
    sys.exit(1)

# argv: <img1_list.txt> <img2_list.txt> <output_folder>
img_files = sys.argv[1:]
print("Image files: " + str(img_files))

# Frame-by-frame processing
images = [[], []]
with open(img_files[0], 'r') as f:
    images[0] = [line.strip() for line in f.readlines() if len(line.strip()) > 0]
with open(img_files[1], 'r') as f:
    images[1] = [line.strip() for line in f.readlines() if len(line.strip()) > 0]

for idx in reversed(range(len(images[0]))):
    # bugfix: img1/img2 were previously referenced in the error message below
    # without ever being assigned, raising a NameError instead of the message
    img1 = images[0][idx]
    img2 = images[1][idx]
    img1_size = get_image_size(img1)
    img2_size = get_image_size(img2)
    if not (sizes_equal(img1_size, img2_size)):
        print('The images do not have the same size. (Images: %s or %s vs. %s)\n Please use the pair-mode.' % (img1, img2, images[0][idx]))
        sys.exit(1)
    width = img1_size[0]
    height = img1_size[1]

    # Prepare prototxt
    subprocess.call('mkdir -p tmp', shell=True)
    with open('tmp/img1.txt', "w") as tfile:
        tfile.write("%s\n" % images[0][idx])
    with open('tmp/img2.txt', "w") as tfile:
        tfile.write("%s\n" % images[1][idx])

    # Network dimensions must be divisible by 32; the rescale coefficients map
    # the adapted resolution back to the original one.
    divisor = 32.
    adapted_width = ceil(width/divisor) * divisor
    adapted_height = ceil(height/divisor) * divisor
    rescale_coeff_x = width / adapted_width
    rescale_coeff_y = height / adapted_height

    replacement_list = {
        '$ADAPTED_WIDTH': ('%d' % adapted_width),
        '$ADAPTED_HEIGHT': ('%d' % adapted_height),
        '$TARGET_WIDTH': ('%d' % width),
        '$TARGET_HEIGHT': ('%d' % height),
        '$SCALE_WIDTH': ('%.8f' % rescale_coeff_x),
        '$SCALE_HEIGHT': ('%.8f' % rescale_coeff_y),
        '$OUTFOLDER': ('%s' % '"' + img_files[2] + '"'),
        '$CNN': ('%s' % '"' + cnn_model + '-"'),
    }

    # instantiate the deploy prototxt from the template
    proto = ''
    with open(template, "r") as tfile:
        proto = tfile.read()
    for r in replacement_list:
        proto = proto.replace(r, replacement_list[r])
    with open('tmp/deploy.prototxt', "w") as tfile:
        tfile.write(proto)

    # Run caffe
    args = [caffe_bin, 'test', '-model', 'tmp/deploy.prototxt',
            '-weights', '../trained/' + cnn_model + '.caffemodel',
            '-iterations', str(1),
            '-gpu', '0']
    cmd = ' '.join(args)
    print('Executing %s' % cmd)
    subprocess.call(args)

    # caffe always writes index 0000000; rename to the actual frame index
    if idx > 0:
        os.rename(img_files[2] + '/' + cnn_model + '-0000000.flo', img_files[2] + '/' + cnn_model +'-' + '{0:07d}'.format(idx) + '.flo')

print('\nThe resulting FLOW is stored in CNN-NNNNNNN.flo')
| 4,781 | 31.09396 | 147 | py |
LiteFlowNet2 | LiteFlowNet2-master/models/testing/test_batch.py | #!/usr/bin/env python
import os, sys
import subprocess
from math import ceil
caffe_bin = 'bin/caffe.bin'
img_size_bin = 'bin/get_image_size'
template = './deploy_MODEL.prototxt' # MODEL = LiteFlowNet2-ft-sintel or LiteFlowNet2-ft-kitti
cnn_model = 'MODEL'
# =========================================================
def get_image_size(filename):
    """Return the [width, height] of *filename*.

    Delegates to the external ``get_image_size`` helper binary, which prints
    the dimensions as "w,h"; exits the program if the output is malformed.
    """
    global img_size_bin
    dim_list = [int(dimstr) for dimstr in str(subprocess.check_output([img_size_bin, filename])).split(',')]
    if not len(dim_list) == 2:
        print('Could not determine size of image %s' % filename)
        sys.exit(1)
    return dim_list
def sizes_equal(size1, size2):
    """Return True when both sizes agree in width and height."""
    return size1[0] == size2[0] and size1[1] == size2[1]
def check_image_lists(lists):
    """Read two image-list files and return (width, height, count).

    Verifies that the lists have equal length and that the first image
    exists; the size of that first image is reported for the whole batch.
    Exits the program on a violation.
    """
    def read_list(fn):
        with open(fn, 'r') as f:
            return [line.strip() for line in f.readlines() if len(line.strip()) > 0]

    images = [read_list(lists[0]), read_list(lists[1])]

    if len(images[0]) != len(images[1]):
        print("Unequal amount of images in the given lists (%d vs. %d)" % (len(images[0]), len(images[1])))
        sys.exit(1)

    if not os.path.isfile(images[0][0]):
        print('Image %s not found' % images[0][0])
        sys.exit(1)

    base_size = get_image_size(images[0][0])
    return base_size[0], base_size[1], len(images[0])
# Change to the script's directory so relative paths (binaries, templates,
# caffemodel) resolve correctly.
my_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(my_dir)
if not (os.path.isfile(caffe_bin) and os.path.isfile(img_size_bin)):
    print('Caffe tool binaries not found. Did you compile caffe with tools (make all tools)?')
    sys.exit(1)
# Two input modes: .txt list files (batch mode) or a single image pair.
img_files = sys.argv[1:]
using_lists = False
list_length = 1
if img_files[0][-4:].lower() == '.txt':
    print("Checking the images in your lists...")
    (width, height, list_length) = check_image_lists(img_files)
    using_lists = True
    print("Done.")
else:
    print("Image files: " + str(img_files))
    # Check images
    # NOTE(review): img_files is argv[1:], which also contains the output
    # folder — these checks appear to run on it too; confirm intended.
    for img_file in img_files:
        if not os.path.isfile(img_file):
            print('Image %s not found' % img_file)
            sys.exit(1)
    # Get image sizes and check
    img_sizes = [get_image_size(img_file) for img_file in img_files]
    print("Image sizes: " + str(img_sizes))
    if not sizes_equal(img_sizes[0], img_sizes[1]):
        print('Images do not have the same size.')
        sys.exit(1)
    width = img_sizes[0][0]
    height = img_sizes[0][1]
# Prepare prototxt
subprocess.call('mkdir -p tmp', shell=True)
if not using_lists:
    with open('tmp/img1.txt', "w") as tfile:
        tfile.write("%s\n" % img_files[0])
    with open('tmp/img2.txt', "w") as tfile:
        tfile.write("%s\n" % img_files[1])
else:
    subprocess.call(['cp', img_files[0], 'tmp/img1.txt'])
    subprocess.call(['cp', img_files[1], 'tmp/img2.txt'])
# Network dimensions must be divisible by 32; the rescale coefficients map
# the adapted resolution back to the original one.
divisor = 32.
adapted_width = ceil(width/divisor) * divisor
adapted_height = ceil(height/divisor) * divisor
rescale_coeff_x = width / adapted_width
rescale_coeff_y = height / adapted_height
replacement_list = {
    '$ADAPTED_WIDTH': ('%d' % adapted_width),
    '$ADAPTED_HEIGHT': ('%d' % adapted_height),
    '$TARGET_WIDTH': ('%d' % width),
    '$TARGET_HEIGHT': ('%d' % height),
    '$SCALE_WIDTH': ('%.8f' % rescale_coeff_x),
    '$SCALE_HEIGHT': ('%.8f' % rescale_coeff_y),
    '$OUTFOLDER': ('%s' % '"' + img_files[2] + '"'),
    '$CNN': ('%s' % '"' + cnn_model + '-"')
}
# Instantiate the deploy prototxt from the template.
proto = ''
with open(template, "r") as tfile:
    proto = tfile.read()
for r in replacement_list:
    proto = proto.replace(r, replacement_list[r])
with open('tmp/deploy.prototxt', "w") as tfile:
    tfile.write(proto)
# Run caffe
args = [caffe_bin, 'test', '-model', 'tmp/deploy.prototxt',
        '-weights', '../trained/' + cnn_model + '.caffemodel',
        '-iterations', str(list_length),
        '-gpu', '0']
cmd = str.join(' ', args)
print('Executing %s' % cmd)
subprocess.call(args)
print('\nThe resulting FLOW is stored in CNN-NNNNNNN.flo')
| 3,970 | 27.775362 | 108 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.