repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
CIF-HieraDist | CIF-HieraDist-main/fairseq/tasks/multilingual_masked_lm.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
import torch
from fairseq import utils
from fairseq.data import (
ConcatDataset,
Dictionary,
IdDataset,
MaskTokensDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
PadDataset,
PrependTokenDataset,
RawLabelDataset,
ResamplingDataset,
SortDataset,
TokenBlockDataset,
data_utils,
encoders,
)
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("multilingual_masked_lm")
class MultiLingualMaskedLMTask(LegacyFairseqTask):
"""Task for training masked language models (e.g., BERT, RoBERTa)."""
    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # Positional argument: one or more data directories, colon separated.
        parser.add_argument(
            "data",
            help="colon separated path to data directories list, \
                            will be iterated upon during epochs in round-robin manner",
        )
        # Controls how raw token streams are chunked into training samples.
        parser.add_argument(
            "--sample-break-mode",
            default="complete",
            choices=["none", "complete", "complete_doc", "eos"],
            help='If omitted or "none", fills each sample with tokens-per-sample '
            'tokens. If set to "complete", splits samples only at the end '
            "of sentence, but may include multiple sentences per sample. "
            '"complete_doc" is similar but respects doc boundaries. '
            'If set to "eos", includes only one sentence per sample.',
        )
        parser.add_argument(
            "--tokens-per-sample",
            default=512,
            type=int,
            help="max number of total tokens over all segments "
            "per sample for BERT dataset",
        )
        # BERT-style masking hyper-parameters.
        parser.add_argument(
            "--mask-prob",
            default=0.15,
            type=float,
            help="probability of replacing a token with mask",
        )
        parser.add_argument(
            "--leave-unmasked-prob",
            default=0.1,
            type=float,
            help="probability that a masked token is unmasked",
        )
        parser.add_argument(
            "--random-token-prob",
            default=0.1,
            type=float,
            help="probability of replacing a token with a random token",
        )
        parser.add_argument(
            "--freq-weighted-replacement",
            action="store_true",
            help="sample random replacement words based on word frequencies",
        )
        parser.add_argument(
            "--mask-whole-words",
            default=False,
            action="store_true",
            help="mask whole words; you may also want to set --bpe",
        )
        # alpha < 1.0 upsamples low-resource languages (see _get_sample_prob).
        parser.add_argument(
            "--multilang-sampling-alpha",
            type=float,
            default=1.0,
            help="smoothing alpha for sample rations across multiple datasets",
        )
    def __init__(self, args, dictionary):
        """Initialize the task with parsed args and a shared dictionary.

        Args:
            args: parsed command-line arguments namespace
            dictionary: shared :class:`~fairseq.data.Dictionary` used for
                both source and target sides
        """
        super().__init__(args)
        self.dictionary = dictionary
        self.seed = args.seed

        # add mask token (extends the dictionary in place; index is reused
        # by the masking dataset below)
        self.mask_idx = dictionary.add_symbol("<mask>")
    @classmethod
    def setup_task(cls, args, **kwargs):
        """Load the shared dictionary from the first data path and build the task."""
        paths = utils.split_paths(args.data)
        assert len(paths) > 0
        # The dictionary is assumed identical across all data shards, so it
        # is only read from the first one.
        dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
        logger.info("dictionary: {} types".format(len(dictionary)))
        return cls(args, dictionary)
    def _get_whole_word_mask(self):
        """Build a per-token tensor marking word beginnings, or ``None``.

        Returns a ``ByteTensor`` of length ``len(source_dictionary)`` where
        entry ``i`` is 1 iff token ``i`` starts a word, used by
        ``MaskTokensDataset`` to mask whole words at once.  Returns ``None``
        when ``--mask-whole-words`` is not set.
        """
        # create masked input and targets
        if self.args.mask_whole_words:
            bpe = encoders.build_bpe(self.args)
            if bpe is not None:
                # NOTE(review): if --mask-whole-words is set but no BPE can be
                # built, mask_whole_words is never assigned and the final
                # `return` raises UnboundLocalError — confirm callers always
                # provide --bpe in that case.

                def is_beginning_of_word(i):
                    if i < self.source_dictionary.nspecial:
                        # special elements are always considered beginnings
                        return True
                    tok = self.source_dictionary[i]
                    if tok.startswith("madeupword"):
                        return True
                    try:
                        return bpe.is_beginning_of_word(tok)
                    except ValueError:
                        return True

                mask_whole_words = torch.ByteTensor(
                    list(map(is_beginning_of_word, range(len(self.source_dictionary))))
                )
        else:
            mask_whole_words = None
        return mask_whole_words
    def _get_sample_prob(self, dataset_lens):
        """
        Get smoothed sampling probability by languages. This helps low resource
        languages by upsampling them.

        Args:
            dataset_lens: 1-D numpy array of per-language dataset sizes.

        Returns:
            1-D numpy array of sampling probabilities (sums to 1).  With
            ``multilang_sampling_alpha == 1.0`` this is simply proportional
            sampling; alpha < 1 flattens the distribution toward uniform.
        """
        prob = dataset_lens / dataset_lens.sum()
        smoothed_prob = prob**self.args.multilang_sampling_alpha
        smoothed_prob = smoothed_prob / smoothed_prob.sum()
        return smoothed_prob
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
languages = sorted(
name
for name in os.listdir(data_path)
if os.path.isdir(os.path.join(data_path, name))
)
logger.info("Training on {0} languages: {1}".format(len(languages), languages))
logger.info(
"Language to id mapping: ", {lang: id for id, lang in enumerate(languages)}
)
mask_whole_words = self._get_whole_word_mask()
lang_datasets = []
for lang_id, language in enumerate(languages):
split_path = os.path.join(data_path, language, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
# create continuous blocks of tokens
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample - 1, # one less for <s>
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode=self.args.sample_break_mode,
)
logger.info("loaded {} blocks from: {}".format(len(dataset), split_path))
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
dataset,
self.source_dictionary,
pad_idx=self.source_dictionary.pad(),
mask_idx=self.mask_idx,
seed=self.args.seed,
mask_prob=self.args.mask_prob,
leave_unmasked_prob=self.args.leave_unmasked_prob,
random_token_prob=self.args.random_token_prob,
freq_weighted_replacement=self.args.freq_weighted_replacement,
mask_whole_words=mask_whole_words,
)
lang_dataset = NestedDictionaryDataset(
{
"net_input": {
"src_tokens": PadDataset(
src_dataset,
pad_idx=self.source_dictionary.pad(),
left_pad=False,
),
"src_lengths": NumelDataset(src_dataset, reduce=False),
},
"target": PadDataset(
tgt_dataset,
pad_idx=self.source_dictionary.pad(),
left_pad=False,
),
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_dataset, reduce=True),
"lang_id": RawLabelDataset([lang_id] * src_dataset.sizes.shape[0]),
},
sizes=[src_dataset.sizes],
)
lang_datasets.append(lang_dataset)
dataset_lengths = np.array(
[len(d) for d in lang_datasets],
dtype=float,
)
logger.info(
"loaded total {} blocks for all languages".format(
dataset_lengths.sum(),
)
)
if split == self.args.train_subset:
# For train subset, additionally up or down sample languages.
sample_probs = self._get_sample_prob(dataset_lengths)
logger.info(
"Sample probability by language: ",
{
lang: "{0:.4f}".format(sample_probs[id])
for id, lang in enumerate(languages)
},
)
size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
logger.info(
"Up/Down Sampling ratio by language: ",
{
lang: "{0:.2f}".format(size_ratio[id])
for id, lang in enumerate(languages)
},
)
resampled_lang_datasets = [
ResamplingDataset(
lang_datasets[i],
size_ratio=size_ratio[i],
seed=self.args.seed,
epoch=epoch,
replace=size_ratio[i] >= 1.0,
)
for i, d in enumerate(lang_datasets)
]
dataset = ConcatDataset(resampled_lang_datasets)
else:
dataset = ConcatDataset(lang_datasets)
lang_splits = [split]
for lang_id, lang_dataset in enumerate(lang_datasets):
split_name = split + "_" + languages[lang_id]
lang_splits.append(split_name)
self.datasets[split_name] = lang_dataset
# [TODO]: This is hacky for now to print validation ppl for each
# language individually. Maybe need task API changes to allow it
# in more generic ways.
if split in self.args.valid_subset:
self.args.valid_subset = self.args.valid_subset.replace(
split, ",".join(lang_splits)
)
with data_utils.numpy_seed(self.args.seed + epoch):
shuffle = np.random.permutation(len(dataset))
self.datasets[split] = SortDataset(
dataset,
sort_order=[
shuffle,
dataset.sizes,
],
)
    def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
        """Wrap raw token tensors into the batch structure the model expects.

        Args:
            src_tokens: list of source token tensors
            src_lengths: lengths of the corresponding tensors
            sort (bool): sort resulting dataset by source length
        """
        src_dataset = PadDataset(
            TokenBlockDataset(
                src_tokens,
                src_lengths,
                self.args.tokens_per_sample - 1,  # one less for <s>
                pad=self.source_dictionary.pad(),
                eos=self.source_dictionary.eos(),
                break_mode="eos",
            ),
            pad_idx=self.source_dictionary.pad(),
            left_pad=False,
        )
        # Mirror training-time preprocessing: prepend <s> ([CLS]).
        src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
        src_dataset = NestedDictionaryDataset(
            {
                "id": IdDataset(),
                "net_input": {
                    "src_tokens": src_dataset,
                    "src_lengths": NumelDataset(src_dataset, reduce=False),
                },
            },
            sizes=src_lengths,
        )
        if sort:
            src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
        return src_dataset
    @property
    def source_dictionary(self):
        """Single shared dictionary serves as the source-side vocabulary."""
        return self.dictionary
    @property
    def target_dictionary(self):
        """Single shared dictionary serves as the target-side vocabulary."""
        return self.dictionary
| 12,144 | 34.825959 | 87 | py |
CIF-HieraDist | CIF-HieraDist-main/fairseq/tasks/online_backtranslation.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import json
import logging
import math
import os
from argparse import Namespace
from collections import OrderedDict, defaultdict
from pathlib import Path
from typing import Dict, Sequence, Tuple
from argparse import ArgumentError
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import fairseq
from fairseq import metrics, options, utils
from fairseq.data import (
FairseqDataset,
LanguagePairDataset,
NoisingDataset,
PrependTokenDataset,
RoundRobinZipDatasets,
TransformEosLangPairDataset,
data_utils,
encoders,
)
from fairseq.sequence_generator import SequenceGenerator
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask, load_langpair_dataset
logger = logging.getLogger(__name__)
class PiecewiseLinearFn:
    """Piecewise linear function. Can be configured with a string."""

    def __init__(self, pieces: Sequence[Tuple[int, float]]):
        assert pieces == sorted(
            pieces
        ), f"PiecewiseLinearFn configuration should be sorted, received: {pieces}"

        self.pieces = pieces

    def __call__(self, x: int) -> float:
        """Evaluate the function at *x* by interpolating between knots."""
        # Walk consecutive knot pairs; interpolate inside the first segment
        # that contains x.
        for (x_lo, y_lo), (x_hi, y_hi) in zip(self.pieces, self.pieces[1:]):
            if x_lo <= x <= x_hi:
                return y_lo + (x - x_lo) * (y_hi - y_lo) / (x_hi - x_lo)

        # x lies outside all segments: hold the value of the last knot.
        return self.pieces[-1][1]

    @staticmethod
    def from_string(configuration: str) -> "PiecewiseLinearFn":
        """
        Parse the configuration of lambda coefficient (for scheduling).
            x = "3"                  # lambda will be a constant equal to x
            x = "0:1,1000:0"         # lambda will start from 1 and linearly decrease
                                     # to 0 during the first 1000 iterations
            x = "0:0,1000:0,2000:1"  # lambda will be equal to 0 for the first 1000
                                     # iterations, then will linearly increase to 1 until iteration 2000
        """
        if isinstance(configuration, float):
            return PiecewiseLinearFn([(0, configuration)])

        try:
            fragments = configuration.split(",")
            if len(fragments) == 1:
                # A single value means a constant function.
                return PiecewiseLinearFn([(0, float(configuration))])
            knots = []
            for fragment in fragments:
                step, value = fragment.split(":")
                knots.append((int(step), float(value)))
            return PiecewiseLinearFn(knots)
        except Exception:
            raise ValueError(
                f"Invalid PiecewiseLinearFn configuration: {configuration!r}"
            )

    @staticmethod
    def one() -> "PiecewiseLinearFn":
        """Constant function equal to 1.0 everywhere."""
        return PiecewiseLinearFn([(0, 1.0)])
@register_task("online_backtranslation")
class OnlineBackTranslationTask(TranslationTask):
    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        # Generic translation args
        parser.add_argument('data', help='colon separated path to data directories list, \
                            will be iterated upon during epochs in round-robin manner; \
                            however, valid and test data are always in the first directory to \
                            avoid the need for repeating them in all directories')
        parser.add_argument('--mono-langs', metavar='MONO_LANGS',
                            help='monolingual languages for training')
        parser.add_argument('--valid-lang-pairs', default=None, metavar='VALID_LANG_PAIRS',
                            help='language pairs for validation')
        parser.add_argument('--load-alignments', action='store_true',
                            help='load the binarized alignments')
        parser.add_argument('--left-pad-source', default='False', type=str, metavar='BOOL',
                            help='pad the source on the left')
        parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
                            help='pad the target on the left')
        parser.add_argument('--upsample-primary', default=1, type=int,
                            help='amount to upsample primary dataset')
        try:
            parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
                                help='max number of tokens in the source sequence')
            parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
                                help='max number of tokens in the target sequence')
        except ArgumentError:
            # this might have already been defined. Once we transition this to hydra it should be fine to add it here.
            pass
        parser.add_argument('--truncate-source', action='store_true', default=False,
                            help='truncate source to max-source-positions')
        parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
                            help='if >0, then bucket source and target lengths into N '
                                 'buckets and pad accordingly; this is useful on TPUs '
                                 'to minimize the number of compilations')

        # Denoising args
        parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N',
                            help='maximum word shuffle distance for denoising autoencoding data generation')
        parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N',
                            help='word dropout probability for denoising autoencoding data generation')
        parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N',
                            help='word blanking probability for denoising autoencoding data generation')

        # Backtranslation args
        parser.add_argument('--lambda-bt', default="1.0", type=str, metavar='N',
                            help='back-translation weight')
        parser.add_argument('--lambda-dae', default="1.0", type=str, metavar='N',
                            help='denoising auto-encoder weight')

        # Evaluation args
        parser.add_argument('--generate-one-by-one', action='store_true',
                            help='generate one sentence at a time for backtranslation')

        parser.add_argument('--eval-bleu', action='store_true',
                            help='evaluation with BLEU scores')
        parser.add_argument('--eval-bleu-detok', type=str, default="space",
                            help='detokenize before computing BLEU (e.g., "moses"); '
                                 'required if using --eval-bleu; use "space" to '
                                 'disable detokenization; see fairseq.data.encoders '
                                 'for other options')
        parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
                            help='args for building the tokenizer, if needed')
        parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
                            help='compute tokenized BLEU instead of sacrebleu')
        parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
                            help='remove BPE before computing BLEU')
        parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
                            help='generation args for BLUE scoring, '
                                 'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
        parser.add_argument('--eval-bleu-print-samples', action='store_true',
                            help='print sample generations during validation')
        # fmt: on
    def __init__(self, args, common_dict, mono_langs, valid_lang_pairs):
        """Initialize the task.

        Args:
            args: parsed command-line arguments
            common_dict: single dictionary shared by source and target sides
            mono_langs: list of monolingual language codes used for training
            valid_lang_pairs: list of "src-tgt" pairs used for validation
        """
        super().__init__(args, common_dict, common_dict)
        self.common_dict = common_dict
        self.mono_langs = mono_langs
        self.valid_lang_pairs = valid_lang_pairs

        self.SHOW_SAMPLES_INTERVAL = 1000
        # Start by showing samples
        self._show_samples_ctr = self.SHOW_SAMPLES_INTERVAL
        self.SHOW_SAMPLES_NUMBER = 5
        # Loss-weight schedules for the BT and DAE objectives.
        self.lambda_bt = PiecewiseLinearFn.from_string(args.lambda_bt)
        self.lambda_dae = PiecewiseLinearFn.from_string(args.lambda_dae)

        self.args = args
        self.data = utils.split_paths(self.args.data)
        if len(self.data) == 1:
            # A single directory containing "shard*" subdirectories is
            # expanded into one data path per shard.
            shards = list(Path(self.data[0]).glob("shard*"))
            if len(shards) > 0:
                # keep this as strings, since it can also be a manifold path
                old_data = self.data
                self.data = [str(shard) for shard in shards]
                logging.warning(f"Expanded data directory {old_data} to {self.data}")
    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup the task (e.g., load dictionaries).

        Args:
            args (argparse.Namespace): parsed command-line arguments
        """
        # --left-pad-* arrive as strings ('True'/'False'); coerce to bool.
        args.left_pad_source = options.eval_bool(args.left_pad_source)
        args.left_pad_target = options.eval_bool(args.left_pad_target)

        paths = utils.split_paths(args.data)
        assert len(paths) > 0
        assert args.mono_langs is not None

        mono_langs = args.mono_langs.split(",")
        valid_lang_pairs = args.valid_lang_pairs.split(",")

        # load dictionary (shared by all languages, read from first path)
        dict_path = os.path.join(paths[0], "dict.txt")
        common_dict = cls.load_dictionary(dict_path)

        return cls(args, common_dict, mono_langs, valid_lang_pairs)
    def load_dataset(self, split, epoch=1, combine=False, **kwargs) -> FairseqDataset:
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        if split == "train":
            # Training data is sharded round-robin across epochs.
            data_path = self.data[(epoch - 1) % len(self.data)]
            dataset = self.load_train_dataset(data_path)
        else:
            # valid/test should always be the same.
            dataset = self.load_translation_dataset(split, self.data[0])

        self.datasets[split] = dataset
        return dataset
    def load_train_dataset(self, data_path: str) -> FairseqDataset:
        """The training dataset is made of backtranslation dataset and denoising dataset."""
        data = []
        # For each language, pair one BT dataset with one DENOISE dataset;
        # the "-BT"/"-DENOISE" key suffix is parsed again in train_step.
        for lang in self.mono_langs:
            train_path = os.path.join(data_path, lang, "train")
            # TODO: could we do the BT using denoise sample ?
            # this would half the data loading work
            data.append((f"{lang}-BT", self.load_bt_dataset(train_path, lang)))
            data.append(
                (f"{lang}-DENOISE", self.load_denoise_dataset(train_path, lang))
            )

        return RoundRobinZipDatasets(OrderedDict(data))
    def _langpair_dataset(
        self, src: FairseqDataset, tgt: FairseqDataset
    ) -> LanguagePairDataset:
        """Pair *src* and *tgt* into a LanguagePairDataset with the task's
        shared dictionary and padding configuration."""
        return LanguagePairDataset(
            src,
            src.sizes,
            self.dictionary,
            tgt=tgt,
            tgt_sizes=tgt.sizes,
            tgt_dict=self.dictionary,
            left_pad_source=self.args.left_pad_source,
            left_pad_target=self.args.left_pad_target,
            # TODO: should we shuffle ? we are already sorting batch by sizes so ?
            # shuffle=True,
        )
    def _prepend_lang_bos_to_target(
        self, dataset: LanguagePairDataset, lang: str
    ) -> LanguagePairDataset:
        """Replace the target-side BOS with *lang*'s special token so the
        decoder is conditioned on the output language."""
        bos = _lang_token_index(self.dictionary, lang)
        return TransformEosLangPairDataset(
            dataset,
            src_eos=self.dictionary.eos(),
            new_src_eos=self.dictionary.eos(),
            tgt_bos=self.dictionary.eos(),
            new_tgt_bos=bos,
        )
    def load_bt_dataset(self, data_path: str, lang: str) -> FairseqDataset:
        """The BT dataset is generated with (tgt, tgt) pairs.
        The actual translation to a (generated_src, tgt) pair
        is done on the fly during training.
        """
        mono_dataset = data_utils.load_indexed_dataset(
            data_path, self.common_dict, self.args.dataset_impl
        )
        assert mono_dataset is not None, f"No dataset found for {lang}"

        # Source side starts as a copy of the target prefixed with the
        # language token; it is overwritten by backtranslate_sample().
        mono_dataset_src = PrependTokenDataset(
            mono_dataset, _lang_token_index(self.dictionary, lang)
        )

        mono_dataset_bt = self._langpair_dataset(mono_dataset_src, mono_dataset)
        logger.info(
            f"mono_lang = {lang} "
            f"lang token index = {_lang_token_index(self.dictionary, lang)} "
            f"lang token = {_lang_token(lang)}"
        )

        mono_dataset_bt = self._prepend_lang_bos_to_target(mono_dataset_bt, lang)
        return mono_dataset_bt
    def load_denoise_dataset(self, data_path: str, lang: str) -> FairseqDataset:
        """Classic denoising dataset"""
        # NOTE(review): unlike load_bt_dataset there is no `assert dataset is
        # not None` here; a missing dataset would fail later with a less
        # helpful error — confirm whether that is intentional.
        dataset = data_utils.load_indexed_dataset(
            data_path, self.common_dict, self.args.dataset_impl
        )
        # Source = corrupted text (shuffle / dropout / blanking), prefixed
        # with the language token.
        noisy_dataset = NoisingDataset(
            dataset,
            self.dictionary,
            seed=1,
            max_word_shuffle_distance=self.args.max_word_shuffle_distance,
            word_dropout_prob=self.args.word_dropout_prob,
            word_blanking_prob=self.args.word_blanking_prob,
        )
        noisy_dataset = PrependTokenDataset(
            noisy_dataset, _lang_token_index(self.dictionary, lang)
        )

        # Target = the clean text (loaded a second time).
        clean_dataset = data_utils.load_indexed_dataset(
            data_path, self.common_dict, self.args.dataset_impl
        )
        denoising_dataset = self._langpair_dataset(noisy_dataset, clean_dataset)
        denoising_dataset = self._prepend_lang_bos_to_target(denoising_dataset, lang)

        return denoising_dataset
    def load_translation_dataset(
        self, split: str, data_path: str, combine: bool = False
    ):
        """Load a supervised (src, tgt) dataset for validation/testing."""
        # only judging with one language pair for the moment,
        # since ConcatDataset doesn't work as expected
        assert len(self.valid_lang_pairs) == 1, "For now..."
        valid_lang_pair = self.valid_lang_pairs[0]
        src, tgt = valid_lang_pair.split("-")

        # use the same function than TranslationTask
        src_tgt_dt = load_langpair_dataset(
            data_path,
            split,
            src,
            self.common_dict,
            tgt,
            self.common_dict,
            combine=combine,
            dataset_impl=self.args.dataset_impl,
            upsample_primary=self.args.upsample_primary,
            left_pad_source=self.args.left_pad_source,
            left_pad_target=self.args.left_pad_target,
            max_source_positions=self.args.max_source_positions,
            max_target_positions=self.args.max_target_positions,
            load_alignments=self.args.load_alignments,
            truncate_source=self.args.truncate_source,
            num_buckets=self.args.num_batch_buckets,
            shuffle=(split != "test"),
            prepend_bos_src=_lang_token_index(self.dictionary, src),
        )

        src_tgt_eos_dt = self._prepend_lang_bos_to_target(src_tgt_dt, tgt)
        src_tgt_eos_dt.args = self.args
        return src_tgt_eos_dt
    def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
        # Interactive inference is not supported by this task.
        raise NotImplementedError
    def build_model(self, args):
        """Build the model, extend its vocabulary with the special language
        tokens, and set up the per-language sequence generators used for
        on-the-fly backtranslation (plus optional BLEU evaluation)."""
        # torch.autograd.set_detect_anomaly(True)
        model = super().build_model(args)

        add_secial_tokens_to_dict_and_model(self.common_dict, model, self.mono_langs)

        self.sequence_generators = {}
        for mono_lang in self.mono_langs:
            # Greedy (beam=1) generator for fast on-line backtranslation.
            self.sequence_generators[mono_lang] = SequenceGenerator(
                [model],
                tgt_dict=self.dictionary,
                beam_size=1,
                max_len_a=1.3,
                max_len_b=5,
                min_len=5,
                # keep 1 to be able to prepend bos
                max_len=model.max_decoder_positions() - 1,
            )

        if getattr(args, "eval_bleu", False):
            assert getattr(args, "eval_bleu_detok", None) is not None, (
                "--eval-bleu-detok is required if using --eval-bleu; "
                "try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
                "to disable detokenization, e.g., when using sentencepiece)"
            )
            detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
            self.tokenizer = encoders.build_tokenizer(
                Namespace(
                    tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args
                )
            )

            gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
            self.bleu_sequence_generator = self.build_generator(
                [model], Namespace(**gen_args)
            )

        return model
    def max_positions(self):
        """Return the max sentence length allowed by the task."""
        return (self.args.max_source_positions, self.args.max_target_positions)
    @property
    def dictionary(self):
        """Return the source :class:`~fairseq.data.Dictionary`."""
        # Source and target share the same dictionary in this task.
        return self.common_dict
    def display_samples_once_in_a_while(self, smp, mono_lang, other_lang):
        """Log a few decoded (generated source, original target) pairs every
        SHOW_SAMPLES_INTERVAL calls, for manual inspection of BT quality."""
        self._show_samples_ctr += 1
        if self._show_samples_ctr < self.SHOW_SAMPLES_INTERVAL:
            return
        self._show_samples_ctr = 0

        ln = smp["net_input"]["src_tokens"].shape[0]

        logger.info(
            f"(r:{self.args.distributed_rank}) : "
            f"{other_lang} ---> {mono_lang} "
            f"({other_lang} was generated by back-translation.) {ln} samples"
        )

        for i in range(min(ln, self.SHOW_SAMPLES_NUMBER)):
            src_tokens = smp["net_input"]["src_tokens"][i]
            tgt_tokens = smp["target"][i]

            src_str = self.dictionary.string(src_tokens, "sentencepiece")
            tgt_str = self.dictionary.string(tgt_tokens, "sentencepiece")
            logger.info(
                f"\n{i}\t\t[{other_lang} generated] {src_str}\n"
                f"\t\t[{mono_lang} original ] {tgt_str}\n"
                f"\t\t[ src tokens] {src_tokens}\n"
            )
    def backtranslate_sample(self, smp, orig_lang, other_lang) -> None:
        """
        * WARNING: smp is modified in place.
        * At the start of this function, `smp` has the same input and target:
          |--------------------------------------------------------|
          | smp['net_input']['src_tokens'] |  smp['target']        |
          | (from data) __en__ hello world |  __en__ hello world   |
          |--------------------------------------------------------|

        * We call generator.generate(smp, bos_token = token("ro")),
          and copy the result as input
        * At the end, `smp` has the translation to other language.
          |--------------------------------------------------------|
          | smp['net_input']['src_tokens'] |  smp['target']        |
          | (generated) __ro__ salut lume  |  __en__ hello world   |
          |--------------------------------------------------------|
        """
        bos_token = _lang_token_index(self.dictionary, other_lang)
        generated = self.sequence_generators[orig_lang].generate(
            models=[], sample=smp, bos_token=bos_token
        )

        # Rebuild a right-padded src_tokens batch (+1 for the prepended
        # language token) from the per-sentence best hypotheses.
        max_lngth = max([gn[0]["tokens"].size(0) for gn in generated])
        net_input = smp["net_input"]
        n_src_tokens = torch.empty(
            size=(len(generated), max_lngth + 1), dtype=net_input["src_tokens"].dtype
        )
        n_src_lengths = torch.empty(
            len(generated), dtype=net_input["src_lengths"].dtype
        )

        for i, gn in enumerate(generated):
            tokens = gn[0]["tokens"]
            tokens_size = tokens.size(0)
            padding_needed = max_lngth - tokens_size
            tokens = torch.cat([tokens.new([bos_token]), tokens])
            tokens = F.pad(tokens, (0, padding_needed), value=self.dictionary.pad())
            n_src_tokens[i] = tokens
            n_src_lengths[i] = tokens_size + 1

        device = net_input["src_tokens"].device
        # This seems to be important
        del net_input["src_tokens"]
        del net_input["src_lengths"]
        net_input["src_tokens"] = n_src_tokens.to(device)
        net_input["src_lengths"] = n_src_lengths.to(device)
    def generate(self, smp, model):
        """Generate translations for *smp* with *model* in eval mode.

        The source language is inferred from the language token at position
        0 of the first source sentence; the target bos comes from the
        sample's prev_output_tokens.
        """
        model.eval()
        orig_lang = (
            self.dictionary[smp["net_input"]["src_tokens"][0][0]]
            .replace(" ", "")
            .replace("_", "")
        )
        bos_token = smp["net_input"]["prev_output_tokens"][0][0]
        with torch.no_grad():
            generated = self.sequence_generators[orig_lang].generate(
                models=[model], sample=smp, bos_token=bos_token
            )
        return generated
def get_other_lang(self, lang):
# TODO: allow more complex mapping
if lang != self.mono_langs[0]:
return self.mono_langs[0]
if len(self.mono_langs) == 2:
return self.mono_langs[1]
return self.mono_langs[np.random.randint(1, len(self.mono_langs))]
    def train_step(
        self, sample, model, criterion, optimizer, update_num, ignore_grad=False
    ):
        """One training step over the round-robin sample: for each
        "<lang>-BT" / "<lang>-DENOISE" sub-sample, (for BT) generate the
        source on the fly, then run forward/backward weighted by the
        scheduled lambda. Returns aggregated (loss, sample_size, logs)."""
        model.train()
        model.set_num_updates(update_num)

        agg_loss, agg_sample_size = 0.0, 0.0
        agg_logging_output: Dict[str, float] = defaultdict(float)

        dataset_keys = self.datasets["train"].datasets.keys()

        weights = {
            "BT": self.lambda_bt(update_num),
            "DENOISE": self.lambda_dae(update_num),
        }
        log_keys = {"BT": "bt_", "DENOISE": "dae_"}

        for dataset_key in dataset_keys:
            smp = sample[dataset_key]
            mono_lang, task_subtype = dataset_key.split("-")
            if weights[task_subtype] == 0:
                # Zero-weighted objective: skip the whole forward/backward.
                continue

            if task_subtype == "BT":
                with torch.autograd.profiler.record_function("backtranslation"):
                    model.eval()
                    # TODO: Could we translate to several language at once ?
                    # this would allow to share encoder_out and maximize GPU usage.
                    other_lang = self.get_other_lang(mono_lang)
                    self.backtranslate_sample(smp, mono_lang, other_lang)
                    self.display_samples_once_in_a_while(smp, mono_lang, other_lang)
                    model.train()

            # Like in FairseqTask.train_step
            with torch.autograd.profiler.record_function("forward"):
                loss, sample_size, logging_output = criterion(model, smp)
            loss *= weights[task_subtype]
            if ignore_grad:
                loss *= 0
            with torch.autograd.profiler.record_function("backward"):
                optimizer.backward(loss)

            agg_loss += loss.item()
            agg_sample_size += sample_size
            # Record both prefixed (per-objective) and unprefixed totals.
            for k in logging_output:
                agg_logging_output[log_keys[task_subtype] + k] += logging_output[k]
                agg_logging_output[k] += logging_output[k]

        return agg_loss, agg_sample_size, agg_logging_output
    def get_bos_token_from_sample(self, sample):
        """Return the language token index of the *other* language given a
        sample whose sources all start with the same language token.

        Note: torch.unique(...).item() raises if the batch mixes several
        source languages — the batch is assumed monolingual.
        """
        net_input = sample["net_input"]
        source_lang_token_id = torch.unique(net_input["src_tokens"][:, 0]).item()
        source_lang_token = self.dictionary[source_lang_token_id].replace("_", "")
        target_lang_token_id = _lang_token_index(
            self.dictionary, self.get_other_lang(source_lang_token)
        )

        return target_lang_token_id
    def reduce_metrics(self, logging_outputs, criterion):
        """Aggregate the bt_*/dae_* prefixed logs produced by train_step into
        per-objective loss / nll_loss / ppl metrics (base-2, per token)."""
        super().reduce_metrics(logging_outputs, criterion)
        bt_sample_size = sum(x.get("bt_sample_size", 0) for x in logging_outputs)
        if bt_sample_size:
            bt_loss_sum = sum(x.get("bt_loss", 0) for x in logging_outputs)
            bt_loss_sum *= 1 / bt_sample_size / math.log(2)
            metrics.log_scalar("bt_loss", bt_loss_sum, bt_sample_size, round=3)

            bt_nll_loss_sum = sum(x.get("bt_nll_loss", 0) for x in logging_outputs)
            bt_ntokens = sum(x.get("bt_ntokens", 0) for x in logging_outputs)
            bt_nll_loss_sum *= 1 / bt_ntokens / math.log(2)
            metrics.log_scalar("bt_nll_loss", bt_nll_loss_sum, bt_ntokens, round=3)
            metrics.log_derived(
                "bt_ppl", lambda meters: utils.get_perplexity(meters["bt_nll_loss"].avg)
            )

        dae_sample_size = sum(x.get("dae_sample_size", 0) for x in logging_outputs)
        if dae_sample_size:
            dae_loss_sum = sum(x.get("dae_loss", 0) for x in logging_outputs)
            dae_loss_sum *= 1 / dae_sample_size / math.log(2)
            metrics.log_scalar("dae_loss", dae_loss_sum, dae_sample_size, round=3)

            dae_nll_loss_sum = sum(x.get("dae_nll_loss", 0) for x in logging_outputs)
            dae_ntokens = sum(x.get("dae_ntokens", 0) for x in logging_outputs)
            dae_nll_loss_sum *= 1 / dae_ntokens / math.log(2)
            metrics.log_scalar("dae_nll_loss", dae_nll_loss_sum, dae_ntokens, round=3)
            metrics.log_derived(
                "dae_ppl",
                lambda meters: utils.get_perplexity(meters["dae_nll_loss"].avg),
            )
@torch.no_grad()
def extend_embedding(
    emb: nn.Module, new_vocab_size: int, copy_from_token_id: int
) -> None:
    """Grow *emb* to ``new_vocab_size`` rows in place.

    Existing rows are kept; each newly added row is initialized as a copy of
    row ``copy_from_token_id``.  Works for embedding matrices and for output
    projections (the bias, when present, is extended independently, since a
    shared weight may already have been grown while the bias was not).
    """
    weights = emb.weight.data
    current_size, dim = weights.shape
    assert new_vocab_size >= current_size
    if new_vocab_size > current_size:
        extended = torch.zeros((new_vocab_size, dim))
        extended[:current_size, :] = weights
        # initialize new embeddings
        extended[current_size:, :] = weights[copy_from_token_id]
        emb.weight.data = extended
        if hasattr(emb, "num_embeddings"):
            emb.num_embeddings = new_vocab_size
        if hasattr(emb, "out_features"):
            emb.out_features = new_vocab_size

    bias = getattr(emb, "bias", None)
    if bias is None:
        return

    # Fix the bias.
    # Bias shape can be different from the previous vocab size
    # if the weight matrix was shared and already extended but not the bias.
    (old_bias_size,) = bias.shape
    assert new_vocab_size >= old_bias_size
    if new_vocab_size > old_bias_size:
        grown = torch.zeros(
            (new_vocab_size,), dtype=bias.dtype, device=bias.device
        )
        grown[:old_bias_size] = bias.data
        emb.bias.data = grown
# NOTE(review): the function name misspells "special"; it is kept as-is
# because external callers (e.g. build_model above) use this exact name.
def add_secial_tokens_to_dict_and_model(
    dictionary: "fairseq.data.Dictionary",
    model: nn.Module,
    mono_langs: Sequence[str],
) -> None:
    """Add <mask> and per-language __lang__ tokens to *dictionary*, then
    extend the model's (tied) embeddings and output projection to match."""
    embs = model.encoder.embed_tokens
    vocab_size, embedding_dim = embs.weight.shape

    # The model may or may not have a '<mask>' embedding yet
    assert (
        len(dictionary) <= vocab_size <= len(dictionary) + 1
    ), f"Dictionary len ({len(dictionary)}) doesn't match embs shape ({embs.weight.shape})"
    # TODO: we should reuse the pretrained model dict which already has <mask>
    dictionary.add_symbol("<mask>")

    for lang in mono_langs:
        lang_token = _lang_token(lang)
        dictionary.add_symbol(lang_token)
    logger.info(
        f"dictionary: {len(dictionary)} -> {vocab_size} tokens "
        f"after adding {len(mono_langs)} lang tokens."
    )

    if len(dictionary) <= vocab_size:
        # Embeddings already large enough; nothing to extend.
        return

    # New rows are initialized from the <bos> embedding.
    extend_embedding(embs, len(dictionary), dictionary.bos())
    dec_embs = model.decoder.embed_tokens
    extend_embedding(dec_embs, len(dictionary), dictionary.bos())
    lm_head = model.decoder.output_projection
    extend_embedding(lm_head, len(dictionary), dictionary.bos())
    assert lm_head.weight.shape == (len(dictionary), embedding_dim)
def _lang_token(lang: str) -> str:
return f"__{lang}__"
def _lang_token_index(dictionary, lang: str) -> int:
return dictionary.index(_lang_token(lang))
@contextlib.contextmanager
def assert_weights_have_changed(model: nn.Module):
    """Debugging context manager: assert the model's parameters changed
    between entering and leaving the block (compared via a float checksum,
    so distinct changes that sum to zero could in principle go unnoticed)."""

    def checksum(model: nn.Module) -> float:
        return sum(p.sum().item() for p in model.parameters())

    initial_checksum = checksum(model)
    yield model
    final_checksum = checksum(model)
    logger.info(
        f"initial_checksum={initial_checksum} -> final_checksum={final_checksum}"
    )
    assert initial_checksum != final_checksum, "Model hasn't changed !"
| 28,578 | 40.843338 | 118 | py |
CIF-HieraDist | CIF-HieraDist-main/fairseq/tasks/multilingual_translation.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
import os
from collections import OrderedDict
from argparse import ArgumentError
import torch
from fairseq import metrics, options, utils
from fairseq.data import (
Dictionary,
LanguagePairDataset,
RoundRobinZipDatasets,
TransformEosLangPairDataset,
)
from fairseq.models import FairseqMultiModel
from fairseq.tasks.translation import load_langpair_dataset
from . import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
def _lang_token(lang: str):
return "__{}__".format(lang)
def _lang_token_index(dic: Dictionary, lang: str):
    """Return language token index."""
    token = _lang_token(lang)
    idx = dic.index(token)
    # The token must have been added to the dictionary beforehand.
    assert idx != dic.unk_index, "cannot find language token for lang {}".format(lang)
    return idx
@register_task("multilingual_translation")
class MultilingualTranslationTask(LegacyFairseqTask):
    """A task for training multiple translation models simultaneously.

    We iterate round-robin over batches from multiple language pairs, ordered
    according to the `--lang-pairs` argument.

    The training loop is roughly:

        for i in range(len(epoch)):
            for lang_pair in args.lang_pairs:
                batch = next_batch_for_lang_pair(lang_pair)
                loss = criterion(model_for_lang_pair(lang_pair), batch)
                loss.backward()
                optimizer.step()

    In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset
    (e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that
    implements the `FairseqMultiModel` interface.

    During inference it is required to specify a single `--source-lang` and
    `--target-lang`, which indicates the inference language direction.
    `--lang-pairs`, `--encoder-langtok`, `--decoder-langtok` have to be set to
    the same value as training.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('data', metavar='DIR', help='path to data directory')
        parser.add_argument('--lang-pairs', default=None, metavar='PAIRS',
                            help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr')
        parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
                            help='source language (only needed for inference)')
        parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
                            help='target language (only needed for inference)')
        parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
                            help='pad the source on the left (default: True)')
        parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
                            help='pad the target on the left (default: False)')
        try:
            parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
                                help='max number of tokens in the source sequence')
            parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
                                help='max number of tokens in the target sequence')
        except ArgumentError:
            # this might have already been defined. Once we transition this to hydra it should be fine to add it here.
            pass
        parser.add_argument('--upsample-primary', default=1, type=int,
                            help='amount to upsample primary dataset')
        parser.add_argument('--encoder-langtok', default=None, type=str, choices=['src', 'tgt'],
                            metavar='SRCTGT',
                            help='replace beginning-of-sentence in source sentence with source or target '
                                 'language token. (src/tgt)')
        parser.add_argument('--decoder-langtok', action='store_true',
                            help='replace beginning-of-sentence in target sentence with target language token')
        # fmt: on

    def __init__(self, args, dicts, training):
        """Initialize with per-language dictionaries and a training flag.

        Args:
            args: parsed command-line arguments
            dicts (OrderedDict): mapping of language name -> Dictionary
            training (bool): True for training, False for inference (single pair)
        """
        super().__init__(args)
        self.dicts = dicts
        self.training = training
        if training:
            self.lang_pairs = args.lang_pairs
        else:
            # Inference only runs a single direction, built from -s/-t.
            self.lang_pairs = ["{}-{}".format(args.source_lang, args.target_lang)]
        # eval_lang_pairs for multilingual translation is usually all of the
        # lang_pairs. However for other multitask settings or when we want to
        # optimize for certain languages we want to use a different subset. Thus
        # the eval_lang_pairs class variable is provided for classes that extend
        # this class.
        self.eval_lang_pairs = self.lang_pairs
        # model_lang_pairs will be used to build encoder-decoder model pairs in
        # models.build_model(). This allows multitask type of sub-class can
        # build models other than the input lang_pairs
        self.model_lang_pairs = self.lang_pairs
        self.langs = list(dicts.keys())

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Load the dictionaries and instantiate the task."""
        dicts, training = cls.prepare(args, **kwargs)
        return cls(args, dicts, training)

    @classmethod
    def update_args(cls, args):
        """Normalize argument types (bools, lang-pair list) in place."""
        args.left_pad_source = utils.eval_bool(args.left_pad_source)
        args.left_pad_target = utils.eval_bool(args.left_pad_target)

        if args.lang_pairs is None:
            raise ValueError(
                "--lang-pairs is required. List all the language pairs in the training objective."
            )
        if isinstance(args.lang_pairs, str):
            args.lang_pairs = args.lang_pairs.split(",")

    @classmethod
    def prepare(cls, args, **kargs):
        """Load one dictionary per language and decide training vs inference mode.

        Returns:
            (dicts, training): OrderedDict of language -> Dictionary, plus a
            bool that is False when -s/-t were given (inference).
        """
        cls.update_args(args)
        sorted_langs = sorted(
            list({x for lang_pair in args.lang_pairs for x in lang_pair.split("-")})
        )
        if args.source_lang is not None or args.target_lang is not None:
            training = False
        else:
            training = True

        # load dictionaries
        dicts = OrderedDict()
        for lang in sorted_langs:
            paths = utils.split_paths(args.data)
            assert len(paths) > 0
            dicts[lang] = cls.load_dictionary(
                os.path.join(paths[0], "dict.{}.txt".format(lang))
            )
            # All dictionaries must agree on the special-symbol indices.
            if len(dicts) > 0:
                assert dicts[lang].pad() == dicts[sorted_langs[0]].pad()
                assert dicts[lang].eos() == dicts[sorted_langs[0]].eos()
                assert dicts[lang].unk() == dicts[sorted_langs[0]].unk()
            # Reserve a __lang__ token for every language when langtoks are used.
            if args.encoder_langtok is not None or args.decoder_langtok:
                for lang_to_add in sorted_langs:
                    dicts[lang].add_symbol(_lang_token(lang_to_add))
            logger.info("[{}] dictionary: {} types".format(lang, len(dicts[lang])))
        return dicts, training

    def get_encoder_langtok(self, src_lang, tgt_lang):
        """Token the encoder should see in place of eos: a source or target
        language token depending on --encoder-langtok, else plain eos."""
        if self.args.encoder_langtok is None:
            return self.dicts[src_lang].eos()
        if self.args.encoder_langtok == "src":
            return _lang_token_index(self.dicts[src_lang], src_lang)
        else:
            return _lang_token_index(self.dicts[src_lang], tgt_lang)

    def get_decoder_langtok(self, tgt_lang):
        """Token the decoder should start with: the target language token when
        --decoder-langtok is set, else plain eos."""
        if not self.args.decoder_langtok:
            return self.dicts[tgt_lang].eos()
        return _lang_token_index(self.dicts[tgt_lang], tgt_lang)

    def alter_dataset_langtok(
        self,
        lang_pair_dataset,
        src_eos=None,
        src_lang=None,
        tgt_eos=None,
        tgt_lang=None,
    ):
        """Wrap *lang_pair_dataset* so that source eos / target bos are replaced
        by language tokens as configured. Returns the dataset unchanged when no
        langtok option is active."""
        if self.args.encoder_langtok is None and not self.args.decoder_langtok:
            return lang_pair_dataset

        new_src_eos = None
        if (
            self.args.encoder_langtok is not None
            and src_eos is not None
            and src_lang is not None
            and tgt_lang is not None
        ):
            new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang)
        else:
            src_eos = None

        new_tgt_bos = None
        if self.args.decoder_langtok and tgt_eos is not None and tgt_lang is not None:
            new_tgt_bos = self.get_decoder_langtok(tgt_lang)
        else:
            tgt_eos = None

        return TransformEosLangPairDataset(
            lang_pair_dataset,
            src_eos=src_eos,
            new_src_eos=new_src_eos,
            tgt_bos=tgt_eos,
            new_tgt_bos=new_tgt_bos,
        )

    def load_dataset(self, split, epoch=1, **kwargs):
        """Load a dataset split."""
        paths = utils.split_paths(self.args.data)
        assert len(paths) > 0
        # Round-robin over multiple data shards across epochs.
        data_path = paths[(epoch - 1) % len(paths)]

        def language_pair_dataset(lang_pair):
            # Build the (possibly langtok-wrapped) dataset for one pair.
            src, tgt = lang_pair.split("-")
            langpair_dataset = load_langpair_dataset(
                data_path,
                split,
                src,
                self.dicts[src],
                tgt,
                self.dicts[tgt],
                combine=True,
                dataset_impl=self.args.dataset_impl,
                upsample_primary=self.args.upsample_primary,
                left_pad_source=self.args.left_pad_source,
                left_pad_target=self.args.left_pad_target,
                max_source_positions=self.args.max_source_positions,
                max_target_positions=self.args.max_target_positions,
            )
            return self.alter_dataset_langtok(
                langpair_dataset,
                src_eos=self.dicts[src].eos(),
                src_lang=src,
                tgt_eos=self.dicts[tgt].eos(),
                tgt_lang=tgt,
            )

        self.datasets[split] = RoundRobinZipDatasets(
            OrderedDict(
                [
                    (lang_pair, language_pair_dataset(lang_pair))
                    for lang_pair in self.lang_pairs
                ]
            ),
            eval_key=None
            if self.training
            else "%s-%s" % (self.args.source_lang, self.args.target_lang),
        )

    def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
        """Build a single-direction dataset from raw tokens for generation."""
        if constraints is not None:
            raise NotImplementedError(
                "Constrained decoding with the multilingual_translation task is not supported"
            )

        lang_pair = "%s-%s" % (self.args.source_lang, self.args.target_lang)
        return RoundRobinZipDatasets(
            OrderedDict(
                [
                    (
                        lang_pair,
                        self.alter_dataset_langtok(
                            LanguagePairDataset(
                                src_tokens, src_lengths, self.source_dictionary
                            ),
                            src_eos=self.source_dictionary.eos(),
                            src_lang=self.args.source_lang,
                            tgt_eos=self.target_dictionary.eos(),
                            tgt_lang=self.args.target_lang,
                        ),
                    )
                ]
            ),
            eval_key=lang_pair,
        )

    def build_model(self, args):
        """Build a FairseqMultiModel after validating langtok/lang-pair args
        against the task's own configuration."""

        def check_args():
            messages = []
            if (
                len(set(self.args.lang_pairs).symmetric_difference(args.lang_pairs))
                != 0
            ):
                messages.append(
                    "--lang-pairs should include all the language pairs {}.".format(
                        args.lang_pairs
                    )
                )
            if self.args.encoder_langtok != args.encoder_langtok:
                messages.append(
                    "--encoder-langtok should be {}.".format(args.encoder_langtok)
                )
            if self.args.decoder_langtok != args.decoder_langtok:
                messages.append(
                    "--decoder-langtok should {} be set.".format(
                        "" if args.decoder_langtok else "not"
                    )
                )

            if len(messages) > 0:
                raise ValueError(" ".join(messages))

        # Update args -> the fact that the constructor here
        # changes the args object doesn't mean you get the same one here
        self.update_args(args)

        # Check if task args are consistant with model args
        check_args()

        from fairseq import models

        model = models.build_model(args, self)
        if not isinstance(model, FairseqMultiModel):
            raise ValueError(
                "MultilingualTranslationTask requires a FairseqMultiModel architecture"
            )
        return model

    def _per_lang_pair_train_loss(
        self, lang_pair, model, update_num, criterion, sample, optimizer, ignore_grad
    ):
        """Compute + backprop the loss for a single language pair."""
        loss, sample_size, logging_output = criterion(
            model.models[lang_pair], sample[lang_pair]
        )
        if ignore_grad:
            loss *= 0
        optimizer.backward(loss)
        return loss, sample_size, logging_output

    def train_step(
        self, sample, model, criterion, optimizer, update_num, ignore_grad=False
    ):
        """Accumulate losses over all language pairs present in *sample*."""
        model.train()
        from collections import defaultdict

        agg_loss, agg_sample_size, agg_logging_output = 0.0, 0.0, defaultdict(float)
        curr_lang_pairs = [
            lang_pair
            for lang_pair in self.model_lang_pairs
            if sample[lang_pair] is not None and len(sample[lang_pair]) != 0
        ]

        for idx, lang_pair in enumerate(curr_lang_pairs):

            def maybe_no_sync():
                # Skip gradient all-reduce for all but the last pair so
                # gradients are synchronized only once per train step.
                if (
                    self.args.distributed_world_size > 1
                    and hasattr(model, "no_sync")
                    and idx < len(curr_lang_pairs) - 1
                ):
                    return model.no_sync()
                else:
                    return contextlib.ExitStack()  # dummy contextmanager

            with maybe_no_sync():
                loss, sample_size, logging_output = self._per_lang_pair_train_loss(
                    lang_pair,
                    model,
                    update_num,
                    criterion,
                    sample,
                    optimizer,
                    ignore_grad,
                )
            agg_loss += loss.detach().item()
            # TODO make summing of the sample sizes configurable
            agg_sample_size += sample_size
            for k in logging_output:
                agg_logging_output[k] += logging_output[k]
                agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k]
        return agg_loss, agg_sample_size, agg_logging_output

    def _per_lang_pair_valid_loss(self, lang_pair, model, criterion, sample):
        """Compute the validation loss for a single language pair."""
        return criterion(model.models[lang_pair], sample[lang_pair])

    def valid_step(self, sample, model, criterion):
        """Aggregate validation losses over `eval_lang_pairs`."""
        model.eval()
        with torch.no_grad():
            from collections import defaultdict

            agg_loss, agg_sample_size, agg_logging_output = 0.0, 0.0, defaultdict(float)
            for lang_pair in self.eval_lang_pairs:
                if (
                    lang_pair not in sample
                    or sample[lang_pair] is None
                    or len(sample[lang_pair]) == 0
                ):
                    continue
                loss, sample_size, logging_output = self._per_lang_pair_valid_loss(
                    lang_pair, model, criterion, sample
                )
                agg_loss += loss.data.item()
                # TODO make summing of the sample sizes configurable
                agg_sample_size += sample_size
                for k in logging_output:
                    agg_logging_output[k] += logging_output[k]
                    agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k]
        return agg_loss, agg_sample_size, agg_logging_output

    def inference_step(
        self, generator, models, sample, prefix_tokens=None, constraints=None
    ):
        """Generate translations, seeding the decoder with a language token
        when --decoder-langtok is active."""
        with torch.no_grad():
            if self.args.decoder_langtok:
                bos_token = _lang_token_index(
                    self.target_dictionary, self.args.target_lang
                )
            else:
                bos_token = self.target_dictionary.eos()
            return generator.generate(
                models,
                sample,
                prefix_tokens=prefix_tokens,
                constraints=constraints,
                bos_token=bos_token,
            )

    def reduce_metrics(self, logging_outputs, criterion):
        """Aggregate logging outputs across workers into scalar metrics."""
        with metrics.aggregate():
            # pass 'sample_size', 'nsentences', 'ntokens' stats to fairseq_task
            super().reduce_metrics(logging_outputs, criterion)
            for k in ["sample_size", "nsentences", "ntokens"]:
                metrics.log_scalar(k, sum(l[k] for l in logging_outputs))

    @property
    def source_dictionary(self):
        # During training all dictionaries share special symbols, so any one
        # works; at inference use the dictionary of --source-lang.
        if self.training:
            return next(iter(self.dicts.values()))
        else:
            return self.dicts[self.args.source_lang]

    @property
    def target_dictionary(self):
        # Mirror of `source_dictionary` for the target side.
        if self.training:
            return next(iter(self.dicts.values()))
        else:
            return self.dicts[self.args.target_lang]

    def max_positions(self):
        """Return the max sentence length allowed by the task."""
        if len(self.datasets.values()) == 0:
            return {
                "%s-%s"
                % (self.args.source_lang, self.args.target_lang): (
                    self.args.max_source_positions,
                    self.args.max_target_positions,
                )
            }
        return OrderedDict(
            [
                (key, (self.args.max_source_positions, self.args.max_target_positions))
                for split in self.datasets.keys()
                for key in self.datasets[split].datasets.keys()
            ]
        )
| 18,125 | 38.149028 | 118 | py |
CIF-HieraDist | CIF-HieraDist-main/fairseq/tasks/translation_lev.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import torch
from fairseq import utils
from fairseq.data import LanguagePairDataset
from fairseq.dataclass import ChoiceEnum
from fairseq.tasks import register_task
from fairseq.tasks.translation import (
TranslationConfig,
TranslationTask,
load_langpair_dataset,
)
from fairseq.utils import new_arange
# Noise schemes available for corrupting the target sequence before training
# the iterative-refinement (Levenshtein) model; see `inject_noise` below.
NOISE_CHOICES = ChoiceEnum(["random_delete", "random_mask", "no_noise", "full_mask"])
@dataclass
class TranslationLevenshteinConfig(TranslationConfig):
    # Which corruption to apply to the target when building `prev_target`
    # (see TranslationLevenshteinTask.inject_noise).
    noise: NOISE_CHOICES = field(
        default="random_delete",
        metadata={"help": "type of noise"},
    )
@register_task("translation_lev", dataclass=TranslationLevenshteinConfig)
class TranslationLevenshteinTask(TranslationTask):
    """
    Translation (Sequence Generation) task for Levenshtein Transformer
    See `"Levenshtein Transformer" <https://arxiv.org/abs/1905.11006>`_.
    """

    cfg: TranslationLevenshteinConfig

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        paths = utils.split_paths(self.cfg.data)
        assert len(paths) > 0
        # Round-robin over multiple data shards across epochs.
        data_path = paths[(epoch - 1) % len(paths)]

        # infer langcode
        src, tgt = self.cfg.source_lang, self.cfg.target_lang

        self.datasets[split] = load_langpair_dataset(
            data_path,
            split,
            src,
            self.src_dict,
            tgt,
            self.tgt_dict,
            combine=combine,
            dataset_impl=self.cfg.dataset_impl,
            upsample_primary=self.cfg.upsample_primary,
            left_pad_source=self.cfg.left_pad_source,
            left_pad_target=self.cfg.left_pad_target,
            max_source_positions=self.cfg.max_source_positions,
            max_target_positions=self.cfg.max_target_positions,
            prepend_bos=True,
        )

    def inject_noise(self, target_tokens):
        """Corrupt `target_tokens` according to ``cfg.noise`` to produce the
        decoder's initial input (``prev_target``)."""

        def _random_delete(target_tokens):
            # Delete a random subset of non-special tokens per row, compact
            # the survivors, and trim columns that became all padding.
            pad = self.tgt_dict.pad()
            bos = self.tgt_dict.bos()
            eos = self.tgt_dict.eos()

            max_len = target_tokens.size(1)
            target_mask = target_tokens.eq(pad)
            target_score = target_tokens.clone().float().uniform_()
            # bos/eos get score 0 so they sort first and are never deleted;
            # pad gets score 1 so it sorts to the end.
            target_score.masked_fill_(
                target_tokens.eq(bos) | target_tokens.eq(eos), 0.0
            )
            target_score.masked_fill_(target_mask, 1)
            target_score, target_rank = target_score.sort(1)
            target_length = target_mask.size(1) - target_mask.float().sum(
                1, keepdim=True
            )

            # do not delete <bos> and <eos> (we assign 0 score for them)
            target_cutoff = (
                2
                + (
                    (target_length - 2)
                    * target_score.new_zeros(target_score.size(0), 1).uniform_()
                ).long()
            )
            # Positions ranked past the cutoff are marked for deletion.
            target_cutoff = target_score.sort(1)[1] >= target_cutoff

            prev_target_tokens = (
                target_tokens.gather(1, target_rank)
                .masked_fill_(target_cutoff, pad)
                .gather(1, target_rank.masked_fill_(target_cutoff, max_len).sort(1)[1])
            )
            # Trim trailing columns that are padding in every row.
            prev_target_tokens = prev_target_tokens[
                :, : prev_target_tokens.ne(pad).sum(1).max()
            ]

            return prev_target_tokens

        def _random_mask(target_tokens):
            # Replace a random fraction (>= 1 token) of the real tokens in
            # each row with <unk>, which acts as the mask symbol here.
            pad = self.tgt_dict.pad()
            bos = self.tgt_dict.bos()
            eos = self.tgt_dict.eos()
            unk = self.tgt_dict.unk()

            target_masks = (
                target_tokens.ne(pad) & target_tokens.ne(bos) & target_tokens.ne(eos)
            )
            target_score = target_tokens.clone().float().uniform_()
            # Special tokens get score 2.0 so they are never selected.
            target_score.masked_fill_(~target_masks, 2.0)
            target_length = target_masks.sum(1).float()
            target_length = target_length * target_length.clone().uniform_()
            target_length = target_length + 1  # make sure to mask at least one token.

            _, target_rank = target_score.sort(1)
            target_cutoff = new_arange(target_rank) < target_length[:, None].long()
            prev_target_tokens = target_tokens.masked_fill(
                target_cutoff.scatter(1, target_rank, target_cutoff), unk
            )
            return prev_target_tokens

        def _full_mask(target_tokens):
            # Replace every non-special token with <unk>; pad/bos/eos survive.
            pad = self.tgt_dict.pad()
            bos = self.tgt_dict.bos()
            eos = self.tgt_dict.eos()
            unk = self.tgt_dict.unk()

            target_mask = (
                target_tokens.eq(bos) | target_tokens.eq(eos) | target_tokens.eq(pad)
            )
            return target_tokens.masked_fill(~target_mask, unk)

        if self.cfg.noise == "random_delete":
            return _random_delete(target_tokens)
        elif self.cfg.noise == "random_mask":
            return _random_mask(target_tokens)
        elif self.cfg.noise == "full_mask":
            return _full_mask(target_tokens)
        elif self.cfg.noise == "no_noise":
            return target_tokens
        else:
            raise NotImplementedError

    def build_generator(self, models, args, **unused):
        """Build an IterativeRefinementGenerator configured from *args*."""
        # add models input to match the API for SequenceGenerator
        from fairseq.iterative_refinement_generator import IterativeRefinementGenerator

        return IterativeRefinementGenerator(
            self.target_dictionary,
            eos_penalty=getattr(args, "iter_decode_eos_penalty", 0.0),
            max_iter=getattr(args, "iter_decode_max_iter", 10),
            beam_size=getattr(args, "iter_decode_with_beam", 1),
            reranking=getattr(args, "iter_decode_with_external_reranker", False),
            decoding_format=getattr(args, "decoding_format", None),
            adaptive=not getattr(args, "iter_decode_force_max_iter", False),
            retain_history=getattr(args, "retain_iter_history", False),
        )

    def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
        """Build a source-only dataset (with prepended bos) for generation."""
        if constraints is not None:
            # Though see Susanto et al. (ACL 2020): https://www.aclweb.org/anthology/2020.acl-main.325/
            raise NotImplementedError(
                "Constrained decoding with the translation_lev task is not supported"
            )

        return LanguagePairDataset(
            src_tokens, src_lengths, self.source_dictionary, append_bos=True
        )

    def train_step(
        self, sample, model, criterion, optimizer, update_num, ignore_grad=False
    ):
        """Train step: corrupt the target into `prev_target`, then backprop."""
        model.train()
        sample["prev_target"] = self.inject_noise(sample["target"])
        loss, sample_size, logging_output = criterion(model, sample)
        if ignore_grad:
            loss *= 0
        optimizer.backward(loss)
        return loss, sample_size, logging_output

    def valid_step(self, sample, model, criterion):
        """Validation step with the same target-corruption as training."""
        model.eval()
        with torch.no_grad():
            sample["prev_target"] = self.inject_noise(sample["target"])
            loss, sample_size, logging_output = criterion(model, sample)
        return loss, sample_size, logging_output
| 7,416 | 36.841837 | 103 | py |
CIF-HieraDist | CIF-HieraDist-main/fairseq/tasks/fairseq_task.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import warnings
from argparse import Namespace
from typing import Any, Callable, Dict, List
import torch
from fairseq import metrics, search, tokenizer, utils
from fairseq.data import Dictionary, FairseqDataset, data_utils, encoders, iterators
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.optim.amp_optimizer import AMPOptimizer
from omegaconf import DictConfig
logger = logging.getLogger(__name__)
class StatefulContainer(object):
    """Lazily-populated key/value store for task state that must survive in
    checkpoints.

    Values are either merged in directly via :meth:`merge_state_dict` or built
    on first attribute access by a factory registered with :meth:`add_factory`.
    """

    def __init__(self):
        self._state = {}
        self._factories = {}

    def add_factory(self, name, factory: Callable[[], Any]):
        """Register *factory* to build the value for *name* on first access."""
        self._factories[name] = factory

    def merge_state_dict(self, state_dict: Dict[str, Any]):
        """Overwrite current entries with those from *state_dict*."""
        self._state.update(state_dict)

    @property
    def state_dict(self) -> Dict[str, Any]:
        """The raw state mapping, as persisted to checkpoints."""
        return self._state

    def __getattr__(self, name):
        # Materialize the value lazily the first time it is read.
        if name not in self._state and name in self._factories:
            self._state[name] = self._factories[name]()
        if name not in self._state:
            raise AttributeError(f"Task state has no factory for attribute {name}")
        return self._state[name]
class FairseqTask(object):
"""
Tasks store dictionaries and provide helpers for loading/iterating over
Datasets, initializing the Model/Criterion and calculating the loss.
Tasks have limited statefulness. In particular, state that needs to be
saved to/loaded from checkpoints needs to be stored in the `self.state`
:class:`StatefulContainer` object. For example::
self.state.add_factory("dictionary", self.load_dictionary)
print(self.state.dictionary) # calls self.load_dictionary()
This is necessary so that when loading checkpoints, we can properly
recreate the task state after initializing the task instance.
"""
@classmethod
def add_args(cls, parser):
"""Add task-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
gen_parser_from_dataclass(parser, dc())
    @staticmethod
    def logging_outputs_can_be_summed(criterion) -> bool:
        """
        Whether the logging outputs returned by `train_step` and `valid_step` can
        be summed across workers prior to calling `aggregate_logging_outputs`.
        Setting this to True improves distributed training speed.
        """
        # Delegate to the criterion, which knows the structure of its outputs.
        return criterion.logging_outputs_can_be_summed()
    def __init__(self, cfg: FairseqDataclass, **kwargs):
        self.cfg = cfg
        # Mapping of split name -> loaded dataset (populated by `load_dataset`).
        self.datasets = dict()
        # Cache of dataset -> epoch iterator, reused by `get_batch_iterator`.
        self.dataset_to_epoch_iter = dict()
        # Checkpoint-persisted task state; see the class docstring.
        self.state = StatefulContainer()
    @classmethod
    def load_dictionary(cls, filename):
        """Load the dictionary from the filename

        Args:
            filename (str): the filename

        Returns:
            a :class:`~fairseq.data.Dictionary` loaded from *filename*
        """
        return Dictionary.load(filename)
    @classmethod
    def build_dictionary(
        cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
    ):
        """Build the dictionary

        Args:
            filenames (list): list of filenames
            workers (int): number of concurrent workers
            threshold (int): defines the minimum word count
            nwords (int): defines the total number of words in the final dictionary,
                including special symbols
            padding_factor (int): can be used to pad the dictionary size to be a
                multiple of 8, which is important on some hardware (e.g., Nvidia
                Tensor Cores).

        Returns:
            a :class:`~fairseq.data.Dictionary` built from *filenames*
        """
        d = Dictionary()
        for filename in filenames:
            Dictionary.add_file_to_dictionary(
                filename, d, tokenizer.tokenize_line, workers
            )
        # Drop rare words, cap the size, and pad to a hardware-friendly multiple.
        d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
        return d
    @classmethod
    def setup_task(cls, cfg: DictConfig, **kwargs):
        """Setup the task (e.g., load dictionaries).

        Args:
            cfg (omegaconf.DictConfig): parsed command-line arguments
        """
        return cls(cfg, **kwargs)
def has_sharded_data(self, split):
return os.pathsep in getattr(self.cfg, "data", "")
    def load_dataset(
        self,
        split: str,
        combine: bool = False,
        task_cfg: FairseqDataclass = None,
        **kwargs,
    ):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
            combine (bool): combines a split segmented into pieces into one dataset
            task_cfg (FairseqDataclass): optional task configuration stored in the checkpoint that can be used
                to load datasets

        Raises:
            NotImplementedError: always; subclasses must override this method.
        """
        raise NotImplementedError
def dataset(self, split):
"""
Return a loaded dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
Returns:
a :class:`~fairseq.data.FairseqDataset` corresponding to *split*
"""
from fairseq.data import FairseqDataset
if split not in self.datasets:
raise KeyError("Dataset not loaded: " + split)
if not isinstance(self.datasets[split], FairseqDataset):
raise TypeError("Datasets are expected to be of type FairseqDataset")
return self.datasets[split]
    def filter_indices_by_size(
        self, indices, dataset, max_positions=None, ignore_invalid_inputs=False
    ):
        """
        Filter examples that are too large

        Args:
            indices (np.array): original array of sample indices
            dataset (~fairseq.data.FairseqDataset): dataset to batch
            max_positions (optional): max sentence length supported by the
                model (default: None).
            ignore_invalid_inputs (bool, optional): don't raise Exception for
                sentences that are too long (default: False).

        Returns:
            np.array: array of filtered sample indices
        """
        indices, ignored = dataset.filter_indices_by_size(indices, max_positions)
        if len(ignored) > 0:
            # Either abort on the first oversized example or just warn,
            # depending on `ignore_invalid_inputs`.
            if not ignore_invalid_inputs:
                raise Exception(
                    (
                        "Size of sample #{} is invalid (={}) since max_positions={}, "
                        "skip this example with --skip-invalid-size-inputs-valid-test"
                    ).format(ignored[0], dataset.size(ignored[0]), max_positions)
                )
            logger.warning(
                (
                    "{:,} samples have invalid sizes and will be skipped, "
                    "max_positions={}, first few sample ids={}"
                ).format(len(ignored), max_positions, ignored[:10])
            )
        return indices
    def can_reuse_epoch_itr(self, dataset):
        """Whether the epoch iterator built for *dataset* may be cached and
        reused across epochs."""
        # We can reuse the epoch iterator across epochs as long as the dataset
        # hasn't disabled it. We default to ``False`` here, although in practice
        # this will be ``True`` for most datasets that inherit from
        # ``FairseqDataset`` due to the base implementation there.
        return getattr(dataset, "can_reuse_epoch_itr_across_epochs", False)
    def get_batch_iterator(
        self,
        dataset,
        max_tokens=None,
        max_sentences=None,
        max_positions=None,
        ignore_invalid_inputs=False,
        required_batch_size_multiple=1,
        seed=1,
        num_shards=1,
        shard_id=0,
        num_workers=0,
        epoch=1,
        data_buffer_size=0,
        disable_iterator_cache=False,
        grouped_shuffling=False,
        update_epoch_batch_itr=False,
    ):
        """
        Get an iterator that yields batches of data from the given dataset.

        Args:
            dataset (~fairseq.data.FairseqDataset): dataset to batch
            max_tokens (int, optional): max number of tokens in each batch
                (default: None).
            max_sentences (int, optional): max number of sentences in each
                batch (default: None).
            max_positions (optional): max sentence length supported by the
                model (default: None).
            ignore_invalid_inputs (bool, optional): don't raise Exception for
                sentences that are too long (default: False).
            required_batch_size_multiple (int, optional): require batch size to
                be a multiple of N (default: 1).
            seed (int, optional): seed for random number generator for
                reproducibility (default: 1).
            num_shards (int, optional): shard the data iterator into N
                shards (default: 1).
            shard_id (int, optional): which shard of the data iterator to
                return (default: 0).
            num_workers (int, optional): how many subprocesses to use for data
                loading. 0 means the data will be loaded in the main process
                (default: 0).
            epoch (int, optional): the epoch to start the iterator from
                (default: 1).
            data_buffer_size (int, optional): number of batches to
                preload (default: 0).
            disable_iterator_cache (bool, optional): don't cache the
                EpochBatchIterator (ignores `FairseqTask::can_reuse_epoch_itr`)
                (default: False).
            grouped_shuffling (bool, optional): group batches with each groups
                containing num_shards batches and shuffle groups. Reduces difference
                between sequence lengths among workers for batches sorted by length.
            update_epoch_batch_itr (bool optional): if true then donot use the cached
                batch iterator for the epoch

        Returns:
            ~fairseq.iterators.EpochBatchIterator: a batched iterator over the
                given dataset split
        """
        can_reuse_epoch_itr = (
            not disable_iterator_cache
            and not update_epoch_batch_itr
            and self.can_reuse_epoch_itr(dataset)
        )
        # Serve a previously-built iterator for this dataset when allowed.
        if can_reuse_epoch_itr and dataset in self.dataset_to_epoch_iter:
            logger.debug("reusing EpochBatchIterator for epoch {}".format(epoch))
            return self.dataset_to_epoch_iter[dataset]

        assert isinstance(dataset, FairseqDataset)

        # initialize the dataset with the correct starting epoch
        dataset.set_epoch(epoch)

        # get indices ordered by example size
        with data_utils.numpy_seed(seed):
            indices = dataset.ordered_indices()

        # filter examples that are too large
        if max_positions is not None:
            indices = self.filter_indices_by_size(
                indices, dataset, max_positions, ignore_invalid_inputs
            )

        # create mini-batches with given size constraints
        batch_sampler = dataset.batch_by_size(
            indices,
            max_tokens=max_tokens,
            max_sentences=max_sentences,
            required_batch_size_multiple=required_batch_size_multiple,
        )

        # return a reusable, sharded iterator
        epoch_iter = iterators.EpochBatchIterator(
            dataset=dataset,
            collate_fn=dataset.collater,
            batch_sampler=batch_sampler,
            seed=seed,
            num_shards=num_shards,
            shard_id=shard_id,
            num_workers=num_workers,
            epoch=epoch,
            buffer_size=data_buffer_size,
            grouped_shuffling=grouped_shuffling,
        )

        if can_reuse_epoch_itr:
            # Cache so subsequent epochs over the same dataset skip the rebuild.
            self.dataset_to_epoch_iter[dataset] = epoch_iter

        return epoch_iter
    def build_model(self, cfg: FairseqDataclass):
        """
        Build the :class:`~fairseq.models.BaseFairseqModel` instance for this
        task.

        Args:
            cfg (FairseqDataclass): configuration object

        Returns:
            a :class:`~fairseq.models.BaseFairseqModel` instance
        """
        from fairseq import models, quantization_utils

        model = models.build_model(cfg, self)
        # Optionally wrap with scalar quantization, as configured in cfg.
        model = quantization_utils.quantize_model_scalar(model, cfg)
        return model
    def build_criterion(self, cfg: DictConfig):
        """
        Build the :class:`~fairseq.criterions.FairseqCriterion` instance for
        this task.

        Args:
            cfg (omegaconf.DictConfig): configuration object

        Returns:
            a :class:`~fairseq.criterions.FairseqCriterion` instance
        """
        from fairseq import criterions

        return criterions.build_criterion(cfg, self)
def build_generator(
    self,
    models,
    args,
    seq_gen_cls=None,
    extra_gen_cls_kwargs=None,
    prefix_allowed_tokens_fn=None,
):
    """
    Build a :class:`~fairseq.SequenceGenerator` instance for this
    task.
    Args:
        models (List[~fairseq.models.FairseqModel]): ensemble of models
        args (fairseq.dataclass.configs.GenerationConfig):
            configuration object (dataclass) for generation
        extra_gen_cls_kwargs (Dict[str, Any]): extra options to pass
            through to SequenceGenerator
        prefix_allowed_tokens_fn (Callable[[int, torch.Tensor], List[int]]):
            If provided, this function constrains the beam search to
            allowed tokens only at each step. The provided function
            should take 2 arguments: the batch ID (`batch_id: int`)
            and a unidimensional tensor of token ids (`inputs_ids:
            torch.Tensor`). It has to return a `List[int]` with the
            allowed tokens for the next generation step conditioned
            on the previously generated tokens (`inputs_ids`) and
            the batch ID (`batch_id`). This argument is useful for
            constrained generation conditioned on the prefix, as
            described in "Autoregressive Entity Retrieval"
            (https://arxiv.org/abs/2010.00904) and
            https://github.com/facebookresearch/GENRE.
    """
    # Scoring mode: wrap a SequenceScorer instead of generating new text.
    if getattr(args, "score_reference", False):
        from fairseq.sequence_scorer import SequenceScorer

        return SequenceScorer(
            self.target_dictionary,
            compute_alignment=getattr(args, "print_alignment", False),
        )

    from fairseq.sequence_generator import (
        SequenceGenerator,
        SequenceGeneratorWithAlignment,
    )

    # Choose search strategy. Defaults to Beam Search.
    sampling = getattr(args, "sampling", False)
    sampling_topk = getattr(args, "sampling_topk", -1)
    sampling_topp = getattr(args, "sampling_topp", -1.0)
    diverse_beam_groups = getattr(args, "diverse_beam_groups", -1)
    diverse_beam_strength = getattr(args, "diverse_beam_strength", 0.5)
    match_source_len = getattr(args, "match_source_len", False)
    diversity_rate = getattr(args, "diversity_rate", -1)
    constrained = getattr(args, "constraints", False)
    if prefix_allowed_tokens_fn is None:
        prefix_allowed_tokens_fn = getattr(args, "prefix_allowed_tokens_fn", None)
    # The four strategies below must not be combined.
    # NOTE(review): `constrained` and `prefix_allowed_tokens_fn` are not part of
    # this exclusivity check, so e.g. sampling silently wins over them in the
    # elif-chain below — confirm this is intended.
    if (
        sum(
            int(cond)
            for cond in [
                sampling,
                diverse_beam_groups > 0,
                match_source_len,
                diversity_rate > 0,
            ]
        )
        > 1
    ):
        raise ValueError("Provided Search parameters are mutually exclusive.")
    assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling"
    assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling"

    if sampling:
        search_strategy = search.Sampling(
            self.target_dictionary, sampling_topk, sampling_topp
        )
    elif diverse_beam_groups > 0:
        search_strategy = search.DiverseBeamSearch(
            self.target_dictionary, diverse_beam_groups, diverse_beam_strength
        )
    elif match_source_len:
        # this is useful for tagging applications where the output
        # length should match the input length, so we hardcode the
        # length constraints for simplicity
        search_strategy = search.LengthConstrainedBeamSearch(
            self.target_dictionary,
            min_len_a=1,
            min_len_b=0,
            max_len_a=1,
            max_len_b=0,
        )
    elif diversity_rate > -1:
        search_strategy = search.DiverseSiblingsSearch(
            self.target_dictionary, diversity_rate
        )
    elif constrained:
        search_strategy = search.LexicallyConstrainedBeamSearch(
            self.target_dictionary, args.constraints
        )
    elif prefix_allowed_tokens_fn:
        search_strategy = search.PrefixConstrainedBeamSearch(
            self.target_dictionary, prefix_allowed_tokens_fn
        )
    else:
        search_strategy = search.BeamSearch(self.target_dictionary)

    extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}
    # Only pick a default generator class when the caller did not force one.
    if seq_gen_cls is None:
        if getattr(args, "print_alignment", False):
            seq_gen_cls = SequenceGeneratorWithAlignment
            extra_gen_cls_kwargs["print_alignment"] = args.print_alignment
        else:
            seq_gen_cls = SequenceGenerator

    return seq_gen_cls(
        models,
        self.target_dictionary,
        beam_size=getattr(args, "beam", 5),
        max_len_a=getattr(args, "max_len_a", 0),
        max_len_b=getattr(args, "max_len_b", 200),
        min_len=getattr(args, "min_len", 1),
        normalize_scores=(not getattr(args, "unnormalized", False)),
        len_penalty=getattr(args, "lenpen", 1),
        unk_penalty=getattr(args, "unkpen", 0),
        temperature=getattr(args, "temperature", 1.0),
        match_source_len=getattr(args, "match_source_len", False),
        no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
        search_strategy=search_strategy,
        **extra_gen_cls_kwargs,
    )
def train_step(
    self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
    """Run one forward/backward pass and return the criterion outputs.

    Args:
        sample (dict): the mini-batch. The format is defined by the
            :class:`~fairseq.data.FairseqDataset`.
        model (~fairseq.models.BaseFairseqModel): the model
        criterion (~fairseq.criterions.FairseqCriterion): the criterion
        optimizer (~fairseq.optim.FairseqOptimizer): the optimizer
        update_num (int): the current update
        ignore_grad (bool): multiply loss by 0 if this is set to True

    Returns:
        tuple:
            - the loss
            - the sample size, which is used as the denominator for the
              gradient
            - logging outputs to display while training
    """
    model.train()
    model.set_num_updates(update_num)
    # Mixed-precision autocast is enabled only when driven by the AMP optimizer.
    use_amp = isinstance(optimizer, AMPOptimizer)
    with torch.autograd.profiler.record_function("forward"):
        with torch.cuda.amp.autocast(enabled=use_amp):
            loss, sample_size, logging_output = criterion(model, sample)
    if ignore_grad:
        # Zero the loss so backward() contributes no gradient (e.g. dummy batches).
        loss *= 0
    with torch.autograd.profiler.record_function("backward"):
        optimizer.backward(loss)
    return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
    """Evaluate *criterion* on *sample* with the model in eval mode, no grads."""
    model.eval()
    with torch.no_grad():
        return criterion(model, sample)
def optimizer_step(self, optimizer, model, update_num):
    """Apply a single optimizer update (*model*/*update_num* are hook args)."""
    optimizer.step()
def build_dataset_for_inference(
    self, src_tokens: List[torch.Tensor], src_lengths: List[int], **kwargs
) -> torch.utils.data.Dataset:
    """Build a dataset wrapping pre-tokenized inputs for inference.

    Deliberately unimplemented in the base task; subclasses that support
    interactive generation must override it.
    """
    raise NotImplementedError
def inference_step(
    self, generator, models, sample, prefix_tokens=None, constraints=None
):
    """Generate hypotheses for *sample* with *generator*, gradient-free."""
    with torch.no_grad():
        hypos = generator.generate(
            models, sample, prefix_tokens=prefix_tokens, constraints=constraints
        )
    return hypos
def begin_epoch(self, epoch, model):
    """Hook function called before the start of each epoch.

    No-op in the base task; subclasses may override.
    """
    pass
def begin_valid_epoch(self, epoch, model):
    """Hook function called before the start of each validation epoch.

    No-op in the base task; subclasses may override.
    """
    pass
def aggregate_logging_outputs(self, logging_outputs, criterion):
    """[deprecated] Aggregate logging outputs from data parallel training."""
    utils.deprecation_warning(
        "The aggregate_logging_outputs API is deprecated. "
        "Please use the reduce_metrics API instead."
    )
    # Delegate to the modern reduce_metrics API and return its aggregated
    # smoothed scalar values so old callers keep working.
    with metrics.aggregate() as agg:
        self.reduce_metrics(logging_outputs, criterion)
        return agg.get_smoothed_values()
def reduce_metrics(self, logging_outputs, criterion):
    """Aggregate logging outputs from data parallel training."""
    # backward compatibility for tasks that override aggregate_logging_outputs
    base_func = FairseqTask.aggregate_logging_outputs
    self_func = getattr(self, "aggregate_logging_outputs").__func__
    if self_func is not base_func:
        # Subclass still implements the deprecated API: call it, re-log its
        # scalars through the metrics module, and stop here.
        utils.deprecation_warning(
            "Tasks should implement the reduce_metrics API. "
            "Falling back to deprecated aggregate_logging_outputs API."
        )
        agg_logging_outputs = self.aggregate_logging_outputs(
            logging_outputs, criterion
        )
        for k, v in agg_logging_outputs.items():
            metrics.log_scalar(k, v)
        return

    if not any("ntokens" in log for log in logging_outputs):
        warnings.warn(
            "ntokens not found in Criterion logging outputs, cannot log wpb or wps"
        )
    else:
        # words-per-batch and words-per-second throughput metrics
        ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
        metrics.log_scalar("wpb", ntokens, priority=180, round=1)
        metrics.log_speed("wps", ntokens, priority=90, round=1)
    if not any("nsentences" in log for log in logging_outputs):
        warnings.warn(
            "nsentences not found in Criterion logging outputs, cannot log bsz"
        )
    else:
        # batch size measured in sentences
        nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
        metrics.log_scalar("bsz", nsentences, priority=190, round=1)

    # criterion-specific aggregation (loss, nll_loss, ...)
    criterion.__class__.reduce_metrics(logging_outputs)
def state_dict(self):
    """Serialize task-level state; empty dict when the task keeps no state."""
    return self.state.state_dict if self.state is not None else {}
def load_state_dict(self, state_dict: Dict[str, Any]):
    """Merge *state_dict* into the task state, when any state is kept."""
    if self.state is None:
        return
    self.state.merge_state_dict(state_dict)
def max_positions(self):
    """Return the max input length allowed by the task.

    ``None`` means the base task imposes no limit.
    """
    return None
@property
def source_dictionary(self):
    """Return the source :class:`~fairseq.data.Dictionary` (if applicable
    for this task)."""
    # The base task has no dictionary; concrete tasks override this property.
    raise NotImplementedError
@property
def target_dictionary(self):
    """Return the target :class:`~fairseq.data.Dictionary` (if applicable
    for this task)."""
    # The base task has no dictionary; concrete tasks override this property.
    raise NotImplementedError
def build_tokenizer(self, args):
    """Build the pre-tokenizer for this task."""
    # Delegates to fairseq.data.encoders.
    return encoders.build_tokenizer(args)
def build_bpe(self, args):
    """Build the tokenizer for this task."""
    # Delegates to fairseq.data.encoders (subword/BPE encoder).
    return encoders.build_bpe(args)
def get_interactive_tokens_and_lengths(self, lines, encode_fn):
    """Encode raw *lines* via *encode_fn* and the source dictionary.

    Returns:
        tuple: (list of long token tensors, list of their element counts)
    """
    tokens = []
    for src_str in lines:
        encoded = self.source_dictionary.encode_line(
            encode_fn(src_str), add_if_not_exist=False
        ).long()
        tokens.append(encoded)
    lengths = [t.numel() for t in tokens]
    return tokens, lengths
class LegacyFairseqTask(FairseqTask):
    """Base class for tasks configured via an ``argparse.Namespace``.

    Newer tasks are driven by dataclass configs; this shim keeps the older
    argparse-based interface working on top of :class:`FairseqTask`.
    """

    def __init__(self, args: Namespace):
        super().__init__(None)
        self.args = args
        self.datasets = {}
        self.dataset_to_epoch_iter = {}

    @classmethod
    def setup_task(cls, args: Namespace, **kwargs):
        """Setup the task (e.g., load dictionaries).

        Args:
            args (argparse.Namespace): parsed command-line arguments
        """
        return cls(args, **kwargs)

    def has_sharded_data(self, split):
        """True when ``--data`` lists several shard paths (os.pathsep-joined)."""
        data = getattr(self.args, "data", "")
        return os.pathsep in data

    def build_model(self, args: Namespace):
        """Build and (optionally) scalar-quantize the model for this task.

        Args:
            args (argparse.Namespace): parsed command-line arguments

        Returns:
            a :class:`~fairseq.models.BaseFairseqModel` instance
        """
        from fairseq import models, quantization_utils

        built = models.build_model(args, self)
        return quantization_utils.quantize_model_scalar(built, args)

    def build_criterion(self, args: Namespace):
        """Build the :class:`~fairseq.criterions.FairseqCriterion` for this task.

        Args:
            args (argparse.Namespace): parsed command-line arguments

        Returns:
            a :class:`~fairseq.criterions.FairseqCriterion` instance
        """
        from fairseq import criterions

        return criterions.build_criterion(args, self)
| 26,195 | 37.020319 | 110 | py |
CIF-HieraDist | CIF-HieraDist-main/fairseq/tasks/speech_to_text.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import logging
from pathlib import Path
from argparse import Namespace
import torch
import torch.nn.functional as F
import numpy as np
import os.path as op
from fairseq.data import Dictionary, encoders
from fairseq.data.audio.speech_to_text_dataset import (
S2TDataConfig,
SpeechToTextDataset,
SpeechToTextDatasetCreator,
get_features_or_waveform,
)
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("speech_to_text")
class SpeechToTextTask(LegacyFairseqTask):
    """Speech-to-text task: map audio inputs to target-text sequences."""

    @classmethod
    def add_args(cls, parser):
        """Add task-specific arguments to the parser."""
        parser.add_argument("data", help="manifest root path")
        parser.add_argument(
            "--config-yaml",
            type=str,
            default="config.yaml",
            help="Configuration YAML filename (under manifest root)",
        )
        parser.add_argument(
            "--max-source-positions",
            default=6000,
            type=int,
            metavar="N",
            help="max number of tokens in the source sequence",
        )
        parser.add_argument(
            "--max-target-positions",
            default=1024,
            type=int,
            metavar="N",
            help="max number of tokens in the target sequence",
        )

    def __init__(self, args, tgt_dict):
        super().__init__(args)
        self.tgt_dict = tgt_dict
        self.data_cfg = S2TDataConfig(Path(args.data) / args.config_yaml)
        self.speaker_to_id = self._get_speaker_to_id()

    def _get_speaker_to_id(self):
        """Map speaker names to contiguous ids if a speaker set file is configured."""
        speaker_to_id = None
        speaker_set_filename = self.data_cfg.config.get("speaker_set_filename")
        if speaker_set_filename is not None:
            speaker_set_path = Path(self.args.data) / speaker_set_filename
            with open(speaker_set_path) as f:
                speaker_to_id = {
                    r.strip(): i for i, r in enumerate(f)
                }  # key is absolute id, and value is relative id
        return speaker_to_id

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Load the data config and target dictionary, then build the task.

        Raises:
            FileNotFoundError: if the vocabulary file is missing.
            ValueError: if a train subset is not named like "train*".
        """
        data_cfg = S2TDataConfig(Path(args.data) / args.config_yaml)
        dict_path = Path(args.data) / data_cfg.vocab_filename
        if not dict_path.is_file():
            raise FileNotFoundError(f"Dict not found: {dict_path.as_posix()}")
        tgt_dict = Dictionary.load(dict_path.as_posix())
        logger.info(
            f"dictionary size ({data_cfg.vocab_filename}): " f"{len(tgt_dict):,}"
        )
        if getattr(args, "train_subset", None) is not None:
            if not all(s.startswith("train") for s in args.train_subset.split(",")):
                raise ValueError('Train splits should be named like "train*".')
        return cls(args, tgt_dict)

    def build_criterion(self, args):
        """Build the criterion; enforce prefix handling when lang tags are prepended."""
        from fairseq import criterions

        if self.data_cfg.prepend_tgt_lang_tag and args.ignore_prefix_size != 1:
            raise ValueError(
                'Please set "--ignore-prefix-size 1" since '
                "target language ID token is prepended as BOS."
            )
        return criterions.build_criterion(args, self)

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load one data split from the TSV manifests under the data root."""
        is_train_split = split.startswith("train")
        pre_tokenizer = self.build_tokenizer(self.args)
        bpe_tokenizer = self.build_bpe(self.args)
        self.datasets[split] = SpeechToTextDatasetCreator.from_tsv(
            self.args.data,
            self.data_cfg,
            split,
            self.tgt_dict,
            pre_tokenizer,
            bpe_tokenizer,
            is_train_split=is_train_split,
            epoch=epoch,
            seed=self.args.seed,
            speaker_to_id=self.speaker_to_id,
        )

    @property
    def target_dictionary(self):
        """The target-text :class:`~fairseq.data.Dictionary`."""
        return self.tgt_dict

    @property
    def source_dictionary(self):
        # Source side is raw audio; there is no source dictionary.
        return None

    def max_positions(self):
        """Return (max audio frames, max target tokens)."""
        return self.args.max_source_positions, self.args.max_target_positions

    # Method borrowed from text_to_speech.py
    def get_speaker_embeddings_path(self):
        """Path to the speaker-embedding file, or None if not configured."""
        speaker_emb_path = None
        if self.data_cfg.config.get("speaker_emb_filename") is not None:
            speaker_emb_path = op.join(
                self.args.data, self.data_cfg.config.get("speaker_emb_filename")
            )
        return speaker_emb_path

    # Method borrowed from text_to_speech.py
    @classmethod
    def get_speaker_embeddings(cls, args):
        """Build a speaker-embedding table, optionally loaded (frozen) from .npy."""
        embed_speaker = None
        if args.speaker_to_id is not None:
            if args.speaker_emb_path is None:
                embed_speaker = torch.nn.Embedding(
                    len(args.speaker_to_id), args.speaker_embed_dim
                )
            else:
                speaker_emb_mat = np.load(args.speaker_emb_path)
                assert speaker_emb_mat.shape[1] == args.speaker_embed_dim
                embed_speaker = torch.nn.Embedding.from_pretrained(
                    torch.from_numpy(speaker_emb_mat),
                    freeze=True,
                )
                logger.info(
                    f"load speaker embeddings from {args.speaker_emb_path}. "
                    f"train embedding? {embed_speaker.weight.requires_grad}\n"
                    f"embeddings:\n{speaker_emb_mat}"
                )
        return embed_speaker

    def build_model(self, args):
        """Inject data-config derived fields into *args* before building the model."""
        args.input_feat_per_channel = self.data_cfg.input_feat_per_channel
        args.input_channels = self.data_cfg.input_channels
        args.speaker_to_id = self.speaker_to_id
        args.speaker_emb_path = self.get_speaker_embeddings_path()
        return super(SpeechToTextTask, self).build_model(args)

    def build_generator(
        self,
        models,
        args,
        seq_gen_cls=None,
        extra_gen_cls_kwargs=None,
    ):
        """Build a generator that strips language-tag tokens from the output."""
        if self.data_cfg.prepend_tgt_lang_tag and args.prefix_size != 1:
            raise ValueError(
                'Please set "--prefix-size 1" since '
                "target language ID token is prepended as BOS."
            )
        lang_token_ids = {
            i
            for s, i in self.tgt_dict.indices.items()
            if SpeechToTextDataset.is_lang_tag(s)
        }
        if extra_gen_cls_kwargs is None:
            extra_gen_cls_kwargs = {}
        extra_gen_cls_kwargs["symbols_to_strip_from_output"] = lang_token_ids
        # Fix: forward the caller-provided seq_gen_cls instead of discarding it
        # (the old code hard-coded seq_gen_cls=None).
        return super().build_generator(
            models,
            args,
            seq_gen_cls=seq_gen_cls,
            extra_gen_cls_kwargs=extra_gen_cls_kwargs,
        )

    def build_tokenizer(self, args):
        """Build the pre-tokenizer declared in the data config."""
        logger.info(f"pre-tokenizer: {self.data_cfg.pre_tokenizer}")
        return encoders.build_tokenizer(Namespace(**self.data_cfg.pre_tokenizer))

    def build_bpe(self, args):
        """Build the subword tokenizer declared in the data config."""
        logger.info(f"tokenizer: {self.data_cfg.bpe_tokenizer}")
        return encoders.build_bpe(Namespace(**self.data_cfg.bpe_tokenizer))

    def get_interactive_tokens_and_lengths(self, lines, encode_fn):
        """Interactive inputs are audio paths; lengths are their frame counts."""
        n_frames = [get_features_or_waveform(p).shape[0] for p in lines]
        return lines, n_frames

    def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
        """Wrap raw audio inputs in a SpeechToTextDataset for inference."""
        return SpeechToTextDataset(
            "interactive", False, self.data_cfg, src_tokens, src_lengths
        )
| 7,517 | 34.462264 | 85 | py |
CIF-HieraDist | CIF-HieraDist-main/fairseq/tasks/audio_finetuning.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
import os
import torch
import json
from argparse import Namespace
from dataclasses import dataclass, field
from typing import Optional, Any
from fairseq.data import AddTargetDataset, Dictionary, encoders
from fairseq.tasks.audio_pretraining import AudioPretrainingTask, AudioPretrainingConfig
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.configs import GenerationConfig
from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel
from . import register_task
from .. import utils
from ..logging import metrics
logger = logging.getLogger(__name__)
class LabelEncoder(object):
    """Callable that turns a raw label line into dictionary token indices."""

    def __init__(self, dictionary):
        self.dictionary = dictionary

    def __call__(self, label):
        encoded = self.dictionary.encode_line(
            label, append_eos=False, add_if_not_exist=False
        )
        return encoded
def label_len_fn(label):
    """Length of *label* in space-delimited fields.

    Equivalent to ``len(label.split(" "))``: every single space starts a new
    field, so consecutive spaces yield empty fields that are still counted.
    """
    return label.count(" ") + 1
@dataclass
class AudioFinetuningConfig(AudioPretrainingConfig):
    """Config for fine-tuning on labeled audio; extends the pretraining config."""

    # Options for reporting WER metrics during validation. Only applicable to
    # Seq2Seq models during fine-tuning
    eval_wer: bool = field(
        default=False, metadata={"help": "compute WER for Seq2Seq models"}
    )
    eval_wer_config: GenerationConfig = field(
        default_factory=lambda: GenerationConfig(),
        metadata={"help": "beam search config for evaluating wer during training"},
    )
    eval_wer_tokenizer: Any = field(
        default=None,
        metadata={"help": "tokenizer config for evaluating wer during training"},
    )
    eval_wer_post_process: str = field(
        default="letter",
        metadata={
            "help": "remove BPE tokens before scoring (can be sentencepiece, letter, and more)"
        },
    )
    eval_bleu: bool = field(
        default=False, metadata={"help": "evaluation with BLEU scores"}
    )
    eval_bleu_detok: Optional[str] = field(
        default=None,
        metadata={
            "help": "detokenize before computing BLEU (e.g., 'moses'); "
            "required if using --eval-bleu; use 'space' to disable "
            "detokenization; see fairseq.data.encoders for other options"
        },
    )
    eval_bleu_detok_args: str = field(
        default="{}", metadata={"help": "args for building the tokenizer, if needed"}
    )
    eval_tokenized_bleu: bool = field(
        default=False, metadata={"help": "compute tokenized BLEU instead of sacrebleu"}
    )
    eval_bleu_remove_bpe: Optional[str] = field(
        default=None, metadata={"help": "remove BPE before computing BLEU"}
    )
    eval_bleu_args: str = field(
        default="{}",
        metadata={
            # Fix: typo "BLUE" -> "BLEU" in user-facing help text.
            "help": "generation args for BLEU scoring, e.g., "
            '\'{"beam": 4, "lenpen": 0.6}\''
        },
    )
    eval_bleu_print_samples: bool = field(
        default=False, metadata={"help": "print sample generations during validation"}
    )
    autoregressive: bool = field(
        default=False,
        metadata={
            "help": "required for autoregressive decoders (like seq2seq models); "
            "adds 'prev_output_tokens' to input and appends eos to target"
        },
    )
@register_task("audio_finetuning", dataclass=AudioFinetuningConfig)
class AudioFinetuningTask(AudioPretrainingTask):
    """Audio pretraining task fine-tuned with text labels (e.g. for ASR)."""

    cfg: AudioFinetuningConfig

    def __init__(
        self,
        cfg: AudioFinetuningConfig,
    ):
        super().__init__(cfg)
        self.blank_symbol = "<s>"
        # Target dictionary is built lazily through the task state.
        self.state.add_factory("target_dictionary", self.load_target_dictionary)

    def load_target_dictionary(self):
        """Load ``dict.<labels>.txt`` from the data dir; None when no labels set."""
        if self.cfg.labels:
            dict_path = os.path.join(self.cfg.data, f"dict.{self.cfg.labels}.txt")
            return Dictionary.load(dict_path)
        return None

    def load_dataset(
        self, split: str, task_cfg: AudioFinetuningConfig = None, **kwargs
    ):
        """Load the audio split, then attach encoded target labels to it."""
        super().load_dataset(split, task_cfg, **kwargs)

        task_cfg = task_cfg or self.cfg
        assert task_cfg.labels is not None
        text_compression_level = getattr(
            TextCompressionLevel, str(self.cfg.text_compression_level)
        )
        data_path = self.cfg.data
        label_path = os.path.join(data_path, f"{split}.{task_cfg.labels}")
        # Rows skipped while loading audio must also be skipped in the labels.
        skipped_indices = getattr(self.datasets[split], "skipped_indices", set())
        text_compressor = TextCompressor(level=text_compression_level)
        with open(label_path, "r") as f:
            labels = [
                text_compressor.compress(l)
                for i, l in enumerate(f)
                if i not in skipped_indices
            ]

        assert len(labels) == len(self.datasets[split]), (
            f"labels length ({len(labels)}) and dataset length "
            f"({len(self.datasets[split])}) do not match"
        )

        process_label = LabelEncoder(self.target_dictionary)

        self.datasets[split] = AddTargetDataset(
            self.datasets[split],
            labels,
            pad=self.target_dictionary.pad(),
            eos=self.target_dictionary.eos(),
            batch_targets=True,
            process_label=process_label,
            label_len_fn=label_len_fn,
            add_to_input=task_cfg.get("autoregressive", False),
            text_compression_level=text_compression_level,
        )

    @property
    def target_dictionary(self):
        """Return the :class:`~fairseq.data.Dictionary` for the language
        model."""
        return self.state.target_dictionary

    def valid_step(self, sample, model, criterion):
        """Validate and, when configured, add WER/BLEU counts to the log output."""
        loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
        if self.cfg.eval_wer and self.cfg.autoregressive:
            # Renamed from `metrics` to avoid shadowing the imported
            # fairseq.logging.metrics module used elsewhere in this class.
            wer_out = self._inference_with_wer(self.sequence_generator, sample, model)
            logging_output["_num_char_errors"] = wer_out["num_char_errors"]
            logging_output["_num_chars"] = wer_out["num_chars"]
            logging_output["_num_word_errors"] = wer_out["num_word_errors"]
            logging_output["_num_words"] = wer_out["num_words"]
        if self.cfg.eval_bleu and self.cfg.autoregressive:
            bleu_out = self._inference_with_bleu(self.sequence_generator, sample, model)
            logging_output["_bleu_sys_len"] = bleu_out.sys_len
            logging_output["_bleu_ref_len"] = bleu_out.ref_len
            # we split counts into separate entries so that they can be
            # summed efficiently across workers using fast-stat-sync
            assert len(bleu_out.counts) == 4
            for i in range(4):
                logging_output[f"_bleu_counts_{i}"] = bleu_out.counts[i]
                logging_output[f"_bleu_totals_{i}"] = bleu_out.totals[i]
        return loss, sample_size, logging_output

    def build_model(self, model_cfg: FairseqDataclass):
        """Build the model and set up WER/BLEU evaluation helpers if requested."""
        model = super().build_model(model_cfg)

        if self.cfg.eval_wer and self.cfg.autoregressive:
            self.sequence_generator = self.build_generator(
                [model],
                self.cfg.eval_wer_config,
            )
            if self.cfg.eval_wer_tokenizer:
                self.tokenizer = encoders.build_tokenizer(self.cfg.eval_wer_tokenizer)
            else:
                self.tokenizer = None
        if self.cfg.eval_bleu and self.cfg.autoregressive:
            assert self.cfg.eval_bleu_detok is not None, (
                "--eval-bleu-detok is required if using --eval-bleu; "
                "try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
                "to disable detokenization, e.g., when using sentencepiece)"
            )
            detok_args = json.loads(self.cfg.eval_bleu_detok_args)
            self.tokenizer = encoders.build_tokenizer(
                Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args)
            )
            gen_args = json.loads(self.cfg.eval_bleu_args)
            gen_args = Namespace(**gen_args)
            self.sequence_generator = self.build_generator([model], gen_args)

        return model

    def _inference_with_wer(self, generator, sample, model):
        """Decode *sample* and count char/word edit errors against references."""
        import editdistance

        def decode(toks):
            s = self.target_dictionary.string(
                toks.int().cpu(),
                self.cfg.eval_wer_post_process,
                escape_unk=True,
            )
            if self.tokenizer:
                s = self.tokenizer.decode(s)
            return s

        num_word_errors, num_char_errors = 0, 0
        num_chars, num_words = 0, 0
        gen_out = self.inference_step(generator, [model], sample, None)
        for i in range(len(gen_out)):
            hyp = decode(gen_out[i][0]["tokens"])
            ref = decode(
                utils.strip_pad(sample["target"][i], self.target_dictionary.pad()),
            )
            num_char_errors += editdistance.eval(hyp, ref)
            num_chars += len(ref)
            hyp_words = hyp.split()
            ref_words = ref.split()
            num_word_errors += editdistance.eval(hyp_words, ref_words)
            num_words += len(ref_words)
        return {
            "num_char_errors": num_char_errors,
            "num_chars": num_chars,
            "num_word_errors": num_word_errors,
            "num_words": num_words,
        }

    def _inference_with_bleu(self, generator, sample, model):
        """Decode *sample* and score it with sacrebleu against the references."""
        import sacrebleu

        def decode(toks, is_ref):
            s = self.target_dictionary.string(
                toks.int().cpu(),
                self.cfg.eval_bleu_remove_bpe,
                # The default unknown string in fairseq is `<unk>`, but
                # this is tokenized by sacrebleu as `< unk >`, inflating
                # BLEU scores. Instead, we use a somewhat more verbose
                # alternative that is unlikely to appear in the real
                # reference, but doesn't get split into multiple tokens.
                unk_string=("UNKNOWNTOKENINREF" if is_ref else "UNKNOWNTOKENINHYP"),
            )
            if self.tokenizer:
                s = self.tokenizer.decode(s)
            return s

        gen_out = self.inference_step(generator, [model], sample)
        hyps, refs = [], []
        for i in range(len(gen_out)):
            hyps.append(decode(gen_out[i][0]["tokens"], is_ref=False))
            refs.append(
                decode(
                    utils.strip_pad(sample["target"][i], self.target_dictionary.pad()),
                    is_ref=True,  # don't count <unk> as matches to the hypo
                )
            )
        if self.cfg.eval_bleu_print_samples:
            logger.info("H-{} {}".format(sample["id"][0], hyps[0]))
            logger.info("T-{} {}".format(sample["id"][0], refs[0]))

        eval_tokenization = "none" if self.cfg.eval_tokenized_bleu else "13a"
        return sacrebleu.corpus_bleu(hyps, [refs], tokenize=eval_tokenization)

    def reduce_metrics(self, logging_outputs, criterion):
        """Aggregate base metrics plus derived uer/wer/bleu across workers."""
        super().reduce_metrics(logging_outputs, criterion)

        if self.cfg.eval_wer:
            zero = torch.scalar_tensor(0.0)
            num_char_errors = sum(
                log.get("_num_char_errors", zero) for log in logging_outputs
            )
            num_chars = sum(log.get("_num_chars", zero) for log in logging_outputs)
            num_word_errors = sum(
                log.get("_num_word_errors", zero) for log in logging_outputs
            )
            num_words = sum(log.get("_num_words", zero) for log in logging_outputs)
            metrics.log_scalar("_num_char_errors", num_char_errors)
            metrics.log_scalar("_num_chars", num_chars)
            metrics.log_scalar("_num_word_errors", num_word_errors)
            metrics.log_scalar("_num_words", num_words)
            if num_chars > 0:
                metrics.log_derived(
                    "uer",
                    lambda meters: meters["_num_char_errors"].sum
                    * 100.0
                    / meters["_num_chars"].sum
                    if meters["_num_chars"].sum > 0
                    else float("nan"),
                )
            if num_words > 0:
                metrics.log_derived(
                    "wer",
                    lambda meters: meters["_num_word_errors"].sum
                    * 100.0
                    / meters["_num_words"].sum
                    if meters["_num_words"].sum > 0
                    else float("nan"),
                )
        if self.cfg.eval_bleu:
            len_keys = ["_bleu_sys_len", "_bleu_ref_len"]
            count_keys = [f"_bleu_counts_{i}" for i in range(4)]
            total_keys = [f"_bleu_totals_{i}" for i in range(4)]
            for k in len_keys + count_keys + total_keys:
                metrics.log_scalar(k, sum(log.get(k, 0) for log in logging_outputs))

            import sacrebleu

            metrics.log_derived(
                "bleu",
                lambda meters: sacrebleu.compute_bleu(
                    correct=[meters[k].sum for k in count_keys],
                    total=[meters[k].sum for k in total_keys],
                    sys_len=meters["_bleu_sys_len"].sum,
                    ref_len=meters["_bleu_ref_len"].sum,
                    smooth_method="exp",
                ).score,
            )
| 13,463 | 38.139535 | 95 | py |
CIF-HieraDist | CIF-HieraDist-main/fairseq/tasks/translation_multi_simple_epoch.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import logging
import time
import torch
from fairseq.data import (
FairseqDataset,
LanguagePairDataset,
ListDataset,
data_utils,
iterators,
)
from fairseq.data.multilingual.multilingual_data_manager import (
MultilingualDatasetManager,
)
from fairseq.data.multilingual.sampling_method import SamplingMethod
from fairseq.tasks import LegacyFairseqTask, register_task
from fairseq.utils import FileContentsAction
###
def get_time_gap(s, e):
    """Human-readable elapsed time between epoch timestamps *s* and *e*."""
    start = datetime.datetime.fromtimestamp(s)
    end = datetime.datetime.fromtimestamp(e)
    return str(end - start)
###
logger = logging.getLogger(__name__)
@register_task("translation_multi_simple_epoch")
class TranslationMultiSimpleEpochTask(LegacyFairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
langs (List[str]): a list of languages that are being supported
dicts (Dict[str, fairseq.data.Dictionary]): mapping from supported languages to their dictionaries
training (bool): whether the task should be configured for training or not
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
    """Add task-specific arguments to the parser."""
    # fmt: off
    parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
                        help='inference source language')
    parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
                        help='inference target language')
    parser.add_argument('--lang-pairs', default=None, metavar='PAIRS',
                        help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr',
                        action=FileContentsAction)
    parser.add_argument('--keep-inference-langtok', action='store_true',
                        help='keep language tokens in inference output (e.g. for analysis or debugging)')
    # Sampling and multilingual-data options are contributed by their helpers.
    SamplingMethod.add_arguments(parser)
    MultilingualDatasetManager.add_args(parser)
    # fmt: on
def __init__(self, args, langs, dicts, training):
    """Initialize the multilingual task.

    Args:
        args (argparse.Namespace): parsed command-line arguments
        langs (List[str]): supported languages
        dicts (Dict[str, Dictionary]): language -> dictionary mapping
        training (bool): True when configured for training, False for inference
    """
    super().__init__(args)
    self.langs = langs
    self.dicts = dicts
    self.training = training
    if training:
        self.lang_pairs = args.lang_pairs
    else:
        # Inference: a single pair built from --source-lang/--target-lang.
        self.lang_pairs = ["{}-{}".format(args.source_lang, args.target_lang)]
    # eval_lang_pairs for multilingual translation is usually all of the
    # lang_pairs. However for other multitask settings or when we want to
    # optimize for certain languages we want to use a different subset. Thus
    # the eval_lang_pairs class variable is provided for classes that extend
    # this class.
    self.eval_lang_pairs = self.lang_pairs
    # model_lang_pairs will be used to build encoder-decoder model pairs in
    # models.build_model(). This allows multitask type of sub-class can
    # build models other than the input lang_pairs
    self.model_lang_pairs = self.lang_pairs
    self.source_langs = [d.split("-")[0] for d in self.lang_pairs]
    self.target_langs = [d.split("-")[1] for d in self.lang_pairs]
    self.check_dicts(self.dicts, self.source_langs, self.target_langs)
    self.sampling_method = SamplingMethod.build_sampler(args, self)
    self.data_manager = MultilingualDatasetManager.setup_data_manager(
        args, self.lang_pairs, langs, dicts, self.sampling_method
    )
def check_dicts(self, dicts, source_langs, target_langs):
    """Verify all source (and all target) languages share one dictionary.

    Skipped when explicit --source-dict/--target-dict are given, since
    separate dictionaries are then intentional.

    Raises:
        AssertionError: if two source (or two target) languages were given
            different dictionaries.
    """
    if self.args.source_dict is not None or self.args.target_dict is not None:
        # no need to check whether the source side and target side are sharing dictionaries
        return
    src_dict = dicts[source_langs[0]]
    tgt_dict = dicts[target_langs[0]]
    for src_lang in source_langs:
        # Fix: the message used to be split into two statements, leaving the
        # second string literal as a dead no-op expression; it is now one
        # parenthesized assertion message. Also fixed the "Diffrent" typo.
        assert src_dict == dicts[src_lang], (
            "Different dictionaries are specified for different source languages; "
            "TranslationMultiSimpleEpochTask only supports one shared dictionary "
            "across all source languages"
        )
    for tgt_lang in target_langs:
        assert tgt_dict == dicts[tgt_lang], (
            "Different dictionaries are specified for different target languages; "
            "TranslationMultiSimpleEpochTask only supports one shared dictionary "
            "across all target languages"
        )
@classmethod
def setup_task(cls, args, **kwargs):
    """Prepare languages/dictionaries via the data manager and build the task."""
    langs, dicts, training = MultilingualDatasetManager.prepare(
        cls.load_dictionary, args, **kwargs
    )
    return cls(args, langs, dicts, training)
def has_sharded_data(self, split):
    """Whether *split* is stored in multiple shards (per the data manager)."""
    sharded = self.data_manager.has_sharded_data(split)
    return sharded
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
    """Load a given dataset split.

    Args:
        split (str): name of the split (e.g., train, valid, test)
    """
    # Fix: initialize shard_epoch so the "split cached but not sharded"
    # path below does not hit an UnboundLocalError at the log line.
    shard_epoch = None
    if split in self.datasets:
        dataset = self.datasets[split]
        if self.has_sharded_data(split):
            if self.args.virtual_epoch_size is not None:
                if dataset.load_next_shard:
                    shard_epoch = dataset.shard_epoch
                else:
                    # no need to load next shard so skip loading
                    # also this avoid always loading from beginning of the data
                    return
            else:
                shard_epoch = epoch
    else:
        # estimate the shard epoch from virtual data size and virtual epoch size
        shard_epoch = self.data_manager.estimate_global_pass_epoch(epoch)
    logger.info(f"loading data for {split} epoch={epoch}/{shard_epoch}")
    logger.info(f"mem usage: {data_utils.get_mem_usage()}")
    if split in self.datasets:
        # Free the stale dataset before loading the next shard.
        del self.datasets[split]
        logger.info("old dataset deleted manually")
        logger.info(f"mem usage: {data_utils.get_mem_usage()}")
    self.datasets[split] = self.data_manager.load_dataset(
        split,
        self.training,
        epoch=epoch,
        combine=combine,
        shard_epoch=shard_epoch,
        **kwargs,
    )
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
    """Wrap pre-tokenized sources in a LanguagePairDataset with language tokens.

    Raises:
        NotImplementedError: if *constraints* are provided (unsupported here).
    """
    if constraints is not None:
        raise NotImplementedError(
            "Constrained decoding with the multilingual_translation task is not supported"
        )

    src_data = ListDataset(src_tokens, src_lengths)
    dataset = LanguagePairDataset(src_data, src_lengths, self.source_dictionary)
    src_langtok_spec, tgt_langtok_spec = self.args.langtoks["main"]
    if self.args.lang_tok_replacing_bos_eos:
        # Language tokens replace the BOS/EOS markers on both sides.
        dataset = self.data_manager.alter_dataset_langtok(
            dataset,
            src_eos=self.source_dictionary.eos(),
            src_lang=self.args.source_lang,
            tgt_eos=self.target_dictionary.eos(),
            tgt_lang=self.args.target_lang,
            src_langtok_spec=src_langtok_spec,
            tgt_langtok_spec=tgt_langtok_spec,
        )
    else:
        # Otherwise only the source side is transformed to carry its lang token.
        dataset.src = self.data_manager.src_dataset_tranform_func(
            self.args.source_lang,
            self.args.target_lang,
            dataset=dataset.src,
            spec=src_langtok_spec,
        )
    return dataset
def build_generator(
self,
models,
args,
seq_gen_cls=None,
extra_gen_cls_kwargs=None,
):
if not getattr(args, "keep_inference_langtok", False):
_, tgt_langtok_spec = self.args.langtoks["main"]
if tgt_langtok_spec:
tgt_lang_tok = self.data_manager.get_decoder_langtok(
self.args.target_lang, tgt_langtok_spec
)
extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}
extra_gen_cls_kwargs["symbols_to_strip_from_output"] = {tgt_lang_tok}
return super().build_generator(
models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
def build_model(self, args):
return super().build_model(args)
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
return loss, sample_size, logging_output
    def inference_step(
        self, generator, models, sample, prefix_tokens=None, constraints=None
    ):
        """Generate hypotheses for ``sample`` with target-language-token handling.

        When BOS/EOS are not replaced by language tokens, the target language
        token is forced as the first generated token via ``prefix_tokens``;
        otherwise it is supplied as the generator's ``bos_token``.
        """
        with torch.no_grad():
            _, tgt_langtok_spec = self.args.langtoks["main"]
            if not self.args.lang_tok_replacing_bos_eos:
                # Force the target-language token as the first decoded token
                # unless the caller already supplied a prefix.
                if prefix_tokens is None and tgt_langtok_spec:
                    tgt_lang_tok = self.data_manager.get_decoder_langtok(
                        self.args.target_lang, tgt_langtok_spec
                    )
                    src_tokens = sample["net_input"]["src_tokens"]
                    bsz = src_tokens.size(0)
                    prefix_tokens = (
                        torch.LongTensor([[tgt_lang_tok]]).expand(bsz, 1).to(src_tokens)
                    )
                return generator.generate(
                    models,
                    sample,
                    prefix_tokens=prefix_tokens,
                    constraints=constraints,
                )
            else:
                # BOS was replaced by a language token at data time, so pass
                # the appropriate symbol as the generation BOS instead.
                return generator.generate(
                    models,
                    sample,
                    prefix_tokens=prefix_tokens,
                    bos_token=self.data_manager.get_decoder_langtok(
                        self.args.target_lang, tgt_langtok_spec
                    )
                    if tgt_langtok_spec
                    else self.target_dictionary.eos(),
                )
    def reduce_metrics(self, logging_outputs, criterion):
        """Aggregate logging outputs across workers via the parent implementation."""
        super().reduce_metrics(logging_outputs, criterion)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
    @property
    def source_dictionary(self):
        # Dictionary of the first configured source language (shared lookup
        # goes through the data manager).
        return self.data_manager.get_source_dictionary(self.source_langs[0])
    @property
    def target_dictionary(self):
        # Dictionary of the first configured target language (shared lookup
        # goes through the data manager).
        return self.data_manager.get_target_dictionary(self.target_langs[0])
    def create_batch_sampler_func(
        self,
        max_positions,
        ignore_invalid_inputs,
        max_tokens,
        max_sentences,
        required_batch_size_multiple=1,
        seed=1,
    ):
        """Return a closure that builds a batch sampler for a dataset/epoch.

        The closure orders indices by size, filters over-long examples, and
        groups the rest into size-constrained mini-batches, logging timing
        and memory usage at each stage.
        """
        def construct_batch_sampler(dataset, epoch):
            # Recover the split name for logging only (None if unknown).
            splits = [
                s for s, _ in self.datasets.items() if self.datasets[s] == dataset
            ]
            split = splits[0] if len(splits) > 0 else None
            # NEW implementation
            if epoch is not None:
                # initialize the dataset with the correct starting epoch
                dataset.set_epoch(epoch)
            # get indices ordered by example size
            start_time = time.time()
            logger.info(f"start batch sampler: mem usage: {data_utils.get_mem_usage()}")
            with data_utils.numpy_seed(seed):
                indices = dataset.ordered_indices()
            logger.info(
                f"[{split}] @batch_sampler order indices time: {get_time_gap(start_time, time.time())}"
            )
            logger.info(f"mem usage: {data_utils.get_mem_usage()}")
            # filter examples that are too large
            if max_positions is not None:
                my_time = time.time()
                indices = self.filter_indices_by_size(
                    indices, dataset, max_positions, ignore_invalid_inputs
                )
                logger.info(
                    f"[{split}] @batch_sampler filter_by_size time: {get_time_gap(my_time, time.time())}"
                )
                logger.info(f"mem usage: {data_utils.get_mem_usage()}")
            # create mini-batches with given size constraints
            my_time = time.time()
            batch_sampler = dataset.batch_by_size(
                indices,
                max_tokens=max_tokens,
                max_sentences=max_sentences,
                required_batch_size_multiple=required_batch_size_multiple,
            )
            logger.info(
                f"[{split}] @batch_sampler batch_by_size time: {get_time_gap(my_time, time.time())}"
            )
            logger.info(
                f"[{split}] per epoch batch_sampler set-up time: {get_time_gap(start_time, time.time())}"
            )
            logger.info(f"mem usage: {data_utils.get_mem_usage()}")
            return batch_sampler
        return construct_batch_sampler
    # we need to override get_batch_iterator because we want to reset the epoch iterator each time
    def get_batch_iterator(
        self,
        dataset,
        max_tokens=None,
        max_sentences=None,
        max_positions=None,
        ignore_invalid_inputs=False,
        required_batch_size_multiple=1,
        seed=1,
        num_shards=1,
        shard_id=0,
        num_workers=0,
        epoch=1,
        data_buffer_size=0,
        disable_iterator_cache=False,
        grouped_shuffling=False,
        update_epoch_batch_itr=False,
    ):
        """
        Get an iterator that yields batches of data from the given dataset.
        Args:
            dataset (~fairseq.data.FairseqDataset): dataset to batch
            max_tokens (int, optional): max number of tokens in each batch
                (default: None).
            max_sentences (int, optional): max number of sentences in each
                batch (default: None).
            max_positions (optional): max sentence length supported by the
                model (default: None).
            ignore_invalid_inputs (bool, optional): don't raise Exception for
                sentences that are too long (default: False).
            required_batch_size_multiple (int, optional): require batch size to
                be a multiple of N (default: 1).
            seed (int, optional): seed for random number generator for
                reproducibility (default: 1).
            num_shards (int, optional): shard the data iterator into N
                shards (default: 1).
            shard_id (int, optional): which shard of the data iterator to
                return (default: 0).
            num_workers (int, optional): how many subprocesses to use for data
                loading. 0 means the data will be loaded in the main process
                (default: 0).
            epoch (int, optional): the epoch to start the iterator from
                (default: 0).
            data_buffer_size (int, optional): number of batches to
                preload (default: 0).
            disable_iterator_cache (bool, optional): don't cache the
                EpochBatchIterator (ignores `FairseqTask::can_reuse_epoch_itr`)
                (default: False).
            grouped_shuffling (bool, optional): group batches with each groups
                containing num_shards batches and shuffle groups. Reduces difference
                between sequence lengths among workers for batches sorted by length.
            update_epoch_batch_itr (bool optional): if true then donot use the cached
                batch iterator for the epoch
        Returns:
            ~fairseq.iterators.EpochBatchIterator: a batched iterator over the
            given dataset split
        """
        # initialize the dataset with the correct starting epoch
        assert isinstance(dataset, FairseqDataset)
        # Reuse a previously built iterator for this dataset if available.
        if dataset in self.dataset_to_epoch_iter:
            return self.dataset_to_epoch_iter[dataset]
        if self.args.sampling_method == "RoundRobin":
            # RoundRobin sampling uses the stock iterator and caches it.
            batch_iter = super().get_batch_iterator(
                dataset,
                max_tokens=max_tokens,
                max_sentences=max_sentences,
                max_positions=max_positions,
                ignore_invalid_inputs=ignore_invalid_inputs,
                required_batch_size_multiple=required_batch_size_multiple,
                seed=seed,
                num_shards=num_shards,
                shard_id=shard_id,
                num_workers=num_workers,
                epoch=epoch,
                data_buffer_size=data_buffer_size,
                disable_iterator_cache=disable_iterator_cache,
                update_epoch_batch_itr=update_epoch_batch_itr,
            )
            self.dataset_to_epoch_iter[dataset] = batch_iter
            return batch_iter
        # Otherwise the sampler closure is rebuilt per epoch, so the iterator
        # is intentionally NOT cached in dataset_to_epoch_iter.
        construct_batch_sampler = self.create_batch_sampler_func(
            max_positions,
            ignore_invalid_inputs,
            max_tokens,
            max_sentences,
            required_batch_size_multiple=required_batch_size_multiple,
            seed=seed,
        )
        epoch_iter = iterators.EpochBatchIterator(
            dataset=dataset,
            collate_fn=dataset.collater,
            batch_sampler=construct_batch_sampler,
            seed=seed,
            num_shards=num_shards,
            shard_id=shard_id,
            num_workers=num_workers,
            epoch=epoch,
        )
        return epoch_iter
| 17,791 | 39.436364 | 113 | py |
CIF-HieraDist | CIF-HieraDist-main/docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# fairseq documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 17 21:45:30 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
from fairseq import __version__
# source code directory, relative to this file, for sphinx-autobuild
sys.path.insert(0, os.path.abspath(".."))
source_suffix = [".rst"]
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.intersphinx",
    "sphinx.ext.viewcode",
    "sphinx.ext.napoleon",
    "sphinxarg.ext",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "fairseq"
copyright = "Facebook AI Research (FAIR)"
author = "Facebook AI Research (FAIR)"
github_doc_root = "https://github.com/pytorch/fairseq/tree/main/docs/"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx versions warn on language=None and expect "en";
# confirm the pinned Sphinx version before changing.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
highlight_language = "python"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_context = {
    "css_files": [
        "_static/theme_overrides.css",  # override wide tables in RTD theme
    ],
}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# html_sidebars = {
#     '**': [
#         'about.html',
#         'navigation.html',
#         'relations.html',  # needs 'show_related': True theme option to display
#         'searchbox.html',
#         'donate.html',
#     ]
# }
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    "numpy": ("http://docs.scipy.org/doc/numpy/", None),
    "python": ("https://docs.python.org/", None),
    "torch": ("https://pytorch.org/docs/master/", None),
}
| 4,270 | 30.637037 | 80 | py |
CIF-HieraDist | CIF-HieraDist-main/fairseq_cli/generate.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate pre-processed data with a trained model.
"""
import ast
import logging
import math
import os
import sys
from argparse import Namespace
from itertools import chain
import numpy as np
import torch
from fairseq import checkpoint_utils, options, scoring, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from omegaconf import DictConfig
def main(cfg: DictConfig):
    """Validate the generation config and dispatch to :func:`_main`.

    Writes to ``generate-<subset>.txt`` under ``--results-path`` when given,
    otherwise to stdout. Returns whatever :func:`_main` returns (the scorer).
    """
    if isinstance(cfg, Namespace):
        cfg = convert_namespace_to_omegaconf(cfg)
    assert cfg.common_eval.path is not None, "--path required for generation!"
    assert (
        not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam
    ), "--sampling requires --nbest to be equal to --beam"
    assert (
        cfg.generation.replace_unk is None or cfg.dataset.dataset_impl == "raw"
    ), "--replace-unk requires a raw text dataset (--dataset-impl=raw)"
    if cfg.common_eval.results_path is not None:
        os.makedirs(cfg.common_eval.results_path, exist_ok=True)
        output_path = os.path.join(
            cfg.common_eval.results_path,
            "generate-{}.txt".format(cfg.dataset.gen_subset),
        )
        # Line-buffered so partial results survive an interrupted run.
        with open(output_path, "w", buffering=1, encoding="utf-8") as h:
            return _main(cfg, h)
    else:
        return _main(cfg, sys.stdout)
def get_symbols_to_strip_from_output(generator):
    """Return the token ids the generator wants removed from final output.

    Falls back to a singleton set containing the generator's EOS symbol when
    the generator declares no explicit set.
    """
    try:
        return generator.symbols_to_strip_from_output
    except AttributeError:
        return {generator.eos}
def _main(cfg: DictConfig, output_file):
    """Core generation loop: load models, decode the subset, score, and print.

    All S-/T-/H-/D-/P-/A- lines are written to ``output_file``. Returns the
    scorer holding the aggregate metric (e.g. BLEU/WER).
    """
    logging.basicConfig(
        format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        level=os.environ.get("LOGLEVEL", "INFO").upper(),
        stream=output_file,
    )
    logger = logging.getLogger("fairseq_cli.generate")
    utils.import_user_module(cfg.common)
    if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
        cfg.dataset.max_tokens = 12000
    logger.info(cfg)
    # Fix seed for stochastic decoding
    if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
        np.random.seed(cfg.common.seed)
        utils.set_torch_seed(cfg.common.seed)
    use_cuda = torch.cuda.is_available() and not cfg.common.cpu
    # Load dataset splits
    task = tasks.setup_task(cfg.task)
    # Set dictionaries
    try:
        src_dict = getattr(task, "source_dictionary", None)
    except NotImplementedError:
        src_dict = None
    tgt_dict = task.target_dictionary
    overrides = ast.literal_eval(cfg.common_eval.model_overrides)
    # Load ensemble
    logger.info("loading model(s) from {}".format(cfg.common_eval.path))
    models, saved_cfg = checkpoint_utils.load_model_ensemble(
        utils.split_paths(cfg.common_eval.path),
        arg_overrides=overrides,
        task=task,
        suffix=cfg.checkpoint.checkpoint_suffix,
        strict=(cfg.checkpoint.checkpoint_shard_count == 1),
        num_shards=cfg.checkpoint.checkpoint_shard_count,
    )
    # loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config
    task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task)
    if cfg.generation.lm_path is not None:
        overrides["data"] = cfg.task.data
        try:
            lms, _ = checkpoint_utils.load_model_ensemble(
                [cfg.generation.lm_path], arg_overrides=overrides, task=None
            )
        # NOTE(review): bare except — logs a hint, then re-raises unchanged,
        # so no exception is swallowed here.
        except:
            logger.warning(
                f"Failed to load language model! Please make sure that the language model dict is the same "
                f"as target dict and is located in the data dir ({cfg.task.data})"
            )
            raise
        assert len(lms) == 1
    else:
        lms = [None]
    # Optimize ensemble for generation
    for model in chain(models, lms):
        if model is None:
            continue
        if cfg.common.fp16:
            model.half()
        if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
            model.cuda()
        model.prepare_for_inference_(cfg)
    # Load alignment dictionary for unknown word replacement
    # (None if no unknown word replacement, empty if no path to align dictionary)
    align_dict = utils.load_align_dict(cfg.generation.replace_unk)
    # Load dataset (possibly sharded)
    itr = task.get_batch_iterator(
        dataset=task.dataset(cfg.dataset.gen_subset),
        max_tokens=cfg.dataset.max_tokens,
        max_sentences=cfg.dataset.batch_size,
        max_positions=utils.resolve_max_positions(
            task.max_positions(), *[m.max_positions() for m in models]
        ),
        ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
        seed=cfg.common.seed,
        num_shards=cfg.distributed_training.distributed_world_size,
        shard_id=cfg.distributed_training.distributed_rank,
        num_workers=cfg.dataset.num_workers,
        data_buffer_size=cfg.dataset.data_buffer_size,
    ).next_epoch_itr(shuffle=False)
    progress = progress_bar.progress_bar(
        itr,
        log_format=cfg.common.log_format,
        log_interval=cfg.common.log_interval,
        default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
    )
    # Initialize generator
    gen_timer = StopwatchMeter()
    extra_gen_cls_kwargs = {"lm_model": lms[0], "lm_weight": cfg.generation.lm_weight}
    generator = task.build_generator(
        models, cfg.generation, extra_gen_cls_kwargs=extra_gen_cls_kwargs
    )
    # Handle tokenization and BPE
    tokenizer = task.build_tokenizer(cfg.tokenizer)
    bpe = task.build_bpe(cfg.bpe)
    def decode_fn(x):
        # Undo BPE first, then detokenize (reverse of preprocessing order).
        if bpe is not None:
            x = bpe.decode(x)
        if tokenizer is not None:
            x = tokenizer.decode(x)
        return x
    scorer = scoring.build_scorer(cfg.scoring, tgt_dict)
    num_sentences = 0
    has_target = True
    wps_meter = TimeMeter()
    for sample in progress:
        sample = utils.move_to_cuda(sample) if use_cuda else sample
        if "net_input" not in sample:
            continue
        prefix_tokens = None
        if cfg.generation.prefix_size > 0:
            prefix_tokens = sample["target"][:, : cfg.generation.prefix_size]
        constraints = None
        if "constraints" in sample:
            constraints = sample["constraints"]
        gen_timer.start()
        hypos = task.inference_step(
            generator,
            models,
            sample,
            prefix_tokens=prefix_tokens,
            constraints=constraints,
        )
        num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
        gen_timer.stop(num_generated_tokens)
        for i, sample_id in enumerate(sample["id"].tolist()):
            # NOTE(review): debug separator printed to stdout (not output_file)
            # for every sentence — likely leftover instrumentation; confirm.
            print("_________________________________________________________")
            has_target = sample["target"] is not None
            # Remove padding
            if "src_tokens" in sample["net_input"]:
                src_tokens = utils.strip_pad(
                    sample["net_input"]["src_tokens"][i, :], tgt_dict.pad()
                )
            else:
                src_tokens = None
            target_tokens = None
            if has_target:
                target_tokens = (
                    utils.strip_pad(sample["target"][i, :], tgt_dict.pad()).int().cpu()
                )
            # Either retrieve the original sentences or regenerate them from tokens.
            if align_dict is not None:
                src_str = task.dataset(cfg.dataset.gen_subset).src.get_original_text(
                    sample_id
                )
                target_str = task.dataset(cfg.dataset.gen_subset).tgt.get_original_text(
                    sample_id
                )
            else:
                if src_dict is not None:
                    src_str = src_dict.string(src_tokens, cfg.common_eval.post_process)
                else:
                    src_str = ""
                if has_target:
                    target_str = tgt_dict.string(
                        target_tokens,
                        cfg.common_eval.post_process,
                        escape_unk=True,
                        extra_symbols_to_ignore=get_symbols_to_strip_from_output(
                            generator
                        ),
                    )
            src_str = decode_fn(src_str)
            if has_target:
                target_str = decode_fn(target_str)
            if not cfg.common_eval.quiet:
                if src_dict is not None:
                    print("S-{}\t{}".format(sample_id, src_str), file=output_file)
                if has_target:
                    print("T-{}\t{}".format(sample_id, target_str), file=output_file)
            # Process top predictions
            for j, hypo in enumerate(hypos[i][: cfg.generation.nbest]):
                hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
                    hypo_tokens=hypo["tokens"].int().cpu(),
                    src_str=src_str,
                    alignment=hypo["alignment"],
                    align_dict=align_dict,
                    tgt_dict=tgt_dict,
                    remove_bpe=cfg.common_eval.post_process,
                    extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator),
                )
                detok_hypo_str = decode_fn(hypo_str)
                if not cfg.common_eval.quiet:
                    # NOTE(review): `score` is computed but the prints that
                    # used it are commented out below; kept for reference.
                    score = hypo["score"] / math.log(2)  # convert to base 2
                    # original hypothesis (after tokenization and BPE)
                    print(
                        "H-{}\t{}".format(sample_id, hypo_str),
                        file=output_file,
                    )
                    # print(
                    #     "H-{}\t{}\t{}".format(sample_id, score, hypo_str),
                    #     file=output_file,
                    # )
                    # detokenized hypothesis
                    print(
                        "D-{}\t{}".format(sample_id, detok_hypo_str),
                        file=output_file,
                    )
                    # print(
                    #     "D-{}\t{}\t{}".format(sample_id, score, detok_hypo_str),
                    #     file=output_file,
                    # )
                    print(
                        "P-{}\t{}".format(
                            sample_id,
                            " ".join(
                                map(
                                    lambda x: "{:.4f}".format(x),
                                    # convert from base e to base 2
                                    hypo["positional_scores"]
                                    .div_(math.log(2))
                                    .tolist(),
                                )
                            ),
                        ),
                        file=output_file,
                    )
                    if cfg.generation.print_alignment == "hard":
                        print(
                            "A-{}\t{}".format(
                                sample_id,
                                " ".join(
                                    [
                                        "{}-{}".format(src_idx, tgt_idx)
                                        for src_idx, tgt_idx in alignment
                                    ]
                                ),
                            ),
                            file=output_file,
                        )
                    if cfg.generation.print_alignment == "soft":
                        print(
                            "A-{}\t{}".format(
                                sample_id,
                                " ".join(
                                    [",".join(src_probs) for src_probs in alignment]
                                ),
                            ),
                            file=output_file,
                        )
                    if cfg.generation.print_step:
                        print(
                            "I-{}\t{}".format(sample_id, hypo["steps"]),
                            file=output_file,
                        )
                    if cfg.generation.retain_iter_history:
                        for step, h in enumerate(hypo["history"]):
                            _, h_str, _ = utils.post_process_prediction(
                                hypo_tokens=h["tokens"].int().cpu(),
                                src_str=src_str,
                                alignment=None,
                                align_dict=None,
                                tgt_dict=tgt_dict,
                                remove_bpe=None,
                            )
                            print(
                                "E-{}_{}\t{}".format(sample_id, step, h_str),
                                file=output_file,
                            )
                # Score only the top hypothesis
                if has_target and j == 0:
                    if (
                        align_dict is not None
                        or cfg.common_eval.post_process is not None
                    ):
                        # Convert back to tokens for evaluation with unk replacement and/or without BPE
                        target_tokens = tgt_dict.encode_line(
                            target_str, add_if_not_exist=True
                        )
                        hypo_tokens = tgt_dict.encode_line(
                            detok_hypo_str, add_if_not_exist=True
                        )
                    if hasattr(scorer, "add_string"):
                        scorer.add_string(target_str, detok_hypo_str)
                    else:
                        scorer.add(target_tokens, hypo_tokens)
        wps_meter.update(num_generated_tokens)
        progress.log({"wps": round(wps_meter.avg)})
        num_sentences += (
            sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
        )
    logger.info("NOTE: hypothesis and token scores are output in base 2")
    logger.info(
        "Translated {:,} sentences ({:,} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)".format(
            num_sentences,
            gen_timer.n,
            gen_timer.sum,
            num_sentences / gen_timer.sum,
            1.0 / gen_timer.avg,
        )
    )
    if has_target:
        if cfg.bpe and not cfg.generation.sacrebleu:
            if cfg.common_eval.post_process:
                logger.warning(
                    "BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization"
                )
            else:
                logger.warning(
                    "If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization"
                )
        # use print to be consistent with other main outputs: S-, H-, T-, D- and so on
        print(
            "Generate {} with beam={}: {}".format(
                cfg.dataset.gen_subset, cfg.generation.beam, scorer.result_string()
            ),
            file=output_file,
        )
    return scorer
def cli_main():
    """Console entry point: parse generation args and run :func:`main`."""
    parser = options.get_generation_parser()
    # TODO: replace this workaround with refactoring of `AudioPretraining`
    parser.add_argument(
        "--arch",
        "-a",
        metavar="ARCH",
        default="wav2vec2",
        help="Model architecture. For constructing tasks that rely on "
        "model args (e.g. `AudioPretraining`)",
    )
    args = options.parse_args_and_arch(parser)
    main(args)
if __name__ == "__main__":
    cli_main()
| 16,216 | 36.978923 | 180 | py |
CIF-HieraDist | CIF-HieraDist-main/fairseq_cli/validate.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from argparse import Namespace
from itertools import chain
import torch
from fairseq import checkpoint_utils, distributed_utils, options, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import metrics, progress_bar
from fairseq.utils import reset_logging
from omegaconf import DictConfig
# Configure root logging for the validate CLI before any work starts.
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.validate")
def main(cfg: DictConfig, override_args=None):
    """Load a checkpoint and run validation over the configured subsets.

    Args:
        cfg: validation configuration (converted from a Namespace if needed).
        override_args: optional parsed args whose explicitly-set values
            override the checkpoint's saved config.
    """
    if isinstance(cfg, Namespace):
        cfg = convert_namespace_to_omegaconf(cfg)
    utils.import_user_module(cfg.common)
    reset_logging()
    assert (
        cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None
    ), "Must specify batch size either with --max-tokens or --batch-size"
    use_fp16 = cfg.common.fp16
    use_cuda = torch.cuda.is_available() and not cfg.common.cpu
    if use_cuda:
        torch.cuda.set_device(cfg.distributed_training.device_id)
    if cfg.distributed_training.distributed_world_size > 1:
        data_parallel_world_size = distributed_utils.get_data_parallel_world_size()
        data_parallel_rank = distributed_utils.get_data_parallel_rank()
    else:
        data_parallel_world_size = 1
        data_parallel_rank = 0
    if override_args is not None:
        overrides = vars(override_args)
        # NOTE(review): eval() on the --model-overrides CLI string — trusted
        # operator input here, but ast.literal_eval would be safer.
        overrides.update(eval(getattr(override_args, "model_overrides", "{}")))
    else:
        overrides = None
    # Load ensemble
    logger.info("loading model(s) from {}".format(cfg.common_eval.path))
    models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
        [cfg.common_eval.path],
        arg_overrides=overrides,
        suffix=cfg.checkpoint.checkpoint_suffix,
    )
    model = models[0]
    # Move models to GPU
    for model in models:
        model.eval()
        if use_fp16:
            model.half()
        if use_cuda:
            model.cuda()
    # Print args
    logger.info(saved_cfg)
    # Build criterion
    criterion = task.build_criterion(saved_cfg.criterion)
    criterion.eval()
    for subset in cfg.dataset.valid_subset.split(","):
        try:
            task.load_dataset(subset, combine=False, epoch=1, task_cfg=saved_cfg.task)
            dataset = task.dataset(subset)
        except KeyError:
            raise Exception("Cannot find dataset: " + subset)
        # Initialize data iterator
        itr = task.get_batch_iterator(
            dataset=dataset,
            max_tokens=cfg.dataset.max_tokens,
            max_sentences=cfg.dataset.batch_size,
            max_positions=utils.resolve_max_positions(
                task.max_positions(),
                *[m.max_positions() for m in models],
            ),
            ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
            seed=cfg.common.seed,
            num_shards=data_parallel_world_size,
            shard_id=data_parallel_rank,
            num_workers=cfg.dataset.num_workers,
            data_buffer_size=cfg.dataset.data_buffer_size,
        ).next_epoch_itr(shuffle=False)
        progress = progress_bar.progress_bar(
            itr,
            log_format=cfg.common.log_format,
            log_interval=cfg.common.log_interval,
            prefix=f"valid on '{subset}' subset",
            default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
        )
        log_outputs = []
        for i, sample in enumerate(progress):
            sample = utils.move_to_cuda(sample) if use_cuda else sample
            _loss, _sample_size, log_output = task.valid_step(sample, model, criterion)
            progress.log(log_output, step=i)
            log_outputs.append(log_output)
        # Gather per-worker logging outputs before reducing to metrics.
        if data_parallel_world_size > 1:
            log_outputs = distributed_utils.all_gather_list(
                log_outputs,
                max_size=cfg.common.all_gather_list_size,
                group=distributed_utils.get_data_parallel_group(),
            )
            log_outputs = list(chain.from_iterable(log_outputs))
        with metrics.aggregate() as agg:
            task.reduce_metrics(log_outputs, criterion)
            log_output = agg.get_smoothed_values()
        progress.print(log_output, tag=subset, step=i)
def cli_main():
    """Console entry point: parse args twice (defaults suppressed the second
    time) so only explicitly-given flags become checkpoint overrides."""
    parser = options.get_validation_parser()
    args = options.parse_args_and_arch(parser)
    # only override args that are explicitly given on the command line
    override_parser = options.get_validation_parser()
    override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True)
    distributed_utils.call_main(
        convert_namespace_to_omegaconf(args), main, override_args=override_args
    )
if __name__ == "__main__":
    cli_main()
| 5,228 | 32.954545 | 88 | py |
CIF-HieraDist | CIF-HieraDist-main/fairseq_cli/hydra_train.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from fairseq.dataclass.initialize import add_defaults, hydra_init
from fairseq_cli.train import main as pre_main
from fairseq import distributed_utils, metrics
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import omegaconf_no_object_check
from fairseq.utils import reset_logging
import hydra
from hydra.core.hydra_config import HydraConfig
import torch
from omegaconf import OmegaConf, open_dict
# Module-level logger for the hydra_train CLI entry point.
logger = logging.getLogger("fairseq_cli.hydra_train")
@hydra.main(config_path=os.path.join("..", "fairseq", "config"), config_name="config")
def hydra_main(cfg: FairseqConfig) -> float:
    """Hydra-decorated entry point; delegates to ``_hydra_main``."""
    # NOTE(review): annotated ``-> float`` but the delegate's return value is
    # discarded — confirm whether the sweep objective should be returned here.
    _hydra_main(cfg)
def _hydra_main(cfg: FairseqConfig, **kwargs) -> float:
    """Run fairseq training from a composed Hydra config.

    Args:
        cfg: fully composed fairseq/Hydra configuration.
        **kwargs: forwarded to ``distributed_utils.call_main``.

    Returns:
        float: best smoothed value of ``checkpoint.best_checkpoint_metric``
        on the validation set, or ``inf`` if none was recorded — useful for
        hyper-parameter sweepers that minimize the returned objective.
    """
    add_defaults(cfg)
    if cfg.common.reset_logging:
        reset_logging()  # Hydra hijacks logging, fix that
    else:
        # check if directly called or called through hydra_main
        if HydraConfig.initialized():
            with open_dict(cfg):
                # make hydra logging work with ddp (see # see https://github.com/facebookresearch/hydra/issues/1126)
                cfg.job_logging_cfg = OmegaConf.to_container(
                    HydraConfig.get().job_logging, resolve=True
                )
    with omegaconf_no_object_check():
        cfg = OmegaConf.create(
            OmegaConf.to_container(cfg, resolve=True, enum_to_str=True)
        )
    OmegaConf.set_struct(cfg, True)
    try:
        if cfg.common.profile:
            with torch.cuda.profiler.profile():
                with torch.autograd.profiler.emit_nvtx():
                    distributed_utils.call_main(cfg, pre_main, **kwargs)
        else:
            distributed_utils.call_main(cfg, pre_main, **kwargs)
    except BaseException as e:
        # Deliberately broad: with --suppress-crashes a sweep should survive
        # any failure and still report a (worst-case) objective value.
        if not cfg.common.suppress_crashes:
            raise
        else:
            logger.error("Crashed! " + str(e))
    # get best val and return - useful for sweepers
    try:
        best_val = metrics.get_smoothed_value(
            "valid", cfg.checkpoint.best_checkpoint_metric
        )
    except Exception:
        # Fix: was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt. The metric may simply be absent (crashed run or
        # no validation), so fall through to the +inf sentinel below.
        best_val = None
    if best_val is None:
        best_val = float("inf")
    return best_val
def cli_main():
    """Console entry point: resolve the Hydra config name, then launch training."""
    try:
        from hydra._internal.utils import get_args
        cfg_name = get_args().config_name or "config"
    except Exception:
        # Fix: was a bare `except:` that also caught SystemExit (e.g. from
        # argparse `--help`) and KeyboardInterrupt. hydra._internal is private
        # API, so fall back to the default name on any ordinary failure.
        logger.warning("Failed to get config name from hydra args")
        cfg_name = "config"
    hydra_init(cfg_name)
    hydra_main()
if __name__ == "__main__":
    cli_main()
| 2,715 | 28.204301 | 116 | py |
CIF-HieraDist | CIF-HieraDist-main/fairseq_cli/eval_lm.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluate the perplexity of a trained language model.
"""
import logging
import math
import os
import sys
from argparse import Namespace
from typing import Iterable, List, Optional
import torch
import fairseq
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter
from fairseq.sequence_scorer import SequenceScorer
from omegaconf import DictConfig
# Configure root logging for the eval_lm CLI before any work starts.
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.eval_lm")
def eval_lm(
    models: List[fairseq.models.FairseqModel],
    source_dictionary: fairseq.data.Dictionary,
    batch_iterator: Iterable,
    post_process: Optional[str] = None,
    output_word_probs: bool = False,
    output_word_stats: bool = False,
    target_dictionary: Optional[fairseq.data.Dictionary] = None,
    softmax_batch: int = 0,
    remove_bos_token: bool = False,
    device: Optional[torch.device] = None,
):
    """
    Args:
        models (List[~fairseq.models.FairseqModel]): list of models to
            evaluate. Models are essentially `nn.Module` instances, but
            must be compatible with fairseq's `SequenceScorer`.
        source_dictionary (~fairseq.data.Dictionary): dictionary for
            applying any relevant post processing or outputing word
            probs/stats.
        batch_iterator (Iterable): yield batches of data
        post_process (Optional[str]): post-process text by removing BPE,
            letter segmentation, etc. Valid options can be found in
            fairseq.data.utils.post_process, although not all options
            are implemented here.
        output_word_probs (Optional[bool]): output words and their
            predicted log probabilities
        output_word_stats (Optional[bool]): output word statistics such
            as word count and average probability
        target_dictionary (Optional[~fairseq.data.Dictionary]): output
            dictionary (defaults to *source_dictionary*)
        softmax_batch (Optional[bool]): if BxT is more than this, will
            batch the softmax over vocab to this amount of tokens, in
            order to fit into GPU memory
        remove_bos_token (Optional[bool]): if True, confirm that the
            first token is the beginning-of-sentence symbol (according
            to the relevant dictionary) and remove it from the output
        device (Optional[torch.device]): device to use for evaluation
            (defaults to device of first model parameter)
    """
    if target_dictionary is None:
        target_dictionary = source_dictionary
    if device is None:
        device = next(models[0].parameters()).device

    gen_timer = StopwatchMeter()
    scorer = SequenceScorer(target_dictionary, softmax_batch)

    score_sum = 0.0
    count = 0

    if post_process is not None:
        if post_process in {"subword_nmt", "@@ "}:
            bpe_cont = post_process.rstrip()
            # Dictionary indices of all tokens that are BPE continuations.
            bpe_toks = {
                i
                for i in range(len(source_dictionary))
                if source_dictionary[i].endswith(bpe_cont)
            }
        else:
            # FIX: this string was missing .format()/f-prefix, so the error
            # message printed the literal "{post_process}" placeholder.
            raise NotImplementedError(
                "--post-process={} is not implemented".format(post_process)
            )
        bpe_len = len(bpe_cont)
    else:
        bpe_toks = None
        bpe_len = 0

    word_stats = dict()

    for sample in batch_iterator:
        if "net_input" not in sample:
            continue

        sample = utils.move_to_cuda(sample, device=device)

        gen_timer.start()
        hypos = scorer.generate(models, sample)
        gen_timer.stop(sample["ntokens"])

        for i, hypos_i in enumerate(hypos):
            hypo = hypos_i[0]
            sample_id = sample["id"][i]

            tokens = hypo["tokens"]
            tgt_len = tokens.numel()
            pos_scores = hypo["positional_scores"].float()

            if remove_bos_token:
                assert hypo["tokens"][0].item() == target_dictionary.bos()
                tokens = tokens[1:]
                pos_scores = pos_scores[1:]

            # Merge the score of each BPE continuation token into the next
            # token so that word-level probabilities are correct.
            # FIX: renamed loop index (was `i`) so it no longer shadows the
            # enumerate index used for `sample_id` above.
            skipped_toks = 0
            if bpe_toks is not None:
                for j in range(tgt_len - 1):
                    if tokens[j].item() in bpe_toks:
                        skipped_toks += 1
                        pos_scores[j + 1] += pos_scores[j]
                        pos_scores[j] = 0

            inf_scores = pos_scores.eq(float("inf")) | pos_scores.eq(float("-inf"))
            if inf_scores.any():
                # FIX: logger.info was called with a positional arg but no
                # %s placeholder, which raised a formatting error inside
                # logging and dropped the token text.
                logger.info(
                    "skipping tokens with inf scores: %s",
                    target_dictionary.string(tokens[inf_scores.nonzero()]),
                )
                pos_scores = pos_scores[(~inf_scores).nonzero()]
            score_sum += pos_scores.sum().cpu()
            count += pos_scores.numel() - skipped_toks

            if output_word_probs or output_word_stats:
                w = ""
                word_prob = []
                is_bpe = False
                for t in range(len(tokens)):
                    w_ind = tokens[t].item()
                    w += source_dictionary[w_ind]
                    if bpe_toks is not None and w_ind in bpe_toks:
                        # Still inside a word: strip the continuation marker.
                        w = w[:-bpe_len]
                        is_bpe = True
                    else:
                        word_prob.append((w, pos_scores[t].item()))

                        # Probability of the *next* word: first later
                        # position with a non-zero (i.e. non-merged) score.
                        next_prob = None
                        ind = t + 1
                        while ind < len(tokens):
                            if pos_scores[ind].item() != 0:
                                next_prob = pos_scores[ind]
                                break
                            ind += 1

                        word_stats.setdefault(w, WordStat(w, is_bpe)).add(
                            pos_scores[t].item(), next_prob
                        )
                        is_bpe = False
                        w = ""
                if output_word_probs:
                    logger.info(
                        str(int(sample_id))
                        + " "
                        + (
                            "\t".join(
                                "{} [{:2f}]".format(x[0], x[1]) for x in word_prob
                            )
                        )
                    )

    avg_nll_loss = (
        -score_sum / count / math.log(2) if count > 0 else 0
    )  # convert to base 2
    logger.info(
        "Evaluated {:,} tokens in {:.1f}s ({:.2f} tokens/s)".format(
            gen_timer.n, gen_timer.sum, 1.0 / gen_timer.avg if gen_timer.avg > 0 else 0
        )
    )

    if output_word_stats:
        for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True):
            logger.info(ws)

    return {
        "loss": avg_nll_loss,
        "perplexity": 2**avg_nll_loss,
    }
class WordStat(object):
    """Accumulates per-word scoring statistics across evaluated samples."""

    def __init__(self, word, is_bpe):
        self.word = word
        self.is_bpe = is_bpe
        # Running sums / counters, updated via add().
        self.log_prob = 0
        self.next_word_prob = 0
        self.count = 0
        self.missing_next_words = 0

    def add(self, log_prob, next_word_prob):
        """increments counters for the sum of log probs of current word and next
        word (given context ending at current word). Since the next word might be at the end of the example,
        or it might be not counted because it is not an ending subword unit,
        also keeps track of how many of those we have seen"""
        self.log_prob += log_prob
        self.count += 1
        if next_word_prob is None:
            # Next-word score unavailable (end of example / non-final subword).
            self.missing_next_words += 1
        else:
            self.next_word_prob += next_word_prob

    def __str__(self):
        # Tab-separated summary row; one line per word in the final report.
        fields = (
            self.word,
            self.count,
            self.log_prob,
            self.is_bpe,
            self.next_word_prob,
            self.count - self.missing_next_words,
        )
        return "\t".join(str(f) for f in fields)
def main(cfg: DictConfig, **unused_kwargs):
    """Load a trained LM ensemble and evaluate perplexity on a dataset split.

    Returns the dict produced by :func:`eval_lm` ({"loss", "perplexity"}).
    """
    if isinstance(cfg, Namespace):
        cfg = convert_namespace_to_omegaconf(cfg)

    utils.import_user_module(cfg.common)

    logger.info(cfg)

    if cfg.eval_lm.context_window > 0:
        # reduce tokens per sample by the required context window size
        cfg.task.tokens_per_sample -= cfg.eval_lm.context_window

    # Initialize the task using the current *cfg*
    task = tasks.setup_task(cfg.task)

    # Load ensemble
    logger.info("loading model(s) from {}".format(cfg.common_eval.path))
    # NOTE(review): eval() on a CLI-supplied string — acceptable for a local
    # research tool but executes arbitrary code from --model-overrides.
    models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
        [cfg.common_eval.path],
        arg_overrides=eval(cfg.common_eval.model_overrides),
        suffix=cfg.checkpoint.checkpoint_suffix,
        strict=(cfg.checkpoint.checkpoint_shard_count == 1),
        num_shards=cfg.checkpoint.checkpoint_shard_count,
        task=task,
    )

    use_fp16 = cfg.common.fp16
    use_cuda = torch.cuda.is_available() and not cfg.common.cpu
    if use_cuda:
        torch.cuda.set_device(cfg.distributed_training.device_id)

    # Optimize ensemble for generation and set the source and dest dicts on the model
    # (required by scorer)
    for model in models:
        if use_fp16:
            model.half()
        if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
            model.cuda()
        model.prepare_for_inference_(cfg)

    assert len(models) > 0

    logger.info(
        "num. model params: {:,}".format(sum(p.numel() for p in models[0].parameters()))
    )

    # Load dataset splits
    task.load_dataset(cfg.dataset.gen_subset)
    dataset = task.dataset(cfg.dataset.gen_subset)
    logger.info(
        "{} {} {:,} examples".format(
            cfg.task.data, cfg.dataset.gen_subset, len(dataset)
        )
    )

    itr = task.eval_lm_dataloader(
        dataset=dataset,
        max_tokens=cfg.dataset.max_tokens or 36000,
        batch_size=cfg.dataset.batch_size,
        max_positions=utils.resolve_max_positions(
            *[model.max_positions() for model in models]
        ),
        # Sharding can come from either --num-shards or the distributed setup.
        num_shards=max(
            cfg.dataset.num_shards,
            cfg.distributed_training.distributed_world_size,
        ),
        shard_id=max(
            cfg.dataset.shard_id,
            cfg.distributed_training.distributed_rank,
        ),
        num_workers=cfg.dataset.num_workers,
        data_buffer_size=cfg.dataset.data_buffer_size,
        context_window=cfg.eval_lm.context_window,
    )

    itr = progress_bar.progress_bar(
        itr,
        log_format=cfg.common.log_format,
        log_interval=cfg.common.log_interval,
        default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
    )

    results = eval_lm(
        models=models,
        source_dictionary=task.source_dictionary,
        batch_iterator=itr,
        post_process=cfg.common_eval.post_process,
        output_word_probs=cfg.eval_lm.output_word_probs,
        output_word_stats=cfg.eval_lm.output_word_stats,
        target_dictionary=task.target_dictionary,
        softmax_batch=cfg.eval_lm.softmax_batch,
        remove_bos_token=getattr(cfg.task, "add_bos_token", False),
    )

    logger.info(
        "Loss (base 2): {:.4f}, Perplexity: {:.2f}".format(
            results["loss"], results["perplexity"]
        )
    )

    return results
def cli_main():
    """Parse eval-lm CLI args and dispatch to main() (handles distributed)."""
    parser = options.get_eval_lm_parser()
    args = options.parse_args_and_arch(parser)
    distributed_utils.call_main(convert_namespace_to_omegaconf(args), main)


if __name__ == "__main__":
    cli_main()
| 11,959 | 33.367816 | 108 | py |
CIF-HieraDist | CIF-HieraDist-main/fairseq_cli/interactive.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate raw text with a trained model. Batches data on-the-fly.
"""
import ast
import fileinput
import logging
import math
import os
import sys
import time
from argparse import Namespace
from collections import namedtuple
import numpy as np
import torch
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.token_generation_constraints import pack_constraints, unpack_constraints
from fairseq_cli.generate import get_symbols_to_strip_from_output
# Configure root logging before fairseq modules emit messages; level is
# overridable via the LOGLEVEL environment variable.
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.interactive")

# Lightweight record types passed between make_batches() and main().
Batch = namedtuple("Batch", "ids src_tokens src_lengths constraints")
Translation = namedtuple("Translation", "src_str hypos pos_scores alignments")
def buffered_read(input, buffer_size):
    """Read *input* (a file path, or "-" for stdin) line by line and yield
    lists of stripped lines, each list holding at most *buffer_size* lines.
    """
    pending = []
    with fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")) as handle:
        for raw_line in handle:
            pending.append(raw_line.strip())
            if len(pending) >= buffer_size:
                yield pending
                pending = []
    # Flush whatever is left over after the final full buffer.
    if pending:
        yield pending
def make_batches(lines, cfg, task, max_positions, encode_fn):
    """Tokenize raw input *lines* and yield :class:`Batch` tuples ready for
    inference. If constrained decoding is enabled, tab-separated constraints
    are split off each line and packed into a tensor.
    """
    def encode_fn_target(x):
        return encode_fn(x)

    if cfg.generation.constraints:
        # Strip (tab-delimited) contraints, if present, from input lines,
        # store them in batch_constraints
        batch_constraints = [list() for _ in lines]
        for i, line in enumerate(lines):
            if "\t" in line:
                lines[i], *batch_constraints[i] = line.split("\t")

        # Convert each List[str] to List[Tensor]
        for i, constraint_list in enumerate(batch_constraints):
            batch_constraints[i] = [
                task.target_dictionary.encode_line(
                    encode_fn_target(constraint),
                    append_eos=False,
                    add_if_not_exist=False,
                )
                for constraint in constraint_list
            ]

    if cfg.generation.constraints:
        constraints_tensor = pack_constraints(batch_constraints)
    else:
        constraints_tensor = None

    tokens, lengths = task.get_interactive_tokens_and_lengths(lines, encode_fn)
    itr = task.get_batch_iterator(
        dataset=task.build_dataset_for_inference(
            tokens, lengths, constraints=constraints_tensor
        ),
        max_tokens=cfg.dataset.max_tokens,
        max_sentences=cfg.dataset.batch_size,
        max_positions=max_positions,
        ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
    ).next_epoch_itr(shuffle=False)
    for batch in itr:
        ids = batch["id"]
        src_tokens = batch["net_input"]["src_tokens"]
        src_lengths = batch["net_input"]["src_lengths"]
        constraints = batch.get("constraints", None)
        yield Batch(
            ids=ids,
            src_tokens=src_tokens,
            src_lengths=src_lengths,
            constraints=constraints,
        )
def main(cfg: FairseqConfig):
    """Interactive translation loop: read input (stdin or file) in buffered
    chunks, translate each chunk with the loaded model ensemble, and print
    S-/H-/D-/P- (and optionally C-/A-) lines in input order.
    """
    if isinstance(cfg, Namespace):
        cfg = convert_namespace_to_omegaconf(cfg)

    start_time = time.time()
    total_translate_time = 0

    utils.import_user_module(cfg.common)

    if cfg.interactive.buffer_size < 1:
        cfg.interactive.buffer_size = 1
    if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
        cfg.dataset.batch_size = 1

    assert (
        not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam
    ), "--sampling requires --nbest to be equal to --beam"
    assert (
        not cfg.dataset.batch_size
        or cfg.dataset.batch_size <= cfg.interactive.buffer_size
    ), "--batch-size cannot be larger than --buffer-size"

    logger.info(cfg)

    # Fix seed for stochastic decoding
    if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
        np.random.seed(cfg.common.seed)
        utils.set_torch_seed(cfg.common.seed)

    use_cuda = torch.cuda.is_available() and not cfg.common.cpu

    # Setup task, e.g., translation
    task = tasks.setup_task(cfg.task)

    # Load ensemble
    overrides = ast.literal_eval(cfg.common_eval.model_overrides)
    logger.info("loading model(s) from {}".format(cfg.common_eval.path))
    models, _model_args = checkpoint_utils.load_model_ensemble(
        utils.split_paths(cfg.common_eval.path),
        arg_overrides=overrides,
        task=task,
        suffix=cfg.checkpoint.checkpoint_suffix,
        strict=(cfg.checkpoint.checkpoint_shard_count == 1),
        num_shards=cfg.checkpoint.checkpoint_shard_count,
    )

    # Set dictionaries
    src_dict = task.source_dictionary
    tgt_dict = task.target_dictionary

    # Optimize ensemble for generation
    for model in models:
        if model is None:
            continue
        if cfg.common.fp16:
            model.half()
        if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
            model.cuda()
        model.prepare_for_inference_(cfg)

    # Initialize generator
    generator = task.build_generator(models, cfg.generation)

    # Handle tokenization and BPE
    tokenizer = task.build_tokenizer(cfg.tokenizer)
    bpe = task.build_bpe(cfg.bpe)

    # encode: raw text -> tokenized + BPE'd; decode is the inverse, applied
    # in reverse order.
    def encode_fn(x):
        if tokenizer is not None:
            x = tokenizer.encode(x)
        if bpe is not None:
            x = bpe.encode(x)
        return x

    def decode_fn(x):
        if bpe is not None:
            x = bpe.decode(x)
        if tokenizer is not None:
            x = tokenizer.decode(x)
        return x

    # Load alignment dictionary for unknown word replacement
    # (None if no unknown word replacement, empty if no path to align dictionary)
    align_dict = utils.load_align_dict(cfg.generation.replace_unk)

    max_positions = utils.resolve_max_positions(
        task.max_positions(), *[model.max_positions() for model in models]
    )

    if cfg.generation.constraints:
        logger.warning(
            "NOTE: Constrained decoding currently assumes a shared subword vocabulary."
        )

    if cfg.interactive.buffer_size > 1:
        logger.info("Sentence buffer size: %s", cfg.interactive.buffer_size)
    logger.info("NOTE: hypothesis and token scores are output in base 2")
    logger.info("Type the input sentence and press return:")
    # Running offset so output ids stay unique across buffered chunks.
    start_id = 0
    for inputs in buffered_read(cfg.interactive.input, cfg.interactive.buffer_size):
        results = []
        for batch in make_batches(inputs, cfg, task, max_positions, encode_fn):
            bsz = batch.src_tokens.size(0)
            src_tokens = batch.src_tokens
            src_lengths = batch.src_lengths
            constraints = batch.constraints
            if use_cuda:
                src_tokens = src_tokens.cuda()
                src_lengths = src_lengths.cuda()
                if constraints is not None:
                    constraints = constraints.cuda()

            sample = {
                "net_input": {
                    "src_tokens": src_tokens,
                    "src_lengths": src_lengths,
                },
            }
            translate_start_time = time.time()
            translations = task.inference_step(
                generator, models, sample, constraints=constraints
            )
            translate_time = time.time() - translate_start_time
            total_translate_time += translate_time
            list_constraints = [[] for _ in range(bsz)]
            if cfg.generation.constraints:
                list_constraints = [unpack_constraints(c) for c in constraints]
            for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)):
                src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad())
                constraints = list_constraints[i]
                results.append(
                    (
                        start_id + id,
                        src_tokens_i,
                        hypos,
                        {
                            "constraints": constraints,
                            "time": translate_time / len(translations),
                        },
                    )
                )

        # sort output to match input order
        for id_, src_tokens, hypos, info in sorted(results, key=lambda x: x[0]):
            src_str = ""
            if src_dict is not None:
                src_str = src_dict.string(src_tokens, cfg.common_eval.post_process)
                print("S-{}\t{}".format(id_, src_str))
                print("W-{}\t{:.3f}\tseconds".format(id_, info["time"]))
                for constraint in info["constraints"]:
                    print(
                        "C-{}\t{}".format(
                            id_,
                            tgt_dict.string(constraint, cfg.common_eval.post_process),
                        )
                    )

            # Process top predictions
            for hypo in hypos[: min(len(hypos), cfg.generation.nbest)]:
                hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
                    hypo_tokens=hypo["tokens"].int().cpu(),
                    src_str=src_str,
                    alignment=hypo["alignment"],
                    align_dict=align_dict,
                    tgt_dict=tgt_dict,
                    remove_bpe=cfg.common_eval.post_process,
                    extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator),
                )
                detok_hypo_str = decode_fn(hypo_str)
                score = hypo["score"] / math.log(2)  # convert to base 2
                # original hypothesis (after tokenization and BPE)
                print("H-{}\t{}\t{}".format(id_, score, hypo_str))
                # detokenized hypothesis
                print("D-{}\t{}\t{}".format(id_, score, detok_hypo_str))
                print(
                    "P-{}\t{}".format(
                        id_,
                        " ".join(
                            map(
                                lambda x: "{:.4f}".format(x),
                                # convert from base e to base 2
                                hypo["positional_scores"].div_(math.log(2)).tolist(),
                            )
                        ),
                    )
                )
                if cfg.generation.print_alignment:
                    alignment_str = " ".join(
                        ["{}-{}".format(src, tgt) for src, tgt in alignment]
                    )
                    print("A-{}\t{}".format(id_, alignment_str))

        # update running id_ counter
        start_id += len(inputs)

    logger.info(
        "Total time: {:.3f} seconds; translation time: {:.3f}".format(
            time.time() - start_time, total_translate_time
        )
    )
def cli_main():
    """Parse interactive-generation CLI args and dispatch to main()."""
    parser = options.get_interactive_generation_parser()
    args = options.parse_args_and_arch(parser)
    distributed_utils.call_main(convert_namespace_to_omegaconf(args), main)


if __name__ == "__main__":
    cli_main()
| 11,465 | 35.056604 | 88 | py |
CIF-HieraDist | CIF-HieraDist-main/fairseq_cli/train.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import argparse
import logging
import math
import random
import os
import sys
from typing import Dict, Optional, Any, List, Tuple, Callable
# We need to setup root logger before importing any fairseq libraries.
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
# Module logger; a file handler may be added later in main() via --log-file.
logger = logging.getLogger("fairseq_cli.train")
import numpy as np
import torch
from fairseq import (
checkpoint_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators, data_utils
from fairseq.data.plasma_utils import PlasmaStore
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.distributed import fsdp_enable_wrap, fsdp_wrap, utils as distributed_utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.trainer import Trainer
from omegaconf import DictConfig, OmegaConf
def seed_torch(seed):
    """Seed every RNG used during training (Python, NumPy, PyTorch CPU and
    all CUDA devices) and force deterministic cuDNN behaviour, so runs are
    reproducible. Also pins the hash seed and the CPU thread count.
    """
    os.environ["PYTHONHASHSEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    # PyTorch: CPU RNG plus current and all visible CUDA devices.
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Deterministic kernel selection; only conv matters here.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.set_num_threads(16)
def main(cfg: FairseqConfig) -> None:
    """Top-level training driver: build task/model/criterion/trainer, restore
    the latest checkpoint, then run the epoch loop until max-epoch, max-update
    or the min-LR stopping condition is reached.
    """
    if isinstance(cfg, argparse.Namespace):
        cfg = convert_namespace_to_omegaconf(cfg)

    utils.import_user_module(cfg.common)

    if (
        distributed_utils.is_master(cfg.distributed_training)
        and "job_logging_cfg" in cfg
    ):
        # make hydra logging work with ddp (see # see https://github.com/facebookresearch/hydra/issues/1126)
        logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg))

    assert (
        cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None
    ), "Must specify batch size either with --max-tokens or --batch-size"
    metrics.reset()

    if cfg.common.log_file is not None:
        handler = logging.FileHandler(filename=cfg.common.log_file)
        logger.addHandler(handler)

    np.random.seed(cfg.common.seed)
    utils.set_torch_seed(cfg.common.seed)
    seed_torch(cfg.common.seed)

    if distributed_utils.is_master(cfg.distributed_training):
        checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir)

    # Print args
    logger.info(cfg)

    if cfg.checkpoint.write_checkpoints_asynchronously:
        try:
            import iopath  # noqa: F401
        except ImportError:
            logging.exception(
                "Asynchronous checkpoint writing is specified but iopath is "
                "not installed: `pip install iopath`"
            )
            return

    # Setup task, e.g., translation, language modeling, etc.
    task = tasks.setup_task(cfg.task)

    assert cfg.criterion, "Please specify criterion to train a model"

    # Build model and criterion
    if cfg.distributed_training.ddp_backend == "fully_sharded":
        with fsdp_enable_wrap(cfg.distributed_training):
            model = fsdp_wrap(task.build_model(cfg.model))
    else:
        model = task.build_model(cfg.model)
    criterion = task.build_criterion(cfg.criterion)
    logger.info(model)
    logger.info("task: {}".format(task.__class__.__name__))
    logger.info("model: {}".format(model.__class__.__name__))
    logger.info("criterion: {}".format(criterion.__class__.__name__))
    logger.info(
        "num. shared model params: {:,} (num. trained: {:,})".format(
            sum(
                p.numel() for p in model.parameters() if not getattr(p, "expert", False)
            ),
            sum(
                p.numel()
                for p in model.parameters()
                if not getattr(p, "expert", False) and p.requires_grad
            ),
        )
    )

    logger.info(
        "num. expert model params: {} (num. trained: {})".format(
            sum(p.numel() for p in model.parameters() if getattr(p, "expert", False)),
            sum(
                p.numel()
                for p in model.parameters()
                if getattr(p, "expert", False) and p.requires_grad
            ),
        )
    )

    # Load valid dataset (we load training data below, based on the latest checkpoint)
    # We load the valid dataset AFTER building the model
    data_utils.raise_if_valid_subsets_unintentionally_ignored(cfg)
    if cfg.dataset.combine_valid_subsets:
        task.load_dataset("valid", combine=True, epoch=1)
    else:
        for valid_sub_split in cfg.dataset.valid_subset.split(","):
            task.load_dataset(valid_sub_split, combine=False, epoch=1)

    # (optionally) Configure quantization
    if cfg.common.quantization_config_path is not None:
        quantizer = quantization_utils.Quantizer(
            config_path=cfg.common.quantization_config_path,
            max_epoch=cfg.optimization.max_epoch,
            max_update=cfg.optimization.max_update,
        )
    else:
        quantizer = None

    # Build trainer
    if cfg.common.model_parallel_size == 1:
        trainer = Trainer(cfg, task, model, criterion, quantizer)
    else:
        trainer = MegatronTrainer(cfg, task, model, criterion)
    logger.info(
        "training on {} devices (GPUs/TPUs)".format(
            cfg.distributed_training.distributed_world_size
        )
    )
    logger.info(
        "max tokens per device = {} and max sentences per device = {}".format(
            cfg.dataset.max_tokens,
            cfg.dataset.batch_size,
        )
    )

    # Load the latest checkpoint if one is available and restore the
    # corresponding train iterator
    extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
        cfg.checkpoint,
        trainer,
        # don't cache epoch iterators for sharded datasets
        disable_iterator_cache=task.has_sharded_data("train"),
    )
    if cfg.common.tpu:
        import torch_xla.core.xla_model as xm

        xm.rendezvous("load_checkpoint")  # wait for all workers

    max_epoch = cfg.optimization.max_epoch or math.inf
    lr = trainer.get_lr()

    # print("before training")

    train_meter = meters.StopwatchMeter()
    train_meter.start()
    while epoch_itr.next_epoch_idx <= max_epoch:
        if lr <= cfg.optimization.stop_min_lr:
            logger.info(
                f"stopping training because current learning rate ({lr}) is smaller "
                "than or equal to minimum learning rate "
                f"(--stop-min-lr={cfg.optimization.stop_min_lr})"
            )
            break

        # train for one epoch
        valid_losses, should_stop = train(cfg, trainer, task, epoch_itr)
        if should_stop:
            break

        # only use first validation loss to update the learning rate
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])

        epoch_itr = trainer.get_train_iterator(
            epoch_itr.next_epoch_idx,
            # sharded data: get train iterator for next epoch
            load_dataset=task.has_sharded_data("train"),
            # don't cache epoch iterators for sharded datasets
            disable_iterator_cache=task.has_sharded_data("train"),
        )
    train_meter.stop()
    logger.info("done training in {:.1f} seconds".format(train_meter.sum))

    # ioPath implementation to wait for all asynchronous file writes to complete.
    if cfg.checkpoint.write_checkpoints_asynchronously:
        logger.info(
            "ioPath PathManager waiting for all asynchronous checkpoint "
            "writes to finish."
        )
        PathManager.async_close()
        logger.info("ioPath PathManager finished waiting.")
def should_stop_early(cfg: DictConfig, valid_loss: float) -> bool:
    """Return True once validation performance has failed to improve for
    ``cfg.checkpoint.patience`` consecutive validation runs.

    The best metric seen so far and the run counter are kept as attributes
    on the function itself, so they persist across calls.
    """
    # Nothing to judge if no validation ran this epoch.
    if valid_loss is None:
        return False
    # Patience disabled entirely.
    if cfg.checkpoint.patience <= 0:
        return False

    def improved(current, best):
        if cfg.checkpoint.maximize_best_checkpoint_metric:
            return current > best
        return current < best

    best_so_far = getattr(should_stop_early, "best", None)
    if best_so_far is None or improved(valid_loss, best_so_far):
        # New best: record it and reset the stall counter.
        should_stop_early.best = valid_loss
        should_stop_early.num_runs = 0
        return False

    should_stop_early.num_runs += 1
    if should_stop_early.num_runs < cfg.checkpoint.patience:
        return False
    logger.info(
        "early stop since valid performance hasn't improved for last {} runs".format(
            cfg.checkpoint.patience
        )
    )
    return True
@metrics.aggregate("train")
def train(
    cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr
) -> Tuple[List[Optional[float]], bool]:
    """Train the model for one epoch and return validation losses."""
    # Initialize data iterator
    itr = epoch_itr.next_epoch_itr(
        fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus,
        shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum),
    )
    # Per-epoch gradient accumulation factor; falls back to the last
    # configured value once the schedule runs out.
    update_freq = (
        cfg.optimization.update_freq[epoch_itr.epoch - 1]
        if epoch_itr.epoch <= len(cfg.optimization.update_freq)
        else cfg.optimization.update_freq[-1]
    )
    itr = iterators.GroupedIterator(itr, update_freq)
    if cfg.common.tpu:
        itr = utils.tpu_data_loader(itr)
    progress = progress_bar.progress_bar(
        itr,
        log_format=cfg.common.log_format,
        log_file=cfg.common.log_file,
        log_interval=cfg.common.log_interval,
        epoch=epoch_itr.epoch,
        tensorboard_logdir=(
            cfg.common.tensorboard_logdir
            if distributed_utils.is_master(cfg.distributed_training)
            else None
        ),
        default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
        wandb_project=(
            cfg.common.wandb_project
            if distributed_utils.is_master(cfg.distributed_training)
            else None
        ),
        wandb_run_name=os.environ.get(
            "WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir)
        ),
        azureml_logging=(
            cfg.common.azureml_logging
            if distributed_utils.is_master(cfg.distributed_training)
            else False
        ),
    )
    progress.update_config(_flatten_config(cfg))

    trainer.begin_epoch(epoch_itr.epoch)

    valid_subsets = cfg.dataset.valid_subset.split(",")
    should_stop = False
    num_updates = trainer.get_num_updates()
    logger.info("Start iterating over samples")
    for i, samples in enumerate(progress):
        with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
            "train_step-%d" % i
        ):
            log_output = trainer.train_step(samples)

        if log_output is not None:  # not OOM, overflow, ...
            # log mid-epoch stats
            num_updates = trainer.get_num_updates()
            if num_updates % cfg.common.log_interval == 0:
                stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
                progress.log(stats, tag="train_inner", step=num_updates)

                # reset mid-epoch stats after each log interval
                # the end-of-epoch stats will still be preserved
                metrics.reset_meters("train_inner")

        end_of_epoch = not itr.has_next()
        valid_losses, should_stop = validate_and_save(
            cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch
        )

        if should_stop:
            break

    # log end-of-epoch stats
    logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
    stats = get_training_stats(metrics.get_smoothed_values("train"))
    progress.print(stats, tag="train", step=num_updates)

    # reset epoch-level meters
    metrics.reset_meters("train")
    return valid_losses, should_stop
def _flatten_config(cfg: DictConfig):
    """Convert *cfg* to a plain container, folding any legacy argparse
    Namespace entry into a single "args" key (for progress-bar logging)."""
    flat = OmegaConf.to_container(cfg)
    # remove any legacy Namespaces and replace with a single "args"
    legacy = None
    for key in list(flat.keys()):
        if isinstance(flat[key], argparse.Namespace):
            legacy = flat[key]
            del flat[key]
    if legacy is not None:
        flat["args"] = vars(legacy)
    return flat
def validate_and_save(
    cfg: DictConfig,
    trainer: Trainer,
    task: tasks.FairseqTask,
    epoch_itr,
    valid_subsets: List[str],
    end_of_epoch: bool,
) -> Tuple[List[Optional[float]], bool]:
    """Decide whether to validate and/or checkpoint at this point in training,
    do so, and return ``(valid_losses, should_stop)``.
    """
    num_updates = trainer.get_num_updates()
    max_update = cfg.optimization.max_update or math.inf

    # Stopping conditions (and an additional one based on validation loss later
    # on)
    should_stop = False
    if num_updates >= max_update:
        should_stop = True
        logger.info(
            f"Stopping training due to "
            f"num_updates: {num_updates} >= max_update: {max_update}"
        )

    training_time_hours = trainer.cumulative_training_time() / (60 * 60)
    if (
        cfg.optimization.stop_time_hours > 0
        and training_time_hours > cfg.optimization.stop_time_hours
    ):
        should_stop = True
        logger.info(
            f"Stopping training due to "
            f"cumulative_training_time: {training_time_hours} > "
            f"stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)"
        )

    # Save at end-of-epoch intervals, on stop, or every N updates (after the
    # validate-after-updates warmup).
    do_save = (
        (end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0)
        or should_stop
        or (
            cfg.checkpoint.save_interval_updates > 0
            and num_updates > 0
            and num_updates % cfg.checkpoint.save_interval_updates == 0
            and num_updates >= cfg.dataset.validate_after_updates
        )
    )
    do_validate = (
        (
            (not end_of_epoch and do_save)  # validate during mid-epoch saves
            or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0)
            or should_stop
            or (
                cfg.dataset.validate_interval_updates > 0
                and num_updates > 0
                and num_updates % cfg.dataset.validate_interval_updates == 0
            )
        )
        and not cfg.dataset.disable_validation
        and num_updates >= cfg.dataset.validate_after_updates
    )

    # Validate
    valid_losses = [None]
    if do_validate:
        valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets)

    should_stop |= should_stop_early(cfg, valid_losses[0])

    # Save checkpoint
    if do_save or should_stop:
        checkpoint_utils.save_checkpoint(
            cfg.checkpoint, trainer, epoch_itr, valid_losses[0]
        )

    return valid_losses, should_stop
def get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]:
    """Attach the elapsed wall-clock time (rounded to whole seconds) to the
    smoothed stats dict and return it."""
    wall = metrics.get_meter("default", "wall").elapsed_time
    stats["wall"] = round(wall, 0)
    return stats
def validate(
    cfg: DictConfig,
    trainer: Trainer,
    task: tasks.FairseqTask,
    epoch_itr,
    subsets: List[str],
) -> List[Optional[float]]:
    """Evaluate the model on the validation set(s) and return the losses."""
    if cfg.dataset.fixed_validation_seed is not None:
        # set fixed seed for every validation
        utils.set_torch_seed(cfg.dataset.fixed_validation_seed)

    trainer.begin_valid_epoch(epoch_itr.epoch)
    valid_losses = []
    for subset in subsets:
        logger.info('begin validation on "{}" subset'.format(subset))

        # Initialize data iterator
        itr = trainer.get_valid_iterator(subset).next_epoch_itr(
            shuffle=False, set_dataset_epoch=False  # use a fixed valid set
        )
        if cfg.common.tpu:
            itr = utils.tpu_data_loader(itr)
        progress = progress_bar.progress_bar(
            itr,
            log_format=cfg.common.log_format,
            log_interval=cfg.common.log_interval,
            epoch=epoch_itr.epoch,
            prefix=f"valid on '{subset}' subset",
            tensorboard_logdir=(
                cfg.common.tensorboard_logdir
                if distributed_utils.is_master(cfg.distributed_training)
                else None
            ),
            default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
            wandb_project=(
                cfg.common.wandb_project
                if distributed_utils.is_master(cfg.distributed_training)
                else None
            ),
            wandb_run_name=os.environ.get(
                "WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir)
            ),
        )

        # create a new root metrics aggregator so validation metrics
        # don't pollute other aggregators (e.g., train meters)
        with metrics.aggregate(new_root=True) as agg:
            for i, sample in enumerate(progress):
                if (
                    cfg.dataset.max_valid_steps is not None
                    and i > cfg.dataset.max_valid_steps
                ):
                    break
                trainer.valid_step(sample)

        # log validation stats
        stats = get_valid_stats(cfg, trainer, agg.get_smoothed_values())

        if hasattr(task, "post_validate"):
            task.post_validate(trainer.get_model(), stats, agg)

        progress.print(stats, tag=subset, step=trainer.get_num_updates())

        # Only the configured best-checkpoint metric is returned per subset.
        valid_losses.append(stats[cfg.checkpoint.best_checkpoint_metric])
    return valid_losses
def get_valid_stats(
    cfg: DictConfig, trainer: Trainer, stats: Dict[str, Any]
) -> Dict[str, Any]:
    """Augment validation ``stats`` in place with the current update count
    and the best value of the configured checkpoint metric so far."""
    stats["num_updates"] = trainer.get_num_updates()
    # `save_checkpoint.best` only exists once a checkpoint has been saved.
    if hasattr(checkpoint_utils.save_checkpoint, "best"):
        metric_name = cfg.checkpoint.best_checkpoint_metric
        pick = max if cfg.checkpoint.maximize_best_checkpoint_metric else min
        stats["best_{0}".format(metric_name)] = pick(
            checkpoint_utils.save_checkpoint.best, stats[metric_name]
        )
    return stats
def cli_main(
    modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None
) -> None:
    """Command-line entry point: parse args/arch, convert the namespace to an
    omegaconf config, then launch (possibly distributed) training via `main`.

    `modify_parser` lets callers tweak the argument parser before parsing.
    """
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
    cfg = convert_namespace_to_omegaconf(args)
    if cfg.common.use_plasma_view:
        # Start a shared-memory plasma object store for dataset views.
        server = PlasmaStore(path=cfg.common.plasma_path)
        logger.info(
            f"Started plasma server pid {server.server.pid} {cfg.common.plasma_path}"
        )
    if args.profile:
        # Wrap the run in CUDA/NVTX profiling contexts so kernels show up
        # in nvprof / Nsight traces.
        with torch.cuda.profiler.profile():
            with torch.autograd.profiler.emit_nvtx():
                distributed_utils.call_main(cfg, main)
    else:
        distributed_utils.call_main(cfg, main)
    # if cfg.common.use_plasma_view:
    #     server.server.kill()
# Allow running this module directly as a training script.
if __name__ == "__main__":
    cli_main()
| 19,207 | 33.860254 | 108 | py |
SOLikeT | SOLikeT-master/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# Import SOLikeT (for autodoc)
import sys
sys.path.insert(0, "..")
# Stub out heavyweight optional dependencies so the `import soliket` below
# (needed by autodoc) succeeds even when they are not installed.
# NOTE(review): this uses the third-party `mock` package; the standard
# library's `unittest.mock` would avoid the extra dependency.
import mock
MOCK_MODULES = ["cosmopower", "tensorflow", "pyccl", "camb"]
for module in MOCK_MODULES:
    sys.modules[module] = mock.Mock()
import soliket
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = 'SOLikeT'
copyright = '2023, The SO Collaboration'
author = 'The SO Collaboration'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
    "sphinx.ext.autodoc", # Generate doc pages from source docstrings
    "sphinx.ext.viewcode", # Generate links to source code
    "sphinx.ext.mathjax", # Mathematical symbols
    "sphinx_rtd_theme", # readthedocs theme
]
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
| 1,440 | 31.022222 | 87 | py |
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 16/PIPO-FAN-master/pipo_fan/train_sf_partial.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 5 16:00:33 2017
@author: yan
"""
# %% train the network
import argparse
import datetime
import math
import numpy as np
import os
from os import path
import shutil
import time
import torch
from torch import cuda
from torch import optim
#from torch.optim import lr_scheduler
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
#from collections import OrderedDict
from torch.nn import init
import dataset.dataset_liverCT_2D as dl1
import dataset.dataset_muor_2D as dl2
# from model.denseu_net import DenseUNet
# from model.unet import UNet
from model.concave_dps_w import ResUNet
# from model.concave_res_w3 import ResUNet
# from model.resu_net import ResUNet
# from model.concave_dcc import ResUNet
#from model.concave_sh import ResUNet
# from scipy.misc import imsave
# %%
# Command-line options for the partially-supervised multi-organ training run.
parser = argparse.ArgumentParser(description='PyTorch ResUNet Training')
parser.add_argument('--epochs', default=4000, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('-b', '--batchsize', default=8, type=int,
                    metavar='N', help='mini-batch size (default: 8)')
parser.add_argument('--blocksize', default=224, type=int,
                    metavar='N', help='H/W of each image block (default: 224)')
parser.add_argument('-s', '--slices', default=3, type=int,
                    metavar='N', help='number of slices (default: 3)')
# NOTE(review): the actual default below is 0.0002, but the help text
# claims 0.002 — confirm which value was intended.
parser.add_argument('--lr', '--learning-rate', default=0.0002, type=float,
                    metavar='LR', help='initial learning rate (default: 0.002)')
parser.add_argument('--momentum', default=0.9, type=float,
                    metavar='N', help='momentum for optimizer (default: 0.9)')
parser.add_argument('--view', default='axial', type=str,
                    metavar='View', help='view for segmentation (default: axial)')
parser.add_argument('--cv_n', default='1', type=str,
                    help='Cross validation Dataset num')
# %%
class AverageMeter(object):
    """Keeps a metric's most recent value together with its running mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Reset every accumulated statistic back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Fold in `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
# %%
# def dice_similarity(output, target):
# """Computes the Dice similarity"""
# #batch_size = target.size(0)
# smooth = 0.00001
# # max returns values and positions
# seg_channel = output.max(dim=1)[1]
# seg_channel = seg_channel.float()
# target = target.float()
# #print('Shapes: {}, {}'.format(seg_channel.shape, target.shape))
# intersection = (seg_channel * target).sum(dim=2).sum(dim=1)
# union = (seg_channel + target).sum(dim=2).sum(dim=1)
# dice = 2. * intersection / (union + smooth)
# #print(intersection, union, dice)
# return torch.mean(dice)
def dice_similarity(output, target):
    """Global (soft) Dice coefficient between `output` and `target`.

    Both tensors are flattened per sample and pooled over the whole batch:
    dice = 2*|A.B| / (|A| + |B| + eps). Returns a scalar tensor.
    """
    eps = 0.00001
    pred = output.float().view(output.size(0), -1)
    gt = target.float().view(target.size(0), -1)
    # One global overlap/total over the entire batch (not per-sample).
    overlap = (pred * gt).sum()
    total = (pred + gt).sum()
    dice = (2. * overlap) / (total + eps)
    return torch.mean(dice)
def dice_similarity_u(output, target):
    """Mean per-class Dice over all foreground channels of `output`.

    For each class index i in 1..C-1, builds a binary ground-truth mask
    (target == i) and scores it against probability channel i via
    `dice_similarity`, then averages over the C-1 foreground classes.

    Args:
        output: (B, C, H, W) per-class probabilities.
        target: (B, H, W) integer label map on the same device as `output`.

    Returns:
        Scalar tensor with the mean foreground Dice.
    """
    total_dice = 0
    output = output.clone()
    target = target.clone()
    # print('target:',target.sum())
    for i in range(1, output.shape[1]):
        # Allocate the per-class mask directly on `target`'s device. The
        # original called .cuda() here, which crashed on CPU-only runs and
        # silently assumed GPU 0 in multi-GPU setups.
        target_i = torch.zeros(target.shape, device=target.device)
        target_i[target == i] = 1
        output_i = output[:, i:i+1].clone()
        dice_i = dice_similarity(output_i, target_i)
        total_dice += dice_i
    # Average over the foreground classes only (channel 0 is background).
    total_dice = total_dice / (output.shape[1] - 1)
    return total_dice
def visualize_train(d,name):
    """Save the first sample of batch tensor `d` as a PNG under train_u/.

    NOTE(review): `imsave` is undefined here — the `from scipy.misc import
    imsave` line near the top of this file is commented out, so calling
    this function raises NameError.
    """
    name = name  # no-op assignment kept from the original
    da = d.cpu().data.numpy()
    # First sample only, moved from CHW to HWC layout for image saving.
    db = np.transpose(da[0], (1,2,0))
    # print('db.shape',db.shape)
    if db.shape[2] == 3:
        imsave(path.join('/home/fangx2/mu_or/train_u', name+'.png'), db, format='png')
    else:
        imsave(path.join('/home/fangx2/mu_or/train_u', name+'.png'), db[:,:,0], format='png')
def visualize_train1(d,name):
    """Save the first 2-D slice of `d` as train_u/<name>.png.

    NOTE(review): `imsave` is not imported (the scipy.misc import near the
    top of this file is commented out), so calling this raises NameError.
    """
    name = name  # no-op assignment kept from the original
    da = d.cpu().data.numpy()
    db = da[0,:,:]
    imsave(path.join('/home/fangx2/mu_or/train_u', name+'.png'), db, format='png')
def visualize_val(d,name):
    """Save the first sample of batch tensor `d` as a PNG under val_u/.

    NOTE(review): `imsave` is undefined here — the `from scipy.misc import
    imsave` line near the top of this file is commented out, so calling
    this function raises NameError.
    """
    name = name  # no-op assignment kept from the original
    da = d.cpu().data.numpy()
    # First sample only, moved from CHW to HWC layout for image saving.
    db = np.transpose(da[0], (1,2,0))
    # print('db.shape',db.shape)
    if db.shape[2] == 3:
        imsave(path.join('/home/fangx2/mu_or/val_u', name+'.png'), db, format='png')
    else:
        imsave(path.join('/home/fangx2/mu_or/val_u', name+'.png'), db[:,:,0], format='png')
def visualize_val1(d,name):
    """Save the first 2-D slice of `d` as val_u/<name>.png.

    NOTE(review): `imsave` is not imported (the scipy.misc import near the
    top of this file is commented out), so calling this raises NameError.
    """
    name = name  # no-op assignment kept from the original
    da = d.cpu().data.numpy()
    db = da[0,:,:]
    imsave(path.join('/home/fangx2/mu_or/val_u', name+'.png'), db, format='png')
# %%
def train(train_loader, data_type, model, criterion, optimizer, epoch, verbose=True):
    """Run one training epoch over `train_loader`.

    `data_type` identifies which (partially labelled) dataset the loader
    comes from: for '1'/'2'/'3' the 4-channel softmax output is collapsed
    into a 2-channel background-vs-organ problem by keeping channel
    int(data_type) as foreground and summing the remaining channels into
    background; '4' keeps all channels (fully labelled dataset).

    Returns (mean loss, mean Dice) over the epoch.
    """
    batch_time = AverageMeter()
    #data_time = AverageMeter()
    losses = AverageMeter()
    dice = AverageMeter()
    # switch to train mode
    model.train()
    end_time = time.time()
    for i, sample_batched in enumerate(train_loader):
        # measure data loading time
        #data_time.update(time.time() - end_time)
        image_batch = sample_batched['image']
        # label should be only the middle slice
        label_batch = sample_batched['label'][:,0,:,:]
        # mask = sample_batched['mask'][:,0:1,:,:]
        # print('mask shape:', mask.shape)
        #print('label batch size: {}'.format(label_batch.shape))
        #image_batch = image_batch.cuda()
        #label_batch = label_batch.cuda(async=True)
        input_var = Variable(image_batch).float()
        input_var = input_var.cuda()
        target_var = Variable(label_batch).long()
        target_var = target_var.cuda()
        # mask_var = Variable(mask).float()
        # mask_var = mask_var.cuda()
        # compute output
        output = model(input_var)
        # Clamp probabilities away from 0 so torch.log() inside the NLL
        # criterion stays finite.
        output = torch.clamp(output, min=1e-10, max=1)
        # Collapse the 4-class output according to which dataset this batch
        # came from (see docstring).
        if data_type == '1':
            output_p2 = output[:,1:2,:,:].clone()
            output_p1 = output[:,0:1,:,:].clone() + output[:,2:3,:,:].clone() + output[:,3:4,:,:].clone()
            output_p = torch.cat((output_p1, output_p2), 1)
        if data_type == '2':
            output_p2 = output[:,2:3,:,:].clone()
            output_p1 = output[:,0:1,:,:].clone() + output[:,1:2,:,:].clone() + output[:,3:4,:,:].clone()
            output_p = torch.cat((output_p1, output_p2), 1)
        if data_type == '3':
            output_p2 = output[:,3:4,:,:].clone()
            output_p1 = output[:,0:1,:,:].clone() + output[:,1:2,:,:].clone() + output[:,2:3,:,:].clone()
            output_p = torch.cat((output_p1, output_p2), 1)
        if data_type == '4':
            output_p = output.clone()
        # print('output p:',output_p.sum())
        # output = output * mask_var
        # print('Output size: {}, type: {}'.format(output.size(), type(output)))
        # print('Target size: {}, type: {}'.format(target_var.size(), type(target_var)))
        loss = criterion(output_p, target_var)
        # if epoch % 5 == 0:
        #     visualize_train(output_p[:,1:4,:,:], str(epoch) + 'output')
        #     visualize_train1(target_var[:,:,:], str(epoch) + 'target')
        # measure accuracy and record loss
        losses.update(loss.data, image_batch.size(0))
        ds = dice_similarity_u(output_p, target_var)
        #print(ds.data)
        dice.update(ds.data, image_batch.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        current_time = time.time()
        batch_time.update(current_time - end_time)
        end_time = current_time
        if ((i+1) % 10 == 0) and verbose:
            print('Train ep {0} [batch {1}/{2}]: '
                  #'Time {batch_time.val:.1f}s, '
                  'Loss avg: {loss.avg:.4f}, '
                  'Dice avg: {dice.avg:.4f}'.format(
                      epoch+1, i+1, len(train_loader),
                      #batch_time=batch_time,
                      loss=losses,
                      dice=dice))
    print('Training -> loss: {loss.avg:.4f}, '
          'Dice {dice.avg:.3f}'.format(
              loss=losses, dice=dice))
    #return {'train_loss': loss.avg, 'train_acc': dice.avg}
    return losses.avg, dice.avg
# %%
def validate(loader, data_type, model, criterion, epoch, verbose=True):
    """Run one validation pass over `loader` and return (mean loss, mean Dice).

    `data_type` controls how the 4-channel output is reduced to a 2-channel
    background/organ problem, mirroring train(): channel int(data_type) is
    the foreground for '1'-'3'; '4' keeps all channels.

    NOTE(review): `volatile=True` was removed from torch.autograd.Variable
    after PyTorch 0.4; modern code would use `with torch.no_grad():`.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    dice = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    for i, sample_batched in enumerate(loader):
        image_batch = sample_batched['image']
        # label should be only the middle slice
        label_batch = sample_batched['label'][:,0,:,:]
        # mask = sample_batched['mask'][:,0:1,:,:]
        input_var = Variable(image_batch, volatile=True).float()
        input_var = input_var.cuda()
        target_var = Variable(label_batch, volatile=True).long()
        target_var = target_var.cuda()
        # mask_var = Variable(mask).float()
        # mask_var = mask_var.cuda()
        # compute output
        output = model(input_var)
        # output = output * mask_var
        # Slice out [background, organ] channels, then overwrite channel 0
        # with the sum of all non-organ channels (same reduction as train(),
        # written via slice + in-place overwrite instead of torch.cat).
        if data_type == '1':
            output_p = output[:,0:2,:,:].clone()
            output_p[:,0,:,:] = output[:,0,:,:].clone() + output[:,2,:,:].clone() + output[:,3,:,:].clone()
        if data_type == '2':
            output_p = output[:,1:3,:,:].clone()
            output_p[:,0,:,:] = output[:,0,:,:].clone() + output[:,1,:,:].clone() + output[:,3,:,:].clone()
        if data_type == '3':
            output_p = output[:,2:4,:,:].clone()
            output_p[:,0,:,:] = output[:,0,:,:].clone() + output[:,1,:,:].clone() + output[:,2,:,:].clone()
        if data_type == '4':
            output_p = output.clone()
        # if epoch % 5 == 0:
        #     visualize_val(output_p[:,1:4,:,:], str(epoch) + 'output')
        #     visualize_val1(target_var[:,:,:], str(epoch) + 'target')
        loss = criterion(output_p, target_var)
        #torch.save(input_var, '/home/yanp2/tmp/resu-net/logs/input_{}.pth'.format(i))
        #torch.save(target_var, '/home/yanp2/tmp/resu-net/logs/target_{}.pth'.format(i))
        #torch.save(output, '/home/yanp2/tmp/resu-net/logs/output_{}.pth'.format(i))
        # measure accuracy and record loss
        losses.update(loss.data, image_batch.size(0))
        ds = dice_similarity_u(output_p, target_var)
        dice.update(ds.data, image_batch.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if ((i+1) % 10 == 0) and verbose:
            print('Validation ep {0} [batch {1}/{2}]: '
                  #'Time {batch_time.val:.1f}s, '
                  'Loss avg: {loss.avg:.4f}, '
                  'Dice avg: {dice.avg:.4f}'.format(
                      epoch+1, i+1, len(loader),
                      #batch_time=batch_time,
                      loss=losses,
                      dice=dice))
    print('Validation ep {} -> loss: {loss.avg:.4f}, '
          'Dice {dice.avg:.3f}'.format(
              epoch+1, loss=losses, dice=dice))
    return losses.avg, dice.avg
#def adjust_learning_rate(optimizer, epoch):
def adjust_learning_rate(optimizer, gamma=0.9):
    """Scale the learning rate of every parameter group by `gamma`."""
    for group in optimizer.param_groups:
        group['lr'] = group['lr'] * gamma
# %%
def save_checkpoint(state, is_best, log_folder, view='axial',
                    filename='checkpoint.pth.tar'):
    """Persist `state` under `log_folder`; when `is_best`, also mirror it
    as resu_best_<view>.pth.tar in the same folder."""
    target = path.join(log_folder, filename)
    torch.save(state, target)
    if not is_best:
        return
    best_target = path.join(log_folder, 'resu_best_{}.pth.tar'.format(view))
    shutil.copyfile(target, best_target)
# def compute_length(inputs, edge_op):
# """Compute the length of segmentation boundary"""
# # Get segmentation
# seg_channel = inputs.max(dim=1)[1]
# seg_channel = seg_channel.unsqueeze(1)
# seg_channel = seg_channel.float()
# #print(seg_channel.shape)
# g2 = F.conv2d(seg_channel, edge_op, padding=1)
# gx = g2 ** 2
# gx = torch.sum(torch.squeeze(gx), dim=0)
# # Adding small number to increase the numerical stability
# #gx = torch.sqrt(gx + 1e-16)
# gm = torch.mean(gx.view(-1))
# return gm
# class HybridLoss2d(nn.Module):
# def __init__(self, edge_op, weight=None, size_average=True):
# super(HybridLoss2d, self).__init__()
# self.nll_loss = nn.NLLLoss2d(weight, size_average)
# self.op = edge_op
# def forward(self, inputs, targets):
# #return self.nll_loss(F.log_softmax(inputs, dim=1), targets)
# ce = self.nll_loss(F.log_softmax(inputs, dim=1), targets)
# # dice
# dice = dice_similarity(inputs, targets)
# # boundary length
# length = compute_length(inputs, self.op)
# return ce - 0.1 * dice + length
class CrossEntropyLoss2d(nn.Module):
    """Pixel-wise NLL loss applied to log(probabilities).

    `inputs` must already be (clamped-positive) class probabilities of
    shape (N, C, H, W); `targets` is an (N, H, W) integer label map.
    """

    def __init__(self, weight=None, size_average=True):
        super(CrossEntropyLoss2d, self).__init__()
        # Keep the original (deprecated) NLLLoss2d to preserve behavior.
        self.nll_loss = nn.NLLLoss2d(weight, size_average)

    def forward(self, inputs, targets):
        log_probs = torch.log(inputs)
        return self.nll_loss(log_probs, targets)
# class FocalLoss2d(nn.Module):
# def __init__(self, weight=None, size_average=True):
# super(FocalLoss2d, self).__init__()
# self.nll_loss = nn.NLLLoss2d(weight, size_average)
# def forward(self, inputs, targets):
# focal_frequency = F.nll_loss(F.softmax(inputs, dim=1), targets, reduction = 'none')
# # print('shape1:',focal_frequency.shape)
# focal_frequency += 1.0
# focal_frequency = torch.pow(focal_frequency, 2)
# focal_frequency = focal_frequency.repeat(2, 1, 1, 1)
# focal_frequency = focal_frequency.transpose(1,0)
# # print('shape:',focal_frequency.shape)
# return self.nll_loss(focal_frequency * F.log_softmax(inputs, dim=1), targets)
# %%
# Training driver: alternates epochs between partially labelled single-organ
# datasets and a fully labelled multi-organ dataset; validates on the latter.
if __name__ == "__main__":
    global args  # no-op at module level; kept from the original
    args = parser.parse_args()
    cv = args.cv_n
    use_cuda = cuda.is_available()
    # NOTE(review): "checkpoing" typo kept; checkpoint and log dirs are the
    # same hard-coded path.
    checkpoing_dir = path.expanduser('/home/fangx2/mu_or/tmp/sf_134')
    if not path.isdir(checkpoing_dir):
        os.makedirs(checkpoing_dir)
    log_dir = path.expanduser('/home/fangx2/mu_or/tmp/sf_134')
    if not path.isdir(log_dir):
        os.makedirs(log_dir)
    """
    training
    """
    num_classes = 4
    num_in_channels = args.slices
    # model = DenseUNet(num_channels = num_in_channels, num_classes = num_classes)
    model = ResUNet(num_in_channels, num_classes)
    # model = UNet(num_in_channels, num_classes)
    # Warm-start the backbone from a previously trained checkpoint
    # (hard-coded path).
    resunet_checkpoint = torch.load('/home/fangx2/mu_or/tmp/sf_pr0_1216_dps/resunet_checkpoint_final.pth.tar')
    resunet_dict = resunet_checkpoint['state_dict']
    model.resnet.load_state_dict(resunet_dict)
    optimizer = optim.RMSprop(model.parameters(), lr=args.lr, momentum=args.momentum)
    # Hard-coded dataset roots; 1-3 use their training set for validation too.
    folder_training_1 = '/home/fangx2/data/LIver_submit1/data3/training_a/'
    folder_validation_1 = '/home/fangx2/data/LIver_submit1/data3/training_a/'
    folder_training_2 = '/home/fangx2/kits19/training_256_ras_a/'
    folder_validation_2 = '/home/fangx2/kits19/training_256_ras_a/'
    folder_training_3 = '/home/fangx2/data/code/data/spleen/training_a/'
    folder_validation_3 = '/home/fangx2/data/code/data/spleen/training_a/'
    folder_training_4 = '/home/fangx2/BTCV/training_256/'
    folder_validation_4 = '/home/fangx2/BTCV/validation_256/'
    # folder_training = r'/home/fangx2/data/LIver_submit1/dataset_256'
    # folder_validation = r'/home/fangx2/data/LIver_submit1/dataset_256'
    # Set L2 penalty using weight_decay
    #optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    # optimizer = optim.RMSprop(model.parameters(), lr=args.lr, momentum=args.momentum)
    # Initialize Sobel edge detection filter (only used by the commented-out
    # HybridLoss2d below).
    sobel_x = np.asarray([1.0, 0, -1.0, 2.0, 0, -2.0, 1.0, 0, -1.0], dtype=np.float32)
    sobel_x /= 4.0
    sobel_x = np.reshape(sobel_x, (1, 1, 3, 3))
    sobel_y = np.asarray([1.0, 2.0, 1.0, 0, 0, 0, -1.0, -2.0, -1.0], dtype=np.float32)
    sobel_y /= 4.0
    sobel_y = np.reshape(sobel_y, (1, 1, 3, 3))
    sobel = np.concatenate((sobel_x, sobel_y), axis=0)
    sobel = Variable(torch.from_numpy(sobel), requires_grad=False)
    if use_cuda:
        sobel = sobel.cuda()
    # weights = torch.Tensor([0.2, 1.2])
    #Cross entropy Loss
    criterion = CrossEntropyLoss2d()
    # criterion = FocalLoss2d(weights)
    #criterion = HybridLoss2d(sobel, weights)
    if use_cuda:
        print('\n***** Training ResU-Net with GPU *****\n')
        model.cuda()
        criterion.cuda()
    blocksize = args.blocksize
    view = args.view
    # NOTE(review): an unsupported view only prints a warning; `composed`
    # would then be undefined below.
    if view == 'axial' or view == 'sagittal' or view == 'coronal':
        composed = dl1.get_composed_transform(blocksize, num_in_channels, view)
        composed4 = dl2.get_composed_transform(blocksize, num_in_channels, view)
    else:
        print('The given view of <{}> is not supported!'.format(view))
    batchsize = args.batchsize
    #Dataset 1,2,3
    dataset_train1 = dl1.LiverCTDataset(folder_training_1,
                                        transform=composed)
    train_loader1 = dl1.DataLoader(dataset_train1,
                                   batch_size=args.batchsize,
                                   shuffle=True,
                                   num_workers=4,
                                   drop_last=False
                                   )
    dataset_validation1 = dl1.LiverCTDataset(folder_validation_1,
                                             transform=composed)
    val_loader1 = dl1.DataLoader(dataset_validation1,
                                 batch_size=args.batchsize,
                                 shuffle=False,
                                 num_workers=2,
                                 drop_last=False
                                 )
    # dataset_train2 = dl1.LiverCTDataset(folder_training_2,
    #                                     transform=composed)
    # train_loader2 = dl1.DataLoader(dataset_train2,
    #                                batch_size=args.batchsize,
    #                                shuffle=True,
    #                                num_workers=4,
    #                                drop_last=False
    #                                )
    # dataset_validation2 = dl1.LiverCTDataset(folder_validation_2,
    #                                          transform=composed)
    # val_loader2 = dl1.DataLoader(dataset_validation2,
    #                              batch_size=args.batchsize,
    #                              shuffle=False,
    #                              num_workers=2,
    #                              drop_last=False
    #                              )
    dataset_train3 = dl1.LiverCTDataset(folder_training_3,
                                        transform=composed)
    train_loader3 = dl1.DataLoader(dataset_train3,
                                   batch_size=args.batchsize,
                                   shuffle=True,
                                   num_workers=4,
                                   drop_last=False
                                   )
    dataset_validation3 = dl1.LiverCTDataset(folder_validation_3,
                                             transform=composed)
    val_loader3 = dl1.DataLoader(dataset_validation3,
                                 batch_size=args.batchsize,
                                 shuffle=False,
                                 num_workers=2,
                                 drop_last=False
                                 )
    #Dataset4
    dataset_train4 = dl2.LiverCTDataset(folder_training_4,
                                        transform=composed4)
    train_loader4 = dl2.DataLoader(dataset_train4,
                                   batch_size=args.batchsize,
                                   shuffle=True,
                                   num_workers=4,
                                   drop_last=False
                                   )
    dataset_validation4 = dl2.LiverCTDataset(folder_validation_4,
                                             transform=composed4)
    val_loader4 = dl2.DataLoader(dataset_validation4,
                                 batch_size=args.batchsize,
                                 shuffle=False,
                                 num_workers=2,
                                 drop_last=False
                                 )
    best_dice = -1.0
    #for epoch in range(args.start_epoch, args.epochs):
    num_epochs = args.epochs
    train_history = []
    val_history = []
    for epoch in range(num_epochs):
        print('Training epoch {} of {}...'.format(epoch + 1, num_epochs))
        # start timing
        t_start = time.time()
        # train for one epoch, cycling datasets 1 / 3 / 4 by epoch index
        # (dataset 2 is currently disabled via the commented code below)
        if epoch % 3 == 0:
            train_loss = train(train_loader1, '1', model, criterion,
                               optimizer, epoch, verbose=True)
        elif epoch % 3 == 1:
            # train_loss = train(train_loader2, '2', model, criterion,
            #                    optimizer, epoch, verbose=True)
            # # elif epoch % 4 == 2:
            # else:
            train_loss = train(train_loader3, '3', model, criterion,
                               optimizer, epoch, verbose=True)
        else:
            train_loss = train(train_loader4, '4', model, criterion,
                               optimizer, epoch, verbose=True)
        # train_loss = train(train_loader4, '4', model, criterion,
        #                    optimizer, epoch, verbose=True)
        train_history.append(train_loss)
        # Gradually reducing learning rate (every 40 epochs, incl. epoch 0)
        if epoch % 40 == 0:
            adjust_learning_rate(optimizer, gamma=0.99)
        # evaluate on validation set (always the multi-organ dataset 4)
        val_loss = validate(val_loader4, '4', model, criterion, epoch, verbose=True)
        val_history.append(val_loss)
        dice = val_loss[1]
        # remember best prec@1 and save checkpoint
        is_best = dice > best_dice
        best_dice = max(dice, best_dice)
        if is_best:
            fn_checkpoint = 'resu_checkpoint_ep{:04d}.pth.tar'.format(epoch + 1)
            save_checkpoint({'epoch': epoch + 1,
                             'state_dict': model.state_dict(),
                             'best_dice': best_dice,
                             'optimizer' : optimizer.state_dict(),},
                            is_best,
                            checkpoing_dir,
                            view,
                            filename=fn_checkpoint)
        if epoch == num_epochs - 1:
            filename = path.join(checkpoing_dir, 'resunet_checkpoint_final.pth.tar')
            torch.save({'epoch': epoch + 1,
                        'state_dict': model.state_dict(),
                        'best_dice': best_dice,
                        'optimizer' : optimizer.state_dict(),},filename)
        elapsed_time = time.time() - t_start
        print('Epoch {} completed in {:.2f}s\n'.format(epoch+1, elapsed_time))
    # save the training history
    time_now = datetime.datetime.now()
    time_str = time_now.strftime('%y%m%d-%H%M%S')
    fn_train_history = path.join(log_dir, 'train_hist_{}.npy'.format(time_str))
    fn_val_history = path.join(log_dir, 'val_hist_{}.npy'.format(time_str))
    np.save(fn_train_history, np.asarray(train_history))
    np.save(fn_val_history, np.asarray(val_history))
    time_disp_str = time_now.strftime('%H:%M:%S on %Y-%m-%d')
    print('Training completed at {}'.format(time_disp_str))
    print('Training history saved into:\n<{}>'.format(fn_train_history))
print('<{}>'.format(fn_val_history)) | 24,733 | 35.480826 | 110 | py |
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 16/PIPO-FAN-master/pipo_fan/train_concave0.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 5 16:00:33 2017
@author: yan
"""
# %% train the network
import argparse
import datetime
import math
import numpy as np
import os
from os import path
import shutil
import time
import torch
from torch import cuda
from torch import optim
#from torch.optim import lr_scheduler
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
#from collections import OrderedDict
from torch.nn import init
# from lovasz_losses import lovasz_softmax
import dataset.dataset_liverCT_2D as dl
# import dataset.dataset_all as dl
#from u_net import UNet
# from model.concave_sh import ResUNet
# from model.MIMO_att import ResUNet
# from model.concave_res2 import ResUNet
from model.concave_dps import ResUNet
# from model.concave_dps_dc import ResUNet
# from model.concave_dps3 import ResUNet
#from resu_scalecov import ResUNet
#from coordu_net import UNet
# %%
# Command-line options for the deep-supervision (concave/DPS) training run.
parser = argparse.ArgumentParser(description='PyTorch ResUNet Training')
parser.add_argument('--epochs', default=2000, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('-b', '--batchsize', default=8, type=int,
                    metavar='N', help='mini-batch size (default: 8)')
# NOTE(review): actual default is 224 but the help text says 320.
parser.add_argument('--blocksize', default=224, type=int,
                    metavar='N', help='H/W of each image block (default: 320)')
# NOTE(review): actual default is 3 but the help text says 1.
parser.add_argument('-s', '--slices', default=3, type=int,
                    metavar='N', help='number of slices (default: 1)')
# NOTE(review): help text says "number of slices" and "(default: 3)" but this
# is the class count with default 2.
parser.add_argument('-n', '--num_classes', default=2, type=int,
                    metavar='N', help='number of slices (default: 3)')
parser.add_argument('--lr', '--learning-rate', default=0.002, type=float,
                    metavar='LR', help='initial learning rate (default: 0.002)')
parser.add_argument('--momentum', default=0.9, type=float,
                    metavar='N', help='momentum for optimizer (default: 0.9)')
parser.add_argument('--view', default='axial', type=str,
                    metavar='View', help='view for segmentation (default: axial)')
parser.add_argument('--cv_n', default='1', type=str,
                    help='Cross validation Dataset num')
# %%
class AverageMeter(object):
    """Running-average tracker: latest value, sum, count and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear every accumulated statistic."""
        self.val, self.avg = 0, 0
        self.sum, self.count = 0, 0

    def update(self, val, n=1):
        """Record `val` weighted by `n` occurrences and refresh the mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
# %%
def dice_similarity(output, target):
    """Dice coefficient between the argmax segmentation of `output` and `target`.

    `output` holds per-class scores (N, C, H, W); `target` is an (N, H, W)
    label map. Per-sample Dice is computed over the H/W axes and averaged
    across the batch.
    """
    eps = 0.00001
    # Hard segmentation: index of the highest-scoring class at each pixel.
    seg = output.max(dim=1)[1].float()
    gt = target.float()
    inter = (seg * gt).sum(dim=2).sum(dim=1)
    union = (seg + gt).sum(dim=2).sum(dim=1)
    per_sample = 2. * inter / (union + eps)
    return torch.mean(per_sample)
# def dice_similarity(output, target):
# """Computes the Dice similarity"""
# #batch_size = target.size(0)
# smooth = 0.00001
# # max returns values and positions
# output = output>0.5
# output = output.float()
# target = target.float()
# seg_channel = output.view(output.size(0), -1)
# target_channel = target.view(target.size(0), -1)
# #print('Shapes: {}, {}'.format(seg_channel.shape, target.shape))
# intersection = (seg_channel * target_channel).sum()
# union = (seg_channel + target_channel).sum()
# dice = (2. * intersection) / (union + smooth)
# #print(intersection, union, dice)
# return torch.mean(dice)
# %%
def train(train_loader, model, criterion, optimizer, epoch, verbose=True):
    """Run one training epoch with deep supervision on five model outputs.

    The model returns five predictions; each is scored against the same
    target and their losses are summed. Returns (mean total loss, mean Dice
    of output 5).

    NOTE(review): the inputs are wrapped with `volatile=True`, which in
    pre-0.4 PyTorch disables autograd — on a *training* pass this looks like
    a copy-paste from validate(); confirm gradients actually flow.
    """
    batch_time = AverageMeter()
    #data_time = AverageMeter()
    losses = AverageMeter()
    # NOTE(review): `dice` is created but never updated, so the per-batch
    # "Dice avg" printed below is always 0.
    dice = AverageMeter()
    losses_1 = AverageMeter()
    dice_1 = AverageMeter()
    losses_2 = AverageMeter()
    dice_2 = AverageMeter()
    losses_3 = AverageMeter()
    dice_3 = AverageMeter()
    losses_4 = AverageMeter()
    dice_4 = AverageMeter()
    losses_5 = AverageMeter()
    dice_5 = AverageMeter()
    # losses_6 = AverageMeter()
    # dice_6 = AverageMeter()
    # switch to train mode
    model.train()
    end_time = time.time()
    for i, sample_batched in enumerate(train_loader):
        # measure data loading time
        #data_time.update(time.time() - end_time)
        image_batch = sample_batched['image']
        # label should be only the middle slice
        label_batch1 = sample_batched['label'][:,0,:,:]
        input_var = Variable(image_batch, volatile=True).float()
        input_var = input_var.cuda()
        target_var = Variable(label_batch1, volatile=True).long().cuda()
        # target_var = nn.Upsample(size = [256,256], mode='bilinear', align_corners=True)(target_var)
        # compute output
        output1, output2, output3, output4, output5 = model(input_var)
        # print('output:',output1.shape,output2.shape,output3.shape)
        # Deep supervision: the same criterion on every intermediate output.
        loss1 = criterion(output1, target_var)
        loss2 = criterion(output2, target_var)
        loss3 = criterion(output3, target_var)
        loss4 = criterion(output4, target_var)
        loss5 = criterion(output5, target_var)
        # loss6 = criterion(output6, target_var)
        # a = (output1 - output2 + 1) / 2
        # a_tar = (target_var1 - target_var2 + 1) / 2
        # loss4 = criterion(a, a_tar)
        # b = (output3 - output2 +1) / 2
        # b_tar = (target_var3 - target_var2 + 1) / 2
        # loss5 = criterion(b, b_tar)
        # loss = loss1 + loss2 + loss3 + 0.5 * loss4 + 0.5 * loss5
        loss = loss1 + loss2 + loss3 + loss4 + loss5
        # measure accuracy and record loss
        losses.update(loss.data, image_batch.size(0))
        losses_1.update(loss1.data, image_batch.size(0))
        losses_2.update(loss2.data, image_batch.size(0))
        losses_3.update(loss3.data, image_batch.size(0))
        losses_4.update(loss4.data, image_batch.size(0))
        losses_5.update(loss5.data, image_batch.size(0))
        # losses_6.update(loss6.data, image_batch.size(0))
        ds_1 = dice_similarity(output1, target_var)
        ds_2 = dice_similarity(output2, target_var)
        ds_3 = dice_similarity(output3, target_var)
        ds_4 = dice_similarity(output4, target_var)
        ds_5 = dice_similarity(output5, target_var)
        # ds_6 = dice_similarity(output6, target_var)
        #print(ds.data)
        dice_1.update(ds_1.data, image_batch.size(0))
        dice_2.update(ds_2.data, image_batch.size(0))
        dice_3.update(ds_3.data, image_batch.size(0))
        dice_4.update(ds_4.data, image_batch.size(0))
        dice_5.update(ds_5.data, image_batch.size(0))
        # dice_6.update(ds_6.data, image_batch.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        current_time = time.time()
        batch_time.update(current_time - end_time)
        end_time = current_time
        if ((i+1) % 10 == 0) and verbose:
            print('Train ep {0} [batch {1}/{2}]: '
                  #'Time {batch_time.val:.1f}s, '
                  'Loss avg: {loss.avg:.4f}, '
                  'Dice avg: {dice.avg:.4f}'.format(
                      epoch+1, i+1, len(train_loader),
                      #batch_time=batch_time,
                      loss=losses,
                      dice=dice))
    print('Training -> loss: {loss.avg:.4f}'.format(
        loss=losses))
    print('Training -> loss_1: {loss.avg:.4f}, '
          'Dice_1 {dice_1.avg:.3f}'.format(
              loss=losses_1, dice_1=dice_1))
    print('Training -> loss_2: {loss.avg:.4f}, '
          'Dice_2 {dice_2.avg:.3f}'.format(
              loss=losses_2, dice_2=dice_2))
    print('Training -> loss_3: {loss.avg:.4f}, '
          'Dice_3 {dice_3.avg:.3f}'.format(
              loss=losses_3, dice_3=dice_3))
    print('Training -> loss_4: {loss.avg:.4f}, '
          'Dice_4 {dice_4.avg:.3f}'.format(
              loss=losses_4, dice_4=dice_4))
    print('Training -> loss_5: {loss.avg:.4f}, '
          'Dice_5 {dice_5.avg:.3f}'.format(
              loss=losses_5, dice_5=dice_5))
    # print('Training -> loss_6: {loss.avg:.4f}, '
    #       'Dice_6 {dice_6.avg:.3f}'.format(
    #       loss=losses_5, dice_6=dice_6))
    #return {'train_loss': loss.avg, 'train_acc': dice.avg}
    return losses.avg, dice_5.avg
# %%
def validate(loader, model, criterion, epoch, verbose=True):
    """Run one evaluation pass over ``loader`` (no weight updates).

    Args:
        loader: iterable yielding dicts with 'image' and 'label' batches;
            only the first label slice ``[:, 0, :, :]`` is used as target.
        model: network returning five outputs (deep-supervision heads),
            each scored against the same target.
        criterion: per-output loss module (e.g. CrossEntropyLoss2d).
        epoch (int): current epoch index, used only for log messages.
        verbose (bool): when True, print running averages every 10 batches.

    Returns:
        tuple: ``(losses.avg, dice_5.avg)`` — mean summed loss and mean
        Dice of the fifth (final) output.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    # NOTE(review): 'dice' is printed below but never updated, so the
    # printed 'Dice avg' stays at the meter's initial value — confirm.
    dice = AverageMeter()
    losses_1 = AverageMeter()
    dice_1 = AverageMeter()
    losses_2 = AverageMeter()
    dice_2 = AverageMeter()
    losses_3 = AverageMeter()
    dice_3 = AverageMeter()
    losses_4 = AverageMeter()
    dice_4 = AverageMeter()
    losses_5 = AverageMeter()
    dice_5 = AverageMeter()
    # losses_6 = AverageMeter()
    # dice_6 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    for i, sample_batched in enumerate(loader):
        image_batch = sample_batched['image']
        # label should be only the middle slice
        label_batch1 = sample_batched['label'][:,0,:,:]
        # volatile=True is the legacy (pre-0.4 PyTorch) way of disabling
        # autograd during inference; torch.no_grad() is the modern form.
        input_var = Variable(image_batch, volatile=True).float()
        input_var = input_var.cuda()
        target_var = Variable(label_batch1, volatile=True).long().cuda()
        # compute output: five deep-supervision heads, one shared target
        output1, output2, output3, output4, output5 = model(input_var)
        loss1 = criterion(output1, target_var)
        loss2 = criterion(output2, target_var)
        loss3 = criterion(output3, target_var)
        loss4 = criterion(output4, target_var)
        loss5 = criterion(output5, target_var)
        # loss6 = criterion(output6, target_var)
        # a = (output1 - output2 + 1) / 2
        # a_tar = (target_var1 - target_var2 + 1) / 2
        # loss4 = criterion(a, a_tar)
        # b = (output3 - output2 +1) / 2
        # b_tar = (target_var3 - target_var2 + 1) / 2
        # loss5 = criterion(b, b_tar)
        # loss = loss1 + loss2 + loss3 + 0.5 * loss4 + 0.5 * loss5
        loss = loss1 + loss2 + loss3 + loss4 + loss5
        #torch.save(input_var, '/home/yanp2/tmp/resu-net/logs/input_{}.pth'.format(i))
        #torch.save(target_var, '/home/yanp2/tmp/resu-net/logs/target_{}.pth'.format(i))
        #torch.save(output, '/home/yanp2/tmp/resu-net/logs/output_{}.pth'.format(i))
        # measure accuracy and record loss (weighted by batch size)
        losses.update(loss.data, image_batch.size(0))
        losses_1.update(loss1.data, image_batch.size(0))
        losses_2.update(loss2.data, image_batch.size(0))
        losses_3.update(loss3.data, image_batch.size(0))
        losses_4.update(loss4.data, image_batch.size(0))
        losses_5.update(loss5.data, image_batch.size(0))
        # losses_6.update(loss6.data, image_batch.size(0))
        ds_1 = dice_similarity(output1, target_var)
        ds_2 = dice_similarity(output2, target_var)
        ds_3 = dice_similarity(output3, target_var)
        ds_4 = dice_similarity(output4, target_var)
        ds_5 = dice_similarity(output5, target_var)
        # ds_6 = dice_similarity(output6, target_var)
        dice_1.update(ds_1.data, image_batch.size(0))
        dice_2.update(ds_2.data, image_batch.size(0))
        dice_3.update(ds_3.data, image_batch.size(0))
        dice_4.update(ds_4.data, image_batch.size(0))
        dice_5.update(ds_5.data, image_batch.size(0))
        # dice_6.update(ds_6.data, image_batch.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if ((i+1) % 10 == 0) and verbose:
            print('Validation ep {0} [batch {1}/{2}]: '
                  #'Time {batch_time.val:.1f}s, '
                  'Loss avg: {loss.avg:.4f}, '
                  'Dice avg: {dice.avg:.4f}'.format(
                      epoch+1, i+1, len(loader),
                      #batch_time=batch_time,
                      loss=losses,
                      dice=dice))
    # Per-head summary after the full pass.
    print('Validation ep {} -> loss: {loss.avg:.4f}, '
          'Dice {dice.avg:.3f}'.format(
              epoch+1, loss=losses, dice=dice))
    print('Validation -> loss_1: {loss.avg:.4f}, '
          'Dice_1 {dice_1.avg:.3f}'.format(
              loss=losses_1, dice_1=dice_1))
    print('Validation -> loss_2: {loss.avg:.4f}, '
          'Dice_2 {dice_2.avg:.3f}'.format(
              loss=losses_2, dice_2=dice_2))
    print('Validation -> loss_3: {loss.avg:.4f}, '
          'Dice_3 {dice_3.avg:.3f}'.format(
              loss=losses_3, dice_3=dice_3))
    print('Validation -> loss_4: {loss.avg:.4f}, '
          'Dice_4 {dice_4.avg:.3f}'.format(
              loss=losses_4, dice_4=dice_4))
    print('Validation -> loss_5: {loss.avg:.4f}, '
          'Dice_5 {dice_5.avg:.3f}'.format(
              loss=losses_5, dice_5=dice_5))
    # print('Validation -> loss_6: {loss.avg:.4f}, '
    #       'Dice_6 {dice_6.avg:.3f}'.format(
    #           loss=losses_6, dice_6=dice_6))
    return losses.avg, dice_5.avg
#def adjust_learning_rate(optimizer, epoch):
def adjust_learning_rate(optimizer, gamma=0.9):
    """Decay every parameter group's learning rate in place by ``gamma``."""
    for group in optimizer.param_groups:
        group['lr'] = group['lr'] * gamma
# %%
def save_checkpoint(state, is_best, log_folder, view='axial',
                    filename='checkpoint.pth.tar'):
    """Serialize ``state`` into ``log_folder/filename``; when ``is_best``
    is True, also duplicate it as the per-view best checkpoint
    (``resu_best_<view>.pth.tar``)."""
    ckpt_path = path.join(log_folder, filename)
    torch.save(state, ckpt_path)
    if not is_best:
        return
    best_path = path.join(log_folder, 'resu_best_{}.pth.tar'.format(view))
    shutil.copyfile(ckpt_path, best_path)
def compute_length(inputs, edge_op):
    """Approximate the segmentation boundary length of ``inputs``.

    The hard segmentation (argmax over the class dimension) is convolved
    with the edge operator ``edge_op`` (padding 1); the squared responses
    are summed over the leading (squeezed) dimension and averaged over
    all remaining elements.
    """
    hard_seg = inputs.max(dim=1)[1].unsqueeze(1).float()
    edge_response = F.conv2d(hard_seg, edge_op, padding=1)
    squared = torch.squeeze(edge_response ** 2)
    per_pixel = torch.sum(squared, dim=0)
    return torch.mean(per_pixel.view(-1))
class HybridLoss2d(nn.Module):
    """Hybrid segmentation loss: ``CE - 0.1 * Dice + boundary_length``.

    Relies on the module-level helpers ``dice_similarity`` and
    ``compute_length``; ``edge_op`` is the convolution kernel used for
    the boundary-length term.
    """
    def __init__(self, edge_op, weight=None, size_average=True):
        super(HybridLoss2d, self).__init__()
        self.nll_loss = nn.NLLLoss2d(weight, size_average)
        self.op = edge_op

    def forward(self, inputs, targets):
        log_probs = F.log_softmax(inputs, dim=1)
        cross_entropy = self.nll_loss(log_probs, targets)
        overlap = dice_similarity(inputs, targets)
        boundary = compute_length(inputs, self.op)
        return cross_entropy - 0.1 * overlap + boundary
class CrossEntropyLoss2d(nn.Module):
    """2D cross-entropy: log-softmax over the class dimension then NLL.

    Args:
        weight: optional per-class rescaling weights.
        size_average: legacy reduction flag (True -> mean); kept so
            existing callers keep working.
    """
    def __init__(self, weight=None, size_average=True):
        super(CrossEntropyLoss2d, self).__init__()
        # nn.NLLLoss2d was a deprecated alias of nn.NLLLoss and has been
        # removed from recent PyTorch releases; nn.NLLLoss accepts
        # (N, C, H, W) input directly.
        self.nll_loss = nn.NLLLoss(weight, size_average)
    def forward(self, inputs, targets):
        return self.nll_loss(F.log_softmax(inputs, dim=1), targets)
class LovaszLoss2d(nn.Module):
    """Lovasz-Softmax loss over per-pixel class probabilities.

    Delegates to the module-level ``lovasz_softmax`` helper.
    """
    def forward(self, inputs, targets):
        # dim=1 (the class dimension) was previously implicit; calling
        # F.softmax without dim is deprecated and for 4D input the legacy
        # default is dim=1, so this is behavior-preserving.
        return lovasz_softmax(F.softmax(inputs, dim=1), targets)
class LoCeLoss2d(nn.Module):
    """Sum of the Lovasz-Softmax loss and 2D cross-entropy.

    Args:
        weight: optional per-class rescaling weights for the NLL term.
        size_average: legacy reduction flag (True -> mean).
    """
    def __init__(self, weight=None, size_average=True):
        super(LoCeLoss2d, self).__init__()
        # nn.NLLLoss2d has been removed from recent PyTorch; nn.NLLLoss is
        # the drop-in replacement for (N, C, H, W) inputs.
        self.nll_loss = nn.NLLLoss(weight, size_average)
    def forward(self, inputs, targets):
        # Explicit dim=1 matches the legacy implicit softmax dim for 4D input.
        return lovasz_softmax(F.softmax(inputs, dim=1), targets) + self.nll_loss(F.log_softmax(inputs, dim=1), targets)
class DiceLoss(nn.Module):
    """Soft Dice loss: ``1 - (2*|A.B| + eps) / (sum(A) + sum(B) + eps)``.

    Inputs are flattened and treated as soft masks; ``eps`` keeps the
    ratio defined when both masks are empty.
    """
    def __init__(self):
        super(DiceLoss, self).__init__()
    def forward(self, input, target):
        eps = 0.00001
        pred = input.float().view(-1)
        truth = target.float().view(-1)
        overlap = (pred * truth).sum()
        denom = pred.sum() + truth.sum() + eps
        return 1 - ((2. * overlap + eps) / denom)
class FocalLoss2d(nn.Module):
    """Focal-weighted 2D cross-entropy.

    Each pixel's log-likelihood is scaled by ``(1 - p_t) ** 2``, where
    ``p_t`` is the softmax probability assigned to the true class, so
    well-classified pixels contribute less to the loss.

    Args:
        weight: optional per-class rescaling weights for the NLL term.
        size_average: legacy reduction flag (True -> mean).
    """
    def __init__(self, weight=None, size_average=True):
        super(FocalLoss2d, self).__init__()
        # nn.NLLLoss2d has been removed from recent PyTorch; nn.NLLLoss
        # handles (N, C, H, W) input identically.
        self.nll_loss = nn.NLLLoss(weight, size_average)
    def forward(self, inputs, targets):
        # F.nll_loss on raw probabilities returns -p_t per pixel (N, H, W).
        focal_frequency = F.nll_loss(F.softmax(inputs, dim=1), targets, reduction='none')
        focal_frequency += 1.0                            # 1 - p_t
        focal_frequency = torch.pow(focal_frequency, 2)   # (1 - p_t) ** 2
        # Broadcast the per-pixel weight across the class dimension.
        # (The original hard-coded 2 classes; use the actual channel count
        # so the loss generalizes to multi-class outputs.)
        num_classes = inputs.size(1)
        focal_frequency = focal_frequency.repeat(num_classes, 1, 1, 1)
        focal_frequency = focal_frequency.transpose(1, 0)  # -> (N, C, H, W)
        return self.nll_loss(focal_frequency * F.log_softmax(inputs, dim=1), targets)
# %%
if __name__ == "__main__":
    # ---- Arguments and per-fold checkpoint/log directories ----------------
    global args
    args = parser.parse_args()
    cv = args.cv_n
    view = args.view
    use_cuda = cuda.is_available()
    # checkpoing_dir = path.expanduser('/home/fangx2/data/LIver_submit1/data' + cv + '/tmp/spleen_dps_1105')
    # checkpoing_dir = path.expanduser('/home/fangx2/data/code/data/spleen/spleen_dps_1105')
    # checkpoing_dir = path.expanduser('/home/fangx2/data/LIver_submit1/data3/tmp/liver_ras_1106')
    # if not path.isdir(checkpoing_dir):
    #     os.makedirs(checkpoing_dir)
    # # log_dir = path.expanduser('/home/fangx2/data/LIver_submit1/data' + cv + '/tmp/spleen_dps_1105')
    # # log_dir = path.expanduser('/home/fangx2/data/code/data/spleen/spleen_dps_1105')
    # log_dir = path.expanduser('/home/fangx2/data/LIver_submit1/data3/tmp/liver_ras_1106')
    # if not path.isdir(log_dir):
    #     os.makedirs(log_dir)
    # NOTE: 'checkpoing_dir' (sic) keeps the spelling used throughout the file.
    checkpoing_dir = path.expanduser('/home/fangx2/data/code/data/spleen/5_fold_cv/fold' + cv + '/tmp/concave')
    if not path.isdir(checkpoing_dir):
        os.makedirs(checkpoing_dir)
    log_dir = path.expanduser('/home/fangx2/data/code/data/spleen/5_fold_cv/fold' + cv + '/tmp/concave')
    if not path.isdir(log_dir):
        os.makedirs(log_dir)
    """
    training
    """
    # ---- Model ------------------------------------------------------------
    num_classes = args.num_classes
    num_in_channels = args.slices
    #model = UNet(5, 2)
    model = ResUNet(num_in_channels,num_classes)
    # resunet_checkpoint = torch.load('/home/fangx2/data/LIver_submit1/data' + cv + '/tmp/concave_dps_pre/resu_best_axial.pth.tar')
    # resunet_dict = resunet_checkpoint['state_dict']
    # model.load_state_dict(resunet_dict)
    # ---- Data folders for the selected cross-validation fold --------------
    # folder_training = r'/home/fangx2/data/LIver_submit1/data3/training_ras'
    # folder_validation = r'/home/fangx2/data/LIver_submit1/data3/validation_ras'
    folder_training = '/home/fangx2/data/code/data/spleen/5_fold_cv/fold' + cv + '/training/'
    folder_validation = '/home/fangx2/data/code/data/spleen/5_fold_cv/fold' + cv + '/validation/'
    # folder_training = r'/home/fangx2/data/code/data/spleen/training'
    # folder_validation = r'/home/fangx2/data/code/data/spleen/validation'
    # folder_training = r'/home/fangx2/data/LIver_submit1/data' + cv + '/training/'
    # folder_validation = r'/home/fangx2/data/LIver_submit1/data' + cv + '/validation/'
    # folder_training = r'/home/fangx2/data/a_submit2/dataset_256/'
    # folder_validation = r'/home/fangx2/data/a_submit2/dataset_256/'
    # Set L2 penalty using weight_decay
    #optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    optimizer = optim.RMSprop(model.parameters(), lr=args.lr, momentum=args.momentum)
    #optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # resunet_checkpoint = torch.load('/home/fangx2/data/LIver_submit1/data1/tmp/concave_lr/resu_best_axial.pth.tar')
    # resunet_dict = resunet_checkpoint['state_dict']
    # model.load_state_dict(resunet_dict)
    # Initialize Sobel edge detection filter: x/y kernels stacked into a
    # (2, 1, 3, 3) conv weight — only consumed by HybridLoss2d.
    sobel_x = np.asarray([1.0, 0, -1.0, 2.0, 0, -2.0, 1.0, 0, -1.0], dtype=np.float32)
    sobel_x /= 4.0
    sobel_x = np.reshape(sobel_x, (1, 1, 3, 3))
    sobel_y = np.asarray([1.0, 2.0, 1.0, 0, 0, 0, -1.0, -2.0, -1.0], dtype=np.float32)
    sobel_y /= 4.0
    sobel_y = np.reshape(sobel_y, (1, 1, 3, 3))
    sobel = np.concatenate((sobel_x, sobel_y), axis=0)
    sobel = Variable(torch.from_numpy(sobel), requires_grad=False)
    if use_cuda:
        sobel = sobel.cuda()
    # ---- Loss: class-weighted cross-entropy (background 0.2, organ 1.2) ----
    weights = torch.Tensor([0.2, 1.2])
    criterion = CrossEntropyLoss2d(weights)
    # criterion = FocalLoss2d(weights)
    # criterion = DiceLoss()
    #criterion = HybridLoss2d(sobel, weights)
    #criterion = LoCeLoss2d(weights)
    if use_cuda:
        print('\n***** Training ResU-Net with GPU *****\n')
        model.cuda()
        criterion.cuda()
    # ---- Datasets and loaders ----------------------------------------------
    blocksize = args.blocksize
    if view == 'axial' or view == 'sagittal' or view == 'coronal':
        composed = dl.get_composed_transform(blocksize, num_in_channels, view)
    else:
        # NOTE(review): an unsupported view only prints a warning; 'composed'
        # is then undefined and the dataset construction below will fail.
        print('The given view of <{}> is not supported!'.format(view))
    batchsize = args.batchsize
    dataset_train = dl.LiverCTDataset(folder_training,
                                      transform=composed)
    train_loader = dl.DataLoader(dataset_train,
                                 batch_size=args.batchsize,
                                 shuffle=True,
                                 num_workers=4,
                                 drop_last=False
                                 )
    dataset_validation = dl.LiverCTDataset(folder_validation,
                                           transform=composed)
    val_loader = dl.DataLoader(dataset_validation,
                               batch_size=args.batchsize,
                               shuffle=False,
                               num_workers=2,
                               drop_last=False
                               )
    # ---- Training loop: keep the checkpoint with the best validation Dice --
    best_dice = -1.0
    #for epoch in range(args.start_epoch, args.epochs):
    num_epochs = args.epochs
    train_history = []
    val_history = []
    for epoch in range(num_epochs):
        print('Training epoch {} of {}...'.format(epoch + 1, num_epochs))
        # start timing
        t_start = time.time()
        # train for one epoch
        train_loss = train(train_loader, model, criterion,
                           optimizer, epoch, verbose=True)
        train_history.append(train_loss)
        # Gradually reducing learning rate (every 40 epochs, factor 0.99)
        if epoch % 40 == 0:
            adjust_learning_rate(optimizer, gamma=0.99)
        # evaluate on validation set; val_loss is (mean loss, mean Dice)
        val_loss = validate(val_loader, model, criterion, epoch, verbose=True)
        val_history.append(val_loss)
        dice = val_loss[1]
        # remember best prec@1 and save checkpoint
        is_best = dice > best_dice
        best_dice = max(dice, best_dice)
        if is_best:
            fn_checkpoint = 'resu_checkpoint_ep{:04d}.pth.tar'.format(epoch + 1)
            save_checkpoint({'epoch': epoch + 1,
                             'state_dict': model.state_dict(),
                             'best_dice': best_dice,
                             'optimizer' : optimizer.state_dict(),},
                            is_best,
                            checkpoing_dir,
                            view,
                            filename=fn_checkpoint)
        if epoch == num_epochs - 1:
            filename = path.join(checkpoing_dir, 'resunet_checkpoint_final.pth.tar')
            torch.save({'epoch': epoch + 1,
                        'state_dict': model.state_dict(),
                        'best_dice': best_dice,
                        'optimizer' : optimizer.state_dict(),},filename)
        elapsed_time = time.time() - t_start
        print('Epoch {} completed in {:.2f}s\n'.format(epoch+1, elapsed_time))
    # save the training history as timestamped .npy arrays
    time_now = datetime.datetime.now()
    time_str = time_now.strftime('%y%m%d-%H%M%S')
    fn_train_history = path.join(log_dir, 'train_hist_{}.npy'.format(time_str))
    fn_val_history = path.join(log_dir, 'val_hist_{}.npy'.format(time_str))
    np.save(fn_train_history, np.asarray(train_history))
    np.save(fn_val_history, np.asarray(val_history))
    time_disp_str = time_now.strftime('%H:%M:%S on %Y-%m-%d')
    print('Training completed at {}'.format(time_disp_str))
    print('Training history saved into:\n<{}>'.format(fn_train_history))
    print('<{}>'.format(fn_val_history))
| 24,809 | 35.485294 | 131 | py |
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 16/PIPO-FAN-master/pipo_fan/segment_sf_partial.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 28 16:24:59 2017
@author: yan
Load pre-trained network to segment a new image
Code v0.01
"""
# %% Resnet blocks in U-net
import argparse
import datetime
import nibabel as nib
import numpy as np
import os
from os import path
from scipy import ndimage
import SimpleITK as sitk
import time
import torch
from torch import cuda
from torch import optim
from torch.autograd import Variable
import torch.nn as nn
# from unet_context import UNet_ctx
#from u_net import UNet
# from model.concave_dps import ResUNet
from model.concave_dps_w import ResUNet
# from model.concave_res2 import ResUNet
# from model.concave_res_w3 import ResUNet
#from fcoordresu_net import ResUNet
#from resu_ctx import ResUNet
# %%
# Command-line interface: positional input/output volume paths plus options
# controlling slice window size, slice range, device use, and optional
# ground-truth evaluation.
parser = argparse.ArgumentParser(description='ResUNet CT segmentation')
parser.add_argument('input_filename', type=str, metavar='input_filename',
                    help='File of image to be segmented')
parser.add_argument('output_filename', type=str, metavar='output_filename',
                    help='File to save the segmentation result')
# NOTE(review): default is 3 but the help text says 5 — confirm intent.
parser.add_argument('-s', '--slices', default=3, type=int,
                    help='number of slices (default: 5)')
parser.add_argument('--begin', default=0, type=int,
                    help='Beginning slice for segmentation')
parser.add_argument('--end', default=9999, type=int,
                    help='Ending slice for segmentation')
# NOTE(review): type=bool on argparse options treats any non-empty string
# as True (e.g. '-c False' is still True) — confirm this is acceptable.
parser.add_argument('-c', '--cuda', default=True, type=bool, metavar='Use GPU CUDA',
                    help='Use GPU for computation')
parser.add_argument('-e', '--evaluating', default=False, type=bool,
                    metavar='evaluation after segmentation', help='Use GT label for evaluation after completing segmentation')
parser.add_argument('-l', '--label_filename', default=None, type=str,
                    metavar='label_filename',
                    help='File containing the ground truth segmentation label for evaluation')
parser.add_argument('--network_path', default='./', type=str,
                    metavar='path of network file',
                    help='File containing the pre-trained network')
parser.add_argument('--view', default='axial', type=str,
                    metavar='View', help='view for segmentation (default: axial)')
# %%
def load_image(image_filename, evaluating=False, label_filename=None):
    """Load a NIfTI image and, optionally, its ground-truth label.

    Args:
        image_filename: path of the image volume to segment.
        evaluating: when True, also try to load the label volume.
        label_filename: path of the label volume; may be None.

    Returns:
        dict with 'image' (nibabel image) and 'label' (nibabel image, or
        None when not evaluating or the label file is unavailable).
    """
    image = nib.load(image_filename)
    # Guard label_filename=None explicitly: path.isfile(None) raises
    # TypeError on Python 3, which previously crashed evaluation runs
    # launched without -l/--label_filename.
    if evaluating and label_filename is not None and path.isfile(label_filename):
        label = nib.load(label_filename)
    else:
        label = None
    return {'image':image, 'label':label}
# %%
def load_network(fn_network, gpu=True):
    """Restore a pre-trained ResUNet from a checkpoint file.

    Args:
        fn_network: path to the ``.pth.tar`` checkpoint.
        gpu: load onto GPU when True, otherwise remap storages to CPU.

    Returns:
        ``(model, optimizer)`` — the optimizer slot is always ``None``;
        ``(None, None)`` when the checkpoint file does not exist.
    """
    if not path.isfile(fn_network):
        print("=> no checkpoint found at '{}'".format(fn_network))
        return None, None
    print("=> loading checkpoint '{}'".format(fn_network))
    if gpu:
        checkpoint = torch.load(fn_network)
    else:
        # Remap every storage to CPU memory.
        checkpoint = torch.load(fn_network, map_location=lambda storage, loc: storage)
    # Fixed architecture: 3 input slices, 4 output classes.
    model = ResUNet(3,4)
    model.load_state_dict(checkpoint['state_dict'])
    model.cuda() if gpu else model.cpu()
    # optimizer = optim.SGD(model.parameters(), lr=0.02)
    # if gpu:
    #     optimizer.load_state_dict(checkpoint['optimizer'])
    # else:
    optimizer = None
    print("=> loaded checkpoint at epoch {}"
          .format(checkpoint['epoch']))
    return model, optimizer
# %%
def compute_dice(la, lb):
    """Dice overlap ``2*|A.B| / (sum(A)+sum(B))`` with a small epsilon in
    the denominator so empty masks yield 0 instead of dividing by zero."""
    overlap = np.sum(la * lb)
    total = np.sum(la + lb)
    return 2 * overlap / (total + 0.00001)
# %%
class SimpleITKAsNibabel(nib.Nifti1Image):
    """
    Minimal interface to use a SimpleITK image as if it were
    a nibabel object. Currently only supports the subset of the
    interface used by NiftyNet and is read only.
    """
    def __init__(self, itk_image):
        # Keep a handle on the wrapped SimpleITK image.
        #try:
        self._SimpleITKImage = itk_image
        #except RuntimeError as err:
        #    if 'Unable to determine ImageIO reader' in str(err):
        #        raise nibabel.filebasedimages.ImageFileError(str(err))
        #    else:
        #        raise
        # self._header = SimpleITKAsNibabelHeader(self._SimpleITKImage)
        # Derive the RAS affine from the ITK geometry (see make_affine).
        affine = make_affine(self._SimpleITKImage)
        # super(SimpleITKAsNibabel, self).__init__(
        #     sitk.GetArrayFromImage(self._SimpleITKImage).transpose(), affine)
        # The array is transposed to match nibabel's axis ordering.
        nib.Nifti1Image.__init__(
            self,
            sitk.GetArrayFromImage(self._SimpleITKImage).transpose(), affine)
class SimpleITKAsNibabelHeader(nib.spatialimages.SpatialHeader):
    """SpatialHeader populated from a SimpleITK image's dtype, array shape
    and voxel spacing."""
    def __init__(self, image_reference):
        super(SimpleITKAsNibabelHeader, self).__init__(
            data_dtype=sitk.GetArrayViewFromImage(image_reference).dtype,
            shape=sitk.GetArrayViewFromImage(image_reference).shape,
            zooms=image_reference.GetSpacing())
def make_affine(simpleITKImage):
    """Build a 4x4 RAS affine from a SimpleITK image's geometry.

    The physical positions of the three unit index offsets and of the
    origin are probed to recover the direction/spacing columns and the
    translation (in LPS); the first two axes are then negated to convert
    to RAS, matching nibabel's convention.
    """
    probes = ((1, 0, 0),
              (0, 1, 0),
              (0, 0, 1),
              (0, 0, 0))
    c = np.array([simpleITKImage.TransformContinuousIndexToPhysicalPoint(p)
                  for p in probes])
    # Stack [axis vectors relative to origin | origin] and add the
    # homogeneous bottom row [0, 0, 0, 1].
    affine = np.concatenate([
        np.concatenate([c[0:3] - c[3:], c[3:]], axis=0),
        [[0.], [0.], [0.], [1.]]], axis=1)
    affine = np.transpose(affine)
    # convert LPS -> RAS by flipping the first two axes
    return np.matmul(np.diag([-1., -1., 1., 1.]), affine)
# %%
class Nifti_from_numpy(nib.Nifti1Image):
    """
    Wrap a numpy array as a read-only Nifti1Image, borrowing the affine
    from a SimpleITK image (used to save segmentations of DICOM inputs).
    """
    def __init__(self, array, itk_image):
        # Keep a handle on the SimpleITK image supplying the geometry.
        #try:
        self._SimpleITKImage = itk_image
        #except RuntimeError as err:
        #    if 'Unable to determine ImageIO reader' in str(err):
        #        raise nibabel.filebasedimages.ImageFileError(str(err))
        #    else:
        #        raise
        # self._header = SimpleITKAsNibabelHeader(self._SimpleITKImage)
        # Derive the RAS affine from the ITK geometry (see make_affine).
        affine = make_affine(self._SimpleITKImage)
        # super(SimpleITKAsNibabel, self).__init__(
        #     sitk.GetArrayFromImage(self._SimpleITKImage).transpose(), affine)
        # The caller-provided array (not the ITK pixel data) becomes the
        # image payload, transposed to nibabel's axis ordering.
        nib.Nifti1Image.__init__(
            self, array.transpose(), affine)
def extract_volume(volume):
    """Tile ``volume`` (C, H, W) into overlapping 224x224 crops.

    Crops are taken on a 112-pixel stride in both spatial dimensions;
    windows that would run past the border are clamped so the final crop
    ends exactly at the edge.

    Returns:
        (crops, x_offsets, y_offsets): parallel lists with each crop and
        its top-left spatial offset.
    """
    crops = []
    x_offsets = []
    y_offsets = []
    height = volume.shape[1]
    width = volume.shape[2]
    for row in range(0, height, 112):
        # Clamp so the 224-wide window never exceeds the border.
        row = min(row, height - 224)
        for col in range(0, width, 112):
            col = min(col, width - 224)
            crops.append(volume[:, row:row + 224, col:col + 224])
            x_offsets.append(row)
            y_offsets.append(col)
            if col == width - 224:
                break
        if row == height - 224:
            break
    return crops, x_offsets, y_offsets
def construct_volume(volumes,x_coord, y_coord):
    """Stitch overlapping crop predictions back into one full-size map.

    Each ``volumes[i]`` is a (1, C, 224, 224) prediction placed at spatial
    offset ``(x_coord[i], y_coord[i])``; overlapping regions are averaged
    by dividing the summed predictions by the per-pixel contribution count.

    Returns a (C, H, W) CUDA tensor.

    NOTE(review): the zero canvases below are created on CPU while the
    caller passes model outputs (possibly CUDA tensors) — confirm device
    handling on current PyTorch versions.
    """
    x_len = max(x_coord) + 224
    y_len = max(y_coord) + 224
    seg_matrix = []
    mul_matrix = []
    for i in range(len(volumes)):
        # Zero-padded canvas holding this crop at its offset.
        output = torch.zeros([volumes[i].shape[0],volumes[i].shape[1],x_len,y_len],dtype=torch.float32)
        # Companion mask marking where this crop contributes.
        time_matrix = torch.zeros([volumes[i].shape[0],volumes[i].shape[1], x_len,y_len])
        x_start = x_coord[i]
        y_start = y_coord[i]
        x_end = x_start + 224
        y_end = y_start + 224
        output[:,:,x_start:x_end, y_start:y_end] = volumes[i]
        time_matrix[:,:, x_start:x_end, y_start:y_end] = torch.ones(volumes[i].shape)
        seg_matrix.append(output)
        mul_matrix.append(time_matrix)
    seg_matrix = torch.cat(seg_matrix,0)
    mul_matrix = torch.cat(mul_matrix,0)
    # Average overlapping predictions: summed values / contribution counts.
    seg_matrix = torch.sum(seg_matrix, 0)
    mul_matrix = torch.sum(mul_matrix, 0)
    seg_final = torch.div(seg_matrix, mul_matrix)
    seg_final = seg_final.cuda()
    return seg_final
# %%
if __name__ == "__main__":
    # ---- Arguments & environment -------------------------------------------
    args = parser.parse_args()
    evaluating = args.evaluating
    use_cuda = args.cuda
    slice_begin = args.begin
    slice_end = args.end
    view = args.view
    if not cuda.is_available():
        print('No available GPU can be used for computation!')
        use_cuda = False
    num_channels = args.slices
    # num_channels = 3
    #fn_network = path.expanduser('~/tmp/resu-net3D/checkpoints/resu3d_checkpoint_ep0578.pth.tar')
    #fn_network = path.join(args.network_path, 'resu_best.pth.tar')
    #load the trained best 2D model
    # fn_network = path.join(args.network_path,'resunet_checkpoint_final.pth.tar')
    fn_network = path.join(args.network_path,'resu_best_' + view + '.pth.tar')
    print('Loading network from <{}>'.format(fn_network))
    if not path.isfile(fn_network):
        raise Exception('Missing network <{}>! File Not Found!'.format(fn_network))
    model_axial, optimizer = load_network(fn_network, gpu=use_cuda)
    # Set model to evaluation mode
    model_axial.eval()
    # ---- Load input (DICOM folder or NIfTI file) ---------------------------
    #img_filename = path.expanduser(args.input_filename)
    #file in computer/home/data/ct_nih
    img_filename = args.input_filename
    print('Input image for segmentation:\t{}'.format(img_filename))
    dicom_input = False
    # Check if it is DICOM folder
    if path.isdir(img_filename):
        reader = sitk.ImageSeriesReader()
        dicom_names = reader.GetGDCMSeriesFileNames( img_filename )
        reader.SetFileNames(dicom_names)
        image = reader.Execute()
        dicom_input = True
        w, h, d = image.GetSize()
        img_data = sitk.GetArrayFromImage(image)
    else:
        volume = load_image(img_filename, evaluating, args.label_filename)
        image, label = volume['image'], volume['label']
        w, h, d = image.shape[:3]
        img_data = np.squeeze(image.get_data())
    print('Size of the input image: {}x{}x{}'.format(w, h, d))
    # ---- Preprocessing: view transpose, clip HU to [-200, 200], scale -----
    img_data = img_data.astype(np.float32)
    if view == 'axial':
        img_data = img_data
    elif view == 'coronal':
        img_data = img_data.transpose((2,0,1))
    else:
        img_data = img_data.transpose(2,1,0)
    img_data[img_data > 200] = 200.0
    img_data[img_data < -200] = -200.0
    img_data /= 200.0
    # ---- Slice-by-slice segmentation ---------------------------------------
    print('Segmenting image...')
    start_time = time.time()
    results = []
    num_half_channels = num_channels >> 1
    # Define the range of segmentation
    first = max(num_half_channels, slice_begin)
    last = min(d - num_half_channels - 1, slice_end)
    #last = min(d - num_channels + 1, slice_end)
    num_segmented_slices = last - first + 1
    print('Segmenting {} slices between [{}, {}]'.format(
        num_segmented_slices, first, last))
    # Pad the leading slices that cannot form a full channel window.
    for i in range(first):
        #results.append(np.zeros((1,1,w,h)))
        results.append(np.zeros((1,h,w)))
    #for depth in range(d - num_channels + 1):
    for depth in range(first - num_half_channels,
                       last - num_half_channels):
        if dicom_input:
            subvolume = img_data[depth:depth+num_channels,:,:]
        else:
            subvolume = img_data[:,:,depth:depth+num_channels]
            subvolume = subvolume.transpose((2, 1, 0))
        # Tile into overlapping 224x224 crops, run the model on each, then
        # stitch the crop predictions back together and take the argmax.
        subvolumes, x_coor, y_coor = extract_volume(subvolume)
        outputs = []
        for volume in subvolumes:
            volume = volume[np.newaxis,:,:,:]
            # volatile=True is the legacy (pre-0.4 PyTorch) inference mode.
            volume = Variable(torch.from_numpy(volume), volatile=True).float()
            if use_cuda:
                volume = volume.cuda()
            #subs.append(subvolume)
            # output1, output2, output3, output4, output5 = model_axial(volume)
            output5 = model_axial(volume)
            # output_s = nn.Softmax2d()(output5)
            outputs.append(output5)
        output = construct_volume(outputs, x_coor, y_coor)
        output = output.max(dim=0)[1].cpu().data.numpy()
        output = output[np.newaxis,:,:]
        results.append(output)
        #results.append(output.cpu().data.numpy())
    print('It took {:.1f}s to segment {} slices'.format(
        time.time() - start_time, num_segmented_slices))
    # Pad the trailing slices that were not segmented.
    #for i in range(num_half_channels):
    for i in range(d - last):
        #results.append(np.zeros((1,1,w,h)))
        results.append(np.zeros((1,h,w)))
    results = np.squeeze(np.asarray(results))
    #dsize = list(results.shape)
    c, h, w = results.shape
    #print('Segmentation result in CxHxW: {}x{}x{}'.format(c, h, w))
    # Undo the view transpose so results align with the stored volume.
    if not dicom_input:
        if view == 'axial':
            results = np.transpose(results, (2, 1, 0))
        elif view == 'coronal':
            results = np.transpose(results,(1, 0, 2))
        else:
            results = results
    print('Segmentation result in HxWxC: {}x{}x{}'.format(h, w, c))
    # results[results > 0.49] = 1
    # results[results < 0.5] = 0
    results = results.astype(np.uint8)
    # ---- Optional evaluation against ground truth --------------------------
    # NOTE(review): 'label' may be None when no label file was found — this
    # branch would then fail on label.get_data(); confirm usage.
    if evaluating:
        label_data = label.get_data()
        # remove tumor label
        label_data[label_data > 1] = 1
        dice = compute_dice(results, label_data)
        print('Dice score of ResU-Net: {:.3f}'.format(dice))
    # print('Starting morphological post-processing...')
    # #print('no postprocess...')
    # # perform morphological operation
    # #remove small noisy segmentation
    # results = ndimage.binary_opening(results, iterations=5)
    # #Generate smooth segmentation
    # results = ndimage.binary_dilation(results, iterations=3)
    # results = ndimage.binary_fill_holes(results)
    # results = ndimage.binary_erosion(results, iterations=3)
    # perform largest connected component analysis
    # labeled_array, num_features = ndimage.label(results)
    # size_features = np.zeros((num_features))
    # for i in range(num_features):
    #     size_features[i] = np.sum(labeled_array == i+1)
    # results = np.zeros_like(labeled_array)
    # results[labeled_array == np.argmax(size_features) + 1] = 1
    # ---- Per-organ connected-component post-processing ---------------------
    # NOTE(review): the structure below is if(i==1) / if(i==2)-else, not an
    # elif chain — for i == 1 BOTH the liver branch and the else branch run,
    # so the liver result is recomputed with the min_co cutoff applied.
    # Confirm whether elif was intended.
    results_post = np.zeros_like(results)
    min_co = 0
    for i in range(1, 4):
        #liver
        if i ==1:
            results_i = np.zeros(results.shape)
            # results_i = results_i.cuda().clone()
            results_i[results == i] = 1
            labeled_array_i, num_features_i = ndimage.label(results_i)
            size_features_i = np.zeros((num_features_i))
            for j in range(num_features_i):
                size_features_i[j] = np.sum(labeled_array_i == j+1)
            results_i = np.zeros_like(labeled_array_i)
            results_i[labeled_array_i == np.argmax(size_features_i) + 1] = i
            results_i = results_i.astype(np.uint8)
            # min_co: 0.8x the first slice index containing liver; used
            # below to mask out detections anterior to the liver.
            summed_1 = np.sum(results_i.sum(axis=0), axis=0)
            non0_list = np.asarray([i for i in range(summed_1.size)])
            non0_list = non0_list[summed_1 > 1]
            min_co = 0.8 * np.min(non0_list)
            min_co = int(min_co)
            print('min_co', min_co)
        #kidney: keep the two largest components
        if i == 2:
            results_i = np.zeros(results.shape)
            # results_i = results_i.cuda().clone()
            results_i[results == i] = 1
            results_i[:,:,:min_co] = 0
            labeled_array_i, num_features_i = ndimage.label(results_i)
            size_features_i = np.zeros((num_features_i))
            for j in range(num_features_i):
                size_features_i[j] = np.sum(labeled_array_i == j+1)
            results_i = np.zeros_like(labeled_array_i)
            # print('idx1:',np.argmax(size_features_i))
            results_i[labeled_array_i == np.argmax(size_features_i) + 1] = i
            results1_i = np.zeros_like(labeled_array_i)
            idx2 = np.argsort(-size_features_i)[1]
            # print('idx2:',idx2)
            results1_i[labeled_array_i == idx2 + 1] = i
            results_i = results_i + results1_i
            results_i = results_i.astype(np.uint8)
        #spleen: keep the single largest component
        else:
            results_i = np.zeros(results.shape)
            # results_i = results_i.cuda().clone()
            results_i[results == i] = 1
            results_i[:,:,:min_co] = 0
            labeled_array_i, num_features_i = ndimage.label(results_i)
            size_features_i = np.zeros((num_features_i))
            for j in range(num_features_i):
                size_features_i[j] = np.sum(labeled_array_i == j+1)
            results_i = np.zeros_like(labeled_array_i)
            results_i[labeled_array_i == np.argmax(size_features_i) + 1] = i
            results_i = results_i.astype(np.uint8)
        results_post += results_i
    results = results_post
    # results = results.astype(np.uint8)
    # ---- Create the segmentation image for saving --------------------------
    if dicom_input:
        new_image = Nifti_from_numpy(results, image)
    else:
        header = image.header
        header.set_data_dtype(np.uint8)
        # if nifty1
        if header['sizeof_hdr'] == 348:
            new_image = nib.Nifti1Image(results, image.affine, header=header)
        # if nifty2
        elif header['sizeof_hdr'] == 540:
            new_image = nib.Nifti2Image(results, image.affine, header=header)
        else:
            raise IOError('Input image header problem')
    #seg_dir = path.expanduser('~/tmp/resu-net/segmentation')
    #fn_seg = path.join(seg_dir, 'segmentation.nii')
    fn_seg = path.expanduser(args.output_filename)
    print('Writing segmentation result into <{}>...'.format(fn_seg))
    #mu.write_mhd_file(fn_seg, results, meta_dict=header)
    nib.save(new_image, fn_seg)
    print('Segmentation result has been saved.')
    # Compute Dice for evaluating (now on the post-processed result)
    if evaluating:
        dice = compute_dice(results, label_data)
        print('Final Dice score: {:.3f}'.format(dice))
| 17,990 | 34.001946 | 126 | py |
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 16/PIPO-FAN-master/pipo_fan/dataset/dataset_liverCT_2D.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 22 14:10:33 2017
@author: yanrpi
"""
# %%
import glob
import numpy as np
import nibabel as nib
import random
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from os import path
# from scipy.misc import imsave
from scipy import ndimage
# %%
class LiverCTDataset(Dataset):
    """Dataset of liver CT volumes stored as ``volume-*.nii`` files.

    Each item pairs a CT volume with its ``segmentation-*.nii`` companion
    (same directory, matching name) as nibabel images; ``transform`` is
    applied to the ``{'image', 'label'}`` sample when provided.
    """
    def __init__(self, root_dir, transform=None, verbose=False):
        """
        Args:
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied on a sample.
            verbose (bool): print per-item debug info when True.

        Raises:
            ValueError: if ``root_dir`` is not an existing directory.
        """
        if not path.isdir(root_dir):
            raise ValueError("\"{}\" is not a valid directory path!".format(root_dir))
        self.root_dir = root_dir
        self.transform = transform
        self.verbose = verbose
        volume_files = glob.glob(path.join(root_dir, 'volume-*.nii'))
        self.num_images = len(volume_files)
        self.ct_filenames = volume_files

    def __len__(self):
        return self.num_images

    def __getitem__(self, idx):
        img_name = self.ct_filenames[idx]
        # The label file shares the volume's name with a different prefix.
        seg_name = img_name.replace('volume', 'segmentation')
        image = nib.load(img_name)
        segmentation = nib.load(seg_name)
        if self.verbose:
            print('{} -> {}'.format(idx, img_name))
            print('Image shape: {}'.format(image.shape))
            print('Segmentation shape: {}'.format(segmentation.shape))
        sample = {'image': image, 'label': segmentation}
        if self.transform:
            sample = self.transform(sample)
        return sample
# %%
class RandomCrop(object):
    """Crop randomly the image in a sample.

    For segmentation training, crops are only drawn from the slice range
    that contains segmentation label (sum > 10 voxels per slice). The
    returned label is the single middle slice of the cropped stack.

    Args:
        output_size (tuple or int): Desired output size. If int, square crop
            is made.
        view (str): 'axial', 'coronal' or other (treated as sagittal);
            selects how the volume axes are transposed before cropping.
    """
    def __init__(self, output_size, view):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size, output_size)
        else:
            assert len(output_size) == 3
            self.output_size = output_size
        self.view = view
    def __call__(self, sample):
        image, segmentation = sample['image'], sample['label']
        # NOTE(review): the shape is read BEFORE the view transpose below,
        # so h/w/d refer to the original axes; confirm this is intended for
        # the coronal/sagittal views.
        h, w, d = image.shape
        new_h, new_w, new_d = self.output_size
        view = self.view
        new_d_half = new_d >> 1
        # Find slices containing segmentation object
        seg_data = segmentation.get_data()
        img_data = image.get_data()
        if view == 'axial':
            img_data = img_data
            seg_data = seg_data
        elif view == 'coronal':
            img_data = img_data.transpose((2, 0, 1))
            seg_data = seg_data.transpose((2, 0, 1))
        else:
            img_data = img_data.transpose((2, 1, 0))
            seg_data = seg_data.transpose((2, 1, 0))
        # Indices of slices (last axis) with more than 10 labeled voxels
        # bound the region the random crop is drawn from.
        summed = np.sum(seg_data.sum(axis=0), axis=0)
        non0_list = np.asarray([i for i in range(summed.size)])
        non0_list = non0_list[summed > 10]
        seg_start = max(np.min(non0_list) - new_d_half, 0)
        seg_end = min(np.max(non0_list) + new_d_half, d)
        if new_h == h:
            top = 0
            left = 0
        else:
            top = np.random.randint(0, h - new_h)
            left = np.random.randint(0, w - new_w)
        #ant = np.random.randint(0, d - new_d)
        # NOTE(review): requires seg_end - new_d > seg_start, i.e. the
        # labeled region must span more than new_d slices — confirm inputs.
        ant = np.random.randint(seg_start, seg_end - new_d)
        img_data = img_data[top: top + new_h,
                            left: left + new_w,
                            ant: ant + new_d]
        img_data = img_data.astype(np.float32)
        # The label is the single middle slice of the cropped stack.
        ant_seg = ant + new_d_half
        seg_data = seg_data[top: top + new_h,
                            left: left + new_w,
                            ant_seg: ant_seg + 1]
        # seg_data = seg_data[top: top + new_h,
        #                     left: left + new_w,
        #                     ant: ant + new_d]
        seg_data = seg_data.astype(np.float32)
        # Merge labels (e.g. tumor -> organ) into a single foreground class
        seg_data[seg_data > 1] = 1
        # flip up side down to correct
        # image = np.flip(img_data, axis=1).copy()
        # label = np.flip(seg_data, axis=1).copy()
        return {'image': img_data, 'label': seg_data}
class RandomHorizontalFlip(object):
    """With probability 0.5, flip image and label along axis 0."""
    def __call__(self, sample):
        # Half of the time, return the sample untouched.
        if random.uniform(0,1) < 0.5:
            return sample
        flipped_image = np.flip(sample['image'], axis=0).copy()
        flipped_label = np.flip(sample['label'], axis=0).copy()
        return {'image': flipped_image, 'label': flipped_label}
class RandomVerticalFlip(object):
    """With probability 0.5, flip image and label along axis 1."""
    def __call__(self, sample):
        # Half of the time, return the sample untouched.
        if random.uniform(0,1) < 0.5:
            return sample
        flipped_image = np.flip(sample['image'], axis=1).copy()
        flipped_label = np.flip(sample['label'], axis=1).copy()
        return {'image': flipped_image, 'label': flipped_label}
# def pixel_mask(image, p):
# p_map = np.random.random(size = image.shape)
# mask = p_map <= p
# return mask
# def boundary_mask(label, p1, p2):
# d_map_in = ndimage.distance_transform_edt(label)
# label_r = 1 - label
# d_map_out = ndimage.distance_transform_edt(label_r)
# d_map = d_map_in + d_map_out
# d_map[d_map<=3] = 1
# d_map[d_map>3] = 0
# # d_map = d_map<=5
# # print('d_map:',d_map.sum())
# p_map = d_map
# p_map[p_map == 1] = p1
# p_map[p_map == 0] = p2
# # print('p_map:',(p_map==p1).sum())
# r_map = np.random.random(size = label.shape)
# mask = r_map <= p_map
# mask = 1*mask
# return mask
# def bkg_mask(label, p1, p2):
# p_map = label.copy()
# p_map[p_map>=1] = 1
# p_map[p_map<1] = 0
# # print('P_map.sum0',(p_map==0).sum())
# # print('P_map.sum1',(p_map==1).sum())
# p_map[p_map == 0] = p2
# # print('p_mapsum1',p_map.sum())
# p_map[p_map == 1] = p1
# # print('p_map:',(p_map==p1).sum())
# r_map = np.random.random(size = label.shape)
# mask = r_map <= p_map
# mask = 1*mask
# # print('mask.sum:',mask.sum())
# return mask
# def bdy2blk(bdy, nrows, ncols, p1, p2):
# # print(bdy.shape)
# bdy1 = np.squeeze(bdy,-1)
# # 224 x 224
# h, w = bdy1.shape
# # print(h,nrows,h/nrows)
# # 16 x 16 x 14 x 14
# bdy1 = bdy1.reshape(h//nrows, nrows, -1, ncols).swapaxes(1,2).reshape(-1, nrows, ncols)
# bdy1 = bdy1.reshape(nrows, ncols, int(h/nrows), int(w/nrows))
# # print('bdy1.shape:',bdy1.shape)
# for i in range(bdy1.shape[0]):
# for j in range(bdy1.shape[1]):
# if bdy1[i][j].sum() >= 1:
# if np.random.random_sample() <= p1:
# bdy1[i][j] = np.ones(bdy1[i][j].shape)
# else:
# bdy1[i][j] = np.zeros(bdy1[i][j],shape)
# else:
# if np.random.random_sample() <= p2:
# bdy1[i][j] = np.ones(bdy1[i][j].shape)
# else:
# bdy1[i][j] = np.zeros(bdy1[i][j].shape)
# return bdy1
# def blk_mask(label, p1, p2):
# d_map_in = ndimage.distance_transform_edt(label)
# label_r = 1 - label
# d_map_out = ndimage.distance_transform_edt(label_r)
# d_map = d_map_in + d_map_out
# d_map[d_map<=5] = 1
# d_map[d_map>5] = 0
# p_map = d_map
# # print('p_map_shape:', p_map.shape)
# mask = bdy2blk(p_map,16,16, p1, p2)
# # p_map size 16 x 16 x 14 x 14
# # p_map[p_map == 1] = p1
# # p_map[p_map == 0] = p2
# # r_map = np.random.random(size = label.shape)
# # mask = r_map <= p_map
# # 16x16 --> 224 x 224
# # print('mask_shape1', mask.shape)
# mask = np.hstack(mask)
# mask = np.hstack(mask)
# # print('mask_shape', mask.shape)
# mask = np.expand_dims(mask, -1)
# return mask
# class BdyblkOut(object):
# def __init__(self, probability1, probability2):
# self.pa = probability1
# self.pb = probability2
# def __call__(self, sample):
# image, label = sample['image'], sample['label']
# p1 = self.pa + (1 - self.pa) * np.random.random_sample()
# p2 = self.pb + (1 - self.pb) * np.random.random_sample()
# # mask = boundary_mask(label, p1, p2)
# mask = bdyblk_mask(label, p1, p2)
# # print('mask:',mask.shape)
# image = image * mask
# label = label * mask
# return {'image': image, 'label': label, 'mask': mask}
# class BoundaryOut(object):
# def __init__(self, probability1, probability2):
# self.pa = probability1
# self.pb = probability2
# def __call__(self, sample):
# image, label = sample['image'], sample['label']
# p1 = self.pa + (1 - self.pa) * np.random.random_sample()
# p2 = self.pb + (1 - self.pb) * np.random.random_sample()
# # p1 = self.pa
# # p2 = self.pb
# mask = boundary_mask(label, p1, p2)
# # mask = bdyblk_mask(label, p1, p2)
# # print('mask_:',mask.sum())
# # noise = np.random.normal(0,0.33,image.shape)
# # noise[noise>1] = 1
# # noise[noise<-1] = -1
# # noise = noise*(1-mask)
# image = image * mask
# # image = image
# # image = image + noise
# # label = label * mask
# return {'image': image, 'label': label, 'mask': mask}
# class BkgOut(object):
# def __init__(self, probability1, probability2):
# self.pa = probability1
# self.pb = probability2
# def __call__(self, sample):
# image, label = sample['image'], sample['label']
# p1 = self.pa + (1 - self.pa) * np.random.random_sample()
# p2 = self.pb + (1 - self.pb) * np.random.random_sample()
# mask = bkg_mask(label, p1, p2)
# # print('mask:',mask.shape)
# image = image * mask
# label = label * mask
# return {'image': image, 'label': label, 'mask': mask}
# class MaskOut(object):
# def __init__(self, probability):
# self.pb = probability
# def __call__(self, sample):
# image, label = sample['image'], sample['label']
# p = self.pb + (1 - self.pb) * np.random.random_sample()
# mask = pixel_mask(image, p)
# # print('mask:',mask.shape)
# image = image * mask
# label = label * mask
# return {'image': image, 'label': label, 'mask': mask}
class Clip(object):
    """Clamp image intensities to ``[LB, UB]`` in place.

    Bounds may be given in either order; they are sorted on construction.
    The label passes through unchanged.
    """

    def __init__(self, lower_bound, upper_bound):
        self.LB = min(lower_bound, upper_bound)
        self.UB = max(lower_bound, upper_bound)

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # In-place clamp, same effect as the two boolean-mask assignments.
        np.clip(image, self.LB, self.UB, out=image)
        return {'image': image, 'label': label}
class Normalize(object):
    """Linearly rescale intensities from ``[LB, UB]`` to roughly ``[-1, 1]``.

    Operates on the image in place; the label is passed through.
    """

    def __init__(self, lower_bound, upper_bound):
        self.LB = min(lower_bound, upper_bound)
        self.UB = max(lower_bound, upper_bound)

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        centre = (self.LB + self.UB) / 2.0
        half_range = (self.UB - self.LB) / 2.0
        image -= centre
        # Tiny epsilon guards against LB == UB.
        image /= (half_range + 0.000001)
        return {'image': image, 'label': label}
class ToTensor(object):
    """Convert the (W, H, C) ndarrays of a sample into (C, H, W) tensors."""

    def __call__(self, sample):
        # Move the channel axis first, as torch layers expect C x H x W.
        image = sample['image'].transpose((2, 1, 0))
        label = sample['label'].transpose((2, 1, 0))
        return {'image': torch.from_numpy(image),
                'label': torch.from_numpy(label)}
def get_composed_transform(hw, slices, view):
    """Build the training pipeline: crop -> clip -> normalize -> flips -> tensor."""
    pipeline = [
        RandomCrop((hw, hw, slices), view),
        Clip(-200, 200),
        Normalize(-200, 200),
        RandomHorizontalFlip(),
        RandomVerticalFlip(),
        ToTensor(),
    ]
    return transforms.Compose(pipeline)
# %% Tester
if __name__ == '__main__':
    # scipy.misc.imsave was removed in SciPy >= 1.2; fall back to imageio.
    try:
        from scipy.misc import imsave
    except ImportError:
        from imageio import imwrite as imsave

    img_folder = '/home/data/LITS/training'
    #img_folder = '/Users/yan/Documents/data/LITS_training'
    log_dir = path.expanduser('~/tmp/u-net/logs')
    composed = get_composed_transform(224, 3, 'axial')
    dataset = LiverCTDataset(img_folder,
                             transform=composed,
                             verbose=True)
    # num_workers = 4 to use more processes
    dataloader = DataLoader(dataset, batch_size=1, shuffle=True,
                            num_workers=0)
    sample_batched = next(iter(dataloader))
    image_batch = sample_batched['image']
    label_batch = sample_batched['label']
    # BUG FIX: the composed pipeline no longer produces a 'mask' entry (the
    # MaskOut/BoundaryOut transforms are disabled above), so indexing
    # sample_batched['mask'] raised KeyError. Mask handling removed.
    print('Batch size: {}, image size: {}, label size: {}'.format(
        len(image_batch), image_batch.size(2), label_batch.size(2)))

    img_data = image_batch[0, 0, :, :].numpy()
    v_min, v_max = img_data.min(), img_data.max()
    print('Img -> max: {}, min: {}'.format(v_max, v_min))
    img_data = (img_data - v_min) / (v_max - v_min) * 255
    img_data = img_data.astype(np.uint8)

    label_data = label_batch[0, 0, :, :].numpy()
    print('Label -> max: {}, min: {}'.format(label_data.max(), label_data.min()))
    label_data *= 255
    # BUG FIX: was assigned to a misspelled 'lable_data', so the float array
    # (not uint8) was written to disk.
    label_data = label_data.astype(np.uint8)

    # Save images
    imsave(path.join(log_dir, 'image_sample.png'), img_data, format='png')
    imsave(path.join(log_dir, 'label_sample.png'), label_data, format='png')
| 16,145 | 31.552419 | 99 | py |
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 16/PIPO-FAN-master/pipo_fan/dataset/dataset_muor_2D.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 22 14:10:33 2017
@author: yanrpi
"""
# %%
import glob
import numpy as np
import nibabel as nib
import random
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from os import path
# from scipy.misc import imsave
from scipy import ndimage
# from scipy.misc import imsave
# %%
class LiverCTDataset(Dataset):
    """Liver CT dataset: pairs each ``volume-*.nii`` with its segmentation.

    Args:
        root_dir: directory holding ``volume-*.nii`` / ``segmentation-*.nii``.
        transform: optional callable applied to each ``{'image', 'label'}`` dict.
        verbose: print file names and shapes while loading.
    """

    def __init__(self, root_dir, transform=None, verbose=False):
        if not path.isdir(root_dir):
            raise ValueError('"{}" is not a valid directory path!'.format(root_dir))
        self.root_dir = root_dir
        self.transform = transform
        self.verbose = verbose
        self.ct_filenames = glob.glob(path.join(root_dir, 'volume-*.nii'))
        self.num_images = len(self.ct_filenames)

    def __len__(self):
        return self.num_images

    def __getitem__(self, idx):
        img_name = self.ct_filenames[idx]
        # The segmentation file shares the volume's numbering.
        seg_name = img_name.replace('volume', 'segmentation')
        image = nib.load(img_name)
        segmentation = nib.load(seg_name)
        if self.verbose:
            print('{} -> {}'.format(idx, img_name))
            print('Image shape: {}'.format(image.shape))
            print('Segmentation shape: {}'.format(segmentation.shape))
        sample = {'image': image, 'label': segmentation}
        if self.transform:
            sample = self.transform(sample)
        return sample
# %%
class RandomCrop(object):
    """Randomly crop a (new_h, new_w, new_d) patch from a nibabel sample.

    The crop's depth range is restricted to slices that contain the
    segmented object (more than 10 labelled voxels), and the returned
    label is the single centre slice of the cropped depth range.

    Args:
        output_size (tuple or int): Desired (H, W, D) output size; an int
            yields a cubic crop.
        view (str): 'axial', 'coronal', or anything else for sagittal;
            selects how the volume is transposed before cropping.
    """
    def __init__(self, output_size, view):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size, output_size)
        else:
            assert len(output_size) == 3
            self.output_size = output_size
        self.view = view
    def __call__(self, sample):
        image, segmentation = sample['image'], sample['label']
        h, w, d = image.shape
        new_h, new_w, new_d = self.output_size
        view = self.view
        # Half depth: the label is taken at the centre of the cropped range.
        new_d_half = new_d >> 1
        # Find slices containing segmentation object
        seg_data = segmentation.get_data()
        img_data = image.get_data()
        if view == 'axial':
            img_data = img_data
            seg_data = seg_data
        elif view == 'coronal':
            img_data = img_data.transpose((2, 0, 1))
            seg_data = seg_data.transpose((2, 0, 1))
        else:
            img_data = img_data.transpose((2, 1, 0))
            seg_data = seg_data.transpose((2, 1, 0))
        # Foreground voxel count per slice along the (transposed) last axis.
        summed = np.sum(seg_data.sum(axis=0), axis=0)
        non0_list = np.asarray([i for i in range(summed.size)])
        non0_list = non0_list[summed > 10]
        # NOTE(review): `d` is the pre-transpose third dimension; confirm the
        # upper bound is still valid for the coronal/sagittal views.
        seg_start = max(np.min(non0_list) - new_d_half, 0)
        seg_end = min(np.max(non0_list) + new_d_half, d)
        if new_h == h:
            top = 0
            left = 0
        else:
            top = np.random.randint(0, h - new_h)
            left = np.random.randint(0, w - new_w)
        #ant = np.random.randint(0, d - new_d)
        # NOTE(review): raises ValueError when seg_end - new_d <= seg_start.
        ant = np.random.randint(seg_start, seg_end - new_d)
        img_data = img_data[top: top + new_h,
                            left: left + new_w,
                            ant: ant + new_d]
        img_data = img_data.astype(np.float32)
        # Label = single slice at the centre of the cropped depth range.
        ant_seg = ant + new_d_half
        seg_data = seg_data[top: top + new_h,
                            left: left + new_w,
                            ant_seg: ant_seg + 1]
        # seg_data = seg_data[top: top + new_h,
        #                     left: left + new_w,
        #                     ant: ant + new_d]
        seg_data = seg_data.astype(np.float32)
        # Merge labels
        # seg_data[seg_data > 1] = 1
        # flip up side down to correct
        # image = np.flip(img_data, axis=1).copy()
        # label = np.flip(seg_data, axis=1).copy()
        return {'image': img_data, 'label': seg_data}
class RandomHorizontalFlip(object):
    """Mirror image and label along axis 0 with probability 0.5."""

    def __call__(self, sample):
        if random.uniform(0, 1) < 0.5:
            # Keep the sample untouched half of the time.
            return sample
        image = np.flip(sample['image'], axis=0).copy()
        label = np.flip(sample['label'], axis=0).copy()
        return {'image': image, 'label': label}
class RandomVerticalFlip(object):
    """Mirror image and label along axis 1 with probability 0.5."""

    def __call__(self, sample):
        if random.uniform(0, 1) < 0.5:
            return sample
        flipped = {key: np.flip(sample[key], axis=1).copy()
                   for key in ('image', 'label')}
        return flipped
# def pixel_mask(image, p):
# p_map = np.random.random(size = image.shape)
# mask = p_map <= p
# return mask
# def boundary_mask(label, p1, p2):
# d_map_in = ndimage.distance_transform_edt(label)
# label_r = 1 - label
# d_map_out = ndimage.distance_transform_edt(label_r)
# d_map = d_map_in + d_map_out
# d_map[d_map<=3] = 1
# d_map[d_map>3] = 0
# # d_map = d_map<=5
# # print('d_map:',d_map.sum())
# p_map = d_map
# p_map[p_map == 1] = p1
# p_map[p_map == 0] = p2
# # print('p_map:',(p_map==p1).sum())
# r_map = np.random.random(size = label.shape)
# mask = r_map <= p_map
# mask = 1*mask
# return mask
# def bkg_mask(label, p1, p2):
# p_map = label.copy()
# p_map[p_map>=1] = 1
# p_map[p_map<1] = 0
# # print('P_map.sum0',(p_map==0).sum())
# # print('P_map.sum1',(p_map==1).sum())
# p_map[p_map == 0] = p2
# # print('p_mapsum1',p_map.sum())
# p_map[p_map == 1] = p1
# # print('p_map:',(p_map==p1).sum())
# r_map = np.random.random(size = label.shape)
# mask = r_map <= p_map
# mask = 1*mask
# # print('mask.sum:',mask.sum())
# return mask
# def bdy2blk(bdy, nrows, ncols, p1, p2):
# # print(bdy.shape)
# bdy1 = np.squeeze(bdy,-1)
# # 224 x 224
# h, w = bdy1.shape
# # print(h,nrows,h/nrows)
# # 16 x 16 x 14 x 14
# bdy1 = bdy1.reshape(h//nrows, nrows, -1, ncols).swapaxes(1,2).reshape(-1, nrows, ncols)
# bdy1 = bdy1.reshape(nrows, ncols, int(h/nrows), int(w/nrows))
# # print('bdy1.shape:',bdy1.shape)
# for i in range(bdy1.shape[0]):
# for j in range(bdy1.shape[1]):
# if bdy1[i][j].sum() >= 1:
# if np.random.random_sample() <= p1:
# bdy1[i][j] = np.ones(bdy1[i][j].shape)
# else:
# bdy1[i][j] = np.zeros(bdy1[i][j],shape)
# else:
# if np.random.random_sample() <= p2:
# bdy1[i][j] = np.ones(bdy1[i][j].shape)
# else:
# bdy1[i][j] = np.zeros(bdy1[i][j].shape)
# return bdy1
# def blk_mask(label, p1, p2):
# d_map_in = ndimage.distance_transform_edt(label)
# label_r = 1 - label
# d_map_out = ndimage.distance_transform_edt(label_r)
# d_map = d_map_in + d_map_out
# d_map[d_map<=5] = 1
# d_map[d_map>5] = 0
# p_map = d_map
# # print('p_map_shape:', p_map.shape)
# mask = bdy2blk(p_map,16,16, p1, p2)
# # p_map size 16 x 16 x 14 x 14
# # p_map[p_map == 1] = p1
# # p_map[p_map == 0] = p2
# # r_map = np.random.random(size = label.shape)
# # mask = r_map <= p_map
# # 16x16 --> 224 x 224
# # print('mask_shape1', mask.shape)
# mask = np.hstack(mask)
# mask = np.hstack(mask)
# # print('mask_shape', mask.shape)
# mask = np.expand_dims(mask, -1)
# return mask
# class BdyblkOut(object):
# def __init__(self, probability1, probability2):
# self.pa = probability1
# self.pb = probability2
# def __call__(self, sample):
# image, label = sample['image'], sample['label']
# p1 = self.pa + (1 - self.pa) * np.random.random_sample()
# p2 = self.pb + (1 - self.pb) * np.random.random_sample()
# # mask = boundary_mask(label, p1, p2)
# mask = bdyblk_mask(label, p1, p2)
# # print('mask:',mask.shape)
# image = image * mask
# label = label * mask
# return {'image': image, 'label': label, 'mask': mask}
# class BoundaryOut(object):
# def __init__(self, probability1, probability2):
# self.pa = probability1
# self.pb = probability2
# def __call__(self, sample):
# image, label = sample['image'], sample['label']
# p1 = self.pa + (1 - self.pa) * np.random.random_sample()
# p2 = self.pb + (1 - self.pb) * np.random.random_sample()
# # p1 = self.pa
# # p2 = self.pb
# mask = boundary_mask(label, p1, p2)
# # mask = bdyblk_mask(label, p1, p2)
# # print('mask_:',mask.sum())
# # noise = np.random.normal(0,0.33,image.shape)
# # noise[noise>1] = 1
# # noise[noise<-1] = -1
# # noise = noise*(1-mask)
# image = image * mask
# # image = image
# # image = image + noise
# # label = label * mask
# return {'image': image, 'label': label, 'mask': mask}
# class BkgOut(object):
# def __init__(self, probability1, probability2):
# self.pa = probability1
# self.pb = probability2
# def __call__(self, sample):
# image, label = sample['image'], sample['label']
# p1 = self.pa + (1 - self.pa) * np.random.random_sample()
# p2 = self.pb + (1 - self.pb) * np.random.random_sample()
# mask = bkg_mask(label, p1, p2)
# # print('mask:',mask.shape)
# image = image * mask
# label = label * mask
# return {'image': image, 'label': label, 'mask': mask}
# class MaskOut(object):
# def __init__(self, probability):
# self.pb = probability
# def __call__(self, sample):
# image, label = sample['image'], sample['label']
# p = self.pb + (1 - self.pb) * np.random.random_sample()
# mask = pixel_mask(image, p)
# # print('mask:',mask.shape)
# image = image * mask
# label = label * mask
# return {'image': image, 'label': label, 'mask': mask}
class Clip(object):
    """Clamp image intensities to the closed range [LB, UB] (in place)."""

    def __init__(self, lower_bound, upper_bound):
        # Accept bounds in either order.
        self.LB = min(lower_bound, upper_bound)
        self.UB = max(lower_bound, upper_bound)

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        np.clip(image, self.LB, self.UB, out=image)
        return {'image': image, 'label': label}
class Normalize(object):
    """Rescale image intensities from ``[LB, UB]`` to roughly ``[-1, 1]`` in place."""

    def __init__(self, lower_bound, upper_bound):
        self.LB = min(lower_bound, upper_bound)
        self.UB = max(lower_bound, upper_bound)

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        mid = (self.LB + self.UB) / 2.0
        span = (self.UB - self.LB) / 2.0
        image -= mid
        # Epsilon avoids division by zero when LB == UB.
        image /= (span + 0.000001)
        return {'image': image, 'label': label}
class ToTensor(object):
    """Turn the (W, H, C) ndarrays of a sample into (C, H, W) torch tensors."""

    def __call__(self, sample):
        out = {}
        for key in ('image', 'label'):
            # torch layers expect channel-first layout.
            out[key] = torch.from_numpy(sample[key].transpose((2, 1, 0)))
        return out
def get_composed_transform(hw, slices, view):
    """Assemble the crop -> clip -> normalize -> flips -> tensor pipeline."""
    return transforms.Compose([
        RandomCrop((hw, hw, slices), view),
        Clip(-200, 200),
        Normalize(-200, 200),
        RandomHorizontalFlip(),
        RandomVerticalFlip(),
        ToTensor(),
    ])
# %% Tester
if __name__ == '__main__':
img_folder = '/zion/fangx2/BTCV/training_256'
#img_folder = '/Users/yan/Documents/data/LITS_training'
log_dir = path.expanduser('/zion/fangx2/mu_or/train/logs/')
composed = get_composed_transform(224, 3, 'axial')
dataset = LiverCTDataset(img_folder,
transform=composed,
verbose = True)
'''
for i in range(5):
sample = dataset[i]
img = sample['image']
print(i, img.size(), type(img))
label = sample['label']
print(i, label.size(), type(label))
'''
# num_workers = 4 to use more processes
dataloader = DataLoader(dataset, batch_size=1, shuffle=True,
num_workers=0)
#for i_batch, sample_batched in enumerate(dataloader):
batch_it = iter(dataloader)
sample_batched = next(batch_it)
image_batch = sample_batched['image']
label_batch = sample_batched['label']
print('Batch size: {}, image size: {}, label size: {}'.format(len(image_batch),
image_batch.size(2),
label_batch.size(2)))
img_data = image_batch[0,0,:,:].numpy()
v_min = img_data.min()
v_max = img_data.max()
print('Img -> max: {}, min: {}'.format(v_max, v_min))
img_data = (img_data - v_min) / (v_max - v_min) * 255
img_data = img_data.astype(np.uint8)
label_data = label_batch[0,0,:,:].numpy()
v_min = label_data.min()
v_max = label_data.max()
print('Label -> max: {}, min: {}'.format(v_max, v_min))
label_data *= 255
lable_data = label_data.astype(np.uint8)
# Save images
imsave(path.join(log_dir, 'image_sample.png'), img_data, format='png')
imsave(path.join(log_dir, 'label_sample.png'), label_data, format='png') | 15,824 | 31.428279 | 93 | py |
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 16/model/denseu_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
class _DenseLayer(nn.Sequential):
    """BN-ReLU-Conv1x1 bottleneck followed by BN-ReLU-Conv3x3.

    ``forward`` concatenates the ``growth_rate`` new feature maps onto the
    input along the channel axis (DenseNet connectivity).
    """

    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
        super(_DenseLayer, self).__init__()
        bottleneck = bn_size * growth_rate
        self.add_module('norm1', nn.BatchNorm2d(num_input_features))
        self.add_module('relu1', nn.ReLU(inplace=True))
        self.add_module('conv1', nn.Conv2d(num_input_features, bottleneck,
                                           kernel_size=1, stride=1, bias=False))
        self.add_module('norm2', nn.BatchNorm2d(bottleneck))
        self.add_module('relu2', nn.ReLU(inplace=True))
        self.add_module('conv2', nn.Conv2d(bottleneck, growth_rate,
                                           kernel_size=3, stride=1, padding=1,
                                           bias=False))
        self.drop_rate = drop_rate

    def forward(self, x):
        out = super(_DenseLayer, self).forward(x)
        if self.drop_rate > 0:
            # Dropout only fires in training mode.
            out = F.dropout(out, p=self.drop_rate, training=self.training)
        return torch.cat([x, out], 1)
class _DenseBlock(nn.Sequential):
    """A stack of ``num_layers`` dense layers with growing channel counts."""

    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
        super(_DenseBlock, self).__init__()
        for idx in range(num_layers):
            # Each layer sees the original input plus all earlier growth maps.
            in_ch = num_input_features + idx * growth_rate
            self.add_module('denselayer%d' % (idx + 1),
                            _DenseLayer(in_ch, growth_rate, bn_size, drop_rate))
class _Transition(nn.Sequential):
    """Compress channels with a 1x1 conv, then halve the spatial size."""

    def __init__(self, num_input_features, num_output_features):
        layers = OrderedDict([
            ('norm', nn.BatchNorm2d(num_input_features)),
            ('relu', nn.ReLU(inplace=True)),
            ('conv', nn.Conv2d(num_input_features, num_output_features,
                               kernel_size=1, stride=1, bias=False)),
            ('pool', nn.AvgPool2d(kernel_size=2, stride=2)),
        ])
        super(_Transition, self).__init__(layers)
class up_in(nn.Sequential):
    """Upsample ``y`` 2x, project ``x`` to match, and fuse by addition.

    The fused map then goes through Conv3x3 -> BN -> ReLU.
    """

    def __init__(self, num_input_features1, num_input_features2, num_output_features):
        super(up_in, self).__init__()
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.add_module('conv1_1', nn.Conv2d(num_input_features1, num_input_features2,
                                             kernel_size=1, stride=1, bias=False))
        self.add_module('conv3_3', nn.Conv2d(num_input_features2, num_output_features,
                                             kernel_size=3, stride=1, padding=1,
                                             bias=False))
        self.add_module('norm', nn.BatchNorm2d(num_output_features))
        self.add_module('relu', nn.ReLU(inplace=True))

    def forward(self, x, y):
        skip = self.conv1_1(x)
        fused = skip + self.up(y)
        return self.relu(self.norm(self.conv3_3(fused)))
class upblock(nn.Sequential):
    """Upsample ``y`` 2x, add the skip ``x``, then Conv3x3 -> BN -> ReLU."""

    def __init__(self, num_input_features, num_output_features):
        super(upblock, self).__init__()
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.add_module('conv3_3', nn.Conv2d(num_input_features, num_output_features,
                                             kernel_size=3, stride=1, padding=1,
                                             bias=False))
        self.add_module('norm', nn.BatchNorm2d(num_output_features))
        self.add_module('relu', nn.ReLU(inplace=True))

    def forward(self, x, y):
        fused = x + self.up(y)
        return self.relu(self.norm(self.conv3_3(fused)))
class up_out(nn.Sequential):
    """Final 2x upsampling head: Conv3x3 -> Dropout2d(0.3) -> BN -> ReLU."""

    def __init__(self, num_input_features, num_output_features):
        super(up_out, self).__init__()
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.add_module('conv3_3', nn.Conv2d(num_input_features, num_output_features,
                                             kernel_size=3, stride=1, padding=1,
                                             bias=False))
        self.dropout = nn.Dropout2d(p=0.3)
        self.add_module('norm', nn.BatchNorm2d(num_output_features))
        self.add_module('relu', nn.ReLU(inplace=True))

    def forward(self, y):
        out = self.conv3_3(self.up(y))
        out = self.dropout(out)
        return self.relu(self.norm(out))
class DenseUNet(nn.Module):
    r"""DenseNet-BC encoder with an additive-skip decoder (Dense U-Net).

    Encoder follows `"Densely Connected Convolutional Networks"
    <https://arxiv.org/pdf/1608.06993.pdf>`_; the decoder fuses the stored
    encoder activations via ``up_in``/``upblock``/``up_out`` and a final 1x1
    ``outconv``. ``forward`` returns raw per-class score maps (the softmax
    is commented out).

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes

    NOTE(review): the decoder widths below (48*44, 48*46, ...) are the channel
    counts produced by the *default* arguments (growth_rate=48,
    block_config=(6, 12, 36, 24), num_init_features=96); passing different
    encoder settings will break the decoder's channel matching.
    """
    def __init__(self, growth_rate=48, block_config=(6, 12, 36, 24),
                 num_init_features=96, bn_size=4, drop_rate=0, num_channels = 3, num_classes = 2):
        super(DenseUNet, self).__init__()
        # First convolution
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(num_channels, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))
        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2
        # Decoder: channel counts hard-coded for the default encoder config
        # (48*44 = denseblock3 output, 48*46 = denseblock4 output).
        self.up1 = up_in(48*44, 48*46, 48*16)
        self.up2 = upblock(48*16, 48*8)
        self.up3 = upblock(48*8, 96)
        self.up4 = upblock(96,96)
        self.up5 = up_out(96,64)
        self.outconv = outconv(64,num_classes)
        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        # Encoder: keep the intermediate activations for the decoder skips.
        features = self.features.conv0(x)
        x0 = self.features.norm0(features)
        x0 = self.features.relu0(x0)
        x1 = self.features.pool0(x0)
        x1 = self.features.denseblock1(x1)
        x2 = self.features.transition1(x1)
        x2 = self.features.denseblock2(x2)
        x3 = self.features.transition2(x2)
        x3 = self.features.denseblock3(x3)
        x4 = self.features.transition3(x3)
        x4 = self.features.denseblock4(x4)
        # Decoder: fuse skips from deepest to shallowest.
        y4 = self.up1(x3, x4)
        y3 = self.up2(x2, y4)
        y2 = self.up3(x1, y3)
        y1 = self.up4(x0, y2)
        y0 = self.up5(y1)
        out = self.outconv(y0)
        # out = F.softmax(out, dim=1)
        return out
class outconv(nn.Module):
    """1x1 convolution mapping features to per-class score maps."""

    def __init__(self, in_ch, out_ch):
        super(outconv, self).__init__()
        self.conv = nn.Conv2d(in_ch, out_ch, 1)

    def forward(self, x):
        return self.conv(x)
| 7,830 | 42.505556 | 114 | py |
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 16/model/unet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class double_conv(nn.Module):
    """Two (Conv3x3 -> BN -> ReLU) stages at a fixed output width."""

    def __init__(self, in_ch, out_ch):
        super(double_conv, self).__init__()
        stages = [
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        ]
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv(x)
class inconv(nn.Module):
    """Input stem: a plain double_conv without any resolution change."""

    def __init__(self, in_ch, out_ch):
        super(inconv, self).__init__()
        self.conv = double_conv(in_ch, out_ch)

    def forward(self, x):
        return self.conv(x)
class down(nn.Module):
    """Encoder step: 2x2 max-pool followed by a double_conv."""

    def __init__(self, in_ch, out_ch):
        super(down, self).__init__()
        self.mpconv = nn.Sequential(nn.MaxPool2d(2),
                                    double_conv(in_ch, out_ch))

    def forward(self, x):
        return self.mpconv(x)
class up(nn.Module):
    """Decoder step: upsample ``x1``, pad ``x2`` to match, concat, double_conv.

    Args:
        in_ch: channel count after concatenation (skip + upsampled).
        out_ch: output channels of the fused double_conv.
        bilinear: use parameter-free bilinear upsampling instead of a
            transposed convolution.
    """
    def __init__(self, in_ch, out_ch, bilinear=True):
        super(up, self).__init__()
        # would be a nice idea if the upsampling could be learned too,
        # but my machine do not have enough memory to handle all those weights
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2)
        self.conv = double_conv(in_ch, out_ch)
    def forward(self, x1, x2):
        x1 = self.up(x1)
        # Size mismatch between the upsampled map and the skip connection.
        diffX = x1.size()[2] - x2.size()[2]
        diffY = x1.size()[3] - x2.size()[3]
        # NOTE(review): F.pad's first pair applies to the *last* dimension
        # (dim 3), yet diffX is measured on dim 2 -- the pairs look swapped.
        # Harmless when diffX == diffY; confirm before relying on odd sizes.
        x2 = F.pad(x2, (diffX // 2, int(diffX / 2),
                        diffY // 2, int(diffY / 2)))
        x = torch.cat([x2, x1], dim=1)
        x = self.conv(x)
        return x
class outconv(nn.Module):
    """Final 1x1 convolution producing per-class score maps."""

    def __init__(self, in_ch, out_ch):
        super(outconv, self).__init__()
        self.conv = nn.Conv2d(in_ch, out_ch, 1)

    def forward(self, x):
        return self.conv(x)
class UNet(nn.Module):
    """Classic U-Net; ``forward`` returns per-pixel class probabilities (softmax)."""

    def __init__(self, n_channels, n_classes):
        super(UNet, self).__init__()
        self.inc = inconv(n_channels, 64)
        self.down1 = down(64, 128)
        self.down2 = down(128, 256)
        self.down3 = down(256, 512)
        self.down4 = down(512, 512)
        self.up1 = up(1024, 256)
        self.up2 = up(512, 128)
        self.up3 = up(256, 64)
        self.up4_1 = up(128, 64)
        self.outc1 = outconv(64, n_classes)

    def forward(self, x):
        # Encoder path, keeping each activation as a decoder skip.
        skips = [self.inc(x)]
        for stage in (self.down1, self.down2, self.down3, self.down4):
            skips.append(stage(skips[-1]))
        e1, e2, e3, e4, bottom = skips
        # Decoder path, consuming the skips in reverse order.
        d = self.up1(bottom, e4)
        d = self.up2(d, e3)
        d = self.up3(d, e2)
        d = self.up4_1(d, e1)
        logits = self.outc1(d)
        return F.softmax(logits, dim=1)
| 2,971 | 26.266055 | 86 | py |
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 16/model/resu_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class double_conv(nn.Module):
    """Pre-activation residual branch: (BN -> ReLU -> Conv3x3) twice."""

    def __init__(self, in_ch, out_ch):
        super(double_conv, self).__init__()
        stages = [
            nn.BatchNorm2d(in_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
        ]
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv(x)
class one_conv(nn.Module):
    """Pre-activated 1x1 conv used as a channel-matching shortcut."""

    def __init__(self, in_ch, out_ch):
        super(one_conv, self).__init__()
        self.conv = nn.Sequential(nn.BatchNorm2d(in_ch),
                                  nn.ReLU(inplace=True),
                                  nn.Conv2d(in_ch, out_ch, 1))

    def forward(self, x):
        return self.conv(x)
class inconv(nn.Module):
    """Input stem: Conv3x3 -> BN -> ReLU -> Conv3x3 (no pre-activation)."""

    def __init__(self, in_ch, out_ch):
        super(inconv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
        )

    def forward(self, x):
        return self.conv(x)
class down(nn.Module):
    """Residual encoder step: max-pool, then conv branch + 1x1 shortcut."""

    def __init__(self, in_ch, out_ch):
        super(down, self).__init__()
        self.pool = nn.MaxPool2d(2)
        self.mpconv = double_conv(in_ch, out_ch)
        self.bridge = one_conv(in_ch, out_ch)

    def forward(self, x):
        pooled = self.pool(x)
        # Residual sum of the main branch and the channel-matching shortcut.
        return self.mpconv(pooled) + self.bridge(pooled)
class up(nn.Module):
    """Residual decoder step: upsample ``x1``, pad ``x2``, concat, then
    double_conv plus a 1x1 shortcut (residual sum).

    Args:
        in_ch: channel count after concatenation (skip + upsampled).
        out_ch: output channels of the fused block.
        bilinear: use parameter-free bilinear upsampling instead of a
            transposed convolution.
    """
    def __init__(self, in_ch, out_ch, bilinear=True):
        super(up, self).__init__()
        # would be a nice idea if the upsampling could be learned too,
        # but my machine do not have enough memory to handle all those weights
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2)
        self.conv = double_conv(in_ch, out_ch)
        self.bridge = one_conv(in_ch, out_ch)
    def forward(self, x1, x2):
        x1 = self.up(x1)
        # Size mismatch between the upsampled map and the skip connection.
        diffX = x1.size()[2] - x2.size()[2]
        diffY = x1.size()[3] - x2.size()[3]
        # NOTE(review): F.pad's first pair applies to the *last* dimension
        # (dim 3), yet diffX is measured on dim 2 -- the pairs look swapped.
        # Harmless when diffX == diffY; confirm before relying on odd sizes.
        x2 = F.pad(x2, (diffX // 2, int(diffX / 2),
                        diffY // 2, int(diffY / 2)))
        x = torch.cat([x2, x1], dim=1)
        # Residual sum: pre-activated conv branch + 1x1 shortcut.
        x = self.conv(x) + self.bridge(x)
        return x
class outconv(nn.Module):
    """Output head: BN -> ReLU -> 1x1 conv down to the class count."""

    def __init__(self, in_ch, out_ch):
        super(outconv, self).__init__()
        stages = [
            nn.BatchNorm2d(in_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_ch, out_ch, 1),
        ]
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv(x)
class ResUNet(nn.Module):
    """Residual U-Net: 4-level encoder/decoder built from the residual
    down/up stages above. Returns raw logits (the final softmax was
    intentionally left commented out upstream).
    """

    def __init__(self, n_channels, n_classes):
        super(ResUNet, self).__init__()
        self.inc = inconv(n_channels, 64)
        self.down1 = down(64, 128)
        self.down2 = down(128, 256)
        self.down3 = down(256, 512)
        self.down4 = down(512, 512)
        self.up1 = up(1024, 256)
        self.up2 = up(512, 128)
        self.up3 = up(256, 64)
        self.up4 = up(128, 64)
        self.outc = outconv(64, n_classes)

    def forward(self, x):
        # Encoder with skip connections.
        skip0 = self.inc(x)
        skip1 = self.down1(skip0)
        skip2 = self.down2(skip1)
        skip3 = self.down3(skip2)
        bottom = self.down4(skip3)
        # Decoder, fusing each level with its skip.
        out = self.up1(bottom, skip3)
        out = self.up2(out, skip2)
        out = self.up3(out, skip1)
        out = self.up4(out, skip0)
        return self.outc(out)
| 3,831 | 27.176471 | 86 | py |
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 16/model/concave_dps.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class double_conv(nn.Module):
    '''(conv => BN => ReLU) * 2'''

    def __init__(self, in_ch, out_ch):
        super(double_conv, self).__init__()
        # Two pre-activation units (BN -> ReLU -> 3x3 conv); padding keeps
        # spatial size, channels go in_ch -> out_ch -> out_ch.
        units = [
            nn.BatchNorm2d(in_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
        ]
        self.conv = nn.Sequential(*units)

    def forward(self, x):
        return self.conv(x)
class one_conv(nn.Module):
    """Pre-activation 1x1 projection (BN -> ReLU -> 1x1 conv)."""

    def __init__(self, in_ch, out_ch):
        super(one_conv, self).__init__()
        units = [
            nn.BatchNorm2d(in_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_ch, out_ch, 1),
        ]
        self.conv = nn.Sequential(*units)

    def forward(self, x):
        return self.conv(x)
class res_conv(nn.Module):
    """Residual block: double_conv main path plus a shortcut.

    The shortcut is the identity when input and output shapes already agree,
    otherwise a 1x1 one_conv projection.
    """

    def __init__(self, in_ch, out_ch):
        super(res_conv, self).__init__()
        self.conv1 = double_conv(in_ch, out_ch)
        self.bridge = one_conv(in_ch, out_ch)

    def forward(self, x):
        main = self.conv1(x)
        shortcut = x if x.shape == main.shape else self.bridge(x)
        return shortcut + main
class inconv(nn.Module):
    """Input stem: conv -> BN -> ReLU -> conv applied to the raw image."""

    def __init__(self, in_ch, out_ch):
        super(inconv, self).__init__()
        units = [
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
        ]
        self.conv = nn.Sequential(*units)

    def forward(self, x):
        return self.conv(x)
class down(nn.Module):
    """Downsampling fusion stage for the multi-scale (concave) network.

    Pools the main input, concatenates it with a side input y along
    channels (a summation variant was tried and abandoned upstream), then
    applies a residual double_conv with an identity-or-projection shortcut.
    """

    def __init__(self, in_ch, out_ch):
        super(down, self).__init__()
        self.pool = nn.MaxPool2d(2)
        self.mpconv = double_conv(in_ch, out_ch)
        self.bridge = one_conv(in_ch, out_ch)

    def forward(self, x, y):
        pooled = self.pool(x)
        fused = torch.cat((pooled, y), 1)
        main = self.mpconv(fused)
        shortcut = fused if fused.shape == main.shape else self.bridge(fused)
        return shortcut + main
class up(nn.Module):
    # Upsampling stage: 2x upsample x1, pad the skip tensor x2 to match,
    # concatenate along channels, then a residual double_conv with a 1x1
    # one_conv bridge on the concatenated input.
    def __init__(self, in_ch, out_ch, bilinear=True):
        super(up, self).__init__()
        # would be a nice idea if the upsampling could be learned too,
        # but my machine does not have enough memory to handle all those weights
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2)
        self.conv = double_conv(in_ch, out_ch)
        self.bridge = one_conv(in_ch, out_ch)

    def forward(self, x1, x2):
        # x1: decoder feature to upsample; x2: encoder skip connection.
        x1 = self.up(x1)
        # Size mismatch after upsampling, measured on dims 2 and 3.
        # NOTE(review): for odd positive diffs, diffX//2 + int(diffX/2) is one
        # pixel short of diffX, and F.pad applies the first pair to the LAST
        # dim (so diffX pads dim 3, not dim 2) — confirm inputs are square
        # with even spatial sizes, as the original U-Net code assumed.
        diffX = x1.size()[2] - x2.size()[2]
        diffY = x1.size()[3] - x2.size()[3]
        x2 = F.pad(x2, (diffX // 2, int(diffX / 2),
                        diffY // 2, int(diffY / 2)))
        x = torch.cat([x2, x1], dim=1)
        # Residual fusion of the concatenated features.
        x = self.conv(x) + self.bridge(x)
        return x
class outconv(nn.Module):
    """Output head: BN -> ReLU -> 1x1 conv down to the class count."""

    def __init__(self, in_ch, out_ch):
        super(outconv, self).__init__()
        units = [
            nn.BatchNorm2d(in_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_ch, out_ch, 1),
        ]
        self.conv = nn.Sequential(*units)

    def forward(self, x):
        return self.conv(x)
class ResUNet(nn.Module):
    # Multi-scale residual U-Net with deep supervision ("concave" variant).
    # A pyramid of average-pooled copies of the input feeds each encoder
    # level through `inc` + res_conv chains; the decoder emits five outputs
    # (one per resolution), each upsampled back to full resolution.
    def __init__(self, n_channels, n_classes):
        super(ResUNet, self).__init__()
        self.inc = inconv(n_channels, 64)
        # Side branches: res_conv chains lifting pooled-input features to the
        # channel count expected at each encoder level.
        self.dbconv1 = res_conv(64,128)
        self.down1 = down(128, 128)
        self.dbconv2 = res_conv(64,128)
        self.dbconv3 = res_conv(128,256)
        self.down2 = down(256, 256)
        self.dbconv4 = res_conv(64,128)
        self.dbconv5 = res_conv(128,256)
        self.dbconv6 = res_conv(256,512)
        self.down3 = down(512, 512)
        self.down4 = down(1024, 512)
        # Deep-supervision heads: res_conv chains reducing each decoder
        # feature to 64 channels before its outconv.
        self.dbup1 = res_conv(512,256)
        self.dbup2 = res_conv(256,128)
        self.dbup3 = res_conv(128,64)
        self.dbup4 = res_conv(64,64)
        self.up1 = up(1024, 256)
        self.dbup5 = res_conv(256,128)
        self.dbup6 = res_conv(128,64)
        self.dbup7 = res_conv(64,64)
        self.up2 = up(512, 128)
        self.dbup8 = res_conv(128,64)
        self.dbup9 = res_conv(64,64)
        self.up3 = up(256, 64)
        self.dbup10 = res_conv(64,64)
        self.up4 = up(128, 64)
        self.outc1 = outconv(64, n_classes)
        self.outc2 = outconv(64, n_classes)
        self.outc3 = outconv(64, n_classes)
        self.outc4 = outconv(64, n_classes)
        self.outc = outconv(64, n_classes)
        self.pool = nn.AvgPool2d(2)
        self.unpool = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        # self.unpool = nn.Upsample(scale_factor=2, mode='nearest')
        # self.att = res_conv(64,1)
        # self.gapool = nn.AvgPool2d(kernel_size=224)

    def forward(self, x):
        # Encoder: main path x*, pooled-image pyramid y*, side features z*/a*.
        x1 = self.inc(x)
        y1 = self.pool(x)
        z1 = self.inc(y1)
        x2 = self.down1(x1, z1)
        y2 = self.pool(y1)
        z2 = self.inc(y2)
        a1 = self.dbconv1(z2)
        x3 = self.down2(x2, a1)
        y3 = self.pool(y2)
        z3 = self.inc(y3)
        a2 = self.dbconv2(z3)
        a3 = self.dbconv3(a2)
        x4 = self.down3(x3, a3)
        y4 = self.pool(y3)
        z4 = self.inc(y4)
        a4 = self.dbconv4(z4)
        a5 = self.dbconv5(a4)
        a6 = self.dbconv6(a5)
        x5 = self.down4(x4, a6)
        # Decoder with a prediction head at every level (deep supervision).
        o1 = self.dbup1(x5)
        o1 = self.dbup2(o1)
        o1 = self.dbup3(o1)
        o1 = self.dbup4(o1)
        out1 = self.outc1(o1)
        x6 = self.up1(x5, x4)
        o2 = self.dbup5(x6)
        o2 = self.dbup6(o2)
        o2 = self.dbup7(o2)
        out2 = self.outc2(o2)
        x7 = self.up2(x6, x3)
        o3 = self.dbup8(x7)
        o3 = self.dbup9(o3)
        out3 = self.outc3(o3)
        x8 = self.up3(x7, x2)
        o4 = self.dbup10(x8)
        out4 = self.outc4(o4)
        o5 = self.up4(x8, x1)
        out5 = self.outc(o5)
        # Upsample the intermediate feature maps back to full resolution
        # (o1 is 16x smaller, o2 8x, o3 4x, o4 2x).
        o1 = self.unpool(self.unpool(self.unpool(self.unpool(o1))))
        o2 = self.unpool(self.unpool(self.unpool(o2)))
        o3 = self.unpool(self.unpool(o3))
        o4 = self.unpool(o4)
        # w1 = self.att(o1)
        # w2 = self.att(o2)
        # w3 = self.att(o3)
        # w4 = self.att(o4)
        # w5 = self.att(o5)
        # w1 = self.gapool(w1)
        # w2 = self.gapool(w2)
        # w3 = self.gapool(w3)
        # w4 = self.gapool(w4)
        # w5 = self.gapool(w5)
        # w = torch.cat((w3, w4, w5),1)
        # w = torch.nn.Softmax2d()(w)
        # w3 = w[:,0:1,:,:]
        # w4 = w[:,1:2,:,:]
        # w5 = w[:,2:3,:,:]
        # w4 = w[:,3:4,:,:]
        # w5 = w[:,4:5,:,:]
        # Upsample every prediction to full resolution before returning.
        out1 = self.unpool(self.unpool(self.unpool(self.unpool(out1))))
        out2 = self.unpool(self.unpool(self.unpool(out2)))
        out3 = self.unpool(self.unpool(out3))
        out4 = self.unpool(out4)
        # out = w3*out3 + w4*out4 + w5*out5
        return out1, out2, out3, out4, out5
# class ResUNet(nn.Module):
# def __init__(self, n_channels, n_classes):
# super(ResUNet, self).__init__()
# self.resnet = ResUNet_0(n_channels, n_classes)
# # self.catconv = cat_conv(10,n_classes)
# self.att = nn.Sequential(
# nn.BatchNorm2d(2),
# nn.ReLU(inplace=True),
# nn.Conv2d(2, 1, 1),
# nn.BatchNorm2d(1),
# nn.ReLU(inplace=True),
# nn.Conv2d(1, 1, 3, padding=1)
# )
# self.gapool1 = nn.AvgPool2d(kernel_size=224)
# self.gapool2 = nn.MaxPool2d(kernel_size=224)
# def forward(self,x):
# a,b,c,d,e = self.resnet(x)
# w1 = self.att(a)
# w2 = self.att(b)
# w3 = self.att(c)
# w4 = self.att(d)
# w5 = self.att(e)
# w1 = self.gapool1(w1) + self.gapool2(w1)
# w2 = self.gapool1(w2) + self.gapool2(w2)
# w3 = self.gapool1(w3) + self.gapool2(w3)
# w4 = self.gapool1(w4) + self.gapool2(w4)
# w5 = self.gapool1(w5) + self.gapool2(w5)
# w = torch.cat((w1, w2, w3, w4, w5),1)
# w = torch.nn.Softmax2d()(w)
# w1 = w[:,0:1,:,:]
# w2 = w[:,1:2,:,:]
# w3 = w[:,2:3,:,:]
# w4 = w[:,3:4,:,:]
# w5 = w[:,4:5,:,:]
# fi_out = w1*a + w2*b + w3*c + w4*d + w5*e
# return fi_out
| 8,568 | 29.386525 | 86 | py |
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 16/model/concave_dps_w.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .concave_dps import ResUNet as ResUNet_0
class attention(nn.Module):
    """Attention head: BN -> ReLU -> 1x1 conv -> BN -> ReLU -> 3x3 conv.

    Maps in_ch feature channels to out_ch attention channels at the same
    spatial size.
    """

    def __init__(self, in_ch, out_ch):
        super(attention, self).__init__()
        self.conv = nn.Sequential(
            nn.BatchNorm2d(in_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_ch, out_ch, 1),
            # Fixed: normalize over out_ch channels (was hard-coded to
            # BatchNorm2d(1), which crashed for any out_ch != 1; identical
            # behavior for the out_ch == 1 case used in this file).
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1)
        )

    def forward(self, x):
        x = self.conv(x)
        return x
class ResUNet(nn.Module):
    """Attention-weighted ensemble over the five deep-supervision outputs
    of the base concave_dps ResUNet.

    Each full-resolution prediction gets one scalar score (global avg-pool
    plus max-pool of a shared attention map); the five scores are
    softmax-normalized and used to form a weighted sum, which is then
    softmaxed over classes.
    """

    def __init__(self, n_channels, n_classes):
        super(ResUNet, self).__init__()
        self.resnet = ResUNet_0(n_channels, n_classes)
        self.att = attention(n_classes, 1)
        # NOTE(review): pooling kernels hard-code a 224x224 input — confirm.
        self.gapool1 = nn.AvgPool2d(kernel_size=224)
        self.gapool2 = nn.MaxPool2d(kernel_size=224)

    def forward(self, x):
        preds = self.resnet(x)  # five deep-supervision outputs
        # Shared attention map per head, reduced to one scalar each.
        maps = [self.att(p) for p in preds]
        scores = [self.gapool1(m) + self.gapool2(m) for m in maps]
        weights = torch.nn.Softmax2d()(torch.cat(scores, 1))
        fused = sum(weights[:, i:i + 1, :, :] * preds[i] for i in range(len(preds)))
        # softmax for uniseg
        return F.softmax(fused, dim=1)
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 3/main.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import abc
import sys
import numpy as np
import pandas as pd
from sklearn import linear_model, preprocessing, cluster
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
from scipy.optimize import fmin_ncg
import os.path
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.keras import backend as k
from tensorflow.contrib.learn.python.learn.datasets import base
from influence.hessians import hessian_vector_product
from influence.dataset import DataSet
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import pandas as pd
from pylab import rcParams
# Wide default figure size for the qualitative digit plots at the bottom.
rcParams['figure.figsize'] = 8, 10
########### Data Loading
# MNIST with one-hot labels; downloaded/cached under /tmp on first run.
mnist = input_data.read_data_sets('/tmp/tensorflow/mnist/input_data', one_hot=True)
Train_input = mnist.train.images
Train_label = mnist.train.labels
Test_input = mnist.test.images
Test_label = mnist.test.labels
########## The necessary functions
## the purposes of these functions are showed by their names
def get_influence_on_test_loss(sess, grad_total_loss_op, test_indices, train_idx=None,
        approx_type='lissa', approx_params=None, force_refresh=True, test_description=None,
        X_train = Train_input, Y_train = Train_label, X_test = Test_input, Y_test = Test_label):
    """Estimate the influence of the first 100 training points on the loss
    at ``test_indices`` using a LiSSA inverse-HVP approximation.

    Returns a length-100 array of predicted loss differences.
    NOTE(review): relies on the module-level graph objects (x, y_,
    grad_loss_no_reg_op, v_placeholder, hessian_vector); the train_idx,
    approx_type, approx_params and force_refresh parameters are accepted
    but unused in this implementation.
    """
    # If train_idx is None then use X and Y (phantom points)
    # Need to make sure test_idx stays consistent between models
    # because mini-batching permutes dataset order
    test_grad_loss_no_reg_val = get_test_grad_loss_no_reg_val(sess, grad_loss_no_reg_op, X_test, Y_test, test_indices,batch_size=100 )
    print('Norm of test gradient: %s' % np.linalg.norm(test_grad_loss_no_reg_val[0]))
    start_time = time.time()
    if test_description is None:
        test_description = test_indices
    # Approximate H^{-1} * grad_test with the stochastic LiSSA recursion.
    inverse_hvp = get_inverse_hvp_lissa(test_grad_loss_no_reg_val, sess, v_placeholder, hessian_vector,
        batch_size=None,
        scale=10, damping=0.0, num_samples=1, recursion_depth=1000)
    duration = time.time() - start_time
    print('Inverse HVP took %s sec' % duration)
    start_time = time.time()
    num_to_remove = 100
    predicted_loss_diffs = np.zeros([num_to_remove])
    for counter in np.arange(num_to_remove):
        print(counter)
        # NOTE(review): X_train[counter, :] is fed as a 1-D row — confirm the
        # placeholder accepts it (the main script feeds counter:counter+1
        # slices instead).
        single_train_feed_dict = {x: X_train[counter, :], y_ : [Y_train[counter,:]]}
        train_grad_loss_val = sess.run(grad_total_loss_op, feed_dict=single_train_feed_dict)
        # Dot product of the inverse-HVP with this example's gradient,
        # scaled by 1/num_to_remove.
        predicted_loss_diffs[counter] = np.dot(np.concatenate(inverse_hvp), np.concatenate(train_grad_loss_val)) / num_to_remove
    duration = time.time() - start_time
    print('Multiplying by %s train examples took %s sec' % (num_to_remove, duration))
    return predicted_loss_diffs
def get_test_grad_loss_no_reg_val(sess, grad_loss_no_reg_op, Test_input, Test_label, test_indices, batch_size=100):
    """Average the loss gradient over the given test indices, in mini-batches.

    NOTE(review): uses the module-level placeholders x and y_; if
    test_indices is None, the accumulator is never defined and the final
    division raises NameError.
    """
    if test_indices is not None:
        num_iter = int(np.ceil(len(test_indices) / batch_size))
        test_grad_loss_no_reg_val = None
        for i in range(num_iter):
            start = i * batch_size
            end = int(min((i+1) * batch_size, len(test_indices)))
            test_feed_dict = fill_feed_dict_with_some_ex(x, y_, Test_input, Test_label, test_indices[start:end])
            temp = sess.run(grad_loss_no_reg_op, feed_dict=test_feed_dict)
            # Weight each batch by its size so the final division yields a mean.
            if test_grad_loss_no_reg_val is None:
                test_grad_loss_no_reg_val = [a * (end-start) for a in temp]
            else:
                test_grad_loss_no_reg_val = [a + b * (end-start) for (a, b) in zip(test_grad_loss_no_reg_val, temp)]
    test_grad_loss_no_reg_val = [a/len(test_indices) for a in test_grad_loss_no_reg_val]
    return test_grad_loss_no_reg_val
def fill_feed_dict_with_all_but_one_ex(x, y_, data_images, data_labels, idx_to_remove):
    """Build a feed dict with every example except ``idx_to_remove``.

    x, y_: placeholder tensors used as feed-dict keys.
    data_images, data_labels: arrays indexed along axis 0.
    Returns {x: images minus the row, y_: labels minus the row}.
    """
    num_examples = data_images.shape[0]
    keep = np.ones(num_examples, dtype=bool)
    keep[idx_to_remove] = False
    # Fixed: the labels key was the undefined global ``y`` instead of the
    # ``y_`` placeholder parameter, which raised NameError when called.
    feed_dict = {
        x: data_images[keep, :],
        y_: data_labels[keep, :],
    }
    return feed_dict
def fill_feed_dict_with_some_ex(x, y_, data_images, data_labels, target_indices):
    """Feed dict for only the rows selected by ``target_indices``."""
    return {
        x: data_images[target_indices, :],
        y_: data_labels[target_indices, :],
    }
def fill_feed_dict_with_batch(x, y_, Test_input, Test_label, batch_size=0):
    # Only the full-batch case is implemented: batch_size=None feeds the
    # whole dataset; any other value falls through and returns None.
    if batch_size is None:
        return fill_feed_dict_with_all_ex(x, y_, Test_input, Test_label)
def fill_feed_dict_with_all_ex(x, y_, data_images, data_labels):
    """Feed dict mapping the two placeholders to the entire dataset."""
    return {x: data_images, y_: data_labels}
def get_inverse_hvp_lissa(v, sess, v_placeholder, hessian_vector,
        batch_size=None,
        scale=10, damping=0.0, num_samples=1, recursion_depth=10000):
    """
    This uses mini-batching; uncomment code for the single sample case.

    Approximates H^{-1} v via the LiSSA recursion
    cur <- v + (1-damping)*cur - (H cur)/scale, averaged over num_samples
    runs and divided by scale at the end.
    NOTE(review): the Hessian-vector products are evaluated on the
    module-level Test_input/Test_label through the global placeholders
    x and y_.
    """
    inverse_hvp = None
    # Float on purposes of progress printing; j % float still works.
    print_iter = recursion_depth / 10
    for i in range(num_samples):
        # samples = np.random.choice(self.num_train_examples, size=recursion_depth)
        cur_estimate = v
        for j in range(recursion_depth):
            # feed_dict = fill_feed_dict_with_one_ex(
            #   data_set,
            #   images_placeholder,
            #   labels_placeholder,
            #   samples[j])
            feed_dict = fill_feed_dict_with_batch(x, y_, Test_input, Test_label, batch_size=batch_size)
            feed_dict = update_feed_dict_with_v_placeholder(v_placeholder, feed_dict, cur_estimate)
            hessian_vector_val = sess.run(hessian_vector, feed_dict=feed_dict)
            cur_estimate = [a + (1-damping) * b - c/scale for (a,b,c) in zip(v, cur_estimate, hessian_vector_val)]
            # Update: v + (I - Hessian_at_x) * cur_estimate
            if (j % print_iter == 0) or (j == recursion_depth - 1):
                print("Recursion at depth %s: norm is %.8lf" % (j, np.linalg.norm(cur_estimate[0])))
        feed_dict = update_feed_dict_with_v_placeholder(v_placeholder, feed_dict, cur_estimate)
        if inverse_hvp is None:
            inverse_hvp = [b/scale for b in cur_estimate]
        else:
            inverse_hvp = [a + b/scale for (a, b) in zip(inverse_hvp, cur_estimate)]
    # Average the estimates across the independent samples.
    inverse_hvp = [a/num_samples for a in inverse_hvp]
    return inverse_hvp
def update_feed_dict_with_v_placeholder(v_placeholder, feed_dict, vec):
    """Map each placeholder block to the matching vector block (in place)
    and return the same dict."""
    feed_dict.update(zip(v_placeholder, vec))
    return feed_dict
## Define the Model and Path for Gradients
batch_size = 50
total_batch = int(mnist.train.num_examples/batch_size)
num_epochs = 5
tf.reset_default_graph()
# Placeholders: flattened 28x28 images and one-hot labels.
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10], name="truth")
#Set the weights for the network
xavier = tf.contrib.layers.xavier_initializer_conv2d()
conv1_weights = tf.get_variable(name="c1", initializer=xavier, shape=[5, 5, 1, 10])
conv1_biases = tf.Variable(tf.zeros([10]))
conv2_weights = tf.get_variable(name="c2", initializer=xavier, shape=[5, 5, 10, 25])
conv2_biases = tf.Variable(tf.zeros([25]))
conv3_weights = tf.get_variable(name="c3", initializer=xavier, shape=[4, 4, 25, 100])
conv3_biases = tf.Variable(tf.zeros([100]))
fc1_weights = tf.Variable(tf.truncated_normal([4 * 4 * 100, 10], stddev=0.1))
fc1_biases = tf.Variable(tf.zeros([10]))
#Stack the Layers
reshaped_input = tf.reshape(x, [-1, 28, 28, 1], name="absolute_input")
#layer 1
conv1 = tf.nn.conv2d(reshaped_input, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 2
conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 3
conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))
pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#layer 4 (fully connected logits)
pool_shape = pool3.get_shape().as_list()
reshaped = tf.reshape(pool3, [-1, pool_shape[1] * pool_shape[2] * pool_shape[3]])
y = tf.add(tf.matmul(reshaped, fc1_weights), fc1_biases, name="absolute_output")
# Define loss and optimizer
total_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
grads = tf.gradients(total_loss,x)
params = tf.trainable_variables()
# Gradients of the loss w.r.t. every trainable parameter.
grad_total_loss_op = tf.gradients(total_loss, params)
grad_loss_no_reg_op = grad_total_loss_op
# Placeholders for the vector blocks used in Hessian-vector products.
v_placeholder = [tf.placeholder(tf.float32, shape=a.get_shape()) for a in params]
u_placeholder = [tf.placeholder(tf.float32, shape=a.get_shape()) for a in params]
hessian_vector = hessian_vector_product(total_loss, params, v_placeholder)
grad_loss_wrt_input_op = tf.gradients(total_loss, x)
# Because tf.gradients auto accumulates, we probably don't need the add_n (or even reduce_sum)
influence_op = tf.add_n(
    [tf.reduce_sum(tf.multiply(a, array_ops.stop_gradient(b))) for a, b in zip(grad_total_loss_op, v_placeholder)])
grad_influence_wrt_input_op = tf.gradients(influence_op, x)
# NOTE(review): train_step is built but never run — the model below is
# restored from a checkpoint instead of trained here.
train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(total_loss)
########### Import Trained Model
saver = tf.train.Saver()
sess = tf.Session()
saver.restore(sess,"save_model/MNIST.ckpt")
########### Test Indice is 34
Test_indices = [34]
test_grad_loss_no_reg_val = get_test_grad_loss_no_reg_val(sess, grad_loss_no_reg_op, Test_input, Test_label, Test_indices,batch_size=100 )
print('Norm of test gradient: %s' % np.linalg.norm(test_grad_loss_no_reg_val[0]))
# LiSSA approximation of H^{-1} * grad_test (shallow recursion depth of 50).
inverse_hvp = get_inverse_hvp_lissa(test_grad_loss_no_reg_val, sess, v_placeholder, hessian_vector,
    batch_size=None,
    scale=10, damping=0.0, num_samples=1, recursion_depth=50)
########### Compute the Influence function
num_to_remove = 1000
predicted_loss_diffs = np.zeros([num_to_remove])
for counter in np.arange(num_to_remove):
    print(counter)
    single_train_feed_dict = {x: Train_input[counter:counter+1, :], y_ : Train_label[counter:counter+1,:]}
    train_grad_loss_val = sess.run(grad_total_loss_op, feed_dict=single_train_feed_dict)
    # Accumulate the blockwise dot product of inverse-HVP and gradient.
    for q in np.arange(len(inverse_hvp)):
        predicted_loss_diffs[counter] = predicted_loss_diffs[counter] + np.dot(np.reshape(inverse_hvp[q],(1,-1)), np.reshape(train_grad_loss_val[q],(-1,1)))
    predicted_loss_diffs[counter] = predicted_loss_diffs[counter] / num_to_remove
#%%
# Qualitative figure: the test digit and two (hand-picked) harmful examples.
plt.rcParams["font.family"] = "Times New Roman"
plt.figure()
plt.subplot(1,3,1)
fig = plt.imshow(np.reshape(Test_input[34,:], (28,28)))
plt.title('Test Image')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.subplot(1,3,2)
fig = plt.imshow(np.reshape(Train_input[0,:], (28,28)))
plt.title('Harmful Image')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.subplot(1,3,3)
fig = plt.imshow(np.reshape(Train_input[68,:], (28,28)))
plt.title('Harmful Image')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.savefig('InfluenceFunctionofDigits.png')
| 12,510 | 35.263768 | 168 | py |
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 3/influence/image_utils.py | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras import backend as K
sns.set(color_codes=True)
def plot_flat_bwimage(X, y=None, pos_class=1, neg_class=-1, side=28):
    """Show a flattened grayscale image; title with its class label if given."""
    img = np.reshape(X, (side, side))
    label = None
    if y is not None:
        label = pos_class if y == 1 else neg_class
    with sns.axes_style("white"):
        if y is not None:
            plt.title('Label is %s' % label)
        plt.imshow(img, cmap='gray', interpolation='none')
def plot_flat_bwgrad(X, side=28):
    """Show a flattened gradient map on a symmetric coolwarm scale with a colorbar."""
    grad = np.reshape(X, (side, side))
    bound = np.max(np.abs(grad))
    with sns.axes_style("white"):
        fig, ax = plt.subplots()
        mappable = ax.imshow(grad, cmap='coolwarm', vmax=bound, vmin=-bound, interpolation='none')
        fig.colorbar(mappable)
def plot_flat_colorimage(X, y, pos_class=1, neg_class=-1, side=32):
    """Show a flattened RGB image with its class label in the title."""
    img = np.reshape(X, (side, side, 3))
    label = pos_class if y == 1 else neg_class
    with sns.axes_style("white"):
        fig, ax = plt.subplots(figsize=(6, 6))
        ax.set_title('Label is %s' % label)
        ax.imshow(img, interpolation='none')
        # ax.imshow(X)
        plt.show()
def plot_flat_colorgrad(X, side=32):
    """Show a flattened RGB gradient map with a colorbar."""
    grad = np.reshape(X, (side, side, 3))
    with sns.axes_style("white"):
        fig, ax = plt.subplots()
        mappable = ax.imshow(grad, interpolation='none')
        fig.colorbar(mappable)
| 1,739 | 30.636364 | 99 | py |
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 3/influence/imagenet_utils.py | # Taken from https://github.com/fchollet/keras/blob/master/keras/applications/imagenet_utils.py
import numpy as np
import json
from keras.utils.data_utils import get_file
from keras import backend as K
CLASS_INDEX = None
CLASS_INDEX_PATH = 'https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json'
def preprocess_input(x, dim_ordering='default'):
    """Zero-center a batch of images by fixed channel means, then flip RGB->BGR.

    dim_ordering: 'th' (channels first), 'tf' (channels last), or 'default'
    to read the Keras backend setting. Mutates ``x`` in place for the
    subtraction and returns a reversed-channel view.
    """
    if dim_ordering == 'default':
        dim_ordering = K.image_dim_ordering()
    assert dim_ordering in {'tf', 'th'}

    # Per-channel means, subtracted in channel index order 0, 1, 2.
    means = (103.939, 116.779, 123.68)
    if dim_ordering == 'th':
        for c, mean in enumerate(means):
            x[:, c, :, :] -= mean
        # 'RGB'->'BGR'
        x = x[:, ::-1, :, :]
    else:
        for c, mean in enumerate(means):
            x[:, :, :, c] -= mean
        # 'RGB'->'BGR'
        x = x[:, :, :, ::-1]
    return x
def decode_predictions(preds, top=5):
    """Decode ImageNet predictions into (class_id, class_name, score) tuples.

    preds: array of shape (samples, 1000). Downloads the class-index JSON
    on first use and caches it in the module-level CLASS_INDEX.
    Returns one list of ``top`` tuples per sample, highest score first.
    """
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        CLASS_INDEX = json.load(open(fpath))
    results = []
    for pred in preds:
        # Indices of the top-k scores, highest first.
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        results.append(result)
    return results
| 1,663 | 31.627451 | 105 | py |
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 3/influence/inception_v3.py | # -*- coding: utf-8 -*-
"""Inception V3 model for Keras.
Note that the input image format for this model is different than for
the VGG16 and ResNet models (299x299 instead of 224x224),
and that the input preprocessing function is also different (same as Xception).
# Reference
- [Rethinking the Inception Architecture for Computer Vision](http://arxiv.org/abs/1512.00567)
"""
from __future__ import print_function
from __future__ import absolute_import
import warnings
import numpy as np
from keras.models import Model
from keras import layers
from keras.layers import Activation
from keras.layers import Dense
from keras.layers import Input
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import AveragePooling2D
from keras.layers import GlobalAveragePooling2D
from keras.layers import GlobalMaxPooling2D
from keras.engine.topology import get_source_inputs
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.utils.data_utils import get_file
from keras import backend as K
from keras.applications.imagenet_utils import decode_predictions
from keras.applications.imagenet_utils import _obtain_input_shape
from keras.preprocessing import image
WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
def conv2d_bn(x,
              filters,
              num_row,
              num_col,
              padding='same',
              strides=(1, 1),
              name=None):
    """Utility function to apply conv + BN.

    Arguments:
        x: input tensor.
        filters: filters in `Conv2D`.
        num_row: height of the convolution kernel.
        num_col: width of the convolution kernel.
        padding: padding mode in `Conv2D`.
        strides: strides in `Conv2D`.
        name: name of the ops; becomes `name + '_conv'` for the convolution
            and `name + '_bn'` for the batch-norm layer.

    Returns:
        Output tensor after applying `Conv2D`, `BatchNormalization`, and ReLU.
    """
    if name is None:
        bn_name = conv_name = None
    else:
        bn_name = name + '_bn'
        conv_name = name + '_conv'
    # BN normalizes over the channel axis, which depends on the data format.
    bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
    x = Conv2D(
        filters, (num_row, num_col),
        strides=strides,
        padding=padding,
        use_bias=False,
        name=conv_name)(x)
    x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
    return Activation('relu', name=name)(x)
def InceptionV3(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Inception v3 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
Note that the default input image size for this model is 299x299.
Arguments:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(299, 299, 3)` (with `channels_last` data format)
or `(3, 299, 299)` (with `channels_first` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 139.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(
input_shape,
default_size=299,
min_size=139,
data_format=K.image_data_format(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
img_input = Input(tensor=input_tensor, shape=input_shape)
if K.image_data_format() == 'channels_first':
channel_axis = 1
else:
channel_axis = 3
x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
x = conv2d_bn(x, 32, 3, 3, padding='valid')
x = conv2d_bn(x, 64, 3, 3)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv2d_bn(x, 80, 1, 1, padding='valid')
x = conv2d_bn(x, 192, 3, 3, padding='valid')
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
# mixed 0, 1, 2: 35 x 35 x 256
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed0')
# mixed 1: 35 x 35 x 256
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed1')
# mixed 2: 35 x 35 x 256
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed2')
# mixed 3: 17 x 17 x 768
branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(
branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed3')
# mixed 4: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 128, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 128, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed4')
# mixed 5, 6: 17 x 17 x 768
for i in range(2):
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 160, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(5 + i))
# mixed 7: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 192, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed7')
# mixed 8: 8 x 8 x 1280
branch3x3 = conv2d_bn(x, 192, 1, 1)
branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
strides=(2, 2), padding='valid')
branch7x7x3 = conv2d_bn(x, 192, 1, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = conv2d_bn(
branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name='mixed8')
# mixed 9: 8 x 8 x 2048
for i in range(2):
branch1x1 = conv2d_bn(x, 320, 1, 1)
branch3x3 = conv2d_bn(x, 384, 1, 1)
branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = layers.concatenate(
[branch3x3_1, branch3x3_2], axis=channel_axis, name='mixed9_' + str(i))
branch3x3dbl = conv2d_bn(x, 448, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = layers.concatenate(
[branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch3x3, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(9 + i))
if include_top:
# Classification block
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='inception_v3')
# load weights
if weights == 'imagenet':
if K.image_data_format() == 'channels_first':
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
if include_top:
weights_path = get_file(
'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
md5_hash='9a0d58056eeedaa3f26cb7ebd46da564')
else:
# Replace this with a local copy for reproducibility
# weights_path = get_file(
# 'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
# WEIGHTS_PATH_NO_TOP,
# cache_subdir='models',
# md5_hash='bcbd6486424b2319ff4ef7d526e38f63')
weights_path = 'inception/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
model.load_weights(weights_path)
if K.backend() == 'theano':
convert_all_kernels_in_model(model)
return model
def preprocess_input(x):
    """Scale pixel values from [0, 255] to [-1, 1] (Inception convention).

    NOTE: operates in place via augmented assignment, so the caller's array
    is mutated and ``x`` must have a float dtype for ``/=`` to succeed.
    """
    x /= 255.
    x -= 0.5
    x *= 2.
    return x
if __name__ == '__main__':
    # Demo: classify one sample image with the pretrained ImageNet weights.
    model = InceptionV3(include_top=True, weights='imagenet')

    img_path = 'elephant.jpg'
    img = image.load_img(img_path, target_size=(299, 299))
    # Convert to an array, add the batch axis, and rescale to [-1, 1].
    batch = np.expand_dims(image.img_to_array(img), axis=0)
    batch = preprocess_input(batch)

    preds = model.predict(batch)
    print('Predicted:', decode_predictions(preds))
| 15,178 | 35.753027 | 152 | py |
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 3/influence/genericNeuralNet.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import abc
import sys
import numpy as np
import pandas as pd
from sklearn import linear_model, preprocessing, cluster
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
from scipy.optimize import fmin_ncg
import os.path
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.ops import array_ops
from keras import backend as K
from tensorflow.contrib.learn.python.learn.datasets import base
from influence.hessians import hessian_vector_product
from influence.dataset import DataSet
def variable(name, shape, initializer):
    """Create (or fetch from the current variable scope) a float32 variable."""
    return tf.get_variable(
        name,
        shape,
        initializer=initializer,
        dtype=tf.float32)
def variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    The variable is drawn from a truncated normal distribution; when ``wd``
    is not None an L2 penalty scaled by ``wd`` is registered in the
    'losses' graph collection (picked up later by the total-loss add_n).

    Args:
        name: name of the variable.
        shape: list of ints.
        stddev: standard deviation of the truncated Gaussian.
        wd: L2 weight-decay coefficient, or None for no decay.

    Returns:
        Variable Tensor.
    """
    init = tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32)
    var = variable(name, shape, init)
    if wd is not None:
        penalty = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', penalty)
    return var
def normalize_vector(v):
    """Normalize a vector supplied as a list of numpy arrays.

    The pieces are concatenated to measure the overall Euclidean norm,
    then each piece is divided by that norm.

    Returns:
        (normalized_list, norm) — the unit-length list form and its norm.
    """
    norm = np.linalg.norm(np.concatenate(v))
    return [piece / norm for piece in v], norm
class GenericNeuralNet(object):
"""
Multi-class classification.
"""
def __init__(self, **kwargs):
    """Build the full TF graph: inputs, loss, train ops, and the gradient /
    Hessian-vector-product ops needed for influence computation.

    Required kwargs: batch_size, data_sets, model_name, num_classes,
    initial_learning_rate, decay_epochs.
    Optional kwargs: train_dir, log_dir, keep_probs, mini_batch, damping.
    """
    # Fixed seeds so repeated constructions are reproducible.
    np.random.seed(0)
    tf.set_random_seed(0)

    self.batch_size = kwargs.pop('batch_size')
    self.data_sets = kwargs.pop('data_sets')
    self.train_dir = kwargs.pop('train_dir', 'output')
    log_dir = kwargs.pop('log_dir', 'log')
    self.model_name = kwargs.pop('model_name')
    self.num_classes = kwargs.pop('num_classes')
    self.initial_learning_rate = kwargs.pop('initial_learning_rate')
    self.decay_epochs = kwargs.pop('decay_epochs')

    if 'keep_probs' in kwargs: self.keep_probs = kwargs.pop('keep_probs')
    else: self.keep_probs = None

    if 'mini_batch' in kwargs: self.mini_batch = kwargs.pop('mini_batch')
    else: self.mini_batch = True

    # Damping is added to the Hessian diagonal in minibatch_hessian_vector_val.
    if 'damping' in kwargs: self.damping = kwargs.pop('damping')
    else: self.damping = 0.0

    if not os.path.exists(self.train_dir):
        os.makedirs(self.train_dir)

    # Initialize session
    config = tf.ConfigProto()
    self.sess = tf.Session(config=config)
    K.set_session(self.sess)

    # Setup input
    self.input_placeholder, self.labels_placeholder = self.placeholder_inputs()
    self.num_train_examples = self.data_sets.train.labels.shape[0]
    self.num_test_examples = self.data_sets.test.labels.shape[0]

    # Setup inference and training (subclasses provide inference/placeholders).
    if self.keep_probs is not None:
        self.keep_probs_placeholder = tf.placeholder(tf.float32, shape=(2))
        self.logits = self.inference(self.input_placeholder, self.keep_probs_placeholder)
    elif hasattr(self, 'inference_needs_labels'):
        self.logits = self.inference(self.input_placeholder, self.labels_placeholder)
    else:
        self.logits = self.inference(self.input_placeholder)

    self.total_loss, self.loss_no_reg, self.indiv_loss_no_reg = self.loss(
        self.logits,
        self.labels_placeholder)

    self.global_step = tf.Variable(0, name='global_step', trainable=False)
    self.learning_rate = tf.Variable(self.initial_learning_rate, name='learning_rate', trainable=False)
    self.learning_rate_placeholder = tf.placeholder(tf.float32)
    self.update_learning_rate_op = tf.assign(self.learning_rate, self.learning_rate_placeholder)

    self.train_op = self.get_train_op(self.total_loss, self.global_step, self.learning_rate)
    self.train_sgd_op = self.get_train_sgd_op(self.total_loss, self.global_step, self.learning_rate)
    self.accuracy_op = self.get_accuracy_op(self.logits, self.labels_placeholder)
    self.preds = self.predictions(self.logits)

    # Setup misc
    self.saver = tf.train.Saver()

    # Setup gradients and Hessians
    self.params = self.get_all_params()
    self.grad_total_loss_op = tf.gradients(self.total_loss, self.params)
    self.grad_loss_no_reg_op = tf.gradients(self.loss_no_reg, self.params)
    # v/u placeholders mirror the parameter block shapes for HVP feeds.
    self.v_placeholder = [tf.placeholder(tf.float32, shape=a.get_shape()) for a in self.params]
    self.u_placeholder = [tf.placeholder(tf.float32, shape=a.get_shape()) for a in self.params]

    self.hessian_vector = hessian_vector_product(self.total_loss, self.params, self.v_placeholder)

    self.grad_loss_wrt_input_op = tf.gradients(self.total_loss, self.input_placeholder)

    # Because tf.gradients auto accumulates, we probably don't need the add_n (or even reduce_sum)
    self.influence_op = tf.add_n(
        [tf.reduce_sum(tf.multiply(a, array_ops.stop_gradient(b))) for a, b in zip(self.grad_total_loss_op, self.v_placeholder)])

    self.grad_influence_wrt_input_op = tf.gradients(self.influence_op, self.input_placeholder)

    self.checkpoint_file = os.path.join(self.train_dir, "%s-checkpoint" % self.model_name)

    self.all_train_feed_dict = self.fill_feed_dict_with_all_ex(self.data_sets.train)
    self.all_test_feed_dict = self.fill_feed_dict_with_all_ex(self.data_sets.test)

    init = tf.global_variables_initializer()
    self.sess.run(init)

    self.vec_to_list = self.get_vec_to_list_fn()
    # NOTE(review): this rebinds self.adversarial_loss from the bound method
    # to its tensor result, so the method can only be invoked this once.
    self.adversarial_loss, self.indiv_adversarial_loss = self.adversarial_loss(self.logits, self.labels_placeholder)
    if self.adversarial_loss is not None:
        self.grad_adversarial_loss_op = tf.gradients(self.adversarial_loss, self.params)
def get_vec_to_list_fn(self):
    """Build a closure that splits a flat parameter vector back into
    per-block pieces matching the shapes of self.params.

    Also records self.num_params as a side effect.
    """
    params_val = self.sess.run(self.params)
    self.num_params = len(np.concatenate(params_val))
    print('Total number of parameters: %s' % self.num_params)

    def vec_to_list(v):
        pieces = []
        offset = 0
        for block in params_val:
            pieces.append(v[offset : offset + len(block)])
            offset += len(block)
        # Every element of v must be consumed exactly once.
        assert offset == len(v)
        return pieces

    return vec_to_list
def reset_datasets(self):
    """Rewind the batch pointer of every non-None dataset split."""
    for split in self.data_sets:
        if split is not None:
            split.reset_batch()
def fill_feed_dict_with_all_ex(self, data_set):
    """Feed dict covering every example in data_set."""
    return {
        self.input_placeholder: data_set.x,
        self.labels_placeholder: data_set.labels,
    }
def fill_feed_dict_with_all_but_one_ex(self, data_set, idx_to_remove):
    """Feed dict containing every example except the one at idx_to_remove."""
    keep = np.ones(data_set.x.shape[0], dtype=bool)
    keep[idx_to_remove] = False
    return {
        self.input_placeholder: data_set.x[keep, :],
        self.labels_placeholder: data_set.labels[keep],
    }
def fill_feed_dict_with_batch(self, data_set, batch_size=0):
    """Feed dict for the next mini-batch.

    batch_size=None feeds the entire dataset; batch_size=0 (the default)
    falls back to self.batch_size.
    """
    if batch_size is None:
        return self.fill_feed_dict_with_all_ex(data_set)
    if batch_size == 0:
        batch_size = self.batch_size

    xs, ys = data_set.next_batch(batch_size)
    return {
        self.input_placeholder: xs,
        self.labels_placeholder: ys,
    }
def fill_feed_dict_with_some_ex(self, data_set, target_indices):
    """Feed dict for the subset of examples selected by target_indices."""
    xs = data_set.x[target_indices, :].reshape(len(target_indices), -1)
    ys = data_set.labels[target_indices].reshape(-1)
    return {
        self.input_placeholder: xs,
        self.labels_placeholder: ys,
    }
def fill_feed_dict_with_one_ex(self, data_set, target_idx):
    """Feed dict holding the single example at target_idx (batch of 1)."""
    xs = data_set.x[target_idx, :].reshape(1, -1)
    ys = data_set.labels[target_idx].reshape(-1)
    return {
        self.input_placeholder: xs,
        self.labels_placeholder: ys,
    }
def fill_feed_dict_manual(self, X, Y):
    """Feed dict built directly from raw arrays X (features) and Y (labels),
    e.g. for phantom points not present in any dataset split."""
    Y = np.array(Y)
    xs = np.array(X).reshape(len(Y), -1)
    return {
        self.input_placeholder: xs,
        self.labels_placeholder: Y.reshape(-1),
    }
def minibatch_mean_eval(self, ops, data_set):
    """Evaluate each op in `ops` over the full data_set in minibatches and
    return the per-op means (each op may evaluate to a value or a list)."""
    num_examples = data_set.num_examples
    # Equal-size batches are required so every batch gets the same weight.
    assert num_examples % self.batch_size == 0
    num_iter = int(num_examples / self.batch_size)

    self.reset_datasets()

    ret = []
    for i in xrange(num_iter):
        feed_dict = self.fill_feed_dict_with_batch(data_set)
        ret_temp = self.sess.run(ops, feed_dict=feed_dict)

        if len(ret)==0:
            # First batch: seed each accumulator with batch_value / num_iter.
            # NOTE(review): scalar ops are seeded wrapped in a one-element
            # list here but accumulated with `+=` on a float below — verify
            # this path against how callers consume the result.
            for b in ret_temp:
                if isinstance(b, list):
                    ret.append([c / float(num_iter) for c in b])
                else:
                    ret.append([b / float(num_iter)])
        else:
            # Subsequent batches: accumulate the running mean element-wise.
            for counter, b in enumerate(ret_temp):
                if isinstance(b, list):
                    ret[counter] = [a + (c / float(num_iter)) for (a, c) in zip(ret[counter], b)]
                else:
                    ret[counter] += (b / float(num_iter))

    return ret
def print_model_eval(self):
    """Print train/test loss, accuracy, and gradient/parameter norms over
    the full datasets (minibatched when self.mini_batch is True)."""
    params_val = self.sess.run(self.params)

    if self.mini_batch == True:
        grad_loss_val, loss_no_reg_val, loss_val, train_acc_val = self.minibatch_mean_eval(
            [self.grad_total_loss_op, self.loss_no_reg, self.total_loss, self.accuracy_op],
            self.data_sets.train)

        test_loss_val, test_acc_val = self.minibatch_mean_eval(
            [self.loss_no_reg, self.accuracy_op],
            self.data_sets.test)
    else:
        # Single full-batch evaluation using the cached whole-dataset feeds.
        grad_loss_val, loss_no_reg_val, loss_val, train_acc_val = self.sess.run(
            [self.grad_total_loss_op, self.loss_no_reg, self.total_loss, self.accuracy_op],
            feed_dict=self.all_train_feed_dict)

        test_loss_val, test_acc_val = self.sess.run(
            [self.loss_no_reg, self.accuracy_op],
            feed_dict=self.all_test_feed_dict)

    print('Train loss (w reg) on all data: %s' % loss_val)
    print('Train loss (w/o reg) on all data: %s' % loss_no_reg_val)

    print('Test loss (w/o reg) on all data: %s' % test_loss_val)
    print('Train acc on all data: %s' % train_acc_val)
    print('Test acc on all data:  %s' % test_acc_val)

    print('Norm of the mean of gradients: %s' % np.linalg.norm(np.concatenate(grad_loss_val)))
    print('Norm of the params: %s' % np.linalg.norm(np.concatenate(params_val)))
def retrain(self, num_steps, feed_dict):
    """Run num_steps optimizer updates against a fixed feed dict."""
    for _ in xrange(num_steps):
        self.sess.run(self.train_op, feed_dict=feed_dict)
def update_learning_rate(self, step):
    """Set the learning rate for this step: decayed x0.1 after
    decay_epochs[0] epochs and x0.01 after decay_epochs[1]."""
    assert self.num_train_examples % self.batch_size == 0
    epoch = step // (self.num_train_examples / self.batch_size)

    if epoch < self.decay_epochs[0]:
        multiplier = 1
    elif epoch < self.decay_epochs[1]:
        multiplier = 0.1
    else:
        multiplier = 0.01

    self.sess.run(
        self.update_learning_rate_op,
        feed_dict={self.learning_rate_placeholder: multiplier * self.initial_learning_rate})
def train(self, num_steps,
    iter_to_switch_to_batch=20000,
    iter_to_switch_to_sgd=40000,
    save_checkpoints=True, verbose=True):
    """
    Trains a model for a specified number of steps.

    Three phases: minibatch Adam until iter_to_switch_to_batch, then
    full-batch Adam until iter_to_switch_to_sgd, then full-batch SGD.
    Checkpoints every 100000 steps and at the end when save_checkpoints.
    """
    if verbose: print('Training for %s steps' % num_steps)

    sess = self.sess

    for step in xrange(num_steps):
        self.update_learning_rate(step)

        start_time = time.time()

        if step < iter_to_switch_to_batch:
            # Phase 1: stochastic minibatches with the Adam op.
            feed_dict = self.fill_feed_dict_with_batch(self.data_sets.train)
            _, loss_val = sess.run([self.train_op, self.total_loss], feed_dict=feed_dict)

        elif step < iter_to_switch_to_sgd:
            # Phase 2: full-batch updates with the Adam op.
            feed_dict = self.all_train_feed_dict
            _, loss_val = sess.run([self.train_op, self.total_loss], feed_dict=feed_dict)

        else:
            # Phase 3: full-batch gradient descent.
            feed_dict = self.all_train_feed_dict
            _, loss_val = sess.run([self.train_sgd_op, self.total_loss], feed_dict=feed_dict)

        duration = time.time() - start_time

        if verbose:
            if step % 1000 == 0:
                # Print status to stdout.
                print('Step %d: loss = %.8f (%.3f sec)' % (step, loss_val, duration))

        # Save a checkpoint and evaluate the model periodically.
        if (step + 1) % 100000 == 0 or (step + 1) == num_steps:
            if save_checkpoints: self.saver.save(sess, self.checkpoint_file, global_step=step)
            if verbose: self.print_model_eval()
def load_checkpoint(self, iter_to_load, do_checks=True):
    """Restore parameters saved at iteration iter_to_load; optionally
    print a model evaluation as a sanity check."""
    path = "%s-%s" % (self.checkpoint_file, iter_to_load)
    self.saver.restore(self.sess, path)
    if do_checks:
        print('Model %s loaded. Sanity checks ---' % path)
        self.print_model_eval()
def get_train_op(self, total_loss, global_step, learning_rate):
    """
    Return train_op: one Adam minimization step on total_loss.
    """
    return tf.train.AdamOptimizer(learning_rate).minimize(
        total_loss, global_step=global_step)
def get_train_sgd_op(self, total_loss, global_step, learning_rate=0.001):
    """
    Return train_sgd_op: one plain gradient-descent step on total_loss.
    """
    return tf.train.GradientDescentOptimizer(learning_rate).minimize(
        total_loss, global_step=global_step)
def get_accuracy_op(self, logits, labels):
    """Fraction of examples whose top-1 prediction matches the label.

    Args:
        logits: float tensor [batch_size, NUM_CLASSES].
        labels: int32 tensor [batch_size], values in [0, NUM_CLASSES).

    Returns:
        Scalar tensor: correct count divided by batch size.
    """
    hits = tf.nn.in_top_k(logits, labels, 1)
    return tf.reduce_sum(tf.cast(hits, tf.int32)) / tf.shape(labels)[0]
def loss(self, logits, labels):
    """Softmax cross-entropy loss.

    Returns (total_loss, loss_no_reg, indiv_loss_no_reg); total_loss also
    sums everything previously registered in the 'losses' collection
    (e.g. weight-decay terms), and this method itself registers
    loss_no_reg there as a side effect.
    """
    labels = tf.one_hot(labels, depth=self.num_classes)
    # correct_prob = tf.reduce_sum(tf.multiply(labels, tf.nn.softmax(logits)), reduction_indices=1)
    cross_entropy = - tf.reduce_sum(tf.multiply(labels, tf.nn.log_softmax(logits)), reduction_indices=1)

    indiv_loss_no_reg = cross_entropy
    loss_no_reg = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    tf.add_to_collection('losses', loss_no_reg)

    total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')

    return total_loss, loss_no_reg, indiv_loss_no_reg
def adversarial_loss(self, logits, labels):
    """Mean of log(1 - p(true class | x)) over the batch.

    Returns (mean_loss, per_example_loss). Unregularized by design.
    """
    # Computes sum of log(1 - p(y = true|x))
    # No regularization (because this is meant to be computed on the test data)
    labels = tf.one_hot(labels, depth=self.num_classes)
    wrong_labels = (labels - 1) * -1 # Flips 0s and 1s
    wrong_labels_bool = tf.reshape(tf.cast(wrong_labels, tf.bool), [-1, self.num_classes])
    # Keep only the logits of the wrong classes for each example.
    wrong_logits = tf.reshape(tf.boolean_mask(logits, wrong_labels_bool), [-1, self.num_classes - 1])

    # logsumexp(wrong) - logsumexp(all) == log(1 - softmax(true)).
    indiv_adversarial_loss = tf.reduce_logsumexp(wrong_logits, reduction_indices=1) - tf.reduce_logsumexp(logits, reduction_indices=1)
    adversarial_loss = tf.reduce_mean(indiv_adversarial_loss)

    return adversarial_loss, indiv_adversarial_loss #, indiv_wrong_prob
def update_feed_dict_with_v_placeholder(self, feed_dict, vec):
    """Bind each block of vec to its matching v_placeholder entry.

    Mutates and returns feed_dict.
    """
    for placeholder, block in zip(self.v_placeholder, vec):
        feed_dict[placeholder] = block
    return feed_dict
def get_inverse_hvp(self, v, approx_type='cg', approx_params=None, verbose=True):
    """Dispatch inverse Hessian-vector-product computation to the chosen
    approximation ('cg' for Newton-CG, 'lissa' for the LiSSA recursion)."""
    assert approx_type in ['cg', 'lissa']
    if approx_type == 'cg':
        return self.get_inverse_hvp_cg(v, verbose)
    return self.get_inverse_hvp_lissa(v, **approx_params)
def get_inverse_hvp_lissa(self, v,
    batch_size=None,
    scale=10, damping=0.0, num_samples=1, recursion_depth=10000):
    """
    This uses mini-batching; uncomment code for the single sample case.

    LiSSA recursion: repeatedly applies
        estimate <- v + (I - H/scale) * estimate
    so that estimate/scale converges to H^{-1} v; results from
    num_samples independent runs are averaged.
    """
    inverse_hvp = None
    print_iter = recursion_depth / 10

    for i in range(num_samples):
        # samples = np.random.choice(self.num_train_examples, size=recursion_depth)

        cur_estimate = v

        for j in range(recursion_depth):

            # feed_dict = fill_feed_dict_with_one_ex(
            #   data_set,
            #   images_placeholder,
            #   labels_placeholder,
            #   samples[j])
            feed_dict = self.fill_feed_dict_with_batch(self.data_sets.train, batch_size=batch_size)

            feed_dict = self.update_feed_dict_with_v_placeholder(feed_dict, cur_estimate)
            hessian_vector_val = self.sess.run(self.hessian_vector, feed_dict=feed_dict)
            cur_estimate = [a + (1-damping) * b - c/scale for (a,b,c) in zip(v, cur_estimate, hessian_vector_val)]

            # Update: v + (I - Hessian_at_x) * cur_estimate
            if (j % print_iter == 0) or (j == recursion_depth - 1):
                print("Recursion at depth %s: norm is %.8lf" % (j, np.linalg.norm(np.concatenate(cur_estimate))))
                feed_dict = self.update_feed_dict_with_v_placeholder(feed_dict, cur_estimate)

        # Undo the 1/scale factor baked into the recursion before averaging.
        if inverse_hvp is None:
            inverse_hvp = [b/scale for b in cur_estimate]
        else:
            inverse_hvp = [a + b/scale for (a, b) in zip(inverse_hvp, cur_estimate)]

    inverse_hvp = [a/num_samples for a in inverse_hvp]
    return inverse_hvp
def minibatch_hessian_vector_val(self, v):
    """Compute (H + damping*I) v, with H the training-loss Hessian,
    averaged over the full training set (minibatched if self.mini_batch)."""
    num_examples = self.num_train_examples
    if self.mini_batch == True:
        batch_size = 100
        assert num_examples % batch_size == 0
    else:
        batch_size = self.num_train_examples

    num_iter = int(num_examples / batch_size)

    self.reset_datasets()
    hessian_vector_val = None
    for i in xrange(num_iter):
        feed_dict = self.fill_feed_dict_with_batch(self.data_sets.train, batch_size=batch_size)
        # Can optimize this
        feed_dict = self.update_feed_dict_with_v_placeholder(feed_dict, v)
        hessian_vector_val_temp = self.sess.run(self.hessian_vector, feed_dict=feed_dict)
        # Running mean across batches, block by block.
        if hessian_vector_val is None:
            hessian_vector_val = [b / float(num_iter) for b in hessian_vector_val_temp]
        else:
            hessian_vector_val = [a + (b / float(num_iter)) for (a,b) in zip(hessian_vector_val, hessian_vector_val_temp)]

    # Damping regularizes the Hessian so CG/LiSSA stay well-conditioned.
    hessian_vector_val = [a + self.damping * b for (a,b) in zip(hessian_vector_val, v)]

    return hessian_vector_val
def get_fmin_loss_fn(self, v):
    """Return the CG objective f(x) = 0.5 x^T H x - v^T x, whose minimizer
    is H^{-1} v."""
    def get_fmin_loss(x):
        hvp = self.minibatch_hessian_vector_val(self.vec_to_list(x))
        return 0.5 * np.dot(np.concatenate(hvp), x) - np.dot(np.concatenate(v), x)

    return get_fmin_loss
def get_fmin_grad_fn(self, v):
    """Return the gradient of the CG objective: grad f(x) = H x - v."""
    def get_fmin_grad(x):
        hvp = self.minibatch_hessian_vector_val(self.vec_to_list(x))
        return np.concatenate(hvp) - np.concatenate(v)

    return get_fmin_grad
def get_fmin_hvp(self, x, p):
    """Hessian-vector product H p for fmin_ncg (x is required by the
    fhess_p signature but unused — H does not depend on the iterate)."""
    return np.concatenate(self.minibatch_hessian_vector_val(self.vec_to_list(p)))
def get_cg_callback(self, v, verbose):
    """Build a per-iteration callback for fmin_ncg that logs the objective
    and a sample predicted influence (on hard-coded train index 5)."""
    fmin_loss_fn = self.get_fmin_loss_fn(v)

    def fmin_loss_split(x):
        # Return the quadratic and linear terms of the objective separately.
        hessian_vector_val = self.minibatch_hessian_vector_val(self.vec_to_list(x))
        return 0.5 * np.dot(np.concatenate(hessian_vector_val), x), -np.dot(np.concatenate(v), x)

    def cg_callback(x):
        # x is current params
        v = self.vec_to_list(x)
        idx_to_remove = 5

        single_train_feed_dict = self.fill_feed_dict_with_one_ex(self.data_sets.train, idx_to_remove)
        train_grad_loss_val = self.sess.run(self.grad_total_loss_op, feed_dict=single_train_feed_dict)
        predicted_loss_diff = np.dot(np.concatenate(v), np.concatenate(train_grad_loss_val)) / self.num_train_examples

        if verbose:
            print('Function value: %s' % fmin_loss_fn(x))
            quad, lin = fmin_loss_split(x)
            print('Split function value: %s, %s' % (quad, lin))
            print('Predicted loss diff on train_idx %s: %s' % (idx_to_remove, predicted_loss_diff))

    return cg_callback
def get_inverse_hvp_cg(self, v, verbose):
    """Solve H x = v by minimizing f(x) = 0.5 x^T H x - v^T x with
    Newton-CG; returns the solution split back into parameter blocks."""
    solution = fmin_ncg(
        f=self.get_fmin_loss_fn(v),
        x0=np.concatenate(v),
        fprime=self.get_fmin_grad_fn(v),
        fhess_p=self.get_fmin_hvp,
        callback=self.get_cg_callback(v, verbose),
        avextol=1e-8,
        maxiter=100)

    return self.vec_to_list(solution)
def get_test_grad_loss_no_reg_val(self, test_indices, batch_size=100, loss_type='normal_loss'):
    """Mean gradient of the (unregularized or adversarial) test loss.

    Args:
        test_indices: indices into the test set, or None to average over
            the full test set via minibatch_mean_eval.
        batch_size: examples evaluated per sess.run call.
        loss_type: 'normal_loss' or 'adversarial_loss'.

    Returns:
        List of arrays (one per parameter block) holding the mean gradient.

    Raises:
        ValueError: if loss_type is not recognized.
    """
    if loss_type == 'normal_loss':
        op = self.grad_loss_no_reg_op
    elif loss_type == 'adversarial_loss':
        op = self.grad_adversarial_loss_op
    else:
        # Fixed: the original used Python-2-only `raise ValueError, msg`
        # syntax, which is a SyntaxError under Python 3.
        raise ValueError('Loss must be specified')

    if test_indices is not None:
        num_iter = int(np.ceil(len(test_indices) / batch_size))

        test_grad_loss_no_reg_val = None
        for i in range(num_iter):
            start = i * batch_size
            end = int(min((i+1) * batch_size, len(test_indices)))

            test_feed_dict = self.fill_feed_dict_with_some_ex(self.data_sets.test, test_indices[start:end])

            temp = self.sess.run(op, feed_dict=test_feed_dict)

            # Weight each batch by its size so the final result is a true mean
            # even when the last batch is short.
            if test_grad_loss_no_reg_val is None:
                test_grad_loss_no_reg_val = [a * (end-start) for a in temp]
            else:
                test_grad_loss_no_reg_val = [a + b * (end-start) for (a, b) in zip(test_grad_loss_no_reg_val, temp)]

        test_grad_loss_no_reg_val = [a/len(test_indices) for a in test_grad_loss_no_reg_val]

    else:
        test_grad_loss_no_reg_val = self.minibatch_mean_eval([op], self.data_sets.test)[0]

    return test_grad_loss_no_reg_val
def get_influence_on_test_loss(self, test_indices, train_idx,
    approx_type='cg', approx_params=None, force_refresh=True, test_description=None,
    loss_type='normal_loss',
    X=None, Y=None):
    """Predicted change in test loss from removing each candidate train point.

    Either supply `train_idx` (indices into the training set), or pass
    train_idx=None together with X, Y to score phantom points that are not
    in the training set. The inverse HVP is cached on disk per
    (model, approx_type, loss_type, test_description).

    Returns:
        np.array of predicted loss differences, one per candidate point.

    Raises:
        ValueError: on inconsistent train_idx / X / Y combinations.
    """
    # If train_idx is None then use X and Y (phantom points)
    # Need to make sure test_idx stays consistent between models
    # because mini-batching permutes dataset order

    # Fixed: the original used Python-2-only `raise ValueError, msg` syntax
    # (a SyntaxError under Python 3) for the three checks below.
    if train_idx is None:
        if (X is None) or (Y is None):
            raise ValueError('X and Y must be specified if using phantom points.')
        if X.shape[0] != len(Y):
            raise ValueError('X and Y must have the same length.')
    else:
        if (X is not None) or (Y is not None):
            raise ValueError('X and Y cannot be specified if train_idx is specified.')

    test_grad_loss_no_reg_val = self.get_test_grad_loss_no_reg_val(test_indices, loss_type=loss_type)

    print('Norm of test gradient: %s' % np.linalg.norm(np.concatenate(test_grad_loss_no_reg_val)))

    start_time = time.time()

    if test_description is None:
        test_description = test_indices

    approx_filename = os.path.join(self.train_dir, '%s-%s-%s-test-%s.npz' % (self.model_name, approx_type, loss_type, test_description))
    if os.path.exists(approx_filename) and force_refresh == False:
        # Reuse the cached inverse HVP instead of recomputing it.
        inverse_hvp = list(np.load(approx_filename)['inverse_hvp'])
        print('Loaded inverse HVP from %s' % approx_filename)
    else:
        inverse_hvp = self.get_inverse_hvp(
            test_grad_loss_no_reg_val,
            approx_type,
            approx_params)
        np.savez(approx_filename, inverse_hvp=inverse_hvp)
        print('Saved inverse HVP to %s' % approx_filename)

    duration = time.time() - start_time
    print('Inverse HVP took %s sec' % duration)

    start_time = time.time()
    if train_idx is None:
        # Phantom points: influence = -(1/n) * grad_loss(z)^T H^{-1} grad_test.
        num_to_remove = len(Y)
        predicted_loss_diffs = np.zeros([num_to_remove])
        for counter in np.arange(num_to_remove):
            single_train_feed_dict = self.fill_feed_dict_manual(X[counter, :], [Y[counter]])
            train_grad_loss_val = self.sess.run(self.grad_total_loss_op, feed_dict=single_train_feed_dict)
            predicted_loss_diffs[counter] = np.dot(np.concatenate(inverse_hvp), np.concatenate(train_grad_loss_val)) / self.num_train_examples

    else:
        num_to_remove = len(train_idx)
        predicted_loss_diffs = np.zeros([num_to_remove])
        for counter, idx_to_remove in enumerate(train_idx):
            single_train_feed_dict = self.fill_feed_dict_with_one_ex(self.data_sets.train, idx_to_remove)
            train_grad_loss_val = self.sess.run(self.grad_total_loss_op, feed_dict=single_train_feed_dict)
            predicted_loss_diffs[counter] = np.dot(np.concatenate(inverse_hvp), np.concatenate(train_grad_loss_val)) / self.num_train_examples

    duration = time.time() - start_time
    print('Multiplying by %s train examples took %s sec' % (num_to_remove, duration))

    return predicted_loss_diffs
def find_eigvals_of_hessian(self, num_iter=100, num_prints=10):
    """Estimate the largest and smallest eigenvalues of the training-loss
    Hessian via power iteration, then shifted power iteration.

    Returns:
        (largest_eig, smallest_eig)
    """
    # Setup
    print_iterations = num_iter / num_prints
    feed_dict = self.fill_feed_dict_with_one_ex(self.data_sets.train, 0)

    # Initialize a random starting vector with the same block structure
    # as the parameters (taken from a gradient evaluation).
    grad_loss_val = self.sess.run(self.grad_total_loss_op, feed_dict=feed_dict)
    initial_v = []
    for a in grad_loss_val:
        initial_v.append(np.random.random(a.shape))
    initial_v, _ = normalize_vector(initial_v)

    # Do power iteration to find largest eigenvalue.
    # Fixed: the original assigned `largest_eig = norm_val` with `norm_val`
    # never defined (NameError at runtime) — the iteration loop that
    # produces it was missing and is restored below.
    print('Starting power iteration to find largest eigenvalue...')
    cur_estimate = initial_v
    norm_val = None
    for i in range(num_iter):
        cur_estimate, norm_val = normalize_vector(cur_estimate)
        cur_estimate = self.minibatch_hessian_vector_val(cur_estimate)
        if i % print_iterations == 0:
            print(norm_val)
    largest_eig = norm_val
    print('Largest eigenvalue is %s' % largest_eig)

    # Do power iteration on (H - largest_eig * I) to find smallest eigenvalue.
    print('Starting power iteration to find smallest eigenvalue...')
    cur_estimate = initial_v
    for i in range(num_iter):
        cur_estimate, norm_val = normalize_vector(cur_estimate)
        hessian_vector_val = self.minibatch_hessian_vector_val(cur_estimate)
        new_cur_estimate = [a - largest_eig * b for (a,b) in zip(hessian_vector_val, cur_estimate)]

        if i % print_iterations == 0:
            print(-norm_val + largest_eig)
            dotp = np.dot(np.concatenate(new_cur_estimate), np.concatenate(cur_estimate))
            print("dot: %s" % dotp)
        cur_estimate = new_cur_estimate

    smallest_eig = -norm_val + largest_eig
    assert dotp < 0, "Eigenvalue calc failed to find largest eigenvalue"

    print('Largest eigenvalue is %s' % largest_eig)
    print('Smallest eigenvalue is %s' % smallest_eig)
    return largest_eig, smallest_eig
def get_grad_of_influence_wrt_input(self, train_indices, test_indices,
    approx_type='cg', approx_params=None, force_refresh=True, verbose=True, test_description=None,
    loss_type='normal_loss'):
    """
    If the loss goes up when you remove a point, then it was a helpful point.
    So positive influence = helpful.
    If we move in the direction of the gradient, we make the influence even more positive,
    so even more helpful.
    Thus if we want to make the test point more wrong, we have to move in the opposite direction.

    Returns an array [len(train_indices), input_dim] where row i is the
    gradient of the influence of train example train_indices[i] with
    respect to that example's input features.
    """

    # Calculate v_placeholder (gradient of loss at test point)
    test_grad_loss_no_reg_val = self.get_test_grad_loss_no_reg_val(test_indices, loss_type=loss_type)

    if verbose: print('Norm of test gradient: %s' % np.linalg.norm(np.concatenate(test_grad_loss_no_reg_val)))

    start_time = time.time()

    if test_description is None:
        test_description = test_indices

    # Same on-disk cache key as get_influence_on_test_loss, so the two
    # methods can share a computed inverse HVP.
    approx_filename = os.path.join(self.train_dir, '%s-%s-%s-test-%s.npz' % (self.model_name, approx_type, loss_type, test_description))

    if os.path.exists(approx_filename) and force_refresh == False:
        inverse_hvp = list(np.load(approx_filename)['inverse_hvp'])
        if verbose: print('Loaded inverse HVP from %s' % approx_filename)
    else:
        inverse_hvp = self.get_inverse_hvp(
            test_grad_loss_no_reg_val,
            approx_type,
            approx_params,
            verbose=verbose)
        np.savez(approx_filename, inverse_hvp=inverse_hvp)
        if verbose: print('Saved inverse HVP to %s' % approx_filename)

    duration = time.time() - start_time
    if verbose: print('Inverse HVP took %s sec' % duration)

    grad_influence_wrt_input_val = None

    for counter, train_idx in enumerate(train_indices):
        # Put in the train example in the feed dict
        grad_influence_feed_dict = self.fill_feed_dict_with_one_ex(
            self.data_sets.train,
            train_idx)

        self.update_feed_dict_with_v_placeholder(grad_influence_feed_dict, inverse_hvp)

        # Run the grad op with the feed dict
        current_grad_influence_wrt_input_val = self.sess.run(self.grad_influence_wrt_input_op, feed_dict=grad_influence_feed_dict)[0][0, :]

        # Lazily allocate the output once the per-example width is known.
        if grad_influence_wrt_input_val is None:
            grad_influence_wrt_input_val = np.zeros([len(train_indices), len(current_grad_influence_wrt_input_val)])

        grad_influence_wrt_input_val[counter, :] = current_grad_influence_wrt_input_val

    return grad_influence_wrt_input_val
def update_train_x(self, new_train_x):
    """Swap in new training features (labels unchanged) and refresh the
    cached full-train feed dict; shape must match the current split."""
    assert np.all(new_train_x.shape == self.data_sets.train.x.shape)
    replacement = DataSet(new_train_x, np.copy(self.data_sets.train.labels))
    self.data_sets = base.Datasets(
        train=replacement,
        validation=self.data_sets.validation,
        test=self.data_sets.test)
    self.all_train_feed_dict = self.fill_feed_dict_with_all_ex(self.data_sets.train)
    self.reset_datasets()
def update_train_x_y(self, new_train_x, new_train_y):
    """Replace the training split entirely and refresh cached feeds and
    the example counter."""
    replacement = DataSet(new_train_x, new_train_y)
    self.data_sets = base.Datasets(
        train=replacement,
        validation=self.data_sets.validation,
        test=self.data_sets.test)
    self.all_train_feed_dict = self.fill_feed_dict_with_all_ex(self.data_sets.train)
    self.num_train_examples = len(new_train_y)
    self.reset_datasets()
def update_test_x_y(self, new_test_x, new_test_y):
    """Replace the test split entirely and refresh cached feeds and
    the example counter."""
    replacement = DataSet(new_test_x, new_test_y)
    self.data_sets = base.Datasets(
        train=self.data_sets.train,
        validation=self.data_sets.validation,
        test=replacement)
    self.all_test_feed_dict = self.fill_feed_dict_with_all_ex(self.data_sets.test)
    self.num_test_examples = len(new_test_y)
    self.reset_datasets()
| 33,790 | 39.565426 | 158 | py |
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 3/influence/inceptionModel.py | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import abc
import sys
import numpy as np
import pandas as pd
from sklearn import linear_model, preprocessing, cluster
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
import os.path
import time
import tensorflow as tf
import math
from influence.hessians import hessians
from influence.genericNeuralNet import GenericNeuralNet, variable, variable_with_weight_decay
from influence.inception_v3 import InceptionV3
from keras.layers import Flatten
from keras.layers import AveragePooling2D
from keras.utils.data_utils import get_file
from keras import backend as K
class BinaryInceptionModel(GenericNeuralNet):
def __init__(self, img_side, num_channels, weight_decay, **kwargs):
self.weight_decay = weight_decay
self.img_side = img_side
self.num_channels = num_channels
self.input_dim = img_side * img_side * num_channels
self.num_features = 2048 # Hardcoded for inception. For some reason Flatten() doesn't register num_features.
super(BinaryInceptionModel, self).__init__(**kwargs)
self.load_inception_weights()
# Do we need to set trainable to False?
# We might be unnecessarily blowing up the graph by including all of the train operations
# needed for the inception network.
self.set_params_op = self.set_params()
C = 1.0 / ((self.num_train_examples) * self.weight_decay)
self.sklearn_model = linear_model.LogisticRegression(
C=C,
tol=1e-8,
fit_intercept=False,
solver='lbfgs',
# multi_class='multinomial',
warm_start=True,
max_iter=1000)
C_minus_one = 1.0 / ((self.num_train_examples - 1) * self.weight_decay)
self.sklearn_model_minus_one = linear_model.LogisticRegression(
C=C_minus_one,
tol=1e-8,
fit_intercept=False,
solver='lbfgs',
# multi_class='multinomial',
warm_start=True,
max_iter=1000)
def get_all_params(self):
all_params = []
for layer in ['softmax_linear']:
# for var_name in ['weights', 'biases']:
for var_name in ['weights']:
temp_tensor = tf.get_default_graph().get_tensor_by_name("%s/%s:0" % (layer, var_name))
all_params.append(temp_tensor)
return all_params
def placeholder_inputs(self):
input_placeholder = tf.placeholder(
tf.float32,
shape=(None, self.input_dim),
name='input_placeholder')
labels_placeholder = tf.placeholder(
tf.int32,
shape=(None),
name='labels_placeholder')
return input_placeholder, labels_placeholder
def fill_feed_dict_with_all_ex(self, data_set):
feed_dict = {
self.input_placeholder: data_set.x,
self.labels_placeholder: data_set.labels,
K.learning_phase(): 0
}
return feed_dict
def fill_feed_dict_with_all_but_one_ex(self, data_set, idx_to_remove):
num_examples = data_set.x.shape[0]
idx = np.array([True] * num_examples, dtype=bool)
idx[idx_to_remove] = False
feed_dict = {
self.input_placeholder: data_set.x[idx, :],
self.labels_placeholder: data_set.labels[idx],
K.learning_phase(): 0
}
return feed_dict
def fill_feed_dict_with_batch(self, data_set, batch_size=0):
if batch_size is None:
return self.fill_feed_dict_with_all_ex(data_set)
elif batch_size == 0:
batch_size = self.batch_size
input_feed, labels_feed = data_set.next_batch(batch_size)
feed_dict = {
self.input_placeholder: input_feed,
self.labels_placeholder: labels_feed,
K.learning_phase(): 0
}
return feed_dict
def fill_feed_dict_with_some_ex(self, data_set, target_indices):
input_feed = data_set.x[target_indices, :].reshape(len(target_indices), -1)
labels_feed = data_set.labels[target_indices].reshape(-1)
feed_dict = {
self.input_placeholder: input_feed,
self.labels_placeholder: labels_feed,
K.learning_phase(): 0
}
return feed_dict
def fill_feed_dict_with_one_ex(self, data_set, target_idx):
input_feed = data_set.x[target_idx, :].reshape(1, -1)
labels_feed = data_set.labels[target_idx].reshape(1)
feed_dict = {
self.input_placeholder: input_feed,
self.labels_placeholder: labels_feed,
K.learning_phase(): 0
}
return feed_dict
def load_inception_weights(self):
# Replace this with a local copy for reproducibility
# TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
# weights_path = get_file('inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
# TF_WEIGHTS_PATH_NO_TOP,
# cache_subdir='models',
# md5_hash='bcbd6486424b2319ff4ef7d526e38f63')
weights_path = 'inception/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
self.inception_model.load_weights(weights_path)
def inference(self, input):
reshaped_input = tf.reshape(input, [-1, self.img_side, self.img_side, self.num_channels])
self.inception_model = InceptionV3(include_top=False, weights='imagenet', input_tensor=reshaped_input)
raw_inception_features = self.inception_model.output
pooled_inception_features = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(raw_inception_features)
self.inception_features = Flatten(name='flatten')(pooled_inception_features)
with tf.variable_scope('softmax_linear'):
weights = variable_with_weight_decay(
'weights',
[self.num_features],
stddev=1.0 / math.sqrt(float(self.num_features)),
wd=self.weight_decay)
logits = tf.matmul(self.inception_features, tf.reshape(weights, [-1, 1]))
zeros = tf.zeros_like(logits)
logits_with_zeros = tf.concat([zeros, logits], 1)
self.weights = weights
return logits_with_zeros
def predictions(self, logits):
preds = tf.nn.softmax(logits, name='preds')
return preds
def set_params(self):
# See if we can automatically infer weight shape
self.W_placeholder = tf.placeholder(
tf.float32,
shape=[self.num_features],
name='W_placeholder')
set_weights = tf.assign(self.weights, self.W_placeholder, validate_shape=True)
return [set_weights]
def retrain(self, num_steps, feed_dict):
self.train_with_LBFGS(
feed_dict=feed_dict,
save_checkpoints=False,
verbose=False)
def train(self, num_steps=None,
iter_to_switch_to_batch=None,
iter_to_switch_to_sgd=None,
save_checkpoints=True, verbose=True):
self.train_with_LBFGS(
feed_dict=self.all_train_feed_dict,
save_checkpoints=save_checkpoints,
verbose=verbose)
def train_with_SGD(self, **kwargs):
super(BinaryInceptionModel, self).train(**kwargs)
def minibatch_inception_features(self, feed_dict):
num_examples = feed_dict[self.input_placeholder].shape[0]
batch_size = 100
num_iter = int(np.ceil(num_examples / batch_size))
ret = np.zeros([num_examples, self.num_features])
batch_feed_dict = {}
batch_feed_dict[K.learning_phase()] = 0
for i in xrange(num_iter):
start = i * batch_size
end = (i+1) * batch_size
if end > num_examples:
end = num_examples
batch_feed_dict[self.input_placeholder] = feed_dict[self.input_placeholder][start:end]
batch_feed_dict[self.labels_placeholder] = feed_dict[self.labels_placeholder][start:end]
ret[start:end, :] = self.sess.run(self.inception_features, feed_dict=batch_feed_dict)
return ret
def train_with_LBFGS(self, feed_dict, save_checkpoints=True, verbose=True):
# More sanity checks to see if predictions are the same?
# X_train = feed_dict[self.input_placeholder]
# X_train = self.sess.run(self.inception_features, feed_dict=feed_dict)
X_train = self.minibatch_inception_features(feed_dict)
Y_train = feed_dict[self.labels_placeholder]
num_train_examples = len(Y_train)
assert len(Y_train.shape) == 1
assert X_train.shape[0] == Y_train.shape[0]
if num_train_examples == self.num_train_examples:
print('Using normal model')
model = self.sklearn_model
elif num_train_examples == self.num_train_examples - 1:
print('Using model minus one')
model = self.sklearn_model_minus_one
else:
raise ValueError, "feed_dict has incorrect number of training examples"
model.fit(X_train, Y_train)
# sklearn returns coefficients in shape num_classes x num_features
# whereas our weights are defined as num_features x num_classes
# so we have to tranpose them first.
W = np.reshape(model.coef_.T, -1)
# b = model.intercept_
params_feed_dict = {}
params_feed_dict[self.W_placeholder] = W
# params_feed_dict[self.b_placeholder] = b
self.sess.run(self.set_params_op, feed_dict=params_feed_dict)
if save_checkpoints: self.saver.save(self.sess, self.checkpoint_file, global_step=0)
if verbose:
print('LBFGS training took %s iter.' % model.n_iter_)
print('After training with LBFGS: ')
self.print_model_eval()
def load_weights_from_disk(self, weights_filename, do_check=True, do_save=True):
W = np.load('%s' % weights_filename)
params_feed_dict = {}
params_feed_dict[self.W_placeholder] = W
self.sess.run(self.set_params_op, feed_dict=params_feed_dict)
if do_save: self.saver.save(self.sess, self.checkpoint_file, global_step=0)
print('Loaded weights from disk.')
if do_check: self.print_model_eval()
def get_hessian(self):
H = self.sess.run(self.hessians_op)
print(H.shape)
# Maybe update Hessian every time main train routine is called?
| 11,113 | 34.059937 | 165 | py |
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 10/main_adjointMethods.py | import numpy as np
import numpy.random as npr
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
import tensorflow.contrib.eager as tfe
from main_neural_ode import NeuralODE
### tf.enable_eager_execution must be called at program startup. Please restart your kernel.
# Global plot styling for every figure produced by this script.
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams.update({'font.size': 18})
keras = tf.keras
tfe.enable_eager_execution()
### Initialize parameters
# Integration grid: 200 time points spanning [0, 25].
t = np.linspace(0, 25, 200)
# Initial state h(0) and the dynamics matrix W of the linear ODE dh/dt = h @ W.
h0 = tf.to_float([[1., 0.]])
W = tf.to_float([[-0.1, 1.0], [-0.2, -0.1]])
### Define the Computational Graph
class Lambda(tf.keras.Model):
    """Time-independent linear ODE dynamics: dh/dt = h @ W."""

    def call(self, inputs, **kwargs):
        # inputs is a (time, state) pair; the time component is unused because
        # the dynamics are autonomous.
        t, h = inputs
        return tf.matmul(h, W)
# Build the fixed-grid ODE solver over the Lambda dynamics and run one forward
# pass, keeping the per-step states to draw the initial (untrained) trajectory.
neural_ode = NeuralODE(Lambda(), t=t)
hN, states_history = neural_ode.forward(h0, return_states="numpy")
initial_path = np.concatenate(states_history)
### This is a function to plot the trajectory
def plot_trajectory(trajectories, fig=True):
    """Plot 2-D trajectories on equal axes.

    Each element of ``trajectories`` is either a bare (N, 2) array or a
    ``(color, label, array)`` tuple. When ``fig`` is true a new 5x5 figure is
    created first.
    """
    if fig:
        plt.figure(figsize=(5, 5))
    for path in trajectories:
        # fix: isinstance() instead of `type(path) == tuple` — idiomatic and
        # also accepts tuple subclasses (e.g. namedtuples).
        if isinstance(path, tuple):
            c, label, path = path
            plt.plot(*path.T, c, lw=2, label=label)
        else:
            plt.plot(*path.T, lw=2)
    plt.axis("equal")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.legend()
plot_trajectory([initial_path])
### Define model parameters
# Momentum SGD over the initial state h0 only; hN_target is the desired
# trajectory endpoint used by the squared-error loss below.
optimizer = tf.train.MomentumOptimizer(learning_rate=1e-2, momentum=0.95)
h0_var = tf.contrib.eager.Variable(h0)
hN_target = tf.to_float([[0., 0.5]])
def compute_gradients_and_update():
    """One optimization step: forward pass, adjoint backward, update h0.

    Returns the scalar squared-error loss between hN and hN_target.
    """
    with tf.GradientTape() as g:
        hN = neural_ode.forward(h0_var)
        g.watch(hN)
        loss = tf.reduce_sum((hN_target - hN) ** 2)
    dLoss = g.gradient(loss, hN)  # equivalent to 2 * (hN - hN_target) up to sign
    # adjoint backward pass gives gradients w.r.t. the initial state (dfdh0)
    h0_reconstruction, dfdh0, dWeights = neural_ode.backward(hN, dLoss)
    optimizer.apply_gradients(zip([dfdh0], [h0_var]))
    return loss
### Compile the EAGER graph to static (this will be much faster)
compute_gradients_and_update = tfe.defun(compute_gradients_and_update)

### Show the optimization process
# NOTE(review): loss_history is created but never appended to — presumably the
# per-step `loss` was meant to be recorded here.
loss_history = []
for step in tqdm(range(201)):
    with tf.GradientTape() as g:
        hN = neural_ode.forward(h0_var)
        g.watch(hN)
        loss = tf.reduce_sum((hN_target - hN) ** 2)
    dLoss = g.gradient(loss, hN)  # equivalent to 2 * (hN - hN_target) up to sign
    # adjoint backward pass: gradients w.r.t. the initial state and weights
    h0_reconstruction, dfdh0, dWeights = neural_ode.backward(hN, dLoss)
    print(dWeights)
    optimizer.apply_gradients(zip([dfdh0], [h0_var]))
    # every 50 steps, redraw the optimized trajectory against the initial one
    if step % 50 == 0:
        yN, states_history_model = neural_ode.forward(h0_var, return_states="numpy")
        plot_trajectory([
            ("r", "initial", initial_path),
            ("g", "optimized", np.concatenate(states_history_model)),
        ])
        plt.show()
print(dfdh0)
print(h0_var)
| 2,914 | 25.026786 | 92 | py |
IndependentEvaluation | IndependentEvaluation-main/Code For Figure 10/main_neural_ode.py | from typing import Optional, List
import numpy as np
import tensorflow as tf
from tensorflow.python.framework.ops import EagerTensor
import tensorflow.contrib.eager as tfe
keras = tf.keras
def zip_map(zipped, update_op):
    """Apply ``update_op(*elems)`` to every tuple in ``zipped``; return a list."""
    results = []
    for elems in zipped:
        results.append(update_op(*elems))
    return results
def euler_update(h_list, dh_list, dt):
    """Elementwise explicit-Euler update ``h + dt * dh`` over the state lists."""
    def _advance(h, dh):
        return h + tf.cast(dt, h.dtype) * dh

    return zip_map(zip(h_list, dh_list), _advance)
def euler_step(func, dt, state):
    """Advance ``state`` by one Euler step of size ``dt`` using ``func``."""
    derivatives = func(state)
    return euler_update(state, derivatives, dt)
def rk2_step(func, dt, state):
    """One second-order Runge-Kutta (Heun) step of size ``dt``."""
    k1 = func(state)
    k2 = func(euler_update(state, k1, dt))

    def _combine(h, dk1, dk2):
        # trapezoidal average of the two slope estimates
        return h + tf.cast(dt, h.dtype) * (dk1 + dk2) / 2

    return zip_map(zip(state, k1, k2), _combine)
def rk4_step(func, dt, state):
    """One classical fourth-order Runge-Kutta step of size ``dt``."""
    k1 = func(state)
    k2 = func(euler_update(state, k1, dt / 2))
    k3 = func(euler_update(state, k2, dt / 2))
    k4 = func(euler_update(state, k3, dt))

    def _combine(h, dk1, dk2, dk3, dk4):
        # standard RK4 weighting: (k1 + 2*k2 + 2*k3 + k4) / 6
        return h + tf.cast(dt, h.dtype) * (dk1 + 2 * dk2 + 2 * dk3 + dk4) / 6

    return zip_map(zip(state, k1, k2, k3, k4), _combine)
class NeuralODE:
def __init__(
self, model: tf.keras.Model, t=np.linspace(0, 1, 40),
solver=rk4_step
):
self._t = t
self._model = model
self._solver = solver
self._deltas_t = t[1:] - t[:-1]
def forward(self, inputs: tf.Tensor, return_states: Optional[str] = None):
def _forward_dynamics(_state):
"""Used in solver _state == (time, tensor)"""
return [1.0, self._model(inputs=_state)]
states = []
def _append_state(_state):
tensors = _state[1]
if return_states == "numpy":
states.append(tensors.numpy())
elif return_states == "tf":
states.append(tensors)
with tf.name_scope("forward"):
t0 = tf.to_float(self._t[0])
state = [t0, inputs]
_append_state(state)
for dt in self._deltas_t:
state = self._solver(
func=_forward_dynamics, dt=tf.to_float(dt), state=state
)
_append_state(state)
outputs = state[1]
if return_states:
return outputs, states
return outputs
def _backward_dynamics(self, state):
t = state[0]
ht = state[1]
at = -state[2]
with tf.GradientTape() as g:
g.watch(ht)
ht_new = self._model(inputs=[t, ht])
gradients = g.gradient(
target=ht_new, sources=[ht] + self._model.weights,
output_gradients=at
)
return [1.0, ht_new, *gradients]
def backward(self, outputs: tf.Tensor,
output_gradients: Optional[tf.Tensor] = None):
with tf.name_scope("backward"):
grad_weights = [tf.zeros_like(w) for w in self._model.weights]
t0 = tf.to_float(self._t[-1])
if output_gradients is None:
output_gradients = tf.zeros_like(outputs)
state = [t0, outputs, output_gradients, *grad_weights]
for dt in self._deltas_t[::-1]:
state = self._solver(
self._backward_dynamics, dt=-tf.to_float(dt), state=state
)
inputs = state[1]
dLdInputs = state[2]
dLdWeights = state[3:]
return inputs, dLdInputs, dLdWeights
def forward_odeint(
self,
inputs: tf.Tensor,
rtol=1e-6,
atol=1e-6,
method='dopri5',
return_states: bool = False,
):
"""Do forward with adaptive solver"""
with tf.name_scope("forward_odeint"):
t = tf.to_float(self._t)
if not return_states:
t = tf.to_float([t[0], t[-1]])
outputs, info_dict = tf.contrib.integrate.odeint(
func=lambda _y, _t: self._model(inputs=(_t, _y)),
y0=inputs,
t=t,
rtol=rtol,
atol=atol,
method=method,
full_output=True,
)
if return_states:
return outputs, info_dict
return outputs[-1, ...], info_dict
def defun_neural_ode(node: NeuralODE) -> NeuralODE:
    """Compile the node's eager methods into static graphs via tfe.defun."""
    for method_name in ("forward", "backward", "forward_odeint"):
        setattr(node, method_name, tfe.defun(getattr(node, method_name)))
    return node
return node | 4,469 | 28.215686 | 85 | py |
enpheeph | enpheeph-main/src/enpheeph/__init__.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# we ignore mypy/flake8/black as this file is autogenerated
# we ignore this specific error because of AUTOGEN_INIT
# mypy: ignore-errors
# the following flake8 syntax is wrong, as it will be read as generic noqa, but we use
# it to remember the errors appearing in the __init__.py
# additionally this is not caught by pygrep-hooks as it counts only "type: ignore" and
# "noqa", both with starting #
# flake8: noqa: E302,E305
# fmt: off
# this is required so that the mkinit script will generate the init imports only in this
# section
# <AUTOGEN_INIT>
def lazy_import(module_name, submodules, submod_attrs):
    """Build a PEP 562 module-level ``__getattr__`` for lazy imports.

    submodules: names importable as ``module_name.<name>``.
    submod_attrs: maps a submodule name to the attributes it provides;
    accessing such an attribute imports the submodule and returns it.
    If the EAGER_IMPORT environment variable is set, everything is resolved
    immediately instead of lazily.
    """
    import importlib
    import os

    # invert submod_attrs: attribute name -> owning submodule
    attr_to_submod = {
        attr: submod
        for submod, attrs in submod_attrs.items()
        for attr in attrs
    }

    def __getattr__(name):
        if name in submodules:
            value = importlib.import_module(
                '{module_name}.{name}'.format(
                    module_name=module_name, name=name)
            )
        elif name in attr_to_submod:
            owner = attr_to_submod[name]
            module = importlib.import_module(
                '{module_name}.{submodname}'.format(
                    module_name=module_name, submodname=owner)
            )
            value = getattr(module, name)
        else:
            raise AttributeError(
                'No {module_name} attribute {name}'.format(
                    module_name=module_name, name=name))
        # cache so this __getattr__ runs at most once per name
        globals()[name] = value
        return value

    if os.environ.get('EAGER_IMPORT', ''):
        for name in attr_to_submod.values():
            __getattr__(name)
        for attrs in submod_attrs.values():
            for attr in attrs:
                __getattr__(attr)
    return __getattr__
__getattr__ = lazy_import(
__name__,
submodules={
'handlers',
'helpers',
'injections',
'integrations',
'utils',
},
submod_attrs={
'handlers': [
'InjectionHandler',
'LibraryHandlerPluginABC',
'PyTorchHandlerPlugin',
'injectionhandler',
'libraryhandlerpluginabc',
'plugins',
'pytorchhandlerplugin',
],
'helpers': [
'FaultModelABC',
'ModelSummaryABC',
'ModelSummaryTorchinfo',
'abc',
'faultmodel',
'faultmodelabc',
'faultmodels',
'layersummaryabc',
'modelsummaryabc',
'modelsummarytorchinfo',
'plugins',
'sensitivityanalysis',
'summaries',
],
'injections': [
'AutoPyTorchMaskPlugin',
'CSVStoragePluginABC',
'CuPyPyTorchMaskPlugin',
'CustomBase',
'CustomBaseClass',
'DenseSparseOutputPyTorchFault',
'ExperimentRun',
'ExperimentRunBaseMixin',
'ExperimentRunProtocol',
'FPQuantizedOutputPyTorchFault',
'Fault',
'FaultABC',
'FaultBaseMixin',
'FaultProtocol',
'IndexingPlugin',
'IndexingPluginABC',
'Injection',
'InjectionABC',
'InjectionProtocol',
'LowLevelTorchMaskPluginABC',
'Monitor',
'MonitorABC',
'MonitorBaseMixin',
'MonitorProtocol',
'NumPyPyTorchMaskPlugin',
'OutputPyTorchFault',
'OutputPyTorchMonitor',
'PandasCSVStoragePlugin',
'PolymorphicMixin',
'PrunedDenseToSparseWeightPyTorchFault',
'PyTorchInjectionABC',
'PyTorchMaskMixin',
'PyTorchMonitorPostProcessorMixin',
'PyTorchSparseInterfaceMixin',
'PyTorchSparseInterfacePluginABC',
'PyTorchTensorObjectValidatorMixin',
'QuantizedOutputPyTorchFault',
'SNNOutputNorseFault',
'SQLStoragePluginABC',
'SQLiteStoragePlugin',
'Session',
'SessionBaseMixin',
'SessionProtocol',
'StoragePluginABC',
'WeightPyTorchFault',
'abc',
'autopytorchmaskplugin',
'csv',
'csvdataclasses',
'csvstorageplugin',
'csvstoragepluginabc',
'cupypytorchmaskplugin',
'densesparseoutputpytorchfault',
'faultabc',
'fix_pysqlite',
'fpquantizedoutputpytorchfault',
'indexing',
'indexingplugin',
'indexingpluginabc',
'injectionabc',
'lowleveltorchmaskpluginabc',
'mask',
'mixins',
'monitorabc',
'numpypytorchmaskplugin',
'outputpytorchfault',
'outputpytorchmonitor',
'plugins',
'pruneddensetosparseactivationpytorchfault',
'pruneddensetosparseweightpytorchfault',
'pysqlite_begin_emission_fix_on_connect',
'pytorchinjectionabc',
'pytorchmaskmixin',
'pytorchmonitorpostprocessormixin',
'pytorchquantizationmixin',
'pytorchsparseinterfacemixin',
'pytorchsparseinterfacepluginabc',
'pytorchtensorobjectvalidatormixin',
'quantizedoutputpytorchfault',
'set_sqlite_pragma',
'snnoutputnorsefault',
'sparse',
'sql',
'sqlalchemy_begin_emission_pysqlite',
'sqldataclasses',
'sqlitestorageplugin',
'sqlstoragepluginabc',
'sqlutils',
'storage',
'storagepluginabc',
'storagetypings',
'torch_geometric_mean',
'utils',
'weightpytorchfault',
],
'integrations': [
'InjectionCallback',
'injectioncallback',
'pytorchlightning',
],
'utils': [
'ActiveDimensionIndexType',
'AnyIndexType',
'AnyMaskType',
'ArrayType',
'BaseInjectionLocation',
'BitFaultMaskInfo',
'BitFaultValue',
'BitIndexInfo',
'BitWidth',
'DimensionDictType',
'DimensionIndexType',
'DimensionLocationIndexType',
'DimensionLocationMaskType',
'DimensionType',
'Endianness',
'FaultLocation',
'FaultLocationMixin',
'FaultMaskOperation',
'FaultMaskValue',
'HandlerStatus',
'IDGenerator',
'IDGeneratorSubclass',
'Index1DType',
'IndexMultiDType',
'IndexTimeType',
'InjectionLocationABC',
'LocationMixin',
'LocationModuleNameMixin',
'LocationOptionalMixin',
'LowLevelMaskArrayType',
'Mask1DType',
'MaskMultiDType',
'ModelType',
'MonitorLocation',
'MonitorMetric',
'ParameterType',
'PathType',
'ShapeType',
'SkipIfErrorContextManager',
'TensorType',
'camel_to_snake',
'classes',
'compare_version',
'constants',
'dataclasses',
'enums',
'functions',
'get_object_library',
'imports',
'is_module_available',
'typings',
],
},
)
def __dir__():
    """Expose the lazily-importable names to dir() and tab completion."""
    return __all__
__all__ = ['ActiveDimensionIndexType', 'AnyIndexType', 'AnyMaskType',
'ArrayType', 'AutoPyTorchMaskPlugin', 'BaseInjectionLocation',
'BitFaultMaskInfo', 'BitFaultValue', 'BitIndexInfo', 'BitWidth',
'CSVStoragePluginABC', 'CuPyPyTorchMaskPlugin', 'CustomBase',
'CustomBaseClass', 'DenseSparseOutputPyTorchFault',
'DimensionDictType', 'DimensionIndexType',
'DimensionLocationIndexType', 'DimensionLocationMaskType',
'DimensionType', 'Endianness', 'ExperimentRun',
'ExperimentRunBaseMixin', 'ExperimentRunProtocol',
'FPQuantizedOutputPyTorchFault', 'Fault', 'FaultABC',
'FaultBaseMixin', 'FaultLocation', 'FaultLocationMixin',
'FaultMaskOperation', 'FaultMaskValue', 'FaultModelABC',
'FaultProtocol', 'HandlerStatus', 'IDGenerator',
'IDGeneratorSubclass', 'Index1DType', 'IndexMultiDType',
'IndexTimeType', 'IndexingPlugin', 'IndexingPluginABC', 'Injection',
'InjectionABC', 'InjectionCallback', 'InjectionHandler',
'InjectionLocationABC', 'InjectionProtocol',
'LibraryHandlerPluginABC', 'LocationMixin',
'LocationModuleNameMixin', 'LocationOptionalMixin',
'LowLevelMaskArrayType', 'LowLevelTorchMaskPluginABC', 'Mask1DType',
'MaskMultiDType', 'ModelSummaryABC', 'ModelSummaryTorchinfo',
'ModelType', 'Monitor', 'MonitorABC', 'MonitorBaseMixin',
'MonitorLocation', 'MonitorMetric', 'MonitorProtocol',
'NumPyPyTorchMaskPlugin', 'OutputPyTorchFault',
'OutputPyTorchMonitor', 'PandasCSVStoragePlugin', 'ParameterType',
'PathType', 'PolymorphicMixin',
'PrunedDenseToSparseWeightPyTorchFault', 'PyTorchHandlerPlugin',
'PyTorchInjectionABC', 'PyTorchMaskMixin',
'PyTorchMonitorPostProcessorMixin', 'PyTorchSparseInterfaceMixin',
'PyTorchSparseInterfacePluginABC',
'PyTorchTensorObjectValidatorMixin', 'QuantizedOutputPyTorchFault',
'SNNOutputNorseFault', 'SQLStoragePluginABC', 'SQLiteStoragePlugin',
'Session', 'SessionBaseMixin', 'SessionProtocol', 'ShapeType',
'SkipIfErrorContextManager', 'StoragePluginABC', 'TensorType',
'WeightPyTorchFault', 'abc', 'autopytorchmaskplugin',
'camel_to_snake', 'classes', 'compare_version', 'constants', 'csv',
'csvdataclasses', 'csvstorageplugin', 'csvstoragepluginabc',
'cupypytorchmaskplugin', 'dataclasses',
'densesparseoutputpytorchfault', 'enums', 'faultabc', 'faultmodel',
'faultmodelabc', 'faultmodels', 'fix_pysqlite',
'fpquantizedoutputpytorchfault', 'functions', 'get_object_library',
'handlers', 'helpers', 'imports', 'indexing', 'indexingplugin',
'indexingpluginabc', 'injectionabc', 'injectioncallback',
'injectionhandler', 'injections', 'integrations',
'is_module_available', 'layersummaryabc', 'libraryhandlerpluginabc',
'lowleveltorchmaskpluginabc', 'mask', 'mixins', 'modelsummaryabc',
'modelsummarytorchinfo', 'monitorabc', 'numpypytorchmaskplugin',
'outputpytorchfault', 'outputpytorchmonitor', 'plugins',
'pruneddensetosparseactivationpytorchfault',
'pruneddensetosparseweightpytorchfault',
'pysqlite_begin_emission_fix_on_connect', 'pytorchhandlerplugin',
'pytorchinjectionabc', 'pytorchlightning', 'pytorchmaskmixin',
'pytorchmonitorpostprocessormixin', 'pytorchquantizationmixin',
'pytorchsparseinterfacemixin', 'pytorchsparseinterfacepluginabc',
'pytorchtensorobjectvalidatormixin', 'quantizedoutputpytorchfault',
'sensitivityanalysis', 'set_sqlite_pragma', 'snnoutputnorsefault',
'sparse', 'sql', 'sqlalchemy_begin_emission_pysqlite',
'sqldataclasses', 'sqlitestorageplugin', 'sqlstoragepluginabc',
'sqlutils', 'storage', 'storagepluginabc', 'storagetypings',
'summaries', 'torch_geometric_mean', 'typings', 'utils',
'weightpytorchfault']
# </AUTOGEN_INIT>
| 12,581 | 37.477064 | 88 | py |
enpheeph | enpheeph-main/src/enpheeph/abc/lowleveltorchmaskpluginabc.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import abc
import typing
import enpheeph.utils.enums
import enpheeph.utils.typings
# while 3rd party library should be before custom libraries, we move it down to avoid
# flake8 complaining, since it is a conditional import
if typing.TYPE_CHECKING:
import torch
class LowLevelTorchMaskPluginABC(abc.ABC):
    """Backend-agnostic interface for the low-level arrays used as masks.

    Concrete plugins implement conversions between their native array type
    and torch tensors, bitwise/target dtype reinterpretation, and mask
    construction.
    """

    @abc.abstractmethod
    def to_torch(
        self, array: enpheeph.utils.typings.LowLevelMaskArrayType
    ) -> "torch.Tensor":
        """Convert a backend array into a torch tensor."""
        ...

    @abc.abstractmethod
    def from_torch(
        self, tensor: "torch.Tensor"
    ) -> enpheeph.utils.typings.LowLevelMaskArrayType:
        """Convert a torch tensor into a backend array."""
        ...

    @abc.abstractmethod
    def to_bitwise_type(
        self, array: enpheeph.utils.typings.LowLevelMaskArrayType
    ) -> enpheeph.utils.typings.LowLevelMaskArrayType:
        """Reinterpret the array with a dtype suitable for bitwise operations."""
        ...

    @abc.abstractmethod
    def to_target_type(
        self,
        array: enpheeph.utils.typings.LowLevelMaskArrayType,
        target: enpheeph.utils.typings.LowLevelMaskArrayType,
    ) -> enpheeph.utils.typings.LowLevelMaskArrayType:
        """Reinterpret ``array`` with the dtype of ``target``."""
        ...

    @abc.abstractmethod
    def make_mask_array(
        self,
        int_mask: int,
        # this fill value is already final, as is the int mask
        int_fill_value: int,
        shape: typing.Sequence[int],
        torch_placeholder: "torch.Tensor",
        mask: typing.Optional[enpheeph.utils.typings.AnyMaskType] = None,
        mask_index: typing.Optional[enpheeph.utils.typings.AnyIndexType] = None,
    ) -> enpheeph.utils.typings.LowLevelMaskArrayType:
        """Build a mask array of ``shape`` filled with the given int values."""
        ...
| 3,069 | 35.117647 | 85 | py |
enpheeph | enpheeph-main/src/enpheeph/abc/pytorchinjectionabc.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import abc
import typing
import enpheeph.injections.abc.injectionabc
# to avoid flake complaining that imports are after if, even though torch is 3rd-party
# library so it should be before self-imports
if typing.TYPE_CHECKING:
import torch
class PyTorchInjectionABC(enpheeph.injections.abc.injectionabc.InjectionABC):
    """Base class for PyTorch injections managed through a removable hook."""

    # hook handle registered by setup(); None when no hook is active
    handle: typing.Optional["torch.utils.hooks.RemovableHandle"]

    @abc.abstractmethod
    def setup(
        self,
        module: "torch.nn.Module",
    ) -> "torch.nn.Module":
        """Attach the injection to ``module`` and return it."""
        ...

    # common teardown for all injections; subclasses needing special care
    # should override it, keeping the same signature
    def teardown(
        self,
        module: "torch.nn.Module",
    ) -> "torch.nn.Module":
        """Remove the registered hook, if any, and return the module."""
        # getattr-guard: teardown may run before setup ever set `handle`
        if getattr(self, "handle", None) is not None:
            typing.cast(
                "torch.utils.hooks.RemovableHandle",
                self.handle,
            ).remove()
            self.handle = None
        return module

    @property
    @abc.abstractmethod
    def module_name(self) -> str:
        """Name of the target module inside the model."""
        ...
| 2,730 | 34.934211 | 86 | py |
enpheeph | enpheeph-main/src/enpheeph/abc/pytorchsparseinterfacepluginabc.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import abc
# import typing
# import enpheeph.utils.enums
# import enpheeph.utils.typings
# while 3rd party library should be before custom libraries, we move it down to avoid
# flake8 complaining, since it is a conditional import
# if typing.TYPE_CHECKING:
# import torch
class PyTorchSparseInterfacePluginABC(abc.ABC):
    """Abstract base class for plugins bridging sparse representations and PyTorch.

    NOTE(review): the whole intended interface below is currently commented
    out, so this class is only a placeholder marker in the plugin hierarchy.
    The commented signatures document the planned contract (conversion
    between low-level mask arrays and ``torch.Tensor`` plus mask-array
    construction) — confirm before relying on any of it.
    """

    # @abc.abstractmethod
    # def to_torch(
    #     self, array: enpheeph.utils.typings.LowLevelMaskArrayType
    # ) -> "torch.Tensor":
    #     pass
    # @abc.abstractmethod
    # def from_torch(
    #     self, tensor: "torch.Tensor"
    # ) -> enpheeph.utils.typings.LowLevelMaskArrayType:
    #     pass
    # @abc.abstractmethod
    # def to_bitwise_type(
    #     self, array: enpheeph.utils.typings.LowLevelMaskArrayType
    # ) -> enpheeph.utils.typings.LowLevelMaskArrayType:
    #     pass
    # @abc.abstractmethod
    # def to_target_type(
    #     self,
    #     array: enpheeph.utils.typings.LowLevelMaskArrayType,
    #     target: enpheeph.utils.typings.LowLevelMaskArrayType,
    # ) -> enpheeph.utils.typings.LowLevelMaskArrayType:
    #     pass
    # @abc.abstractmethod
    # def make_mask_array(
    #     self,
    #     int_mask: int,
    #     # this fill value is already final, as is the int mask
    #     int_fill_value: int,
    #     shape: typing.Sequence[int],
    #     torch_placeholder: "torch.Tensor",
    #     mask: typing.Optional[enpheeph.utils.typings.AnyMaskType] = None,
    #     mask_index: typing.Optional[enpheeph.utils.typings.AnyIndexType] = None,
    # ) -> enpheeph.utils.typings.LowLevelMaskArrayType:
    #     pass
    pass
| 3,162 | 35.356322 | 85 | py |
enpheeph | enpheeph-main/src/enpheeph/abc/__init__.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# we ignore mypy/flake8/black as this file is autogenerated
# we ignore this specific error because of AUTOGEN_INIT
# mypy: ignore-errors
# the following flake8 syntax is wrong, as it will be read as generic noqa, but we use
# it to remember the errors appearing in the __init__.py
# additionally this is not caught by pygrep-hooks as it counts only "type: ignore" and
# "noqa", both with starting #
# flake8: noqa: E302,E305
# fmt: off
# this is required so that the mkinit script will generate the init imports only in this
# section
# <AUTOGEN_INIT>
def lazy_import(module_name, submodules, submod_attrs):
    """Build a PEP 562 ``__getattr__`` that imports names on first access.

    ``submodules`` are imported as ``module_name.<name>`` when looked up;
    attributes listed in ``submod_attrs`` (a mapping of submodule name to
    attribute names) are fetched from their owning submodule.  Resolved
    values are cached in this module's globals, so each name is imported at
    most once.  Setting the ``EAGER_IMPORT`` environment variable forces
    everything to be resolved immediately (useful for debugging).
    """
    import importlib
    import os

    # invert submod_attrs: attribute name -> submodule that provides it
    attr_to_module = {}
    for owner, attr_names in submod_attrs.items():
        for attr_name in attr_names:
            attr_to_module[attr_name] = owner

    def __getattr__(name):
        if name in submodules:
            value = importlib.import_module(
                '{module_name}.{name}'.format(
                    module_name=module_name, name=name)
            )
        elif name in attr_to_module:
            owner = attr_to_module[name]
            owner_module = importlib.import_module(
                '{module_name}.{submodname}'.format(
                    module_name=module_name, submodname=owner)
            )
            value = getattr(owner_module, name)
        else:
            raise AttributeError(
                'No {module_name} attribute {name}'.format(
                    module_name=module_name, name=name))
        # cache so the lazy path runs only once per name
        globals()[name] = value
        return value

    if os.environ.get('EAGER_IMPORT', ''):
        # resolve everything up-front instead of lazily
        for eager_name in attr_to_module.values():
            __getattr__(eager_name)
        for attr_group in submod_attrs.values():
            for attr_name in attr_group:
                __getattr__(attr_name)
    return __getattr__
__getattr__ = lazy_import(
__name__,
submodules={
'handlers',
'helpers',
'injections',
'integrations',
'utils',
},
submod_attrs={
'handlers': [
'InjectionHandler',
'LibraryHandlerPluginABC',
'PyTorchHandlerPlugin',
'injectionhandler',
'libraryhandlerpluginabc',
'plugins',
'pytorchhandlerplugin',
],
'helpers': [
'FaultModelABC',
'ModelSummaryABC',
'ModelSummaryTorchinfo',
'abc',
'faultmodel',
'faultmodelabc',
'faultmodels',
'layersummaryabc',
'modelsummaryabc',
'modelsummarytorchinfo',
'plugins',
'sensitivityanalysis',
'summaries',
],
'injections': [
'AutoPyTorchMaskPlugin',
'CSVStoragePluginABC',
'CuPyPyTorchMaskPlugin',
'CustomBase',
'CustomBaseClass',
'DenseSparseOutputPyTorchFault',
'ExperimentRun',
'ExperimentRunBaseMixin',
'ExperimentRunProtocol',
'FPQuantizedOutputPyTorchFault',
'Fault',
'FaultABC',
'FaultBaseMixin',
'FaultProtocol',
'IndexingPlugin',
'IndexingPluginABC',
'Injection',
'InjectionABC',
'InjectionProtocol',
'LowLevelTorchMaskPluginABC',
'Monitor',
'MonitorABC',
'MonitorBaseMixin',
'MonitorProtocol',
'NumPyPyTorchMaskPlugin',
'OutputPyTorchFault',
'OutputPyTorchMonitor',
'PandasCSVStoragePlugin',
'PolymorphicMixin',
'PrunedDenseToSparseWeightPyTorchFault',
'PyTorchInjectionABC',
'PyTorchMaskMixin',
'PyTorchMonitorPostProcessorMixin',
'PyTorchSparseInterfaceMixin',
'PyTorchSparseInterfacePluginABC',
'PyTorchTensorObjectValidatorMixin',
'QuantizedOutputPyTorchFault',
'SNNOutputNorseFault',
'SQLStoragePluginABC',
'SQLiteStoragePlugin',
'Session',
'SessionBaseMixin',
'SessionProtocol',
'StoragePluginABC',
'WeightPyTorchFault',
'abc',
'autopytorchmaskplugin',
'csv',
'csvdataclasses',
'csvstorageplugin',
'csvstoragepluginabc',
'cupypytorchmaskplugin',
'densesparseoutputpytorchfault',
'faultabc',
'fix_pysqlite',
'fpquantizedoutputpytorchfault',
'indexing',
'indexingplugin',
'indexingpluginabc',
'injectionabc',
'lowleveltorchmaskpluginabc',
'mask',
'mixins',
'monitorabc',
'numpypytorchmaskplugin',
'outputpytorchfault',
'outputpytorchmonitor',
'plugins',
'pruneddensetosparseactivationpytorchfault',
'pruneddensetosparseweightpytorchfault',
'pysqlite_begin_emission_fix_on_connect',
'pytorchinjectionabc',
'pytorchmaskmixin',
'pytorchmonitorpostprocessormixin',
'pytorchquantizationmixin',
'pytorchsparseinterfacemixin',
'pytorchsparseinterfacepluginabc',
'pytorchtensorobjectvalidatormixin',
'quantizedoutputpytorchfault',
'set_sqlite_pragma',
'snnoutputnorsefault',
'sparse',
'sql',
'sqlalchemy_begin_emission_pysqlite',
'sqldataclasses',
'sqlitestorageplugin',
'sqlstoragepluginabc',
'sqlutils',
'storage',
'storagepluginabc',
'storagetypings',
'torch_geometric_mean',
'utils',
'weightpytorchfault',
],
'integrations': [
'InjectionCallback',
'injectioncallback',
'pytorchlightning',
],
'utils': [
'ActiveDimensionIndexType',
'AnyIndexType',
'AnyMaskType',
'ArrayType',
'BaseInjectionLocation',
'BitFaultMaskInfo',
'BitFaultValue',
'BitIndexInfo',
'BitWidth',
'DimensionDictType',
'DimensionIndexType',
'DimensionLocationIndexType',
'DimensionLocationMaskType',
'DimensionType',
'Endianness',
'FaultLocation',
'FaultLocationMixin',
'FaultMaskOperation',
'FaultMaskValue',
'HandlerStatus',
'IDGenerator',
'IDGeneratorSubclass',
'Index1DType',
'IndexMultiDType',
'IndexTimeType',
'InjectionLocationABC',
'LocationMixin',
'LocationModuleNameMixin',
'LocationOptionalMixin',
'LowLevelMaskArrayType',
'Mask1DType',
'MaskMultiDType',
'ModelType',
'MonitorLocation',
'MonitorMetric',
'ParameterType',
'PathType',
'ShapeType',
'SkipIfErrorContextManager',
'TensorType',
'camel_to_snake',
'classes',
'compare_version',
'constants',
'dataclasses',
'enums',
'functions',
'get_object_library',
'imports',
'is_module_available',
'typings',
],
},
)
def __dir__():
    """Return the package's public names (PEP 562 module-level ``__dir__``)."""
    return __all__
__all__ = ['ActiveDimensionIndexType', 'AnyIndexType', 'AnyMaskType',
'ArrayType', 'AutoPyTorchMaskPlugin', 'BaseInjectionLocation',
'BitFaultMaskInfo', 'BitFaultValue', 'BitIndexInfo', 'BitWidth',
'CSVStoragePluginABC', 'CuPyPyTorchMaskPlugin', 'CustomBase',
'CustomBaseClass', 'DenseSparseOutputPyTorchFault',
'DimensionDictType', 'DimensionIndexType',
'DimensionLocationIndexType', 'DimensionLocationMaskType',
'DimensionType', 'Endianness', 'ExperimentRun',
'ExperimentRunBaseMixin', 'ExperimentRunProtocol',
'FPQuantizedOutputPyTorchFault', 'Fault', 'FaultABC',
'FaultBaseMixin', 'FaultLocation', 'FaultLocationMixin',
'FaultMaskOperation', 'FaultMaskValue', 'FaultModelABC',
'FaultProtocol', 'HandlerStatus', 'IDGenerator',
'IDGeneratorSubclass', 'Index1DType', 'IndexMultiDType',
'IndexTimeType', 'IndexingPlugin', 'IndexingPluginABC', 'Injection',
'InjectionABC', 'InjectionCallback', 'InjectionHandler',
'InjectionLocationABC', 'InjectionProtocol',
'LibraryHandlerPluginABC', 'LocationMixin',
'LocationModuleNameMixin', 'LocationOptionalMixin',
'LowLevelMaskArrayType', 'LowLevelTorchMaskPluginABC', 'Mask1DType',
'MaskMultiDType', 'ModelSummaryABC', 'ModelSummaryTorchinfo',
'ModelType', 'Monitor', 'MonitorABC', 'MonitorBaseMixin',
'MonitorLocation', 'MonitorMetric', 'MonitorProtocol',
'NumPyPyTorchMaskPlugin', 'OutputPyTorchFault',
'OutputPyTorchMonitor', 'PandasCSVStoragePlugin', 'ParameterType',
'PathType', 'PolymorphicMixin',
'PrunedDenseToSparseWeightPyTorchFault', 'PyTorchHandlerPlugin',
'PyTorchInjectionABC', 'PyTorchMaskMixin',
'PyTorchMonitorPostProcessorMixin', 'PyTorchSparseInterfaceMixin',
'PyTorchSparseInterfacePluginABC',
'PyTorchTensorObjectValidatorMixin', 'QuantizedOutputPyTorchFault',
'SNNOutputNorseFault', 'SQLStoragePluginABC', 'SQLiteStoragePlugin',
'Session', 'SessionBaseMixin', 'SessionProtocol', 'ShapeType',
'SkipIfErrorContextManager', 'StoragePluginABC', 'TensorType',
'WeightPyTorchFault', 'abc', 'autopytorchmaskplugin',
'camel_to_snake', 'classes', 'compare_version', 'constants', 'csv',
'csvdataclasses', 'csvstorageplugin', 'csvstoragepluginabc',
'cupypytorchmaskplugin', 'dataclasses',
'densesparseoutputpytorchfault', 'enums', 'faultabc', 'faultmodel',
'faultmodelabc', 'faultmodels', 'fix_pysqlite',
'fpquantizedoutputpytorchfault', 'functions', 'get_object_library',
'handlers', 'helpers', 'imports', 'indexing', 'indexingplugin',
'indexingpluginabc', 'injectionabc', 'injectioncallback',
'injectionhandler', 'injections', 'integrations',
'is_module_available', 'layersummaryabc', 'libraryhandlerpluginabc',
'lowleveltorchmaskpluginabc', 'mask', 'mixins', 'modelsummaryabc',
'modelsummarytorchinfo', 'monitorabc', 'numpypytorchmaskplugin',
'outputpytorchfault', 'outputpytorchmonitor', 'plugins',
'pruneddensetosparseactivationpytorchfault',
'pruneddensetosparseweightpytorchfault',
'pysqlite_begin_emission_fix_on_connect', 'pytorchhandlerplugin',
'pytorchinjectionabc', 'pytorchlightning', 'pytorchmaskmixin',
'pytorchmonitorpostprocessormixin', 'pytorchquantizationmixin',
'pytorchsparseinterfacemixin', 'pytorchsparseinterfacepluginabc',
'pytorchtensorobjectvalidatormixin', 'quantizedoutputpytorchfault',
'sensitivityanalysis', 'set_sqlite_pragma', 'snnoutputnorsefault',
'sparse', 'sql', 'sqlalchemy_begin_emission_pysqlite',
'sqldataclasses', 'sqlitestorageplugin', 'sqlstoragepluginabc',
'sqlutils', 'storage', 'storagepluginabc', 'storagetypings',
'summaries', 'torch_geometric_mean', 'typings', 'utils',
'weightpytorchfault']
# </AUTOGEN_INIT>
| 12,581 | 37.477064 | 88 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/pruneddensetosparseactivationpytorchfault.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import typing
import enpheeph.injections.abc.faultabc
import enpheeph.injections.abc.pytorchinjectionabc
import enpheeph.injections.mixins.pytorchmaskmixin
import enpheeph.injections.mixins.pytorchsparseinterfacemixin
import enpheeph.injections.mixins.pytorchtensorobjectvalidatormixin
import enpheeph.injections.plugins.mask.abc.lowleveltorchmaskpluginabc
import enpheeph.utils.dataclasses
# we move this import down
if typing.TYPE_CHECKING:
import torch
class PrunedDenseToSparseWeightPyTorchFault(
    enpheeph.injections.abc.faultabc.FaultABC,
    enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC,
    enpheeph.injections.mixins.pytorchmaskmixin.PyTorchMaskMixin,
    enpheeph.injections.mixins.pytorchsparseinterfacemixin.PyTorchSparseInterfaceMixin,
    (
        # fmt: off
        enpheeph.injections.mixins.
        pytorchtensorobjectvalidatormixin.PyTorchTensorObjectValidatorMixin
        # fmt: on
    ),
):
    """Fault injected on the sparse representation of a pruned module's output.

    The forward hook converts the hooked module's output into its sparse
    injection parameter, injects the configured mask over the Tensor
    dimension, writes the faulty values back and returns the densified
    result (a forward hook returning a value replaces the module output).
    """

    # plugin translating the configured indices into concrete tensor indices
    # (annotation added for consistency with OutputPyTorchFault)
    indexing_plugin: (
        enpheeph.injections.plugins.indexing.abc.indexingpluginabc.IndexingPluginABC
    )
    # where (module name / tensor indices / bits) the fault is injected
    location: enpheeph.utils.dataclasses.FaultLocation
    low_level_plugin: (
        # black has issues with long names
        # fmt: off
        enpheeph.injections.plugins.mask.
        lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
        # fmt: on
    )
    # mask tensor lazily built by PyTorchMaskMixin.generate_mask
    mask: typing.Optional["torch.Tensor"]

    def __init__(
        self,
        indexing_plugin: (
            enpheeph.injections.plugins.indexing.abc.indexingpluginabc.IndexingPluginABC
        ),
        location: enpheeph.utils.dataclasses.FaultLocation,
        low_level_torch_plugin: (
            # black has issues with long names
            # fmt: off
            enpheeph.injections.plugins.mask.
            lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
            # fmt: on
        ),
    ) -> None:
        """Store the plugins and fault location; no hook is registered yet."""
        super().__init__()

        self.indexing_plugin = indexing_plugin
        self.location = location
        self.low_level_plugin = low_level_torch_plugin

        self.handle = None
        self.mask = None

    @property
    def module_name(self) -> str:
        """Name of the module targeted by this fault."""
        return self.location.module_name

    def output_fault_hook(
        self,
        module: "torch.nn.Module",
        input: typing.Union[typing.Tuple["torch.Tensor"], "torch.Tensor"],
        output: "torch.Tensor",
    ) -> "torch.Tensor":
        # FIX: annotated as returning a tensor instead of None, since the
        # hook returns the (densified) faulty output and PyTorch uses a
        # non-None forward-hook return value to replace the module output;
        # this also matches OutputPyTorchFault's annotation
        target = self.get_sparse_injection_parameter(output)

        # NOTE(review): relies on enpheeph.utils.enums being reachable as an
        # attribute even though it is not imported explicitly in this file
        # (presumably via the package's lazy __init__) — confirm
        self.indexing_plugin.select_active_dimensions(
            [enpheeph.utils.enums.DimensionType.Tensor],
            autoshift_to_boundaries=True,
        )

        self.generate_mask(target, tensor_only=None, force_recompute=True)

        target = self.inject_mask(target, tensor_only=None)

        output = self.set_sparse_injection_parameter(output, target).to_dense()

        self.indexing_plugin.reset_active_dimensions()

        return output

    def setup(
        self,
        module: "torch.nn.Module",
    ) -> "torch.nn.Module":
        """Register the fault hook on ``module`` and return the module."""
        self.handle = module.register_forward_hook(self.output_fault_hook)

        return module
| 4,498 | 34.148438 | 88 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/snnoutputnorsefault.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# type: ignore[misc,assignment,name-defined,unreachable,union-attr,attr-defined,operator]
# flake8: noqa
# we ignore mypy/flake8 errors here as this injection needs to be refactored
import typing
import norse
import enpheeph.injections.abc.pytorchinjectionabc
import enpheeph.injections.mixins.pytorchmaskmixin
import enpheeph.injections.plugins.mask.abc.lowleveltorchmaskpluginabc
import enpheeph.utils.dataclasses
class SNNOutputNorseFault(
    enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC,
    enpheeph.injections.mixins.pytorchmaskmixin.PyTorchMaskMixin,
):
    """Fault injection on the output of a norse ``SNNCell``, gated by time-step.

    The hook counts forward invocations (time-steps) by observing the state
    argument of the cell and only injects the mask on the time-steps selected
    by ``fault_location.time_index``.
    """

    def __init__(
        self,
        fault_location: enpheeph.utils.dataclasses.FaultLocation,
        low_level_torch_plugin: (
            # black has issues with very long names
            # fmt: off
            enpheeph.injections.plugins.mask.
            lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
            # fmt: on
        ),
    ):
        """Validate the time index and store the configuration.

        Raises:
            ValueError: if ``fault_location.time_index`` is None, since SNN
                injection needs to know which time-steps to target.
        """
        super().__init__()

        if fault_location.time_index is None:
            raise ValueError("time_index must be passed in the injection for SNNs")

        self.fault_location = fault_location
        self.low_level_plugin = low_level_torch_plugin

        self.handle = None
        self.mask = None
        # incremented on each forward call, reset when the state is None
        self.timestep_counter = None

    @property
    def module_name(self) -> str:
        """Name of the module targeted by this fault."""
        return self.fault_location.module_name

    # this hook assumes that for each forward call, the initial state at the
    # first execution point is None
    # in this way we can count and locate precisely the timesteps, using only
    # the forward hook and without modifying the norse code
    # NOTE: it would not work if the initial state used as input is different
    # from None, so be careful
    def snn_output_fault_hook(
        self,
        module: "torch.nn.Module",
        input: typing.Union[typing.Tuple["torch.Tensor"], "torch.Tensor"],
        output: "torch.Tensor",
    ) -> "torch.Tensor":
        # a None state marks the first time-step of a new sequence
        if input[1] is None:
            self.timestep_counter = 0
        elif isinstance(input[1], tuple):
            self.timestep_counter += 1
        else:
            raise RuntimeError("Not compatible with this way of calling")

        # normalize the configured time_index into an iterable of time-steps
        time_index = self.fault_location.time_index
        if isinstance(time_index, slice):
            # NOTE(review): assumes start/stop/step are all set — a slice with
            # a None field would raise here; confirm against the callers
            index = range(time_index.start, time_index.stop, time_index.step)
        elif isinstance(time_index, typing.Sequence):
            index = time_index
        elif isinstance(time_index, type(Ellipsis)):
            # Ellipsis means "every time-step seen so far"
            index = range(self.timestep_counter + 1)
        elif isinstance(time_index, int):
            index = (time_index,)
        else:
            raise IndexError("Unsupported time_index for SNN fault injection")

        # if the current counter is in the index, then we inject the fault
        if self.timestep_counter in index:
            self.generate_mask(output)

            masked_output = self.inject_mask(output)

            return masked_output
        else:
            return output

    def setup(
        self,
        module: "torch.nn.Module",
    ) -> "torch.nn.Module":
        """Register the SNN fault hook on ``module`` and return the module.

        Raises:
            RuntimeError: if ``module`` is not a norse ``SNNCell``.
        """
        if not isinstance(module, norse.torch.module.snn.SNNCell):
            raise RuntimeError(
                "Currently SNN injection supports only SNNCell from norse"
            )

        # FIX: register the hook actually defined on this class; the original
        # registered self.output_fault_hook, which does not exist on
        # SNNOutputNorseFault and would raise AttributeError at setup time,
        # while snn_output_fault_hook was never used
        self.handle = module.register_forward_hook(self.snn_output_fault_hook)

        return module

    def teardown(
        self,
        module: "torch.nn.Module",
    ) -> "torch.nn.Module":
        """Remove the hook and drop the cached mask; return the module."""
        self.handle.remove()

        self.handle = None
        self.mask = None

        return module
| 5,250 | 35.72028 | 89 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/outputpytorchfault.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import typing
import enpheeph.injections.abc.faultabc
import enpheeph.injections.abc.pytorchinjectionabc
import enpheeph.injections.mixins.pytorchmaskmixin
import enpheeph.injections.mixins.pytorchtensorobjectvalidatormixin
import enpheeph.injections.plugins.mask.abc.lowleveltorchmaskpluginabc
import enpheeph.utils.dataclasses
# we move this import down
if typing.TYPE_CHECKING:
import torch
class OutputPyTorchFault(
    enpheeph.injections.abc.faultabc.FaultABC,
    enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC,
    enpheeph.injections.mixins.pytorchmaskmixin.PyTorchMaskMixin,
    (
        # fmt: off
        enpheeph.injections.mixins.
        pytorchtensorobjectvalidatormixin.PyTorchTensorObjectValidatorMixin
        # fmt: on
    ),
):
    """Bit-level fault injected directly on a module's dense output tensor."""

    # removable handle returned by register_forward_hook; None when inactive
    handle: typing.Optional["torch.utils.hooks.RemovableHandle"]
    # we need the index plugin to simplify the handling of the indices
    indexing_plugin: (
        enpheeph.injections.plugins.indexing.abc.indexingpluginabc.IndexingPluginABC
    )
    # where (module name / tensor indices / bits) the fault is injected
    location: enpheeph.utils.dataclasses.FaultLocation
    low_level_plugin: (
        # black has issues with long names
        # fmt: off
        enpheeph.injections.plugins.mask.
        lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
        # fmt: on
    )
    # mask tensor lazily built by PyTorchMaskMixin.generate_mask
    mask: typing.Optional["torch.Tensor"]

    def __init__(
        self,
        indexing_plugin: (
            enpheeph.injections.plugins.indexing.abc.indexingpluginabc.IndexingPluginABC
        ),
        location: enpheeph.utils.dataclasses.FaultLocation,
        low_level_torch_plugin: (
            # black has issues with long names
            # fmt: off
            enpheeph.injections.plugins.mask.
            lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
            # fmt: on
        ),
    ) -> None:
        """Save the configuration; the hook is attached later by setup()."""
        super().__init__()

        self.location = location
        self.indexing_plugin = indexing_plugin
        self.low_level_plugin = low_level_torch_plugin

        self.mask = None
        self.handle = None

    @property
    def module_name(self) -> str:
        """Name of the module targeted by this fault."""
        return self.location.module_name

    def output_fault_hook(
        self,
        module: "torch.nn.Module",
        input: typing.Union[typing.Tuple["torch.Tensor"], "torch.Tensor"],
        output: "torch.Tensor",
    ) -> "torch.Tensor":
        """Forward hook: build the mask for ``output`` and return the faulty tensor."""
        self.generate_mask(output, tensor_only=True)

        faulty_output = self.inject_mask(output, tensor_only=False)

        return faulty_output

    def setup(
        self,
        module: "torch.nn.Module",
    ) -> "torch.nn.Module":
        """Attach the forward hook to ``module`` and return the module."""
        self.handle = module.register_forward_hook(self.output_fault_hook)

        return module
| 4,218 | 34.158333 | 88 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/fpquantizedoutputpytorchfault.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import typing
import enpheeph.injections.abc.faultabc
import enpheeph.injections.abc.pytorchinjectionabc
import enpheeph.injections.mixins.pytorchmaskmixin
import enpheeph.injections.mixins.pytorchsparseinterfacemixin
import enpheeph.injections.mixins.pytorchtensorobjectvalidatormixin
import enpheeph.injections.plugins.mask.abc.lowleveltorchmaskpluginabc
import enpheeph.utils.dataclasses
# we move this import down
if typing.TYPE_CHECKING:
import torch
class FPQuantizedOutputPyTorchFault(
    enpheeph.injections.abc.faultabc.FaultABC,
    enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC,
    enpheeph.injections.mixins.pytorchmaskmixin.PyTorchMaskMixin,
    (
        # fmt: off
        enpheeph.injections.mixins.
        pytorchtensorobjectvalidatormixin.PyTorchTensorObjectValidatorMixin
        # fmt: on
    ),
):
    """Fault injected on a module output through a fixed-point (Q8.24) view.

    The floating-point output is scaled by ``2 ** 24`` and cast to
    ``torch.int32`` so that the bit-level mask is applied to a fixed-point
    representation, then the result is scaled back to the original dtype.
    """

    # where (module name / tensor indices / bits) the fault is injected
    location: enpheeph.utils.dataclasses.FaultLocation
    low_level_plugin: (
        # black has issues with long names
        # fmt: off
        enpheeph.injections.plugins.mask.
        lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
        # fmt: on
    )
    # mask tensor lazily built by PyTorchMaskMixin.generate_mask
    mask: typing.Optional["torch.Tensor"]

    def __init__(
        self,
        indexing_plugin: (
            enpheeph.injections.plugins.indexing.abc.indexingpluginabc.IndexingPluginABC
        ),
        location: enpheeph.utils.dataclasses.FaultLocation,
        low_level_torch_plugin: (
            # black has issues with long names
            # fmt: off
            enpheeph.injections.plugins.mask.
            lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
            # fmt: on
        ),
    ) -> None:
        """Store the plugins and fault location; no hook is registered yet."""
        super().__init__()

        self.indexing_plugin = indexing_plugin
        self.location = location
        self.low_level_plugin = low_level_torch_plugin

        self.handle = None
        self.mask = None

    @property
    def module_name(self) -> str:
        """Name of the module targeted by this fault."""
        return self.location.module_name

    def output_fault_hook(
        self,
        module: "torch.nn.Module",
        input: typing.Union[typing.Tuple["torch.Tensor"], "torch.Tensor"],
        output: "torch.Tensor",
    ) -> "torch.Tensor":
        # FIX: annotated as returning a tensor instead of None, since the
        # hook returns the faulty tensor and PyTorch uses a non-None
        # forward-hook return value to replace the module output
        import torch

        # here we need to generate target with a proper mixin
        # in our case we use torch.int32, and we multiply by 2 ** 24 as to have a
        # dynamic range of [-128, 127] in fp32 while having
        # 2 ** -24 as precision in int32, ~6e-08, which should be more than enough
        shift_factor = 2**24
        target_dtype = torch.int32
        original_dtype = output.dtype

        target = output * shift_factor
        target = target.to(target_dtype)

        # NOTE(review): the mask is generated from the float ``output`` but
        # injected into the int32 ``target`` — shapes match, dtypes differ;
        # confirm the mask mixin handles the dtype mismatch as intended
        self.generate_mask(output, tensor_only=True)

        target = self.inject_mask(target, tensor_only=False)

        # convert back to the original floating-point representation
        target = target.to(dtype=original_dtype)
        target /= shift_factor

        return target

    def setup(
        self,
        module: "torch.nn.Module",
    ) -> "torch.nn.Module":
        """Register the fault hook on ``module`` and return the module."""
        self.handle = module.register_forward_hook(self.output_fault_hook)

        return module
| 4,619 | 34 | 88 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/quantizedoutputpytorchfault.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import typing
import enpheeph.injections.abc.faultabc
import enpheeph.injections.abc.pytorchinjectionabc
import enpheeph.injections.mixins.pytorchmaskmixin
import enpheeph.injections.mixins.pytorchsparseinterfacemixin
import enpheeph.injections.mixins.pytorchtensorobjectvalidatormixin
import enpheeph.injections.plugins.mask.abc.lowleveltorchmaskpluginabc
import enpheeph.utils.dataclasses
# we move this import down
if typing.TYPE_CHECKING:
import torch
class QuantizedOutputPyTorchFault(
    enpheeph.injections.abc.faultabc.FaultABC,
    enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC,
    enpheeph.injections.mixins.pytorchmaskmixin.PyTorchMaskMixin,
    (
        # fmt: off
        enpheeph.injections.mixins.
        pytorchtensorobjectvalidatormixin.PyTorchTensorObjectValidatorMixin
        # fmt: on
    ),
):
    """Fault injected on a module output through a fixed-point (Q8.24) view.

    NOTE(review): this class is currently byte-identical to
    FPQuantizedOutputPyTorchFault — presumably one of the two is meant to
    diverge (e.g. integer-quantized vs fixed-point); confirm and deduplicate.

    The floating-point output is scaled by ``2 ** 24`` and cast to
    ``torch.int32`` so that the bit-level mask is applied to a fixed-point
    representation, then the result is scaled back to the original dtype.
    """

    # where (module name / tensor indices / bits) the fault is injected
    location: enpheeph.utils.dataclasses.FaultLocation
    low_level_plugin: (
        # black has issues with long names
        # fmt: off
        enpheeph.injections.plugins.mask.
        lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
        # fmt: on
    )
    # mask tensor lazily built by PyTorchMaskMixin.generate_mask
    mask: typing.Optional["torch.Tensor"]

    def __init__(
        self,
        indexing_plugin: (
            enpheeph.injections.plugins.indexing.abc.indexingpluginabc.IndexingPluginABC
        ),
        location: enpheeph.utils.dataclasses.FaultLocation,
        low_level_torch_plugin: (
            # black has issues with long names
            # fmt: off
            enpheeph.injections.plugins.mask.
            lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
            # fmt: on
        ),
    ) -> None:
        """Store the plugins and fault location; no hook is registered yet."""
        super().__init__()

        self.indexing_plugin = indexing_plugin
        self.location = location
        self.low_level_plugin = low_level_torch_plugin

        self.handle = None
        self.mask = None

    @property
    def module_name(self) -> str:
        """Name of the module targeted by this fault."""
        return self.location.module_name

    def output_fault_hook(
        self,
        module: "torch.nn.Module",
        input: typing.Union[typing.Tuple["torch.Tensor"], "torch.Tensor"],
        output: "torch.Tensor",
    ) -> "torch.Tensor":
        # FIX: annotated as returning a tensor instead of None, since the
        # hook returns the faulty tensor and PyTorch uses a non-None
        # forward-hook return value to replace the module output
        import torch

        # here we need to generate target with a proper mixin
        # in our case we use torch.int32, and we multiply by 2 ** 24 as to have a
        # dynamic range of [-128, 127] in fp32 while having
        # 2 ** -24 as precision in int32, ~6e-08, which should be more than enough
        shift_factor = 2**24
        target_dtype = torch.int32
        original_dtype = output.dtype

        target = output * shift_factor
        target = target.to(target_dtype)

        # NOTE(review): the mask is generated from the float ``output`` but
        # injected into the int32 ``target`` — shapes match, dtypes differ;
        # confirm the mask mixin handles the dtype mismatch as intended
        self.generate_mask(output, tensor_only=True)

        target = self.inject_mask(target, tensor_only=False)

        # convert back to the original floating-point representation
        target = target.to(dtype=original_dtype)
        target /= shift_factor

        return target

    def setup(
        self,
        module: "torch.nn.Module",
    ) -> "torch.nn.Module":
        """Register the fault hook on ``module`` and return the module."""
        self.handle = module.register_forward_hook(self.output_fault_hook)

        return module
| 4,617 | 33.984848 | 88 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/pruneddensetosparseweightpytorchfault.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import copy
import typing
import enpheeph.injections.abc.faultabc
import enpheeph.injections.abc.pytorchinjectionabc
import enpheeph.injections.mixins.pytorchmaskmixin
import enpheeph.injections.mixins.pytorchsparseinterfacemixin
import enpheeph.injections.mixins.pytorchtensorobjectvalidatormixin
import enpheeph.injections.plugins.mask.abc.lowleveltorchmaskpluginabc
import enpheeph.injections.plugins.indexing.abc.indexingpluginabc
import enpheeph.utils.dataclasses
import enpheeph.utils.enums
# we move this import down
if typing.TYPE_CHECKING:
import torch
class PrunedDenseToSparseWeightPyTorchFault(
    enpheeph.injections.abc.faultabc.FaultABC,
    enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC,
    enpheeph.injections.mixins.pytorchmaskmixin.PyTorchMaskMixin,
    enpheeph.injections.mixins.pytorchsparseinterfacemixin.PyTorchSparseInterfaceMixin,
    (
        # fmt: off
        enpheeph.injections.mixins.
        pytorchtensorobjectvalidatormixin.PyTorchTensorObjectValidatorMixin
        # fmt: on
    ),
):
    """Fault injection on the sparse representation of a pruned dense weight.

    ``setup`` converts the target parameter to its sparse form, injects the
    configured mask into the selected sparse component (indices or values),
    writes the result back as the parameter, and keeps a deep copy of the
    original so ``teardown`` can restore it.  No forward hook is needed.
    """

    # deep copy of the original parameter, restored by teardown;
    # None while no injection is active
    backup: typing.Optional["torch.Tensor"]
    # we need the index plugin to simplify the handling of the indices
    indexing_plugin: (
        enpheeph.injections.plugins.indexing.abc.indexingpluginabc.IndexingPluginABC
    )
    location: enpheeph.utils.dataclasses.FaultLocation
    low_level_plugin: (
        # black has issues with long names
        # fmt: off
        enpheeph.injections.plugins.mask.abc.
        lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
        # fmt: on
    )
    # injection mask, built lazily by the mask mixin
    mask: typing.Optional["torch.Tensor"]

    def __init__(
        self,
        indexing_plugin: (
            # black has issues with long names
            # fmt: off
            enpheeph.injections.plugins.indexing.abc.
            indexingpluginabc.IndexingPluginABC
            # fmt: on
        ),
        location: enpheeph.utils.dataclasses.FaultLocation,
        low_level_torch_plugin: (
            # black has issues with long names
            # fmt: off
            enpheeph.injections.plugins.mask.abc.
            lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
            # fmt: on
        ),
    ) -> None:
        """Store the plugins and the fault location; no injection happens yet."""
        super().__init__()

        self.indexing_plugin = indexing_plugin
        self.location = location
        self.low_level_plugin = low_level_torch_plugin

        self.backup = None
        self.handle = None
        self.mask = None

    @property
    def module_name(self) -> str:
        # name of the module whose parameter is targeted by this fault
        return self.location.module_name

    def inject_weight(
        self,
        module: "torch.nn.Module",
    ) -> "torch.nn.Module":
        """Replace the target parameter with its fault-injected version.

        Raises:
            ValueError: if an injection is already active (backup present).
        """
        if self.backup is not None:
            raise ValueError(
                "This method must be called only when setting up the injection"
            )

        # first we get the element to be injected
        weight = getattr(
            module,
            # sometimes type: ignore[arg-type] might be required for the following line
            # mypy gives error as parameter_name can be None, but it cannot be since
            # the dataclass checks for the validity
            # so we simply cast it here
            typing.cast(str, self.location.parameter_name),
        )

        # we back it up to restore it later
        self.backup = copy.deepcopy(weight)

        # we call the mixin interface to access the specific element, be it index or
        # values of the sparse tensor
        target_sparse_element = self.get_sparse_injection_parameter(weight)

        # we select the dimensions to be accessed, which are all of them since we have
        # no batches in the target sparse element
        self.indexing_plugin.select_active_dimensions(
            dimensions=[enpheeph.utils.enums.DimensionType.Tensor],
            autoshift_to_boundaries=True,
        )

        # we generate the mask specific for this element
        self.generate_mask(
            target_sparse_element,
            tensor_only=True,
            batches_exist=False,
        )
        # we inject the mask
        masked_sparse_element = self.inject_mask(
            target_sparse_element,
            tensor_only=True,
            batches_exist=False,
        )

        # we update the weight with the new sparse element, using the sparse mixin
        masked_weight = self.set_sparse_injection_parameter(
            weight, masked_sparse_element
        )

        # we need to convert the masked weight to the proper class
        masked_weight_corrected = self.convert_tensor_to_proper_class(
            masked_weight, weight
        )

        # we set the masked weight in the proper location, overwriting the one that was
        # backupped
        # this is needed as it is impossible to modify the weight in-place, so the
        # conversion is dense -> sparse -> sparse element -> injected sparse element ->
        # new sparse tensor -> new dense
        setattr(
            module,
            # sometimes type: ignore[arg-type] might be required for the following line
            # mypy gives error as parameter_name can be None, but it cannot be since
            # the dataclass checks for the validity
            # so we simply cast it here
            typing.cast(str, self.location.parameter_name),
            masked_weight_corrected,
        )

        # we reset the active plugin dimensions, as they might be different in the next
        # run, especially if the plugin is shared across multiple classes
        self.indexing_plugin.reset_active_dimensions()

        return module

    def restore_weight(
        self,
        module: "torch.nn.Module",
    ) -> "torch.nn.Module":
        """Put the backed-up original parameter back on the module.

        Raises:
            ValueError: if no injection is active (no backup to restore).
        """
        if self.backup is None:
            raise ValueError(
                "This method must be called only when tearing down the injection"
            )
        setattr(  # type: ignore[unreachable]
            module,
            typing.cast(str, self.location.parameter_name),
            copy.deepcopy(self.backup),
        )
        # drop the backup so a new injection can be set up afterwards
        self.backup = None
        return module

    def setup(
        self,
        module: "torch.nn.Module",
    ) -> "torch.nn.Module":
        """Activate the fault by overwriting the parameter (hook-free)."""
        module = self.inject_weight(module)

        return module

    # we need to override the teardown as it is not common to the normal hook
    # teardowns
    def teardown(
        self,
        module: "torch.nn.Module",
    ) -> "torch.nn.Module":
        """Deactivate the fault by restoring the original parameter."""
        module = self.restore_weight(module)

        return module
| 7,989 | 35.318182 | 87 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/__init__.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# we ignore mypy/flake8/black as this file is autogenerated
# we ignore this specific error because of AUTOGEN_INIT
# mypy: ignore-errors
# the following flake8 syntax is wrong, as it will be read as generic noqa, but we use
# it to remember the errors appearing in the __init__.py
# additionally this is not caught by pygrep-hooks as it counts only "type: ignore" and
# "noqa", both with starting #
# flake8: noqa: E302,E305
# fmt: off
# this is required so that the mkinit script will generate the init imports only in this
# section
# <AUTOGEN_INIT>
def lazy_import(module_name, submodules, submod_attrs):
    """Return a PEP 562 ``__getattr__`` resolving names of *module_name* lazily.

    *submodules* is a set of submodule names importable as attributes;
    *submod_attrs* maps each submodule name to the attribute names it defines.
    Setting the ``EAGER_IMPORT`` environment variable resolves everything
    immediately instead of on first access.
    """
    import importlib
    import os

    # invert submod_attrs: attribute name -> submodule that defines it
    name_to_submod = {}
    for submod, attr_names in submod_attrs.items():
        for attr_name in attr_names:
            name_to_submod[attr_name] = submod

    def __getattr__(name):
        if name in submodules:
            # the requested name is itself a submodule: return the module object
            attr = importlib.import_module(f'{module_name}.{name}')
        elif name in name_to_submod:
            # the name lives inside a submodule: import it, then pull the attribute
            owner = importlib.import_module(
                f'{module_name}.{name_to_submod[name]}'
            )
            attr = getattr(owner, name)
        else:
            raise AttributeError(f'No {module_name} attribute {name}')
        # cache the resolved object in the module globals so later
        # lookups bypass __getattr__ entirely
        globals()[name] = attr
        return attr

    if os.environ.get('EAGER_IMPORT', ''):
        # eager mode: touch every submodule and every attribute right away
        for name in name_to_submod.values():
            __getattr__(name)
        for attrs in submod_attrs.values():
            for attr in attrs:
                __getattr__(attr)
    return __getattr__
# PEP 562: bind the module-level __getattr__ so that the names listed below
# are imported only on first attribute access (region autogenerated by mkinit)
__getattr__ = lazy_import(
    __name__,
    submodules={
        'abc',
        'densesparseoutputpytorchfault',
        'fpquantizedoutputpytorchfault',
        'mixins',
        'outputpytorchfault',
        'outputpytorchmonitor',
        'plugins',
        'pruneddensetosparseactivationpytorchfault',
        'pruneddensetosparseweightpytorchfault',
        'quantizedoutputpytorchfault',
        'snnoutputnorsefault',
        'weightpytorchfault',
    },
    submod_attrs={
        'abc': [
            'FaultABC',
            'InjectionABC',
            'MonitorABC',
            'PyTorchInjectionABC',
            'faultabc',
            'injectionabc',
            'monitorabc',
            'pytorchinjectionabc',
        ],
        'densesparseoutputpytorchfault': [
            'DenseSparseOutputPyTorchFault',
        ],
        'fpquantizedoutputpytorchfault': [
            'FPQuantizedOutputPyTorchFault',
        ],
        'mixins': [
            'PyTorchMaskMixin',
            'PyTorchMonitorPostProcessorMixin',
            'PyTorchSparseInterfaceMixin',
            'PyTorchTensorObjectValidatorMixin',
            'pytorchmaskmixin',
            'pytorchmonitorpostprocessormixin',
            'pytorchquantizationmixin',
            'pytorchsparseinterfacemixin',
            'pytorchtensorobjectvalidatormixin',
            'torch_geometric_mean',
        ],
        'outputpytorchfault': [
            'OutputPyTorchFault',
        ],
        'outputpytorchmonitor': [
            'OutputPyTorchMonitor',
        ],
        'plugins': [
            'AutoPyTorchMaskPlugin',
            'CSVStoragePluginABC',
            'CuPyPyTorchMaskPlugin',
            'CustomBase',
            'CustomBaseClass',
            'ExperimentRun',
            'ExperimentRunBaseMixin',
            'ExperimentRunProtocol',
            'Fault',
            'FaultBaseMixin',
            'FaultProtocol',
            'IndexingPlugin',
            'IndexingPluginABC',
            'Injection',
            'InjectionProtocol',
            'LowLevelTorchMaskPluginABC',
            'Monitor',
            'MonitorBaseMixin',
            'MonitorProtocol',
            'NumPyPyTorchMaskPlugin',
            'PandasCSVStoragePlugin',
            'PolymorphicMixin',
            'PyTorchSparseInterfacePluginABC',
            'SQLStoragePluginABC',
            'SQLiteStoragePlugin',
            'Session',
            'SessionBaseMixin',
            'SessionProtocol',
            'StoragePluginABC',
            'abc',
            'autopytorchmaskplugin',
            'csv',
            'csvdataclasses',
            'csvstorageplugin',
            'csvstoragepluginabc',
            'cupypytorchmaskplugin',
            'fix_pysqlite',
            'indexing',
            'indexingplugin',
            'indexingpluginabc',
            'lowleveltorchmaskpluginabc',
            'mask',
            'numpypytorchmaskplugin',
            'pysqlite_begin_emission_fix_on_connect',
            'pytorchsparseinterfacepluginabc',
            'set_sqlite_pragma',
            'sparse',
            'sql',
            'sqlalchemy_begin_emission_pysqlite',
            'sqldataclasses',
            'sqlitestorageplugin',
            'sqlstoragepluginabc',
            'sqlutils',
            'storage',
            'storagepluginabc',
            'storagetypings',
            'utils',
        ],
        # NOTE(review): both the *activation* and the *weight* modules export
        # the very same name 'PrunedDenseToSparseWeightPyTorchFault'; with the
        # duplicate key, the later dict entry wins inside lazy_import's
        # name_to_submod mapping, so the name resolves to the weight module.
        # This looks like a copy-paste in the activation module -- confirm
        # upstream before regenerating this file with mkinit.
        'pruneddensetosparseactivationpytorchfault': [
            'PrunedDenseToSparseWeightPyTorchFault',
        ],
        'pruneddensetosparseweightpytorchfault': [
            'PrunedDenseToSparseWeightPyTorchFault',
        ],
        'quantizedoutputpytorchfault': [
            'QuantizedOutputPyTorchFault',
        ],
        'snnoutputnorsefault': [
            'SNNOutputNorseFault',
        ],
        'weightpytorchfault': [
            'WeightPyTorchFault',
        ],
    },
)


def __dir__():
    # advertise the lazily-importable names to dir()
    return __all__


# flat list of every public (lazily importable) name of this package
__all__ = ['AutoPyTorchMaskPlugin', 'CSVStoragePluginABC',
           'CuPyPyTorchMaskPlugin', 'CustomBase', 'CustomBaseClass',
           'DenseSparseOutputPyTorchFault', 'ExperimentRun',
           'ExperimentRunBaseMixin', 'ExperimentRunProtocol',
           'FPQuantizedOutputPyTorchFault', 'Fault', 'FaultABC',
           'FaultBaseMixin', 'FaultProtocol', 'IndexingPlugin',
           'IndexingPluginABC', 'Injection', 'InjectionABC',
           'InjectionProtocol', 'LowLevelTorchMaskPluginABC', 'Monitor',
           'MonitorABC', 'MonitorBaseMixin', 'MonitorProtocol',
           'NumPyPyTorchMaskPlugin', 'OutputPyTorchFault',
           'OutputPyTorchMonitor', 'PandasCSVStoragePlugin',
           'PolymorphicMixin', 'PrunedDenseToSparseWeightPyTorchFault',
           'PyTorchInjectionABC', 'PyTorchMaskMixin',
           'PyTorchMonitorPostProcessorMixin', 'PyTorchSparseInterfaceMixin',
           'PyTorchSparseInterfacePluginABC',
           'PyTorchTensorObjectValidatorMixin', 'QuantizedOutputPyTorchFault',
           'SNNOutputNorseFault', 'SQLStoragePluginABC', 'SQLiteStoragePlugin',
           'Session', 'SessionBaseMixin', 'SessionProtocol',
           'StoragePluginABC', 'WeightPyTorchFault', 'abc',
           'autopytorchmaskplugin', 'csv', 'csvdataclasses',
           'csvstorageplugin', 'csvstoragepluginabc', 'cupypytorchmaskplugin',
           'densesparseoutputpytorchfault', 'faultabc', 'fix_pysqlite',
           'fpquantizedoutputpytorchfault', 'indexing', 'indexingplugin',
           'indexingpluginabc', 'injectionabc', 'lowleveltorchmaskpluginabc',
           'mask', 'mixins', 'monitorabc', 'numpypytorchmaskplugin',
           'outputpytorchfault', 'outputpytorchmonitor', 'plugins',
           'pruneddensetosparseactivationpytorchfault',
           'pruneddensetosparseweightpytorchfault',
           'pysqlite_begin_emission_fix_on_connect', 'pytorchinjectionabc',
           'pytorchmaskmixin', 'pytorchmonitorpostprocessormixin',
           'pytorchquantizationmixin', 'pytorchsparseinterfacemixin',
           'pytorchsparseinterfacepluginabc',
           'pytorchtensorobjectvalidatormixin', 'quantizedoutputpytorchfault',
           'set_sqlite_pragma', 'snnoutputnorsefault', 'sparse', 'sql',
           'sqlalchemy_begin_emission_pysqlite', 'sqldataclasses',
           'sqlitestorageplugin', 'sqlstoragepluginabc', 'sqlutils', 'storage',
           'storagepluginabc', 'storagetypings', 'torch_geometric_mean',
           'utils', 'weightpytorchfault']
# </AUTOGEN_INIT>
| 8,989 | 36.302905 | 88 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/densesparseoutputpytorchfault.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import typing

import enpheeph.injections.abc.faultabc
import enpheeph.injections.abc.pytorchinjectionabc
import enpheeph.injections.mixins.pytorchmaskmixin
import enpheeph.injections.mixins.pytorchsparseinterfacemixin
import enpheeph.injections.mixins.pytorchtensorobjectvalidatormixin
import enpheeph.injections.plugins.indexing.abc.indexingpluginabc
import enpheeph.injections.plugins.mask.abc.lowleveltorchmaskpluginabc
import enpheeph.utils.dataclasses
import enpheeph.utils.enums
# we move this import down
if typing.TYPE_CHECKING:
import torch
class DenseSparseOutputPyTorchFault(
    enpheeph.injections.abc.faultabc.FaultABC,
    enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC,
    enpheeph.injections.mixins.pytorchmaskmixin.PyTorchMaskMixin,
    enpheeph.injections.mixins.pytorchsparseinterfacemixin.PyTorchSparseInterfaceMixin,
    (
        # fmt: off
        enpheeph.injections.mixins.
        pytorchtensorobjectvalidatormixin.PyTorchTensorObjectValidatorMixin
        # fmt: on
    ),
):
    """Fault injection on a module output via its sparse representation.

    The forward hook extracts the sparse component of the output selected by
    the location, injects the mask into it, writes it back and returns the
    densified result so PyTorch propagates the faulty output.
    """

    # where the fault is injected (module name, indices, bit positions, ...)
    location: enpheeph.utils.dataclasses.FaultLocation
    low_level_plugin: (
        # black has issues with long names
        # fmt: off
        enpheeph.injections.plugins.mask.
        lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
        # fmt: on
    )
    # injection mask, built lazily by the mask mixin
    mask: typing.Optional["torch.Tensor"]

    def __init__(
        self,
        indexing_plugin: (
            enpheeph.injections.plugins.indexing.abc.indexingpluginabc.IndexingPluginABC
        ),
        location: enpheeph.utils.dataclasses.FaultLocation,
        low_level_torch_plugin: (
            # black has issues with long names
            # fmt: off
            enpheeph.injections.plugins.mask.
            lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
            # fmt: on
        ),
    ) -> None:
        """Store the plugins and the fault location; no hook is set yet."""
        super().__init__()

        self.indexing_plugin = indexing_plugin
        self.location = location
        self.low_level_plugin = low_level_torch_plugin

        self.handle = None
        self.mask = None

    @property
    def module_name(self) -> str:
        # name of the module this fault attaches to, taken from the location
        return self.location.module_name

    def output_fault_hook(
        self,
        module: "torch.nn.Module",
        input: typing.Union[typing.Tuple["torch.Tensor"], "torch.Tensor"],
        output: "torch.Tensor",
    ) -> "torch.Tensor":
        """PyTorch forward hook: return the faulty, densified output.

        FIX: the return annotation used to be ``-> None`` although the hook
        returns the modified output; a forward hook must return the new
        output for PyTorch to use it downstream.
        """
        # sparse component (indices or values) selected by the location
        target = self.get_sparse_injection_parameter(output)

        self.indexing_plugin.select_active_dimensions(
            [enpheeph.utils.enums.DimensionType.Tensor],
            autoshift_to_boundaries=True,
        )

        # NOTE(review): tensor_only=None differs from the True/False used by
        # the sibling injections -- presumably handled specially by the mask
        # mixin; confirm against PyTorchMaskMixin
        self.generate_mask(target, tensor_only=None, force_recompute=True)
        target = self.inject_mask(target, tensor_only=None)

        output = self.set_sparse_injection_parameter(output, target).to_dense()

        # reset so a shared indexing plugin starts clean on the next use
        self.indexing_plugin.reset_active_dimensions()

        return output

    def setup(
        self,
        module: "torch.nn.Module",
    ) -> "torch.nn.Module":
        """Register the fault hook so it fires on every forward pass."""
        self.handle = module.register_forward_hook(self.output_fault_hook)

        return module
| 4,490 | 34.085938 | 88 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/outputpytorchmonitor.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import typing
import enpheeph.injections.abc.monitorabc
import enpheeph.injections.plugins.indexing.abc.indexingpluginabc
import enpheeph.injections.abc.pytorchinjectionabc
import enpheeph.injections.mixins.pytorchmonitorpostprocessormixin
import enpheeph.injections.plugins.storage.abc.storagepluginabc
import enpheeph.utils.dataclasses
import enpheeph.utils.enums
# so flake does not complain about the imports being not at the top after the if
if typing.TYPE_CHECKING:
import torch
class OutputPyTorchMonitor(
    enpheeph.injections.abc.monitorabc.MonitorABC,
    enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC,
    (
        # black has issues with very long names
        # fmt: off
        enpheeph.injections.mixins.
        pytorchmonitorpostprocessormixin.PyTorchMonitorPostProcessorMixin
        # fmt: on
    ),
):
    """Monitor that records metrics of a module output via a forward hook.

    On every forward pass the configured metrics are computed on the selected
    slice of the output and saved through the storage plugin.
    """

    # metrics to compute on the monitored output slice
    enabled_metrics: enpheeph.utils.enums.MonitorMetric
    # we need the index plugin to simplify the handling of the indices
    indexing_plugin: (
        enpheeph.injections.plugins.indexing.abc.indexingpluginabc.IndexingPluginABC
    )
    location: enpheeph.utils.dataclasses.MonitorLocation
    # whether the hook should run before all other registered forward hooks
    move_to_first: bool
    storage_plugin: (
        enpheeph.injections.plugins.storage.abc.storagepluginabc.StoragePluginABC
    )

    def __init__(
        self,
        indexing_plugin: (
            enpheeph.injections.plugins.indexing.abc.indexingpluginabc.IndexingPluginABC
        ),
        location: enpheeph.utils.dataclasses.MonitorLocation,
        enabled_metrics: enpheeph.utils.enums.MonitorMetric,
        storage_plugin: (
            enpheeph.injections.plugins.storage.abc.storagepluginabc.StoragePluginABC
        ),
        move_to_first: bool = True,
    ) -> None:
        """Store plugins, location and metric selection; no hook is set yet."""
        super().__init__()

        self.indexing_plugin = indexing_plugin
        self.location = location
        self.enabled_metrics = enabled_metrics
        self.storage_plugin = storage_plugin
        self.move_to_first = move_to_first

        self.handle = None

    @property
    def module_name(self) -> str:
        # name of the monitored module, as recorded in the location
        return self.location.module_name

    # this is compatible with PyTorch hook arguments and return type
    def output_monitor_hook(
        self,
        module: "torch.nn.Module",
        input: typing.Union[typing.Tuple["torch.Tensor"], "torch.Tensor"],
        output: "torch.Tensor",
    ) -> None:
        """Forward hook: post-process the selected output slice and store it."""
        self.indexing_plugin.select_active_dimensions(
            [
                enpheeph.utils.enums.DimensionType.Batch,
                enpheeph.utils.enums.DimensionType.Tensor,
            ],
            autoshift_to_boundaries=True,
            fill_empty_index=True,
            filler=slice(None, None),
        )
        # NOTE: no support for bit_index yet
        postprocess = self.postprocess(
            output[
                self.indexing_plugin.join_indices(
                    dimension_indices=self.location.dimension_index,
                )
            ]
        )
        self.storage_plugin.add_payload(location=self.location, payload=postprocess)

    def setup(self, module: "torch.nn.Module") -> "torch.nn.Module":
        """Register the monitor hook, optionally moving it before other hooks."""
        self.handle = module.register_forward_hook(self.output_monitor_hook)
        if self.move_to_first:
            # we push the current hook to the beginning of the queue,
            # as this is
            # for a monitor and its deployment must be before
            # the fault injection
            # we use move_to_end with last=False to move it to the beginning
            # of the OrderedDict
            # mypy has issues with Optional being set before, as it does not check them
            # sometimes the following 2 lines fail, use type: ignore[union-attr]
            # for both
            self.handle.hooks_dict_ref().move_to_end(
                self.handle.id,
                last=False,
            )
        return module
| 5,405 | 37.070423 | 88 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/weightpytorchfault.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import copy
import typing
import enpheeph.injections.abc.faultabc
import enpheeph.injections.abc.pytorchinjectionabc
import enpheeph.injections.mixins.pytorchmaskmixin
import enpheeph.injections.mixins.pytorchtensorobjectvalidatormixin
import enpheeph.injections.plugins.mask.abc.lowleveltorchmaskpluginabc
import enpheeph.utils.dataclasses
# we move this import down
if typing.TYPE_CHECKING:
import torch
# no need to use handles here as the change is done when the injection is setup
class WeightPyTorchFault(
    enpheeph.injections.abc.faultabc.FaultABC,
    enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC,
    enpheeph.injections.mixins.pytorchmaskmixin.PyTorchMaskMixin,
    (
        # fmt: off
        enpheeph.injections.mixins.
        pytorchtensorobjectvalidatormixin.PyTorchTensorObjectValidatorMixin
        # fmt: on
    ),
):
    """Fault injection on a dense module parameter (e.g. a layer weight).

    ``setup`` replaces the parameter with its masked version (keeping a deep
    copy as backup) and ``teardown`` restores the original, so no forward
    hook is required.
    """

    # deep copy of the original parameter, restored by teardown;
    # None while no injection is active
    backup: typing.Optional["torch.Tensor"]
    # we need the index plugin to simplify the handling of the indices
    indexing_plugin: (
        enpheeph.injections.plugins.indexing.abc.indexingpluginabc.IndexingPluginABC
    )
    location: enpheeph.utils.dataclasses.FaultLocation
    low_level_plugin: (
        # black has issues with long names
        # fmt: off
        enpheeph.injections.plugins.mask.
        lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
        # fmt: on
    )
    # injection mask, built lazily by the mask mixin
    mask: typing.Optional["torch.Tensor"]

    def __init__(
        self,
        # FIX: this annotation used to name the *module*
        # (...abc.indexingpluginabc) instead of the ABC class; it now matches
        # the class-level annotation above and the sibling injection classes
        indexing_plugin: (
            enpheeph.injections.plugins.indexing.abc.indexingpluginabc.IndexingPluginABC
        ),
        location: enpheeph.utils.dataclasses.FaultLocation,
        low_level_torch_plugin: (
            # black has issues with long names
            # fmt: off
            enpheeph.injections.plugins.mask.
            lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
            # fmt: on
        ),
    ) -> None:
        """Store the plugins and the fault location; no injection happens yet."""
        super().__init__()

        self.indexing_plugin = indexing_plugin
        self.location = location
        self.low_level_plugin = low_level_torch_plugin

        self.backup = None
        self.handle = None
        self.mask = None

    @property
    def module_name(self) -> str:
        # name of the module whose parameter is targeted by this fault
        return self.location.module_name

    def inject_weight(
        self,
        module: "torch.nn.Module",
    ) -> "torch.nn.Module":
        """Replace the target parameter with its masked version.

        Raises:
            ValueError: if an injection is already active (backup present).
        """
        if self.backup is not None:
            raise ValueError(
                "This method must be called only when setting up the injection"
            )
        weight = getattr(
            module,
            # sometimes type: ignore[arg-type] might be required for the following line
            # mypy gives error as parameter_name can be None, but it cannot be since
            # the dataclass checks for the validity
            # so we simply cast it here
            typing.cast(str, self.location.parameter_name),
        )
        # keep a deep copy so restore_weight can undo the injection
        self.backup = copy.deepcopy(weight)

        self.generate_mask(
            weight,
            tensor_only=True,
            batches_exist=False,
        )
        masked_weight = self.inject_mask(
            weight,
            tensor_only=True,
            batches_exist=False,
        )

        setattr(
            module,
            # sometimes type: ignore[arg-type] might be required for the following line
            # mypy gives error as parameter_name can be None, but it cannot be since
            # the dataclass checks for the validity
            # so we simply cast it here
            typing.cast(str, self.location.parameter_name),
            masked_weight,
        )
        return module

    def restore_weight(
        self,
        module: "torch.nn.Module",
    ) -> "torch.nn.Module":
        """Put the backed-up original parameter back on the module.

        Raises:
            ValueError: if no injection is active (no backup to restore).
        """
        if self.backup is None:
            raise ValueError(
                "This method must be called only when tearing down the injection"
            )
        setattr(  # type: ignore[unreachable]
            module,
            typing.cast(str, self.location.parameter_name),
            copy.deepcopy(self.backup),
        )
        # drop the backup so a new injection can be set up afterwards
        self.backup = None
        return module

    def setup(
        self,
        module: "torch.nn.Module",
    ) -> "torch.nn.Module":
        """Activate the fault by overwriting the parameter (hook-free)."""
        module = self.inject_weight(module)

        return module

    # we need to override the teardown as it is not common to the normal hook
    # teardowns
    def teardown(
        self,
        module: "torch.nn.Module",
    ) -> "torch.nn.Module":
        """Deactivate the fault by restoring the original parameter."""
        module = self.restore_weight(module)

        return module
| 5,994 | 32.49162 | 87 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/plugins/__init__.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# we ignore mypy/flake8/black as this file is autogenerated
# we ignore this specific error because of AUTOGEN_INIT
# mypy: ignore-errors
# the following flake8 syntax is wrong, as it will be read as generic noqa, but we use
# it to remember the errors appearing in the __init__.py
# additionally this is not caught by pygrep-hooks as it counts only "type: ignore" and
# "noqa", both with starting #
# flake8: noqa: E302,E305
# fmt: off
# this is required so that the mkinit script will generate the init imports only in this
# section
# <AUTOGEN_INIT>
def lazy_import(module_name, submodules, submod_attrs):
    """Build a PEP 562 ``__getattr__`` that imports this package's names lazily.

    ``submodules`` is a set of directly importable submodule names;
    ``submod_attrs`` maps each submodule name to the attribute names it
    defines.  Setting the ``EAGER_IMPORT`` environment variable resolves
    everything immediately instead of on first access.
    """
    import importlib
    import os

    # invert submod_attrs: attribute name -> submodule that defines it
    name_to_submod = {
        func: mod for mod, funcs in submod_attrs.items()
        for func in funcs
    }

    def __getattr__(name):
        if name in submodules:
            # the requested name is itself a submodule: return the module object
            attr = importlib.import_module(
                '{module_name}.{name}'.format(
                    module_name=module_name, name=name)
            )
        elif name in name_to_submod:
            # the name lives inside a submodule: import it and pull the attribute
            submodname = name_to_submod[name]
            module = importlib.import_module(
                '{module_name}.{submodname}'.format(
                    module_name=module_name, submodname=submodname)
            )
            attr = getattr(module, name)
        else:
            raise AttributeError(
                'No {module_name} attribute {name}'.format(
                    module_name=module_name, name=name))
        # cache the resolved object so later lookups bypass __getattr__
        globals()[name] = attr
        return attr

    if os.environ.get('EAGER_IMPORT', ''):
        # eager mode: touch every submodule and every attribute right away
        for name in name_to_submod.values():
            __getattr__(name)
        for attrs in submod_attrs.values():
            for attr in attrs:
                __getattr__(attr)
    return __getattr__
__getattr__ = lazy_import(
__name__,
submodules={
'indexing',
'mask',
'sparse',
'storage',
},
submod_attrs={
'indexing': [
'IndexingPlugin',
'IndexingPluginABC',
'abc',
'indexingplugin',
'indexingpluginabc',
],
'mask': [
'AutoPyTorchMaskPlugin',
'CuPyPyTorchMaskPlugin',
'LowLevelTorchMaskPluginABC',
'NumPyPyTorchMaskPlugin',
'abc',
'autopytorchmaskplugin',
'cupypytorchmaskplugin',
'lowleveltorchmaskpluginabc',
'numpypytorchmaskplugin',
],
'sparse': [
'PyTorchSparseInterfacePluginABC',
'abc',
'pytorchsparseinterfacepluginabc',
],
'storage': [
'CSVStoragePluginABC',
'CustomBase',
'CustomBaseClass',
'ExperimentRun',
'ExperimentRunBaseMixin',
'ExperimentRunProtocol',
'Fault',
'FaultBaseMixin',
'FaultProtocol',
'Injection',
'InjectionProtocol',
'Monitor',
'MonitorBaseMixin',
'MonitorProtocol',
'PandasCSVStoragePlugin',
'PolymorphicMixin',
'SQLStoragePluginABC',
'SQLiteStoragePlugin',
'Session',
'SessionBaseMixin',
'SessionProtocol',
'StoragePluginABC',
'abc',
'csv',
'csvdataclasses',
'csvstorageplugin',
'csvstoragepluginabc',
'fix_pysqlite',
'pysqlite_begin_emission_fix_on_connect',
'set_sqlite_pragma',
'sql',
'sqlalchemy_begin_emission_pysqlite',
'sqldataclasses',
'sqlitestorageplugin',
'sqlstoragepluginabc',
'sqlutils',
'storagepluginabc',
'storagetypings',
'utils',
],
},
)
def __dir__():
    """Advertise the lazily-provided names (PEP 562 module ``__dir__``)."""
    return __all__
# mkinit-generated: the full public API of this package, kept in sync with
# the ``lazy_import`` call above
__all__ = ['AutoPyTorchMaskPlugin', 'CSVStoragePluginABC',
           'CuPyPyTorchMaskPlugin', 'CustomBase', 'CustomBaseClass',
           'ExperimentRun', 'ExperimentRunBaseMixin', 'ExperimentRunProtocol',
           'Fault', 'FaultBaseMixin', 'FaultProtocol', 'IndexingPlugin',
           'IndexingPluginABC', 'Injection', 'InjectionProtocol',
           'LowLevelTorchMaskPluginABC', 'Monitor', 'MonitorBaseMixin',
           'MonitorProtocol', 'NumPyPyTorchMaskPlugin',
           'PandasCSVStoragePlugin', 'PolymorphicMixin',
           'PyTorchSparseInterfacePluginABC', 'SQLStoragePluginABC',
           'SQLiteStoragePlugin', 'Session', 'SessionBaseMixin',
           'SessionProtocol', 'StoragePluginABC', 'abc',
           'autopytorchmaskplugin', 'csv', 'csvdataclasses',
           'csvstorageplugin', 'csvstoragepluginabc', 'cupypytorchmaskplugin',
           'fix_pysqlite', 'indexing', 'indexingplugin', 'indexingpluginabc',
           'lowleveltorchmaskpluginabc', 'mask', 'numpypytorchmaskplugin',
           'pysqlite_begin_emission_fix_on_connect',
           'pytorchsparseinterfacepluginabc', 'set_sqlite_pragma', 'sparse',
           'sql', 'sqlalchemy_begin_emission_pysqlite', 'sqldataclasses',
           'sqlitestorageplugin', 'sqlstoragepluginabc', 'sqlutils', 'storage',
           'storagepluginabc', 'storagetypings', 'utils']
# </AUTOGEN_INIT>
| 6,027 | 34.251462 | 88 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/plugins/sparse/__init__.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# we ignore mypy/flake8/black as this file is autogenerated
# we ignore this specific error because of AUTOGEN_INIT
# mypy: ignore-errors
# the following flake8 syntax is wrong, as it will be read as generic noqa, but we use
# it to remember the errors appearing in the __init__.py
# additionally this is not caught by pygrep-hooks as it counts only "type: ignore" and
# "noqa", both with starting #
# flake8: noqa: E302,E305
# fmt: off
# this is required so that the mkinit script will generate the init imports only in this
# section
# <AUTOGEN_INIT>
def lazy_import(module_name, submodules, submod_attrs):
    """Create a PEP 562 ``__getattr__`` hook for *module_name*.

    The returned hook imports the listed ``submodules`` on first access and
    resolves each attribute in ``submod_attrs`` from the submodule that
    defines it.  Setting the ``EAGER_IMPORT`` environment variable to any
    non-empty value resolves everything immediately instead.
    """
    import importlib
    import os

    # reverse lookup: attribute name -> submodule that provides it
    attr_to_submodule = {
        attribute: submodule
        for submodule, attributes in submod_attrs.items()
        for attribute in attributes
    }

    def __getattr__(name):
        if name in submodules:
            # the name is a submodule: import and return the module object
            resolved = importlib.import_module(f'{module_name}.{name}')
        elif name in attr_to_submodule:
            # the name is an attribute: import its submodule, then fetch it
            owner = importlib.import_module(
                f'{module_name}.{attr_to_submodule[name]}'
            )
            resolved = getattr(owner, name)
        else:
            raise AttributeError(f'No {module_name} attribute {name}')
        # cache the result in the module globals so the hook runs once per name
        globals()[name] = resolved
        return resolved

    if os.environ.get('EAGER_IMPORT', ''):
        # resolve every known name right away
        for submodule_name in attr_to_submodule.values():
            __getattr__(submodule_name)
        for attribute_names in submod_attrs.values():
            for attribute_name in attribute_names:
                __getattr__(attribute_name)
    return __getattr__
# mkinit-generated lazy loader: the ``abc`` submodule and its public
# attributes are imported on first access via PEP 562
__getattr__ = lazy_import(
    __name__,
    submodules={
        'abc',
    },
    submod_attrs={
        'abc': [
            'PyTorchSparseInterfacePluginABC',
            'pytorchsparseinterfacepluginabc',
        ],
    },
)
def __dir__():
    """Advertise the lazily-provided names (PEP 562 module ``__dir__``)."""
    return __all__
# mkinit-generated: public API of this package, kept in sync with the
# ``lazy_import`` call above
__all__ = ['PyTorchSparseInterfacePluginABC', 'abc',
           'pytorchsparseinterfacepluginabc']
# </AUTOGEN_INIT>
| 3,677 | 33.698113 | 88 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/plugins/mask/numpypytorchmaskplugin.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import typing
import enpheeph.injections.plugins.mask.abc.lowleveltorchmaskpluginabc
import enpheeph.utils.functions
import enpheeph.utils.imports
if typing.TYPE_CHECKING or (
enpheeph.utils.imports.MODULE_AVAILABILITY[enpheeph.utils.imports.NUMPY_NAME]
and enpheeph.utils.imports.MODULE_AVAILABILITY[enpheeph.utils.imports.TORCH_NAME]
):
import numpy
import torch
class NumPyPyTorchMaskPlugin(
    # we disable black to avoid too long line issue in flake8
    # fmt: off
    (
        enpheeph.injections.plugins.mask.
        lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
    ),
    # fmt: on
):
    """NumPy-backed low-level mask plugin for CPU ``torch.Tensor`` objects.

    Converts tensors to/from ``numpy.ndarray`` and builds fault-injection
    mask arrays, either from an index expression or from a boolean mask.

    NOTE(review): the base class is reached through
    ``...plugins.mask.lowleveltorchmaskpluginabc`` (a lazy attribute of the
    ``mask`` package) rather than through the ``...mask.abc`` path imported
    at the top of the module — confirm both resolve to the same ABC.
    """

    def to_torch(self, array: "numpy.ndarray") -> "torch.Tensor":
        """Wrap *array* as a ``torch.Tensor`` sharing the same memory."""
        return torch.from_numpy(array)

    def from_torch(self, tensor: "torch.Tensor") -> "numpy.ndarray":
        """Return *tensor* as a ``numpy.ndarray`` (CPU tensors only)."""
        return tensor.numpy()

    def to_bitwise_type(self, array: "numpy.ndarray") -> "numpy.ndarray":
        """Reinterpret *array* as unsigned ints of the same item size.

        Enables bitwise fault operations on arrays of any dtype.
        """
        return array.view(numpy.dtype(f"u{array.dtype.itemsize}"))

    def to_target_type(
        self, array: "numpy.ndarray", target: "numpy.ndarray"
    ) -> "numpy.ndarray":
        """Reinterpret *array* with the dtype of *target* (no copy)."""
        return array.view(target.dtype)

    def make_mask_array_from_index(
        self,
        int_mask: int,
        mask_index: enpheeph.utils.typings.AnyIndexType,
        int_fill_value: int,
        shape: typing.Sequence[int],
        torch_placeholder: "torch.Tensor",
    ) -> "numpy.ndarray":
        """Build a mask array of *shape*: *int_fill_value* everywhere except
        at *mask_index*, which is set to *int_mask*.  The result is viewed
        with the dtype of *torch_placeholder*.
        """
        # we convert the placeholder
        placeholder = self.from_torch(torch_placeholder)
        # we convert the integer value representing the fill value into
        # an element with unsigned type and correct size
        fill_value = numpy.array(
            int_fill_value,
            dtype=numpy.dtype(f"u{str(placeholder.dtype.itemsize)}"),
        )
        # we broadcast it onto the correct shape
        # NOTE: broadcast_to creates a view, so the view is not writeable
        # we have to make a copy of it to be able to write the mask in it
        mask = numpy.broadcast_to(fill_value, shape).copy()
        # we set the indices to the mask value
        mask[mask_index] = int_mask
        # we convert the mask to the right dtype
        mask = mask.view(dtype=placeholder.dtype)
        # we return the mask
        return mask

    def make_mask_array_from_mask(
        self,
        int_mask: int,
        mask: enpheeph.utils.typings.AnyMaskType,
        int_fill_value: int,
        shape: typing.Sequence[int],
        torch_placeholder: "torch.Tensor",
    ) -> "numpy.ndarray":
        """Build a mask array of *shape*: *int_mask* where the boolean *mask*
        is True, *int_fill_value* elsewhere.  The result is viewed with the
        dtype of *torch_placeholder*.
        """
        # we convert the placeholder
        placeholder = self.from_torch(torch_placeholder)
        # we convert the integer value representing the fill value into
        # an element with unsigned type and correct size
        fill_value = numpy.array(
            int_fill_value,
            dtype=numpy.dtype(f"u{str(placeholder.dtype.itemsize)}"),
        )
        # we broadcast it onto the correct shape
        # NOTE: broadcast_to creates a view, so the view is not writeable
        # we have to make a copy of it to be able to write the mask in it
        fill_value_array = numpy.broadcast_to(fill_value, shape).copy()
        # we create an array with the same shape as the input for the int_mask
        # as then we will choose the correct element using numpy.where
        # since our mask is a boolean array
        int_mask_array: "numpy.ndarray" = (
            numpy.ones(
                shape,
                dtype=numpy.dtype(f"u{str(placeholder.dtype.itemsize)}"),
            )
            * int_mask
        )
        # we set the indices to the mask value
        # mask must become an array
        final_mask = numpy.where(numpy.asarray(mask), int_mask_array, fill_value_array)
        # we convert the mask to the right dtype
        final_mask = final_mask.view(dtype=placeholder.dtype)
        # we return the mask
        return final_mask

    def make_mask_array(
        self,
        int_mask: int,
        int_fill_value: int,
        shape: typing.Sequence[int],
        torch_placeholder: "torch.Tensor",
        mask: typing.Optional[enpheeph.utils.typings.AnyMaskType] = None,
        mask_index: typing.Optional[enpheeph.utils.typings.AnyIndexType] = None,
    ) -> "numpy.ndarray":
        """Dispatch to the index- or mask-based builder.

        Exactly one of *mask* and *mask_index* must be provided.

        Raises:
            ValueError: if both or neither of *mask*/*mask_index* are given.
        """
        if mask is None and mask_index is None:
            raise ValueError("only one between mask and mask_index can be None")
        elif mask is not None and mask_index is not None:
            raise ValueError(
                "at most one between mask and mask_index can be different from None"
            )
        elif mask is None:
            return self.make_mask_array_from_index(
                int_mask=int_mask,
                mask_index=mask_index,
                int_fill_value=int_fill_value,
                shape=shape,
                torch_placeholder=torch_placeholder,
            )
        elif mask_index is None:
            return self.make_mask_array_from_mask(
                int_mask=int_mask,
                mask=mask,
                int_fill_value=int_fill_value,
                shape=shape,
                torch_placeholder=torch_placeholder,
            )
| 6,674 | 38.97006 | 87 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/plugins/mask/cupypytorchmaskplugin.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import typing
import enpheeph.injections.plugins.mask.abc.lowleveltorchmaskpluginabc
import enpheeph.utils.functions
import enpheeph.utils.imports
if typing.TYPE_CHECKING or (
enpheeph.utils.imports.MODULE_AVAILABILITY[enpheeph.utils.imports.CUPY_NAME]
and enpheeph.utils.imports.MODULE_AVAILABILITY[enpheeph.utils.imports.TORCH_NAME]
):
import cupy
import torch
import torch.utils.dlpack
class CuPyPyTorchMaskPlugin(
    # we disable black to avoid too long line issue in flake8
    # fmt: off
    (
        enpheeph.injections.plugins.mask.
        lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
    ),
    # fmt: on
):
    """CuPy-backed low-level mask plugin for CUDA ``torch.Tensor`` objects.

    Mirrors ``NumPyPyTorchMaskPlugin`` but keeps arrays on the GPU,
    exchanging data with PyTorch through DLPack.
    """

    def to_torch(self, array: "cupy.ndarray") -> "torch.Tensor":
        """Convert a CuPy array to a ``torch.Tensor`` via DLPack."""
        return torch.utils.dlpack.from_dlpack(array.toDlpack())

    def from_torch(self, tensor: "torch.Tensor") -> "cupy.ndarray":
        """Convert a CUDA ``torch.Tensor`` to a CuPy array via DLPack."""
        # NOTE(review): cupy.fromDlpack is the legacy DLPack entry point;
        # newer CuPy versions expose cupy.from_dlpack — confirm supported range
        return cupy.fromDlpack(torch.utils.dlpack.to_dlpack(tensor))

    def to_bitwise_type(self, array: "cupy.ndarray") -> "cupy.ndarray":
        """Reinterpret *array* as unsigned ints of the same item size."""
        return array.view(cupy.dtype(f"u{array.dtype.itemsize}"))

    def to_target_type(
        self, array: "cupy.ndarray", target: "cupy.ndarray"
    ) -> "cupy.ndarray":
        """Reinterpret *array* with the dtype of *target* (no copy)."""
        return array.view(target.dtype)

    def make_mask_array_from_index(
        self,
        int_mask: int,
        mask_index: enpheeph.utils.typings.AnyIndexType,
        int_fill_value: int,
        shape: typing.Sequence[int],
        torch_placeholder: "torch.Tensor",
    ) -> "cupy.ndarray":
        """Build a mask array of *shape* on the placeholder's device:
        *int_fill_value* everywhere except at *mask_index* (= *int_mask*),
        viewed with the dtype of *torch_placeholder*.
        """
        # we convert the placeholder
        placeholder = self.from_torch(torch_placeholder)
        # we convert the integer value representing the fill value into
        # an element with unsigned type and correct size, as well as correct
        # device for cupy
        with placeholder.device:
            fill_value = cupy.array(
                int_fill_value,
                dtype=cupy.dtype(f"u{str(placeholder.dtype.itemsize)}"),
            )
            # we broadcast it onto the correct shape
            # we need to copy it to avoid issues with broadcasting
            mask = cupy.broadcast_to(fill_value, shape).copy()
            # we set the indices to the mask value
            mask[mask_index] = int_mask
            # we convert the mask to the right dtype
            mask = mask.view(dtype=placeholder.dtype)
            # we return the mask
            return mask

    def make_mask_array_from_mask(
        self,
        int_mask: int,
        mask: enpheeph.utils.typings.AnyMaskType,
        int_fill_value: int,
        shape: typing.Sequence[int],
        torch_placeholder: "torch.Tensor",
    ) -> "cupy.ndarray":
        """Build a mask array of *shape* on the placeholder's device:
        *int_mask* where the boolean *mask* is True, *int_fill_value*
        elsewhere, viewed with the dtype of *torch_placeholder*.
        """
        # we convert the placeholder
        placeholder = self.from_torch(torch_placeholder)
        # we convert the integer value representing the fill value into
        # an element with unsigned type and correct size, as well as correct
        # device for cupy
        with placeholder.device:
            fill_value = cupy.array(
                int_fill_value,
                dtype=cupy.dtype(f"u{str(placeholder.dtype.itemsize)}"),
            )
            # we broadcast it onto the correct shape
            # we need to copy it to avoid issues with broadcasting
            fill_value_array = cupy.broadcast_to(fill_value, shape).copy()
            # we create an array with the same shape as the input for the int_mask
            # as then we will choose the correct element using cupy.where
            # since our mask is a boolean array
            int_mask_array: "cupy.ndarray" = (
                cupy.ones(
                    shape,
                    dtype=cupy.dtype(f"u{str(placeholder.dtype.itemsize)}"),
                )
                * int_mask
            )
            # we set the indices to the mask value
            # mask must become an array
            final_mask = cupy.where(
                cupy.asarray(mask), int_mask_array, fill_value_array
            )
            # we convert the mask to the right dtype
            final_mask = final_mask.view(dtype=placeholder.dtype)
            # we return the mask
            return final_mask

    def make_mask_array(
        self,
        int_mask: int,
        int_fill_value: int,
        shape: typing.Sequence[int],
        torch_placeholder: "torch.Tensor",
        mask: typing.Optional[enpheeph.utils.typings.AnyMaskType] = None,
        mask_index: typing.Optional[enpheeph.utils.typings.AnyIndexType] = None,
    ) -> "cupy.ndarray":
        """Dispatch to the index- or mask-based builder.

        Exactly one of *mask* and *mask_index* must be provided.

        Raises:
            ValueError: if both or neither of *mask*/*mask_index* are given.
        """
        if mask is None and mask_index is None:
            raise ValueError("only one between mask and mask_index can be None")
        elif mask is not None and mask_index is not None:
            raise ValueError(
                "at most one between mask and mask_index can be different from None"
            )
        elif mask is None:
            return self.make_mask_array_from_index(
                int_mask=int_mask,
                mask_index=mask_index,
                int_fill_value=int_fill_value,
                shape=shape,
                torch_placeholder=torch_placeholder,
            )
        elif mask_index is None:
            return self.make_mask_array_from_mask(
                int_mask=int_mask,
                mask=mask,
                int_fill_value=int_fill_value,
                shape=shape,
                torch_placeholder=torch_placeholder,
            )
| 6,908 | 39.168605 | 85 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/plugins/mask/autopytorchmaskplugin.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import typing
import enpheeph.injections.plugins.mask
import enpheeph.injections.plugins.mask.abc.lowleveltorchmaskpluginabc
import enpheeph.utils.functions
import enpheeph.utils.imports
import enpheeph.utils.typings
if typing.TYPE_CHECKING:
import torch
import enpheeph.injections.plugins.mask.numpypytorchmaskplugin
import enpheeph.injections.plugins.mask.cupypytorchmaskplugin
else:
if enpheeph.utils.imports.MODULE_AVAILABILITY[enpheeph.utils.imports.TORCH_NAME]:
import torch
if enpheeph.utils.imports.MODULE_AVAILABILITY[enpheeph.utils.imports.CUPY_NAME]:
import enpheeph.injections.plugins.mask.cupypytorchmaskplugin
if enpheeph.utils.imports.MODULE_AVAILABILITY[enpheeph.utils.imports.NUMPY_NAME]:
import enpheeph.injections.plugins.mask.numpypytorchmaskplugin
class AutoPyTorchMaskPlugin(
    # we disable black to avoid too long line issue in flake8
    # fmt: off
    (
        enpheeph.injections.plugins.mask.
        lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
    ),
    # fmt: on
):
    """Dispatching mask plugin: routes each call to the NumPy backend for
    CPU tensors/arrays and to the CuPy backend for CUDA ones.
    """

    # torch device-type strings used as keys of FROM_TORCH
    CPU_TORCH_DEVICE = "cpu"
    GPU_TORCH_DEVICE = "cuda"
    # device type -> backend plugin instance; entries are None when the
    # backing library is not installed (instantiated once at class creation)
    FROM_TORCH = {
        CPU_TORCH_DEVICE: enpheeph.injections.plugins.mask.NumPyPyTorchMaskPlugin()
        if enpheeph.utils.imports.MODULE_AVAILABILITY[enpheeph.utils.imports.NUMPY_NAME]
        else None,
        GPU_TORCH_DEVICE: enpheeph.injections.plugins.mask.CuPyPyTorchMaskPlugin()
        if enpheeph.utils.imports.MODULE_AVAILABILITY[enpheeph.utils.imports.CUPY_NAME]
        else None,
    }
    # array library name -> backend plugin instance (shares FROM_TORCH entries)
    TO_TORCH = {
        enpheeph.utils.imports.CUPY_NAME: FROM_TORCH[GPU_TORCH_DEVICE],
        enpheeph.utils.imports.NUMPY_NAME: FROM_TORCH[CPU_TORCH_DEVICE],
    }

    def _get_from_torch_plugin_instance(
        self, tensor: "torch.Tensor"
    ) -> (
        enpheeph.injections.plugins.mask.abc.lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
    ):
        """Return the backend plugin for *tensor*'s device type.

        Raises:
            ValueError: if the backend library for that device is missing.
        """
        plugin_instance = self.FROM_TORCH[tensor.device.type]
        if plugin_instance is None:
            raise ValueError(
                "Check the requirements as the current plugin is " "not available"
            )
        return plugin_instance

    def _get_to_torch_plugin_instance(
        self,
        array: enpheeph.utils.typings.ArrayType,
    ) -> (
        enpheeph.injections.plugins.mask.abc.lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
    ):
        """Return the backend plugin matching *array*'s library.

        Raises:
            ValueError: if the backend library for that array is missing.
        """
        plugin_instance = self.TO_TORCH[
            typing.cast(
                str,
                enpheeph.utils.functions.get_object_library(array),
            )
        ]
        if plugin_instance is None:
            raise ValueError(
                "Check the requirements as the current plugin is " "not available"
            )
        return plugin_instance

    def to_torch(self, array: enpheeph.utils.typings.ArrayType) -> "torch.Tensor":
        """Convert *array* to a ``torch.Tensor`` via its backend plugin."""
        plugin_instance = self._get_to_torch_plugin_instance(array)
        return typing.cast("torch.Tensor", plugin_instance.to_torch(array))

    def from_torch(self, tensor: "torch.Tensor") -> enpheeph.utils.typings.ArrayType:
        """Convert *tensor* to the array type of its device's backend."""
        plugin_instance = self._get_from_torch_plugin_instance(tensor)
        return plugin_instance.from_torch(tensor)

    def to_bitwise_type(
        self, array: enpheeph.utils.typings.ArrayType
    ) -> enpheeph.utils.typings.ArrayType:
        """Reinterpret *array* as its backend's unsigned bitwise type."""
        plugin_instance = self._get_to_torch_plugin_instance(array)
        return plugin_instance.to_bitwise_type(array)

    def to_target_type(
        self,
        array: enpheeph.utils.typings.ArrayType,
        target: enpheeph.utils.typings.ArrayType,
    ) -> enpheeph.utils.typings.ArrayType:
        """Reinterpret *array* with the dtype of *target* via the backend."""
        plugin_instance = self._get_to_torch_plugin_instance(array)
        return plugin_instance.to_target_type(array, target)

    def make_mask_array(
        self,
        int_mask: int,
        int_fill_value: int,
        shape: typing.Sequence[int],
        torch_placeholder: "torch.Tensor",
        mask: typing.Optional[enpheeph.utils.typings.AnyMaskType] = None,
        mask_index: typing.Optional[enpheeph.utils.typings.AnyIndexType] = None,
    ) -> enpheeph.utils.typings.ArrayType:
        """Build a mask array using the backend of *torch_placeholder*'s device."""
        return self._get_from_torch_plugin_instance(torch_placeholder).make_mask_array(
            int_mask=int_mask,
            mask_index=mask_index,
            mask=mask,
            int_fill_value=int_fill_value,
            shape=shape,
            torch_placeholder=torch_placeholder,
        )
| 5,947 | 38.653333 | 98 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/plugins/mask/__init__.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# we ignore mypy/flake8/black as this file is autogenerated
# we ignore this specific error because of AUTOGEN_INIT
# mypy: ignore-errors
# the following flake8 syntax is wrong, as it will be read as generic noqa, but we use
# it to remember the errors appearing in the __init__.py
# additionally this is not caught by pygrep-hooks as it counts only "type: ignore" and
# "noqa", both with starting #
# flake8: noqa: E302,E305
# fmt: off
# this is required so that the mkinit script will generate the init imports only in this
# section
# <AUTOGEN_INIT>
def lazy_import(module_name, submodules, submod_attrs):
    """Create a PEP 562 ``__getattr__`` hook for *module_name*.

    The returned hook imports the listed ``submodules`` on first access and
    resolves each attribute in ``submod_attrs`` from the submodule that
    defines it.  Setting the ``EAGER_IMPORT`` environment variable to any
    non-empty value resolves everything immediately instead.
    """
    import importlib
    import os

    # reverse lookup: attribute name -> submodule that provides it
    attr_to_submodule = {
        attribute: submodule
        for submodule, attributes in submod_attrs.items()
        for attribute in attributes
    }

    def __getattr__(name):
        if name in submodules:
            # the name is a submodule: import and return the module object
            resolved = importlib.import_module(f'{module_name}.{name}')
        elif name in attr_to_submodule:
            # the name is an attribute: import its submodule, then fetch it
            owner = importlib.import_module(
                f'{module_name}.{attr_to_submodule[name]}'
            )
            resolved = getattr(owner, name)
        else:
            raise AttributeError(f'No {module_name} attribute {name}')
        # cache the result in the module globals so the hook runs once per name
        globals()[name] = resolved
        return resolved

    if os.environ.get('EAGER_IMPORT', ''):
        # resolve every known name right away
        for submodule_name in attr_to_submodule.values():
            __getattr__(submodule_name)
        for attribute_names in submod_attrs.values():
            for attribute_name in attribute_names:
                __getattr__(attribute_name)
    return __getattr__
# mkinit-generated lazy loader: the listed submodules and their public
# attributes are imported on first access via PEP 562
__getattr__ = lazy_import(
    __name__,
    submodules={
        'abc',
        'autopytorchmaskplugin',
        'cupypytorchmaskplugin',
        'numpypytorchmaskplugin',
    },
    submod_attrs={
        'abc': [
            'LowLevelTorchMaskPluginABC',
            'lowleveltorchmaskpluginabc',
        ],
        'autopytorchmaskplugin': [
            'AutoPyTorchMaskPlugin',
        ],
        'cupypytorchmaskplugin': [
            'CuPyPyTorchMaskPlugin',
        ],
        'numpypytorchmaskplugin': [
            'NumPyPyTorchMaskPlugin',
        ],
    },
)
def __dir__():
    """Advertise the lazily-provided names (PEP 562 module ``__dir__``)."""
    return __all__
# mkinit-generated: public API of this package, kept in sync with the
# ``lazy_import`` call above
__all__ = ['AutoPyTorchMaskPlugin', 'CuPyPyTorchMaskPlugin',
           'LowLevelTorchMaskPluginABC', 'NumPyPyTorchMaskPlugin', 'abc',
           'autopytorchmaskplugin', 'cupypytorchmaskplugin',
           'lowleveltorchmaskpluginabc', 'numpypytorchmaskplugin']
# </AUTOGEN_INIT>
| 3,424 | 31.932692 | 88 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/mixins/pytorchtensorobjectvalidatormixin.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import abc
import typing
import enpheeph.injections.abc.pytorchinjectionabc
import enpheeph.utils.dataclasses
import enpheeph.utils.functions
import enpheeph.utils.imports
import enpheeph.utils.typings
if (
typing.TYPE_CHECKING
or enpheeph.utils.imports.MODULE_AVAILABILITY[enpheeph.utils.imports.TORCH_NAME]
):
import torch
class PyTorchTensorObjectValidatorMixin(abc.ABC):
    """Mixin converting a tensor into the concrete tensor class of a target.

    Used so that code which rebuilds a tensor returns an object of the same
    class as the tensor it replaces (e.g. ``torch.nn.Parameter`` for
    weights, plain ``torch.Tensor`` otherwise).
    """

    @staticmethod
    def convert_tensor_to_proper_class(
        source: "torch.Tensor", target: "torch.Tensor"
    ) -> "torch.Tensor":
        """Return *source* converted to the class of *target*.

        Args:
            source: the tensor holding the new content.
            target: the tensor whose concrete class must be matched.

        Returns:
            *source* unchanged when *target* is a plain ``torch.Tensor``
            (re-wrapping would require copying the tensor content),
            otherwise a new instance of ``target.__class__`` built from
            *source* (e.g. ``torch.nn.Parameter``).

        Raises:
            TypeError: if *source* is not a ``torch.Tensor``.
        """
        # validate the source up-front: previously this check was only
        # reached when target was a Tensor sub-class, so a non-tensor
        # source was silently passed through on the plain-Tensor path
        if not isinstance(source, torch.Tensor):
            raise TypeError("Wrong type for source")
        # to avoid issues if we are using sub-classes like torch.nn.Parameter,
        # we call tensor.__class__ to create a new object with the proper
        # content; this cannot be done for torch.Tensor itself as it would
        # require copying the tensor content
        if target.__class__ == torch.Tensor:
            return source
        return target.__class__(source)
| 2,582 | 38.738462 | 84 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/mixins/pytorchmonitorpostprocessormixin.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import abc
import typing
import enpheeph.utils.classes
import enpheeph.utils.dataclasses
import enpheeph.utils.enums
import enpheeph.utils.functions
import enpheeph.utils.imports
if (
typing.TYPE_CHECKING
or enpheeph.utils.imports.MODULE_AVAILABILITY[enpheeph.utils.imports.TORCH_NAME]
):
import torch
def torch_geometric_mean(tensor: "torch.Tensor", dim: int = -1) -> "torch.Tensor":
    """Geometric mean of *tensor* along *dim*, computed as exp(mean(log(x)))."""
    return torch.exp(tensor.log().mean(dim=dim))
class PyTorchMonitorPostProcessorMixin(abc.ABC):
    """Mixin computing the enabled summary statistics of a monitored tensor.

    Concrete monitors must provide ``enabled_metrics`` (a
    ``MonitorMetric`` flag enum selecting which statistics to compute)
    and ``monitor_location``.
    """

    # flag enum: which metrics postprocess() should compute
    enabled_metrics: enpheeph.utils.enums.MonitorMetric
    # location of the monitored tensor; not read in this mixin, declared
    # for the concrete monitor classes
    monitor_location: enpheeph.utils.dataclasses.MonitorLocation

    def postprocess(self, tensor: "torch.Tensor") -> typing.Dict[str, typing.Any]:
        """Return a dict mapping each enabled metric name to its scalar value.

        Metrics whose torch op raises ``NotImplementedError`` are skipped,
        leaving no entry in the returned dict.
        """
        dict_ = {}
        # NOTE(review): SkipIfErrorContextManager presumably suppresses the
        # given exception type inside each with-block — confirm in
        # enpheeph.utils.classes
        skip_if_error = enpheeph.utils.classes.SkipIfErrorContextManager(
            NotImplementedError
        )
        # the concrete flag-enum class, so members can be looked up on it
        metric_class = self.enabled_metrics.__class__
        if metric_class.StandardDeviation in self.enabled_metrics:
            with skip_if_error:
                dict_[metric_class.StandardDeviation.name] = torch.std(
                    tensor, unbiased=True
                ).item()
        if metric_class.Maximum in self.enabled_metrics:
            with skip_if_error:
                dict_[metric_class.Maximum.name] = torch.max(tensor).item()
        if metric_class.Minimum in self.enabled_metrics:
            with skip_if_error:
                dict_[metric_class.Minimum.name] = torch.min(tensor).item()
        if metric_class.ArithmeticMean in self.enabled_metrics:
            with skip_if_error:
                dict_[metric_class.ArithmeticMean.name] = torch.mean(tensor).item()
        if metric_class.GeometricMean in self.enabled_metrics:
            with skip_if_error:
                dict_[metric_class.GeometricMean.name] = torch_geometric_mean(
                    tensor
                ).item()
        return dict_
| 3,515 | 38.505618 | 84 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/mixins/pytorchmaskmixin.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import abc
import typing
import enpheeph.injections.plugins.indexing.abc.indexingpluginabc
import enpheeph.injections.plugins.mask.abc.lowleveltorchmaskpluginabc
import enpheeph.injections.abc.pytorchinjectionabc
import enpheeph.utils.dataclasses
import enpheeph.utils.functions
import enpheeph.utils.imports
import enpheeph.utils.typings
if (
typing.TYPE_CHECKING
or enpheeph.utils.imports.MODULE_AVAILABILITY[enpheeph.utils.imports.TORCH_NAME]
):
import torch
class PyTorchMaskMixin(abc.ABC):
    """Mixin implementing bit-level fault-mask generation and injection on
    PyTorch tensors.

    Host classes must provide the attributes declared below (indexing and
    low-level mask plugins, fault location, mask cache, and the
    tensor-class converter) before calling :meth:`generate_mask` /
    :meth:`inject_mask`.
    """

    # we need the index plugin to simplify the handling of the indices
    indexing_plugin: (
        enpheeph.injections.plugins.indexing.abc.indexingpluginabc.IndexingPluginABC
    )
    # the used variables in the functions, must be initialized properly
    location: enpheeph.utils.dataclasses.FaultLocation
    low_level_plugin: (
        # black has issues with long names
        # fmt: off
        enpheeph.injections.plugins.mask.
        lowleveltorchmaskpluginabc.LowLevelTorchMaskPluginABC
        # fmt: on
    )
    # cached mask tensor; None until generate_mask has run at least once
    mask: typing.Optional["torch.Tensor"]
    # Callables
    # converts the injected result back to the class of the original tensor
    convert_tensor_to_proper_class: typing.Callable[
        ["torch.Tensor", "torch.Tensor"],
        "torch.Tensor",
    ]

    def set_tensor_only_indexing(
        self,
        # this flag is used to consider batches as an extra dimension
        # if enabled we fill the empty index due to missing batch/other dimensions
        # otherwise it is not filled, leading to Tensor dimension covering the whole
        # array
        batches_exist: bool = True,
    ) -> None:
        """Activate only the Tensor dimension in the indexing plugin."""
        self.indexing_plugin.select_active_dimensions(
            [
                enpheeph.utils.enums.DimensionType.Tensor,
            ],
            autoshift_to_boundaries=False,
            fill_empty_index=batches_exist,
            filler=slice(None, None),
        )

    def set_batch_tensor_indexing(self) -> None:
        """Activate the Batch and Tensor dimensions in the indexing plugin."""
        self.indexing_plugin.select_active_dimensions(
            [
                enpheeph.utils.enums.DimensionType.Batch,
                enpheeph.utils.enums.DimensionType.Tensor,
            ],
            autoshift_to_boundaries=False,
            fill_empty_index=True,
            filler=slice(None, None),
        )

    # mask is both set in self and returned
    def generate_mask(
        self,
        tensor: "torch.Tensor",
        force_recompute: bool = False,
        # if True we use set_tensor_only_indexing, if False we use
        # set_batch_tensor_indexing
        # if explicitly non-boolean, we skip it, to allow for custom configurations
        tensor_only: typing.Optional[bool] = True,
        # this flag is used to consider batches as an extra dimension when using
        # tensor_only, it has no effect if tensor_only is false
        batches_exist: bool = True,
    ) -> "torch.Tensor":
        """Compute (or return the cached) bit-fault mask for ``tensor``.

        The mask encodes ``location.bit_fault_value`` at
        ``location.bit_index`` and is built by ``low_level_plugin`` from a
        placeholder carrying ``tensor``'s device and dtype.  The result is
        cached in ``self.mask`` and also returned; ``force_recompute``
        ignores and rebuilds the cache.
        """
        if self.mask is None or force_recompute:
            # NOTE: the following process is used to process the index,
            # based on bitwidth and type
            # the index may start from a non-compatible form, which is then
            # checked and verified against the PyTorch indexing capabilities
            # we get the dtype to compute its length in bytes, the return
            # intermediate value is the dimension of the dtype in bytes
            bytewidth = tensor.element_size()
            # we create the boolean mask in torch, depending on whether we
            # use 0 or 1 to fill the non-selected values
            bit_mask_info = (
                enpheeph.utils.dataclasses.BitFaultMaskInfo.from_bit_fault_value(
                    self.location.bit_fault_value
                )
            )
            bool_mask: "torch.Tensor" = torch.tensor(
                [bit_mask_info.fill_value] * bytewidth * 8, dtype=torch.bool
            )
            # we set the selected bits to the value provided by the fault
            # locator
            bool_mask[self.location.bit_index] = bit_mask_info.mask_value
            # we get the correct indices from the boolean mask
            # we convert it to indices in standard Python to create the final
            # integer representation
            indices: typing.List[int] = torch.where(bool_mask)[0].tolist()
            # we get the final integer representation for the mask
            int_mask = sum(2**i for i in indices)
            # placeholder for having device and dtype to be converted
            tensor_placeholder: "torch.Tensor" = torch.zeros(
                0,
                device=tensor.device,
                dtype=tensor.dtype,
                requires_grad=False,
            )
            # we set up the indices depending on the flag
            # if the flag is different, we leave the existing active dimensions
            if tensor_only is True:
                self.set_tensor_only_indexing(batches_exist=batches_exist)
            elif tensor_only is False:
                self.set_batch_tensor_indexing()
            tensor_shape = self.indexing_plugin.filter_dimensions(
                tensor.shape,
            )
            # we get the values for mask and mask_index
            # if they are None we use None otherwise we get it from the dict
            # with default as None
            mask = (
                self.location.dimension_mask.get(
                    enpheeph.utils.enums.DimensionType.Tensor, None
                )
                if self.location.dimension_mask is not None
                else None
            )
            mask_index = (
                self.location.dimension_index.get(
                    enpheeph.utils.enums.DimensionType.Tensor, None
                )
                if self.location.dimension_index is not None
                else None
            )
            # we create the low-level mask
            # using the filtered dimensions
            # we only need the tensor_index, as we do not cover the time/batch
            # dimensions
            mask_array = self.low_level_plugin.make_mask_array(
                int_mask=int_mask,
                # we give only the tensor dimension as possible mask
                mask=mask,
                # we use only the tensor index as the mask will be the same even
                # across different batches/time-steps
                # so it can be expanded/repeated later
                mask_index=mask_index,
                int_fill_value=(2 ** (bytewidth * 8) - 1) * bit_mask_info.fill_value,
                shape=tensor_shape,
                torch_placeholder=tensor_placeholder,
            )
            # we convert the mask back to PyTorch
            mask = self.low_level_plugin.to_torch(mask_array)
            # the indices are reset if we have set them up ourselves
            if isinstance(tensor_only, bool):
                self.indexing_plugin.reset_active_dimensions()
        else:
            mask = self.mask
        self.mask = mask
        return self.mask

    # we return the injected tensor
    def inject_mask(
        self,
        tensor: "torch.Tensor",
        # if True we use set_tensor_only_indexing, if False we use
        # set_batch_tensor_indexing
        # if explicitly non-boolean, we skip it, to allow for custom configurations
        tensor_only: typing.Optional[bool] = True,
        # this flag is used to consider batches as an extra dimension when using
        # tensor_only, it has no effect if tensor_only is false
        batches_exist: bool = True,
    ) -> "torch.Tensor":
        """Apply the cached mask bitwise to ``tensor`` and return the result,
        converted back to the original tensor's class.

        Raises:
            RuntimeError: if :meth:`generate_mask` has not been called yet.
        """
        if self.mask is None:
            raise RuntimeError("Please call generate_mask before injection")
        bit_mask_info = (
            enpheeph.utils.dataclasses.BitFaultMaskInfo.from_bit_fault_value(
                self.location.bit_fault_value
            )
        )
        # we set up the indices depending on the flag
        if tensor_only is True:
            self.set_tensor_only_indexing(batches_exist=batches_exist)
        elif tensor_only is False:
            self.set_batch_tensor_indexing()
        selected_batches_tensor = tensor[
            self.indexing_plugin.join_indices(
                {
                    **self.location.dimension_index,
                    **{
                        enpheeph.utils.enums.DimensionType.Tensor: ...,
                    },
                },
            )
        ]
        low_level_tensor = self.low_level_plugin.from_torch(
            selected_batches_tensor,
        )
        # mypy generates an error since self.mask can be None
        # however we call self.generate_mask that will set the mask or raise errors
        # stopping the execution
        low_level_mask = self.low_level_plugin.from_torch(
            # we use expand as to expand the mask onto the selected batches
            # dimension
            # expand creates views, so we should not change the elements in place,
            # but it is doable as we are working on the mask which will not be modified
            # sometimes the following line fails with mypy, use type: ignore[arg-type]
            self.mask.expand_as(selected_batches_tensor)
        )
        bitwise_tensor = self.low_level_plugin.to_bitwise_type(low_level_tensor)
        bitwise_mask = self.low_level_plugin.to_bitwise_type(low_level_mask)
        bitwise_injected_tensor = bit_mask_info.operation.value(
            bitwise_tensor,
            bitwise_mask,
        )
        low_level_injected_tensor = self.low_level_plugin.to_target_type(
            bitwise_injected_tensor,
            low_level_tensor,
        )
        injected_tensor = self.low_level_plugin.to_torch(low_level_injected_tensor)
        final_injected_tensor = injected_tensor[
            self.indexing_plugin.join_indices(
                {
                    **self.location.dimension_index,
                    **{
                        enpheeph.utils.enums.DimensionType.Tensor: ...,
                    },
                },
            )
        ]
        # the indices are reset if we have set them up ourselves
        if isinstance(tensor_only, bool):
            self.indexing_plugin.reset_active_dimensions()
        # conversion to proper class
        return self.convert_tensor_to_proper_class(final_injected_tensor, tensor)
| 11,796 | 40.10453 | 87 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/mixins/__init__.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# we ignore mypy/flake8/black as this file is autogenerated
# we ignore this specific error because of AUTOGEN_INIT
# mypy: ignore-errors
# the following flake8 syntax is wrong, as it will be read as generic noqa, but we use
# it to remember the errors appearing in the __init__.py
# additionally this is not caught by pygrep-hooks as it counts only "type: ignore" and
# "noqa", both with starting #
# flake8: noqa: E302,E305
# fmt: off
# this is required so that the mkinit script will generate the init imports only in this
# section
# <AUTOGEN_INIT>
def lazy_import(module_name, submodules, submod_attrs):
    """Create a PEP 562 ``__getattr__`` that lazily resolves the package API.

    Every name in ``submodules`` resolves to the corresponding submodule of
    ``module_name``; every name listed in ``submod_attrs`` (mapping
    submodule -> attribute names) is fetched from its submodule on first
    access.  Resolved objects are cached in this module's globals.  Setting
    the ``EAGER_IMPORT`` environment variable resolves everything upfront.
    """
    import importlib
    import os

    # invert submod_attrs: attribute name -> owning submodule
    attr_to_submodule = {
        attr_name: submodule_name
        for submodule_name, attr_names in submod_attrs.items()
        for attr_name in attr_names
    }

    def __getattr__(name):
        if name in submodules:
            resolved = importlib.import_module(f"{module_name}.{name}")
        elif name in attr_to_submodule:
            owner = importlib.import_module(
                f"{module_name}.{attr_to_submodule[name]}"
            )
            resolved = getattr(owner, name)
        else:
            raise AttributeError(f"No {module_name} attribute {name}")
        # cache so the module-level lookup only happens once per name
        globals()[name] = resolved
        return resolved

    if os.environ.get("EAGER_IMPORT", ""):
        for submodule_name in attr_to_submodule.values():
            __getattr__(submodule_name)
        for attr_names in submod_attrs.values():
            for attr_name in attr_names:
                __getattr__(attr_name)

    return __getattr__
# PEP 562 lazy resolution: attribute access on this package triggers the
# import of the relevant submodule / symbol instead of importing everything
# at package-import time (registry generated by the mkinit script).
__getattr__ = lazy_import(
    __name__,
    submodules={
        'pytorchmaskmixin',
        'pytorchmonitorpostprocessormixin',
        'pytorchquantizationmixin',
        'pytorchsparseinterfacemixin',
        'pytorchtensorobjectvalidatormixin',
    },
    submod_attrs={
        'pytorchmaskmixin': [
            'PyTorchMaskMixin',
        ],
        'pytorchmonitorpostprocessormixin': [
            'PyTorchMonitorPostProcessorMixin',
            'torch_geometric_mean',
        ],
        'pytorchsparseinterfacemixin': [
            'PyTorchSparseInterfaceMixin',
        ],
        'pytorchtensorobjectvalidatormixin': [
            'PyTorchTensorObjectValidatorMixin',
        ],
    },
)
def __dir__():
    # expose the full lazy API to dir() / auto-completion
    return __all__
# public API of this package, kept in sync with the lazy registry
__all__ = ['PyTorchMaskMixin', 'PyTorchMonitorPostProcessorMixin',
           'PyTorchSparseInterfaceMixin', 'PyTorchTensorObjectValidatorMixin',
           'pytorchmaskmixin', 'pytorchmonitorpostprocessormixin',
           'pytorchquantizationmixin', 'pytorchsparseinterfacemixin',
           'pytorchtensorobjectvalidatormixin', 'torch_geometric_mean']
# </AUTOGEN_INIT>
| 3,646 | 33.40566 | 88 | py |
enpheeph | enpheeph-main/src/enpheeph/injections/mixins/pytorchsparseinterfacemixin.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import abc
import typing
import enpheeph.injections.plugins.indexing.abc.indexingpluginabc
import enpheeph.injections.plugins.mask.abc.lowleveltorchmaskpluginabc
import enpheeph.injections.abc.pytorchinjectionabc
import enpheeph.utils.dataclasses
import enpheeph.utils.functions
import enpheeph.utils.imports
import enpheeph.utils.typings
if typing.TYPE_CHECKING:
import torch
elif enpheeph.utils.imports.MODULE_AVAILABILITY[enpheeph.utils.imports.TORCH_NAME]:
import torch
class PyTorchSparseInterfaceMixin(abc.ABC):
    """Mixin translating injections onto sparse views of PyTorch tensors.

    Depending on ``location.parameter_type``, the injection target is either
    the indices or the values of the COO-sparse view of a dense tensor.
    """

    # we need the index plugin to simplify the handling of the indices
    indexing_plugin: (
        enpheeph.injections.plugins.indexing.abc.indexingpluginabc.IndexingPluginABC
    )
    # the used variables in the functions, must be initialized properly
    location: enpheeph.utils.dataclasses.BaseInjectionLocation

    def _check_sparse_flag(self, component: typing.Any) -> bool:
        """Return True iff ``Sparse | component`` is contained in
        ``location.parameter_type``.

        NOTE: mypy has issues recognizing enum names taken from another
        member (e.g. ``A.a.a``), hence the combined flag is rebuilt from
        separate accesses on ``location.parameter_type`` instead of being a
        module-level constant; this also keeps the check working for any
        compatible flag-enum provided by the location.
        """
        combined_flag = self.location.parameter_type.Sparse | component
        return combined_flag in self.location.parameter_type

    def _check_sparse_index_flag(self) -> bool:
        # True when the injection targets the indices of the sparse tensor
        return self._check_sparse_flag(self.location.parameter_type.Index)

    def _check_sparse_value_flag(self) -> bool:
        # True when the injection targets the values of the sparse tensor
        return self._check_sparse_flag(self.location.parameter_type.Value)

    def get_sparse_injection_parameter(
        self,
        tensor: "torch.Tensor",
    ) -> "torch.Tensor":
        """Return the sparse component (indices or values) selected by the
        injection location.

        Raises:
            ValueError: if the location does not target a sparse
                index/value component.
        """
        sparse_target = tensor.to_sparse()
        if self._check_sparse_index_flag():
            return sparse_target.indices()
        if self._check_sparse_value_flag():
            return sparse_target.values()
        raise ValueError("This operation is not supported with sparse tensors")

    def set_sparse_injection_parameter(
        self,
        target: "torch.Tensor",
        new_value: "torch.Tensor",
    ) -> "torch.Tensor":
        """Rebuild ``target`` with ``new_value`` replacing the selected
        sparse component (indices or values) and return the dense result.

        Raises:
            ValueError: if the location does not target a sparse
                index/value component.
        """
        sparse_target = target.to_sparse()
        if self._check_sparse_index_flag():
            new_target = torch.sparse_coo_tensor(
                indices=new_value, values=sparse_target.values()
            )
        elif self._check_sparse_value_flag():
            new_target = torch.sparse_coo_tensor(
                indices=sparse_target.indices(), values=new_value
            )
        else:
            raise ValueError("This operation is not supported with sparse tensors")
        # FIXME: how should we approach the sparse-to-dense conversion? maybe with a
        # plugin? so that we can support different sparse representations without
        # having to write code in the main code base
        return new_target.to_dense()
| 5,496 | 41.945313 | 88 | py |
enpheeph | enpheeph-main/src/enpheeph/helpers/__init__.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
def lazy_import(module_name, submodules, submod_attrs):
    """Create a PEP 562 ``__getattr__`` that lazily resolves the package API.

    Every name in ``submodules`` resolves to the corresponding submodule of
    ``module_name``; every name listed in ``submod_attrs`` (mapping
    submodule -> attribute names) is fetched from its submodule on first
    access.  Resolved objects are cached in this module's globals.  Setting
    the ``EAGER_IMPORT`` environment variable resolves everything upfront.
    """
    import importlib
    import os

    # invert submod_attrs: attribute name -> owning submodule
    attr_to_submodule = {
        attr_name: submodule_name
        for submodule_name, attr_names in submod_attrs.items()
        for attr_name in attr_names
    }

    def __getattr__(name):
        if name in submodules:
            resolved = importlib.import_module(f"{module_name}.{name}")
        elif name in attr_to_submodule:
            owner = importlib.import_module(
                f"{module_name}.{attr_to_submodule[name]}"
            )
            resolved = getattr(owner, name)
        else:
            raise AttributeError(f"No {module_name} attribute {name}")
        # cache so the module-level lookup only happens once per name
        globals()[name] = resolved
        return resolved

    if os.environ.get("EAGER_IMPORT", ""):
        for submodule_name in attr_to_submodule.values():
            __getattr__(submodule_name)
        for attr_names in submod_attrs.values():
            for attr_name in attr_names:
                __getattr__(attr_name)

    return __getattr__
# PEP 562 lazy resolution: attribute access on this package triggers the
# import of the relevant submodule / symbol instead of importing everything
# at package-import time.
__getattr__ = lazy_import(
    __name__,
    submodules={
        "faultmodels",
        "summaries",
    },
    submod_attrs={
        "faultmodels": [
            "FaultModelABC",
            "abc",
            "faultmodel",
            "faultmodelabc",
        ],
        "summaries": [
            "ModelSummaryABC",
            "ModelSummaryTorchinfo",
            "abc",
            "layersummaryabc",
            "modelsummaryabc",
            "modelsummarytorchinfo",
            "plugins",
            "sensitivityanalysis",
        ],
    },
)
def __dir__():
    # expose the full lazy API to dir() / auto-completion
    return __all__
# public API of this package, kept in sync with the lazy registry
__all__ = [
    "FaultModelABC",
    "ModelSummaryABC",
    "ModelSummaryTorchinfo",
    "abc",
    "faultmodel",
    "faultmodelabc",
    "faultmodels",
    "layersummaryabc",
    "modelsummaryabc",
    "modelsummarytorchinfo",
    "plugins",
    "sensitivityanalysis",
    "summaries",
]
| 3,640 | 29.090909 | 81 | py |
enpheeph | enpheeph-main/src/enpheeph/helpers/summaries/__init__.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
def lazy_import(module_name, submodules, submod_attrs):
    """Create a PEP 562 ``__getattr__`` that lazily resolves the package API.

    Every name in ``submodules`` resolves to the corresponding submodule of
    ``module_name``; every name listed in ``submod_attrs`` (mapping
    submodule -> attribute names) is fetched from its submodule on first
    access.  Resolved objects are cached in this module's globals.  Setting
    the ``EAGER_IMPORT`` environment variable resolves everything upfront.
    """
    import importlib
    import os

    # invert submod_attrs: attribute name -> owning submodule
    attr_to_submodule = {
        attr_name: submodule_name
        for submodule_name, attr_names in submod_attrs.items()
        for attr_name in attr_names
    }

    def __getattr__(name):
        if name in submodules:
            resolved = importlib.import_module(f"{module_name}.{name}")
        elif name in attr_to_submodule:
            owner = importlib.import_module(
                f"{module_name}.{attr_to_submodule[name]}"
            )
            resolved = getattr(owner, name)
        else:
            raise AttributeError(f"No {module_name} attribute {name}")
        # cache so the module-level lookup only happens once per name
        globals()[name] = resolved
        return resolved

    if os.environ.get("EAGER_IMPORT", ""):
        for submodule_name in attr_to_submodule.values():
            __getattr__(submodule_name)
        for attr_names in submod_attrs.values():
            for attr_name in attr_names:
                __getattr__(attr_name)

    return __getattr__
# PEP 562 lazy resolution: attribute access on this package triggers the
# import of the relevant submodule / symbol instead of importing everything
# at package-import time.
# NOTE(review): "abc" appears both as a submodule and inside the "plugins"
# attribute list; lazy_import checks submodules first, so the submodule wins
# on lookup — verify this is the intended resolution order.
__getattr__ = lazy_import(
    __name__,
    submodules={
        "abc",
        "modelsummarytorchinfo",
        "plugins",
    },
    submod_attrs={
        "abc": [
            "ModelSummaryABC",
            "layersummaryabc",
            "modelsummaryabc",
        ],
        "modelsummarytorchinfo": [
            "ModelSummaryTorchinfo",
        ],
        "plugins": [
            "abc",
            "sensitivityanalysis",
        ],
    },
)
def __dir__():
    # expose the full lazy API to dir() / auto-completion
    return __all__
# public API of this package, kept in sync with the lazy registry
__all__ = [
    "ModelSummaryABC",
    "ModelSummaryTorchinfo",
    "abc",
    "layersummaryabc",
    "modelsummaryabc",
    "modelsummarytorchinfo",
    "plugins",
    "sensitivityanalysis",
]
| 3,440 | 29.451327 | 81 | py |
enpheeph | enpheeph-main/src/enpheeph/helpers/summaries/modelsummarytorchinfo.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import torchinfo
import enpheeph.helpers.summaries.abc.modelsummaryabc
class ModelSummaryTorchinfo(
    enpheeph.helpers.summaries.abc.modelsummaryabc.ModelSummaryABC
):
    """Model-summary implementation backed by ``torchinfo.summary``."""

    def __init__(self, sensitivity_analysis_plugin=None):
        # optional sensitivity-analysis plugin; may remain None
        self.sensitivity_analysis_plugin = sensitivity_analysis_plugin

    def gather_summary(self, model, input_size):
        """Run torchinfo on ``model`` and cache the result in ``self.summary``."""
        summary_arguments = {
            "model": model,
            "input_size": input_size,
            "batch_dim": 1,
            "verbose": 0,
        }
        self.summary = torchinfo.summary(**summary_arguments)

    def compute_layer_set(self):
        # NOTE(review): intentionally a no-op in the original implementation;
        # presumably a placeholder to be filled in later — confirm upstream
        pass
| 2,062 | 38.673077 | 77 | py |
enpheeph | enpheeph-main/src/enpheeph/handlers/__init__.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# we ignore mypy/flake8/black as this file is autogenerated
# we ignore this specific error because of AUTOGEN_INIT
# mypy: ignore-errors
# the following flake8 syntax is wrong, as it will be read as generic noqa, but we use
# it to remember the errors appearing in the __init__.py
# additionally this is not caught by pygrep-hooks as it counts only "type: ignore" and
# "noqa", both with starting #
# flake8: noqa: E302,E305
# fmt: off
# this is required so that the mkinit script will generate the init imports only in this
# section
# <AUTOGEN_INIT>
def lazy_import(module_name, submodules, submod_attrs):
    """Create a PEP 562 ``__getattr__`` that lazily resolves the package API.

    Every name in ``submodules`` resolves to the corresponding submodule of
    ``module_name``; every name listed in ``submod_attrs`` (mapping
    submodule -> attribute names) is fetched from its submodule on first
    access.  Resolved objects are cached in this module's globals.  Setting
    the ``EAGER_IMPORT`` environment variable resolves everything upfront.
    """
    import importlib
    import os

    # invert submod_attrs: attribute name -> owning submodule
    attr_to_submodule = {
        attr_name: submodule_name
        for submodule_name, attr_names in submod_attrs.items()
        for attr_name in attr_names
    }

    def __getattr__(name):
        if name in submodules:
            resolved = importlib.import_module(f"{module_name}.{name}")
        elif name in attr_to_submodule:
            owner = importlib.import_module(
                f"{module_name}.{attr_to_submodule[name]}"
            )
            resolved = getattr(owner, name)
        else:
            raise AttributeError(f"No {module_name} attribute {name}")
        # cache so the module-level lookup only happens once per name
        globals()[name] = resolved
        return resolved

    if os.environ.get("EAGER_IMPORT", ""):
        for submodule_name in attr_to_submodule.values():
            __getattr__(submodule_name)
        for attr_names in submod_attrs.values():
            for attr_name in attr_names:
                __getattr__(attr_name)

    return __getattr__
# PEP 562 lazy resolution: attribute access on this package triggers the
# import of the relevant submodule / symbol instead of importing everything
# at package-import time (registry generated by the mkinit script).
__getattr__ = lazy_import(
    __name__,
    submodules={
        'injectionhandler',
        'plugins',
    },
    submod_attrs={
        'injectionhandler': [
            'InjectionHandler',
        ],
        'plugins': [
            'LibraryHandlerPluginABC',
            'PyTorchHandlerPlugin',
            'libraryhandlerpluginabc',
            'pytorchhandlerplugin',
        ],
    },
)
def __dir__():
    # expose the full lazy API to dir() / auto-completion
    return __all__
# public API of this package, kept in sync with the lazy registry
__all__ = ['InjectionHandler', 'LibraryHandlerPluginABC',
           'PyTorchHandlerPlugin', 'injectionhandler',
           'libraryhandlerpluginabc', 'plugins', 'pytorchhandlerplugin']
# </AUTOGEN_INIT>
| 3,171 | 31.701031 | 88 | py |
enpheeph | enpheeph-main/src/enpheeph/handlers/plugins/pytorchhandlerplugin.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import typing
import enpheeph.handlers.plugins.libraryhandlerpluginabc
import enpheeph.injections.abc.injectionabc
import enpheeph.utils.typings
# we place it after so flake8 does not complain about not-at-the-top imports
if typing.TYPE_CHECKING:
import torch
class PyTorchHandlerPlugin(
    (enpheeph.handlers.plugins.libraryhandlerpluginabc.LibraryHandlerPluginABC),
):
    """Library handler plugin wiring enpheeph injections into PyTorch models.

    Each injection is applied/removed by locating its target submodule via
    the dotted ``location.module_name``, letting the injection wrap the
    module, and writing the wrapped module back into the model.
    """

    def _apply_injections(
        self,
        model: enpheeph.utils.typings.ModelType,
        active_injections: typing.List[
            enpheeph.injections.abc.injectionabc.InjectionABC
        ],
        method_name: str,
    ) -> enpheeph.utils.typings.ModelType:
        # shared implementation of library_setup/library_teardown: the two
        # only differ in which injection method ("setup"/"teardown") is called
        for inj in active_injections:
            module = self.get_module(model, inj.location.module_name)
            new_module = getattr(inj, method_name)(module)
            self.set_module(model, inj.location.module_name, new_module)
        return model

    def library_setup(
        self,
        model: enpheeph.utils.typings.ModelType,
        active_injections: typing.List[
            enpheeph.injections.abc.injectionabc.InjectionABC
        ],
    ) -> enpheeph.utils.typings.ModelType:
        """Install every active injection into ``model`` and return it."""
        return self._apply_injections(model, active_injections, "setup")

    def library_teardown(
        self,
        model: enpheeph.utils.typings.ModelType,
        active_injections: typing.List[
            enpheeph.injections.abc.injectionabc.InjectionABC
        ],
    ) -> enpheeph.utils.typings.ModelType:
        """Remove every active injection from ``model`` and return it."""
        return self._apply_injections(model, active_injections, "teardown")

    def get_module(
        self, model: "torch.nn.Module", full_module_name: str
    ) -> "torch.nn.Module":
        """Resolve a dotted submodule name (e.g. ``"layer1.0.conv"``)."""
        dest_module = model
        for submodule_name in full_module_name.split("."):
            dest_module = getattr(dest_module, submodule_name)
        return dest_module

    def set_module(
        self,
        model: "torch.nn.Module",
        full_module_name: str,
        module: "torch.nn.Module",
    ) -> None:
        """Replace the submodule at a dotted name with ``module`` in-place."""
        # split off the last component: everything before it addresses the
        # parent module whose attribute is overwritten; a name with no dots
        # sets the attribute directly on the model
        parent_name, _, target_name = full_module_name.rpartition(".")
        parent = self.get_module(model, parent_name) if parent_name else model
        setattr(parent, target_name, module)
| 3,666 | 37.6 | 80 | py |
enpheeph | enpheeph-main/src/enpheeph/handlers/plugins/__init__.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# we ignore mypy/flake8/black as this file is autogenerated
# we ignore this specific error because of AUTOGEN_INIT
# mypy: ignore-errors
# the following flake8 syntax is wrong, as it will be read as generic noqa, but we use
# it to remember the errors appearing in the __init__.py
# additionally this is not caught by pygrep-hooks as it counts only "type: ignore" and
# "noqa", both with starting #
# flake8: noqa: E302,E305
# fmt: off
# this is required so that the mkinit script will generate the init imports only in this
# section
# <AUTOGEN_INIT>
def lazy_import(module_name, submodules, submod_attrs):
    """Create a PEP 562 ``__getattr__`` that lazily resolves the package API.

    Every name in ``submodules`` resolves to the corresponding submodule of
    ``module_name``; every name listed in ``submod_attrs`` (mapping
    submodule -> attribute names) is fetched from its submodule on first
    access.  Resolved objects are cached in this module's globals.  Setting
    the ``EAGER_IMPORT`` environment variable resolves everything upfront.
    """
    import importlib
    import os

    # invert submod_attrs: attribute name -> owning submodule
    attr_to_submodule = {
        attr_name: submodule_name
        for submodule_name, attr_names in submod_attrs.items()
        for attr_name in attr_names
    }

    def __getattr__(name):
        if name in submodules:
            resolved = importlib.import_module(f"{module_name}.{name}")
        elif name in attr_to_submodule:
            owner = importlib.import_module(
                f"{module_name}.{attr_to_submodule[name]}"
            )
            resolved = getattr(owner, name)
        else:
            raise AttributeError(f"No {module_name} attribute {name}")
        # cache so the module-level lookup only happens once per name
        globals()[name] = resolved
        return resolved

    if os.environ.get("EAGER_IMPORT", ""):
        for submodule_name in attr_to_submodule.values():
            __getattr__(submodule_name)
        for attr_names in submod_attrs.values():
            for attr_name in attr_names:
                __getattr__(attr_name)

    return __getattr__
# install the lazy resolver as this package's module-level __getattr__ (PEP 562):
# submodules and their listed attributes are imported only on first access
__getattr__ = lazy_import(
    __name__,
    submodules={
        'libraryhandlerpluginabc',
        'pytorchhandlerplugin',
    },
    submod_attrs={
        'libraryhandlerpluginabc': [
            'LibraryHandlerPluginABC',
        ],
        'pytorchhandlerplugin': [
            'PyTorchHandlerPlugin',
        ],
    },
)
def __dir__():
    # advertise the lazily-resolved names so dir() and autocompletion work
    return __all__
__all__ = ['LibraryHandlerPluginABC', 'PyTorchHandlerPlugin',
           'libraryhandlerpluginabc', 'pytorchhandlerplugin']
# </AUTOGEN_INIT>
| 3,042 | 31.72043 | 88 | py |
enpheeph | enpheeph-main/src/enpheeph/utils/enums.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import enum
import operator
class BitFaultValue(enum.Enum):
    """Kind of bit-level fault to inject: random, stuck-at, or bit-flip."""

    Random = enum.auto()
    StuckAtZero = enum.auto()
    StuckAtOne = enum.auto()
    BitFlip = enum.auto()
class BitWidth(enum.IntEnum):
    """Bit widths of common data types; each member's value is its bit count."""

    OneByte = 8
    TwoBytes = 16
    ThreeBytes = 24
    FourBytes = 32
    FiveBytes = 40
    SixBytes = 48
    SevenBytes = 56
    EightBytes = 64
    # the following are aliases of the byte-count members above (same value),
    # e.g. BitWidth.FloatingPoint32 is BitWidth.FourBytes
    FloatingPoint16 = TwoBytes
    FloatingPoint32 = FourBytes
    FloatingPoint64 = EightBytes
    Int32 = FourBytes
    Int64 = EightBytes
class DimensionType(enum.Enum):
    """Logical role of a tensor dimension when addressing injection targets."""

    BitLevel = enum.auto()
    Batch = enum.auto()
    Tensor = enum.auto()
    Time = enum.auto()
# NOTE: this endianness does not represent the actual endianness of the machine,
# only the endianness seen in the Python objects when accessing them
class Endianness(enum.Enum):
    """Bit-index ordering, using struct-style byte-order codes as values."""

    Little = "<"
    Big = ">"
    # aliases: with big-endian ordering index 0 addresses the MSB,
    # with little-endian ordering index 0 addresses the LSB
    MSBAtIndexZero = Big
    LSBAtIndexZero = Little
class FaultMaskOperation(enum.Enum):
    """Bitwise operations for applying a fault mask; values are the
    corresponding :mod:`operator` callables."""

    InPlaceXor = operator.ixor
    InPlaceAnd = operator.iand
    InPlaceOr = operator.ior
    Xor = operator.xor
    And = operator.and_
    Or = operator.or_
class FaultMaskValue(enum.IntEnum):
    """Value a fault mask bit can take (usable directly as 0/1 integers)."""

    One = 1
    Zero = 0
class HandlerStatus(enum.Enum):
    """Execution state of an injection handler."""

    Running = enum.auto()
    Idle = enum.auto()
class ImportName(enum.Enum):
    """Optional third-party dependencies; values are the importable module
    names as passed to ``importlib``."""

    Cupy = "cupy"
    Norse = "norse"
    Numpy = "numpy"
    PyTorch = "torch"
    PyTorchLightning = "pytorch_lightning"
    SQLAlchemy = "sqlalchemy"
# we use flag so that different metrics can be composed together
class MonitorMetric(enum.Flag):
    """Statistics a monitor can record; a Flag so metrics can be OR-combined."""

    StandardDeviation = enum.auto()
    Maximum = enum.auto()
    Minimum = enum.auto()
    ArithmeticMean = enum.auto()
    GeometricMean = enum.auto()
class ParameterType(enum.Flag):
    """Describes an injection target by OR-combining orthogonal facets:
    network kind, parameter kind, tensor layout, and (for sparse tensors)
    coordinate format.  Pre-combined shorthands are defined at the bottom."""

    # network type
    DNN = enum.auto()
    SNN = enum.auto()
    # sub-network type, as we need special care for RNN
    RNN = enum.auto()
    # parameter type
    Weight = enum.auto()
    Activation = enum.auto()
    State = enum.auto()
    # state types
    LIF = enum.auto()
    # variables saved in state
    Voltage = enum.auto()
    Current = enum.auto()
    # tensor type
    Dense = enum.auto()
    PrunedDense = enum.auto()
    Sparse = enum.auto()
    # sparse coordinates type
    COO = enum.auto()
    CSR = enum.auto()
    # sparse coordinates
    Index = enum.auto()
    Value = enum.auto()
    # complex types (aliases built from the flags above)
    DNNWeightDense = DNN | Weight | Dense
    DNNActivationDense = DNN | Activation | Dense
    SNNLIFStateVoltageDense = SNN | State | LIF | Voltage | Dense
| 4,043 | 25.431373 | 80 | py |
enpheeph | enpheeph-main/src/enpheeph/utils/typings.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import types
import typing
# we fake import cupy, numpy and torch to silence mypy
if typing.TYPE_CHECKING:
import cupy
import numpy
import torch
import enpheeph.utils.enums
# for the active_dimension_index
ActiveDimensionIndexType = typing.Union[
    enpheeph.utils.enums.DimensionType,
    types.EllipsisType,
]
# we could even add bit and other parameters in here
AnyIndexType = typing.Union[
    "Index1DType",
    "IndexMultiDType",
]
AnyMaskType = typing.Union[
    "Mask1DType",
    "MaskMultiDType",
]
# array types are quoted: cupy/numpy are only imported under TYPE_CHECKING
ArrayType = typing.Union[
    "cupy.ndarray",
    "numpy.ndarray",
]
# maps a logical dimension role to the index addressing it
DimensionDictType = typing.Dict[
    enpheeph.utils.enums.DimensionType,
    "DimensionIndexType",
]
DimensionIndexType = typing.Union[
    int,
    types.EllipsisType,
    # **NOTE**: we do not support tuples yet, one can duplicate enum values to have
    # multiple indices with similar names
    # typing.Tuple[int, ...],
]
DimensionLocationIndexType = typing.Dict[
    enpheeph.utils.enums.DimensionType,
    AnyIndexType,
]
DimensionLocationMaskType = typing.Dict[
    enpheeph.utils.enums.DimensionType,
    AnyMaskType,
]
# we use Tuple and not Sequence to allow hashability
# mypy reports error if one of the types is not valid
Index1DType = typing.Union[
    int,
    slice,
    types.EllipsisType,
    # we need List as Tuple is seen as multiple dimensions when indexing
    # **NOTE**: this might give problems with hashing in the dataclasses
    list[int],
]
IndexMultiDType = typing.Union[
    int,
    slice,
    types.EllipsisType,
    # we use Tuple as in this case we need to cover multiple dimensions
    tuple[Index1DType, ...],
]
IndexTimeType = Index1DType
Mask1DType = typing.Sequence[bool]
MaskMultiDType = typing.Union[
    Mask1DType,
    typing.Sequence[Mask1DType],
]
LowLevelMaskArrayType = typing.Union[
    "cupy.ndarray",
    "numpy.ndarray",
]
# torch is also only imported under TYPE_CHECKING, hence the string alias
ModelType = "torch.nn.Module"
ShapeType = tuple[int, ...]
TensorType = typing.Union[
    ArrayType,
    "torch.Tensor",
]
| 3,537 | 28.239669 | 83 | py |
enpheeph | enpheeph-main/src/enpheeph/integrations/__init__.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# we ignore mypy/flake8/black as this file is autogenerated
# we ignore this specific error because of AUTOGEN_INIT
# mypy: ignore-errors
# the following flake8 syntax is wrong, as it will be read as generic noqa, but we use
# it to remember the errors appearing in the __init__.py
# additionally this is not caught by pygrep-hooks as it counts only "type: ignore" and
# "noqa", both with starting #
# flake8: noqa: E302,E305
# fmt: off
# this is required so that the mkinit script will generate the init imports only in this
# section
# <AUTOGEN_INIT>
def lazy_import(module_name, submodules, submod_attrs):
    """Build a PEP 562 module-level ``__getattr__`` for lazy imports.

    ``submodules`` are names importable as ``module_name.name``;
    ``submod_attrs`` maps a submodule name to the attributes it provides.
    Resolved objects are cached in this module's globals so the hook only
    fires once per name.  Setting the ``EAGER_IMPORT`` environment variable
    forces everything to be imported immediately.
    """
    import importlib
    import os
    attr_to_submodule = {
        attr: submodule
        for submodule, attrs in submod_attrs.items()
        for attr in attrs
    }
    def __getattr__(name):
        if name in submodules:
            value = importlib.import_module(
                '{module_name}.{name}'.format(
                    module_name=module_name, name=name)
            )
        elif name in attr_to_submodule:
            owner = importlib.import_module(
                '{module_name}.{submodname}'.format(
                    module_name=module_name,
                    submodname=attr_to_submodule[name])
            )
            value = getattr(owner, name)
        else:
            raise AttributeError(
                'No {module_name} attribute {name}'.format(
                    module_name=module_name, name=name))
        # cache so subsequent lookups bypass this hook entirely
        globals()[name] = value
        return value
    if os.environ.get('EAGER_IMPORT', ''):
        for submodule_name in attr_to_submodule.values():
            __getattr__(submodule_name)
        for attrs in submod_attrs.values():
            for attr in attrs:
                __getattr__(attr)
    return __getattr__
# install the lazy resolver as this package's module-level __getattr__ (PEP 562):
# the subpackage and its listed attributes are imported only on first access
__getattr__ = lazy_import(
    __name__,
    submodules={
        'pytorchlightning',
    },
    submod_attrs={
        'pytorchlightning': [
            'InjectionCallback',
            'injectioncallback',
        ],
    },
)
def __dir__():
    # advertise the lazily-resolved names so dir() and autocompletion work
    return __all__
__all__ = ['InjectionCallback', 'injectioncallback', 'pytorchlightning']
# </AUTOGEN_INIT>
| 2,891 | 31.494382 | 88 | py |
enpheeph | enpheeph-main/src/enpheeph/integrations/pytorchlightning/injectioncallback.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import collections
import copy
import datetime
import typing
import warnings
import enpheeph.handlers.injectionhandler
import enpheeph.injections.plugins.storage.abc.storagepluginabc
import enpheeph.utils.imports
if (
enpheeph.utils.imports.MODULE_AVAILABILITY[
enpheeph.utils.imports.PYTORCH_LIGHTNING_NAME
]
or typing.TYPE_CHECKING
):
import pytorch_lightning
import pytorch_lightning.callbacks
# to suppress all warnings
# NOTE(review): this silences every warning process-wide as an import-time
# side effect of this module — consider narrowing the filter or scoping it
# with warnings.catch_warnings(); confirm this is intentional
warnings.filterwarnings("ignore")
class InjectionCallback(pytorch_lightning.callbacks.Callback):
    """PyTorch Lightning callback driving enpheeph fault injections.

    At test start it arms the configured injections on the LightningModule
    and (if a storage plugin is configured) opens an experiment record; while
    testing it periodically snapshots the trainer metrics; at test end it
    persists the final metrics, closes the experiment, and removes the
    injections.

    Fixes vs. previous revision:

    * ``storage_plugin`` is ``Optional`` and every hook already guarded
      against ``None``, but ``__init__`` called ``create_session`` and
      ``__del__`` called ``complete_session`` unconditionally, crashing when
      no storage was configured — both are now guarded.
    * ``golden_run_id`` used ``isinstance(self.first_golden_run, int)``, which
      is also true for ``bool`` values, so a first run with
      ``first_golden_run=True`` was sent ``True`` (== 1) as a golden-run id;
      bools are now excluded explicitly, matching the documented intent.
    """

    experiment_time_start: typing.Optional[datetime.datetime]
    first_golden_run: typing.Union[bool, int]
    injection_handler: enpheeph.handlers.injectionhandler.InjectionHandler
    metrics: typing.DefaultDict[
        int, typing.DefaultDict[int, typing.DefaultDict[typing.Any, typing.Any]]
    ]
    metrics_save_frequency: typing.Optional[int]
    storage_plugin: typing.Optional[
        (enpheeph.injections.plugins.storage.abc.storagepluginabc.StoragePluginABC)
    ]
    test_epoch: int

    def __init__(
        self,
        injection_handler: (enpheeph.handlers.injectionhandler.InjectionHandler),
        storage_plugin: typing.Optional[
            (enpheeph.injections.plugins.storage.abc.storagepluginabc.StoragePluginABC)
        ] = None,
        # number of batches every which to save the metrics
        # additionally we save at the end of each epoch
        metrics_save_frequency: typing.Optional[int] = None,
        # if True, we use the first test run as golden run
        # otherwise, we expect it to be a valid id for the golden run reference
        first_golden_run: typing.Union[bool, int] = True,
        # extra session info
        extra_session_info: typing.Optional[typing.Dict[typing.Any, typing.Any]] = None,
        # extra experiment info which can be used to identify experiments
        extra_experiment_info: typing.Optional[
            typing.Dict[typing.Any, typing.Any]
        ] = None,
    ):
        self.experiment_time_start = None
        self.injection_handler = injection_handler
        self.storage_plugin = storage_plugin
        # this number is used to indicate how often to save the results
        # in terms of batch index
        self.metrics_save_frequency = metrics_save_frequency
        self.first_golden_run = first_golden_run
        self.extra_experiment_info = extra_experiment_info
        self.extra_session_info = extra_session_info
        self.test_epoch = 0
        # nested defaultdicts so that self.metrics[epoch][batch] auto-creates
        # an empty dict; save_metrics() strips the default_factory before the
        # snapshot is persisted in the storage
        self.metrics = collections.defaultdict(
            # mypy has issues with nested defaultdict
            lambda: collections.defaultdict(dict)  # type: ignore[arg-type]
        )
        # BUGFIX: guard against storage_plugin being None (its default) — the
        # session is only opened when a storage is actually configured
        if self.storage_plugin is not None:
            # we create a new Session which will be closed on __del__
            self.storage_plugin.create_session(extra_session_info=extra_session_info)

    def __del__(self, *args, **kwargs):
        # getattr guard: __del__ may run even if __init__ failed part-way,
        # and storage_plugin may legitimately be None
        storage_plugin = getattr(self, "storage_plugin", None)
        if storage_plugin is not None:
            storage_plugin.complete_session()
        # not needed
        # super().__del__(*args, **kwargs)

    def on_test_start(
        self,
        trainer: pytorch_lightning.Trainer,
        pl_module: pytorch_lightning.LightningModule,
    ) -> None:
        """Reset counters, arm the injections, and open an experiment record."""
        self.test_epoch = 0
        self.metrics = collections.defaultdict(
            # mypy has issues with nested defaultdict
            lambda: collections.defaultdict(dict)  # type: ignore[arg-type]
        )
        self.injection_handler.setup(pl_module)
        # FIXME: use a MockStorage implementation
        # to allow this without checking for None
        if self.storage_plugin is not None:
            self.experiment_time_start = datetime.datetime.utcnow()
            self.storage_plugin.create_experiment(
                # we create an experiment with the active injections
                injection_locations=[
                    inj.location for inj in self.injection_handler.active_injections
                ],
                running=True,
                # we enable the golden run for the first execution only if the
                # flag is True
                golden_run_flag=self.first_golden_run is True,
                # we pass the id if first_golden_run is an integer experiment
                # id, otherwise None to disable it; bool must be excluded
                # explicitly since it is a subclass of int (BUGFIX: True was
                # previously forwarded as golden-run id 1)
                golden_run_id=self.first_golden_run
                if isinstance(self.first_golden_run, int)
                and not isinstance(self.first_golden_run, bool)
                else None,
                # we use UTC for dates as it is generic
                start_time=self.experiment_time_start,
                extra_experiment_info=self.extra_experiment_info,
            )
            # it will be True at most at the first iteration as we change it
            # into an int afterwards
            if self.first_golden_run is True:
                # casting as experiment_id is set, so it cannot be None
                experiment_id = typing.cast(int, self.storage_plugin.experiment_id)
                # we set the first_golden_run to the golden run id if the
                # first test is a golden run
                self.first_golden_run = experiment_id

    def on_test_end(
        self,
        trainer: pytorch_lightning.Trainer,
        pl_module: pytorch_lightning.LightningModule,
    ) -> None:
        """Persist the final metrics, close the experiment, and disarm."""
        self.save_metrics(trainer, test_epoch=-1, batch_idx=-1)
        self.test_epoch = 0
        if self.storage_plugin is not None:
            duration = (
                datetime.datetime.utcnow() - self.experiment_time_start
                if self.experiment_time_start is not None
                else None
            )
            self.storage_plugin.complete_experiment(
                total_duration=duration,
            )
            # we reset the start time
            self.experiment_time_start = None
        self.injection_handler.teardown(pl_module)

    def on_test_epoch_start(
        self,
        trainer: pytorch_lightning.Trainer,
        pl_module: pytorch_lightning.LightningModule,
    ) -> None:
        """No-op; epoch counting happens in :meth:`on_test_epoch_end`."""
        pass

    def on_test_epoch_end(
        self,
        trainer: pytorch_lightning.Trainer,
        pl_module: pytorch_lightning.LightningModule,
    ) -> None:
        """Save per-epoch metrics and advance the epoch counter."""
        self.save_metrics(trainer, test_epoch=self.test_epoch, batch_idx=-1)
        self.test_epoch += 1

    def on_test_batch_end(
        self,
        trainer: pytorch_lightning.Trainer,
        pl_module: pytorch_lightning.LightningModule,
        outputs: typing.Optional[pytorch_lightning.utilities.types.STEP_OUTPUT],
        batch: typing.Any,
        batch_idx: int,
        dataloader_idx: int,
    ) -> None:
        """Save metrics every ``metrics_save_frequency`` batches (if set)."""
        if (
            self.metrics_save_frequency is not None
            and not batch_idx % self.metrics_save_frequency
        ):
            self.save_metrics(trainer, test_epoch=self.test_epoch, batch_idx=batch_idx)

    def save_metrics(
        self,
        trainer: pytorch_lightning.Trainer,
        # we use -1 for the final result, can be substituted by globally
        # defined constant
        test_epoch: int,
        # we use -1 for the complete results at the end of the test
        # it could be substituted by a fixed constant in the future
        batch_idx: int,
    ) -> None:
        """Snapshot the trainer metrics into ``self.metrics`` and persist them."""
        # if the storage_plugin is None, we skip all the computations
        if self.storage_plugin is not None:
            # we need to save all the metrics,
            # with progress bar < callback < logged
            self.metrics[test_epoch][batch_idx] = copy.deepcopy(
                {
                    **trainer.progress_bar_metrics,
                    **trainer.callback_metrics,
                    **trainer.logged_metrics,
                }
            )
            # convert the 0-dim metric tensors into plain Python numbers
            self.metrics[test_epoch][batch_idx] = {
                k: v.item() for k, v in self.metrics[test_epoch][batch_idx].items()
            }
            # we copy the metrics, so we can change the defaultdict behaviour
            # without changing the original
            metrics = copy.deepcopy(self.metrics)
            # we remove all the default factories so that a missing key gives
            # KeyError
            metrics.default_factory = None
            for el in metrics.values():
                el.default_factory = None
            self.storage_plugin.add_experiment_metrics(metrics)
| 10,215 | 38.444015 | 88 | py |
enpheeph | enpheeph-main/papers/iros2022/comparisons/pytorchfi/pytorchfi_results/script.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from pytorchfi.core import fault_injection as pfi_core
import datetime
import random
class AlexNet(nn.Module):
    """
    AlexNet variant sized for CIFAR10: the fully-connected head is reduced
    to a single linear layer and the paddings are adapted to 32x32 inputs.
    Without BN, the start learning rate should be 0.01
    (c) YANG, Wei
    """

    def __init__(self, num_classes=10):
        super(AlexNet, self).__init__()
        # convolutional trunk, built as a list for readability and wrapped in
        # a Sequential exactly as in the reference implementation
        conv_trunk = [
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=5),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        ]
        self.features = nn.Sequential(*conv_trunk)
        # for 32x32 inputs the trunk ends at 256x1x1, hence 256 input features
        self.classifier = nn.Linear(256, num_classes)

    def forward(self, x):
        # trunk, then flatten each sample to a 256-vector for the classifier
        feature_maps = self.features(x)
        flattened = feature_maps.view(feature_maps.size(0), -1)
        return self.classifier(flattened)
def alexnet(**kwargs):
    """
    AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
    """
    # factory kept for API parity with torchvision-style constructors
    return AlexNet(**kwargs)
class Custom_Sampler(torch.utils.data.Sampler):
    """Sampler that yields a fixed, caller-supplied sequence of dataset
    indices, in order."""

    def __init__(self, data):
        # sequence of indices to emit; stored as-is
        self.data = data

    def __iter__(self):
        yield from self.data

    def __len__(self):
        return len(self.data)
def _get_custom_sampler(singleIndex, total):
    """Build a sampler yielding ``singleIndex`` repeated ``total`` times.

    The previous implementation drew from a one-element population with
    ``random.choices``, which always produces the same index anyway while
    needlessly consuming global RNG state; plain repetition is equivalent,
    deterministic, and faster.
    """
    return Custom_Sampler([singleIndex] * total)
def main(reps=100):
    """Time `reps` golden and fault-injected inference passes over one
    CIFAR10 batch and print the per-run durations as two CSV lines.

    Requires a CUDA device (tensors and models are moved with ``.cuda()``)
    and read access to the CIFAR10 dataset root below.
    """
    torch.manual_seed(0)
    # one batch covering the whole CIFAR10 test set size
    batchsize = 10000
    workers = 1
    channels = 3
    img_size = 32
    transform = transforms.Compose(
        [
            transforms.ToTensor(),
            # standard CIFAR10 per-channel mean/std normalization
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ]
    )
    testset = torchvision.datasets.CIFAR10(
        root="/shared/ml/datasets/vision/CIFAR10/",
        train=False,
        download=True,
        transform=transform,
    )
    # the sampler repeats index 0, so every element of the batch is the
    # same image — the benchmark measures timing, not accuracy
    custom_sampler = _get_custom_sampler(0, batchsize)
    val_loader = torch.utils.data.DataLoader(
        testset,
        batch_size=batchsize,
        shuffle=False,
        num_workers=workers,
        sampler=custom_sampler,
    )
    model = alexnet(num_classes=10)
    golden_times = []
    # baseline: repeated fault-free inference passes
    for _i in range(reps):
        model.eval().cuda()
        golden_outputs = []
        time_now = datetime.datetime.utcnow()
        with torch.no_grad():
            for imgs, _label in iter(val_loader):
                imgs = imgs.cuda()
                golden_outputs.append(model(imgs))
        # NOTE(review): utcnow() is sampled again for the printed value and
        # once more for the stored one, so the two durations differ slightly
        print(f"Golden Time Execution: {datetime.datetime.utcnow() - time_now}")
        # print(len(golden_outputs))
        # print(golden_outputs[0].shape)
        golden_times.append(str(datetime.datetime.utcnow() - time_now))
    # one neuron fault per sample: layer 0, channel 0, position (1, 1),
    # forced to the large value below
    batch_i = list(range(batchsize))
    layer_i = [0] * batchsize
    c_i = [0] * batchsize
    h_i = [1] * batchsize
    w_i = [1] * batchsize
    inj_value_i = [10000.0] * batchsize
    inj = pfi_core(
        model,
        batchsize,
        input_shape=[channels, img_size, img_size],
        use_cuda=True,
    )
    corrupt_times = []
    # injected runs: the fault model is re-declared inside the loop so its
    # setup cost is part of the measured time
    for _i in range(reps):
        corrupt_outputs = []
        time_now = datetime.datetime.utcnow()
        with torch.no_grad():
            for imgs, _label in iter(val_loader):
                corrupt_model = inj.declare_neuron_fi(
                    batch=batch_i,
                    layer_num=layer_i,
                    dim1=c_i,
                    dim2=h_i,
                    dim3=w_i,
                    value=inj_value_i,
                )
                corrupt_model.eval().cuda()
                imgs = imgs.cuda()
                corrupt_outputs.append(corrupt_model(imgs))
        print(f"Corrupt Time Execution: {datetime.datetime.utcnow() - time_now}")
        # print(len(corrupt_outputs))
        # print(corrupt_outputs[0].shape)
        corrupt_times.append(str(datetime.datetime.utcnow() - time_now))
    # sanity check comparing the last golden/corrupt outputs (unused result)
    counter = 0
    for g_out, c_out in zip(golden_outputs, corrupt_outputs):
        if torch.all(c_out.eq(g_out)):
            counter += 1
    # print(f"Correct: {counter / len(golden_outputs)}")
    # CSV-style summary: one line per configuration with all run durations
    print("golden," + ",".join(golden_times))
    print("corrupt," + ",".join(corrupt_times))
if __name__ == "__main__":
    # benchmark entry point: 100 timed repetitions per configuration
    main(reps=100)
| 6,245 | 29.617647 | 85 | py |
enpheeph | enpheeph-main/papers/iros2022/comparisons/tensorfi2/alexnet-cifar10.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import pathlib
import sys
import typing
import flash
import flash.image
import pytorch_lightning
import torch
import torchmetrics
import torchvision
import enpheeph
import enpheeph.injections.plugins.indexing.indexingplugin
# output locations are anchored next to this script
CURRENT_DIR = pathlib.Path(__file__).absolute().parent
RESULTS_DIRECTORY = CURRENT_DIR / "results" / "alexnet-cifar10"
WEIGHTS_FILE = RESULTS_DIRECTORY / "weights" / "alexnet-cifar10.pt"
LOG_DIRECTORY = RESULTS_DIRECTORY / "injection_results"
# import-time side effect: the output directories are created eagerly
WEIGHTS_FILE.parent.mkdir(parents=True, exist_ok=True)
LOG_DIRECTORY.mkdir(parents=True, exist_ok=True)
# shared dataset root — assumes this machine-specific path exists
CIFAR_DIRECTORY = pathlib.Path("/shared/ml/datasets/vision/") / "CIFAR10"
class AlexNetLightningModule(pytorch_lightning.LightningModule):
    """LightningModule wrapping ``torchvision.models.AlexNet`` for
    classification.

    When ``pretrained`` is True the ImageNet weights are downloaded and every
    tensor whose shape matches the freshly built model is loaded; mismatching
    tensors (e.g. the classifier head when ``num_classes != 1000``) keep
    their random initialization.

    Improvement vs. previous revision: the identical bodies of
    ``training_step`` / ``validation_step`` / ``test_step`` are factored into
    :meth:`_run_and_log`, so the logging configuration lives in one place.
    """

    def __init__(self, pretrained: bool = True, num_classes: int = 1000) -> None:
        super().__init__()
        self.num_classes = num_classes
        self.pretrained = pretrained
        self.model = torchvision.models.AlexNet(num_classes=num_classes)
        if self.pretrained:
            # must be accessed with sys.modules otherwise it uses the function
            # which is imported from the sub-module
            # we use type: ignore as mypy cannot check torchvision typings
            mod = sys.modules["torchvision.models.alexnet"]
            state_dict = torch.hub.load_state_dict_from_url(
                mod.model_urls["alexnet"],  # type: ignore[attr-defined]
                progress=True,
            )
            # keep a pretrained tensor only when its size matches the model's
            # own tensor; otherwise fall back to the initialized one
            model_state_dict = self.model.state_dict()
            filtered_state_dict = {
                k: v_new if v_new.size() == v_old.size() else v_old
                for (k, v_old), v_new in zip(
                    model_state_dict.items(),
                    state_dict.values(),
                )
            }
            self.model.load_state_dict(filtered_state_dict, strict=False)
        self.normalizer_fn = torch.nn.Softmax(dim=-1)
        self.accuracy_fn = torchmetrics.Accuracy()
        self.loss_fn = torch.nn.CrossEntropyLoss()
        self.save_hyperparameters()
        # NOTE(review): init_weights() runs *after* the pretrained weights are
        # loaded above and re-randomizes every Conv2d/Linear tensor, which
        # effectively discards the pretrained load — confirm this is intended
        self.init_weights()

    def init_weights(self) -> None:
        """Re-initialize all Conv2d/BatchNorm2d/Linear parameters in place."""
        # this initialization is similar to the ResNet one
        # taken from https://github.com/Lornatang/AlexNet-PyTorch/
        # @ alexnet_pytorch/model.py#L63
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.kaiming_normal_(
                    m.weight, mode="fan_out", nonlinearity="relu"
                )
                if m.bias is not None:
                    torch.nn.init.constant_(m.bias, 0)
            elif isinstance(m, torch.nn.BatchNorm2d):
                torch.nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    torch.nn.init.constant_(m.bias, 0)
            elif isinstance(m, torch.nn.Linear):
                torch.nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    torch.nn.init.constant_(m.bias, 0)

    def forward(self, inpt: torch.Tensor) -> torch.Tensor:
        """Run the wrapped AlexNet; returns raw (unnormalized) logits."""
        return self.model(inpt)

    def configure_optimizers(self) -> torch.optim.Optimizer:
        """Plain SGD (lr=1e-2) over all parameters."""
        optimizer = torch.optim.SGD(self.parameters(), lr=1e-2)
        return optimizer

    def inference(
        self,
        batch: typing.Union[
            torch.Tensor,
            typing.Dict[flash.core.data.data_source.DefaultDataKeys, torch.Tensor],
        ],
        batch_idx: int,
    ) -> typing.Dict[str, torch.Tensor]:
        """Unpack a flash dict batch or a plain ``(x, y)`` tuple and return
        its loss and accuracy.

        Raises:
            ValueError: if a dict batch is missing the input or target key.
        """
        # flash wraps batches in a dict keyed by DefaultDataKeys, while plain
        # dataloaders yield (input, target) tuples
        if isinstance(batch, dict):
            x = batch.get(flash.core.data.data_source.DefaultDataKeys.INPUT, None)
            y = batch.get(flash.core.data.data_source.DefaultDataKeys.TARGET, None)
            if x is None or y is None:
                raise ValueError("Incompatible input for the batch")
        else:
            x, y = batch
        output = self.forward(x)
        return {
            "loss": self.loss_fn(output, y),
            # accuracy is computed on softmax-normalized outputs
            "accuracy": self.accuracy_fn(self.normalizer_fn(output), y),
        }

    def _run_and_log(
        self,
        stage: str,
        batch: typing.Union[
            torch.Tensor,
            typing.Dict[flash.core.data.data_source.DefaultDataKeys, torch.Tensor],
        ],
        batch_idx: int,
    ) -> typing.Dict[str, torch.Tensor]:
        """Shared train/val/test body: run inference, then log
        ``{stage}_loss`` and ``{stage}_accuracy`` with identical settings."""
        res = self.inference(batch, batch_idx)
        self.log_dict(
            {
                stage + "_loss": res["loss"],
                stage + "_accuracy": res["accuracy"],
            },
            prog_bar=True,
            on_step=True,
            on_epoch=True,
            logger=True,
        )
        return res

    def training_step(
        self,
        batch: typing.Union[
            torch.Tensor,
            typing.Dict[flash.core.data.data_source.DefaultDataKeys, torch.Tensor],
        ],
        batch_idx: int,
    ) -> torch.Tensor:
        """Lightning training step; returns the loss for backpropagation."""
        res = self._run_and_log("train", batch, batch_idx)
        return res["loss"]

    def validation_step(
        self,
        batch: typing.Union[
            torch.Tensor,
            typing.Dict[flash.core.data.data_source.DefaultDataKeys, torch.Tensor],
        ],
        batch_idx: int,
    ) -> None:
        """Lightning validation step; metrics are logged, nothing returned."""
        self._run_and_log("val", batch, batch_idx)

    def test_step(
        self,
        batch: typing.Union[
            torch.Tensor,
            typing.Dict[flash.core.data.data_source.DefaultDataKeys, torch.Tensor],
        ],
        batch_idx: int,
    ) -> None:
        """Lightning test step; metrics are logged, nothing returned."""
        self._run_and_log("test", batch, batch_idx)
# fix all RNG seeds (python/numpy/torch and dataloader workers) so runs are
# reproducible
pytorch_lightning.seed_everything(seed=41, workers=True)
# monitor/fault results are persisted into a SQLite database under LOG_DIRECTORY
storage_plugin = enpheeph.injections.plugins.storage.SQLiteStoragePlugin(
    db_url="sqlite:///" + str(LOG_DIRECTORY / "database.sqlite")
)
pytorch_mask_plugin = enpheeph.injections.plugins.NumPyPyTorchMaskPlugin()
pytorch_handler_plugin = enpheeph.handlers.plugins.PyTorchHandlerPlugin()
# monitor_1: logs the standard deviation of the output activations of
# "model.features.0" (whole tensor, all batches, no specific bit)
monitor_1 = enpheeph.injections.OutputPyTorchMonitor(
    location=enpheeph.utils.data_classes.MonitorLocation(
        module_name="model.features.0",
        parameter_type=enpheeph.utils.enums.ParameterType.Activation,
        dimension_index={
            enpheeph.utils.enums.DimensionType.Tensor: ...,
            enpheeph.utils.enums.DimensionType.Batch: ...,
        },
        bit_index=None,
    ),
    enabled_metrics=enpheeph.utils.enums.MonitorMetric.StandardDeviation,
    storage_plugin=storage_plugin,
    move_to_first=False,
    indexing_plugin=enpheeph.injections.plugins.indexing.indexingplugin.IndexingPlugin(
        dimension_dict=enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
    ),
)
# fault_1: stuck-at-one fault on bits 10/16/31 of the weight elements
# [..., 0, 0] of "model.features.0"
fault_1 = enpheeph.injections.OutputPyTorchFault(
    location=enpheeph.utils.data_classes.FaultLocation(
        module_name="model.features.0",
        parameter_type=enpheeph.utils.enums.ParameterType.Weight,
        parameter_name="weight",
        dimension_index={
            enpheeph.utils.enums.DimensionType.Tensor: (
                ...,
                0,
                0,
            ),
            enpheeph.utils.enums.DimensionType.Batch: ...,
        },
        bit_index=[10, 16, 31],
        bit_fault_value=enpheeph.utils.enums.BitFaultValue.StuckAtOne,
    ),
    low_level_torch_plugin=pytorch_mask_plugin,
    indexing_plugin=enpheeph.injections.plugins.indexing.indexingplugin.IndexingPlugin(
        dimension_dict=enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
    ),
)
# monitor_2: same location/metric as monitor_1, placed after fault_1 in the
# injection list so the post-fault activations are also observed
monitor_2 = enpheeph.injections.OutputPyTorchMonitor(
    location=enpheeph.utils.data_classes.MonitorLocation(
        module_name="model.features.0",
        parameter_type=enpheeph.utils.enums.ParameterType.Activation,
        dimension_index={
            enpheeph.utils.enums.DimensionType.Tensor: ...,
            enpheeph.utils.enums.DimensionType.Batch: ...,
        },
        bit_index=None,
    ),
    enabled_metrics=enpheeph.utils.enums.MonitorMetric.StandardDeviation,
    storage_plugin=storage_plugin,
    move_to_first=False,
    indexing_plugin=enpheeph.injections.plugins.indexing.indexingplugin.IndexingPlugin(
        dimension_dict=enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
    ),
)
# monitor_3: std-dev monitor on activations [10:100] of "model.classifier.1"
monitor_3 = enpheeph.injections.OutputPyTorchMonitor(
    location=enpheeph.utils.data_classes.MonitorLocation(
        module_name="model.classifier.1",
        parameter_type=enpheeph.utils.enums.ParameterType.Activation,
        dimension_index={
            enpheeph.utils.enums.DimensionType.Tensor: (slice(10, 100),),
            enpheeph.utils.enums.DimensionType.Batch: ...,
        },
        bit_index=None,
    ),
    enabled_metrics=enpheeph.utils.enums.MonitorMetric.StandardDeviation,
    storage_plugin=storage_plugin,
    move_to_first=False,
    indexing_plugin=enpheeph.injections.plugins.indexing.indexingplugin.IndexingPlugin(
        dimension_dict=enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
    ),
)
# fault_2: stuck-at-one fault over ALL bits (bit_index=...) of activations
# [10:100] of "model.classifier.1"
fault_2 = enpheeph.injections.OutputPyTorchFault(
    location=enpheeph.utils.data_classes.FaultLocation(
        module_name="model.classifier.1",
        parameter_type=enpheeph.utils.enums.ParameterType.Activation,
        dimension_index={
            enpheeph.utils.enums.DimensionType.Tensor: (slice(10, 100),),
            enpheeph.utils.enums.DimensionType.Batch: ...,
        },
        bit_index=...,
        bit_fault_value=enpheeph.utils.enums.BitFaultValue.StuckAtOne,
    ),
    low_level_torch_plugin=pytorch_mask_plugin,
    indexing_plugin=enpheeph.injections.plugins.indexing.indexingplugin.IndexingPlugin(
        dimension_dict=enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
    ),
)
# monitor_4: same location/metric as monitor_3, observing post-fault output
monitor_4 = enpheeph.injections.OutputPyTorchMonitor(
    location=enpheeph.utils.data_classes.MonitorLocation(
        module_name="model.classifier.1",
        parameter_type=enpheeph.utils.enums.ParameterType.Activation,
        dimension_index={
            enpheeph.utils.enums.DimensionType.Tensor: (slice(10, 100),),
            enpheeph.utils.enums.DimensionType.Batch: ...,
        },
        bit_index=None,
    ),
    enabled_metrics=enpheeph.utils.enums.MonitorMetric.StandardDeviation,
    storage_plugin=storage_plugin,
    move_to_first=False,
    indexing_plugin=enpheeph.injections.plugins.indexing.indexingplugin.IndexingPlugin(
        dimension_dict=enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
    ),
)
# the handler owns all injections; individual ones are (de)activated below
injection_handler = enpheeph.handlers.InjectionHandler(
    injections=[monitor_1, fault_1, monitor_2, monitor_3, fault_2, monitor_4],
    library_handler_plugin=pytorch_handler_plugin,
)
# the Lightning callback wires the handler into the training/test loops
callback = enpheeph.integrations.pytorchlightning.InjectionCallback(
    injection_handler=injection_handler,
    storage_plugin=storage_plugin,
)
# CPU-only Trainer with the injection callback attached; checkpointing is
# handled manually below via save_checkpoint/load_from_checkpoint
trainer = pytorch_lightning.Trainer(
    callbacks=[callback],
    deterministic=True,
    enable_checkpointing=False,
    max_epochs=10,
    # one can use gpu but some functions will not be deterministic, so deterministic
    # must be set to False
    accelerator="cpu",
    devices=1,
    # if one uses spawn or dp it will fail as sqlite connector is not picklable
    # strategy="ddp",
)
model = AlexNetLightningModule(num_classes=10, pretrained=False)
# transform = torchvision.transforms.Compose(
#     [
#         #torchvision.transforms.ToTensor(),
#         torchvision.transforms.Normalize(
#             (0.5, 0.5, 0.5),
#             (0.5, 0.5, 0.5),
#         ),
#         torchvision.transforms.RandomHorizontalFlip(),
#     ]
# )
# CIFAR10 train/test splits; download=True fetches the archive on first run
cifar_train = torchvision.datasets.CIFAR10(
    str(CIFAR_DIRECTORY),
    train=True,
    download=True,
)
cifar_test = torchvision.datasets.CIFAR10(
    str(CIFAR_DIRECTORY),
    train=False,
    download=True,
)
# flash datamodule: 20% of the train split is held out for validation
datamodule = flash.image.ImageClassificationData.from_datasets(
    train_dataset=cifar_train,
    test_dataset=cifar_test,
    val_split=0.2,
    num_workers=64,
    batch_size=32,
)
# train only when no cached checkpoint exists, then always reload from disk
# so every run evaluates the same weights
if not WEIGHTS_FILE.exists():
    trainer.fit(
        model,
        train_dataloaders=datamodule.train_dataloader(),
        val_dataloaders=datamodule.val_dataloader(),
    )
    trainer.save_checkpoint(str(WEIGHTS_FILE))
model = model.load_from_checkpoint(str(WEIGHTS_FILE))
# evaluation protocol: clean baseline -> monitors only -> monitors + faults
# -> back to monitors only (should match the second run)
# no injections/monitors
print("\n\nBaseline, no injection or monitors\n")
trainer.test(
    model,
    dataloaders=datamodule.test_dataloader(),
)
# we enable only the monitors
# we use this as baseline, no injections
callback.injection_handler.activate([monitor_1, monitor_2, monitor_3, monitor_4])
print("\n\nBaseline, no injection, only monitors\n")
trainer.test(
    model,
    dataloaders=datamodule.test_dataloader(),
)
# we enable the faults
callback.injection_handler.activate([fault_1, fault_2])
print("\n\nWeight + activation injection\n")
trainer.test(
    model,
    dataloaders=datamodule.test_dataloader(),
)
# we disable the faults
callback.injection_handler.deactivate([fault_1, fault_2])
print("\n\nBaseline again, no injection, only monitors\n")
# we test again to reach same results as before injection
trainer.test(
    model,
    dataloaders=datamodule.test_dataloader(),
)
| 14,253 | 34.108374 | 87 | py |
enpheeph | enpheeph-main/papers/iros2022/experiments/injector_script.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
import collections.abc
import datetime
import functools
import importlib
import operator
import os
import pathlib
import random
import sys
import typing
import flash
import pytorch_lightning
import torch
import torch.quantization
import torchinfo
import enpheeph.injections.fpquantizedoutputpytorchfault
import enpheeph.injections.monitorabc
# for pickle to avoid explosion: the snn_training config classes must be
# importable, so the directory is (briefly) put on sys.path
_SNN_TRAINING_CONFIG_DIR = str(
    pathlib.Path(__file__).parent / "results/configs/snn_training"
)
if _SNN_TRAINING_CONFIG_DIR not in sys.path:
    sys.path.append(_SNN_TRAINING_CONFIG_DIR)
    # BUGFIX: the pop used to run unconditionally, so when the directory was
    # already on sys.path it removed an unrelated last entry instead; it now
    # only undoes the append performed just above.
    # NOTE(review): append-then-pop is a net no-op — confirm whether the path
    # was actually meant to stay on sys.path for later unpickling.
    sys.path.pop()
CURRENT_DIR = pathlib.Path(__file__).absolute().parent
RESULTS_DIRECTORY = CURRENT_DIR / "results"
# shared dataset root used by all dataset config modules
DATASET_DIRECTORY = pathlib.Path("/shared/ml/datasets/vision/")
def recursive_dict_update(original: typing.Dict, mergee: typing.Dict) -> typing.Dict:
    """Recursively merge ``mergee`` into ``original``, in place.

    Keys present in both mappings are overwritten with the value from
    ``mergee``; nested mappings are merged recursively instead of being
    replaced wholesale.

    Args:
        original: the mapping to update (mutated in place).
        mergee: the mapping providing the new values.

    Returns:
        The updated ``original`` (the same object, returned for chaining).
    """
    for k, v in mergee.items():
        # recurse only when BOTH sides are mappings: if the new value is a
        # scalar/sequence it must replace the old nested mapping outright
        # (recursing into a non-mapping would raise AttributeError on .items())
        if (
            k in original
            and isinstance(original[k], collections.abc.Mapping)
            and isinstance(v, collections.abc.Mapping)
        ):
            original[k] = recursive_dict_update(original[k], v)
        else:
            original[k] = v
    return original
def safe_recursive_instantiate_dict(config: typing.Any) -> typing.Any:
    """Recursively instantiate ``callable``/``callable_args`` specs in a config.

    A mapping whose keys are exactly ``{"callable", "callable_args"}`` is
    replaced by the result of calling ``callable`` with the (recursively
    instantiated) ``callable_args``.  Any other mapping is updated in place,
    value by value; lists/tuples/sets are rebuilt as the same concrete type
    with instantiated elements; every other value passes through unchanged.
    """
    if isinstance(config, collections.abc.Mapping):
        # a direct instantiation spec: call it with its instantiated arguments
        if set(config.keys()) == {"callable", "callable_args"}:
            instantiated_args = safe_recursive_instantiate_dict(
                config["callable_args"]
            )
            return config["callable"](**instantiated_args)
        # plain mapping: instantiate every value, reusing the same object
        # (copy.deepcopy does not work here, so mutate in place)
        for key in config:
            config[key] = safe_recursive_instantiate_dict(config[key])
        return config
    if isinstance(config, (list, tuple, set)):
        # rebuild the sequence with the same concrete type
        return config.__class__(
            safe_recursive_instantiate_dict(element) for element in config
        )
    # scalars and any other object are returned as-is
    return config
def compute_layer_module_name(
    layer: torchinfo.layer_info.LayerInfo,
) -> str:
    """Build the dotted module name of ``layer`` from its parent chain.

    Collects the ``var_name`` of the layer and of every ancestor with
    ``depth > 0``, then joins them with dots.  The root model itself
    (depth 0) is excluded, since including it would add a spurious
    leading component.
    """
    name_parts = [layer.var_name]
    ancestor = layer.parent_info
    # walk up the hierarchy, stopping before the root model (depth 0)
    while ancestor is not None and ancestor.depth > 0:
        name_parts.append(ancestor.var_name)
        ancestor = ancestor.parent_info
    return ".".join(reversed(name_parts))
# we can create the injections
def create_injections_for_layer_with_randomness_value(
    config: typing.Dict[str, typing.Any],
    layer: torchinfo.layer_info.LayerInfo,
    randomness: float,
) -> typing.Generator[enpheeph.utils.data_classes.InjectionLocationABC, None, None]:
    # Build the fault injections for one summarized layer; ``randomness``
    # (0..1) controls the fraction of elements hit by the fault.
    # NOTE(review): despite the Generator return annotation, this function
    # returns a plain list (possibly empty) of injection objects.
    module_name = compute_layer_module_name(layer=layer)
    # we check if the layer is ok to run a fault injection on
    if not layer.is_leaf_layer or not layer.executed:
        return []
    print(f"Layer: {module_name}\nRandomness: {randomness}\n\n")
    injections = []
    # the injection flavor is hard-coded here; the commented lines are the
    # other supported modes
    # inj_type = "activation"
    # inj_type = "quantized_activation"
    inj_type = "sparse_activation"
    # inj_type = "weight"
    # NOTE(review): if inj_type ever matches none of the branches below,
    # ``injections.append(inj)`` at the end would raise NameError.
    if inj_type == "activation":
        # we multiply by a very small number > 1 to increase the range and cover also 1
        # we skip the batch size as the first dimension
        shape = layer.output_size[1:]
        if (
            config.get("injection_config", {}).get(
                "indexing_dimension_dict",
                enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
            )
            != enpheeph.utils.constants.PYTORCH_DIMENSION_DICT
        ):
            # we remove the extra time dimension if it is an SNN
            shape = shape[1:]
        mask = torch.rand(*shape, device="cpu") * 1.00000001 <= randomness
        inj = enpheeph.injections.OutputPyTorchFault(
            location=enpheeph.utils.data_classes.FaultLocation(
                module_name=module_name,
                parameter_type=enpheeph.utils.enums.ParameterType.Activation,
                dimension_index={
                    enpheeph.utils.enums.DimensionType.Batch: ...,
                    enpheeph.utils.enums.DimensionType.Time: ...,
                },
                dimension_mask={
                    enpheeph.utils.enums.DimensionType.Tensor: mask.tolist(),
                },
                bit_index=random.sample(
                    list(range(config.get("injection_config", {}).get("bitwidth", 32))),
                    1,
                ),
                bit_fault_value=enpheeph.utils.enums.BitFaultValue.BitFlip,
            ),
            low_level_torch_plugin=enpheeph.injections.plugins.mask.autopytorchmaskplugin.AutoPyTorchMaskPlugin(),
            indexing_plugin=enpheeph.injections.plugins.indexing.IndexingPlugin(
                dimension_dict=config.get("injection_config", {}).get(
                    "indexing_dimension_dict",
                    enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
                ),
            ),
        )
    elif inj_type == "sparse_activation":
        shape = layer.output_size[1:]
        # total element count of the (batch-less) output, used to size the
        # random sample of sparse indices
        approx_n_elements = functools.reduce(operator.mul, shape)
        inj = enpheeph.injections.DenseSparseOutputPyTorchFault(
            location=enpheeph.utils.data_classes.FaultLocation(
                module_name=module_name,
                parameter_type=enpheeph.utils.enums.ParameterType.Activation
                | enpheeph.utils.enums.ParameterType.Sparse
                | enpheeph.utils.enums.ParameterType.Value,
                dimension_index={
                    enpheeph.utils.enums.DimensionType.Tensor: random.sample(
                        list(range(approx_n_elements)),
                        abs(int((random.random() - randomness) * approx_n_elements)),
                    ),
                },
                dimension_mask=None,
                bit_index=random.sample(
                    list(range(config.get("injection_config", {}).get("bitwidth", 32))),
                    1,
                ),
                bit_fault_value=enpheeph.utils.enums.BitFaultValue.BitFlip,
            ),
            low_level_torch_plugin=enpheeph.injections.plugins.mask.autopytorchmaskplugin.AutoPyTorchMaskPlugin(),
            indexing_plugin=enpheeph.injections.plugins.indexing.IndexingPlugin(
                dimension_dict=config.get("injection_config", {}).get(
                    "indexing_dimension_dict",
                    enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
                ),
            ),
        )
    elif inj_type == "quantized_activation":
        # we multiply by a very small number > 1 to increase the range and cover also 1
        # we skip the batch size as the first dimension
        shape = layer.output_size[1:]
        if (
            config.get("injection_config", {}).get(
                "indexing_dimension_dict",
                enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
            )
            != enpheeph.utils.constants.PYTORCH_DIMENSION_DICT
        ):
            # we remove the extra time dimension if it is an SNN
            shape = shape[1:]
        mask = torch.rand(*shape, device="cpu") * 1.00000001 <= randomness
        inj = enpheeph.injections.fpquantizedoutputpytorchfault.FPQuantizedOutputPyTorchFault(
            location=enpheeph.utils.data_classes.FaultLocation(
                module_name=module_name,
                parameter_type=enpheeph.utils.enums.ParameterType.Activation,
                dimension_index={
                    enpheeph.utils.enums.DimensionType.Batch: ...,
                    enpheeph.utils.enums.DimensionType.Time: ...,
                },
                dimension_mask={
                    enpheeph.utils.enums.DimensionType.Tensor: mask.tolist(),
                },
                bit_index=random.sample(
                    list(range(config.get("injection_config", {}).get("bitwidth", 32))),
                    1,
                ),
                bit_fault_value=enpheeph.utils.enums.BitFaultValue.BitFlip,
            ),
            low_level_torch_plugin=enpheeph.injections.plugins.mask.autopytorchmaskplugin.AutoPyTorchMaskPlugin(),
            indexing_plugin=enpheeph.injections.plugins.indexing.IndexingPlugin(
                dimension_dict=config.get("injection_config", {}).get(
                    "indexing_dimension_dict",
                    enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
                ),
            ),
        )
    elif inj_type == "weight":
        # we multiply by a very small number > 1 to increase the range and cover also 1
        # we skip the batch size as the first dimension
        mask = (
            torch.rand(*layer.module.weight.shape, device="cpu") * 1.00000001
            <= randomness
        )
        inj = enpheeph.injections.WeightPyTorchFault(
            location=enpheeph.utils.data_classes.FaultLocation(
                module_name=module_name,
                parameter_type=enpheeph.utils.enums.ParameterType.Weight,
                parameter_name="weight",
                dimension_index={
                    enpheeph.utils.enums.DimensionType.Batch: ...,
                    enpheeph.utils.enums.DimensionType.Time: ...,
                },
                dimension_mask={
                    enpheeph.utils.enums.DimensionType.Tensor: mask.tolist(),
                },
                bit_index=random.sample(
                    list(range(config.get("injection_config", {}).get("bitwidth", 32))),
                    1,
                ),
                bit_fault_value=enpheeph.utils.enums.BitFaultValue.BitFlip,
            ),
            low_level_torch_plugin=enpheeph.injections.plugins.mask.autopytorchmaskplugin.AutoPyTorchMaskPlugin(),
            indexing_plugin=enpheeph.injections.plugins.indexing.IndexingPlugin(
                dimension_dict=config.get("injection_config", {}).get(
                    "indexing_dimension_dict",
                    enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
                ),
            ),
        )
    injections.append(inj)
    return injections
def setup_argument_parser():
    """Create the CLI parser for the injection experiment runner.

    Three required path options select the config module, the model
    checkpoint and the SQLite storage file; ``--csv-results`` defaults to
    the null device.  Three mutually-exclusive flag pairs choose the
    quantization mode (static/dynamic), the device (cpu/gpu) and the
    injection type (random/custom).
    """
    parser = argparse.ArgumentParser()
    # required path arguments
    for flag in ("--config", "--model-weight-file", "--storage-file"):
        parser.add_argument(
            flag,
            type=pathlib.Path,
            required=True,
        )
    parser.add_argument(
        "--csv-results",
        type=pathlib.Path,
        default=pathlib.Path(os.devnull),
    )
    # each tuple below becomes a pair of mutually exclusive boolean flags
    for first_flag, second_flag in (
        ("--static-quantize", "--dynamic-quantize"),
        ("--cpu", "--gpu"),
        ("--random", "--custom"),
    ):
        group = parser.add_mutually_exclusive_group()
        group.add_argument(first_flag, action="store_true")
        group.add_argument(second_flag, action="store_true")
    return parser
def main(args: typing.Optional[typing.List[str]] = None) -> None:
    """Run the full injection campaign described by the CLI arguments.

    Loads the selected config module, optionally applies static/dynamic
    quantization, then runs a clean baseline test followed by either the
    custom injections declared in the config or layer-by-layer random
    injections, appending one CSV row per test run to ``--csv-results``.
    """
    parser = setup_argument_parser()
    namespace = parser.parse_args(args=args)
    # here we append the path of the configuration to sys.path so that it can
    # be easily imported
    sys.path.append(str(namespace.config.parent))
    # we import the module by taking its name
    config_module = importlib.import_module(namespace.config.with_suffix("").name)
    # we select the devices on which we run the simulation
    if namespace.gpu:
        gpu_config = importlib.import_module("gpu_config")
        device_config = gpu_config.config()
    elif namespace.cpu:
        cpu_config = importlib.import_module("cpu_config")
        device_config = cpu_config.config()
    else:
        device_config = {}
    if namespace.random:
        random_config = importlib.import_module("random_multi_config")
        injection_config = random_config.config()
    else:
        injection_config = {}
    # we remove the previously appended path to leave it as is
    sys.path.pop()
    # we instantiate the config from the imported module
    initial_config = config_module.config(
        dataset_directory=DATASET_DIRECTORY,
        model_weight_file=namespace.model_weight_file,
        storage_file=namespace.storage_file,
    )
    # NOTE(review): the second call passes initial_config (not config); this
    # only works because recursive_dict_update mutates and returns the same
    # object, so both names refer to the merged dict — confirm intent
    config = recursive_dict_update(initial_config, device_config)
    config = recursive_dict_update(initial_config, injection_config)
    config = safe_recursive_instantiate_dict(config)
    pytorch_lightning.seed_everything(**config.get("seed_everything", {}))
    trainer = config["trainer"]
    model = config["model"]
    # model = config["model_post_init"](model)
    datamodule = config["datamodule"]
    # if the static quantization was selected
    # we train the model for an additional epoch (set in the default trainer config)
    # to be able to create the proper static quantization weights + activations
    # **NOTE**: static quantization is not supported on GPU
    if namespace.static_quantize:
        config["injection_handler"].deactivate()
        trainer.callbacks.append(
            pytorch_lightning.callbacks.QuantizationAwareTraining()
        )
        trainer.fit(
            model,
            datamodule=datamodule,
        )
    # with the dynamic quantization we quantize only the weights by a fixed
    # configuration
    # **NOTE**: dynamic quantization does not work on GPU
    elif namespace.dynamic_quantize:
        model = torch.quantization.quantize_dynamic(
            model,
            qconfig_spec=config.get("dynamic_quantization_config", {}).get(
                "qconfig",
                {
                    torch.nn.Linear,
                    torch.nn.LSTM,
                    torch.nn.GRU,
                    torch.nn.LSTMCell,
                    torch.nn.RNNCell,
                    torch.nn.GRUCell,
                    torch.nn.EmbeddingBag,
                },
            ),
            dtype=config.get("dynamic_quantization_config", {}).get(
                "qdtype",
                torch.qint8,
            ),
            # we need to force in-place otherwise Flash Models cannot be deep-copied
            inplace=True,
        )
    # clean baseline run: no injections active, writes the CSV header + a row
    print("\n\nNo injections at all\n\n")
    config["injection_handler"].deactivate()
    time = datetime.datetime.utcnow()
    res = trainer.test(
        model,
        dataloaders=datamodule.test_dataloader(),
    )[
        0
    ]  # we have only one test dataloader
    execution_time = datetime.datetime.utcnow() - time
    namespace.csv_results.parent.mkdir(parents=True, exist_ok=True)
    with namespace.csv_results.open("a") as f:
        f.write(
            f"randomness,layer_name,execution_time,{','.join(str(x) for x in res.keys())}\n"
        )
        f.write(
            f"0,-,{str(execution_time.total_seconds())},{','.join(str(x) for x in res.values())}\n"
        )
    # "custom" mode: run the injections already declared inside the config
    if config.get("injection_config", {}).get("custom", True):
        # we do only monitors if we activate any injection
        if config["injection_handler"].activate(
            [
                monitor
                for monitor in config["injection_handler"].injections
                if isinstance(monitor, enpheeph.injections.monitorabc.MonitorABC)
            ]
        ):
            print("\n\nOnly monitors\n\n")
            time = datetime.datetime.utcnow()
            res = trainer.test(
                model,
                dataloaders=datamodule.test_dataloader(),
            )[
                0
            ]  # we have only one test dataloader
            execution_time = datetime.datetime.utcnow() - time
            with namespace.csv_results.open("a") as f:
                f.write(
                    f"0,-,{str(execution_time.total_seconds())},{','.join(str(x) for x in res.values())}\n"
                )
        print("\n\nAll injections\n\n")
        config["injection_handler"].activate()
        time = datetime.datetime.utcnow()
        res = trainer.test(
            model,
            dataloaders=datamodule.test_dataloader(),
        )[
            0
        ]  # we have only one test dataloader
        execution_time = datetime.datetime.utcnow() - time
        with namespace.csv_results.open("a") as f:
            f.write(
                f"custom,custom,{str(execution_time.total_seconds())},{','.join(str(x) for x in res.values())}\n"
            )
    # random mode: sweep every allowed layer for each randomness value,
    # generating fresh injections per (layer, randomness) pair
    else:
        # probe one batch to derive the model input shape for torchinfo
        inp = next(iter(datamodule.test_dataloader()))
        if isinstance(inp, dict):
            inp = inp[flash.core.data.data_source.DefaultDataKeys.INPUT]
            shape = list(inp.shape)
        else:
            inp = inp[0]
            shape = list(inp.shape)
        shape[1] = 1
        # otherwise it does not work for SNNs
        shape[0] = 1
        # we take the shape from the datamodule
        summary = torchinfo.summary(model=model, input_size=shape, device="cpu")
        #
        allowed_layers = config.get("injection_config", {}).get("layers", None)
        for r in config.get("injection_config", {}).get("randomness", []):
            for layer in summary.summary_list:
                if (
                    allowed_layers is not None
                    and compute_layer_module_name(layer) not in allowed_layers
                ):
                    continue
                config["injection_handler"].remove_injections()
                injections = create_injections_for_layer_with_randomness_value(
                    config=config, layer=layer, randomness=r
                )
                config["injection_handler"].add_injections(injections)
                config["injection_handler"].deactivate()
                # we do only monitors if we activate any injection
                if config["injection_handler"].activate(
                    [
                        monitor
                        for monitor in config["injection_handler"].injections
                        if isinstance(
                            monitor, enpheeph.injections.monitorabc.MonitorABC
                        )
                    ]
                ):
                    print("\n\nOnly monitors\n\n")
                    time = datetime.datetime.utcnow()
                    res = trainer.test(
                        model,
                        dataloaders=datamodule.test_dataloader(),
                    )[
                        0
                    ]  # we have only one test dataloader
                    execution_time = datetime.datetime.utcnow() - time
                    with namespace.csv_results.open("a") as f:
                        f.write(
                            f"0,-,{str(execution_time.total_seconds())},{','.join(str(x) for x in res.values())}\n"
                        )
                if config["injection_handler"].activate():
                    print("\n\nAll injections\n\n")
                    time = datetime.datetime.utcnow()
                    res = trainer.test(
                        model,
                        dataloaders=datamodule.test_dataloader(),
                    )[
                        0
                    ]  # we have only one test dataloader
                    execution_time = datetime.datetime.utcnow() - time
                    with namespace.csv_results.open("a") as f:
                        f.write(
                            f"{str(r)},{compute_layer_module_name(layer)},{str(execution_time.total_seconds())},{','.join(str(x) for x in res.values())}\n"
                        )
    # final sanity run without injections, should match the first baseline
    print("\n\nAgain no injections at all\n\n")
    config["injection_handler"].deactivate()
    time = datetime.datetime.utcnow()
    res = trainer.test(
        model,
        dataloaders=datamodule.test_dataloader(),
    )[
        0
    ]  # we have only one test dataloader
    execution_time = datetime.datetime.utcnow() - time
    with namespace.csv_results.open("a") as f:
        f.write(
            f"0,-,{str(execution_time.total_seconds())},{','.join(str(x) for x in res.values())}\n"
        )
# standard script entry point: run the experiment only when executed directly
if __name__ == "__main__":
    main()
| 22,693 | 38.605585 | 155 | py |
enpheeph | enpheeph-main/papers/iros2022/experiments/results/configs/base_config.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import typing
import pytorch_lightning
def config(
    **kwargs: typing.Any,
) -> typing.Dict[str, typing.Any]:
    """Return the base experiment configuration shared by all tasks.

    Provides the seed settings, empty placeholders for model/datamodule/
    injection_handler (filled in by the task-specific configs), and a
    single-epoch GPU Trainer spec in ``callable``/``callable_args`` form.
    """
    progress_bar = pytorch_lightning.callbacks.TQDMProgressBar(
        refresh_rate=10,
    )
    trainer_args = {
        "callbacks": [progress_bar],
        "deterministic": True,
        "enable_checkpointing": False,
        "max_epochs": 1,
        # one can use gpu but some functions will not be deterministic,
        # so deterministic
        # must be set to False
        "accelerator": "gpu",
        "devices": 1,
        # if one uses spawn or dp it will fail
        # as sqlite connector is not picklable
        # "strategy": "ddp",
    }
    return {
        "seed_everything": {
            "seed": 42,
            "workers": True,
        },
        "model": {},
        "datamodule": {},
        "injection_handler": {},
        "trainer": {
            "callable": pytorch_lightning.Trainer,
            "callable_args": trainer_args,
        },
    }
| 1,884 | 32.660714 | 79 | py |
enpheeph | enpheeph-main/papers/iros2022/experiments/results/configs/image_classification_config.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import pathlib
import typing
import flash
import flash.image
import enpheeph
import enpheeph.injections.plugins.mask.autopytorchmaskplugin
import base_config
import cifar10_config
import quantization_config
def config(
    *,
    dataset_directory: pathlib.Path,
    model_weight_file: pathlib.Path,
    storage_file: pathlib.Path,
    **kwargs: typing.Any,
) -> typing.Dict[str, typing.Any]:
    """Build the full experiment config for CIFAR10 image classification.

    Starts from the base config, adds the CIFAR10 datamodule and
    quantization settings, wires in the flash ImageClassifier checkpoint
    and an enpheeph injection handler + Lightning callback backed by a
    SQLite storage plugin.
    """
    pytorch_handler_plugin = enpheeph.handlers.plugins.PyTorchHandlerPlugin()
    storage_plugin = enpheeph.injections.plugins.storage.SQLiteStoragePlugin(
        db_url="sqlite:///" + str(storage_file)
    )
    # the handler starts empty; injections are added later by the runner
    injection_handler = enpheeph.handlers.InjectionHandler(
        injections=[],
        library_handler_plugin=pytorch_handler_plugin,
    )
    model = {
        "callable": flash.image.ImageClassifier.load_from_checkpoint,
        "callable_args": {
            "checkpoint_path": str(model_weight_file),
            # issues with loading GPU model on CPU
            # it should work with PyTorch but there must be some problems with
            # PyTorch Lightning/Flash leading to use some GPU memory
            "map_location": "cpu",
        },
    }
    config = base_config.config()
    # datamodule update
    config.update(cifar10_config.config(dataset_directory=dataset_directory))
    # dynamic quantization update
    config.update(quantization_config.config())
    config["model"] = model
    # update the Trainer with flash as we are using flash models, to avoid
    # compatibility issues such as CUDA out of memory on CPU-only
    config["trainer"]["callable"] = flash.Trainer
    # we delay the instantiation of the callback to allow the saving of the
    # current configuration
    callback = enpheeph.integrations.pytorchlightning.InjectionCallback(
        injection_handler=injection_handler,
        storage_plugin=storage_plugin,
        extra_session_info=config,
    )
    config["trainer"]["callable_args"]["callbacks"].append(callback)
    # to save the injection handler to enable/disable faults
    config["injection_handler"] = injection_handler
    # to save the callback to access to the same storage plugin
    config["injection_callback"] = callback
    # custom is used to avoid the random injections
    config["injection_config"] = {}
    return config
| 3,094 | 33.775281 | 78 | py |
enpheeph | enpheeph-main/papers/iros2022/experiments/results/configs/cifar10_config.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import pathlib
import typing
import flash
import torchvision
def config(
    *,
    dataset_directory: pathlib.Path,
    **kwargs: typing.Any,
) -> typing.Dict[str, typing.Any]:
    """Return the datamodule config entry for CIFAR10.

    Downloads (if needed) the train and test splits under
    ``dataset_directory / "CIFAR10"`` and wraps them in a
    ``callable``/``callable_args`` spec for
    ``flash.image.ImageClassificationData.from_datasets``.
    """
    cifar_root = str(dataset_directory / "CIFAR10")
    train_dataset = torchvision.datasets.CIFAR10(
        cifar_root,
        train=True,
        download=True,
    )
    test_dataset = torchvision.datasets.CIFAR10(
        cifar_root,
        train=False,
        download=True,
    )
    datamodule_args = {
        "train_dataset": train_dataset,
        "test_dataset": test_dataset,
        "val_split": 0.2,
        "num_workers": 64,
        "batch_size": 32,
    }
    return {
        "datamodule": {
            "callable": flash.image.ImageClassificationData.from_datasets,
            "callable_args": datamodule_args,
        },
    }
| 1,651 | 32.04 | 77 | py |
enpheeph | enpheeph-main/papers/iros2022/experiments/results/configs/semantic_segmentantion_config.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import pathlib
import typing
import flash
import flash.image
import enpheeph
import enpheeph.injections.plugins.mask.autopytorchmaskplugin
import base_config
import carla_config
import quantization_config
def config(
    *,
    dataset_directory: pathlib.Path,
    model_weight_file: pathlib.Path,
    storage_file: pathlib.Path,
    **kwargs: typing.Any,
) -> typing.Dict[str, typing.Any]:
    """Assemble the fault-injection configuration for the CARLA semantic
    segmentation experiment.

    Combines the base/CARLA/quantization config fragments, attaches a Flash
    ``SemanticSegmentation`` checkpoint as the model, and registers an
    enpheeph injection callback backed by an SQLite results database.

    Args:
        dataset_directory: root folder of the CARLA dataset.
        model_weight_file: checkpoint to load the model weights from.
        storage_file: SQLite file in which injection results are stored.
        **kwargs: ignored, accepted for interface uniformity.

    Returns:
        The merged configuration dict, including ``injection_handler`` and
        ``injection_callback`` entries for the experiment runner.
    """
    handler_plugin = enpheeph.handlers.plugins.PyTorchHandlerPlugin()
    sqlite_storage = enpheeph.injections.plugins.storage.SQLiteStoragePlugin(
        db_url="sqlite:///" + str(storage_file)
    )
    # starts with no injections; the runner adds them later
    handler = enpheeph.handlers.InjectionHandler(
        injections=[],
        library_handler_plugin=handler_plugin,
    )
    model_spec = {
        "callable": flash.image.SemanticSegmentation.load_from_checkpoint,
        "callable_args": {
            "checkpoint_path": str(model_weight_file),
            # force CPU loading: loading a GPU-trained checkpoint otherwise
            # grabs GPU memory through PyTorch Lightning/Flash even on
            # CPU-only runs
            "map_location": "cpu",
        },
    }
    cfg = base_config.config()
    # dataset section
    cfg.update(carla_config.config(dataset_directory=dataset_directory))
    # dynamic quantization section
    cfg.update(quantization_config.config())
    cfg["model"] = model_spec
    # Flash models need the Flash Trainer to avoid compatibility issues such
    # as CUDA out of memory on CPU-only machines
    cfg["trainer"]["callable"] = flash.Trainer
    # semantic segmentation must run with deterministic=False
    cfg["trainer"]["callable_args"]["deterministic"] = False
    # instantiated here (rather than earlier) so the callback can capture the
    # fully assembled configuration as session info
    injection_callback = enpheeph.integrations.pytorchlightning.InjectionCallback(
        injection_handler=handler,
        storage_plugin=sqlite_storage,
        extra_session_info=cfg,
    )
    cfg["trainer"]["callable_args"]["callbacks"].append(injection_callback)
    # exposed so the runner can enable/disable faults ...
    cfg["injection_handler"] = handler
    # ... and reach the shared storage plugin
    cfg["injection_callback"] = injection_callback
    cfg["injection_config"] = {}
    return cfg
| 3,217 | 33.978261 | 78 | py |
enpheeph | enpheeph-main/papers/iros2022/experiments/results/configs/snn_dvsgesture_config.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import pathlib
import typing
import flash
import flash.image
import enpheeph
import enpheeph.injections.plugins.mask.autopytorchmaskplugin
import base_config
import dvs128gesture_config
import quantization_config
import snn_training.dvs128gesturesnnmodule
def config(
    *,
    dataset_directory: pathlib.Path,
    model_weight_file: pathlib.Path,
    storage_file: pathlib.Path,
    **kwargs: typing.Any,
) -> typing.Dict[str, typing.Any]:
    """Assemble the fault-injection configuration for the DVS128Gesture SNN
    experiment.

    Combines the base/DVS128Gesture/quantization config fragments, loads the
    spiking network from a checkpoint, and registers an enpheeph injection
    callback backed by an SQLite results database.

    Args:
        dataset_directory: root folder of the DVS128Gesture dataset.
        model_weight_file: checkpoint to load the SNN weights from.
        storage_file: SQLite file in which injection results are stored.
        **kwargs: ignored, accepted for interface uniformity.

    Returns:
        The merged configuration dict, including ``injection_handler`` and
        ``injection_callback`` entries for the experiment runner.
    """
    handler_plugin = enpheeph.handlers.plugins.PyTorchHandlerPlugin()
    sqlite_storage = enpheeph.injections.plugins.storage.SQLiteStoragePlugin(
        db_url="sqlite:///" + str(storage_file)
    )
    # starts with no injections; the runner adds them later
    handler = enpheeph.handlers.InjectionHandler(
        injections=[],
        library_handler_plugin=handler_plugin,
    )
    model_spec = {
        "callable": snn_training.dvs128gesturesnnmodule.DVS128GestureSNNModule.load_from_checkpoint,
        "callable_args": {
            "checkpoint_path": str(model_weight_file),
            # force CPU loading: loading a GPU-trained checkpoint otherwise
            # grabs GPU memory through PyTorch Lightning/Flash even on
            # CPU-only runs
            "map_location": "cpu",
        },
    }
    cfg = base_config.config()
    # dataset section
    cfg.update(dvs128gesture_config.config(dataset_directory=dataset_directory))
    # dynamic quantization section
    cfg.update(quantization_config.config())
    cfg["model"] = model_spec
    # use the Flash Trainer to avoid compatibility issues such as CUDA out of
    # memory on CPU-only machines
    cfg["trainer"]["callable"] = flash.Trainer
    # instantiated here (rather than earlier) so the callback can capture the
    # fully assembled configuration as session info
    injection_callback = enpheeph.integrations.pytorchlightning.InjectionCallback(
        injection_handler=handler,
        storage_plugin=sqlite_storage,
        extra_session_info=cfg,
    )
    cfg["trainer"]["callable_args"]["callbacks"].append(injection_callback)
    # exposed so the runner can enable/disable faults ...
    cfg["injection_handler"] = handler
    # ... and reach the shared storage plugin
    cfg["injection_callback"] = injection_callback
    # SNN tensors carry an extra time dimension, hence the norse layout
    cfg["injection_config"] = {
        "indexing_dimension_dict": enpheeph.utils.constants.NORSE_DIMENSION_DICT,
    }
    return cfg
| 3,267 | 34.521739 | 100 | py |
enpheeph | enpheeph-main/papers/iros2022/experiments/results/configs/snn_dvsgesture_config_single.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import pathlib
import typing
import enpheeph.injections
import enpheeph.injections.plugins
import snn_dvsgesture_config
def config(
    *,
    dataset_directory: pathlib.Path,
    model_weight_file: pathlib.Path,
    storage_file: pathlib.Path,
    **kwargs: typing.Any,
) -> typing.Dict[str, typing.Any]:
    """Single-fault variant of the DVS128Gesture SNN configuration.

    Builds the base SNN configuration and adds one predefined stuck-at-one
    activation fault on the first convolution layer of the network, instead
    of letting the runner generate random injections.

    Args:
        dataset_directory: root folder of the DVS128Gesture dataset.
        model_weight_file: checkpoint to load the SNN weights from.
        storage_file: SQLite file in which injection results are stored.
        **kwargs: ignored, accepted for interface uniformity.

    Returns:
        The configuration dict with the single fault already registered on
        ``injection_handler`` and ``injection_config["custom"]`` set.
    """
    config = snn_dvsgesture_config.config(
        dataset_directory=dataset_directory,
        model_weight_file=model_weight_file,
        storage_file=storage_file,
    )
    # NOTE(review): the original code evaluated
    # ``config["injection_callback"].storage_plugin`` here and discarded the
    # result — a copy-paste leftover from the image-classification variant,
    # where the value is assigned and used. The no-op statement was removed.
    pytorch_mask_plugin = (
        enpheeph.injections.plugins.mask.autopytorchmaskplugin.AutoPyTorchMaskPlugin()
    )
    fault_2 = enpheeph.injections.OutputPyTorchFault(
        location=enpheeph.utils.data_classes.FaultLocation(
            # indices in the SNN Sequential: 2/6 are the conv layers,
            # 11/13 the linear layers, 3 is the first LIF cell
            module_name="sequential.2",
            parameter_type=enpheeph.utils.enums.ParameterType.Activation,
            dimension_index={
                enpheeph.utils.enums.DimensionType.Tensor: (slice(10, 15), ...),
                enpheeph.utils.enums.DimensionType.Batch: ...,
                enpheeph.utils.enums.DimensionType.Time: ...,
            },
            # flip only the MSB (bit 31) of the fp32 activations
            bit_index=[31],
            bit_fault_value=enpheeph.utils.enums.BitFaultValue.StuckAtOne,
        ),
        low_level_torch_plugin=pytorch_mask_plugin,
        # SNN tensors carry an extra time dimension, hence the norse layout
        indexing_plugin=enpheeph.injections.plugins.indexing.IndexingPlugin(
            dimension_dict=enpheeph.utils.constants.NORSE_DIMENSION_DICT,
        ),
    )
    config["injection_handler"].add_injections(
        injections=[fault_2],
    )
    # "custom" tells the runner to use exactly this injection instead of
    # generating random ones
    config["injection_config"] = {
        "custom": True,
    }
    return config
| 3,295 | 35.622222 | 86 | py |
enpheeph | enpheeph-main/papers/iros2022/experiments/results/configs/quantization_config.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import typing
import torch
import torch.quantization
def config(
    **kwargs: typing.Any,
) -> typing.Dict[str, typing.Any]:
    """Return the dynamic-quantization section of the experiment config.

    Lists the layer types to be dynamically quantized to ``torch.qint8``.

    Args:
        **kwargs: ignored, accepted for interface uniformity with the other
            config modules.

    Returns:
        A dict with a single ``"dynamic_quantization_config"`` entry holding
        the ``qconfig`` layer-type set and the target ``dtype``.
    """
    # recurrent/linear/embedding layers are the ones supported by PyTorch
    # dynamic quantization
    quantizable_layer_types = {
        torch.nn.Linear,
        torch.nn.LSTM,
        torch.nn.GRU,
        torch.nn.LSTMCell,
        torch.nn.RNNCell,
        torch.nn.GRUCell,
        torch.nn.EmbeddingBag,
    }
    return {
        "dynamic_quantization_config": {
            "qconfig": quantizable_layer_types,
            "dtype": torch.qint8,
        }
    }
| 1,291 | 30.512195 | 77 | py |
enpheeph | enpheeph-main/papers/iros2022/experiments/results/configs/image_classification_config_single.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import pathlib
import typing
import enpheeph.injections
import enpheeph.injections.plugins
import image_classification_config
def _pytorch_indexing_plugin():
    # every injection in this experiment indexes tensors with the standard
    # PyTorch dimension layout
    return enpheeph.injections.plugins.indexing.IndexingPlugin(
        dimension_dict=enpheeph.utils.constants.PYTORCH_DIMENSION_DICT,
    )


def _activation_std_monitor(module_name, storage_plugin, bit_index):
    """Build a standard-deviation monitor on the activations of a module.

    Args:
        module_name: dotted name of the monitored module inside the model.
        storage_plugin: storage backend the monitor writes its metrics to.
        bit_index: forwarded verbatim to the ``MonitorLocation`` (the
            experiment uses both ``...`` and ``None`` here).
    """
    return enpheeph.injections.OutputPyTorchMonitor(
        location=enpheeph.utils.data_classes.MonitorLocation(
            module_name=module_name,
            parameter_type=enpheeph.utils.enums.ParameterType.Activation,
            dimension_index={
                enpheeph.utils.enums.DimensionType.Tensor: ...,
                enpheeph.utils.enums.DimensionType.Batch: ...,
            },
            bit_index=bit_index,
        ),
        enabled_metrics=enpheeph.utils.enums.MonitorMetric.StandardDeviation,
        storage_plugin=storage_plugin,
        move_to_first=False,
        indexing_plugin=_pytorch_indexing_plugin(),
    )


def config(
    *,
    dataset_directory: pathlib.Path,
    model_weight_file: pathlib.Path,
    storage_file: pathlib.Path,
    **kwargs: typing.Any,
) -> typing.Dict[str, typing.Any]:
    """Single-fault variant of the image-classification configuration.

    Builds the base configuration and adds a fixed set of injections: a
    weight fault on the first backbone layer and an activation fault on the
    whole backbone output, each sandwiched between standard-deviation
    monitors so the fault impact can be measured.

    Args:
        dataset_directory: root folder of the image dataset.
        model_weight_file: checkpoint to load the classifier weights from.
        storage_file: SQLite file in which injection results are stored.
        **kwargs: ignored, accepted for interface uniformity.

    Returns:
        The configuration dict with the injections already registered on
        ``injection_handler`` and ``injection_config["custom"]`` set.
    """
    config = image_classification_config.config(
        dataset_directory=dataset_directory,
        model_weight_file=model_weight_file,
        storage_file=storage_file,
    )
    # reuse the storage backend of the callback created by the base config
    storage_plugin = config["injection_callback"].storage_plugin
    pytorch_mask_plugin = (
        enpheeph.injections.plugins.mask.autopytorchmaskplugin.AutoPyTorchMaskPlugin()
    )
    # first backbone layer: "adapter.backbone.conv1" for resnet18,
    # "adapter.backbone.0" for vgg11
    first_layer = "adapter.backbone.0"
    backbone = "adapter.backbone"
    # monitor before the weight fault is applied
    monitor_1 = _activation_std_monitor(first_layer, storage_plugin, bit_index=...)
    fault_1 = enpheeph.injections.WeightPyTorchFault(
        location=enpheeph.utils.data_classes.FaultLocation(
            module_name=first_layer,
            parameter_type=enpheeph.utils.enums.ParameterType.Weight,
            parameter_name="weight",
            dimension_index={
                enpheeph.utils.enums.DimensionType.Tensor: (
                    ...,
                    0,
                    0,
                ),
            },
            bit_index=[10, 16, 31],
            bit_fault_value=enpheeph.utils.enums.BitFaultValue.StuckAtOne,
        ),
        low_level_torch_plugin=pytorch_mask_plugin,
        indexing_plugin=_pytorch_indexing_plugin(),
    )
    # monitor after the weight fault, on the same layer
    monitor_2 = _activation_std_monitor(first_layer, storage_plugin, bit_index=None)
    # monitor before the backbone-output fault is applied
    monitor_3 = _activation_std_monitor(backbone, storage_plugin, bit_index=None)
    fault_2 = enpheeph.injections.OutputPyTorchFault(
        location=enpheeph.utils.data_classes.FaultLocation(
            module_name=backbone,
            parameter_type=enpheeph.utils.enums.ParameterType.Activation,
            dimension_index={
                enpheeph.utils.enums.DimensionType.Tensor: (slice(10, 15),),
                enpheeph.utils.enums.DimensionType.Batch: ...,
            },
            # flip only the MSB (bit 31) of the fp32 activations
            bit_index=[31],
            bit_fault_value=enpheeph.utils.enums.BitFaultValue.StuckAtOne,
        ),
        low_level_torch_plugin=pytorch_mask_plugin,
        indexing_plugin=_pytorch_indexing_plugin(),
    )
    # monitor after the backbone-output fault
    monitor_4 = _activation_std_monitor(backbone, storage_plugin, bit_index=None)
    config["injection_handler"].add_injections(
        injections=[monitor_1, fault_1, monitor_2, monitor_3, fault_2, monitor_4],
    )
    # "custom" tells the runner to use exactly these injections instead of
    # generating random ones
    config["injection_config"] = {
        "custom": True,
    }
    return config
| 6,648 | 38.577381 | 86 | py |
enpheeph | enpheeph-main/papers/iros2022/experiments/results/configs/snn_training/dvs128gesturesnnmodule.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import functools
import typing
import norse
import pytorch_lightning
import pytorch_lightning.utilities.cli
import torch
import torchmetrics
import torchvision
class SNNReturnTuple(typing.NamedTuple):
    """Result pair returned by the time-looped SNN forward pass when
    ``return_state`` is enabled."""

    # decoded (or raw stacked) network output
    output: torch.Tensor
    # neuron state; NOTE(review): the decorated forward actually passes the
    # per-timestep state *list* here — annotation kept as-is, confirm intent
    state: torch.Tensor
# decorator to be used for running the proper loop with a forward of the main
# model
def snn_module_forward_decorator(model_forward):
@functools.wraps(model_forward)
def inner_forward(
self,
inputs: torch.Tensor,
*,
state: typing.Optional[typing.Sequence[typing.Tuple[torch.Tensor]]] = None,
) -> typing.Union[torch.Tensor, SNNReturnTuple]:
# we encode the inputs, if enabled
if self.encoding_flag:
encoded_inputs = self.encoder(inputs)
else:
encoded_inputs = inputs
# we save the sequence length from the shape of the inputs
seq_length = encoded_inputs.size()[0]
# states will contain the states at each time step, and the second
# dimension will be the one covering the number of stateful layers
# which returns states, which are named tuple
# we initialize the states with the given ones, and then we add
# new ones for covering the evolution of the system
# this is done only if we will return the state at the end
if self.return_state:
states = [state] + [None] * seq_length
# we need a list to save the output at each time step
out = []
# we iterate over the timesteps
for ts in range(seq_length):
# we load the correct state depending on whether we are saving
# them all or we only need it for execution
if self.return_state:
state = states[ts]
# we need to use self explicitly as this function is not
# bound to an instance since it's wrapped
output, state = model_forward(self, encoded_inputs[ts], state=state)
# we append the output at the current timestep to
# the output list
out.append(output)
# also here we save the state in a list for returning it,
# otherwise we save it just for the following execution
if self.return_state:
states[ts + 1] = state
# we stack the output to a torch tensor
torch_out = torch.stack(out)
# we decode the outputs, if enabled
if self.decoding_flag:
decoded_output = self.decoder(torch_out)
else:
decoded_output = output
if self.return_state:
return SNNReturnTuple(output=decoded_output, state=states)
else:
return decoded_output
return inner_forward
class DVS128GestureSNNModule(pytorch_lightning.LightningModule):
    """Spiking convolutional network for DVS128 Gesture classification.

    A LightningModule built from a norse ``SequentialState`` stack
    (conv -> LIF -> conv -> LIF -> linear -> linear -> LI) whose forward is
    time-looped via ``snn_module_forward_decorator``.  Encoder/decoder
    callables convert raw event tensors to/from the network's expected
    format; neuron parameters can optionally be made trainable.
    """

    # default encoder/decoder are pass-throughs
    DEFAULT_ENCODER = torch.nn.Identity()
    DEFAULT_DECODER = torch.nn.Identity()
    DEFAULT_OPTIMIZER_CLASS = torch.optim.Adam
    DEFAULT_LEARNING_RATE = 1e-3
    DEFAULT_RETURN_STATE = False
    DEFAULT_ENCODING_FLAG = True
    DEFAULT_DECODING_FLAG = True
    DEFAULT_TRAINABLE_NEURON_PARAMETERS = True
    # (time, batch, channel, height, width) used to build example_input_array
    DEFAULT_EXAMPLE_INPUT_ARRAY_SIZE = (1, 1, 1, 128, 128)
    DEFAULT_DIMS = None
    DEFAULT_NUM_CLASSES = None
    # DVS128 Gesture: 1-channel 128x128 event frames, 11 gesture classes
    DIMS = (1, 128, 128)
    NUM_CLASSES = 11

    def __init__(
        self,
        *args: typing.Any,
        encoder: typing.Callable[[torch.Tensor], torch.Tensor] = DEFAULT_ENCODER,
        decoder: typing.Callable[[torch.Tensor], torch.Tensor] = DEFAULT_DECODER,
        return_state: bool = DEFAULT_RETURN_STATE,
        encoding_flag: bool = DEFAULT_ENCODING_FLAG,
        decoding_flag: bool = DEFAULT_DECODING_FLAG,
        trainable_neuron_parameters: bool = DEFAULT_TRAINABLE_NEURON_PARAMETERS,
        dims: typing.Optional[typing.Sequence[int]] = DIMS,
        num_classes: typing.Optional[int] = NUM_CLASSES,
        example_input_array_size: typing.Optional[
            typing.Sequence[int]
        ] = DEFAULT_EXAMPLE_INPUT_ARRAY_SIZE,
        optimizer_class: type(torch.optim.Optimizer) = DEFAULT_OPTIMIZER_CLASS,
        learning_rate: float = DEFAULT_LEARNING_RATE,
        map: typing.Optional[torch.device] = None,
        **kwargs: typing.Any,
    ):
        """Configure the module and build the underlying SNN.

        Args:
            encoder: callable applied to inputs before the time loop.
            decoder: callable applied to the stacked per-timestep outputs.
            return_state: if True, ``forward`` also returns neuron states.
            encoding_flag / decoding_flag: enable/disable encoder/decoder.
            trainable_neuron_parameters: make LIF/LI parameters learnable.
            dims: input dimensions; falls back to class ``DIMS`` when None.
            num_classes: output classes; falls back to ``NUM_CLASSES``.
            example_input_array_size: shape used to create the example input
                (enables graph logging); None skips it.
            optimizer_class / learning_rate: used by configure_optimizers.
            map: optional device to move the module to after construction.
                NOTE(review): shadows the ``map`` builtin; kept for
                backward compatibility with existing checkpoints/callers.
        """
        super().__init__(*args, **kwargs)
        self.save_hyperparameters()
        self.encoder = encoder
        self.decoder = decoder
        self.encoding_flag = self.hparams.encoding_flag
        self.decoding_flag = self.hparams.decoding_flag
        self.return_state = self.hparams.return_state
        self.trainable_neuron_parameters = self.hparams.trainable_neuron_parameters
        self.optimizer_classes = optimizer_class
        self.learning_rates = learning_rate
        # hooks around loss/accuracy computation; identity by default
        self.normalize_prob_func = torch.nn.Identity()
        self.pre_accuracy_func = torch.nn.Identity()
        self.loss_func = torch.nn.CrossEntropyLoss()
        self.accuracy_func = self.custom_argmax_accuracy
        # we save the input size
        self.dims = dims
        if self.dims is None and hasattr(self, "DIMS"):
            self.dims = self.DIMS
        # we save the number of classes
        self.num_classes = num_classes
        if self.num_classes is None and hasattr(self, "NUM_CLASSES"):
            self.num_classes = self.NUM_CLASSES
        self.example_input_array_size = example_input_array_size
        if self.example_input_array_size is not None:
            self.example_input_array = torch.randn(*self.example_input_array_size)
        self._check_encoder_decoder()
        self.model_definition()
        if map is not None:
            self.to(map)

    def _check_encoder_decoder(self):
        """Raise ValueError unless both encoder and decoder are callable."""
        callable_ = callable(self.encoder) and callable(self.decoder)
        if not callable_:
            raise ValueError("The encoder/decoder should be callable")

    # this method is used to register possible hidden parameters inside the
    # SNN configurations
    def register_snn_parameters(self):
        """Register trainable tensors hidden in norse parameter namedtuples.

        Walks every submodule's ``p`` attribute (recursing through nested
        namedtuples/modules) and registers any gradient-requiring tensor
        that is not already a known parameter, so optimizers can see it.
        """
        # we get all the Parameter elements from the modules
        # some Parameters have nested Parameters, like LIFRefrac has
        # a nested LIFParameters in it
        p_list = []
        # we need a counter as many parameters may have the same name
        counter = 0
        # we populate the list with direct children to the modules,
        # using 'p' as variable name
        # only if it is a namedtuple, with _asdict, or if it is a
        # torch.nn.Module
        for module in self.modules():
            if hasattr(module, "p"):
                p = module.p
                if hasattr(p, "_asdict"):
                    p_list.extend(list(p._asdict().items()))
                elif isinstance(p, torch.nn.Module):
                    p_list.extend(list(p.named_modules()))
        # we iterate over the list until it's empty
        while len(p_list) > 0:
            p_name, p_value = p_list.pop()
            # if the value is a namedtuple or a torch.nn.Module we extend the
            # list
            if hasattr(p_value, "_asdict"):
                p_list.extend(list(p_value._asdict().items()))
            elif isinstance(p_value, torch.nn.Module):
                p_list.extend(list(p_value.named_modules()))
            # we check whether it is a tensor which requires gradient and
            # it is not already registered
            tensor_flag = isinstance(p_value, torch.Tensor)
            grad_flag = getattr(p_value, "requires_grad", False)
            id_param_list = [id(param) for param in self.parameters()]
            parameter_flag = id(p_value) not in id_param_list
            # if True we increase the counter and register the new parameter
            if tensor_flag and grad_flag and parameter_flag:
                counter += 1
                # NOTE(review): ``module`` here is the leftover loop variable
                # from the ``for module in self.modules()`` loop above, so
                # every tensor is registered on the *last* visited module —
                # confirm this is intended
                module.register_parameter("p/" + p_name + "/" + str(counter), p_value)

    # we delegate the weight initialization to each component
    # decoder, model, encoder
    def init_weights(self):
        """Initialize weights: delegate to encoder/decoder, then apply a
        ResNet/AlexNet-style scheme to conv/batchnorm/linear layers."""
        # NOTE(review): despite the comment above, only decoder and encoder
        # are delegated to here, not a separate "model" — confirm
        for mod in (self.decoder, self.encoder):
            if (init_weights := getattr(mod, "init_weights", None)) is not None:
                init_weights()
        # this initialization is similar to the ResNet one
        # taken from https://github.com/Lornatang/AlexNet-PyTorch/
        # @ alexnet_pytorch/model.py#L63
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.kaiming_normal_(
                    m.weight, mode="fan_out", nonlinearity="relu"
                )
                if m.bias is not None:
                    torch.nn.init.constant_(m.bias, 0)
            elif isinstance(m, torch.nn.BatchNorm2d):
                torch.nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    torch.nn.init.constant_(m.bias, 0)
            elif isinstance(m, torch.nn.Linear):
                torch.nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    torch.nn.init.constant_(m.bias, 0)

    # implemented by us for compatibility between forward and validation/test
    # steps
    def inference_step(self, batch, batch_idx):
        """Run one forward pass and return ``{"acc": ..., "loss": ...}``."""
        x, y = batch
        y_hat = self.normalize_prob_func(self.forward(x))
        loss = self.loss_func(y_hat, y)
        acc = self.accuracy_func(self.pre_accuracy_func(y_hat), y)
        return {"acc": acc, "loss": loss}

    def training_step(self, batch, batch_idx):
        """Lightning training step: log metrics and return the loss."""
        m = self.inference_step(batch, batch_idx)
        metrics = {
            "train_acc": m["acc"],
            "train_loss": m["loss"],
        }
        self.log_dict(metrics, prog_bar=True, on_step=True, on_epoch=True, logger=True)
        # here we need to return the loss to be able to properly train
        return m["loss"]

    def validation_step(self, batch, batch_idx):
        """Lightning validation step: log metrics only."""
        m = self.inference_step(batch, batch_idx)
        metrics = {
            "val_acc": m["acc"],
            "val_loss": m["loss"],
        }
        self.log_dict(metrics, prog_bar=True, on_step=True, on_epoch=True, logger=True)
        # this may not be needed, as for logging we already use self.log_dict
        # return metrics

    def test_step(self, batch, batch_idx):
        """Lightning test step: log metrics only."""
        m = self.inference_step(batch, batch_idx)
        metrics = {
            "test_acc": m["acc"],
            "test_loss": m["loss"],
        }
        self.log_dict(metrics, prog_bar=True, on_step=True, on_epoch=True, logger=True)
        # this may not be needed, as for logging we already use self.log_dict
        # return metrics

    def configure_optimizers(self):
        """Build the configured optimizer over all module parameters."""
        optimizer = self.optimizer_classes(self.parameters(), self.learning_rates)
        return optimizer

    def model_definition(self):
        """Build ``self.sequential``, the norse SNN stack.

        When ``trainable_neuron_parameters`` is set, the LIF/LI neuron
        parameters (time constants, thresholds, resets) are wrapped in
        ``torch.nn.Parameter`` tensors shaped like each layer's activations
        so they can be learned; otherwise the norse defaults are used.
        """
        if self.trainable_neuron_parameters:
            # first LIF cell: activations are 32 channels x 32 x 32
            lif1 = norse.torch.LIFCell(
                p=norse.torch.LIFParameters(
                    tau_syn_inv=torch.nn.Parameter(
                        torch.full(
                            size=[32, 32, 32],
                            fill_value=(
                                norse.torch.LIFParameters._field_defaults.get(
                                    "tau_syn_inv"
                                )
                            ),
                        ),
                    ),
                    tau_mem_inv=torch.nn.Parameter(
                        torch.full(
                            size=[32, 32, 32],
                            fill_value=(
                                norse.torch.LIFParameters._field_defaults.get(
                                    "tau_mem_inv"
                                )
                            ),
                        ),
                    ),
                    v_leak=torch.nn.Parameter(
                        norse.torch.LIFParameters._field_defaults.get("v_leak")
                    ),
                    # threshold lowered from the norse default (see
                    # commented-out lookup below)
                    v_th=torch.nn.Parameter(
                        torch.full(
                            size=[32, 32, 32],
                            fill_value=(
                                0.4
                                # norse.torch.LIFParameters.
                                # _field_defaults.get(
                                #     "v_th"
                                # )
                            ),
                        ),
                    ),
                    v_reset=torch.nn.Parameter(
                        torch.full(
                            size=[32, 32, 32],
                            fill_value=(
                                norse.torch.LIFParameters._field_defaults.get("v_reset")
                            ),
                        ),
                    ),
                    alpha=norse.torch.LIFParameters._field_defaults.get("alpha"),
                    method="super",
                ),
                dt=0.01,
            )
            # second LIF cell: activations are 32 channels x 16 x 16
            lif2 = norse.torch.LIFCell(
                p=norse.torch.LIFParameters(
                    tau_syn_inv=torch.nn.Parameter(
                        torch.full(
                            size=[32, 16, 16],
                            fill_value=(
                                norse.torch.LIFParameters._field_defaults.get(
                                    "tau_syn_inv"
                                )
                            ),
                        ),
                    ),
                    tau_mem_inv=torch.nn.Parameter(
                        torch.full(
                            size=[32, 16, 16],
                            fill_value=(
                                norse.torch.LIFParameters._field_defaults.get(
                                    "tau_mem_inv"
                                )
                            ),
                        ),
                    ),
                    v_leak=torch.nn.Parameter(
                        norse.torch.LIFParameters._field_defaults.get("v_leak")
                    ),
                    # threshold lowered from the norse default (see
                    # commented-out lookup below)
                    v_th=torch.nn.Parameter(
                        torch.full(
                            size=[32, 16, 16],
                            fill_value=(
                                0.4
                                # norse.torch.LIFParameters.
                                # _field_defaults.get(
                                #     "v_th"
                                # )
                            ),
                        ),
                    ),
                    v_reset=torch.nn.Parameter(
                        torch.full(
                            size=[32, 16, 16],
                            fill_value=(
                                norse.torch.LIFParameters._field_defaults.get("v_reset")
                            ),
                        ),
                    ),
                    alpha=norse.torch.LIFParameters._field_defaults.get("alpha"),
                    method="super",
                ),
                dt=0.01,
            )
            # output LI (non-spiking leaky integrator): 11 classes
            li = norse.torch.LICell(
                p=norse.torch.LIParameters(
                    tau_syn_inv=torch.nn.Parameter(
                        torch.full(
                            size=[11],
                            fill_value=(
                                norse.torch.LIParameters._field_defaults.get(
                                    "tau_syn_inv"
                                )
                            ),
                        ),
                    ),
                    tau_mem_inv=torch.nn.Parameter(
                        torch.full(
                            size=[11],
                            fill_value=(
                                norse.torch.LIParameters._field_defaults.get(
                                    "tau_mem_inv"
                                )
                            ),
                        ),
                    ),
                    v_leak=torch.nn.Parameter(
                        norse.torch.LIParameters._field_defaults.get("v_leak")
                    ),
                ),
                # NOTE(review): unlike lif1/lif2 (scalar dt=0.01), the output
                # cell's integration step is itself a trainable per-class
                # tensor — confirm this asymmetry is intended
                dt=torch.nn.Parameter(
                    torch.full(
                        size=[11],
                        fill_value=0.01,
                    ),
                ),
            )
        else:
            lif1 = norse.torch.LIFCell()
            lif2 = norse.torch.LIFCell()
            li = norse.torch.LICell()
        # numeric comments (# 2, # 6, # 11, # 13) mark the Sequential indices
        # referenced by the fault-injection configurations
        self.sequential = norse.torch.SequentialState(
            torch.nn.AvgPool2d(
                kernel_size=4,
                stride=4,
                padding=0,
                ceil_mode=False,
            ),
            torch.nn.Dropout(
                p=0.1,
                inplace=False,
            ),
            # 2
            torch.nn.Conv2d(
                in_channels=1,
                out_channels=32,
                kernel_size=3,
                padding=1,
                dilation=1,
                stride=1,
                groups=1,
            ),
            lif1,
            torch.nn.AvgPool2d(
                kernel_size=2,
                stride=2,
                padding=0,
                ceil_mode=False,
            ),
            torch.nn.Dropout(
                p=0.1,
                inplace=False,
            ),
            # 6
            torch.nn.Conv2d(
                in_channels=32,
                out_channels=32,
                kernel_size=3,
                padding=1,
                dilation=1,
                stride=1,
                groups=1,
            ),
            lif2,
            torch.nn.AvgPool2d(
                kernel_size=2,
                stride=2,
                padding=0,
                ceil_mode=False,
            ),
            torch.nn.Dropout(
                p=0.2,
                inplace=False,
            ),
            torch.nn.Flatten(
                start_dim=1,
                end_dim=-1,
            ),
            # 11
            torch.nn.Linear(
                in_features=2048,
                out_features=500,
                bias=True,
            ),
            torch.nn.ReLU(),
            # 13
            torch.nn.Linear(
                in_features=500,
                out_features=11,
                bias=True,
            ),
            li,
        )
        # this must be called after setting up the SNN module
        self.register_snn_parameters()

    @snn_module_forward_decorator
    def forward(self, x, state=None):
        """Single-timestep forward; the decorator adds the time loop."""
        return self.sequential.forward(x, state=state)

    # NOTE: this is a temporary solution, as it is difficult to implement
    # temporary function with JSON
    @staticmethod
    def random_noise_max_membrane_voltage_log_softmax_decoder(inputs):
        """Decoder: add small noise, then take the per-neuron max over time."""
        # we add some random noise
        temp = inputs + 0.001 * torch.randn(*inputs.size(), device=inputs.device)
        # we get the maximum for each membrane voltage over the time steps,
        # dim=0
        max_inputs, _ = torch.max(temp, dim=0)
        return max_inputs

    # NOTE: this is a temporary solution, as it is difficult to implement
    # temporary function with JSON
    @staticmethod
    def label_smoothing_loss(y_hat, y, alpha=0.2):
        """Cross-entropy with label smoothing: (1-alpha)*NLL + alpha*KL."""
        log_probs = torch.nn.functional.log_softmax(y_hat, dim=-1)
        xent = torch.nn.functional.nll_loss(log_probs, y, reduction="none")
        KL = -log_probs.mean(dim=-1)
        loss = (1 - alpha) * xent + alpha * KL
        return loss.sum()

    @staticmethod
    def custom_softmax_accuracy(y_hat, y):
        """Accuracy computed on softmax-normalized logits."""
        return torchmetrics.Accuracy().to(y_hat.device)(
            torch.nn.functional.softmax(y_hat, dim=-1), y
        )

    # the following functions are for MNIST SNN training, from the norse
    # tutorial
    @staticmethod
    def custom_argmax_accuracy(y_hat, y):
        """Accuracy computed on argmax class predictions."""
        return torchmetrics.Accuracy().to(y_hat.device)(torch.argmax(y_hat, dim=-1), y)

    # must be used if the target is one-hot encoded
    @staticmethod
    def custom_one_hot_argmax_accuracy(y_hat, y):
        """Accuracy for one-hot targets: argmax both prediction and target."""
        return torchmetrics.Accuracy().to(y_hat.device)(
            torch.argmax(y_hat, dim=-1),
            torch.max(y, dim=-1)[1],
        )

    @staticmethod
    def max_log_softmax_probability(x):
        """Max membrane voltage over time, then log-softmax over classes."""
        x, _ = torch.max(x, 0)
        log_p_y = torch.nn.functional.log_softmax(x, dim=-1)
        return log_p_y

    @staticmethod
    def decoder_dvs128gesture(x):
        """Default decoder for DVS128Gesture: max-over-time log-softmax."""
        return DVS128GestureSNNModule.max_log_softmax_probability(x)

    @classmethod
    def encoder_dvs128gesture(cls, input_):
        """Default encoder for DVS128Gesture event tensors.

        Densifies sparse inputs, keeps the first polarity channel, converts
        to float32 and moves the time dimension first.  The composed
        transform is built once and cached on the class.
        """
        encoder_name = "_encoder_dvs128gesture"
        if (encoder := getattr(cls, encoder_name, None)) is None:
            encoder = torchvision.transforms.Compose(
                [
                    lambda x: x.to_dense() if x.is_sparse else x,
                    lambda x: x[:, :, 0:1, :, :],
                    functools.partial(
                        lambda x, dtype: x.to(dtype=dtype) if x.dtype != dtype else x,
                        dtype=torch.float32,
                    ),
                    # (batch, time, ...) -> (time, batch, ...)
                    lambda x: x.permute(1, 0, 2, 3, 4),
                ]
            )
            setattr(cls, encoder_name, encoder)
        return encoder(input_)
| 22,558 | 36.978114 | 88 | py |
enpheeph | enpheeph-main/papers/iros2022/experiments/results/configs/snn_training/snn_training.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import pathlib
import sys
import pytorch_lightning
# support running both as a package module and as a standalone script: when
# the sibling modules are not importable directly, temporarily add this
# file's directory to sys.path, import them, then restore the path
try:
    import dvs128gesturesnnmodule
    import dvs128gesturedatamodule
except ImportError:
    sys.path.append(str(pathlib.Path(__file__).absolute().parent))
    import dvs128gesturesnnmodule
    import dvs128gesturedatamodule
    sys.path.pop()
# samples per training/evaluation batch
BATCH_SIZE = 10
# shared location of the DVS128 Gesture dataset
DVS128GESTURE_DATASET_PATH = pathlib.Path(
    "/shared/ml/datasets/vision/snn/DVS128Gesture/"
)
# metric monitored by the checkpoint callback (higher is better)
MONITOR_METRIC_ACCURACY = "val_acc_epoch"
MONITOR_METRIC_ACCURACY_MODE = "max"
# metric monitored by early stopping (lower is better)
MONITOR_METRIC_LOSS = "val_loss_epoch"
MONITOR_METRIC_LOSS_MODE = "min"
# MONITOR_METRIC_LOSS = "val_acc_epoch"
# MONITOR_METRIC_LOSS_MODE = "max"
# global seed passed to pytorch_lightning.seed_everything
SEED = 42
# root directory for checkpoints and TensorBoard logs
TRAINING_DIR = pathlib.Path(__file__).parent / "checkpoints" / "dvs128gesture_snn"
def main():
    """Train the DVS128Gesture SNN module with early stopping and checkpointing."""
    pytorch_lightning.seed_everything(SEED)

    # network under training; encoder/decoder are the module's own static helpers
    snn_module = dvs128gesturesnnmodule.DVS128GestureSNNModule(
        encoder=dvs128gesturesnnmodule.DVS128GestureSNNModule.encoder_dvs128gesture,
        decoder=dvs128gesturesnnmodule.DVS128GestureSNNModule.decoder_dvs128gesture,
        return_state=False,
        encoding_flag=True,
        decoding_flag=True,
        trainable_neuron_parameters=False,
        learning_rate=1e-3,
    )

    data = dvs128gesturedatamodule.DVS128GestureDataModule(
        data_dir=DVS128GESTURE_DATASET_PATH,
        num_workers=64,
        drop_last=False,
        shuffle=False,
        batch_size=BATCH_SIZE,
        seed=SEED,
        pin_memory=False,
    )

    # stop when the monitored loss stops improving by at least min_delta
    early_stopping = pytorch_lightning.callbacks.EarlyStopping(
        check_finite=True,
        min_delta=0.001,
        mode=MONITOR_METRIC_LOSS_MODE,
        # string of monitored metric, default is early_stop_on
        monitor=MONITOR_METRIC_LOSS,
        patience=5,
        verbose=True,
    )
    # keep the 3 best checkpoints by validation accuracy, plus the last one
    checkpointing = pytorch_lightning.callbacks.ModelCheckpoint(
        dirpath=None,
        every_n_epochs=1,
        every_n_train_steps=None,
        filename=None,
        mode=MONITOR_METRIC_ACCURACY_MODE,
        monitor=MONITOR_METRIC_ACCURACY,
        save_last=True,
        save_top_k=3,
        save_weights_only=False,
        verbose=True,
    )
    tensorboard = pytorch_lightning.loggers.TensorBoardLogger(
        save_dir=str(TRAINING_DIR),
        # experiment name, in this custom configuration it is default
        name="default",
        version=None,
        # log_graph saves the computational graph; it requires
        # example_input_array to be set on the model
        log_graph=True,
        default_hp_metric=True,
        prefix="",
    )

    trainer = pytorch_lightning.Trainer(
        accelerator="gpu",
        callbacks=[
            pytorch_lightning.callbacks.DeviceStatsMonitor(),
            early_stopping,
            checkpointing,
            pytorch_lightning.callbacks.TQDMProgressBar(),
        ],
        default_root_dir=str(TRAINING_DIR),
        deterministic=True,
        devices="auto",
        logger=[tensorboard],
        log_every_n_steps=10,
        replace_sampler_ddp=True,
        strategy=pytorch_lightning.plugins.DDPPlugin(find_unused_parameters=False),
    )

    trainer.fit(snn_module, datamodule=data)
# run training only when executed as a script, not on import
if __name__ == "__main__":
    main()
| 4,122 | 32.520325 | 84 | py |
enpheeph | enpheeph-main/papers/iros2022/experiments/results/configs/snn_training/dvs128gesturedatamodule.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import typing
import pl_bolts
import tonic
import torch
import torchvision
class DVS128GestureDataModule(
    pl_bolts.datamodules.vision_datamodule.VisionDataModule,
):
    """LightningDataModule for the tonic DVS128Gesture event-camera dataset.

    Events are merged into a single polarity and binned into frames with a
    25 ms time window (``ToFrame``); batches are padded to a common temporal
    length by ``tonic.collation.PadTensors`` with ``batch_first=True``.
    """

    # default event-to-frame pipeline, shared by train/val/test
    DEFAULT_TRAIN_TRANSFORMS = tonic.transforms.Compose(
        [
            # torch.tensor,
            # tonic.transforms.Downsample(time_factor=0.0001),
            # average number of timesteps is 7185841
            # so we can use a time window of 100000 to make it into 72
            tonic.transforms.MergePolarities(),
            tonic.transforms.ToFrame(
                tonic.datasets.dvsgesture.DVSGesture.sensor_size,
                time_window=25_000,
            ),
        ]
    )
    DEFAULT_VAL_TRANSFORMS = DEFAULT_TRAIN_TRANSFORMS
    DEFAULT_TEST_TRANSFORMS = DEFAULT_TRAIN_TRANSFORMS
    DEFAULT_TARGET_TRANSFORM = None
    DEFAULT_COLLATE_FN = torchvision.transforms.Compose(
        [
            tonic.collation.PadTensors(batch_first=True),
        ]
    )
    # extra keyword arguments forwarded by VisionDataModule to dataset_cls;
    # shadowed per-instance in __init__ (see note there)
    EXTRA_ARGS = {"target_transform": None}
    # trick as dataset_cls should have this signature, using also download which is
    # not required in tonic
    # see the corresponding property
    # dataset_cls = tonic.datasets.dvsgesture.DVSGesture
    name = "DVSGesture"
    dims = tonic.datasets.dvsgesture.DVSGesture.sensor_size
    num_classes = 11

    # trick as dataset_cls should have the signature of dataset_cls_interface,
    # using also download which is not used in tonic
    @property
    def dataset_cls(self):
        """Adapter so tonic's DVSGesture matches the torchvision dataset signature."""

        def dataset_cls_interface(
            data_dir, train=True, download=True, transform=None, *args, **kwargs
        ):
            # download and extra kwargs (e.g. target_transform) are accepted
            # for signature compatibility but not forwarded to tonic
            return tonic.datasets.dvsgesture.DVSGesture(
                save_to=data_dir, train=train, transform=transform
            )

        return dataset_cls_interface

    def __init__(
        self,
        # generic VisionDataModule arguments
        data_dir: typing.Optional[str] = None,
        val_split: typing.Union[int, float] = 0.2,
        num_workers: int = 16,
        normalize: bool = False,
        batch_size: int = 32,
        seed: int = 42,
        shuffle: bool = False,
        pin_memory: bool = False,
        drop_last: bool = False,
        # generic transforms
        train_transforms: typing.Optional[
            typing.Callable[[typing.Any], torch.Tensor]
        ] = None,
        val_transforms: typing.Optional[
            typing.Callable[[typing.Any], torch.Tensor]
        ] = None,
        test_transforms: typing.Optional[
            typing.Callable[[typing.Any], torch.Tensor]
        ] = None,
        # tonic specific arguments for collate_fn and target transform
        target_transform: typing.Optional[
            typing.Callable[[typing.Any], torch.Tensor]
        ] = None,
        collate_fn: typing.Optional[
            typing.Callable[[torch.Tensor], torch.Tensor]
        ] = None,
        # extra argument
        *args: typing.Any,
        **kwargs: typing.Any,
    ):
        """Create the datamodule.

        Any transform/collate argument left as None falls back to the
        corresponding class-level default.
        """
        super().__init__(
            *args,
            data_dir=data_dir,
            val_split=val_split,
            num_workers=num_workers,
            normalize=normalize,
            batch_size=batch_size,
            seed=seed,
            shuffle=shuffle,
            pin_memory=pin_memory,
            drop_last=drop_last,
            **kwargs,
        )

        # fall back to the class defaults when the caller did not override
        self.train_transforms = (
            self.DEFAULT_TRAIN_TRANSFORMS if train_transforms is None else train_transforms
        )
        self.val_transforms = (
            self.DEFAULT_VAL_TRANSFORMS if val_transforms is None else val_transforms
        )
        self.test_transforms = (
            self.DEFAULT_TEST_TRANSFORMS if test_transforms is None else test_transforms
        )
        self.target_transform = (
            self.DEFAULT_TARGET_TRANSFORM if target_transform is None else target_transform
        )
        self.collate_fn = self.DEFAULT_COLLATE_FN if collate_fn is None else collate_fn

        # BUGFIX: the original code mutated the *class-level* EXTRA_ARGS dict
        # in place, so the target_transform of the most recently constructed
        # instance leaked into every other instance (and subclass). Shadow it
        # with an instance-level copy instead; VisionDataModule reads it via
        # self, so lookup still works.
        self.EXTRA_ARGS = {
            **type(self).EXTRA_ARGS,
            "target_transform": self.target_transform,
        }

        # we call it here to initialize the datasets otherwise when using
        # *_dataloader it is not automatically called
        self.setup()

    def default_transforms(self) -> typing.Callable[[typing.Any], torch.Tensor]:
        """Identity transform pipeline (required by VisionDataModule)."""
        return tonic.transforms.Compose([])

    def _data_loader(
        self, dataset: torch.utils.data.Dataset, shuffle: bool = False
    ) -> torch.utils.data.DataLoader:
        """Build a DataLoader using this module's batching/collation settings."""
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=self.batch_size,
            collate_fn=self.collate_fn,
            shuffle=shuffle,
            num_workers=self.num_workers,
            drop_last=self.drop_last,
            pin_memory=self.pin_memory,
        )
| 6,508 | 34.763736 | 86 | py |
enpheeph | enpheeph-main/tests/conftest.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import collections
import torchvision
import pytest
# params parametrizes the fixture: each entry is
# [module name to set (False = delete __module__), object, expected library name]
@pytest.fixture(
    scope="function",
    params=[
        [None, object(), None],
        ["test.module", 2, "test"],
        ["foobar", "a", "foobar"],
        ["second_test", 2, "second_test"],
        [False, [1, 2, 3], None],
    ],
    ids=[
        "None",
        "test.module",
        "foobar",
        "second_test",
        "deletion",
    ],
)
# we need to use request.param to access the parameter
def mock_object_with_library(monkeypatch, request):
    """Patch (or delete) the object's class ``__module__`` and return the
    object together with the library name a test is expected to detect."""
    # we get the name of the library to be tested and the object
    library_name, obj, expected_library_name = request.param
    if library_name is not False:
        monkeypatch.setattr(obj.__class__, "__module__", library_name)
    else:
        # False flags the deletion case: __module__ is removed entirely
        monkeypatch.delattr(obj.__class__, "__module__")
    return TestWithTarget(test_input=obj, target=expected_library_name)
# move everything to pytest_cases https://smarie.github.io/python-pytest-cases/
@pytest.fixture(
    scope="class",
)
def trained_model_1epoch():
    """Placeholder fixture for a model trained for one epoch; not implemented yet."""
    pass
@pytest.fixture(
    scope="session",
    params=[
        [torchvision.datasets.CIFAR10],
    ],
    ids=[
        "CIFAR10",
    ],
)
def datamodule(tmp_path, request):
    """Placeholder datamodule fixture.

    NOTE(review): ``request.param[0]`` is evaluated but neither stored nor
    returned, so this fixture currently yields ``None`` — presumably it was
    meant to build a datamodule from the dataset class; confirm intent.
    """
    request.param[0]
# pairs a fixture's produced object with the value the test should expect;
# defined after the fixtures above, which is safe because fixture bodies
# resolve the name lazily when they run
TestWithTarget = collections.namedtuple("TestWithTarget", "test_input target")
| 2,909 | 30.290323 | 79 | py |
enpheeph | enpheeph-main/tests/test_enpheeph/unit_test/test_injections/test_abc/test_pytorchinjectionabc.py | # -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2022 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import torch
import enpheeph.injections.abc.pytorchinjectionabc
class TestPyTorchInjectionABC(object):
    """Unit tests for the PyTorchInjectionABC abstract base class."""

    def test_abstract_method_setup(self):
        # setup must be declared abstract on the base class
        abc_cls = enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC
        assert getattr(abc_cls.setup, "__isabstractmethod__", False)

    def test_teardown_not_abstract(self):
        # teardown ships with a concrete default implementation
        abc_cls = enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC
        assert not getattr(abc_cls.teardown, "__isabstractmethod__", False)

    def test_teardown(self):
        # minimal concrete subclass so the ABC can be instantiated
        class Implementation(
            enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC
        ):
            def setup(self):
                pass

            def module_name(self):
                pass

        injection = Implementation()
        relu = torch.nn.ReLU()

        # teardown on a module with no registered hook is a no-op
        relu = injection.teardown(relu)
        assert relu(torch.tensor([1])) == torch.tensor([1])

        # with a hook registered through the saved handle, outputs change ...
        injection.handle = relu.register_forward_hook(lambda m, i, o: o + 1)
        assert relu(torch.tensor([1])) == torch.tensor([2])

        # ... and teardown removes the hook, restoring the original behavior;
        # a second teardown stays a no-op
        relu = injection.teardown(relu)
        assert relu(torch.tensor([1])) == torch.tensor([1])
        relu = injection.teardown(relu)
        assert relu(torch.tensor([1])) == torch.tensor([1])

    def test_abstract_module_name(self):
        abc_cls = enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC
        # module_name must be abstract ...
        assert getattr(abc_cls.module_name, "__isabstractmethod__", False)
        # ... and exposed as a property
        assert isinstance(abc_cls.module_name, property)

    def test_attributes(self):
        abc_cls = enpheeph.injections.abc.pytorchinjectionabc.PyTorchInjectionABC
        # __annotations__ returns the annotated attributes in the class
        assert "handle" in abc_cls.__annotations__
| 3,525 | 34.26 | 88 | py |
enpheeph | enpheeph-main/notebooks/pruning_distribution_analysis/pruning_distribution_analysis_v2.2023_04_18__11_46_UTC.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# # Use the enpheeph-dev mamba environment
# The old one is enpheeph-dev-old-lightning-flash
# In[1]:
import math
import os
import pathlib
import time
import captum
import lightning
import numpy
import pandas
import torch
import torch.optim
import torchmetrics
import torchvision
import torchvision.datasets
import torchvision.transforms
# In[2]:
class Model(lightning.LightningModule):
    """Lightning wrapper around a torchvision classifier that, during testing,
    records the outputs of selected neurons (chosen by captum attribution
    ranking) into a pandas dataframe, together with per-sample loss/accuracy.
    """

    @property
    def LAYER_LIST(self):
        """Per-architecture map of layer name -> activation shape after the layer.

        Shapes depend on num_classes for the final classifier entries.
        NOTE(review): no entry exists for the "mlp" model supported by
        setup_model — confirm whether that is intentional.
        """
        return {
            "vgg11": {
                "model.features.0": [64, 32, 32],
                "model.features.1": [64, 32, 32],
                "model.features.2": [64, 16, 16],
                "model.features.3": [128, 16, 16],
                "model.features.4": [128, 16, 16],
                "model.features.5": [128, 8, 8],
                "model.features.6": [256, 8, 8],
                "model.features.7": [256, 8, 8],
                "model.features.8": [256, 8, 8],
                "model.features.9": [256, 8, 8],
                "model.features.10": [256, 4, 4],
                "model.features.11": [512, 4, 4],
                "model.features.12": [512, 4, 4],
                "model.features.13": [512, 4, 4],
                "model.features.14": [512, 4, 4],
                "model.features.15": [512, 2, 2],
                "model.features.16": [512, 2, 2],
                "model.features.17": [512, 2, 2],
                "model.features.18": [512, 2, 2],
                "model.features.19": [512, 2, 2],
                "model.features.20": [512, 1, 1],
                "model.avgpool": [512, 7, 7],
                "model.classifier.0": [4096],
                "model.classifier.1": [4096],
                "model.classifier.2": [4096],
                "model.classifier.3": [4096],
                "model.classifier.4": [4096],
                "model.classifier.5": [4096],
                "model.classifier.6": [self.num_classes],
            },
            "resnet18": {
                "model.conv1": [64, 16, 16],
                "model.bn1": [64, 16, 16],
                "model.relu": [64, 16, 16],
                "model.maxpool": [64, 8, 8],
                "model.layer1.0.conv1": [64, 8, 8],
                "model.layer1.0.bn1": [64, 8, 8],
                "model.layer1.0.relu": [64, 8, 8],
                "model.layer1.0.conv2": [64, 8, 8],
                "model.layer1.0.bn2": [64, 8, 8],
                "model.layer1.1.conv1": [64, 8, 8],
                "model.layer1.1.bn1": [64, 8, 8],
                "model.layer1.1.relu": [64, 8, 8],
                "model.layer1.1.conv2": [64, 8, 8],
                "model.layer1.1.bn2": [64, 8, 8],
                "model.layer2.0.conv1": [128, 4, 4],
                "model.layer2.0.bn1": [128, 4, 4],
                "model.layer2.0.relu": [128, 4, 4],
                "model.layer2.0.conv2": [128, 4, 4],
                "model.layer2.0.bn2": [128, 4, 4],
                "model.layer2.0.downsample.0": [128, 4, 4],
                "model.layer2.0.downsample.1": [128, 4, 4],
                "model.layer2.1.conv1": [128, 4, 4],
                "model.layer2.1.bn1": [128, 4, 4],
                "model.layer2.1.relu": [128, 4, 4],
                "model.layer2.1.conv2": [128, 4, 4],
                "model.layer2.1.bn2": [128, 4, 4],
                "model.layer3.0.conv1": [256, 2, 2],
                "model.layer3.0.bn1": [256, 2, 2],
                "model.layer3.0.relu": [256, 2, 2],
                "model.layer3.0.conv2": [256, 2, 2],
                "model.layer3.0.bn2": [256, 2, 2],
                "model.layer3.0.downsample.0": [256, 2, 2],
                "model.layer3.0.downsample.1": [256, 2, 2],
                "model.layer3.1.conv1": [256, 2, 2],
                "model.layer3.1.bn1": [256, 2, 2],
                "model.layer3.1.relu": [256, 2, 2],
                "model.layer3.1.conv2": [256, 2, 2],
                "model.layer3.1.bn2": [256, 2, 2],
                "model.layer4.0.conv1": [512, 1, 1],
                "model.layer4.0.bn1": [512, 1, 1],
                "model.layer4.0.relu": [512, 1, 1],
                "model.layer4.0.conv2": [512, 1, 1],
                "model.layer4.0.bn2": [512, 1, 1],
                "model.layer4.0.downsample.0": [512, 1, 1],
                "model.layer4.0.downsample.1": [512, 1, 1],
                "model.layer4.1.conv1": [512, 1, 1],
                "model.layer4.1.bn1": [512, 1, 1],
                "model.layer4.1.relu": [512, 1, 1],
                "model.layer4.1.conv2": [512, 1, 1],
                "model.layer4.1.bn2": [512, 1, 1],
                "model.avgpool": [512, 1, 1],
                "model.fc": [10],
            },
        }

    def __init__(
        self,
        model_name,
        num_classes,
        accuracy_fn,
        loss_fn,
        dataframe_path,
        optimizer_class,
        learning_rate,
        dataset_name=None,
    ):
        """Build the wrapped model and reset the recording dataframe.

        accuracy_fn/loss_fn are callables taking (predictions, targets);
        dataframe_path is where recorded neuron outputs are appended as CSV.
        dataset_name is only stored via save_hyperparameters.
        """
        super().__init__()
        self.save_hyperparameters()
        self.model_name = model_name.lower()
        self.num_classes = num_classes
        self.accuracy = accuracy_fn
        self.loss = loss_fn
        self.optimizer_class = optimizer_class
        self.learning_rate = learning_rate
        self.dataframe_path = pathlib.Path(dataframe_path)
        self.setup_model(model_name=self.model_name, num_classes=self.num_classes)
        # forward-hook handles registered by add_hooks (removal not done here)
        self.handles = []
        self.reset_dataframe()
        self.init_model()

    def setup_model(self, model_name, num_classes):
        """Instantiate the backbone ("vgg11", "resnet18" or "mlp")."""
        if model_name == "vgg11":
            self.model = torchvision.models.vgg11(num_classes=num_classes)
        elif model_name == "resnet18":
            self.model = torchvision.models.resnet18(num_classes=num_classes)
        elif model_name == "mlp":
            self.model = torch.nn.Sequential(
                torch.nn.Flatten(),
                torch.nn.Linear(28 * 28, 100),
                torch.nn.ReLU(),
                torch.nn.Linear(100, num_classes),
            )
        else:
            raise ValueError("unknown model")

    def init_model(self):
        """Kaiming-initialize conv weights and reset norm-layer affine params."""
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.kaiming_normal_(
                    m.weight, mode="fan_out", nonlinearity="relu"
                )
            elif isinstance(m, (torch.nn.BatchNorm2d, torch.nn.GroupNorm)):
                torch.nn.init.constant_(m.weight, 1)
                torch.nn.init.constant_(m.bias, 0)

    def reset_dataframe(self):
        """Replace self.dataframe with an empty recording dataframe."""
        self.dataframe = pandas.DataFrame(
            columns=[
                "module_name",
                "tensor_type",
                "batch_index",
                "element_in_batch_index",
                "location",
                "neuron_attribution_sorting",
                "value",
                "accuracy",
                "loss",
            ]
        )

    @staticmethod
    def join_saved_dataframe(dataframe, dataframe_path: os.PathLike):
        """Append `dataframe` to the CSV at `dataframe_path` (| separated),
        creating the file and parent directories on first use."""
        dataframe_path = pathlib.Path(dataframe_path)
        if not dataframe_path.exists():
            dataframe_path.parent.mkdir(parents=True, exist_ok=True)
            dataframe.to_csv(dataframe_path, sep="|")
        else:
            df = pandas.read_csv(dataframe_path, sep="|", index_col=[0], header=[0])
            new_df = pandas.concat([df, dataframe], axis=0)
            new_df.reset_index(drop=True, inplace=True)
            new_df.to_csv(dataframe_path, sep="|")

    def forward(self, x):
        """Delegate to the wrapped backbone."""
        return self.model(x)

    def configure_optimizers(self):
        """Build the optimizer from the injected class and learning rate."""
        optimizer = self.optimizer_class(self.parameters(), lr=self.learning_rate)
        return optimizer

    def make_neuron_output_function(
        self, module_name, location, neuron_attribution_sorting
    ):
        """Create a forward hook that records, for every element of the batch,
        the output value at `location`; batch_index/accuracy/loss are filled
        in later by on_test_batch_end."""

        def save_neuron_output(module, args, output) -> None:
            for b_idx, b in enumerate(output):
                # row layout must match the columns set in reset_dataframe
                self.dataframe.loc[len(self.dataframe)] = [
                    module_name,
                    "output",
                    None,
                    b_idx,
                    location,
                    neuron_attribution_sorting,
                    b[location].item(),
                    None,
                    None,
                ]

        return save_neuron_output

    def add_hooks(self, attributions, topk=1, bottomk=1):
        """Register recording hooks on the top-k and bottom-k neurons of each
        layer, ranked by absolute summed attribution over all batches."""
        for layer_name, layer_attributions_and_deltas in attributions.items():
            layer_attributions_cat = torch.cat(
                tuple(l_attr for l_attr, _ in layer_attributions_and_deltas),
                dim=0,
            )
            # sum over the batch dimension -> one score per neuron
            summed_layer_attributions = torch.sum(
                layer_attributions_cat,
                (0,),
            )
            topk_values, topk_indices = torch.topk(
                abs(
                    summed_layer_attributions.flatten(),
                ),
                k=topk,
                largest=True,
                sorted=True,
            )
            bottomk_values, bottomk_indices = torch.topk(
                abs(
                    summed_layer_attributions.flatten(),
                ),
                k=bottomk,
                largest=False,
                sorted=True,
            )
            indices = [
                {"neuron_attribution_sorting": f"top{i}", "index": idx}
                for i, idx in enumerate(topk_indices)
            ] + [
                {"neuron_attribution_sorting": f"bottom{i}", "index": idx}
                for i, idx in enumerate(bottomk_indices)
            ]
            for index in indices:
                # convert the flat index back to a multi-dimensional location
                target_neuron_location = numpy.unravel_index(
                    index["index"],
                    summed_layer_attributions.size(),
                    order="C",
                )
                module = self.get_layer_from_full_name(
                    self,
                    layer_name,
                    separator=".",
                    main_model_is_in_the_layer_name=False,
                )
                self.handles.append(
                    module.register_forward_hook(
                        self.make_neuron_output_function(
                            layer_name,
                            tuple(target_neuron_location),
                            neuron_attribution_sorting=index[
                                "neuron_attribution_sorting"
                            ],
                        )
                    )
                )

    @staticmethod
    def get_full_layer_name_from_summary(layer_summary, skip_main_model=True):
        """Reconstruct a dotted layer name by walking the summary's parent
        chain; skip_main_model drops the outermost (root) name."""
        parent_info = layer_summary.parent_info
        layer_full_name = layer_summary.var_name
        while parent_info is not None and (
            not skip_main_model
            or skip_main_model
            and parent_info.parent_info is not None
        ):
            layer_full_name = f"{parent_info.var_name}.{layer_full_name}"
            parent_info = parent_info.parent_info
        return layer_full_name

    @staticmethod
    def get_layer_from_full_name(
        model, layer_name, separator=".", main_model_is_in_the_layer_name=False
    ):
        """Resolve a dotted layer name to the actual submodule via getattr."""
        module = model
        if main_model_is_in_the_layer_name:
            # drop the leading root-model component before traversal
            layer_name = separator.join(layer_name.split(separator)[1:])
        for l_n in layer_name.split(separator):
            module = getattr(module, l_n)
        return module

    def get_attributions(
        self,
        dataloader,
        layer_name_list,
        attributions_checkpoint_path,
        attribution=captum.attr.LayerConductance,
        save_checkpoint=True,
        load_checkpoint=True,
    ):
        """Compute captum attributions for each listed layer over the whole
        dataloader (on CPU), optionally loading/saving a checkpoint file.

        Returns a dict: layer name -> list of [attribution, delta] per batch.
        """
        if attributions_checkpoint_path.exists() and load_checkpoint:
            attributions = torch.load(str(attributions_checkpoint_path))
            return attributions
        elif save_checkpoint:
            attributions_checkpoint_path.parent.mkdir(exist_ok=True, parents=True)
        # eval mode on CPU; captum drives the forward/backward passes
        model = self.train(False).to(torch.device("cpu"))
        attributions = {}
        for layer_name in layer_name_list:
            print(layer_name)
            layer_attributions = []
            attr_instance = attribution(
                model, model.get_layer_from_full_name(model, layer_name)
            )
            for idx, b in enumerate(dataloader):
                x, y = b
                attr, delta = attr_instance.attribute(
                    inputs=x.to(torch.device("cpu")),
                    target=y.to(torch.device("cpu")),
                    return_convergence_delta=True,
                )
                layer_attributions.append(
                    [
                        attr.detach(),
                        delta.detach(),
                    ],
                )
                if idx % 10 == 0:
                    print(f"Batches done: {idx}")
            attributions[layer_name] = layer_attributions
            # incremental checkpoint after each layer (the final save below
            # then rewrites the complete dict)
            if save_checkpoint:
                torch.save(attributions, str(attributions_checkpoint_path))
        if save_checkpoint:
            torch.save(attributions, str(attributions_checkpoint_path))
        return attributions

    def inference_step(self, batch, only_x=False):
        """Shared forward pass; with only_x=True the batch is inputs only and
        loss/accuracy are None."""
        if only_x:
            x = batch
        else:
            x, y = batch
        y_hat = self(x)
        if only_x:
            d = {"loss": None, "accuracy": None, "predictions": y_hat}
        else:
            d = {
                "loss": self.loss(y_hat, y),
                "accuracy": self.accuracy(y_hat, y),
                "predictions": y_hat,
            }
        return d

    def training_step(self, batch, batch_idx):
        """Standard training step; logs loss/accuracy per step and per epoch."""
        metrics = self.inference_step(batch)
        self.log_dict(
            {"train_loss": metrics["loss"], "train_accuracy": metrics["accuracy"]},
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )
        return metrics["loss"]

    def test_step(self, batch, batch_idx, dataloader_idx=0):
        """Test step; returns predictions so on_test_batch_end can fill the
        recorded dataframe rows."""
        metrics = self.inference_step(batch)
        self.log_dict(
            {"test_loss": metrics["loss"], "test_accuracy": metrics["accuracy"]},
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )
        return metrics["predictions"]

    def validation_step(self, batch, batch_idx):
        """Validation step; logs loss/accuracy and returns predictions."""
        metrics = self.inference_step(batch)
        self.log_dict(
            {"val_loss": metrics["loss"], "val_accuracy": metrics["accuracy"]},
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )
        return metrics["predictions"]

    def predict_step(self, batch, batch_idx, dataloader_idx=0):
        """Prediction step: inputs only, no targets, no logging."""
        metrics = self.inference_step(batch, only_x=True)
        # self.log({"val_loss": metrics["loss"], "val_accuracy": metrics["accuracy"]}, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        return metrics["predictions"]

    def on_test_batch_end(self, outputs, batch, batch_idx, dataloader_idx=0):
        """Fill batch_index and per-sample loss/accuracy into the rows the
        hooks just recorded (identified by still-null accuracy AND loss),
        flushing the dataframe to CSV every 10 batches."""
        # super().on_test_batch_end(outputs, batch, batch_idx, dataloader_idx)
        _, y = batch
        row_selector = (
            self.dataframe["accuracy"].isnull() & self.dataframe["loss"].isnull()
        )
        self.dataframe.loc[row_selector, "batch_index"] = batch_idx
        # assert len(self.dataframe.loc[row_selector]) / len(self.handles) == y.size()[0] == outputs.size()[0]
        for bindex, (by_hat, by) in enumerate(zip(outputs, y)):
            # unsqueeze to keep a batch dimension for loss/accuracy fns
            by_hat = by_hat.unsqueeze(0)
            by = by.unsqueeze(0)
            extra_row_selector = row_selector & (
                self.dataframe["element_in_batch_index"] == bindex
            )
            self.dataframe.loc[extra_row_selector, "loss"] = self.loss(
                by_hat, by
            ).item()
            self.dataframe.loc[extra_row_selector, "accuracy"] = self.accuracy(
                by_hat, by
            ).item()
        self.dataframe_path.parent.mkdir(parents=True, exist_ok=True)
        if batch_idx % 10 == 0:
            self.join_saved_dataframe(self.dataframe, self.dataframe_path)
            self.reset_dataframe()
        # print(self.dataframe)

    def on_test_end(self):
        """Flush any rows still buffered after the last test batch."""
        self.join_saved_dataframe(self.dataframe, self.dataframe_path)
class DataModule(lightning.LightningDataModule):
    """LightningDataModule for MNIST / CIFAR10 / GTSRB with the standard
    normalization and (for training) augmentation transforms."""

    # normalization constants per dataset
    MNIST_DEFAULT_TRANSFORM = torchvision.transforms.Compose(
        [
            torchvision.transforms.Resize((28, 28)),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(
                (0.1307,),
                (0.3081,),
            ),
        ]
    )
    CIFAR10_DEFAULT_TRANSFORM = torchvision.transforms.Compose(
        [
            torchvision.transforms.Resize((32, 32)),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(
                mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
                std=[x / 255.0 for x in [63.0, 62.1, 66.7]],
            ),
        ]
    )
    GTSRB_DEFAULT_TRANSFORM = torchvision.transforms.Compose(
        [
            torchvision.transforms.Resize((48, 48)),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(
                (0.3337, 0.3064, 0.3171),
                (0.2672, 0.2564, 0.2629),
            ),
        ]
    )

    @staticmethod
    def gtsrb_wrapper(data_dir, train: bool, transform=None, download: bool = True):
        """Adapt torchvision's split-based GTSRB dataset to the
        train-flag interface used by MNIST/CIFAR10."""
        if train is True:
            split = "train"
        elif train is False:
            split = "test"
        else:
            raise ValueError()
        return torchvision.datasets.GTSRB(
            str(data_dir),
            split=split,
            download=download,
            transform=transform,
        )

    def __init__(
        self,
        dataset_name,
        data_dir: str = "/shared/ml/datasets/vision/",
        train_transform=None,
        test_transform=None,
        batch_size=64,
        num_workers=32,
        train_val_split=0.8,
        seed=42,
        dataset_class=None,
    ):
        """Store configuration and resolve the dataset class/transforms.

        dataset_class overrides the name-based lookup; train/test transforms
        left as None fall back to the per-dataset defaults in setup_dataset.
        """
        super().__init__()
        self.dataset_name = dataset_name.lower()
        self.dataset_class = dataset_class
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.train_val_split = train_val_split
        self.num_workers = num_workers
        self.seed = seed
        self.train_transform = train_transform
        self.test_transform = test_transform
        # filled in by setup_dataset below
        self.num_classes = None
        self.setup_dataset()

    def setup_dataset(self):
        """Resolve dataset class from name and apply default transforms /
        num_classes for the chosen dataset; raises ValueError if unknown."""
        if self.dataset_class is None:
            if self.dataset_name == "cifar10":
                self.dataset_class = torchvision.datasets.CIFAR10
            elif self.dataset_name == "mnist":
                self.dataset_class = torchvision.datasets.MNIST
            elif self.dataset_name == "gtsrb":
                self.dataset_class = self.__class__.gtsrb_wrapper
        if self.dataset_class == self.__class__.gtsrb_wrapper:
            if self.train_transform is None:
                self.train_transform = torchvision.transforms.Compose(
                    [
                        torchvision.transforms.Resize((48, 48)),
                        torchvision.transforms.RandomCrop(48),
                        torchvision.transforms.RandomHorizontalFlip(),
                        torchvision.transforms.RandomVerticalFlip(),
                        self.GTSRB_DEFAULT_TRANSFORM,
                    ]
                )
            if self.test_transform is None:
                self.test_transform = self.GTSRB_DEFAULT_TRANSFORM
            self.num_classes = 43
        elif self.dataset_class == torchvision.datasets.MNIST or issubclass(
            self.dataset_class, torchvision.datasets.MNIST
        ):
            if self.train_transform is None:
                self.train_transform = torchvision.transforms.Compose(
                    [
                        torchvision.transforms.Resize((28, 28)),
                        torchvision.transforms.RandomCrop(28, padding=4),
                        self.MNIST_DEFAULT_TRANSFORM,
                    ]
                )
            if self.test_transform is None:
                self.test_transform = self.MNIST_DEFAULT_TRANSFORM
            self.num_classes = 10
        elif self.dataset_class == torchvision.datasets.CIFAR10 or issubclass(
            self.dataset_class, torchvision.datasets.CIFAR10
        ):
            if self.train_transform is None:
                self.train_transform = torchvision.transforms.Compose(
                    [
                        torchvision.transforms.Resize((32, 32)),
                        torchvision.transforms.RandomCrop(32, padding=4),
                        torchvision.transforms.RandomHorizontalFlip(),
                        self.CIFAR10_DEFAULT_TRANSFORM,
                    ]
                )
            if self.test_transform is None:
                self.test_transform = self.CIFAR10_DEFAULT_TRANSFORM
            self.num_classes = 10
        else:
            raise ValueError("unknown dataset")

    def prepare_data(self):
        # download
        self.dataset_class(self.data_dir, train=True, download=True)
        self.dataset_class(self.data_dir, train=False, download=True)

    def setup(self, stage: str):
        """Create the split datasets for the requested stage.

        For "fit", the same seeded random_split is performed twice so that
        the train subset uses train_transform while the val subset uses
        test_transform on the same underlying indices.
        """
        # Assign train/val datasets for use in dataloaders
        if stage == "fit":
            dataset_train_transform = self.dataset_class(
                self.data_dir, train=True, transform=self.train_transform
            )
            n_train_elements = math.floor(
                len(dataset_train_transform) * self.train_val_split
            )
            self.dataset_train, _ = torch.utils.data.random_split(
                dataset_train_transform,
                [n_train_elements, len(dataset_train_transform) - n_train_elements],
                generator=torch.Generator().manual_seed(self.seed),
            )
            dataset_test_transform = self.dataset_class(
                self.data_dir, train=True, transform=self.test_transform
            )
            _, self.dataset_val = torch.utils.data.random_split(
                dataset_test_transform,
                [n_train_elements, len(dataset_train_transform) - n_train_elements],
                generator=torch.Generator().manual_seed(self.seed),
            )

        # Assign test dataset for use in dataloader(s)
        if stage == "test":
            self.dataset_test = self.dataset_class(
                self.data_dir, train=False, transform=self.test_transform
            )

        if stage == "predict":
            self.dataset_predict = self.dataset_class(
                self.data_dir, train=False, transform=self.test_transform
            )

    def train_dataloader(self):
        """Shuffled training dataloader."""
        return torch.utils.data.DataLoader(
            self.dataset_train,
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=self.num_workers,
        )

    def val_dataloader(self):
        """Deterministic (unshuffled) validation dataloader."""
        return torch.utils.data.DataLoader(
            self.dataset_val,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
        )

    def test_dataloader(self):
        """Deterministic (unshuffled) test dataloader."""
        return torch.utils.data.DataLoader(
            self.dataset_test,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
        )

    def predict_dataloader(self):
        """Deterministic (unshuffled) prediction dataloader."""
        return torch.utils.data.DataLoader(
            self.dataset_predict,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
        )
# In[3]:
# Experiment configuration: which network / dataset combination to analyse and
# where the artifacts (weights checkpoint, attributions, activation CSV) live.
TIME_FORMAT = "%Y_%m_%d__%H_%M_%S_%z"
time_string = time.strftime(TIME_FORMAT)
model_name = "resnet18"
dataset_name = "GTSRB"
pruned = False
sparse = False
base_path = pathlib.Path(
    f"./results/trained_{model_name}_{dataset_name}_{'pruned' if pruned else 'original'}_{'sparse' if sparse else 'dense'}_earlystopping_lightning"
)
# model_checkpoint_path = base_path.with_suffix(f".{time_string}.pt")
# NOTE(review): checkpoint and attributions paths are pinned to one specific
# earlier run instead of being derived from base_path/time_string above —
# presumably to reuse a trained model; confirm before changing parameters.
model_checkpoint_path = pathlib.Path(
    "./results/trained_resnet18_GTSRB_original_dense_earlystopping_lightning.2023_04_18__13_51_32_+0200.pt"
)
# attributions_checkpoint_path = base_path.with_suffix(f".{time_string}.attributions.pt")
attributions_checkpoint_path = pathlib.Path(
    "./results/trained_resnet18_GTSRB_original_dense_earlystopping_lightning.2023_04_18__13_51_32_+0200.attributions.pt"
)
# The activation dataframe is always written to a fresh, timestamped CSV.
dataframe_path = base_path.with_suffix(f".{time_string}.csv")
# When True, only run the learning-rate finder and abort (see below).
learning_rate_finder = False
# seed = 7 # vgg11 cifar10
seed = 7  # resnet18 gtsrb
# Seed all RNGs handled by lightning for reproducibility.
lightning.seed_everything(seed)
# In[4]:
# Trainer: early stopping on validation loss; the ModelCheckpoint callback is
# configured with save_top_k=0 to disable automatic saving, because the final
# weights are saved explicitly via trainer.save_checkpoint() below.
trainer = lightning.Trainer(
    accelerator="gpu",
    devices=[2],
    # setting max_epochs make it not work, it stops with max_epochs=0, also fast_dev_run=True breaks it
    # fast_dev_run=True,
    callbacks=[
        lightning.pytorch.callbacks.EarlyStopping(
            "val_loss",
            min_delta=0.001,
            patience=5,
            verbose=True,
            mode="min",
            strict=True,
            check_finite=True,
            stopping_threshold=None,
            divergence_threshold=None,
            check_on_train_epoch_end=None,
            log_rank_zero_only=False,
        ),
        lightning.pytorch.callbacks.ModelCheckpoint(
            dirpath=None,
            filename=None,
            monitor=None,
            verbose=False,
            save_last=None,
            # to disable model saving
            save_top_k=0,
            save_weights_only=False,
            mode="min",
            auto_insert_metric_name=True,
            every_n_train_steps=None,
            train_time_interval=None,
            every_n_epochs=None,
            save_on_train_epoch_end=None,
        ),
        lightning.pytorch.callbacks.RichProgressBar(
            refresh_rate=10,
        ),
        lightning.pytorch.callbacks.StochasticWeightAveraging(
            swa_lrs=1e-2,
        ),
    ],
)
# Data first, then the model: the classifier's output size is taken from the
# datamodule's num_classes attribute (set by DataModule.__init__ per dataset).
datamodule = DataModule(
    dataset_name=dataset_name,
    data_dir=f"/shared/ml/datasets/vision/{dataset_name}",
    train_transform=None,  # None -> dataset-specific default augmentation
    test_transform=None,  # None -> dataset-specific default normalisation
    batch_size=64,
    train_val_split=0.8,
    seed=seed,
)
model = Model(
    model_name=model_name,
    num_classes=datamodule.num_classes,
    accuracy_fn=torchmetrics.Accuracy(
        task="multiclass",
        num_classes=datamodule.num_classes,
    ),
    loss_fn=torch.nn.CrossEntropyLoss(),
    dataframe_path=dataframe_path,
    optimizer_class=torch.optim.AdamW,
    learning_rate=2e-3,
)
# In[ ]:
if learning_rate_finder:
    # One-off learning-rate sweep: report the suggestion and abort the run.
    tuner = lightning.pytorch.tuner.Tuner(trainer)
    tuner.lr_find(model, datamodule=datamodule)
    print(model.learning_rate)
    raise Exception()
if model_checkpoint_path.exists():
    # load_from_checkpoint is a classmethod that RETURNS the restored model; the
    # result must be rebound, otherwise the freshly initialised (untrained)
    # weights would be used for the attribution pass below.
    model = model.__class__.load_from_checkpoint(str(model_checkpoint_path))
else:
    model_checkpoint_path.parent.mkdir(parents=True, exist_ok=True)
    trainer.fit(model, datamodule)
    trainer.save_checkpoint(str(model_checkpoint_path))
# In[ ]:
# Compute (or reload) per-layer attributions on the test split, then register
# forward hooks recording the activations of the most relevant neurons.
datamodule.prepare_data()
datamodule.setup(stage="test")
# Called through the class with `model` passed explicitly as self; equivalent
# to model.get_attributions(...).
attributions = model.__class__.get_attributions(
    model,
    datamodule.test_dataloader(),
    list(model.LAYER_LIST[model.model_name].keys()),
    attributions_checkpoint_path=attributions_checkpoint_path,
    save_checkpoint=True,
    load_checkpoint=True,
)
datamodule.teardown(stage="test")
model.add_hooks(attributions, topk=5, bottomk=5)
# In[ ]:
# The test pass triggers the hooks; per-sample accuracy/loss are merged into
# the activation dataframe and flushed to CSV by the model's test callbacks.
trainer.test(model, datamodule, ckpt_path=str(model_checkpoint_path))
| 28,259 | 34.280899 | 147 | py |
enpheeph | enpheeph-main/notebooks/pruning_distribution_analysis/pruning_distribution_analysis_v2.2023_04_15__15_28_UTC.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# enpheeph - Neural Fault Injection Framework
# Copyright (C) 2020-2023 Alessio "Alexei95" Colucci
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# # Use the enpheeph-dev mamba environment
# The old one is enpheeph-dev-old-lightning-flash
# In[1]:
import math
import os
import pathlib
import time
import captum
import lightning
import numpy
import pandas
import torch
import torch.optim
import torchmetrics
import torchvision
import torchvision.datasets
import torchvision.transforms
# In[2]:
class Model(lightning.LightningModule):
    """Image classifier that records the activations of selected neurons.

    During testing, forward hooks (registered via ``add_hooks`` from captum
    layer attributions) append one row per tracked neuron and per batch element
    to ``self.dataframe``; ``on_test_batch_end`` then fills in the per-sample
    loss/accuracy and periodically flushes the dataframe to a ``|``-separated
    CSV at ``self.dataframe_path``.
    """
    # Known per-layer output activation shapes ([C, H, W] or [features]) used by
    # the driver script to enumerate the layers to attribute.
    # NOTE(review): spatial sizes assume 32x32 (CIFAR10-sized) inputs — confirm
    # before reusing with another input resolution.
    LAYER_LIST = {
        "vgg11": {
            "model.features.0": [64, 32, 32],
            "model.features.1": [64, 32, 32],
            "model.features.2": [64, 16, 16],
            "model.features.3": [128, 16, 16],
            "model.features.4": [128, 16, 16],
            "model.features.5": [128, 8, 8],
            "model.features.6": [256, 8, 8],
            "model.features.7": [256, 8, 8],
            "model.features.8": [256, 8, 8],
            "model.features.9": [256, 8, 8],
            "model.features.10": [256, 4, 4],
            "model.features.11": [512, 4, 4],
            "model.features.12": [512, 4, 4],
            "model.features.13": [512, 4, 4],
            "model.features.14": [512, 4, 4],
            "model.features.15": [512, 2, 2],
            "model.features.16": [512, 2, 2],
            "model.features.17": [512, 2, 2],
            "model.features.18": [512, 2, 2],
            "model.features.19": [512, 2, 2],
            "model.features.20": [512, 1, 1],
            "model.avgpool": [512, 7, 7],
            "model.classifier.0": [4096],
            "model.classifier.1": [4096],
            "model.classifier.2": [4096],
            "model.classifier.3": [4096],
            "model.classifier.4": [4096],
            "model.classifier.5": [4096],
            "model.classifier.6": [10],
        },
    }
    def __init__(
        self,
        model_name,
        num_classes,
        accuracy_fn,
        loss_fn,
        dataframe_path,
        optimizer_class,
        learning_rate,
    ):
        """Build the classifier and the empty activation-recording dataframe.

        Args:
            model_name: architecture key understood by ``setup_model``
                ("vgg11" or "mlp").
            num_classes: size of the classifier output.
            accuracy_fn: metric callable ``(logits, targets) -> tensor``.
            loss_fn: loss callable ``(logits, targets) -> tensor``.
            dataframe_path: CSV file recorded activations are appended to.
            optimizer_class: optimizer constructor, e.g. ``torch.optim.AdamW``.
            learning_rate: learning rate passed to the optimizer.
        """
        super().__init__()
        self.save_hyperparameters()
        self.model_name = model_name
        self.num_classes = num_classes
        self.accuracy = accuracy_fn
        self.loss = loss_fn
        self.optimizer_class = optimizer_class
        self.learning_rate = learning_rate
        self.dataframe_path = pathlib.Path(dataframe_path)
        self.setup_model(model_name=self.model_name, num_classes=self.num_classes)
        # Forward-hook handles registered by add_hooks (so they can be removed).
        self.handles = []
        self.reset_dataframe()
        self.init_model()
    def init_model(self):
        """Re-initialise conv weights (Kaiming) and norm-layer affine params."""
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.kaiming_normal_(
                    m.weight, mode="fan_out", nonlinearity="relu"
                )
            elif isinstance(m, (torch.nn.BatchNorm2d, torch.nn.GroupNorm)):
                torch.nn.init.constant_(m.weight, 1)
                torch.nn.init.constant_(m.bias, 0)
    def reset_dataframe(self):
        """Replace self.dataframe with an empty frame with the recording schema."""
        self.dataframe = pandas.DataFrame(
            columns=[
                "module_name",
                "tensor_type",
                "batch_index",
                "element_in_batch_index",
                "location",
                "value",
                "accuracy",
                "loss",
            ]
        )
    @staticmethod
    def join_saved_dataframe(dataframe, dataframe_path: os.PathLike):
        """Append ``dataframe`` to the CSV at ``dataframe_path`` (sep='|').

        Creates the file (and parent directories) on first call; afterwards the
        existing CSV is read back, concatenated with the new rows, re-indexed,
        and rewritten.
        """
        dataframe_path = pathlib.Path(dataframe_path)
        if not dataframe_path.exists():
            dataframe_path.parent.mkdir(parents=True, exist_ok=True)
            dataframe.to_csv(dataframe_path, sep="|")
        else:
            df = pandas.read_csv(dataframe_path, sep="|", index_col=[0], header=[0])
            new_df = pandas.concat([df, dataframe], axis=0)
            new_df.reset_index(drop=True, inplace=True)
            new_df.to_csv(dataframe_path, sep="|")
    def forward(self, x):
        return self.model(x)
    def configure_optimizers(self):
        optimizer = self.optimizer_class(self.parameters(), lr=self.learning_rate)
        return optimizer
    def make_neuron_output_function(self, module_name, location):
        """Return a forward hook that records one neuron's output activation.

        The hook appends a dataframe row per batch element; ``batch_index``,
        ``accuracy`` and ``loss`` are left as None and filled in later by
        ``on_test_batch_end``.
        """
        def save_neuron_output(module, args, output) -> None:
            for b_idx, b in enumerate(output):
                self.dataframe.loc[len(self.dataframe)] = [
                    module_name,
                    "output",
                    None,
                    b_idx,
                    location,
                    # Scalar activation at the tracked coordinates.
                    b[location].item(),
                    None,
                    None,
                ]
        return save_neuron_output
    def add_hooks(self, attributions, topk=1):
        """Register recording hooks on the ``topk`` highest-|attribution| neurons.

        Args:
            attributions: mapping layer_name -> list of (attribution, delta)
                pairs, as produced by ``get_attributions``.
            topk: number of neurons to track per layer.
        """
        for layer_name, layer_attributions_and_deltas in attributions.items():
            # Concatenate per-batch attributions, then sum over the batch axis.
            layer_attributions_cat = torch.cat(
                tuple(l_attr for l_attr, _ in layer_attributions_and_deltas),
                dim=0,
            )
            summed_layer_attributions = torch.sum(
                layer_attributions_cat,
                (0,),
            )
            # topk_values is unused; only the flat indices are needed.
            topk_values, topk_indices = torch.topk(
                abs(
                    summed_layer_attributions.flatten(),
                ),
                k=topk,
                largest=True,
                sorted=True,
            )
            for top_index in topk_indices:
                # Convert the flat index back to multi-dim coordinates.
                target_neuron_location = numpy.unravel_index(
                    top_index,
                    summed_layer_attributions.size(),
                    order="C",
                )
                module = self.get_layer_from_full_name(
                    self,
                    layer_name,
                    separator=".",
                    main_model_is_in_the_layer_name=False,
                )
                self.handles.append(
                    module.register_forward_hook(
                        self.make_neuron_output_function(
                            layer_name, tuple(target_neuron_location)
                        )
                    )
                )
    def setup_model(self, model_name, num_classes):
        """Instantiate the backbone network selected by ``model_name``."""
        if model_name == "vgg11":
            self.model = torchvision.models.vgg11(
                num_classes=num_classes, init_weights=True
            )
        elif model_name == "mlp":
            # Simple MLP for 28x28 (MNIST-sized) inputs.
            self.model = torch.nn.Sequential(
                torch.nn.Flatten(),
                torch.nn.Linear(28 * 28, 100),
                torch.nn.ReLU(),
                torch.nn.Linear(100, num_classes),
            )
        else:
            raise ValueError("unknown model")
    @staticmethod
    def get_full_layer_name_from_summary(layer_summary, skip_main_model=True):
        """Build a dotted layer name by walking a summary's parent_info chain.

        With ``skip_main_model`` the outermost (root) name is omitted. Note the
        condition relies on ``and`` binding tighter than ``or``:
        ``(not skip) or (skip and parent.parent is not None)``.
        """
        parent_info = layer_summary.parent_info
        layer_full_name = layer_summary.var_name
        while parent_info is not None and (
            not skip_main_model
            or skip_main_model
            and parent_info.parent_info is not None
        ):
            layer_full_name = f"{parent_info.var_name}.{layer_full_name}"
            parent_info = parent_info.parent_info
        return layer_full_name
    @staticmethod
    def get_layer_from_full_name(
        model, layer_name, separator=".", main_model_is_in_the_layer_name=False
    ):
        """Resolve a dotted layer name (e.g. "model.features.3") to a submodule.

        If ``main_model_is_in_the_layer_name`` the first path component is the
        root model itself and is dropped before traversal.
        """
        module = model
        if main_model_is_in_the_layer_name:
            layer_name = separator.join(layer_name.split(separator)[1:])
        for l_n in layer_name.split(separator):
            module = getattr(module, l_n)
        return module
    def get_attributions(
        self,
        dataloader,
        layer_name_list,
        attributions_checkpoint_path,
        attribution=captum.attr.LayerConductance,
        save_checkpoint=True,
        load_checkpoint=True,
    ):
        """Compute captum attributions per layer over ``dataloader`` on CPU.

        Returns a dict layer_name -> list of [attribution, delta] pairs (one
        per batch). With ``load_checkpoint`` an existing checkpoint is reused;
        with ``save_checkpoint`` results are checkpointed after every layer
        (and once more at the end) so long runs can be resumed.
        """
        if attributions_checkpoint_path.exists() and load_checkpoint:
            attributions = torch.load(str(attributions_checkpoint_path))
            return attributions
        elif save_checkpoint:
            attributions_checkpoint_path.parent.mkdir(exist_ok=True, parents=True)
        # Attribution runs in eval mode, on CPU.
        model = self.train(False).to(torch.device("cpu"))
        attributions = {}
        for layer_name in layer_name_list:
            print(layer_name)
            layer_attributions = []
            attr_instance = attribution(
                model, model.get_layer_from_full_name(model, layer_name)
            )
            for idx, b in enumerate(dataloader):
                x, y = b
                attr, delta = attr_instance.attribute(
                    inputs=x.to(torch.device("cpu")),
                    target=y.to(torch.device("cpu")),
                    return_convergence_delta=True,
                )
                layer_attributions.append(
                    [
                        attr.detach(),
                        delta.detach(),
                    ],
                )
                if idx % 10 == 0:
                    print(f"Batches done: {idx}")
            attributions[layer_name] = layer_attributions
            # Incremental checkpoint after each completed layer.
            if save_checkpoint:
                torch.save(attributions, str(attributions_checkpoint_path))
        if save_checkpoint:
            torch.save(attributions, str(attributions_checkpoint_path))
        return attributions
    def inference_step(self, batch, only_x=False):
        """Shared forward pass; with ``only_x`` the batch has no labels and the
        loss/accuracy entries are None."""
        if only_x:
            x = batch
        else:
            x, y = batch
        y_hat = self(x)
        if only_x:
            d = {"loss": None, "accuracy": None, "predictions": y_hat}
        else:
            d = {
                "loss": self.loss(y_hat, y),
                "accuracy": self.accuracy(y_hat, y),
                "predictions": y_hat,
            }
        return d
    def training_step(self, batch, batch_idx):
        metrics = self.inference_step(batch)
        self.log_dict(
            {"train_loss": metrics["loss"], "train_accuracy": metrics["accuracy"]},
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )
        return metrics["loss"]
    def test_step(self, batch, batch_idx, dataloader_idx=0):
        # Returns logits so on_test_batch_end can compute per-sample metrics.
        metrics = self.inference_step(batch)
        self.log_dict(
            {"test_loss": metrics["loss"], "test_accuracy": metrics["accuracy"]},
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )
        return metrics["predictions"]
    def validation_step(self, batch, batch_idx):
        metrics = self.inference_step(batch)
        self.log_dict(
            {"val_loss": metrics["loss"], "val_accuracy": metrics["accuracy"]},
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )
        return metrics["predictions"]
    def predict_step(self, batch, batch_idx, dataloader_idx=0):
        metrics = self.inference_step(batch, only_x=True)
        # self.log({"val_loss": metrics["loss"], "val_accuracy": metrics["accuracy"]}, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        return metrics["predictions"]
    def on_test_batch_end(self, outputs, batch, batch_idx, dataloader_idx=0):
        """Fill batch_index/loss/accuracy into the rows the hooks just added,
        then flush the dataframe to CSV every 10 batches."""
        # super().on_test_batch_end(outputs, batch, batch_idx, dataloader_idx)
        _, y = batch
        # Rows from this batch are exactly those still missing their metrics.
        row_selector = (
            self.dataframe["accuracy"].isnull() & self.dataframe["loss"].isnull()
        )
        self.dataframe.loc[row_selector, "batch_index"] = batch_idx
        # Sanity check: one row per tracked neuron per batch element.
        assert (
            len(self.dataframe.loc[row_selector]) / len(self.handles)
            == y.size()[0]
            == outputs.size()[0]
        )
        for bindex, (by_hat, by) in enumerate(zip(outputs, y)):
            # Per-sample metrics: re-add the batch dimension of size 1.
            by_hat = by_hat.unsqueeze(0)
            by = by.unsqueeze(0)
            extra_row_selector = row_selector & (
                self.dataframe["element_in_batch_index"] == bindex
            )
            self.dataframe.loc[extra_row_selector, "loss"] = self.loss(
                by_hat, by
            ).item()
            self.dataframe.loc[extra_row_selector, "accuracy"] = self.accuracy(
                by_hat, by
            ).item()
        self.dataframe_path.parent.mkdir(parents=True, exist_ok=True)
        if batch_idx % 10 == 0:
            # Flush and restart the in-memory frame to bound its size.
            self.join_saved_dataframe(self.dataframe, self.dataframe_path)
            self.reset_dataframe()
        # print(self.dataframe)
    def on_test_end(self):
        # Flush whatever remains after the last periodic write.
        self.join_saved_dataframe(self.dataframe, self.dataframe_path)
class DataModule(lightning.LightningDataModule):
    """LightningDataModule serving MNIST or CIFAR10 with standard augmentation.

    The training split is divided into train/validation subsets with a seeded
    split; the validation subset uses the deterministic evaluation transform.
    """

    # Normalisation pipeline for MNIST (dataset mean/std).
    MNIST_DEFAULT_TRANSFORM = torchvision.transforms.Compose(
        [
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(
                (0.1307,),
                (0.3081,),
            ),
        ]
    )
    # Normalisation pipeline for CIFAR10 (per-channel mean/std scaled to [0, 1]).
    CIFAR10_DEFAULT_TRANSFORM = torchvision.transforms.Compose(
        [
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(
                mean=[channel / 255.0 for channel in [125.3, 123.0, 113.9]],
                std=[channel / 255.0 for channel in [63.0, 62.1, 66.7]],
            ),
        ]
    )

    def __init__(
        self,
        dataset_class,
        data_dir: str = "/shared/ml/datasets/vision/",
        train_transform=None,
        test_transform=None,
        batch_size=64,
        num_workers=32,
        train_val_split=0.8,
        seed=42,
    ):
        """Store options and pick per-dataset default transforms.

        Raises:
            ValueError: if ``dataset_class`` is neither an MNIST nor a CIFAR10
                subclass.
        """
        super().__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.train_val_split = train_val_split
        self.num_workers = num_workers
        self.seed = seed
        # Defaults are used only for transforms the caller did not supply.
        if issubclass(dataset_class, torchvision.datasets.MNIST):
            default_train = torchvision.transforms.Compose(
                [
                    torchvision.transforms.RandomCrop(28, padding=4),
                    self.MNIST_DEFAULT_TRANSFORM,
                ]
            )
            default_test = self.MNIST_DEFAULT_TRANSFORM
            self.num_classes = 10
        elif issubclass(dataset_class, torchvision.datasets.CIFAR10):
            default_train = torchvision.transforms.Compose(
                [
                    torchvision.transforms.RandomCrop(32, padding=4),
                    torchvision.transforms.RandomHorizontalFlip(),
                    self.CIFAR10_DEFAULT_TRANSFORM,
                ]
            )
            default_test = self.CIFAR10_DEFAULT_TRANSFORM
            self.num_classes = 10
        else:
            raise ValueError("unknown dataset")
        self.dataset_class = dataset_class
        self.train_transform = (
            train_transform if train_transform is not None else default_train
        )
        self.test_transform = (
            test_transform if test_transform is not None else default_test
        )

    def prepare_data(self):
        """Download both dataset splits so setup() never needs network access."""
        for is_train in (True, False):
            self.dataset_class(self.data_dir, train=is_train, download=True)

    def setup(self, stage: str):
        """Create the datasets required by the requested stage."""
        if stage == "fit":
            # The training split is instantiated twice: once with augmentation
            # (used for training) and once with the evaluation transform (used
            # for validation). Seeding both random_split calls identically
            # keeps the two index partitions complementary.
            augmented = self.dataset_class(
                self.data_dir, train=True, transform=self.train_transform
            )
            n_train = math.floor(len(augmented) * self.train_val_split)
            n_val = len(augmented) - n_train
            self.dataset_train, _ = torch.utils.data.random_split(
                augmented,
                [n_train, n_val],
                generator=torch.Generator().manual_seed(self.seed),
            )
            plain = self.dataset_class(
                self.data_dir, train=True, transform=self.test_transform
            )
            _, self.dataset_val = torch.utils.data.random_split(
                plain,
                [n_train, n_val],
                generator=torch.Generator().manual_seed(self.seed),
            )
        if stage == "test":
            self.dataset_test = self.dataset_class(
                self.data_dir, train=False, transform=self.test_transform
            )
        if stage == "predict":
            self.dataset_predict = self.dataset_class(
                self.data_dir, train=False, transform=self.test_transform
            )

    def train_dataloader(self):
        """Shuffled loader over the training subset."""
        return torch.utils.data.DataLoader(
            self.dataset_train,
            shuffle=True,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
        )

    def val_dataloader(self):
        """Deterministic loader over the validation subset."""
        return torch.utils.data.DataLoader(
            self.dataset_val,
            shuffle=False,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
        )

    def test_dataloader(self):
        """Deterministic loader over the test split."""
        return torch.utils.data.DataLoader(
            self.dataset_test,
            shuffle=False,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
        )

    def predict_dataloader(self):
        """Deterministic loader over the prediction split (same data as test)."""
        return torch.utils.data.DataLoader(
            self.dataset_predict,
            shuffle=False,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
        )
# In[3]:
# Artifact locations: every run writes to fresh, timestamped files under
# ./results (checkpoint, attributions, activation CSV).
TIME_FORMAT = "%Y_%m_%d__%H_%M_%S_%z"
time_string = time.strftime(TIME_FORMAT)
base_path = pathlib.Path("./results/trained_vgg11_cifar10_earlystopping_lightning")
model_checkpoint_path = base_path.with_suffix(f".{time_string}.pt")
attributions_checkpoint_path = base_path.with_suffix(f".{time_string}.attributions.pt")
dataframe_path = base_path.with_suffix(f".{time_string}.csv")
# When True, only run the learning-rate finder below.
learning_rate_finder = False
seed = 7
# Seed all RNGs handled by lightning for reproducibility.
lightning.seed_everything(seed)
# In[4]:
# Trainer: train until early stopping on validation loss triggers
# (max_epochs=-1 means no epoch limit); automatic checkpointing is disabled
# (save_top_k=0) because the final weights are saved explicitly below.
trainer = lightning.Trainer(
    accelerator="gpu",
    devices=[2],
    max_epochs=-1,
    callbacks=[
        lightning.pytorch.callbacks.EarlyStopping(
            "val_loss",
            min_delta=0.001,
            patience=5,
            verbose=True,
            mode="min",
            strict=True,
            check_finite=True,
            stopping_threshold=None,
            divergence_threshold=None,
            check_on_train_epoch_end=None,
            log_rank_zero_only=False,
        ),
        lightning.pytorch.callbacks.ModelCheckpoint(
            dirpath=None,
            filename=None,
            monitor=None,
            verbose=False,
            save_last=None,
            # to disable model saving
            save_top_k=0,
            save_weights_only=False,
            mode="min",
            auto_insert_metric_name=True,
            every_n_train_steps=None,
            train_time_interval=None,
            every_n_epochs=None,
            save_on_train_epoch_end=None,
        ),
        lightning.pytorch.callbacks.RichProgressBar(
            refresh_rate=10,
        ),
        lightning.pytorch.callbacks.StochasticWeightAveraging(
            swa_lrs=1e-2,
        ),
    ],
)
# VGG11 classifier on CIFAR10 (10 classes).
model = Model(
    model_name="vgg11",
    num_classes=10,
    accuracy_fn=torchmetrics.Accuracy(
        task="multiclass",
        num_classes=10,
    ),
    loss_fn=torch.nn.CrossEntropyLoss(),
    dataframe_path=dataframe_path,
    optimizer_class=torch.optim.AdamW,
    learning_rate=1e-3,
)
datamodule = DataModule(
    dataset_class=torchvision.datasets.CIFAR10,
    data_dir="/shared/ml/datasets/vision/CIFAR10",
    train_transform=None,  # None -> default CIFAR10 augmentation
    test_transform=None,  # None -> default CIFAR10 normalisation
    batch_size=64,
    train_val_split=0.8,
    seed=seed,
)
# Tuner is used only when learning_rate_finder is enabled below.
tuner = lightning.pytorch.tuner.Tuner(trainer)
# In[5]:
if learning_rate_finder:
    # Learning-rate sweep; result is written back to model.learning_rate.
    # NOTE(review): unlike the sibling script, execution continues into the
    # training branch after the sweep — confirm whether a hard stop is wanted.
    tuner.lr_find(model, datamodule=datamodule)
    print(model.learning_rate)
if model_checkpoint_path.exists():
    # load_from_checkpoint is a classmethod that RETURNS the restored model;
    # rebind the result, otherwise the freshly initialised (untrained) weights
    # would be used for the attribution pass further down.
    model = model.__class__.load_from_checkpoint(str(model_checkpoint_path))
else:
    model_checkpoint_path.parent.mkdir(parents=True, exist_ok=True)
    trainer.fit(model, datamodule)
    trainer.save_checkpoint(str(model_checkpoint_path))
# In[ ]:
# Compute (or reload) per-layer attributions on the test split, register hooks
# on the top-3 most relevant neurons per layer, then run the test pass that
# records their activations into the CSV dataframe.
datamodule.prepare_data()
datamodule.setup(stage="test")
# Called through the class with `model` passed explicitly as self; equivalent
# to model.get_attributions(...).
attributions = model.__class__.get_attributions(
    model,
    datamodule.test_dataloader(),
    list(model.LAYER_LIST[model.model_name].keys()),
    attributions_checkpoint_path=attributions_checkpoint_path,
    save_checkpoint=True,
    load_checkpoint=True,
)
datamodule.teardown(stage="test")
model.add_hooks(attributions, topk=3)
# In[ ]:
trainer.test(model, datamodule, ckpt_path=str(model_checkpoint_path))
# In[ ]:
# dataframe_path = model_checkpoint_path.with_suffix(f".{time.strftime(TIME_FORMAT)}.csv")
# model.dataframe.to_csv(dataframe_path, sep="|")
# model.dataframe
| 21,405 | 31.482549 | 143 | py |
autoSDC | autoSDC-master/sphinx-docs/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# versastat documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 8 11:29:00 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the package importable for autodoc (conf.py lives two levels below the
# repository root).
sys.path.insert(0, os.path.abspath("../.."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Sphinx extension module names: autodoc pulls API docs from docstrings,
# mathjax renders math, viewcode links highlighted source.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.mathjax", "sphinx.ext.viewcode"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "versastat"
copyright = "2018, Brian DeCost"
author = "Brian DeCost"
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"
# The language for content autogenerated by Sphinx.
# Fixed: Sphinx expects a language code string here; ``language = None`` is
# deprecated (and rejected by Sphinx >= 5.0) and was always treated as "en".
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    "**": [
        "relations.html",  # needs 'show_related': True theme option to display
        "searchbox.html",
    ]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "versastatdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # Defaults are used for paper size ('papersize'), font size ('pointsize'),
    # extra preamble ('preamble') and float alignment ('figure_align').
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, "versastat.tex", "versastat Documentation", "Brian DeCost", "manual"),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "versastat", "versastat Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "versastat",
        "versastat Documentation",
        author,
        "versastat",
        "One line description of project.",
        "Miscellaneous",
    ),
]
pmb-nll | pmb-nll-main/src/single_image_inference.py | """
Probabilistic Detectron Single Image Inference Script
"""
import json
import os
import sys
import cv2
import torch
import tqdm
import core
# This is very ugly. Essential for now but should be fixed.
sys.path.append(os.path.join(core.top_dir(), "src", "detr"))
from detectron2.data import MetadataCatalog
from detectron2.data.transforms import ResizeShortestEdge
# Detectron imports
from detectron2.engine import launch
# Project imports
from core.evaluation_tools.evaluation_utils import (
get_train_contiguous_id_to_test_thing_dataset_id_dict,
)
from core.setup import setup_arg_parser, setup_config
from probabilistic_inference.inference_utils import build_predictor, instances_to_json
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args):
    """Run single-image inference over every file in ``args.image_dir``.

    Each image is resized (shortest edge), converted to a CHW tensor, and fed
    one at a time to the probabilistic predictor; all detections are written as
    a single JSON list to ``args.output_dir``/results.json.
    """
    # Setup config
    cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
    # Make sure only 1 data point is processed at a time. This simulates
    # deployment.
    cfg.defrost()
    cfg.DATALOADER.NUM_WORKERS = 32
    cfg.SOLVER.IMS_PER_BATCH = 1
    cfg.MODEL.DEVICE = device.type
    # Set up number of cpu threads
    torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)
    # Create inference output directory
    inference_output_dir = os.path.expanduser(args.output_dir)
    os.makedirs(inference_output_dir, exist_ok=True)
    # Get category mapping dictionary. Mapping here is from coco-->coco
    train_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        cfg.DATASETS.TRAIN[0]
    ).thing_dataset_id_to_contiguous_id
    test_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        cfg.DATASETS.TRAIN[0]
    ).thing_dataset_id_to_contiguous_id
    # If both dicts are equal or if we are performing out of distribution
    # detection, just flip the test dict.
    cat_mapping_dict = get_train_contiguous_id_to_test_thing_dataset_id_dict(
        cfg,
        args,
        train_thing_dataset_id_to_contiguous_id,
        test_thing_dataset_id_to_contiguous_id,
    )
    # Build predictor
    cfg.MODEL.WEIGHTS = os.path.expanduser(args.model_ckpt)
    predictor = build_predictor(cfg)
    # List images in image folder
    image_folder = os.path.expanduser(args.image_dir)
    image_list = os.listdir(image_folder)
    # Construct image resizer
    resizer = ResizeShortestEdge(
        cfg.INPUT.MIN_SIZE_TEST, max_size=cfg.INPUT.MAX_SIZE_TEST
    )
    final_output_list = []
    with torch.no_grad():
        with tqdm.tqdm(total=len(image_list)) as pbar:
            for idx, input_file_name in enumerate(image_list):
                cv2_image = cv2.imread(os.path.join(image_folder, input_file_name))
                # cv2.imread returns None for unreadable/non-image files; check
                # it before touching .size (the previous `cv2_image.size != 0`
                # test raised AttributeError on None and made the failure
                # branch unreachable).
                if cv2_image is not None and cv2_image.size != 0:
                    shape = cv2_image.shape
                    height = shape[0]
                    width = shape[1]
                    # Shortest-edge resize, then HWC -> CHW for the predictor.
                    output_transform = resizer.get_transform(cv2_image)
                    cv2_image = output_transform.apply_image(cv2_image)
                    input_im_tensor = torch.tensor(cv2_image).permute(2, 0, 1)
                    input_im = [
                        dict(
                            {
                                "filename": input_file_name,
                                "image_id": input_file_name,
                                "height": height,
                                "width": width,
                                "image": input_im_tensor,
                            }
                        )
                    ]
                    # Perform inference
                    outputs = predictor(input_im)
                    # predictor.visualize_inference(input_im, outputs)
                    final_output_list.extend(
                        instances_to_json(
                            outputs, input_im[0]["image_id"], cat_mapping_dict
                        )
                    )
                else:
                    print("Failed to read image {}".format(input_file_name))
                # Count failed reads too, so the bar always reaches 100%.
                pbar.update(1)
    with open(os.path.join(inference_output_dir, "results.json"), "w") as fp:
        json.dump(final_output_list, fp, indent=4, separators=(",", ": "))
if __name__ == "__main__":
    # Create arg parser
    arg_parser = setup_arg_parser()
    args = arg_parser.parse_args()
    # Support single gpu inference only.
    args.num_gpus = 1
    print("Command Line Args:", args)
    # detectron2's launch handles process spawning; with num_gpus=1 it simply
    # calls main(args) in the current process.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| 4,694 | 32.297872 | 94 | py |
pmb-nll | pmb-nll-main/src/apply_net.py | """
Probabilistic Detectron Inference Script
"""
import json
import os
import sys
from shutil import copyfile
import torch
import tqdm
import core
# This is very ugly. Essential for now but should be fixed.
sys.path.append(os.path.join(core.top_dir(), "src", "detr"))
from detectron2.data import MetadataCatalog, build_detection_test_loader
# Detectron imports
from detectron2.engine import launch
# Project imports
from core.evaluation_tools.evaluation_utils import (
get_train_contiguous_id_to_test_thing_dataset_id_dict,
)
from core.setup import setup_arg_parser, setup_config
from offline_evaluation import (
compute_average_precision,
compute_calibration_errors,
compute_ood_probabilistic_metrics,
compute_probabilistic_metrics,
)
from probabilistic_inference.inference_utils import (
build_predictor,
get_inference_output_dir,
instances_to_json,
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args):
    """Run probabilistic inference on ``args.test_dataset`` and evaluate it.

    Unless ``args.eval_only`` is set, detections are dumped to
    ``coco_instances_results.json`` inside the inference output directory;
    afterwards the offline evaluation suites (AP, probabilistic metrics,
    calibration — or the OOD metrics for "ood" datasets) run on those results.
    """
    # Setup config
    cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
    # Make sure only 1 data point is processed at a time. This simulates
    # deployment.
    cfg.defrost()
    cfg.DATALOADER.NUM_WORKERS = 32
    cfg.SOLVER.IMS_PER_BATCH = 1
    cfg.MODEL.DEVICE = device.type
    # Set up number of cpu threads
    torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)
    # Create inference output directory and copy inference config file to keep
    # track of experimental settings
    if args.inference_dir == "":
        inference_output_dir = get_inference_output_dir(
            cfg["OUTPUT_DIR"],
            args.test_dataset,
            args.inference_config,
            args.image_corruption_level,
        )
    else:
        inference_output_dir = args.inference_dir
    # One idempotent makedirs covers both branches (the original repeated the
    # same exist_ok call up to three times).
    os.makedirs(inference_output_dir, exist_ok=True)
    copyfile(
        args.inference_config,
        os.path.join(inference_output_dir, os.path.split(args.inference_config)[-1]),
    )
    # Get category mapping dictionary:
    train_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        cfg.DATASETS.TRAIN[0]
    ).thing_dataset_id_to_contiguous_id
    test_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        args.test_dataset
    ).thing_dataset_id_to_contiguous_id
    # If both dicts are equal or if we are performing out of distribution
    # detection, just flip the test dict.
    cat_mapping_dict = get_train_contiguous_id_to_test_thing_dataset_id_dict(
        cfg,
        args,
        train_thing_dataset_id_to_contiguous_id,
        test_thing_dataset_id_to_contiguous_id,
    )
    # Build predictor
    predictor = build_predictor(cfg)
    test_data_loader = build_detection_test_loader(cfg, dataset_name=args.test_dataset)
    final_output_list = []
    if not args.eval_only:
        with torch.no_grad():
            with tqdm.tqdm(total=len(test_data_loader)) as pbar:
                for idx, input_im in enumerate(test_data_loader):
                    outputs = predictor(input_im)
                    # predictor.visualize_inference(input_im, outputs)
                    final_output_list.extend(
                        instances_to_json(
                            outputs, input_im[0]["image_id"], cat_mapping_dict
                        )
                    )
                    pbar.update(1)
        with open(
            os.path.join(inference_output_dir, "coco_instances_results.json"), "w"
        ) as fp:
            json.dump(final_output_list, fp, indent=4, separators=(",", ": "))
    # Offline evaluation on the freshly written (or pre-existing) results.
    if "ood" in args.test_dataset:
        compute_ood_probabilistic_metrics.main(args, cfg)
    else:
        compute_average_precision.main(args, cfg, inference_output_dir)
        compute_probabilistic_metrics.main(
            args,
            cfg,
            inference_output_dir=inference_output_dir,
            min_allowed_score=args.min_allowed_score,
        )
        compute_calibration_errors.main(
            args, cfg, inference_output_dir=inference_output_dir
        )
if __name__ == "__main__":
    # Create arg parser
    arg_parser = setup_arg_parser()
    args = arg_parser.parse_args()
    # Support single gpu inference only.
    args.num_gpus = 1
    print("Command Line Args:", args)
    # detectron2's launch handles process spawning; with num_gpus=1 it simply
    # calls main(args) in the current process.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| 4,645 | 30.391892 | 106 | py |
pmb-nll | pmb-nll-main/src/single_image_inference_eval.py | """
Probabilistic Detectron Single Image Inference Script
Runs inference and evaluation on specified images, rather than on entire dataset.
"""
import json
import os
import sys
from shutil import copyfile, rmtree
import torch
import tqdm
import core
# This is very ugly. Essential for now but should be fixed.
sys.path.append(os.path.join(core.top_dir(), "src", "detr"))
from detectron2.data import MetadataCatalog, build_detection_test_loader
# Detectron imports
from detectron2.engine import launch
from core.evaluation_tools import evaluation_utils
# Project imports
from core.evaluation_tools.evaluation_utils import \
get_train_contiguous_id_to_test_thing_dataset_id_dict
from core.setup import setup_arg_parser, setup_config
from offline_evaluation import (compute_average_precision,
compute_calibration_errors,
compute_ood_probabilistic_metrics,
compute_probabilistic_metrics)
from probabilistic_inference.inference_utils import (build_predictor,
get_inference_output_dir,
instances_to_json)
# Module-wide compute device: GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args):
    """Run per-image inference, visualization, and evaluation.

    For each selected image: run the predictor (or load cached predictions in
    --eval-only mode), dump that image's detections to a temporary directory,
    visualize them against ground truth, and compute AP / probabilistic
    metrics for that single image. Finally, aggregate results over all
    processed images.

    Args:
        args (Namespace): parsed command-line arguments from setup_arg_parser.
    """
    # Setup config
    cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
    # Make sure only 1 data point is processed at a time. This simulates
    # deployment.
    cfg.defrost()
    cfg.DATALOADER.NUM_WORKERS = 32
    cfg.SOLVER.IMS_PER_BATCH = 1
    cfg.MODEL.DEVICE = device.type
    # Set up number of cpu threads to match the dataloader worker count.
    torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)
    # Create inference output directory and copy inference config file to keep
    # track of experimental settings
    if args.inference_dir == "":
        inference_output_dir = get_inference_output_dir(
            cfg["OUTPUT_DIR"],
            args.test_dataset,
            args.inference_config,
            args.image_corruption_level,
        )
    else:
        inference_output_dir = args.inference_dir
        if not os.path.isdir(inference_output_dir):
            os.makedirs(inference_output_dir, exist_ok=True)
    os.makedirs(inference_output_dir, exist_ok=True)
    copyfile(
        args.inference_config,
        os.path.join(inference_output_dir, os.path.split(args.inference_config)[-1]),
    )
    # Get category mapping dictionary: maps training-contiguous ids back to
    # the test dataset's category ids.
    train_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        cfg.DATASETS.TRAIN[0]
    ).thing_dataset_id_to_contiguous_id
    test_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        args.test_dataset
    ).thing_dataset_id_to_contiguous_id
    # If both dicts are equal or if we are performing out of distribution
    # detection, just flip the test dict.
    cat_mapping_dict = get_train_contiguous_id_to_test_thing_dataset_id_dict(
        cfg,
        args,
        train_thing_dataset_id_to_contiguous_id,
        test_thing_dataset_id_to_contiguous_id,
    )
    # Build predictor
    predictor = build_predictor(cfg)
    test_data_loader = build_detection_test_loader(cfg, dataset_name=args.test_dataset)
    # Prepare GT annos. ACTUAL_TEST_DATASET is read downstream by the
    # evaluation utilities.
    cfg.defrost()
    cfg.ACTUAL_TEST_DATASET = args.test_dataset
    preprocessed_gt_instances = (
        evaluation_utils.get_per_frame_preprocessed_gt_instances(
            cfg, inference_output_dir
        )
    )
    final_output_list = []
    # Example for image_ids to visualize, set to empty list for all images in dataset
    image_ids = [2153,2261,6894,10764,17905,23272]
    with torch.no_grad():
        with tqdm.tqdm(total=len(test_data_loader)) as pbar:
            for idx, input_im in enumerate(test_data_loader):
                image_id = input_im[0]["image_id"]
                # Skip images not in the requested subset (empty list = all).
                if len(image_ids) and image_id not in image_ids:
                    pbar.update(1)
                    continue
                if not args.eval_only:
                    # Run the predictor on this single image.
                    outputs = predictor(input_im)
                    json_instances = instances_to_json(
                        outputs, image_id, cat_mapping_dict
                    )
                    final_output_list.extend(json_instances)
                    # Save instances for this prediction only to temporary dir
                    tmp_inference_dir = os.path.join(inference_output_dir, "tmp")
                    rmtree(tmp_inference_dir, ignore_errors=True)
                    os.makedirs(tmp_inference_dir, exist_ok=True)
                    with open(
                        os.path.join(tmp_inference_dir, "coco_instances_results.json"),
                        "w",
                    ) as fp:
                        json.dump(json_instances, fp, indent=4, separators=(",", ": "))
                    # Load in standard evaluation format
                    # NOTE(review): this preprocessed result is never read
                    # afterwards — looks dead; TODO confirm before removing.
                    preprocessed_predicted_instances = (
                        evaluation_utils.eval_predictions_preprocess(json_instances)
                    )
                else:
                    # Eval-only: reuse previously saved predictions on disk.
                    tmp_inference_dir = inference_output_dir
                    outputs = (
                        evaluation_utils.get_per_frame_preprocessed_pred_instances(
                            cfg, tmp_inference_dir, image_id, 0.0
                        )
                    )
                # Collect this image's ground-truth tensors (per metric key).
                preprocessed_gt_instance = {}
                for k, v in preprocessed_gt_instances.items():
                    for img_id, t in v.items():
                        if img_id == image_id:
                            preprocessed_gt_instance[k] = t
                if len(preprocessed_gt_instance) == 0:
                    preprocessed_gt_instance = None
                class_map = MetadataCatalog[cfg.ACTUAL_TEST_DATASET].get(
                    "thing_classes"
                )
                gt_class_map = MetadataCatalog[cfg.ACTUAL_TEST_DATASET].thing_dataset_id_to_contiguous_id
                predictor.visualize_inference(
                    input_im,
                    outputs,
                    preprocessed_gt_instance,
                    min_allowed_score=0.1,
                    class_map=class_map,
                    gt_class_map=gt_class_map,
                    num_samples=0,
                )
                # Compute metrics for this prediction only
                compute_average_precision.main(args, cfg, tmp_inference_dir, [image_id])
                compute_probabilistic_metrics.main(
                    args,
                    cfg,
                    inference_output_dir=tmp_inference_dir,
                    image_ids=[image_id],
                    min_allowed_score=0.0,
                )
                pbar.update(1)
    # Dump the aggregated detections for all processed images.
    with open(
        os.path.join(inference_output_dir, "coco_instances_results.json"), "w"
    ) as fp:
        json.dump(final_output_list, fp, indent=4, separators=(",", ": "))
    # OOD datasets get their own metric suite; otherwise run AP +
    # probabilistic + calibration metrics over the processed subset.
    if "ood" in args.test_dataset:
        compute_ood_probabilistic_metrics.main(args, cfg)
    else:
        compute_average_precision.main(args, cfg, inference_output_dir, image_ids)
        compute_probabilistic_metrics.main(
            args, cfg, inference_output_dir=inference_output_dir, image_ids=image_ids
        )
        compute_calibration_errors.main(
            args, cfg, inference_output_dir=inference_output_dir
        )
# Script entry point: parse CLI arguments and run main() through detectron2's
# distributed launcher. Inference is forced onto a single GPU/process.
if __name__ == "__main__":
    # Create arg parser
    arg_parser = setup_arg_parser()
    args = arg_parser.parse_args()
    # Support single gpu inference only.
    args.num_gpus = 1  # override any CLI value; this script is single-process
    print("Command Line Args:", args)
    # launch() sets up the (single-)process group and invokes main(args).
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| 7,829 | 36.644231 | 105 | py |
pmb-nll | pmb-nll-main/src/train_net.py | """
Probabilistic Detectron Training Script following Detectron2 training script found at detectron2/tools.
"""
import logging
import os
import random
import sys
import core
# This is very ugly. Essential for now but should be fixed.
sys.path.append(os.path.join(core.top_dir(), "src", "detr"))
# Detectron imports
import detectron2.utils.comm as comm
# DETR imports
from d2.train_net import Trainer as Detr_Trainer
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.data import build_detection_test_loader, build_detection_train_loader
from detectron2.engine import DefaultTrainer, launch
from detectron2.evaluation import COCOEvaluator, DatasetEvaluators, verify_results
from detectron2.modeling import build_model
# Project imports
from core.setup import setup_arg_parser, setup_config
from probabilistic_modeling.modeling_utils import freeze_non_probabilistic_weights
class Trainer(DefaultTrainer):
    """DefaultTrainer specialization for probabilistic detection models."""

    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        """Create the evaluator used for the post-training mAP report.

        Args:
            cfg (CfgNode): a detectron2 CfgNode
            dataset_name (str): registered dataset name

        Returns:
            detectron2 DatasetEvaluators object
        """
        inference_dir = os.path.join(cfg.OUTPUT_DIR, "inference")
        return DatasetEvaluators(
            [COCOEvaluator(dataset_name, cfg, True, inference_dir)]
        )

    @classmethod
    def build_test_loader(cls, cfg, dataset_name):
        """Return the detectron2 DataLoader for the given test dataset."""
        return build_detection_test_loader(cfg, dataset_name)

    @classmethod
    def build_train_loader(cls, cfg):
        """Return the detectron2 DataLoader for the train set."""
        return build_detection_train_loader(cfg)

    @classmethod
    def build_model(cls, cfg):
        """Build the detection model, log its structure, and freeze every
        weight that is not part of the probabilistic components.

        Returns:
            torch.nn.Module: the (partially frozen) detection model.
        """
        net = build_model(cfg)
        logging.getLogger(__name__).info("Model:\n{}".format(net))
        freeze_non_probabilistic_weights(cfg, net)
        return net
class Custom_Detr_Trainer(Detr_Trainer):
    """DETR trainer variant that freezes non-probabilistic weights on build."""

    @classmethod
    def build_model(cls, cfg):
        """Build the DETR model, log its structure, and freeze every weight
        that is not part of the probabilistic components.

        Returns:
            torch.nn.Module: the (partially frozen) detection model.
        """
        net = build_model(cfg)
        logging.getLogger(__name__).info("Model:\n{}".format(net))
        freeze_non_probabilistic_weights(cfg, net)
        return net
def main(args):
    """Train (or, with --eval-only, evaluate) a probabilistic detector.

    Args:
        args (Namespace): parsed command-line arguments.

    Returns:
        Evaluation results dict in eval-only mode, otherwise the value
        returned by trainer.train().
    """
    cfg = setup_config(args, random_seed=args.random_seed)

    # Pick the trainer matching the configured meta-architecture.
    if cfg.MODEL.META_ARCHITECTURE == "ProbabilisticDetr":
        trainer = Custom_Detr_Trainer(cfg)
    else:
        trainer = Trainer(cfg)

    # Eval-only mode: load weights and produce mAP results without training.
    if args.eval_only:
        model = trainer.build_model(cfg)
        checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
        results = trainer.test(cfg, model)
        if comm.is_main_process():
            verify_results(cfg, results)
        return results

    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
# Script entry point: parse CLI arguments and start (possibly distributed)
# training through detectron2's launcher.
if __name__ == "__main__":
    # Create arg parser
    arg_parser = setup_arg_parser()
    args = arg_parser.parse_args()
    print("Command Line Args:", args)
    # Randomize the rendezvous port so several multi-GPU jobs can share a host.
    if args.random_port:
        port = random.randint(1024, 65535)
        args.dist_url = "tcp://127.0.0.1:{}".format(port)
    # launch() spawns one worker per GPU and invokes main(args) in each.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| 4,421 | 28.284768 | 103 | py |
pmb-nll | pmb-nll-main/src/core/setup.py | import os
import random
import time
from shutil import copyfile
# Detectron imports
import detectron2.utils.comm as comm
import numpy as np
import torch
# Detr imports
from d2.detr.config import add_detr_config
from detectron2.config import CfgNode as CN
from detectron2.config import get_cfg
from detectron2.engine import default_argument_parser, default_setup
from detectron2.utils.logger import setup_logger
from probabilistic_modeling.probabilistic_detr import ProbabilisticDetr
from probabilistic_modeling.probabilistic_generalized_rcnn import (
DropoutFastRCNNConvFCHead,
ProbabilisticGeneralizedRCNN,
ProbabilisticROIHeads,
)
from probabilistic_modeling.probabilistic_retinanet import ProbabilisticRetinaNet
# Project imports
import core
from core.datasets.setup_datasets import setup_all_datasets
def setup_arg_parser():
    """
    Sets up argument parser for python scripts.

    Extends detectron2's default parser with probabilistic-detectron options
    covering datasets, inference, evaluation, and single-image inference.

    Returns:
        arg_parser (ArgumentParser): Argument parser updated with probabilistic detectron args.
    """
    # Start from detectron2's default parser (config-file, num-gpus, etc.).
    arg_parser = default_argument_parser()
    # Dataset / reproducibility options (used for both training and inference).
    arg_parser.add_argument(
        "--dataset-dir", type=str, default="", help="path to dataset directory."
    )
    arg_parser.add_argument(
        "--random-seed",
        type=int,
        default=0,
        help="random seed to be used for all scientific computing libraries",
    )
    # Inference arguments, will not be used during training.
    arg_parser.add_argument(
        "--inference-config",
        type=str,
        default="",
        help="Inference parameter: Path to the inference config, which is different from training config. Check readme for more information.",
    )
    arg_parser.add_argument(
        "--test-dataset",
        type=str,
        default="",
        help="Inference parameter: Dataset used for testing. Can be one of the following: 'coco_2017_custom_val', 'openimages_val', 'openimages_ood_val' ",
    )
    arg_parser.add_argument(
        "--image-corruption-level",
        type=int,
        default=0,
        help="Inference parameter: Image corruption level between 0-5. Default is no corruption, level 0.",
    )
    # Evaluation arguments, will not be used during training.
    arg_parser.add_argument(
        "--iou-min",
        type=float,
        default=0.1,
        help="Evaluation parameter: IOU threshold bellow which a detection is considered a false positive.",
    )
    arg_parser.add_argument(
        "--iou-correct",
        type=float,
        default=0.5,
        help="Evaluation parameter: IOU threshold above which a detection is considered a true positive.",
    )
    arg_parser.add_argument(
        "--min-allowed-score",
        type=float,
        default=0.0,
        help="Evaluation parameter:Minimum classification score for which a detection is considered in the evaluation. Set to -1 for optimal F1-score.",
    )
    arg_parser.add_argument(
        "--inference-dir",
        type=str,
        default="",
        help="Directory where inference files will be stored.",
    )
    # Single image inference parameters
    arg_parser.add_argument(
        "--model-ckpt",
        type=str,
        default="",
        help="Single image inference parameter: path to model checkpoint used for inference.",
    )
    arg_parser.add_argument(
        "--image-dir",
        type=str,
        default="",
        help="Single image inference parameter: path to image directory",
    )
    arg_parser.add_argument(
        "--output-dir", type=str, default="", help="Path to where to save outputs"
    )
    arg_parser.add_argument(
        "--random-port",
        action="store_true",
        help="Use a randomized port number to avoid issues with multiple multi-GPU jobs on same machine.",
    )
    return arg_parser
def add_probabilistic_config(cfg):
    """Register probabilistic-detectron config keys with their defaults.

    Mutates the given detectron2 config node in place, adding the
    MODEL.PROBABILISTIC_MODELING and PROBABILISTIC_INFERENCE sub-trees.

    Args:
        cfg (CfgNode): detectron2 configuration node.
    """
    # --- Probabilistic modeling (training-time) configuration -------------
    cfg.MODEL.PROBABILISTIC_MODELING = CN()
    cfg.MODEL.PROBABILISTIC_MODELING.MC_DROPOUT = CN()
    cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS = CN()
    cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS = CN()
    cfg.MODEL.PROBABILISTIC_MODELING.NLL_MAX_NUM_SOLUTIONS = 25
    cfg.MODEL.PROBABILISTIC_MODELING.MATCHING_DISTANCE = "log_prob"
    # Poisson point process (PPP) intensity settings.
    cfg.MODEL.PROBABILISTIC_MODELING.PPP = CN()
    cfg.MODEL.PROBABILISTIC_MODELING.PPP.INTENSITY_TYPE = "uniform"
    cfg.MODEL.PROBABILISTIC_MODELING.PPP.UNIFORM_INTENSITY = -np.inf
    cfg.MODEL.PROBABILISTIC_MODELING.PPP.NUM_GAUSS_MIXTURES = 10
    cfg.MODEL.PROBABILISTIC_MODELING.PPP.COV_TYPE = "diagonal"
    cfg.MODEL.PROBABILISTIC_MODELING.PPP.USE_PREDICTION_MIXTURE = False
    # Which parts of the model get trained.
    cfg.MODEL.TRAIN_ONLY_UNCERTAINTY_PREDS = False
    cfg.MODEL.TRAIN_PPP = True
    cfg.MODEL.TRAIN_ONLY_PPP = False
    # Annealing step for losses that require some form of annealing.
    cfg.MODEL.PROBABILISTIC_MODELING.ANNEALING_STEP = 0
    # Monte-Carlo dropout rate.
    cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE = 0.0
    # Classification-variance and box-covariance loss settings.
    cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NAME = "none"
    cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NUM_SAMPLES = 3
    cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NAME = "none"
    cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.COVARIANCE_TYPE = "diagonal"
    cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NUM_SAMPLES = 1000
    cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE = "gaussian"
    # --- Probabilistic inference (test-time) configuration ----------------
    cfg.PROBABILISTIC_INFERENCE = CN()
    cfg.PROBABILISTIC_INFERENCE.MC_DROPOUT = CN()
    cfg.PROBABILISTIC_INFERENCE.BAYES_OD = CN()
    cfg.PROBABILISTIC_INFERENCE.ENSEMBLES_DROPOUT = CN()
    cfg.PROBABILISTIC_INFERENCE.ENSEMBLES = CN()
    # General inference configs.
    cfg.PROBABILISTIC_INFERENCE.INFERENCE_MODE = "standard_nms"
    cfg.PROBABILISTIC_INFERENCE.MC_DROPOUT.ENABLE = False
    cfg.PROBABILISTIC_INFERENCE.MC_DROPOUT.NUM_RUNS = 1
    cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD = 0.7
    cfg.PROBABILISTIC_INFERENCE.USE_MC_SAMPLING = True
    cfg.PROBABILISTIC_INFERENCE.TREAT_AS_MB = False
    cfg.PROBABILISTIC_INFERENCE.PPP_CONFIDENCE_THRES = 0.0
    cfg.PROBABILISTIC_INFERENCE.LOAD_PPP_FROM_MODEL = False
    # Bayes-OD configs.
    cfg.PROBABILISTIC_INFERENCE.BAYES_OD.BOX_MERGE_MODE = "bayesian_inference"
    cfg.PROBABILISTIC_INFERENCE.BAYES_OD.CLS_MERGE_MODE = "bayesian_inference"
    cfg.PROBABILISTIC_INFERENCE.BAYES_OD.DIRCH_PRIOR = "uniform"
    # Ensembles configs.
    cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE = "pre_nms"
    cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.RANDOM_SEED_NUMS = [0, 1000, 2000, 3000, 4000]
    # 'mixture_of_gaussian' or 'bayesian_inference'
    cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE = "mixture_of_gaussians"
def setup_config(args, random_seed=None, is_testing=False):
    """
    Sets up config node with probabilistic detectron elements. Also sets up a fixed random seed for all scientific
    computing libraries, and sets up all supported datasets as instances of coco.

    Args:
        args (Namespace): args from argument parser
        random_seed (int): set a fixed random seed throughout torch, numpy, and python
        is_testing (bool): set to true if inference. If true function will return an error if checkpoint directory not
            already existing.
    Returns:
        (CfgNode) detectron2 config object
    """
    # Get default detectron config file, then layer DETR and probabilistic keys.
    cfg = get_cfg()
    add_detr_config(cfg)
    add_probabilistic_config(cfg)
    # Update default config file with custom config file
    configs_dir = core.configs_dir()
    args.config_file = os.path.join(configs_dir, args.config_file)
    cfg.merge_from_file(args.config_file)
    # Add dropout rate for faster RCNN box head
    cfg.MODEL.ROI_BOX_HEAD.DROPOUT_RATE = cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE
    # Update config with inference configurations. Only applicable for when in
    # probabilistic inference mode.
    if args.inference_config != "":
        args.inference_config = os.path.join(configs_dir, args.inference_config)
        cfg.merge_from_file(args.inference_config)
    # Create output directory path from the config file's location:
    # .../<dataset_name>/<model_name>/<config>.yaml
    model_name = os.path.split(os.path.split(args.config_file)[0])[-1]
    dataset_name = os.path.split(os.path.split(os.path.split(args.config_file)[0])[0])[
        -1
    ]
    if args.output_dir == "":
        # [:-5] strips the ".yaml" extension from the config file name.
        cfg["OUTPUT_DIR"] = os.path.join(
            core.data_dir(),
            dataset_name,
            model_name,
            os.path.split(args.config_file)[-1][:-5],
            "random_seed_" + str(random_seed),
            time.strftime("%Y%m%d-%H%M%S"),
        )
    else:
        cfg["OUTPUT_DIR"] = args.output_dir
    if is_testing:
        if not os.path.isdir(cfg["OUTPUT_DIR"]):
            # NOTE(review): missing-checkpoint check is disabled; the raise
            # below was deliberately left commented out.
            pass
            """raise NotADirectoryError(
                "Checkpoint directory {} does not exist.".format(
                    cfg['OUTPUT_DIR']))"""
    os.makedirs(cfg["OUTPUT_DIR"], exist_ok=True)
    # copy config file to output directory
    copyfile(
        args.config_file,
        os.path.join(cfg["OUTPUT_DIR"], os.path.split(args.config_file)[-1]),
    )
    # Record the seed in the config (the node is frozen a few lines below).
    cfg["SEED"] = random_seed
    # Set device automatically
    if not torch.cuda.is_available():
        print("[NLLOD]: CUDA not available, using device=cpu")
        cfg.MODEL.DEVICE = "cpu"
    cfg.freeze()
    # Initiate default setup
    default_setup(cfg, args)
    # Setup logger for probabilistic detectron module
    setup_logger(
        output=cfg.OUTPUT_DIR,
        distributed_rank=comm.get_rank(),
        name="Probabilistic Detectron",
    )
    # Set a fixed random seed for all numerical libraries
    if random_seed is not None:
        torch.manual_seed(random_seed)
        np.random.seed(random_seed)
        random.seed(random_seed)
    # Setup datasets: corrupted variants live under image roots suffixed
    # with the corruption level.
    if args.image_corruption_level != 0:
        image_root_corruption_prefix = "_" + str(args.image_corruption_level)
    else:
        image_root_corruption_prefix = None
    dataset_dir = os.path.expanduser(args.dataset_dir)
    # Handle cases when this function has been called multiple times. In that case skip fully.
    # Todo this is very bad practice, should fix.
    # NOTE(review): re-registration raises AssertionError inside detectron2's
    # DatasetCatalog, which is swallowed here on purpose.
    try:
        setup_all_datasets(
            dataset_dir, image_root_corruption_prefix=image_root_corruption_prefix
        )
        return cfg
    except AssertionError:
        return cfg
| 10,529 | 32.858521 | 155 | py |
pmb-nll | pmb-nll-main/src/core/evaluation_tools/scoring_rules.py | import torch
# Module-wide compute device: GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def sigmoid_compute_cls_scores(input_matches, valid_idxs):
    """Compute proper scoring rules for RetinaNet's multilabel classification.

    Args:
        input_matches (dict): dictionary containing input matches
        valid_idxs (tensor): a tensor containing valid element idxs for per-class computation

    Returns:
        output_dict (dict): dictionary containing ignorance and brier score
        (both None when no valid matches exist).
    """
    num_forecasts = input_matches["predicted_cls_probs"][valid_idxs].shape[0]
    gt_scores = input_matches["predicted_score_of_gt_category"][valid_idxs]

    # No valid matches: the scores are undefined.
    if gt_scores.shape[0] == 0:
        return {"ignorance_score_mean": None, "brier_score_mean": None}

    # Build binary probability vectors. Essential for RetinaNet as it uses a
    # multilabel (per-class sigmoid) and not a multiclass formulation.
    pred_probs = torch.stack((gt_scores, 1.0 - gt_scores), dim=1)
    target_probs = torch.stack(
        (torch.ones(num_forecasts), torch.zeros(num_forecasts)), dim=1
    ).to(device)

    # Ignorance (negative log-likelihood of the correct category).
    nll_mean = (-target_probs * torch.log(pred_probs)).sum(1).mean()
    # Classification Brier (probability) score.
    brier_mean = ((pred_probs - target_probs) ** 2).sum(1).mean()

    return {
        "ignorance_score_mean": nll_mean.to(device).tolist(),
        "brier_score_mean": brier_mean.to(device).tolist(),
    }
def softmax_compute_cls_scores(input_matches, valid_idxs):
    """Compute proper scoring rules for faster-rcnn's multiclass classification.

    Args:
        input_matches (dict): dictionary containing input matches
        valid_idxs (tensor): a tensor containing valid element idxs for per-class computation

    Returns:
        output_dict (dict): dictionary containing ignorance and brier score
        (both None when no valid matches exist).
    """
    pred_probs = input_matches["predicted_cls_probs"][valid_idxs]

    # No valid matches: the scores are undefined.
    if pred_probs.shape[0] == 0:
        return {"ignorance_score_mean": None, "brier_score_mean": None}

    if "gt_cat_idxs" in input_matches.keys():
        # One-hot targets from the ground-truth category indices.
        target_probs = torch.nn.functional.one_hot(
            input_matches["gt_cat_idxs"][valid_idxs].type(torch.LongTensor),
            pred_probs.shape[-1],
        ).to(device)
    else:
        # No ground-truth labels available: treat the last (background)
        # class as the correct one.
        target_probs = torch.zeros_like(pred_probs).to(device)
        target_probs[:, -1] = 1.0

    # Ignorance (negative log-likelihood of the correct category).
    nll_mean = (-target_probs * torch.log(pred_probs)).sum(1).mean()
    # Classification probability score (multiclass Brier score).
    brier_mean = ((pred_probs - target_probs) ** 2).sum(1).mean()

    return {
        "ignorance_score_mean": nll_mean.to(device).tolist(),
        "brier_score_mean": brier_mean.to(device).tolist(),
    }
def compute_reg_scores(input_matches, valid_idxs):
    """
    Computes proper scoring rule for regression results.

    Args:
        input_matches (dict): dictionary containing input matches
        valid_idxs (tensor): a tensor containing valid element idxs for per-class computation

    Returns:
        output_dict (dict): dictionary containing ignorance and energy scores.
    """
    output_dict = {}
    predicted_box_means = input_matches["predicted_box_means"][valid_idxs]
    predicted_box_covars = input_matches["predicted_box_covariances"][valid_idxs]
    gt_box_means = input_matches["gt_box_means"][valid_idxs]
    # If no valid idxs, do not perform computation
    if predicted_box_means.shape[0] == 0:
        output_dict.update(
            {
                "ignorance_score_mean": None,
                "mean_squared_error": None,
                "energy_score_mean": None,
            }
        )
        return output_dict
    # Compute negative log likelihood
    # Note: Juggling between CPU and GPU is due to magma library unresolvable issue, where cuda illegal memory access
    # error is returned arbitrarily depending on the state of the GPU. This is only a problem for the
    # torch.distributions code.
    # Pytorch unresolved issue from 2019:
    # https://github.com/pytorch/pytorch/issues/21819
    # The 1e-2 diagonal jitter keeps the covariance positive definite.
    predicted_multivariate_normal_dists = (
        torch.distributions.multivariate_normal.MultivariateNormal(
            predicted_box_means.to("cpu"),
            predicted_box_covars.to("cpu")
            + 1e-2 * torch.eye(predicted_box_covars.shape[2]).to("cpu"),
        )
    )
    # Move every cached attribute of the distribution back to the target
    # device so log_prob/sample run there.
    predicted_multivariate_normal_dists.loc = (
        predicted_multivariate_normal_dists.loc.to(device)
    )
    predicted_multivariate_normal_dists.scale_tril = (
        predicted_multivariate_normal_dists.scale_tril.to(device)
    )
    predicted_multivariate_normal_dists._unbroadcasted_scale_tril = (
        predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(device)
    )
    predicted_multivariate_normal_dists.covariance_matrix = (
        predicted_multivariate_normal_dists.covariance_matrix.to(device)
    )
    predicted_multivariate_normal_dists.precision_matrix = (
        predicted_multivariate_normal_dists.precision_matrix.to(device)
    )
    # Compute negative log probability
    negative_log_prob = -predicted_multivariate_normal_dists.log_prob(gt_box_means)
    negative_log_prob_mean = negative_log_prob.mean()
    output_dict.update(
        {"ignorance_score_mean": negative_log_prob_mean.to(device).tolist()}
    )
    # Compute mean square error
    mean_squared_error = ((predicted_box_means - gt_box_means) ** 2).mean()
    output_dict.update({"mean_squared_error": mean_squared_error.to(device).tolist()})
    # Energy Score. Monte-Carlo estimate from 1001 samples, using consecutive
    # samples as the independent pair in the second term.
    sample_set = predicted_multivariate_normal_dists.sample((1001,)).to(device)
    sample_set_1 = sample_set[:-1]
    sample_set_2 = sample_set[1:]
    energy_score = torch.norm((sample_set_1 - gt_box_means), dim=2).mean(
        0
    ) - 0.5 * torch.norm((sample_set_1 - sample_set_2), dim=2).mean(0)
    energy_score_mean = energy_score.mean()
    output_dict.update({"energy_score_mean": energy_score_mean.to(device).tolist()})
    return output_dict
def compute_reg_scores_fn(false_negatives, valid_idxs):
    """Compute proper scoring rules for regression false positives.

    Args:
        false_negatives (dict): dictionary containing false_negatives
        valid_idxs (tensor): a tensor containing valid element idxs for per-class computation

    Returns:
        output_dict (dict): dictionary containing false positives ignorance and energy scores.
    """
    box_means = false_negatives["predicted_box_means"][valid_idxs]
    box_covars = false_negatives["predicted_box_covariances"][valid_idxs]

    # Nothing to score for this class.
    if box_means.shape[0] == 0:
        return {"total_entropy_mean": None, "fp_energy_score_mean": None}

    # Build the distributions on CPU: torch.distributions can hit an
    # unresolved magma/CUDA illegal-memory-access issue on GPU
    # (https://github.com/pytorch/pytorch/issues/21819). The 1e-2 diagonal
    # jitter keeps the covariance positive definite.
    dists = torch.distributions.multivariate_normal.MultivariateNormal(
        box_means.to("cpu"),
        box_covars.to("cpu") + 1e-2 * torch.eye(box_covars.shape[2]).to("cpu"),
    )
    # Move every cached attribute of the distribution back to the target device.
    dists.loc = dists.loc.to(device)
    dists.scale_tril = dists.scale_tril.to(device)
    dists._unbroadcasted_scale_tril = dists._unbroadcasted_scale_tril.to(device)
    dists.covariance_matrix = dists.covariance_matrix.to(device)
    dists.precision_matrix = dists.precision_matrix.to(device)

    output_dict = {"total_entropy_mean": dists.entropy().mean().to(device).tolist()}

    # Energy score. Monte-Carlo estimate from 1001 samples, pairing
    # consecutive samples for the pairwise-distance term.
    samples = dists.sample((1001,)).to(device)
    fp_energy_mean = torch.norm(samples[:-1] - samples[1:], dim=2).mean(0).mean()
    output_dict["fp_energy_score_mean"] = fp_energy_mean.to(device).tolist()
    return output_dict
| 9,442 | 37.542857 | 117 | py |
pmb-nll | pmb-nll-main/src/core/evaluation_tools/evaluation_utils.py | import json
import os
from collections import defaultdict
import numpy as np
import torch
import tqdm
# Project imports
from core.datasets import metadata
# Detectron imports
from detectron2.data import MetadataCatalog
from detectron2.structures import Boxes, Instances, pairwise_iou
# Module-wide compute device: GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def eval_predictions_preprocess(
    predicted_instances, min_allowed_score=0.0, is_odd=False
):
    """Group per-instance detection results by image id as device tensors.

    Args:
        predicted_instances (list[dict]): COCO-style result dicts with at
            least "image_id", "bbox" (XYWH), "cls_prob", "bbox_covar",
            "category_id", and "image_size"; optionally "ppp".
        min_allowed_score (float): detections whose max class probability is
            below this threshold are dropped.
        is_odd (bool): when True, keep detections with category_id == -1
            (out-of-distribution evaluation).

    Returns:
        dict: per-image-id tensors of XYXY boxes, class probabilities,
        4x4 box covariance matrices, PPP weights, and image sizes.
    """
    predicted_boxes = defaultdict(torch.Tensor)
    predicted_cls_probs = defaultdict(torch.Tensor)
    predicted_covar_mats = defaultdict(torch.Tensor)
    ppp_weights = defaultdict(dict)
    image_sizes = defaultdict(list)
    for predicted_instance in predicted_instances:
        # Remove predictions with undefined category_id. This is used when the training and
        # inference datasets come from different data such as COCO-->VOC or COCO-->OpenImages.
        # Only happens if not ODD dataset, else all detections will be removed.
        # 81 entries presumably means 80 classes + background; the background
        # score is stripped — TODO confirm against the producing predictor.
        if len(predicted_instance["cls_prob"]) == 81:
            cls_prob = predicted_instance["cls_prob"][:-1]
        else:
            cls_prob = predicted_instance["cls_prob"]
        if not is_odd:
            skip_test = (predicted_instance["category_id"] == -1) or (
                np.array(cls_prob).max(0) < min_allowed_score
            )
        else:
            skip_test = np.array(cls_prob).max(0) < min_allowed_score
        if skip_test:
            continue
        # Convert XYWH box to XYXY.
        box_inds = predicted_instance["bbox"]
        box_inds = np.array(
            [
                box_inds[0],
                box_inds[1],
                box_inds[0] + box_inds[2],
                box_inds[1] + box_inds[3],
            ]
        )
        predicted_boxes[predicted_instance["image_id"]] = torch.cat(
            (
                predicted_boxes[predicted_instance["image_id"]].to(device),
                torch.as_tensor([box_inds], dtype=torch.float32).to(device),
            )
        )
        predicted_cls_probs[predicted_instance["image_id"]] = torch.cat(
            (
                predicted_cls_probs[predicted_instance["image_id"]].to(device),
                torch.as_tensor(
                    [predicted_instance["cls_prob"]], dtype=torch.float32
                ).to(device),
            )
        )
        # Transform the covariance from the XYWH parameterization to XYXY
        # (same linear map T as the box conversion above): cov' = T cov T^T.
        box_covar = np.array(predicted_instance["bbox_covar"])
        transformation_mat = np.array(
            [[1.0, 0, 0, 0], [0, 1.0, 0, 0], [1.0, 0, 1.0, 0], [0, 1.0, 0.0, 1.0]]
        )
        cov_pred = np.matmul(
            np.matmul(transformation_mat, box_covar), transformation_mat.T
        ).tolist()
        predicted_covar_mats[predicted_instance["image_id"]] = torch.cat(
            (
                predicted_covar_mats[predicted_instance["image_id"]].to(device),
                torch.as_tensor([cov_pred], dtype=torch.float32).to(device),
            )
        )
        # NOTE(review): ppp_weights values are a dict of tensors when "ppp"
        # is present but a bare NaN tensor otherwise — consumers must handle
        # both shapes.
        if "ppp" in predicted_instance:
            ppp_dict = {
                k: torch.as_tensor(v, dtype=torch.float32).to(device)
                for k, v in predicted_instance["ppp"].items()
            }
            ppp_weights[predicted_instance["image_id"]] = ppp_dict
        else:
            ppp_weights[predicted_instance["image_id"]] = torch.as_tensor(np.nan).to(
                device
            )
        image_sizes[predicted_instance["image_id"]] = predicted_instance["image_size"]
    return dict(
        {
            "predicted_boxes": predicted_boxes,
            "predicted_cls_probs": predicted_cls_probs,
            "predicted_covar_mats": predicted_covar_mats,
            "ppp_weights": ppp_weights,
            "image_size": image_sizes,
        }
    )
def eval_gt_preprocess(gt_instances):
    """Group ground-truth annotations by image id as device tensors.

    Args:
        gt_instances (list[dict]): COCO-style annotation dicts with at least
            "image_id", "bbox" (XYWH) and "category_id"; optionally
            "is_truncated" / "is_occluded".

    Returns:
        dict: per-image-id tensors of XYXY boxes and category ids, plus
        truncation/occlusion flags when the annotations carry them.
    """
    boxes_per_image = defaultdict(torch.Tensor)
    cats_per_image = defaultdict(torch.Tensor)
    truncated_per_image = defaultdict(torch.Tensor)
    occluded_per_image = defaultdict(torch.Tensor)

    for ann in gt_instances:
        img_id = ann["image_id"]
        # Convert XYWH box to XYXY.
        bb = ann["bbox"]
        xyxy = np.array([bb[0], bb[1], bb[0] + bb[2], bb[1] + bb[3]])
        boxes_per_image[img_id] = torch.cat(
            (
                boxes_per_image[img_id].to(device),
                torch.as_tensor([xyxy], dtype=torch.float32).to(device),
            )
        )
        cats_per_image[img_id] = torch.cat(
            (
                cats_per_image[img_id].to(device),
                torch.as_tensor([[ann["category_id"]]], dtype=torch.float32).to(
                    device
                ),
            )
        )
        # Truncation/occlusion flags only exist for some datasets.
        if "is_truncated" in ann.keys():
            truncated_per_image[img_id] = torch.cat(
                (
                    truncated_per_image[img_id].to(device),
                    torch.as_tensor([ann["is_truncated"]], dtype=torch.float32).to(
                        device
                    ),
                )
            )
            occluded_per_image[img_id] = torch.cat(
                (
                    occluded_per_image[img_id].to(device),
                    torch.as_tensor([ann["is_occluded"]], dtype=torch.float32).to(
                        device
                    ),
                )
            )

    result = {"gt_boxes": boxes_per_image, "gt_cat_idxs": cats_per_image}
    if "is_truncated" in gt_instances[0].keys():
        result["gt_is_truncated"] = truncated_per_image
        result["gt_is_occluded"] = occluded_per_image
    return result
def get_matched_results(
    cfg, inference_output_dir, iou_min=0.1, iou_correct=0.7, min_allowed_score=0.0
):
    """Return prediction/ground-truth matching results, cached on disk.

    A previously saved ``matched_results_{iou_min}_{iou_correct}_{score}.pth``
    file is loaded when available; on a cache miss the per-frame preprocessed
    instances are gathered, matched with
    ``match_predictions_to_groundtruth`` and the result is saved before
    being returned.
    """
    cache_path = os.path.join(
        inference_output_dir,
        "matched_results_{}_{}_{}.pth".format(
            iou_min, iou_correct, min_allowed_score
        ),
    )
    try:
        return torch.load(cache_path, map_location=device)
    except FileNotFoundError:
        pass
    (
        preprocessed_predicted_instances,
        preprocessed_gt_instances,
    ) = get_per_frame_preprocessed_instances(
        cfg, inference_output_dir, min_allowed_score
    )
    # Truncation/occlusion data exists only for some datasets; fall back to
    # None when the preprocessed ground truth does not carry it.
    matched_results = match_predictions_to_groundtruth(
        preprocessed_predicted_instances["predicted_boxes"],
        preprocessed_predicted_instances["predicted_cls_probs"],
        preprocessed_predicted_instances["predicted_covar_mats"],
        preprocessed_gt_instances["gt_boxes"],
        preprocessed_gt_instances["gt_cat_idxs"],
        iou_min,
        iou_correct,
        is_truncated=preprocessed_gt_instances.get("gt_is_truncated"),
        is_occluded=preprocessed_gt_instances.get("gt_is_occluded"),
    )
    torch.save(matched_results, cache_path)
    return matched_results
def get_per_frame_preprocessed_gt_instances(cfg, inference_output_dir):
    """Load (or build and cache) per-image preprocessed ground truth.

    Annotations are read from the json file registered for
    ``cfg.ACTUAL_TEST_DATASET`` in ``MetadataCatalog`` (its ``annotations``
    list), preprocessed with ``eval_gt_preprocess``, and cached as
    ``preprocessed_gt_instances.pth`` in ``inference_output_dir``.
    """
    meta_catalog = MetadataCatalog.get(cfg.ACTUAL_TEST_DATASET)
    cache_file = os.path.join(inference_output_dir, "preprocessed_gt_instances.pth")
    # Process GT
    print("Began pre-processing ground truth annotations...")
    try:
        preprocessed_gt_instances = torch.load(cache_file, map_location=device)
    except FileNotFoundError:
        # Context manager closes the annotation file deterministically
        # (the original leaked the handle via json.load(open(...))).
        with open(meta_catalog.json_file, "r") as f:
            gt_info = json.load(f)
        gt_instances = gt_info["annotations"]
        preprocessed_gt_instances = eval_gt_preprocess(gt_instances)
        torch.save(preprocessed_gt_instances, cache_file)
    print("Done!")
    return preprocessed_gt_instances
def get_per_frame_preprocessed_pred_instances(
    cfg, inference_output_dir, img_id, min_allowed_score=0.0
):
    """Build a detectron2 ``Instances`` object for one image's predictions.

    Reads ``coco_instances_results.json`` from ``inference_output_dir``,
    preprocesses all predictions with ``eval_predictions_preprocess`` and
    extracts the entry for ``img_id``.

    Args:
        cfg: unused here; kept for interface consistency with the other
            ``get_per_frame_*`` helpers.
        inference_output_dir: directory holding the results json.
        img_id: image id whose predictions are extracted.
        min_allowed_score: score threshold forwarded to
            ``eval_predictions_preprocess``.

    Returns:
        ``Instances`` with boxes, class probabilities, box covariances,
        per-detection scores and argmax classes.
    """
    print("Began pre-processing predicted instances...")
    prediction_file_name = os.path.join(
        inference_output_dir, "coco_instances_results.json"
    )
    # Context manager closes the results file deterministically
    # (the original leaked the handle via json.load(open(...))).
    with open(prediction_file_name) as f:
        predicted_instances = json.load(f)
    preprocessed_predicted_instances = eval_predictions_preprocess(
        predicted_instances, min_allowed_score
    )
    img_size = preprocessed_predicted_instances["image_size"][img_id]
    pred_boxes = Boxes(preprocessed_predicted_instances["predicted_boxes"][img_id])
    pred_cls_probs = preprocessed_predicted_instances["predicted_cls_probs"][img_id]
    pred_boxes_covariance = preprocessed_predicted_instances["predicted_covar_mats"][
        img_id
    ]
    # Detection score/class come from the max of the class-probability rows.
    scores, pred_classes = pred_cls_probs.max(dim=1)
    instances = Instances(
        image_size=img_size,
        pred_boxes=pred_boxes,
        pred_cls_probs=pred_cls_probs,
        pred_boxes_covariance=pred_boxes_covariance,
        scores=scores,
        pred_classes=pred_classes,
    )
    print("Done!")
    return instances
def get_per_frame_preprocessed_instances(
    cfg, inference_output_dir, min_allowed_score=0.0
):
    """Load (or build and cache) preprocessed predictions and ground truth.

    Ground truth is preprocessed with ``eval_gt_preprocess`` and predictions
    with ``eval_predictions_preprocess``; both results are cached as ``.pth``
    files inside ``inference_output_dir`` so later runs skip the json
    parsing.

    Args:
        cfg: config object; only ``cfg.ACTUAL_TEST_DATASET`` is read.
        inference_output_dir: directory holding ``coco_instances_results.json``
            and used for the ``.pth`` caches.
        min_allowed_score: threshold forwarded to
            ``eval_predictions_preprocess`` (also part of the cache name).

    Returns:
        Tuple ``(preprocessed_predicted_instances, preprocessed_gt_instances)``.
    """
    prediction_file_name = os.path.join(
        inference_output_dir, "coco_instances_results.json"
    )
    meta_catalog = MetadataCatalog.get(cfg.ACTUAL_TEST_DATASET)
    # Hoist cache paths so the load and save sites cannot drift apart.
    gt_cache_file = os.path.join(
        inference_output_dir, "preprocessed_gt_instances.pth"
    )
    pred_cache_file = os.path.join(
        inference_output_dir,
        "preprocessed_predicted_instances_{}.pth".format(min_allowed_score),
    )
    # Process GT
    print("Began pre-processing ground truth annotations...")
    try:
        preprocessed_gt_instances = torch.load(gt_cache_file, map_location=device)
    except FileNotFoundError:
        # Context manager closes the annotation file deterministically
        # (the original leaked the handle via json.load(open(...))).
        with open(meta_catalog.json_file, "r") as f:
            gt_info = json.load(f)
        gt_instances = gt_info["annotations"]
        preprocessed_gt_instances = eval_gt_preprocess(gt_instances)
        torch.save(preprocessed_gt_instances, gt_cache_file)
    print("Done!")
    # Process predictions
    print("Began pre-processing predicted instances...")
    try:
        preprocessed_predicted_instances = torch.load(
            pred_cache_file, map_location=device
        )
    except FileNotFoundError:
        with open(prediction_file_name) as f:
            predicted_instances = json.load(f)
        preprocessed_predicted_instances = eval_predictions_preprocess(
            predicted_instances, min_allowed_score
        )
        torch.save(preprocessed_predicted_instances, pred_cache_file)
    print("Done!")
    return preprocessed_predicted_instances, preprocessed_gt_instances
def match_predictions_to_groundtruth(
    predicted_box_means,
    predicted_cls_probs,
    predicted_box_covariances,
    gt_box_means,
    gt_cat_idxs,
    iou_min=0.1,
    iou_correct=0.7,
    is_truncated=None,
    is_occluded=None,
):
    """Partition per-image detections against ground truth into five sets.

    All inputs are dicts keyed by image id. Per image: detections with
    IOU >= ``iou_correct`` to a ground-truth box become true positives
    (the highest-scoring one per ground truth) or duplicates (the rest);
    detections with ``iou_min`` < IOU < 0.5 to a not-yet-matched ground
    truth are localization errors; detections with IOU <= ``iou_min`` to
    every ground truth are false positives; ground-truth boxes never
    assigned a detection are false negatives. Truncation/occlusion flags
    are recorded only when both ``is_truncated`` and ``is_occluded`` are
    provided.

    Returns:
        Dict with keys ``true_positives``, ``localization_errors``,
        ``duplicates``, ``false_positives`` and ``false_negatives``; each
        value is a dict of tensors concatenated over all images
        (``false_negatives`` additionally keeps a per-image ``count`` list
        of (image_id, num_missed_gt) tuples).
    """
    # Flag to know if truncation and occlusion should be saved:
    trunc_occ_flag = is_truncated is not None and is_occluded is not None
    # Accumulators: results from every image are concatenated into these
    # flat tensors, so row order follows the iteration over image ids.
    true_positives = dict(
        {
            "predicted_box_means": torch.Tensor().to(device),
            "predicted_box_covariances": torch.Tensor().to(device),
            "predicted_cls_probs": torch.Tensor().to(device),
            "gt_box_means": torch.Tensor().to(device),
            "gt_cat_idxs": torch.Tensor().to(device),
            "iou_with_ground_truth": torch.Tensor().to(device),
            "is_truncated": torch.Tensor().to(device),
            "is_occluded": torch.Tensor().to(device),
        }
    )
    localization_errors = dict(
        {
            "predicted_box_means": torch.Tensor().to(device),
            "predicted_box_covariances": torch.Tensor().to(device),
            "predicted_cls_probs": torch.Tensor().to(device),
            "gt_box_means": torch.Tensor().to(device),
            "gt_cat_idxs": torch.Tensor().to(device),
            "iou_with_ground_truth": torch.Tensor().to(device),
            "is_truncated": torch.Tensor().to(device),
            "is_occluded": torch.Tensor().to(device),
        }
    )
    duplicates = dict(
        {
            "predicted_box_means": torch.Tensor().to(device),
            "predicted_box_covariances": torch.Tensor().to(device),
            "predicted_cls_probs": torch.Tensor().to(device),
            "gt_box_means": torch.Tensor().to(device),
            "gt_cat_idxs": torch.Tensor().to(device),
            "iou_with_ground_truth": torch.Tensor().to(device),
            "is_truncated": torch.Tensor().to(device),
            "is_occluded": torch.Tensor().to(device),
        }
    )
    # False positives have no associated ground truth, so no gt_* keys.
    false_positives = dict(
        {
            "predicted_box_means": torch.Tensor().to(device),
            "predicted_box_covariances": torch.Tensor().to(device),
            "predicted_cls_probs": torch.Tensor().to(device),
        }
    )
    false_negatives = dict(
        {
            "gt_box_means": torch.Tensor().to(device),
            "gt_cat_idxs": torch.Tensor().to(device),
            "is_truncated": torch.Tensor().to(device),
            "is_occluded": torch.Tensor().to(device),
            "count": list(),
        }
    )
    with tqdm.tqdm(total=len(predicted_box_means)) as pbar:
        for key in predicted_box_means.keys():
            pbar.update(1)
            # Check if gt available, if not all detections go to false
            # positives
            if key not in gt_box_means.keys():
                false_positives["predicted_box_means"] = torch.cat(
                    (false_positives["predicted_box_means"], predicted_box_means[key])
                )
                false_positives["predicted_cls_probs"] = torch.cat(
                    (false_positives["predicted_cls_probs"], predicted_cls_probs[key])
                )
                false_positives["predicted_box_covariances"] = torch.cat(
                    (
                        false_positives["predicted_box_covariances"],
                        predicted_box_covariances[key],
                    )
                )
                # No ground truth in this frame, so zero missed gt boxes.
                false_negatives["count"].append((key, 0))
                continue
            # Compute iou between gt boxes and all predicted boxes in frame
            frame_gt_boxes = Boxes(gt_box_means[key])
            frame_predicted_boxes = Boxes(predicted_box_means[key])
            num_predictions_in_frame = frame_predicted_boxes.tensor.shape[0]
            # match_iou has shape (num_gt, num_predictions).
            match_iou = pairwise_iou(frame_gt_boxes, frame_predicted_boxes)
            # False positives are detections that have an iou < match iou with
            # any ground truth object.
            false_positive_idxs = (match_iou <= iou_min).all(0)
            false_positives["predicted_box_means"] = torch.cat(
                (
                    false_positives["predicted_box_means"],
                    predicted_box_means[key][false_positive_idxs],
                )
            )
            false_positives["predicted_cls_probs"] = torch.cat(
                (
                    false_positives["predicted_cls_probs"],
                    predicted_cls_probs[key][false_positive_idxs],
                )
            )
            false_positives["predicted_box_covariances"] = torch.cat(
                (
                    false_positives["predicted_box_covariances"],
                    predicted_box_covariances[key][false_positive_idxs],
                )
            )
            num_fp_in_frame = false_positive_idxs.sum(0)
            # True positives are any detections with match iou > iou correct. We need to separate these detections to
            # True positive and duplicate set. The true positive detection is the detection assigned the highest score
            # by the neural network.
            true_positive_idxs = torch.nonzero(match_iou >= iou_correct, as_tuple=False)
            # Setup tensors to allow assignment of detections only once.
            processed_gt = torch.tensor([]).type(torch.LongTensor).to(device)
            predictions_idxs_processed = (
                torch.tensor([]).type(torch.LongTensor).to(device)
            )
            # Iterate ground-truth boxes; i indexes rows of match_iou.
            for i in torch.arange(frame_gt_boxes.tensor.shape[0]):
                # Check if true positive has been previously assigned to a ground truth box and remove it if this is
                # the case. Very rare occurrence but need to handle it
                # nevertheless.
                prediction_idxs = true_positive_idxs[true_positive_idxs[:, 0] == i][
                    :, 1
                ]
                non_valid_idxs = torch.nonzero(
                    predictions_idxs_processed[..., None] == prediction_idxs,
                    as_tuple=False,
                )
                if non_valid_idxs.shape[0] > 0:
                    # Mark already-consumed detections with -1, then drop them.
                    prediction_idxs[non_valid_idxs[:, 1]] = -1
                    prediction_idxs = prediction_idxs[prediction_idxs != -1]
                if prediction_idxs.shape[0] > 0:
                    # If there is a prediction attached to gt, count it as
                    # processed.
                    processed_gt = torch.cat(
                        (processed_gt, i.unsqueeze(0).to(processed_gt.device))
                    )
                    predictions_idxs_processed = torch.cat(
                        (predictions_idxs_processed, prediction_idxs)
                    )
                    current_matches_predicted_cls_probs = predicted_cls_probs[key][
                        prediction_idxs
                    ]
                    # Rank the matched detections by their max class score.
                    max_score, _ = torch.max(current_matches_predicted_cls_probs, 1)
                    _, max_idxs = max_score.topk(max_score.shape[0])
                    if max_idxs.shape[0] > 1:
                        max_idx = max_idxs[0]
                        duplicate_idxs = max_idxs[1:]
                    else:
                        max_idx = max_idxs
                        duplicate_idxs = torch.empty(0).to(device)
                    current_matches_predicted_box_means = predicted_box_means[key][
                        prediction_idxs
                    ]
                    current_matches_predicted_box_covariances = (
                        predicted_box_covariances[key][prediction_idxs]
                    )
                    # Highest scoring detection goes to true positives
                    true_positives["predicted_box_means"] = torch.cat(
                        (
                            true_positives["predicted_box_means"],
                            current_matches_predicted_box_means[
                                max_idx : max_idx + 1, :
                            ],
                        )
                    )
                    true_positives["predicted_cls_probs"] = torch.cat(
                        (
                            true_positives["predicted_cls_probs"],
                            current_matches_predicted_cls_probs[
                                max_idx : max_idx + 1, :
                            ],
                        )
                    )
                    true_positives["predicted_box_covariances"] = torch.cat(
                        (
                            true_positives["predicted_box_covariances"],
                            current_matches_predicted_box_covariances[
                                max_idx : max_idx + 1, :
                            ],
                        )
                    )
                    true_positives["gt_box_means"] = torch.cat(
                        (
                            true_positives["gt_box_means"],
                            gt_box_means[key][i : i + 1, :],
                        )
                    )
                    true_positives["gt_cat_idxs"] = torch.cat(
                        (true_positives["gt_cat_idxs"], gt_cat_idxs[key][i : i + 1, :])
                    )
                    if trunc_occ_flag:
                        true_positives["is_truncated"] = torch.cat(
                            (
                                true_positives["is_truncated"],
                                is_truncated[key][i : i + 1],
                            )
                        )
                        true_positives["is_occluded"] = torch.cat(
                            (true_positives["is_occluded"], is_occluded[key][i : i + 1])
                        )
                    true_positives["iou_with_ground_truth"] = torch.cat(
                        (
                            true_positives["iou_with_ground_truth"],
                            match_iou[i, prediction_idxs][max_idx : max_idx + 1],
                        )
                    )
                    # Lower scoring redundant detections go to duplicates
                    if duplicate_idxs.shape[0] > 1:
                        duplicates["predicted_box_means"] = torch.cat(
                            (
                                duplicates["predicted_box_means"],
                                current_matches_predicted_box_means[duplicate_idxs, :],
                            )
                        )
                        duplicates["predicted_cls_probs"] = torch.cat(
                            (
                                duplicates["predicted_cls_probs"],
                                current_matches_predicted_cls_probs[duplicate_idxs, :],
                            )
                        )
                        duplicates["predicted_box_covariances"] = torch.cat(
                            (
                                duplicates["predicted_box_covariances"],
                                current_matches_predicted_box_covariances[
                                    duplicate_idxs, :
                                ],
                            )
                        )
                        duplicates["gt_box_means"] = torch.cat(
                            (
                                duplicates["gt_box_means"],
                                gt_box_means[key][
                                    np.repeat(i, duplicate_idxs.shape[0]), :
                                ],
                            )
                        )
                        duplicates["gt_cat_idxs"] = torch.cat(
                            (
                                duplicates["gt_cat_idxs"],
                                gt_cat_idxs[key][
                                    np.repeat(i, duplicate_idxs.shape[0]), :
                                ],
                            )
                        )
                        if trunc_occ_flag:
                            duplicates["is_truncated"] = torch.cat(
                                (
                                    duplicates["is_truncated"],
                                    is_truncated[key][
                                        np.repeat(i, duplicate_idxs.shape[0])
                                    ],
                                )
                            )
                            duplicates["is_occluded"] = torch.cat(
                                (
                                    duplicates["is_occluded"],
                                    is_occluded[key][
                                        np.repeat(i, duplicate_idxs.shape[0])
                                    ],
                                )
                            )
                        duplicates["iou_with_ground_truth"] = torch.cat(
                            (
                                duplicates["iou_with_ground_truth"],
                                match_iou[i, prediction_idxs][duplicate_idxs],
                            )
                        )
                    elif duplicate_idxs.shape[0] == 1:
                        # Special case when only one duplicate exists, required to
                        # index properly for torch.cat
                        duplicates["predicted_box_means"] = torch.cat(
                            (
                                duplicates["predicted_box_means"],
                                current_matches_predicted_box_means[
                                    duplicate_idxs : duplicate_idxs + 1, :
                                ],
                            )
                        )
                        duplicates["predicted_cls_probs"] = torch.cat(
                            (
                                duplicates["predicted_cls_probs"],
                                current_matches_predicted_cls_probs[
                                    duplicate_idxs : duplicate_idxs + 1, :
                                ],
                            )
                        )
                        duplicates["predicted_box_covariances"] = torch.cat(
                            (
                                duplicates["predicted_box_covariances"],
                                current_matches_predicted_box_covariances[
                                    duplicate_idxs : duplicate_idxs + 1, :
                                ],
                            )
                        )
                        duplicates["gt_box_means"] = torch.cat(
                            (
                                duplicates["gt_box_means"],
                                gt_box_means[key][i : i + 1, :],
                            )
                        )
                        duplicates["gt_cat_idxs"] = torch.cat(
                            (duplicates["gt_cat_idxs"], gt_cat_idxs[key][i : i + 1, :])
                        )
                        if trunc_occ_flag:
                            duplicates["is_truncated"] = torch.cat(
                                (
                                    duplicates["is_truncated"],
                                    is_truncated[key][i : i + 1],
                                )
                            )
                            duplicates["is_occluded"] = torch.cat(
                                (duplicates["is_occluded"], is_occluded[key][i : i + 1])
                            )
                        duplicates["iou_with_ground_truth"] = torch.cat(
                            (
                                duplicates["iou_with_ground_truth"],
                                match_iou[i, prediction_idxs][
                                    duplicate_idxs : duplicate_idxs + 1
                                ],
                            )
                        )
            num_tp_dup_in_frame = predictions_idxs_processed.shape[0]
            # Process localization errors. Localization errors are detections with iou < 0.5 with any ground truth.
            # Mask out processed true positives/duplicates so they are not
            # re-associated with another gt
            # ToDo Localization Errors and False Positives are constant, do not change. We could generate them only
            # once.
            match_iou[:, true_positive_idxs[:, 1]] *= 0.0
            localization_errors_idxs = torch.nonzero(
                (match_iou > iou_min) & (match_iou < 0.5), as_tuple=False
            )
            # Setup tensors to allow assignment of detections only once.
            processed_localization_errors = (
                torch.tensor([]).type(torch.LongTensor).to(device)
            )
            for localization_error_idx in localization_errors_idxs[:, 1]:
                # If localization error has been processed, skip iteration.
                if (processed_localization_errors == localization_error_idx).any():
                    continue
                # For every localization error, assign the ground truth with
                # highest IOU.
                gt_loc_error_idxs = localization_errors_idxs[
                    localization_errors_idxs[:, 1] == localization_error_idx
                ]
                ious_with_gts = match_iou[
                    gt_loc_error_idxs[:, 0], gt_loc_error_idxs[:, 1]
                ]
                gt_loc_error_idxs = gt_loc_error_idxs[:, 0]
                # Choose the gt with the largest IOU with localization error
                if gt_loc_error_idxs.shape[0] > 1:
                    sorted_idxs = ious_with_gts.sort(descending=True)[1]
                    gt_loc_error_idxs = gt_loc_error_idxs[
                        sorted_idxs[0] : sorted_idxs[0] + 1
                    ]
                processed_gt = torch.cat((processed_gt, gt_loc_error_idxs))
                localization_errors["predicted_box_means"] = torch.cat(
                    (
                        localization_errors["predicted_box_means"],
                        predicted_box_means[key][
                            localization_error_idx : localization_error_idx + 1, :
                        ],
                    )
                )
                localization_errors["predicted_cls_probs"] = torch.cat(
                    (
                        localization_errors["predicted_cls_probs"],
                        predicted_cls_probs[key][
                            localization_error_idx : localization_error_idx + 1, :
                        ],
                    )
                )
                localization_errors["predicted_box_covariances"] = torch.cat(
                    (
                        localization_errors["predicted_box_covariances"],
                        predicted_box_covariances[key][
                            localization_error_idx : localization_error_idx + 1, :
                        ],
                    )
                )
                localization_errors["gt_box_means"] = torch.cat(
                    (
                        localization_errors["gt_box_means"],
                        gt_box_means[key][gt_loc_error_idxs : gt_loc_error_idxs + 1, :],
                    )
                )
                localization_errors["gt_cat_idxs"] = torch.cat(
                    (
                        localization_errors["gt_cat_idxs"],
                        gt_cat_idxs[key][gt_loc_error_idxs : gt_loc_error_idxs + 1],
                    )
                )
                if trunc_occ_flag:
                    localization_errors["is_truncated"] = torch.cat(
                        (
                            localization_errors["is_truncated"],
                            is_truncated[key][
                                gt_loc_error_idxs : gt_loc_error_idxs + 1
                            ],
                        )
                    )
                    localization_errors["is_occluded"] = torch.cat(
                        (
                            localization_errors["is_occluded"],
                            is_occluded[key][gt_loc_error_idxs : gt_loc_error_idxs + 1],
                        )
                    )
                localization_errors["iou_with_ground_truth"] = torch.cat(
                    (
                        localization_errors["iou_with_ground_truth"],
                        match_iou[
                            gt_loc_error_idxs,
                            localization_error_idx : localization_error_idx + 1,
                        ],
                    )
                )
                # Append processed localization errors
                processed_localization_errors = torch.cat(
                    (processed_localization_errors, localization_error_idx.unsqueeze(0))
                )
            # Assert that the total number of processed predictions do not exceed the number of predictions in frame.
            num_loc_errors_in_frame = processed_localization_errors.shape[0]
            num_processed_predictions = (
                num_loc_errors_in_frame + num_fp_in_frame + num_tp_dup_in_frame
            )
            # At the limit where iou_correct=0.5, equality holds.
            assert num_processed_predictions <= num_predictions_in_frame
            # Get false negative ground truth, which are fully missed.
            # These can be found by looking for GT instances not processed.
            processed_gt = processed_gt.unique()
            false_negative_idxs = torch.ones(frame_gt_boxes.tensor.shape[0])
            false_negative_idxs[processed_gt] = 0
            false_negative_idxs = false_negative_idxs.type(torch.bool)
            false_negatives["gt_box_means"] = torch.cat(
                (
                    false_negatives["gt_box_means"],
                    gt_box_means[key][false_negative_idxs],
                )
            )
            false_negatives["gt_cat_idxs"] = torch.cat(
                (false_negatives["gt_cat_idxs"], gt_cat_idxs[key][false_negative_idxs])
            )
            false_negatives["count"].append(
                (key, gt_box_means[key][false_negative_idxs].shape[0])
            )
            if trunc_occ_flag:
                false_negatives["is_truncated"] = torch.cat(
                    (
                        false_negatives["is_truncated"],
                        is_truncated[key][false_negative_idxs],
                    )
                )
                false_negatives["is_occluded"] = torch.cat(
                    (
                        false_negatives["is_occluded"],
                        is_occluded[key][false_negative_idxs],
                    )
                )
    matched_results = dict()
    matched_results.update(
        {
            "true_positives": true_positives,
            "localization_errors": localization_errors,
            "duplicates": duplicates,
            "false_positives": false_positives,
            "false_negatives": false_negatives,
        }
    )
    return matched_results
def get_train_contiguous_id_to_test_thing_dataset_id_dict(
    cfg,
    args,
    train_thing_dataset_id_to_contiguous_id,
    test_thing_dataset_id_to_contiguous_id,
):
    """Map training contiguous category ids to test-set dataset ids.

    If both id dicts are equal (same dataset, or out-of-distribution
    detection setup), the test dict is simply inverted. Otherwise the
    supported cross-dataset pairs (coco->voc, coco->openimages, bdd->kitti)
    are bridged through the corresponding ``metadata`` mapping table.

    Raises:
        ValueError: if the train/test dataset pair is not supported.
    """
    # If both dicts are equal or if we are performing out of distribution
    # detection, just flip the test dict.
    if (
        train_thing_dataset_id_to_contiguous_id
        == test_thing_dataset_id_to_contiguous_id
    ):
        return dict(
            (v, k) for k, v in test_thing_dataset_id_to_contiguous_id.items()
        )
    # If not equal, three situations: 1) BDD to KITTI, 2) COCO to PASCAL,
    # or 3) COCO to OpenImages
    cat_mapping_dict = dict(
        (v, k) for k, v in test_thing_dataset_id_to_contiguous_id.items()
    )
    # BUG FIX: originally the "voc" case was a separate `if` (so the
    # following chain's dead `else` also ran for voc), and the ValueError
    # was constructed but never raised, leading to an UnboundLocalError on
    # dataset_mapping_dict for unsupported pairs. Use one if/elif chain and
    # actually raise.
    if "voc" in args.test_dataset and "coco" in cfg.DATASETS.TRAIN[0]:
        dataset_mapping_dict = dict(
            (v, k) for k, v in metadata.COCO_TO_VOC_CONTIGUOUS_ID.items()
        )
    elif "openimages" in args.test_dataset and "coco" in cfg.DATASETS.TRAIN[0]:
        dataset_mapping_dict = dict(
            (v, k) for k, v in metadata.COCO_TO_OPENIMAGES_CONTIGUOUS_ID.items()
        )
    elif "kitti" in args.test_dataset and "bdd" in cfg.DATASETS.TRAIN[0]:
        dataset_mapping_dict = dict(
            (v, k) for k, v in metadata.BDD_TO_KITTI_CONTIGUOUS_ID.items()
        )
    else:
        raise ValueError(
            "Cannot generate category mapping dictionary. Please check if training and inference datasets are compatible."
        )
    return dict(
        (dataset_mapping_dict[k], v) for k, v in cat_mapping_dict.items()
    )
def get_test_thing_dataset_id_to_train_contiguous_id_dict(
    cfg,
    args,
    train_thing_dataset_id_to_contiguous_id,
    test_thing_dataset_id_to_contiguous_id,
):
    """Inverse of ``get_train_contiguous_id_to_test_thing_dataset_id_dict``.

    Returns a dict mapping test-set dataset category ids back to training
    contiguous ids.
    """
    forward_mapping = get_train_contiguous_id_to_test_thing_dataset_id_dict(
        cfg,
        args,
        train_thing_dataset_id_to_contiguous_id,
        test_thing_dataset_id_to_contiguous_id,
    )
    return {test_id: contiguous_id for contiguous_id, test_id in forward_mapping.items()}
def calculate_iou(bb1, bb2):
    """Intersection-over-union of two [x1, y1, x2, y2] boxes in screen coords.

    Areas use the +1 convention: (0, 0) is the top-left pixel and
    (w - 1, h - 1) the bottom-right one, so a box spanning x1..x2 covers
    x2 - x1 + 1 pixels. Returns 0.0 for non-overlapping boxes.
    """
    # Corners of the overlap rectangle.
    inter_x1 = max(bb1[0], bb2[0])
    inter_y1 = max(bb1[1], bb2[1])
    inter_x2 = min(bb1[2], bb2[2])
    inter_y2 = min(bb1[3], bb2[3])
    # No overlap at all.
    if inter_x2 < inter_x1 or inter_y2 < inter_y1:
        return 0.0
    # The intersection of two axis-aligned bounding boxes is always an
    # axis-aligned bounding box; +1 per the screen-coordinate convention.
    intersection_area = (inter_x2 - inter_x1 + 1) * (inter_y2 - inter_y1 + 1)
    area_1 = (bb1[2] - bb1[0] + 1) * (bb1[3] - bb1[1] + 1)
    area_2 = (bb2[2] - bb2[0] + 1) * (bb2[3] - bb2[1] + 1)
    return intersection_area / float(area_1 + area_2 - intersection_area)
| 37,914 | 39.079281 | 126 | py |
pmb-nll | pmb-nll-main/src/core/visualization_tools/results_processing_tools.py | import glob
import itertools
import numpy as np
import os
import pickle
import torch
from collections import defaultdict
# Project imports
from core.setup import setup_config, setup_arg_parser
from probabilistic_inference.inference_utils import get_inference_output_dir
def get_clean_results_dict(config_names,
                           configs_list,
                           inference_configs_list):
    """Aggregate per-method probabilistic scoring results into one dict.

    For every (method config, inference config) pair, loads the pickled
    ``probabilistic_scoring_res_*`` results for each corruption level /
    dataset and flattens them into ``res_dict_clean[category][metric]``
    lists, tagged with 'Image Corruption Level' and 'Method Name' so the
    result can be fed to a dataframe.

    Args:
        config_names: display names, parallel to the next two lists.
        configs_list: paths to method config files.
        inference_configs_list: paths to inference config files.

    Returns:
        Nested defaultdict: category ('True Positives', 'Duplicates',
        'Localization Errors', 'False Positives') -> metric name -> list.
    """
    # Level 0 is coco validation set with no corruption, level 10 is open
    # images, level 11 is open images ood
    image_corruption_levels = [0, 1, 3, 5, 10, 11]
    test_dataset_coco = "coco_2017_custom_val"
    test_dataset_open_images = "openimages_val"
    test_dataset_open_images_odd = "openimages_odd_val"
    arg_parser = setup_arg_parser()
    args = arg_parser.parse_args()
    # Initiate dataframe dict
    res_dict_clean = defaultdict(lambda: defaultdict(list))
    for config_name, config, inference_config_name in zip(
            config_names, configs_list, inference_configs_list):
        # Setup config
        args.config_file = config
        args.inference_config = inference_config_name
        args.test_dataset = test_dataset_coco
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
        cfg.defrost()
        # Read coco dataset results
        cfg.ACTUAL_TEST_DATASET = args.test_dataset
        for image_corruption_level in image_corruption_levels:
            # Build path to gt instances and inference output
            args.image_corruption_level = image_corruption_level
            # NOTE: the loop variable is rebound from the numeric level to a
            # display label; args.image_corruption_level keeps the number.
            if image_corruption_level == 0:
                image_corruption_level = 'Val'
            elif image_corruption_level == 10:
                image_corruption_level = 'OpenIm'
            elif image_corruption_level == 11:
                image_corruption_level = 'OpenIm OOD'
            else:
                image_corruption_level = 'C' + str(image_corruption_level)
            if 'OpenIm' not in image_corruption_level:
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
                dictionary_file_name = glob.glob(
                    os.path.join(
                        inference_output_dir,
                        'probabilistic_scoring_res_averaged_*.pkl'))[0]
            else:
                # Open Images runs use corruption level 0 and their own
                # test dataset (plain or OOD variant).
                args.image_corruption_level = 0
                args.test_dataset = test_dataset_open_images if image_corruption_level == 'OpenIm' else test_dataset_open_images_odd
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
                prob_dict_name = 'probabilistic_scoring_res_averaged_*.pkl' if image_corruption_level == 'OpenIm' else 'probabilistic_scoring_res_odd_*.pkl'
                dictionary_file_name = glob.glob(
                    os.path.join(
                        inference_output_dir,
                        prob_dict_name))[0]
            with open(dictionary_file_name, "rb") as pickle_file:
                res_dict = pickle.load(pickle_file)
            if image_corruption_level != 'OpenIm OOD':
                # True Positives Results
                res_dict_clean['True Positives']['Negative Log Likelihood (Classification)'].extend(
                    res_dict['true_positives_cls_analysis']['ignorance_score_mean'])
                res_dict_clean['True Positives']['Brier Score'].extend(
                    res_dict['true_positives_cls_analysis']['brier_score_mean'])
                res_dict_clean['True Positives']['Negative Log Likelihood (Regression)'].extend(
                    res_dict['true_positives_reg_analysis']['ignorance_score_mean'])
                res_dict_clean['True Positives']['Mean Squared Error'].extend(
                    res_dict['true_positives_reg_analysis']['mean_squared_error'])
                res_dict_clean['True Positives']['Energy Score'].extend(
                    res_dict['true_positives_reg_analysis']['energy_score_mean'])
                res_dict_clean['True Positives']['Image Corruption Level'].extend(
                    [image_corruption_level] *
                    res_dict['true_positives_reg_analysis']['energy_score_mean'].shape[0])
                res_dict_clean['True Positives']['Method Name'].extend(
                    [config_name] * res_dict['true_positives_reg_analysis']['energy_score_mean'].shape[0])
                # Duplicates Results
                res_dict_clean['Duplicates']['Negative Log Likelihood (Classification)'].extend(
                    res_dict['duplicates_cls_analysis']['ignorance_score_mean'])
                res_dict_clean['Duplicates']['Brier Score'].extend(
                    res_dict['duplicates_cls_analysis']['brier_score_mean'])
                res_dict_clean['Duplicates']['Negative Log Likelihood (Regression)'].extend(
                    res_dict['duplicates_reg_analysis']['ignorance_score_mean'])
                res_dict_clean['Duplicates']['Mean Squared Error'].extend(
                    res_dict['duplicates_reg_analysis']['mean_squared_error'])
                res_dict_clean['Duplicates']['Energy Score'].extend(
                    res_dict['duplicates_reg_analysis']['energy_score_mean'])
                res_dict_clean['Duplicates']['Image Corruption Level'].extend(
                    [image_corruption_level] *
                    res_dict['duplicates_reg_analysis']['energy_score_mean'].shape[0])
                res_dict_clean['Duplicates']['Method Name'].extend(
                    [config_name] * res_dict['duplicates_reg_analysis']['energy_score_mean'].shape[0])
                # Localization Error Results
                res_dict_clean['Localization Errors']['Negative Log Likelihood (Classification)'].extend(
                    res_dict['localization_errors_cls_analysis']['ignorance_score_mean'])
                res_dict_clean['Localization Errors']['Brier Score'].extend(
                    res_dict['localization_errors_cls_analysis']['brier_score_mean'])
                res_dict_clean['Localization Errors']['Negative Log Likelihood (Regression)'].extend(
                    res_dict['localization_errors_reg_analysis']['ignorance_score_mean'])
                res_dict_clean['Localization Errors']['Mean Squared Error'].extend(
                    res_dict['localization_errors_reg_analysis']['mean_squared_error'])
                res_dict_clean['Localization Errors']['Energy Score'].extend(
                    res_dict['localization_errors_reg_analysis']['energy_score_mean'])
                res_dict_clean['Localization Errors']['Image Corruption Level'].extend(
                    [image_corruption_level] *
                    res_dict['localization_errors_reg_analysis']['energy_score_mean'].shape[0])
                res_dict_clean['Localization Errors']['Method Name'].extend(
                    [config_name] *
                    res_dict['localization_errors_reg_analysis']['energy_score_mean'].shape[0])
                # False Positives Results
                res_dict_clean['False Positives']['Negative Log Likelihood (Classification)'].extend(
                    res_dict['false_positives_cls_analysis']['ignorance_score_mean'])
                res_dict_clean['False Positives']['Brier Score'].extend(
                    res_dict['false_positives_cls_analysis']['brier_score_mean'])
                res_dict_clean['False Positives']['Entropy'].extend(
                    res_dict['false_positives_reg_analysis']['total_entropy_mean'])
                res_dict_clean['False Positives']['Image Corruption Level'].extend(
                    [image_corruption_level] *
                    res_dict['false_positives_reg_analysis']['total_entropy_mean'].shape[0])
                res_dict_clean['False Positives']['Method Name'].extend(
                    [config_name] *
                    res_dict['false_positives_reg_analysis']['total_entropy_mean'].shape[0])
            else:
                # False Positives Results
                # OOD pickles hold one summary value per metric, hence
                # append (single entry per method) instead of extend.
                res_dict_clean['False Positives']['Negative Log Likelihood (Classification)'].append(
                    res_dict['ignorance_score_mean'])
                res_dict_clean['False Positives']['Brier Score'].append(
                    res_dict['brier_score_mean'])
                res_dict_clean['False Positives']['Entropy'].append(
                    res_dict['total_entropy_mean'])
                res_dict_clean['False Positives']['Image Corruption Level'].append(
                    image_corruption_level)
                res_dict_clean['False Positives']['Method Name'].append(
                    config_name)
    return res_dict_clean
def get_mAP_results(config_names,
                    configs_list,
                    inference_configs_list):
    """Collect mAP values for every method / corruption-level combination.

    For each (method config, inference config) pair, reads the first value
    in ``mAP_res.txt`` from the matching inference output directory and
    records it (scaled to percent) together with the method name and a
    corruption-level label.

    Returns:
        defaultdict(list) with keys 'Method Name', 'Image Corruption Level'
        and 'mAP'.
    """
    # Level 0 is coco validation set with no corruption, level 10 is open
    # images, level 11 is open images ood
    image_corruption_levels = [0, 1, 2, 3, 4, 5, 10]
    test_dataset_coco = "coco_2017_custom_val"
    test_dataset_open_images = "openimages_val"
    arg_parser = setup_arg_parser()
    args = arg_parser.parse_args()
    # Initiate dataframe dict
    mAP_results = defaultdict(list)
    for config_name, config, inference_config_name in zip(
            config_names, configs_list, inference_configs_list):
        # Setup config
        args.config_file = config
        args.inference_config = inference_config_name
        args.test_dataset = test_dataset_coco
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
        cfg.defrost()
        # Read coco dataset results
        cfg.ACTUAL_TEST_DATASET = args.test_dataset
        for corruption_level in image_corruption_levels:
            # Build path to gt instances and inference output
            args.image_corruption_level = corruption_level
            # Human-readable label used as the dataframe key.
            if corruption_level == 0:
                level_label = 'Val'
            elif corruption_level == 10:
                level_label = 'OpenIm'
            else:
                level_label = 'C' + str(corruption_level)
            if 'OpenIm' in level_label:
                # Open Images runs use corruption level 0 and their own
                # test dataset.
                args.image_corruption_level = 0
                args.test_dataset = test_dataset_open_images
            inference_output_dir = get_inference_output_dir(
                cfg['OUTPUT_DIR'],
                args.test_dataset,
                args.inference_config,
                args.image_corruption_level)
            text_file_name = glob.glob(
                os.path.join(
                    inference_output_dir,
                    'mAP_res.txt'))[0]
            with open(text_file_name, "r") as f:
                mAP = f.read().strip('][\n').split(', ')[0]
            mAP = float(mAP) * 100
            mAP_results['Method Name'].append(config_name)
            mAP_results['Image Corruption Level'].append(level_label)
            mAP_results['mAP'].append(mAP)
    return mAP_results
def get_matched_results_dicts(config_names,
                              configs_list,
                              inference_configs_list,
                              iou_min=0.1,
                              iou_correct=0.5):
    """Load matched detection results and compute per-detection metrics.

    For every configuration and for each of the COCO validation set,
    OpenImages, and OpenImages-OOD splits, loads previously saved matched
    results (or preprocessed OOD predictions), then computes per-detection
    uncertainty/error metrics (entropy, NLL, MSE, energy distance,
    categorical entropy, IoU with ground truth, truncation/occlusion flags)
    and accumulates them into a nested dict.

    Args:
        config_names (list[str]): display names, one per configuration.
        configs_list (list[str]): paths to the config files.
        inference_configs_list (list[str]): paths to the inference configs.
        iou_min (float): lower IoU bound used in the matched-results filename.
        iou_correct (float): IoU threshold used in the matched-results
            filename.

    Returns:
        defaultdict: res_dict_clean[config_name][corruption_level][metric]
        -> list of per-detection values.
    """
    # Level 0 is coco validation set with no corruption, level 10 is open
    # images, level 11 is open images ood
    image_corruption_levels = [0, 10, 11]

    test_dataset_coco = "coco_2017_custom_val"
    test_dataset_open_images = "openimages_val"
    test_dataset_open_images_odd = "openimages_odd_val"

    arg_parser = setup_arg_parser()
    args = arg_parser.parse_args()
    # Initiate dataframe dict
    res_dict_clean = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))

    for config_name, config, inference_config_name in zip(
            config_names, configs_list, inference_configs_list):
        # Setup config
        args.config_file = config
        args.inference_config = inference_config_name
        args.test_dataset = test_dataset_coco
        cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
        cfg.defrost()

        # Read coco dataset results
        cfg.ACTUAL_TEST_DATASET = args.test_dataset

        for image_corruption_level in image_corruption_levels:
            # Build path to gt instances and inference output
            args.image_corruption_level = image_corruption_level

            # Map the numeric level to its display label.
            if image_corruption_level == 0:
                image_corruption_level = 'Val'
            elif image_corruption_level == 10:
                image_corruption_level = 'OpenIm'
            elif image_corruption_level == 11:
                image_corruption_level = 'OpenIm OOD'
            else:
                image_corruption_level = 'C' + str(image_corruption_level)
            if 'OpenIm' not in image_corruption_level:
                # COCO split: load matched results keyed by the IoU bounds.
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)

                # Get matched results by either generating them or loading from
                # file.
                dictionary_file_name = glob.glob(
                    os.path.join(
                        inference_output_dir,
                        "matched_results_{}_{}_*.pth".format(
                            iou_min,
                            iou_correct)))[0]
                matched_results = torch.load(
                    dictionary_file_name, map_location='cuda')
            elif image_corruption_level == 'OpenIm':
                # OpenImages split: switch dataset, then load matched results.
                # NOTE(review): args.test_dataset remains switched for the
                # following iterations of this loop; levels are ordered
                # [0, 10, 11] so the OOD branch below relies on this.
                args.image_corruption_level = 0
                args.test_dataset = test_dataset_open_images if image_corruption_level == 'OpenIm' else test_dataset_open_images_odd
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
                dictionary_file_name = glob.glob(
                    os.path.join(
                        inference_output_dir,
                        "matched_results_{}_{}_*.pth".format(
                            iou_min,
                            iou_correct)))[0]
                matched_results = torch.load(
                    dictionary_file_name, map_location='cuda')
            else:
                # OpenImages OOD split: no ground truth, so build a flat
                # matched_results dict from preprocessed predictions.
                args.image_corruption_level = 0
                args.test_dataset = test_dataset_open_images if image_corruption_level == 'OpenIm' else test_dataset_open_images_odd
                inference_output_dir = get_inference_output_dir(
                    cfg['OUTPUT_DIR'],
                    args.test_dataset,
                    args.inference_config,
                    args.image_corruption_level)
                dictionary_file_name = glob.glob(
                    os.path.join(
                        inference_output_dir,
                        "preprocessed_predicted_instances_odd_*.pth"))[0]
                preprocessed_predicted_instances = torch.load(
                    dictionary_file_name, map_location='cuda')

                predicted_boxes = preprocessed_predicted_instances['predicted_boxes']
                predicted_cov_mats = preprocessed_predicted_instances['predicted_covar_mats']
                predicted_cls_probs = preprocessed_predicted_instances['predicted_cls_probs']

                # Flatten the per-image dicts into single tensors.
                predicted_boxes = list(itertools.chain.from_iterable(
                    [predicted_boxes[key] for key in predicted_boxes.keys()]))
                predicted_cov_mats = list(itertools.chain.from_iterable(
                    [predicted_cov_mats[key] for key in predicted_cov_mats.keys()]))
                predicted_cls_probs = list(itertools.chain.from_iterable(
                    [predicted_cls_probs[key] for key in predicted_cls_probs.keys()]))

                predicted_boxes = torch.stack(
                    predicted_boxes, 1).transpose(
                    0, 1)
                predicted_cov_mats = torch.stack(
                    predicted_cov_mats, 1).transpose(0, 1)
                predicted_cls_probs = torch.stack(
                    predicted_cls_probs,
                    1).transpose(
                    0,
                    1)
                matched_results = {
                    'predicted_box_means': predicted_boxes,
                    'predicted_box_covariances': predicted_cov_mats,
                    'predicted_cls_probs': predicted_cls_probs}

            if image_corruption_level != 'OpenIm OOD':
                # In-distribution splits: concatenate all detection categories
                # (TP, localization errors, duplicates, FP) in a fixed order.
                all_results_means = torch.cat(
                    (matched_results['true_positives']['predicted_box_means'],
                     matched_results['localization_errors']['predicted_box_means'],
                     matched_results['duplicates']['predicted_box_means'],
                     matched_results['false_positives']['predicted_box_means']))
                all_results_covs = torch.cat(
                    (matched_results['true_positives']['predicted_box_covariances'],
                     matched_results['localization_errors']['predicted_box_covariances'],
                     matched_results['duplicates']['predicted_box_covariances'],
                     matched_results['false_positives']['predicted_box_covariances']))
                # False positives have no ground truth; pad with NaN so they
                # drop out of GT-dependent metrics downstream.
                all_gt_means = torch.cat(
                    (matched_results['true_positives']['gt_box_means'],
                     matched_results['localization_errors']['gt_box_means'],
                     matched_results['duplicates']['gt_box_means'],
                     matched_results['false_positives']['predicted_box_means']*np.NaN))

                # Build the Gaussian on CPU (with a small diagonal jitter for
                # numerical stability), then move its internals to GPU.
                predicted_multivariate_normal_dists = torch.distributions.multivariate_normal.MultivariateNormal(
                    all_results_means.to('cpu'),
                    all_results_covs.to('cpu') +
                    1e-2 *
                    torch.eye(all_results_covs.shape[2]).to('cpu'))

                predicted_multivariate_normal_dists.loc = predicted_multivariate_normal_dists.loc.to(
                    'cuda')
                predicted_multivariate_normal_dists.scale_tril = predicted_multivariate_normal_dists.scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists._unbroadcasted_scale_tril = predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists.covariance_matrix = predicted_multivariate_normal_dists.covariance_matrix.to(
                    'cuda')
                predicted_multivariate_normal_dists.precision_matrix = predicted_multivariate_normal_dists.precision_matrix.to(
                    'cuda')

                all_entropy = predicted_multivariate_normal_dists.entropy()
                all_log_prob = -predicted_multivariate_normal_dists.log_prob(all_gt_means)

                # Energy Score.
                sample_set = predicted_multivariate_normal_dists.sample((3,)).to('cuda')
                sample_set_1 = sample_set[:-1]
                sample_set_2 = sample_set[1:]

                energy_score = torch.norm(
                    (sample_set_1 - all_gt_means),
                    dim=2).mean(0) - 0.5 * torch.norm(
                    (sample_set_1 - sample_set_2),
                    dim=2).mean(0)

                mse_loss = torch.nn.MSELoss(reduction='none')
                mse = mse_loss(all_gt_means, all_results_means).mean(1)

                res_dict_clean[config_name][image_corruption_level]['Entropy'].extend(
                    all_entropy.cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['MSE'].extend(
                    mse.cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['NLL'].extend(
                    all_log_prob.cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['ED'].extend(
                    energy_score.cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['IOU With GT'].extend(torch.cat(
                    (matched_results['true_positives']['iou_with_ground_truth'],
                     matched_results['localization_errors']['iou_with_ground_truth'][:, 0],
                     matched_results['duplicates']['iou_with_ground_truth'],
                     torch.zeros(
                         matched_results['false_positives']['predicted_box_means'].shape[0]).to('cuda')*np.NaN)).cpu().numpy())

                # Separate entropy over false positives only.
                predicted_multivariate_normal_dists = torch.distributions.multivariate_normal.MultivariateNormal(
                    matched_results['false_positives']['predicted_box_means'].to('cpu'),
                    matched_results['false_positives']['predicted_box_covariances'].to('cpu') +
                    1e-2 *
                    torch.eye(matched_results['false_positives']['predicted_box_covariances'].shape[2]).to('cpu'))

                predicted_multivariate_normal_dists.loc = predicted_multivariate_normal_dists.loc.to(
                    'cuda')
                predicted_multivariate_normal_dists.scale_tril = predicted_multivariate_normal_dists.scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists._unbroadcasted_scale_tril = predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists.covariance_matrix = predicted_multivariate_normal_dists.covariance_matrix.to(
                    'cuda')
                predicted_multivariate_normal_dists.precision_matrix = predicted_multivariate_normal_dists.precision_matrix.to(
                    'cuda')
                FP_Entropy = predicted_multivariate_normal_dists.entropy()
                res_dict_clean[config_name][image_corruption_level]['FP_Entropy'].extend(
                    FP_Entropy.cpu().numpy())

                # Categorical entropy of FP class scores. Shape 80 means raw
                # COCO sigmoid scores -> model background prob as Bernoulli;
                # otherwise treat the vector as a categorical distribution.
                predicted_cat_dists_fp = matched_results['false_positives']['predicted_cls_probs']

                if predicted_cat_dists_fp.shape[1] == 80:
                    predicted_cat_dists_fp, _ = predicted_cat_dists_fp.max(dim=1)
                    predicted_cat_dists_fp = 1-predicted_cat_dists_fp
                    predicted_categorical_dists = torch.distributions.Bernoulli(
                        probs=predicted_cat_dists_fp)
                else:
                    predicted_categorical_dists = torch.distributions.Categorical(
                        probs=matched_results['false_positives']['predicted_cls_probs'])

                all_pred_ent = predicted_categorical_dists.entropy()
                res_dict_clean[config_name][image_corruption_level]['Cat_Entropy'].extend(
                    all_pred_ent.cpu().numpy())

                if image_corruption_level == 'OpenIm':
                    # OpenImages provides truncation/occlusion flags for
                    # matched detections; FPs get NaN padding.
                    res_dict_clean[config_name][image_corruption_level]['Truncated'].extend(
                        torch.cat(
                            (matched_results['true_positives']['is_truncated'],
                             matched_results['localization_errors']['is_truncated'],
                             matched_results['duplicates']['is_truncated'],
                             torch.full((
                                 matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())
                    res_dict_clean[config_name][image_corruption_level]['Occluded'].extend(
                        torch.cat(
                            (matched_results['true_positives']['is_occluded'],
                             matched_results['localization_errors']['is_occluded'],
                             matched_results['duplicates']['is_occluded'],
                             torch.full((
                                 matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())
                else:
                    # COCO has no such flags: pad with NaN placeholders.
                    # NOTE(review): in the 'Truncated' branch the
                    # localization_errors/duplicates entries are -1 *without*
                    # the *np.NaN applied everywhere else -- looks like an
                    # inconsistency; confirm intended.
                    res_dict_clean[config_name][image_corruption_level]['Truncated'].extend(
                        torch.cat(
                            (torch.full((
                                matched_results['true_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN,
                             torch.full((
                                 matched_results['localization_errors']['predicted_box_means'].shape[0],), -1,
                                 dtype=torch.float32).to('cuda'),
                             torch.full((
                                 matched_results['duplicates']['predicted_box_means'].shape[0],), -1,
                                 dtype=torch.float32).to('cuda'),
                             torch.full((
                                 matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())
                    res_dict_clean[config_name][image_corruption_level]['Occluded'].extend(
                        torch.cat(
                            (torch.full((
                                matched_results['true_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN,
                             torch.full((
                                 matched_results['localization_errors']['predicted_box_means'].shape[0],), -1,
                                 dtype=torch.float32).to('cuda')*np.NaN,
                             torch.full((
                                 matched_results['duplicates']['predicted_box_means'].shape[0],), -1,
                                 dtype=torch.float32).to('cuda')*np.NaN,
                             torch.full((
                                 matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())
            else:
                # OOD split: every detection is a false positive by
                # construction; only FP/categorical entropies are meaningful.
                predicted_multivariate_normal_dists = torch.distributions.multivariate_normal.MultivariateNormal(
                    matched_results['predicted_box_means'].to('cpu'),
                    matched_results['predicted_box_covariances'].to('cpu') +
                    1e-2 *
                    torch.eye(matched_results['predicted_box_covariances'].shape[2]).to('cpu'))

                predicted_multivariate_normal_dists.loc = predicted_multivariate_normal_dists.loc.to(
                    'cuda')
                predicted_multivariate_normal_dists.scale_tril = predicted_multivariate_normal_dists.scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists._unbroadcasted_scale_tril = predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(
                    'cuda')
                predicted_multivariate_normal_dists.covariance_matrix = predicted_multivariate_normal_dists.covariance_matrix.to(
                    'cuda')
                predicted_multivariate_normal_dists.precision_matrix = predicted_multivariate_normal_dists.precision_matrix.to(
                    'cuda')

                all_entropy = predicted_multivariate_normal_dists.entropy()
                res_dict_clean[config_name][image_corruption_level]['FP_Entropy'].extend(
                    all_entropy.cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['IOU With GT'].extend(torch.zeros(
                    matched_results['predicted_box_means'].shape[0]).cpu().numpy())
                res_dict_clean[config_name][image_corruption_level]['Truncated'].extend(torch.full((
                    matched_results['predicted_box_means'].shape[0],), -1, dtype=torch.float32).cpu().numpy()*np.NaN)
                res_dict_clean[config_name][image_corruption_level]['Occluded'].extend(torch.full((
                    matched_results['predicted_box_means'].shape[0],), -1, dtype=torch.float32).cpu().numpy()*np.NaN)

                all_results_cat = matched_results['predicted_cls_probs']
                if all_results_cat.shape[1] == 80:
                    predicted_cat_dists_fp, _ = all_results_cat.max(dim=1)
                    predicted_cat_dists_fp = 1-predicted_cat_dists_fp
                    predicted_categorical_dists = torch.distributions.Bernoulli(
                        probs=predicted_cat_dists_fp)
                else:
                    predicted_categorical_dists = torch.distributions.Categorical(
                        probs=all_results_cat)

                all_pred_ent = predicted_categorical_dists.entropy()
                res_dict_clean[config_name][image_corruption_level]['Cat_Entropy'].extend(
                    all_pred_ent.cpu().numpy())
    return res_dict_clean
def mean_reject_outliers(x, outlierConstant=1.5):
    """Return the mean of *x* after rejecting outliers via the IQR rule.

    Values outside ``[Q1 - c*IQR, Q3 + c*IQR]`` (with ``c = outlierConstant``
    and ``IQR = Q3 - Q1``) are discarded before averaging.

    Args:
        x (array_like): input values; may contain NaN entries.
        outlierConstant (float): multiplier on the inter-quartile range.

    Returns:
        float: mean of the inlier values, ignoring NaNs.
    """
    a = np.array(x)
    # Use nanpercentile: np.percentile returns NaN when any input is NaN,
    # which would make the acceptance band NaN and reject every value.
    upper_quartile = np.nanpercentile(a, 75)
    lower_quartile = np.nanpercentile(a, 25)
    IQR = (upper_quartile - lower_quartile) * outlierConstant
    quartileSet = (lower_quartile - IQR, upper_quartile + IQR)
    # NaN entries fail both comparisons and are filtered out here as well.
    result = a[np.where((a >= quartileSet[0]) & (a <= quartileSet[1]))]
    return np.nanmean(result)
| 30,031 | 53.703097 | 161 | py |
pmb-nll | pmb-nll-main/src/probabilistic_inference/probabilistic_retinanet_predictor.py | import numpy as np
import torch
import math
# Detectron Imports
from detectron2.layers import batched_nms, cat
from detectron2.structures import Boxes, Instances, pairwise_iou
# Project Imports
from probabilistic_inference import inference_utils
from probabilistic_inference.inference_core import ProbabilisticPredictor
from probabilistic_modeling.modeling_utils import covariance_output_to_cholesky, clamp_log_variance
class RetinaNetProbabilisticPredictor(ProbabilisticPredictor):
    def __init__(self, cfg):
        """Initialize the predictor and its delta-sample transform.

        Args:
            cfg: detectron2-style config node; RPN bbox regression weights
                are read from cfg.MODEL.RPN.BBOX_REG_WEIGHTS.
        """
        super().__init__(cfg)

        # Create transform used to map sampled box deltas back to boxes.
        self.sample_box2box_transform = inference_utils.SampleBox2BoxTransform(
            self.cfg.MODEL.RPN.BBOX_REG_WEIGHTS)
    def retinanet_probabilistic_inference(
            self,
            input_im,
            outputs=None,
            ensemble_inference=False,
            outputs_list=None):
        """
        General RetinaNet probabilistic anchor-wise inference. Preliminary inference step for many post-processing
        based inference methods such as standard_nms, output_statistics, and bayes_od.

        Args:
            input_im (list): an input im list generated from dataset handler.
            outputs (list): outputs from model.forward. Will be computed internally if not provided.
            ensemble_inference (bool): True if ensembles are used for inference. If set to true, outputs_list must be externally provided.
            outputs_list (list): List of model() outputs, usually generated from ensembles of models.
        Returns:
            all_predicted_boxes,
            all_predicted_boxes_covariance (Tensor): Nx4x4 vectors used
            all_predicted_prob (Tensor): Nx1 scores which represent max of all_pred_prob_vectors. For usage in NMS and mAP computation.
            all_classes_idxs (Tensor): Nx1 Class ids to be used for NMS.
            all_predicted_prob_vectors (Tensor): NxK tensor where K is the number of classes.
            ppp: outputs['ppp'] if present, else an empty list.
        """
        # Epistemic uncertainty is active when multiple MC-dropout runs or an
        # externally provided ensemble are available (and no single `outputs`
        # dict was passed in).
        is_epistemic = ((self.mc_dropout_enabled and self.num_mc_dropout_runs > 1)
                        or ensemble_inference) and outputs is None
        if is_epistemic:
            if self.mc_dropout_enabled and self.num_mc_dropout_runs > 1:
                # Run the model once with stacked MC-dropout outputs, then
                # split the concatenated feature maps back into one dict per
                # dropout run (n_fms feature maps each).
                outputs_list = self.model(
                    input_im,
                    return_anchorwise_output=True,
                    num_mc_dropout_runs=self.num_mc_dropout_runs)
                n_fms = len(self.model.in_features)
                outputs_list = [{key: value[i * n_fms:(i + 1) * n_fms] if value is not None else value for key,
                                 value in outputs_list.items()} for i in range(self.num_mc_dropout_runs)]
            outputs = {'anchors': outputs_list[0]['anchors']}

            # Compute box classification and classification variance means
            # (simple average of logits over runs/ensemble members).
            box_cls = [output['box_cls'] for output in outputs_list]
            box_cls_mean = box_cls[0]
            for i in range(len(box_cls) - 1):
                box_cls_mean = [box_cls_mean[j] + box_cls[i][j]
                                for j in range(len(box_cls_mean))]
            box_cls_mean = [
                box_cls_f_map /
                len(box_cls) for box_cls_f_map in box_cls_mean]
            outputs.update({'box_cls': box_cls_mean})

            if outputs_list[0]['box_cls_var'] is not None:
                box_cls_var = [output['box_cls_var']
                               for output in outputs_list]
                box_cls_var_mean = box_cls_var[0]
                for i in range(len(box_cls_var) - 1):
                    box_cls_var_mean = [
                        box_cls_var_mean[j] +
                        box_cls_var[i][j] for j in range(
                            len(box_cls_var_mean))]
                box_cls_var_mean = [
                    box_cls_var_f_map /
                    len(box_cls_var) for box_cls_var_f_map in box_cls_var_mean]
            else:
                box_cls_var_mean = None
            outputs.update({'box_cls_var': box_cls_var_mean})

            # Compute box regression epistemic variance and mean, and aleatoric
            # variance mean
            box_delta_list = [output['box_delta']
                              for output in outputs_list]
            box_delta_mean = box_delta_list[0]
            for i in range(len(box_delta_list) - 1):
                box_delta_mean = [
                    box_delta_mean[j] +
                    box_delta_list[i][j] for j in range(
                        len(box_delta_mean))]
            box_delta_mean = [
                box_delta_f_map /
                len(box_delta_list) for box_delta_f_map in box_delta_mean]
            outputs.update({'box_delta': box_delta_mean})

            if outputs_list[0]['box_reg_var'] is not None:
                box_reg_var = [output['box_reg_var']
                               for output in outputs_list]
                box_reg_var_mean = box_reg_var[0]
                for i in range(len(box_reg_var) - 1):
                    box_reg_var_mean = [
                        box_reg_var_mean[j] +
                        box_reg_var[i][j] for j in range(
                            len(box_reg_var_mean))]
                box_reg_var_mean = [
                    box_delta_f_map /
                    len(box_reg_var) for box_delta_f_map in box_reg_var_mean]
            else:
                box_reg_var_mean = None
            outputs.update({'box_reg_var': box_reg_var_mean})

        elif outputs is None:
            outputs = self.model(input_im, return_anchorwise_output=True)

        all_anchors = []
        all_predicted_deltas = []
        all_predicted_box_reg_var = []
        all_predicted_boxes_cholesky = []
        all_predicted_prob = []
        all_classes_idxs = []
        all_predicted_prob_vectors = []
        all_predicted_boxes_epistemic_covar = []

        # Process each FPN level independently, then concatenate.
        for i, anchors in enumerate(outputs['anchors']):
            box_cls = outputs['box_cls'][i][0]
            box_delta = outputs['box_delta'][i][0]

            # If classification aleatoric uncertainty available, perform
            # monte-carlo sampling to generate logits.
            if outputs['box_cls_var'] is not None:
                box_cls_var = outputs['box_cls_var'][i][0]
                box_cls_dists = torch.distributions.normal.Normal(
                    box_cls, scale=torch.sqrt(torch.exp(box_cls_var)))
                box_cls = box_cls_dists.rsample(
                    (self.model.cls_var_num_samples,))
                box_cls = torch.mean(box_cls.sigmoid(), 0)
            else:
                box_cls = box_cls.sigmoid()

            # Keep top k top scoring indices only.
            num_topk = min(self.model.test_topk_candidates, box_delta.size(0))

            predicted_prob, classes_idxs = torch.max(box_cls, 1)
            predicted_prob, topk_idxs = predicted_prob.topk(num_topk)

            # filter out the proposals with low confidence score
            keep_idxs = predicted_prob > self.model.test_score_thresh
            predicted_prob = predicted_prob[keep_idxs]
            topk_idxs = topk_idxs[keep_idxs]

            anchor_idxs = topk_idxs
            classes_idxs = classes_idxs[topk_idxs]

            box_delta = box_delta[anchor_idxs]
            anchors = anchors[anchor_idxs]

            cholesky_decomp = None

            if outputs['box_reg_var'] is not None:
                box_reg_var = outputs['box_reg_var'][i][0][anchor_idxs]
                box_reg_var = clamp_log_variance(box_reg_var)
                # Construct cholesky decomposition using diagonal vars
                cholesky_decomp = covariance_output_to_cholesky(box_reg_var)

            # In case dropout is enabled, we need to compute aleatoric
            # covariance matrix and add it here:
            box_reg_epistemic_covar = None
            if is_epistemic:
                # Compute epistemic box covariance matrix
                box_delta_list_i = [
                    self.model.box2box_transform.apply_deltas(
                        box_delta_i[i][0][anchor_idxs],
                        anchors.tensor) for box_delta_i in box_delta_list]
                _, box_reg_epistemic_covar = inference_utils.compute_mean_covariance_torch(
                    box_delta_list_i)

            all_predicted_deltas.append(box_delta)
            all_predicted_boxes_cholesky.append(cholesky_decomp)
            # NOTE(review): box_reg_var is only assigned when
            # outputs['box_reg_var'] is not None; this unconditional append
            # would raise NameError otherwise. Presumably every supported
            # model provides box_reg_var -- confirm.
            all_predicted_box_reg_var.append(box_reg_var)
            all_anchors.append(anchors.tensor)
            all_predicted_prob.append(predicted_prob)
            all_predicted_prob_vectors.append(box_cls[anchor_idxs])
            all_classes_idxs.append(classes_idxs)
            all_predicted_boxes_epistemic_covar.append(box_reg_epistemic_covar)

        box_delta = cat(all_predicted_deltas)
        anchors = cat(all_anchors)

        if isinstance(all_predicted_boxes_cholesky[0], torch.Tensor):
            # Generate multivariate samples to be used for monte-carlo simulation. We can afford much more samples
            # here since the matrix dimensions are much smaller and therefore
            # have much less memory footprint. Keep 100 or less to maintain
            # reasonable runtime speed.
            cholesky_decomp = cat(all_predicted_boxes_cholesky)
            box_reg_var = cat(all_predicted_box_reg_var)

            if self.use_mc_sampling:
                # Sample deltas from the predicted distribution, transform the
                # samples to boxes, then estimate mean/covariance empirically.
                if self.cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE == 'gaussian':
                    multivariate_normal_samples = torch.distributions.MultivariateNormal(
                        box_delta, scale_tril=cholesky_decomp)
                elif self.cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE == 'laplacian':
                    multivariate_normal_samples = torch.distributions.Laplace(box_delta, scale=cholesky_decomp.diagonal(dim1=-2,dim2=-1)/math.sqrt(2.0))

                # Define monte-carlo samples
                distributions_samples = multivariate_normal_samples.rsample(
                    (1000,))
                distributions_samples = torch.transpose(
                    torch.transpose(distributions_samples, 0, 1), 1, 2)
                samples_anchors = torch.repeat_interleave(
                    anchors.unsqueeze(2), 1000, dim=2)

                # Transform samples from deltas to boxes
                t_dist_samples = self.sample_box2box_transform.apply_samples_deltas(
                    distributions_samples, samples_anchors)

                # Compute samples mean and covariance matrices.
                all_predicted_boxes, all_predicted_boxes_covariance = inference_utils.compute_mean_covariance_torch(
                    t_dist_samples)
                if isinstance(
                        all_predicted_boxes_epistemic_covar[0],
                        torch.Tensor):
                    epistemic_covar_mats = cat(
                        all_predicted_boxes_epistemic_covar)
                    all_predicted_boxes_covariance += epistemic_covar_mats
            else:
                # Closed-form: covariance = L @ L^T; boxes from mean deltas.
                all_predicted_boxes_covariance = torch.matmul(cholesky_decomp, torch.transpose(cholesky_decomp, -1, -2))
                all_predicted_boxes = self.model.box2box_transform.apply_deltas(box_delta, anchors)
        else:
            # This handles the case where no aleatoric uncertainty is available
            if is_epistemic:
                all_predicted_boxes_covariance = cat(
                    all_predicted_boxes_epistemic_covar)
            else:
                all_predicted_boxes_covariance = []

            # predict boxes
            all_predicted_boxes = self.model.box2box_transform.apply_deltas(
                box_delta, anchors)
        if 'ppp' in outputs:
            ppp = outputs['ppp']
        else:
            ppp = []
        return all_predicted_boxes, all_predicted_boxes_covariance, cat(
            all_predicted_prob), cat(all_classes_idxs), cat(all_predicted_prob_vectors), ppp
def post_processing_standard_nms(self, input_im):
"""
This function produces results using standard non-maximum suppression. The function takes into
account any probabilistic modeling method when computing the results. It can combine aleatoric uncertainty
from heteroscedastic regression and epistemic uncertainty from monte-carlo dropout for both classification and
regression results.
Args:
input_im (list): an input im list generated from dataset handler.
Returns:
result (instances): object instances
"""
outputs = self.retinanet_probabilistic_inference(input_im)
return inference_utils.general_standard_nms_postprocessing(
input_im, outputs, self.model.test_nms_thresh, self.model.max_detections_per_image)
def post_processing_topk_detections(self, input_im):
"""
This function produces results using standard non-maximum suppression. The function takes into
account any probabilistic modeling method when computing the results. It can combine aleatoric uncertainty
from heteroscedastic regression and epistemic uncertainty from monte-carlo dropout for both classification and
regression results.
Args:
input_im (list): an input im list generated from dataset handler.
Returns:
result (instances): object instances
"""
outputs = self.retinanet_probabilistic_inference(input_im)
return inference_utils.general_topk_detection_postprocessing(input_im, outputs)
def post_processing_output_statistics(self, input_im):
"""
This function produces box covariance matrices using anchor statistics. Uses the fact that multiple anchors are
regressed to the same spatial location for clustering and extraction of box covariance matrix.
Args:
input_im (list): an input im list generated from dataset handler.
Returns:
result (instances): object instances
"""
outputs = self.retinanet_probabilistic_inference(input_im)
return inference_utils.general_output_statistics_postprocessing(
input_im,
outputs,
self.model.test_nms_thresh,
self.model.max_detections_per_image,
self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD)
    def post_processing_mc_dropout_ensembles(self, input_im):
        """
        This function produces results using multiple runs of MC dropout, through fusion before or after
        the non-maximum suppression step.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            # Pre-NMS fusion: the shared inference step already averages over
            # the MC-dropout runs before a single NMS pass.
            return self.post_processing_standard_nms(input_im)
        else:
            # Post-NMS fusion: run NMS per dropout run, then merge.
            outputs_dict = self.model(
                input_im,
                return_anchorwise_output=False,
                num_mc_dropout_runs=self.num_mc_dropout_runs)
            n_fms = len(self.model.in_features)
            # Split the stacked per-run feature maps into one dict per run
            # (n_fms feature maps each).
            outputs_list = [{key: value[i * n_fms:(i + 1) * n_fms] if value is not None else value for key,
                             value in outputs_dict.items()} for i in range(self.num_mc_dropout_runs)]

            # Merge results:
            results = [
                inference_utils.general_standard_nms_postprocessing(
                    input_im,
                    self.retinanet_probabilistic_inference(
                        input_im,
                        outputs=outputs),
                    self.model.test_nms_thresh,
                    self.model.max_detections_per_image) for outputs in outputs_list]

            # Append per-ensemble outputs after NMS has been performed.
            ensemble_pred_box_list = [
                result.pred_boxes.tensor for result in results]
            ensemble_pred_prob_vectors_list = [
                result.pred_cls_probs for result in results]
            ensembles_class_idxs_list = [
                result.pred_classes for result in results]
            ensembles_pred_box_covariance_list = [
                result.pred_boxes_covariance for result in results]

            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.model.test_nms_thresh,
                self.model.max_detections_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)
    def post_processing_ensembles(self, input_im, model_dict):
        """
        This function produces results using multiple runs of independently trained models, through fusion before or after
        the non-maximum suppression step.

        Args:
            input_im (list): an input im list generated from dataset handler.
            model_dict (dict): dictionary containing list of models comprising the ensemble.

        Returns:
            result (instances): object instances
        """
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            # Pre-NMS fusion: collect anchor-wise outputs from every member
            # and let the shared inference step average them before NMS.
            outputs_list = []

            for model in model_dict:
                outputs = model(input_im, return_anchorwise_output=True)
                outputs_list.append(outputs)

            outputs = self.retinanet_probabilistic_inference(
                input_im, ensemble_inference=True, outputs_list=outputs_list)

            return inference_utils.general_standard_nms_postprocessing(
                input_im, outputs, self.model.test_nms_thresh, self.model.max_detections_per_image)
        else:
            # Post-NMS fusion: run full NMS per ensemble member, then merge.
            # NOTE(review): self.model is rebound to each member in turn and
            # is left pointing at the LAST member afterwards; the thresholds
            # read below therefore come from that member -- confirm intended.
            outputs_list = []
            for model in model_dict:
                self.model = model
                outputs_list.append(
                    self.post_processing_standard_nms(input_im))

            # Merge results:
            ensemble_pred_box_list = []
            ensemble_pred_prob_vectors_list = []
            ensembles_class_idxs_list = []
            ensembles_pred_box_covariance_list = []
            for results in outputs_list:
                # Append per-ensemble outputs after NMS has been performed.
                ensemble_pred_box_list.append(results.pred_boxes.tensor)
                ensemble_pred_prob_vectors_list.append(results.pred_cls_probs)
                ensembles_class_idxs_list.append(results.pred_classes)
                ensembles_pred_box_covariance_list.append(
                    results.pred_boxes_covariance)

            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.model.test_nms_thresh,
                self.model.max_detections_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)
def post_processing_bayes_od(self, input_im):
"""
This function produces results using forms of bayesian inference instead of NMS for both category and box results.
Args:
input_im (list): an input im list generated from dataset handler.
Returns:
result (instances): object instances
"""
box_merge_mode = self.cfg.PROBABILISTIC_INFERENCE.BAYES_OD.BOX_MERGE_MODE
cls_merge_mode = self.cfg.PROBABILISTIC_INFERENCE.BAYES_OD.CLS_MERGE_MODE
outputs = self.retinanet_probabilistic_inference(input_im)
predicted_boxes, predicted_boxes_covariance, predicted_prob, classes_idxs, predicted_prob_vectors = outputs
keep = batched_nms(
predicted_boxes,
predicted_prob,
classes_idxs,
self.model.test_nms_thresh)
keep = keep[: self.model.max_detections_per_image]
match_quality_matrix = pairwise_iou(
Boxes(predicted_boxes), Boxes(predicted_boxes))
box_clusters_inds = match_quality_matrix[keep, :]
box_clusters_inds = box_clusters_inds > self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD
# Compute mean and covariance for every cluster.
predicted_prob_vectors_list = []
predicted_boxes_list = []
predicted_boxes_covariance_list = []
predicted_prob_vectors_centers = predicted_prob_vectors[keep]
for box_cluster, predicted_prob_vectors_center in zip(
box_clusters_inds, predicted_prob_vectors_centers):
cluster_categorical_params = predicted_prob_vectors[box_cluster]
center_binary_score, center_cat_idx = torch.max(
predicted_prob_vectors_center, 0)
cluster_binary_scores, cat_idx = cluster_categorical_params.max(
1)
class_similarity_idx = cat_idx == center_cat_idx
if cls_merge_mode == 'bayesian_inference':
predicted_prob_vectors_list.append(
cluster_categorical_params.mean(0).unsqueeze(0))
else:
predicted_prob_vectors_list.append(
predicted_prob_vectors_center.unsqueeze(0))
# Switch to numpy as torch.inverse is too slow.
cluster_means = predicted_boxes[box_cluster,
:][class_similarity_idx].cpu().numpy()
cluster_covs = predicted_boxes_covariance[box_cluster, :][class_similarity_idx].cpu(
).numpy()
predicted_box, predicted_box_covariance = inference_utils.bounding_box_bayesian_inference(
cluster_means, cluster_covs, box_merge_mode)
predicted_boxes_list.append(
torch.from_numpy(np.squeeze(predicted_box)))
predicted_boxes_covariance_list.append(
torch.from_numpy(predicted_box_covariance))
# Switch back to cuda for the remainder of the inference process.
result = Instances(
(input_im[0]['image'].shape[1],
input_im[0]['image'].shape[2]))
if len(predicted_boxes_list) > 0:
if cls_merge_mode == 'bayesian_inference':
predicted_prob_vectors = torch.cat(
predicted_prob_vectors_list, 0)
predicted_prob, classes_idxs = torch.max(
predicted_prob_vectors, 1)
elif cls_merge_mode == 'max_score':
predicted_prob_vectors = predicted_prob_vectors[keep]
predicted_prob = predicted_prob[keep]
classes_idxs = classes_idxs[keep]
result.pred_boxes = Boxes(
torch.stack(
predicted_boxes_list,
0).to(self.model.device))
result.scores = predicted_prob
result.pred_classes = classes_idxs
result.pred_cls_probs = predicted_prob_vectors
result.pred_boxes_covariance = torch.stack(
predicted_boxes_covariance_list, 0).to(self.model.device)
else:
result.pred_boxes = Boxes(predicted_boxes)
result.scores = torch.zeros(
predicted_boxes.shape[0]).to(
self.model.device)
result.pred_classes = classes_idxs
result.pred_cls_probs = predicted_prob_vectors
result.pred_boxes_covariance = torch.empty(
(predicted_boxes.shape + (4,))).to(self.model.device)
return result
| 23,910 | 44.894434 | 152 | py |
pmb-nll | pmb-nll-main/src/probabilistic_inference/probabilistic_rcnn_predictor.py | import numpy as np
import torch
# Detectron Imports
from detectron2.layers import batched_nms
from detectron2.structures import Boxes, Instances, pairwise_iou
# Project Imports
from probabilistic_inference import inference_utils
from probabilistic_inference.inference_core import ProbabilisticPredictor
from probabilistic_modeling.modeling_utils import covariance_output_to_cholesky, clamp_log_variance
class GeneralizedRcnnProbabilisticPredictor(ProbabilisticPredictor):
    """Probabilistic inference wrapper for generalized R-CNN detectors.

    Provides a shared probabilistic inference step
    (:meth:`generalized_rcnn_probabilistic_inference`) plus several
    post-processing strategies: standard NMS, top-k selection, output
    statistics, MC-dropout / black-box ensembles, and Bayes-OD.
    """

    def __init__(self, cfg):
        super().__init__(cfg)

        # Define test score threshold and NMS parameters from the wrapped model.
        self.test_score_thres = self.model.roi_heads.box_predictor.test_score_thresh
        self.test_nms_thresh = self.model.roi_heads.box_predictor.test_nms_thresh
        self.test_topk_per_image = self.model.roi_heads.box_predictor.test_topk_per_image

        # Create transform used to map sampled deltas back to boxes.
        self.sample_box2box_transform = inference_utils.SampleBox2BoxTransform(
            self.cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)

        # Put proposal generator in eval mode if dropout enabled, so MC-dropout
        # only perturbs the box head and not the proposals themselves.
        if self.mc_dropout_enabled:
            self.model.proposal_generator.eval()

    def generalized_rcnn_probabilistic_inference(self,
                                                 input_im,
                                                 outputs=None,
                                                 ensemble_inference=False,
                                                 outputs_list=None):
        """
        General RetinaNet probabilistic anchor-wise inference. Preliminary inference step for many post-processing
        based inference methods such as standard_nms, output_statistics, and bayes_od.

        Args:
            input_im (list): an input im list generated from dataset handler.
            outputs (list): outputs from model.forward(). will be computed internally if not provided.
            ensemble_inference (bool): True if ensembles are used for inference. If set to true, outputs_list must be externally provided.
            outputs_list (list): List of model() outputs, usually generated from ensembles of models.

        Returns:
            all_predicted_boxes,
            all_predicted_boxes_covariance (Tensor): Nx4x4 vectors used
            all_predicted_prob (Tensor): Nx1 scores which represent max of all_pred_prob_vectors. For usage in NMS and mAP computation.
            all_classes_idxs (Tensor): Nx1 Class ids to be used for NMS.
            all_predicted_prob_vectors (Tensor): NxK tensor where K is the number of classes.
            ppp: PPP intensity parameters from the model output, or [] when absent.
        """
        # Epistemic mode: aggregate multiple stochastic forward passes
        # (MC-dropout) or multiple models (ensembles) into a single outputs dict.
        is_epistemic = ((self.mc_dropout_enabled and self.num_mc_dropout_runs > 1)
                        or ensemble_inference) and outputs is None
        if is_epistemic:
            if self.mc_dropout_enabled and self.num_mc_dropout_runs > 1:
                outputs_list = self.model(
                    input_im,
                    return_anchorwise_output=True,
                    num_mc_dropout_runs=self.num_mc_dropout_runs)

            proposals_list = [outputs['proposals']
                              for outputs in outputs_list]
            box_delta_list = [outputs['box_delta']
                              for outputs in outputs_list]
            box_cls_list = [outputs['box_cls'] for outputs in outputs_list]
            box_reg_var_list = [outputs['box_reg_var']
                                for outputs in outputs_list]
            box_cls_var_list = [outputs['box_cls_var']
                                for outputs in outputs_list]
            outputs = dict()

            # Concatenate proposals from every run into the first run's
            # Instances object so downstream indexing stays consistent.
            proposals_all = proposals_list[0].proposal_boxes.tensor
            for i in torch.arange(1, len(outputs_list)):
                proposals_all = torch.cat(
                    (proposals_all, proposals_list[i].proposal_boxes.tensor), 0)
            proposals_list[0].proposal_boxes.tensor = proposals_all
            outputs['proposals'] = proposals_list[0]

            box_delta = torch.cat(box_delta_list, 0)
            box_cls = torch.cat(box_cls_list, 0)
            outputs['box_delta'] = box_delta
            outputs['box_cls'] = box_cls

            # Variance heads are optional; propagate None when absent.
            if box_reg_var_list[0] is not None:
                box_reg_var = torch.cat(box_reg_var_list, 0)
            else:
                box_reg_var = None
            outputs['box_reg_var'] = box_reg_var

            if box_cls_var_list[0] is not None:
                box_cls_var = torch.cat(box_cls_var_list, 0)
            else:
                box_cls_var = None
            outputs['box_cls_var'] = box_cls_var
        elif outputs is None:
            outputs = self.model(input_im,
                                 return_anchorwise_output=True)

        proposals = outputs['proposals']
        box_cls = outputs['box_cls']
        box_delta = outputs['box_delta']

        if self.model.cls_var_loss == 'evidential':
            # Evidential head: normalize Dirichlet alphas into probabilities.
            box_dir_alphas = inference_utils.get_dir_alphas(box_cls)
            box_cls = box_dir_alphas / box_dir_alphas.sum(1, keepdim=True)
        else:
            if outputs['box_cls_var'] is not None:
                # Sample logits from the predicted Gaussian and average the
                # resulting softmax probabilities (MC marginalization).
                box_cls_var = outputs['box_cls_var']
                box_cls_dists = torch.distributions.normal.Normal(
                    box_cls, scale=torch.sqrt(torch.exp(box_cls_var)))
                box_cls = box_cls_dists.rsample(
                    (self.model.cls_var_num_samples,))
                box_cls = torch.nn.functional.softmax(box_cls, dim=-1)
                box_cls = box_cls.mean(0)
            else:
                box_cls = torch.nn.functional.softmax(box_cls, dim=-1)

        # Remove background category (last column) before score thresholding.
        scores = box_cls[:, :-1]

        num_bbox_reg_classes = box_delta.shape[1] // 4
        box_delta = box_delta.reshape(-1, 4)
        box_delta = box_delta.view(-1, num_bbox_reg_classes, 4)
        filter_mask = scores > self.test_score_thres

        # filter_inds columns: [proposal index, class index].
        filter_inds = filter_mask.nonzero(as_tuple=False)

        if num_bbox_reg_classes == 1:
            box_delta = box_delta[filter_inds[:, 0], 0]
        else:
            box_delta = box_delta[filter_mask]

        scores = scores[filter_mask]

        proposal_boxes = proposals.proposal_boxes.tensor[filter_inds[:, 0]]

        if outputs['box_reg_var'] is not None:
            box_reg_var = outputs['box_reg_var']
            box_reg_var = box_reg_var.reshape(-1, self.model.bbox_cov_dims)
            box_reg_var = box_reg_var.view(-1,
                                           num_bbox_reg_classes,
                                           self.model.bbox_cov_dims)
            if num_bbox_reg_classes == 1:
                box_reg_var = box_reg_var[filter_inds[:, 0], 0]
            else:
                box_reg_var = box_reg_var[filter_mask]

            # Reconstruct cholesky decomposition of box covariance
            # matrix
            diag_vars = clamp_log_variance(box_reg_var)
            cholesky_decomp = covariance_output_to_cholesky(diag_vars)

            if self.use_mc_sampling:
                # Generate multivariate samples to be used for monte-carlo simulation. We can afford much more samples
                # here since the matrix dimensions are much smaller and therefore
                # have much less memory footprint. Keep 100 or less to maintain
                # reasonable runtime speed.
                if self.cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE == 'gaussian':
                    multivariate_normal_samples = torch.distributions.MultivariateNormal(
                        box_delta, scale_tril=cholesky_decomp)
                elif self.cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.DISTRIBUTION_TYPE == 'laplacian':
                    # Laplace scale b relates to std via std = b * sqrt(2).
                    multivariate_normal_samples = torch.distributions.Laplace(
                        box_delta,
                        scale=cholesky_decomp.diagonal(dim1=-2, dim2=-1) / np.sqrt(2.0))

                # Define monte-carlo samples
                distributions_samples = multivariate_normal_samples.rsample(
                    (1000,))
                distributions_samples = torch.transpose(
                    torch.transpose(distributions_samples, 0, 1), 1, 2)
                samples_proposals = torch.repeat_interleave(
                    proposal_boxes.unsqueeze(2), 1000, dim=2)

                # Transform samples from deltas to boxes
                t_dist_samples = self.sample_box2box_transform.apply_samples_deltas(
                    distributions_samples, samples_proposals)

                # Compute samples mean and covariance matrices.
                boxes, boxes_covars = inference_utils.compute_mean_covariance_torch(
                    t_dist_samples)
            else:
                # Closed form: transform the mean deltas and propagate the
                # covariance as L @ L^T from the cholesky factor.
                boxes = self.model.roi_heads.box_predictor.box2box_transform.apply_deltas(
                    box_delta, proposal_boxes)
                boxes_covars = torch.matmul(
                    cholesky_decomp, torch.transpose(cholesky_decomp, -1, -2))
        else:
            # predict boxes without covariance (deterministic regression head)
            boxes = self.model.roi_heads.box_predictor.box2box_transform.apply_deltas(
                box_delta, proposal_boxes)
            boxes_covars = []

        if 'ppp' in outputs:
            ppp = outputs['ppp']
        else:
            ppp = []

        return boxes, boxes_covars, scores, filter_inds[:,
                                                        1], box_cls[filter_inds[:, 0]], ppp

    def post_processing_standard_nms(self, input_im):
        """
        This function produces results using standard non-maximum suppression. The function takes into
        account any probabilistic modeling method when computing the results.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        outputs = self.generalized_rcnn_probabilistic_inference(input_im)

        return inference_utils.general_standard_nms_postprocessing(
            input_im, outputs, self.test_nms_thresh, self.test_topk_per_image)

    def post_processing_topk_detections(self, input_im):
        """
        This function produces results using topk selection based on confidence scores.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        outputs = self.generalized_rcnn_probabilistic_inference(input_im)

        return inference_utils.general_topk_detection_postprocessing(input_im, outputs)

    def post_processing_output_statistics(self, input_im):
        """
        This function produces results using anchor statistics.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        outputs = self.generalized_rcnn_probabilistic_inference(input_im)

        return inference_utils.general_output_statistics_postprocessing(
            input_im,
            outputs,
            self.test_nms_thresh,
            self.test_topk_per_image,
            self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD)

    def post_processing_mc_dropout_ensembles(self, input_im):
        """
        This function produces results using monte-carlo dropout ensembles.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            # In generalized rcnn models, association cannot be achieved on an anchor level when using
            # dropout as anchor order might shift. To overcome this problem, the anchor statistics function
            # is used to perform the association and to fuse covariance
            # results.
            return self.post_processing_output_statistics(input_im)
        else:
            outputs_list = self.model(
                input_im,
                return_anchorwise_output=False,
                num_mc_dropout_runs=self.num_mc_dropout_runs)

            # Merge results: run standard NMS per stochastic forward pass.
            results = [
                inference_utils.general_standard_nms_postprocessing(
                    input_im,
                    self.generalized_rcnn_probabilistic_inference(
                        input_im,
                        outputs=outputs),
                    self.test_nms_thresh,
                    self.test_topk_per_image) for outputs in outputs_list]

            # Append per-ensemble outputs after NMS has been performed.
            ensemble_pred_box_list = [
                result.pred_boxes.tensor for result in results]
            ensemble_pred_prob_vectors_list = [
                result.pred_cls_probs for result in results]
            ensembles_class_idxs_list = [
                result.pred_classes for result in results]
            ensembles_pred_box_covariance_list = [
                result.pred_boxes_covariance for result in results]

            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.test_nms_thresh,
                self.test_topk_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                is_generalized_rcnn=True,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)

    def post_processing_ensembles(self, input_im, model_dict):
        """Produce results from an ensemble of independently trained models.

        Args:
            input_im (list): an input im list generated from dataset handler.
            model_dict: iterable of models forming the ensemble.

        Returns:
            result (instances): object instances
        """
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            outputs_list = []
            for model in model_dict:
                outputs = model(input_im, return_anchorwise_output=True)
                outputs_list.append(outputs)

            outputs = self.generalized_rcnn_probabilistic_inference(
                input_im, ensemble_inference=True, outputs_list=outputs_list)

            return inference_utils.general_output_statistics_postprocessing(
                input_im,
                outputs,
                self.test_nms_thresh,
                self.test_topk_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD)
        else:
            outputs_list = []
            for model in model_dict:
                # NOTE: self.model is rebound per ensemble member so the
                # standard NMS path uses each member's own parameters.
                self.model = model

                outputs_list.append(
                    self.post_processing_standard_nms(input_im))

            # Merge results:
            ensemble_pred_box_list = []
            ensemble_pred_prob_vectors_list = []
            ensembles_class_idxs_list = []
            ensembles_pred_box_covariance_list = []
            for results in outputs_list:
                # Append per-ensemble outputs after NMS has been performed.
                ensemble_pred_box_list.append(results.pred_boxes.tensor)
                ensemble_pred_prob_vectors_list.append(results.pred_cls_probs)
                ensembles_class_idxs_list.append(results.pred_classes)
                ensembles_pred_box_covariance_list.append(
                    results.pred_boxes_covariance)

            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.test_nms_thresh,
                self.test_topk_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                is_generalized_rcnn=True,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)

    def post_processing_bayes_od(self, input_im):
        """
        This function produces results using forms of bayesian inference instead of NMS for both category
        and box results.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        box_merge_mode = self.cfg.PROBABILISTIC_INFERENCE.BAYES_OD.BOX_MERGE_MODE
        cls_merge_mode = self.cfg.PROBABILISTIC_INFERENCE.BAYES_OD.CLS_MERGE_MODE

        outputs = self.generalized_rcnn_probabilistic_inference(input_im)
        # BUGFIX: generalized_rcnn_probabilistic_inference returns a 6-tuple
        # whose last element is the PPP intensity; the previous 5-name unpack
        # raised a ValueError at runtime. The PPP term is unused by Bayes-OD.
        (predicted_boxes, predicted_boxes_covariance, predicted_prob,
         classes_idxs, predicted_prob_vectors, _ppp) = outputs

        keep = batched_nms(
            predicted_boxes,
            predicted_prob,
            classes_idxs,
            self.test_nms_thresh)

        keep = keep[: self.test_topk_per_image]

        match_quality_matrix = pairwise_iou(
            Boxes(predicted_boxes), Boxes(predicted_boxes))

        box_clusters_inds = match_quality_matrix[keep, :]
        box_clusters_inds = box_clusters_inds > self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD

        # Compute mean and covariance for every cluster.
        predicted_boxes_list = []
        predicted_boxes_covariance_list = []
        predicted_prob_vectors_list = []

        predicted_prob_vectors_centers = predicted_prob_vectors[keep]
        for box_cluster, predicted_prob_vectors_center in zip(
                box_clusters_inds, predicted_prob_vectors_centers):

            # Ignore background categories provided by detectron2 inference
            cluster_categorical_params = predicted_prob_vectors[box_cluster]
            _, center_cat_idx = torch.max(predicted_prob_vectors_center, 0)
            _, cat_idx = cluster_categorical_params.max(1)
            class_similarity_idx = cat_idx == center_cat_idx

            if cls_merge_mode == 'bayesian_inference':
                cluster_categorical_params = cluster_categorical_params[class_similarity_idx]
                predicted_prob_vectors_list.append(
                    cluster_categorical_params.mean(0).unsqueeze(0))
            else:
                predicted_prob_vectors_list.append(
                    predicted_prob_vectors_center.unsqueeze(0))

            # Switch to numpy as torch.inverse is too slow.
            cluster_means = predicted_boxes[box_cluster,
                                            :][class_similarity_idx].cpu().numpy()
            cluster_covs = predicted_boxes_covariance[box_cluster, :][class_similarity_idx].cpu(
            ).numpy()

            predicted_box, predicted_box_covariance = inference_utils.bounding_box_bayesian_inference(
                cluster_means, cluster_covs, box_merge_mode)
            predicted_boxes_list.append(
                torch.from_numpy(np.squeeze(predicted_box)))
            predicted_boxes_covariance_list.append(
                torch.from_numpy(predicted_box_covariance))

        # Switch back to cuda for the remainder of the inference process.
        result = Instances(
            (input_im[0]['image'].shape[1],
             input_im[0]['image'].shape[2]))

        if len(predicted_boxes_list) > 0:
            if cls_merge_mode == 'bayesian_inference':
                predicted_prob_vectors = torch.cat(
                    predicted_prob_vectors_list, 0)
                # Drop the background column before taking the max class.
                predicted_prob, classes_idxs = torch.max(
                    predicted_prob_vectors[:, :-1], 1)
            elif cls_merge_mode == 'max_score':
                predicted_prob_vectors = predicted_prob_vectors[keep]
                predicted_prob = predicted_prob[keep]
                classes_idxs = classes_idxs[keep]

            result.pred_boxes = Boxes(
                torch.stack(
                    predicted_boxes_list,
                    0).to(self.model.device))
            result.scores = predicted_prob
            result.pred_classes = classes_idxs
            result.pred_cls_probs = predicted_prob_vectors
            result.pred_boxes_covariance = torch.stack(
                predicted_boxes_covariance_list, 0).to(self.model.device)
        else:
            result.pred_boxes = Boxes(predicted_boxes)
            result.scores = torch.zeros(
                predicted_boxes.shape[0]).to(
                self.model.device)
            result.pred_classes = classes_idxs
            result.pred_cls_probs = predicted_prob_vectors
            result.pred_boxes_covariance = torch.empty(
                (predicted_boxes.shape + (4,))).to(self.model.device)

        return result
| 20,442 | 43.733042 | 150 | py |
pmb-nll | pmb-nll-main/src/probabilistic_inference/inference_utils.py | import os
import numpy as np
import torch
from detectron2.layers import batched_nms
# Detectron imports
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.structures import Boxes, BoxMode, Instances, pairwise_iou
from PIL import Image
# Project imports
from probabilistic_inference.image_corruptions import corruption_dict, corruption_tuple
from probabilistic_inference.probabilistic_detr_predictor import (
DetrProbabilisticPredictor,
)
from probabilistic_inference.probabilistic_rcnn_predictor import (
GeneralizedRcnnProbabilisticPredictor,
)
from probabilistic_inference.probabilistic_retinanet_predictor import (
RetinaNetProbabilisticPredictor,
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def build_predictor(cfg):
    """
    Builds probabilistic predictor according to architecture in config file.

    Args:
        cfg (CfgNode): detectron2 configuration node.

    Returns:
        Instance of the correct predictor.
    """
    # Dispatch table mapping meta-architecture names to predictor classes.
    predictor_registry = {
        "ProbabilisticRetinaNet": RetinaNetProbabilisticPredictor,
        "ProbabilisticGeneralizedRCNN": GeneralizedRcnnProbabilisticPredictor,
        "ProbabilisticDetr": DetrProbabilisticPredictor,
    }
    architecture = cfg.MODEL.META_ARCHITECTURE
    predictor_cls = predictor_registry.get(architecture)
    if predictor_cls is None:
        raise ValueError("Invalid meta-architecture {}.".format(architecture))
    return predictor_cls(cfg)
def general_standard_nms_postprocessing(
    input_im, outputs, nms_threshold=0.5, max_detections_per_image=100
):
    """
    Build final Instances from raw probabilistic outputs via standard NMS.

    Args:
        input_im (list): an input im list generated from dataset handler.
        outputs (list): output list form model specific inference function
        nms_threshold (float): non-maximum suppression threshold
        max_detections_per_image (int): maximum allowed number of detections per image.

    Returns:
        result (Instances): final results after nms
    """
    (boxes, boxes_cov, scores, class_idxs, prob_vectors, ppp) = outputs

    # Per-class NMS, then cap the number of surviving detections.
    keep_idxs = batched_nms(boxes, scores, class_idxs, nms_threshold)[
        :max_detections_per_image
    ]

    image_shape = input_im[0]["image"].shape
    result = Instances((image_shape[1], image_shape[2]))
    result.pred_boxes = Boxes(boxes[keep_idxs])
    result.scores = scores[keep_idxs]
    result.pred_classes = class_idxs[keep_idxs]
    result.pred_cls_probs = prob_vectors[keep_idxs]

    # Attach PPP intensity parameters when available; classical inference has
    # no PPP term, so mark the weights as NaN in that case.
    if isinstance(ppp, dict):
        num_kept = len(result.pred_boxes)
        for name, value in ppp.items():
            result.set(
                "ppp_param_" + name,
                torch.tensor([value] * num_kept).to(device),
            )
    else:
        result.pred_ppp_weights = np.nan * torch.ones(len(result.pred_boxes)).to(device)

    # Classical inference provides no covariance matrices; fall back to zeros.
    if isinstance(boxes_cov, torch.Tensor):
        result.pred_boxes_covariance = boxes_cov[keep_idxs]
    else:
        result.pred_boxes_covariance = torch.zeros(boxes[keep_idxs].shape + (4,)).to(
            device
        )
    return result
def general_topk_detection_postprocessing(
    input_im, outputs, max_detections_per_image=100
):
    """
    Build final Instances by keeping only the top-k scoring detections.

    Args:
        input_im (list): an input im list generated from dataset handler.
        outputs (list): output list form model specific inference function

    Returns:
        result (Instances): final results after nms
    """
    (boxes, boxes_cov, scores, class_idxs, prob_vectors, ppp) = outputs

    # Indices of the highest scores; k is capped by the number of detections.
    keep_idxs = torch.topk(scores, min(max_detections_per_image, len(scores))).indices

    image_shape = input_im[0]["image"].shape
    result = Instances((image_shape[1], image_shape[2]))
    result.pred_boxes = Boxes(boxes[keep_idxs])
    result.scores = scores[keep_idxs]
    result.pred_classes = class_idxs[keep_idxs]
    result.pred_cls_probs = prob_vectors[keep_idxs]

    # Attach PPP intensity parameters when available; classical inference has
    # no PPP term, so mark the weights as NaN in that case.
    if isinstance(ppp, dict):
        num_kept = len(result.pred_boxes)
        for name, value in ppp.items():
            result.set(
                "ppp_param_" + name,
                torch.tensor([value] * num_kept).to(device),
            )
    else:
        result.pred_ppp_weights = np.nan * torch.ones(len(result.pred_boxes)).to(device)

    # Classical inference provides no covariance matrices; fall back to zeros.
    if isinstance(boxes_cov, torch.Tensor):
        result.pred_boxes_covariance = boxes_cov[keep_idxs]
    else:
        result.pred_boxes_covariance = torch.zeros(boxes[keep_idxs].shape + (4,)).to(
            device
        )
    return result
def general_output_statistics_postprocessing(
    input_im,
    outputs,
    nms_threshold=0.5,
    max_detections_per_image=100,
    affinity_threshold=0.7,
):
    """
    Cluster raw detections around NMS survivors and fuse each cluster into a
    single mean/covariance estimate.

    Args:
        input_im (list): an input im list generated from dataset handler.
        outputs (list): output list form model specific inference function
        nms_threshold (float): non-maximum suppression threshold between 0-1
        max_detections_per_image (int): maximum allowed number of detections per image.
        affinity_threshold (float): cluster affinity threshold between 0-1

    Returns:
        result (Instances): final results after nms
    """
    # NOTE(review): the PPP term is unpacked here but never attached to the
    # result, unlike general_standard_nms_postprocessing — confirm intended.
    (
        predicted_boxes,
        predicted_boxes_covariance,
        predicted_prob,
        classes_idxs,
        predicted_prob_vectors,
        ppp,
    ) = outputs

    # Get pairwise iou matrix
    match_quality_matrix = pairwise_iou(Boxes(predicted_boxes), Boxes(predicted_boxes))

    # Get cluster centers using standard nms. Much faster than sequential
    # clustering.
    keep = batched_nms(predicted_boxes, predicted_prob, classes_idxs, nms_threshold)
    keep = keep[:max_detections_per_image]

    # One boolean row per NMS survivor: which detections belong to its cluster.
    clusters_inds = match_quality_matrix[keep, :]
    clusters_inds = clusters_inds > affinity_threshold

    # Compute mean and covariance for every cluster.
    predicted_prob_vectors_list = []
    predicted_boxes_list = []
    predicted_boxes_covariance_list = []
    for cluster_idxs, center_idx in zip(clusters_inds, keep):

        if cluster_idxs.sum(0) >= 2:
            # Make sure to only select cluster members of same class as center
            cluster_center_classes_idx = classes_idxs[center_idx]
            cluster_classes_idxs = classes_idxs[cluster_idxs]
            class_similarity_idxs = cluster_classes_idxs == cluster_center_classes_idx

            # Grab cluster
            box_cluster = predicted_boxes[cluster_idxs, :][class_similarity_idxs, :]

            cluster_mean = box_cluster.mean(0)

            # Sample covariance of the cluster members (epistemic part);
            # max(..., 1.0) guards the divisor for degenerate clusters.
            residuals = (box_cluster - cluster_mean).unsqueeze(2)
            cluster_covariance = torch.sum(
                torch.matmul(residuals, torch.transpose(residuals, 2, 1)), 0
            ) / max((box_cluster.shape[0] - 1), 1.0)

            # Assume final result as mean and covariance of gaussian mixture of cluster members if
            # covariance is provided by neural network.
            if predicted_boxes_covariance is not None:
                if len(predicted_boxes_covariance) > 0:
                    cluster_covariance = (
                        cluster_covariance
                        + predicted_boxes_covariance[cluster_idxs, :][
                            class_similarity_idxs, :
                        ].mean(0)
                    )

            # Compute average over cluster probabilities
            cluster_probs_vector = predicted_prob_vectors[cluster_idxs, :][
                class_similarity_idxs, :
            ].mean(0)
        else:
            # Singleton cluster: keep the center's own estimates; a small
            # diagonal term keeps the covariance well conditioned.
            cluster_mean = predicted_boxes[center_idx]
            cluster_probs_vector = predicted_prob_vectors[center_idx]
            cluster_covariance = 1e-4 * torch.eye(4, 4).to(device)
            if predicted_boxes_covariance is not None:
                if len(predicted_boxes_covariance) > 0:
                    cluster_covariance = predicted_boxes_covariance[center_idx]

        predicted_boxes_list.append(cluster_mean)
        predicted_boxes_covariance_list.append(cluster_covariance)
        predicted_prob_vectors_list.append(cluster_probs_vector)

    result = Instances((input_im[0]["image"].shape[1], input_im[0]["image"].shape[2]))

    if len(predicted_boxes_list) > 0:
        # We do not average the probability vectors for this post processing method. Averaging results in
        # very low mAP due to mixing with low scoring detection instances.
        result.pred_boxes = Boxes(torch.stack(predicted_boxes_list, 0))
        predicted_prob_vectors = torch.stack(predicted_prob_vectors_list, 0)
        predicted_prob, classes_idxs = torch.max(predicted_prob_vectors, 1)
        result.scores = predicted_prob
        result.pred_classes = classes_idxs
        result.pred_cls_probs = predicted_prob_vectors
        result.pred_boxes_covariance = torch.stack(predicted_boxes_covariance_list, 0)
    else:
        # No survivors: emit empty/zero fields of the expected shapes.
        result.pred_boxes = Boxes(predicted_boxes)
        result.scores = torch.zeros(predicted_boxes.shape[0]).to(device)
        result.pred_classes = classes_idxs
        result.pred_cls_probs = predicted_prob_vectors
        result.pred_boxes_covariance = torch.empty((predicted_boxes.shape + (4,))).to(
            device
        )
    return result
def general_black_box_ensembles_post_processing(
    input_im,
    ensemble_pred_box_list,
    ensembles_class_idxs_list,
    ensemble_pred_prob_vectors_list,
    ensembles_pred_box_covariance_list,
    nms_threshold=0.5,
    max_detections_per_image=100,
    affinity_threshold=0.7,
    is_generalized_rcnn=False,
    merging_method="mixture_of_gaussians",
):
    """
    Fuse per-member ensemble detections by sequential IoU clustering.

    Args:
        input_im (list): an input im list generated from dataset handler.
        ensemble_pred_box_list (list): predicted box list
        ensembles_class_idxs_list (list): predicted classes list
        ensemble_pred_prob_vectors_list (list): predicted probability vector list
        ensembles_pred_box_covariance_list (list): predicted covariance matrices
        nms_threshold (float): non-maximum suppression threshold between 0-1
        max_detections_per_image (int): Number of maximum allowable detections per image.
        affinity_threshold (float): cluster affinity threshold between 0-1
        is_generalized_rcnn (bool): used to handle category selection by removing background class.
        merging_method (str): default is gaussian mixture model. use 'bayesian_inference' to perform gaussian inference
        similar to bayesod.

    Returns:
        result (Instances): final results after nms
    """
    # Flatten all ensemble members into single tensors.
    predicted_boxes = torch.cat(ensemble_pred_box_list, 0)
    predicted_boxes_covariance = torch.cat(ensembles_pred_box_covariance_list, 0)
    predicted_prob_vectors = torch.cat(ensemble_pred_prob_vectors_list, 0)
    predicted_class_idxs = torch.cat(ensembles_class_idxs_list, 0)

    # Compute iou between all output boxes and each other output box.
    match_quality_matrix = pairwise_iou(Boxes(predicted_boxes), Boxes(predicted_boxes))

    # Perform basic sequential clustering.
    clusters = []
    for i in range(match_quality_matrix.shape[0]):
        # Check if current box is already a member of any previous cluster.
        if i != 0:
            all_clusters = torch.cat(clusters, 0)
            if (all_clusters == i).any():
                continue
        # Only add if boxes have the same category.
        cluster_membership_test = (match_quality_matrix[i, :] >= affinity_threshold) & (
            predicted_class_idxs == predicted_class_idxs[i]
        )
        # torch.where returns a one-element tuple of index tensors;
        # extend() appends that index tensor as one cluster.
        inds = torch.where(cluster_membership_test)
        clusters.extend(inds)

    # Compute mean and covariance for every cluster.
    predicted_boxes_list = []
    predicted_boxes_covariance_list = []
    predicted_prob_vectors_list = []

    # Compute cluster mean and covariance matrices.
    for cluster in clusters:
        box_cluster = predicted_boxes[cluster]
        box_cluster_covariance = predicted_boxes_covariance[cluster]
        if box_cluster.shape[0] >= 2:
            if merging_method == "mixture_of_gaussians":
                cluster_mean = box_cluster.mean(0)

                # Compute epistemic covariance
                residuals = (box_cluster - cluster_mean).unsqueeze(2)
                predicted_covariance = torch.sum(
                    torch.matmul(residuals, torch.transpose(residuals, 2, 1)), 0
                ) / (box_cluster.shape[0] - 1)

                # Add epistemic covariance
                predicted_covariance = (
                    predicted_covariance + box_cluster_covariance.mean(0)
                )

                predicted_boxes_list.append(cluster_mean)
                predicted_boxes_covariance_list.append(predicted_covariance)
                predicted_prob_vectors_list.append(
                    predicted_prob_vectors[cluster].mean(0)
                )
            else:
                # Precision-weighted Gaussian fusion (BayesOD-style); done in
                # numpy because torch.inverse is slow for many small matrices.
                cluster_mean, predicted_covariance = bounding_box_bayesian_inference(
                    box_cluster.cpu().numpy(),
                    box_cluster_covariance.cpu().numpy(),
                    box_merge_mode="bayesian_inference",
                )
                cluster_mean = torch.as_tensor(cluster_mean).to(device)
                predicted_covariance = torch.as_tensor(predicted_covariance).to(device)
                predicted_boxes_list.append(cluster_mean)
                predicted_boxes_covariance_list.append(predicted_covariance)
                predicted_prob_vectors_list.append(
                    predicted_prob_vectors[cluster].mean(0)
                )
        else:
            # Singleton cluster: mean(0) just squeezes the leading dimension.
            predicted_boxes_list.append(predicted_boxes[cluster].mean(0))
            predicted_boxes_covariance_list.append(
                predicted_boxes_covariance[cluster].mean(0)
            )
            predicted_prob_vectors_list.append(predicted_prob_vectors[cluster].mean(0))

    result = Instances((input_im[0]["image"].shape[1], input_im[0]["image"].shape[2]))

    if len(predicted_boxes_list) > 0:
        predicted_prob_vectors = torch.stack(predicted_prob_vectors_list, 0)
        # Remove background class if generalized rcnn
        if is_generalized_rcnn:
            predicted_prob_vectors_no_bkg = predicted_prob_vectors[:, :-1]
        else:
            predicted_prob_vectors_no_bkg = predicted_prob_vectors
        predicted_prob, classes_idxs = torch.max(predicted_prob_vectors_no_bkg, 1)
        predicted_boxes = torch.stack(predicted_boxes_list, 0)

        # We want to keep the maximum allowed boxes per image to be consistent
        # with the rest of the methods. However, just sorting by score or uncertainty will lead to a lot of
        # redundant detections so we have to use one more NMS step.
        keep = batched_nms(predicted_boxes, predicted_prob, classes_idxs, nms_threshold)
        keep = keep[:max_detections_per_image]

        result.pred_boxes = Boxes(predicted_boxes[keep])
        result.scores = predicted_prob[keep]
        result.pred_classes = classes_idxs[keep]
        result.pred_cls_probs = predicted_prob_vectors[keep]
        result.pred_boxes_covariance = torch.stack(predicted_boxes_covariance_list, 0)[
            keep
        ]
    else:
        # No clusters: emit empty/zero fields of the expected shapes.
        result.pred_boxes = Boxes(predicted_boxes)
        result.scores = torch.zeros(predicted_boxes.shape[0]).to(device)
        result.pred_classes = predicted_class_idxs
        result.pred_cls_probs = predicted_prob_vectors
        result.pred_boxes_covariance = torch.empty((predicted_boxes.shape + (4,))).to(
            device
        )
    return result
def bounding_box_bayesian_inference(cluster_means, cluster_covs, box_merge_mode):
    """
    Fuse a cluster of box estimates into one mean and covariance.

    Args:
        cluster_means (nd array): cluster box means.
        cluster_covs (nd array): cluster box covariance matrices.
        box_merge_mode (str): whether to use covariance intersection or not

    Returns:
        final_mean (nd array): cluster fused mean.
        final_cov (nd array): cluster fused covariance matrix.
    """
    precisions = np.linalg.inv(cluster_covs)
    means_col = np.expand_dims(cluster_means, 2)

    if box_merge_mode == "bayesian_inference":
        # Standard precision-weighted Gaussian fusion.
        fused_cov = np.linalg.inv(precisions.sum(0))
        weighted_sum = np.matmul(precisions, means_col).sum(0)
        fused_mean = np.squeeze(np.matmul(fused_cov, weighted_sum))
    elif box_merge_mode == "covariance_intersection":
        # Fast covariance intersection: per-member weights (omegas) computed
        # from determinants of the precision matrices.
        complement_precs = precisions.sum(0) - precisions
        det_precs = np.linalg.det(precisions)
        det_total = np.linalg.det(precisions.sum(0))
        det_complement = np.linalg.det(complement_precs)
        omegas = (det_total - det_complement + det_precs) / (
            precisions.shape[0] * det_total + (det_precs - det_complement).sum(0)
        )
        weighted_precs = np.expand_dims(omegas, (1, 2)) * precisions
        fused_cov = np.linalg.inv(weighted_precs.sum(0))
        fused_mean = np.squeeze(
            np.matmul(fused_cov, np.matmul(weighted_precs, means_col).sum(0))
        )
    return fused_mean, fused_cov
def compute_mean_covariance_torch(input_samples):
    """
    Function for efficient computation of mean and covariance matrix in pytorch.

    Args:
        input_samples(list): list of tensors from M stochastic monte-carlo sampling runs, each containing N x k tensors.

    Returns:
        predicted_mean(Tensor): an Nxk tensor containing the predicted mean.
        predicted_covariance(Tensor): an Nxkxk tensor containing the predicted covariance matrix.
    """
    # Accept either an already-stacked N x k x M tensor or a list of M
    # N x k tensors which is stacked along a new trailing dimension.
    if isinstance(input_samples, torch.Tensor):
        sample_count = input_samples.shape[2]
        stacked = input_samples
    else:
        sample_count = len(input_samples)
        stacked = torch.stack(input_samples, 2)

    # Mean over the sample dimension (kept for broadcasting below).
    sample_mean = torch.mean(stacked, 2, keepdim=True)

    # Unbiased sample covariance from the outer products of the residuals.
    deviations = torch.transpose(torch.unsqueeze(stacked - sample_mean, 1), 1, 3)
    outer_products = torch.matmul(deviations, torch.transpose(deviations, 3, 2))
    sample_covariance = torch.sum(outer_products, 1) / (sample_count - 1)

    return sample_mean.squeeze(2), sample_covariance
def probabilistic_detector_postprocess(results, output_height, output_width):
    """
    Resize the output instances and scales estimated covariance matrices.
    The input images are often resized when entering an object detector.
    As a result, we often need the outputs of the detector in a different
    resolution from its inputs.

    Args:
        results (Dict): the raw outputs from the probabilistic detector.
            `results.image_size` contains the input image resolution the detector sees.
            This object might be modified in-place.
        output_height: the desired output resolution.
        output_width: the desired output resolution.

    Returns:
        results (Dict): dictionary updated with rescaled boxes and covariance matrices.
    """
    # Per-axis scale factors from the network's input size to the output size.
    scale_x, scale_y = (
        output_width / results.image_size[1],
        output_height / results.image_size[0],
    )
    # Rebuild Instances at the target resolution, keeping all fields.
    results = Instances((output_height, output_width), **results.get_fields())

    output_boxes = results.pred_boxes

    # Scale bounding boxes (in-place on the Boxes object).
    output_boxes.scale(scale_x, scale_y)
    output_boxes.clip(results.image_size)

    # Drop boxes that collapsed to zero area after clipping.
    results = results[output_boxes.nonempty()]

    # Scale covariance matrices via S @ Cov @ S^T with S = diag(sx, sy, sx, sy).
    if results.has("pred_boxes_covariance"):
        # Add small value to make sure covariance matrix is well conditioned
        output_boxes_covariance = results.pred_boxes_covariance + 1e-4 * torch.eye(
            results.pred_boxes_covariance.shape[2]
        ).to(device)

        scale_mat = (
            torch.diag_embed(torch.as_tensor((scale_x, scale_y, scale_x, scale_y)))
            .to(device)
            .unsqueeze(0)
        )
        scale_mat = torch.repeat_interleave(
            scale_mat, output_boxes_covariance.shape[0], 0
        )
        output_boxes_covariance = torch.matmul(
            torch.matmul(scale_mat, output_boxes_covariance),
            torch.transpose(scale_mat, 2, 1),
        )

        results.pred_boxes_covariance = output_boxes_covariance
    return results
def covar_xyxy_to_xywh(output_boxes_covariance):
    """
    Converts covariance matrices from top-left bottom-right corner representation to top-left corner
    and width-height representation.

    Args:
        output_boxes_covariance (Nx4x4 Tensor): input covariance matrices in
            (x1, y1, x2, y2) coordinates.

    Returns:
        output_boxes_covariance (Nxkxk): Transformed covariance matrices
    """
    # Jacobian of the linear map (x1, y1, x2, y2) -> (x1, y1, x2-x1, y2-y1).
    jacobian = torch.as_tensor(
        [[1.0, 0, 0, 0], [0, 1.0, 0, 0], [-1.0, 0, 1.0, 0], [0, -1.0, 0, 1.0]]
    ).to(device)
    # Expand across the batch (views only, no copies) and propagate the
    # uncertainty as Sigma' = J * Sigma * J^T.
    batch_size = output_boxes_covariance.shape[0]
    jacobian = jacobian.unsqueeze(0).expand(batch_size, -1, -1)
    transformed = torch.matmul(jacobian, output_boxes_covariance)
    transformed = torch.matmul(transformed, torch.transpose(jacobian, 2, 1))
    return transformed
def instances_to_json(instances, img_id, cat_mapping_dict=None):
    """
    Dump an "Instances" object to a COCO-format json that's used for evaluation.

    Args:
        instances (Instances): detectron2 instances
        img_id (int): the image id
        cat_mapping_dict (dict): dictionary to map between raw category id from net and dataset id. very important if
        performing inference on different dataset than that used for training. If None, raw
        category ids are kept as-is.

    Returns:
        list[dict]: list of json annotations in COCO format.
    """
    num_instance = len(instances)
    if num_instance == 0:
        return []

    boxes = instances.pred_boxes.tensor.cpu().numpy()
    boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
    boxes = boxes.tolist()
    scores = instances.scores.cpu().tolist()
    classes = instances.pred_classes.cpu().tolist()

    # Collect any extra fields whose key contains "ppp_param"; the key prefix
    # (10 chars, "ppp_param_") is stripped for the output dict.
    ppp = {
        k[10:]: v[0].detach().cpu().numpy().tolist()
        for k, v in instances.get_fields().items()
        if "ppp_param" in k
    }

    # Fix: the original dereferenced cat_mapping_dict unconditionally and
    # crashed with AttributeError when the default None was used. With no
    # mapping provided, keep the raw category ids.
    if cat_mapping_dict is not None:
        # Unmappable classes become -1 and are filtered out below.
        classes = [cat_mapping_dict.get(class_i, -1) for class_i in classes]

    pred_cls_probs = instances.pred_cls_probs.cpu().tolist()

    if instances.has("pred_boxes_covariance"):
        pred_boxes_covariance = (
            covar_xyxy_to_xywh(instances.pred_boxes_covariance).cpu().tolist()
        )
    else:
        # Fix: one empty covariance per instance, so the per-instance indexing
        # below cannot raise IndexError when covariances are absent.
        pred_boxes_covariance = [[] for _ in range(num_instance)]

    results = []
    for k in range(num_instance):
        if classes[k] != -1:
            result = {
                "image_id": img_id,
                "category_id": classes[k],
                "bbox": boxes[k],
                "score": scores[k],
                "cls_prob": pred_cls_probs[k],
                "bbox_covar": pred_boxes_covariance[k],
                "ppp": ppp,
                "image_size": list(instances[k].image_size),
            }
            results.append(result)
    return results
class SampleBox2BoxTransform(Box2BoxTransform):
    """
    Extension of Box2BoxTransform to support transforming across batch sizes.

    Adds a trailing sample dimension so that many sampled delta sets can be
    applied to the same boxes in a single call.
    """

    def apply_samples_deltas(self, deltas, boxes):
        """
        Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.

        Args:
            deltas (Tensor): transformation deltas, indexed below as
                (N, k*4, M) with k >= 1 class-specific transforms per box and
                M presumably the number of samples — TODO confirm with caller.
                deltas[i] represents k potentially different class-specific
                box transformations for the single box boxes[i].
            boxes (Tensor): boxes to transform, indexed below as (N, 4, M)
                (unlike the parent class, boxes carry the sample dimension).

        Returns:
            Tensor: predicted boxes in (x1, y1, x2, y2) form, same shape
            as `deltas`.
        """
        boxes = boxes.to(deltas.dtype)

        # Box widths/heights and center coordinates per sample.
        widths = boxes[:, 2, :] - boxes[:, 0, :]
        heights = boxes[:, 3, :] - boxes[:, 1, :]
        ctr_x = boxes[:, 0, :] + 0.5 * widths
        ctr_y = boxes[:, 1, :] + 0.5 * heights

        # Undo the per-coordinate weighting applied when deltas were encoded.
        wx, wy, ww, wh = self.weights
        dx = deltas[:, 0::4, :] / wx
        dy = deltas[:, 1::4, :] / wy
        dw = deltas[:, 2::4, :] / ww
        dh = deltas[:, 3::4, :] / wh

        # Prevent sending too large values into torch.exp()
        dw = torch.clamp(dw, max=self.scale_clamp)
        dh = torch.clamp(dh, max=self.scale_clamp)

        # Decode deltas into center/size form ([:, None] broadcasts the box
        # stats over the k class-specific transforms).
        pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
        pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
        pred_w = torch.exp(dw) * widths[:, None]
        pred_h = torch.exp(dh) * heights[:, None]

        # Convert center/size back to corner coordinates.
        pred_boxes = torch.zeros_like(deltas)
        pred_boxes[:, 0::4, :] = pred_ctr_x - 0.5 * pred_w  # x1
        pred_boxes[:, 1::4, :] = pred_ctr_y - 0.5 * pred_h  # y1
        pred_boxes[:, 2::4, :] = pred_ctr_x + 0.5 * pred_w  # x2
        pred_boxes[:, 3::4, :] = pred_ctr_y + 0.5 * pred_h  # y2
        return pred_boxes
def corrupt(x, severity=1, corruption_name=None, corruption_number=None):
    """
    :param x: image to corrupt; a 224x224x3 numpy array in [0, 255]
    :param severity: strength with which to corrupt x; an integer in [0, 5]
    :param corruption_name: specifies which corruption function to call;
    must be one of 'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
                    'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
                    'brightness', 'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression',
                    'speckle_noise', 'gaussian_blur', 'spatter', 'saturate';
                    the last four are validation functions
    :param corruption_number: the position of the corruption_name in the above list;
    an integer in [0, 18]; useful for easy looping; 15, 16, 17, 18 are validation corruption numbers
    :return: the image x corrupted by a corruption function at the given severity; same shape as input
    """
    # Resolve the corruption callable first, then apply it in one place.
    if corruption_name is not None:
        corruption_fn = corruption_dict[corruption_name]
    elif corruption_number is not None:
        corruption_fn = corruption_tuple[corruption_number]
    else:
        raise ValueError("Either corruption_name or corruption_number must be passed")

    x_corrupted = corruption_fn(Image.fromarray(x), severity)
    if x_corrupted.shape != x.shape:
        raise AssertionError("Output image not same size as input image!")
    return np.uint8(x_corrupted)
def get_dir_alphas(pred_class_logits):
    """
    Function to get dirichlet parameters from logits.

    Args:
        pred_class_logits (Tensor): class logits.

    Returns:
        Tensor: Dirichlet concentration parameters, relu(logits) + 1.0,
        elementwise >= 1.0.

    Note:
        Fix: the original used the in-place `torch.relu_`, which silently
        clobbered the caller's logits tensor; `torch.relu` leaves the input
        untouched while producing the same result.
    """
    return torch.relu(pred_class_logits) + 1.0
def get_inference_output_dir(
    output_dir_name, test_dataset_name, inference_config_name, image_corruption_level
):
    """
    Build the directory path where inference results for one configuration live.

    Args:
        output_dir_name (str): root output directory.
        test_dataset_name (str): name of the evaluated dataset.
        inference_config_name (str): path to the inference config file; only
            its basename (minus the 5-char ".yaml" extension) is used.
        image_corruption_level (int): corruption severity used for the run.

    Returns:
        str: `<root>/inference/<dataset>/<config-stem>/corruption_level_<lvl>`.
    """
    # Basename of the config file with its trailing ".yaml" (5 chars) removed.
    config_stem = os.path.split(inference_config_name)[-1][:-5]
    corruption_dir = "corruption_level_{}".format(image_corruption_level)
    return os.path.join(
        output_dir_name, "inference", test_dataset_name, config_stem, corruption_dir
    )
| 27,584 | 36.995868 | 120 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.