repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
espnet | espnet-master/egs2/librispeech/ssl1/local/measure_teacher_quality.py | # The measure_teacher_quality.py uses code from Fairseq:
# https://github.com/pytorch/fairseq/blob/master/examples/hubert/measure_teacher_quality.py
#
# Thanks to Abdelrahman Mohamed and Wei-Ning Hsu's help in this implementation,
# Their origial Hubert work is in:
# Paper: https://arxiv.org/pdf/2106.07447.pdf
# Code in Fairseq: https://github.com/pytorch/fairseq/tree/master/examples/hubert
import argparse
import re
from collections import Counter
import numpy as np
from tabulate import tabulate
from espnet2.utils.types import str2bool
def get_parser():
    """Build the command-line parser for teacher-quality measurement."""
    parser = argparse.ArgumentParser()
    # declare all options from a compact spec table
    arg_specs = [
        ("--lab_dir", dict(type=str, required=True)),
        ("--lab_name", dict(type=str, required=True)),
        ("--lab_sets", dict(default=["valid"], type=str, nargs="+")),
        ("--phn_dir", dict(default="data/librispeech_phoneme_alignment")),
        ("--phn_sets", dict(default=["dev-clean", "dev-other"], type=str, nargs="+")),
        ("--pad_len", dict(default=0, type=int, help="padding for hypotheses")),
        (
            "--upsample",
            dict(default=1, type=int, help="upsample factor for hypotheses"),
        ),
        ("--ref_lab_dir", dict(default="")),
        ("--ref_lab_name", dict(default="")),
        ("--verbose", dict(type=str2bool, default=False)),
    ]
    for flag, options in arg_specs:
        parser.add_argument(flag, **options)
    return parser
def comp_purity(p_xy, axis):
    """Purity of a joint distribution along ``axis``.

    Returns (indv_pur, aggr_pur): the per-row/column max joint mass divided
    by the marginal mass, and the summed max mass (aggregate purity).
    """
    peak = p_xy.max(axis=axis)
    marginal = p_xy.sum(axis=axis)
    return peak / marginal, peak.sum()
def comp_entropy(p):
    """Entropy (in nats) of a probability array; the eps guards log(0)."""
    return -(p * np.log(p + 1e-8)).sum()
def comp_norm_mutual_info(p_xy):
    """Mutual information of the joint distribution plus marginal entropies.

    Returns (mi, mi / h_x, mi / h_y, h_x, h_y), all in nats, where x indexes
    rows (references) and y columns (hypotheses).
    """

    def _entropy(q):
        return (-q * np.log(q + 1e-8)).sum()

    p_x = p_xy.sum(axis=1, keepdims=True)
    p_y = p_xy.sum(axis=0, keepdims=True)
    pointwise = np.log(p_xy / np.matmul(p_x, p_y) + 1e-8)
    mi = (p_xy * pointwise).sum()
    h_x = _entropy(p_x)
    h_y = _entropy(p_y)
    return mi, mi / h_x, mi / h_y, h_x, h_y
def pad(labs, n):
    """Return ``labs`` as an ndarray with its edge labels replicated n times."""
    if n == 0:
        return np.array(labs)
    prefix = [labs[0]] * n
    suffix = [labs[-1]] * n
    return np.concatenate([prefix, labs, suffix])
def comp_avg_seg_dur(labs_list):
    """Average segment duration (frames per segment) over all utterances.

    A segment boundary is the first frame, or any frame whose label differs
    from the previous one.
    """
    total_frames = 0
    total_segments = 0
    for labs in labs_list:
        arr = np.asarray(labs)
        boundaries = np.ones(len(arr), dtype=bool)
        boundaries[1:] = arr[1:] != arr[:-1]
        total_frames += len(arr)
        total_segments += int(boundaries.sum())
    return total_frames / total_segments
def comp_joint_prob(uid2refs, uid2hyps):
    """Estimate the joint distribution p(ref, hyp) over frame label pairs.

    Args:
        uid2refs: dict mapping utterance id -> reference label sequence.
        uid2hyps: dict mapping utterance id -> hypothesis label sequence.

    Returns:
        Tuple (p_xy, ref2pid, hyp2lid, tot, abs_frmdiff, skipped):
        - p_xy: normalized joint count matrix (rows: refs, cols: hyps).
        - ref2pid / hyp2lid: label -> row/column index maps (sorted labels).
        - tot: total number of counted frame pairs.
        - abs_frmdiff: summed absolute ref/hyp length mismatch.
        - skipped: uids present in uid2refs but missing from uid2hyps.
    """
    cnts = Counter()
    skipped = []
    abs_frmdiff = 0
    for uid in uid2refs:
        if uid not in uid2hyps:
            skipped.append(uid)
            continue
        refs = uid2refs[uid]
        hyps = uid2hyps[uid]
        # sequences may differ in length by a few frames;
        # count the mismatch and truncate to the common prefix
        abs_frmdiff += abs(len(refs) - len(hyps))
        min_len = min(len(refs), len(hyps))
        refs = refs[:min_len]
        hyps = hyps[:min_len]
        cnts.update(zip(refs, hyps))
    tot = sum(cnts.values())
    ref_set = sorted({ref for ref, _ in cnts.keys()})
    hyp_set = sorted({hyp for _, hyp in cnts.keys()})
    ref2pid = dict(zip(ref_set, range(len(ref_set))))
    hyp2lid = dict(zip(hyp_set, range(len(hyp_set))))
    p_xy = np.zeros((len(ref2pid), len(hyp2lid)), dtype=float)
    # each (ref, hyp) key is unique in the Counter, so plain assignment is safe
    for (ref, hyp), cnt in cnts.items():
        p_xy[ref2pid[ref], hyp2lid[hyp]] = cnt
    p_xy /= p_xy.sum()
    return p_xy, ref2pid, hyp2lid, tot, abs_frmdiff, skipped
def read_phn(tsv_path, rm_stress=True):
    """Load uid -> phone sequence from a tab-separated alignment file.

    Each line is "<uid>\t<phn1,phn2,...>". When ``rm_stress`` is true, digits
    (ARPAbet stress markers) are stripped from every phone.
    """
    uid2phns = {}
    with open(tsv_path) as f:
        for line in f:
            uid, phn_str = line.rstrip().split("\t")
            phns = phn_str.split(",")
            if rm_stress:
                phns = [re.sub("[0-9]", "", p) for p in phns]
            uid2phns[uid] = phns
    return uid2phns
def read_lab(lab_path, pad_len=0, upsample=1):
    """Load uid -> frame-label array from a space-separated label file.

    Each line is "<uid> <lab1> <lab2> ...". Labels are optionally edge-padded
    by ``pad_len`` frames and repeated ``upsample`` times per frame.
    """
    uid2labs = {}
    with open(lab_path) as f:
        for line in f:
            fields = line.rstrip().split()
            uid2labs[fields[0]] = pad(fields[1:], pad_len).repeat(upsample)
    return uid2labs
def main_lab_lab(
    lab_dir,
    lab_name,
    lab_sets,
    ref_dir,
    ref_name,
    pad_len=0,
    upsample=1,
    verbose=False,
):
    """Score one label set against reference labels from another directory."""
    uid2refs = {}
    uid2hyps = {}
    for split in lab_sets:
        uid2refs.update(read_lab(f"{ref_dir}/{split}.{ref_name}"))
    for split in lab_sets:
        uid2hyps.update(
            read_lab(f"{lab_dir}/{split}.{lab_name}", pad_len, upsample)
        )
    _main(uid2refs, uid2hyps, verbose)
def main_phn_lab(
    lab_dir,
    lab_name,
    lab_sets,
    phn_dir,
    phn_sets,
    pad_len=0,
    upsample=1,
    verbose=False,
):
    """Score frame labels against phoneme alignments."""
    uid2refs = {}
    for split in phn_sets:
        uid2refs.update(read_phn(f"{phn_dir}/{split}.tsv"))
    uid2hyps = {}
    for split in lab_sets:
        # note: labels live in a per-split subdirectory here
        uid2hyps.update(
            read_lab(f"{lab_dir}/{split}/{lab_name}", pad_len, upsample)
        )
    _main(uid2refs, uid2hyps, verbose)
def _main(uid2refs, uid2hyps, verbose):
    """Compute purity, (normalized) MI and segment statistics; print a table."""
    p_xy, ref2pid, hyp2lid, tot, frmdiff, skipped = comp_joint_prob(
        uid2refs, uid2hyps
    )
    ref_pur_by_hyp, ref_pur = comp_purity(p_xy, axis=0)
    hyp_pur_by_ref, hyp_pur = comp_purity(p_xy, axis=1)
    mi, mi_norm_by_ref, mi_norm_by_hyp, h_ref, h_hyp = comp_norm_mutual_info(p_xy)
    outputs = {}
    outputs["ref pur"] = ref_pur
    outputs["hyp pur"] = hyp_pur
    outputs["H(ref)"] = h_ref
    outputs["H(hyp)"] = h_hyp
    outputs["MI"] = mi
    outputs["MI/H(ref)"] = mi_norm_by_ref
    outputs["ref segL"] = comp_avg_seg_dur(uid2refs.values())
    outputs["hyp segL"] = comp_avg_seg_dur(uid2hyps.values())
    outputs["p_xy shape"] = p_xy.shape
    outputs["frm tot"] = tot
    outputs["frm diff"] = frmdiff
    outputs["utt tot"] = len(uid2refs)
    outputs["utt miss"] = len(skipped)
    print(tabulate([outputs.values()], outputs.keys(), floatfmt=".4f"))
if __name__ == "__main__":
    """
    compute quality of labels with respect to phone or another labels if set
    """
    parser = get_parser()
    args = parser.parse_args()
    # With an explicit reference label dir/name, compare labels against labels;
    # otherwise score the labels against the phoneme alignments.
    if args.ref_lab_dir and args.ref_lab_name:
        main_lab_lab(
            args.lab_dir,
            args.lab_name,
            args.lab_sets,
            args.ref_lab_dir,
            args.ref_lab_name,
            args.pad_len,
            args.upsample,
            args.verbose,
        )
    else:
        main_phn_lab(
            args.lab_dir,
            args.lab_name,
            args.lab_sets,
            args.phn_dir,
            args.phn_sets,
            args.pad_len,
            args.upsample,
            args.verbose,
        )
| 6,713 | 26.292683 | 95 | py |
espnet | espnet-master/egs2/slurp_spatialized/asr1/local/multi_to_single.py | import os
import sys
from multiprocessing import Pool
from pathlib import Path
import torchaudio
import tqdm
# usage: multi_to_single.py <multichannel_root> <singlechannel_root>
multi_path = sys.argv[1]
single_path = sys.argv[2]
# dataset splits to convert
data_list = ["tr_real", "tr_synthetic", "cv", "tt", "tt_qut"]
def m2s(pf):
    """Convert one multi-channel wav to single channel.

    ``pf`` is a (src_dir, dst_dir, filename) tuple; non-wav entries are
    silently skipped.
    """
    src_dir, dst_dir, fname = pf
    if ".wav" not in fname:
        return
    audio, sr = torchaudio.load(os.path.join(src_dir, fname))
    # slice (not index) so the channel dimension is kept
    torchaudio.save(os.path.join(dst_dir, fname), audio[0:1], sr)
    return
for data in data_list:
    multi_folder = os.path.join(multi_path, data)
    single_folder = os.path.join(single_path, data)
    # only the mixtures are converted; dry sources and metadata are symlinked
    os.makedirs(os.path.join(single_folder, "mixture"), exist_ok=True)
    os.symlink(
        os.path.join(multi_folder, "s0_dry"), os.path.join(single_folder, "s0_dry")
    )
    os.symlink(
        os.path.join(multi_folder, "metadata.json"),
        os.path.join(single_folder, "metadata.json"),
    )
    for root, dirs, files in os.walk(os.path.join(multi_folder, "mixture")):
        pfiles = [
            (
                os.path.join(multi_folder, "mixture"),
                os.path.join(single_folder, "mixture"),
                f,
            )
            for f in files
        ]
        # convert in parallel (16 workers) with a progress bar
        with Pool(processes=16) as p:
            with tqdm.tqdm(total=len(pfiles)) as pbar:
                for i, elem in enumerate(p.imap(m2s, pfiles)):
                    pbar.update()
| 1,386 | 26.74 | 83 | py |
espnet | espnet-master/egs2/chime7_task1/diar_asr1/local/pyannote_diarize.py | import argparse
import glob
import json
import math
import os.path
import re
from pathlib import Path
import numpy as np
import soundfile as sf
import torch
from pyannote.audio import Model, Pipeline
from pyannote.audio.core.inference import Inference
from pyannote.audio.pipelines import SpeakerDiarization
from pyannote.audio.utils.signal import Binarize, binarize
from pyannote.core import SlidingWindowFeature
from pyannote.metrics.segmentation import Annotation, Segment
from scipy.signal import medfilt2d
# run pyannote inference on GPU when one is available
IS_CUDA = torch.cuda.is_available()
def split_maxlen(utt_group, min_len=10):
    """Greedily regroup consecutive utterances into segments >= min_len seconds.

    Utterances are accumulated until the running span reaches ``min_len``,
    then the span is emitted as a single Segment; a trailing group is always
    flushed.
    """
    out = []
    pending = []
    for utt in utt_group:
        if not pending or (utt.end - pending[0].start) < min_len:
            pending.append(utt)
        else:
            out.append(Segment(pending[0].start, pending[-1].end))
            pending = [utt]
    if pending:
        out.append(Segment(pending[0].start, pending[-1].end))
    return out
def merge_closer(annotation, delta=1.0, max_len=60, min_len=10):
    """Merge same-speaker segments closer than ``delta`` seconds apart.

    Merged spans longer than ``max_len`` are re-split via ``split_maxlen``
    into pieces of at least ``min_len`` seconds. Returns a new Annotation
    with the same URI.
    """
    name = annotation.uri
    speakers = annotation.labels()
    new_annotation = Annotation(uri=name)
    for spk in speakers:
        c_segments = sorted(annotation.label_timeline(spk), key=lambda x: x.start)
        stack = []  # consecutive segments accumulated for merging
        for seg in c_segments:
            if not stack or abs(stack[-1].end - seg.start) < delta:
                stack.append(seg)
                continue  # continue
            # more than delta, save the current max seg
            if (stack[-1].end - stack[0].start) > max_len:
                # break into parts of 10 seconds at least
                for sub_seg in split_maxlen(stack, min_len):
                    new_annotation[sub_seg] = spk
                stack = [seg]
            else:
                new_annotation[Segment(stack[0].start, stack[-1].end)] = spk
                stack = [seg]
        # NOTE(review): the final group is flushed without the max_len split
        # applied above — confirm whether that asymmetry is intended.
        if len(stack):
            new_annotation[Segment(stack[0].start, stack[-1].end)] = spk
    return new_annotation
def rttm2json(rttm_file):
    """Convert an RTTM diarization file to a JSON list of segments.

    Writes ``<stem>.json`` next to the input file. Entries are sorted by
    start time and look like {"session_id", "speaker", "start_time",
    "end_time"} with times kept as strings.
    """
    with open(rttm_file, "r") as f:
        rttm = [x.rstrip("\n") for x in f.readlines()]
    filename = Path(rttm_file).stem
    to_json = []
    for line in rttm:
        if not line.strip():
            continue  # tolerate blank lines
        # split on any whitespace run so tab- or multi-space-separated
        # RTTM files parse correctly too (was split(" "), which shifts
        # field indices when separators are not single spaces)
        current = line.split()
        start = current[3]
        duration = current[4]
        stop = str(float(start) + float(duration))
        speaker = current[7]
        session = filename
        to_json.append(
            {
                "session_id": session,
                "speaker": speaker,
                "start_time": start,
                "end_time": stop,
            }
        )
    to_json = sorted(to_json, key=lambda x: float(x["start_time"]))
    with open(
        os.path.join(Path(rttm_file).parent, Path(rttm_file).stem + ".json"), "w"
    ) as f:
        json.dump(to_json, f, indent=4)
def diarize_session(
    sess_name,
    pipeline,
    wav_files,
    uem_boundaries=None,
    merge_closer_delta=1.5,
    max_length_merged=60,
    max_n_speakers=4,
):
    """Diarize one session from multiple microphone channels.

    Runs segmentation on every channel, picks per-chunk the channel with the
    most detected speech, extracts embeddings from the selected audio,
    clusters them, and returns a pyannote ``Annotation`` (post-processed by
    ``merge_closer``) with times shifted back by the UEM start.

    Args:
        sess_name: session id, used as the Annotation URI.
        pipeline: a pyannote ``SpeakerDiarization`` pipeline.
        wav_files: per-channel audio file paths for this session.
        uem_boundaries: optional (start, stop) scoring region in seconds.
        merge_closer_delta: merge same-speaker segments closer than this (s).
        max_length_merged: maximum merged segment length in seconds.
        max_n_speakers: upper bound on the number of clusters.
    """
    # take the min len across all wavs
    minlen = min([sf.SoundFile(w).frames for w in wav_files])
    fs = sf.SoundFile(wav_files[0]).samplerate
    if uem_boundaries is not None:
        # seconds -> samples
        uem_boundaries = [round(x * fs) for x in uem_boundaries]
    else:
        uem_boundaries = [0, minlen]
    # now for each audio file run inference
    all_segmentation = []
    all_audio = []
    print("Running Segmentation on each of the {} channels".format(len(wav_files)))
    for w_f in wav_files:
        c_audio, c_fs = sf.read(w_f, dtype="float32")
        assert fs == c_fs
        # crop to the UEM region, bounded by the shortest channel
        c_audio = c_audio[: min(minlen, uem_boundaries[1])]
        c_audio = c_audio[uem_boundaries[0] :]
        c_audio = torch.from_numpy(c_audio[None, ...])
        # skip near-silent (likely all-zero) channels entirely
        if (c_audio**2).mean() < 1e-8:
            print(
                "Not running inference on {}, because the signal amplitude is "
                "too low, is it all zeros ?".format(c_audio)
            )
            continue
        if IS_CUDA:
            c_audio = c_audio.cuda()
        c_seg = pipeline.get_segmentations({"waveform": c_audio, "sample_rate": fs})
        c_seg = binarize(
            c_seg,
            onset=pipeline.segmentation.threshold,
            initial_state=False,
        )
        all_segmentation.append(c_seg)  # move to cpu for less mem consumption
        all_audio.append(c_audio)
    # here we select the best channel based on one with most activations.
    # not an optimal criterion but at least the clustering afterwards will be fast.
    sliding_window = all_segmentation[0].sliding_window
    all_audio = torch.cat(all_audio, 0)
    num_channels = all_audio.shape[0]
    num_chunks, frames, local_spk = all_segmentation[0].data.shape
    # stack the per-channel binary segmentations along a new last axis
    all_segmentation = SlidingWindowFeature(
        np.stack([x.data for x in all_segmentation], -1),
        sliding_window,
    )
    selected_audio = torch.zeros_like(c_audio)
    selected_seg = []
    print("Running Channel Selection by using the segmentation output")
    for indx, (seg_b, segmentation) in enumerate(all_segmentation):
        c_seg = all_audio[:, math.floor(seg_b.start * fs) : math.floor(seg_b.end * fs)]
        # median filter here seems to improve performance on chime6 in high overlap
        # conditions
        segmentation = medfilt2d(
            segmentation.reshape((frames, local_spk * num_channels)), (7, 1)
        ).reshape((frames, local_spk, num_channels))
        # why not the fine-tuned model is used ?
        # because that one is trained on chime6 to be robust against noise and
        # reverberation and position of the mic.
        # we want instead a model that is not so robust against that to use
        # to select the best channel from which the embeddings will be extracted.
        selection = np.argmax(
            segmentation.sum((0, 1))
        )  # not the best selection criteria
        # however this keeps it simple and fast.
        selected_audio[
            :, math.floor(seg_b.start * fs) : math.floor(seg_b.end * fs)
        ] = c_seg[selection]
        selected_seg.append(segmentation[..., selection])
    # stack em
    selected_seg = SlidingWindowFeature(
        np.stack([x.data for x in selected_seg]), sliding_window
    )
    # estimate the per-frame speaker count from the selected segmentation
    count = Inference.trim(
        selected_seg, warm_up=(0.1, 0.1)
    )  # default value in Pyannote
    count = Inference.aggregate(
        np.sum(count, axis=-1, keepdims=True),
        frames=pipeline._frames,
        hamming=False,
        missing=0.0,
        skip_average=False,
    )
    count.data = np.rint(count.data).astype(np.uint8)
    print("Extracting Embeddings.")
    embeddings = pipeline.get_embeddings(
        {"waveform": selected_audio, "sample_rate": fs},
        selected_seg,
        exclude_overlap=pipeline.embedding_exclude_overlap,
    )
    # shape: (num_chunks, local_num_speakers, dimension)
    print("Clustering.")
    hard_clusters, _ = pipeline.clustering(
        embeddings=embeddings,
        segmentations=selected_seg,
        num_clusters=None,
        min_clusters=0,
        max_clusters=max_n_speakers,  # max-speakers are ok
        file={
            "waveform": selected_audio,
            "sample_rate": fs,
        },  # <== for oracle clustering
        frames=pipeline._frames,  # <== for oracle clustering
    )
    # reconstruct discrete diarization from raw hard clusters
    # keep track of inactive speakers
    inactive_speakers = np.sum(selected_seg.data, axis=1) == 0
    # shape: (num_chunks, num_speakers)
    hard_clusters[inactive_speakers] = -2
    # reshape now to multi-channel
    discrete_diarization = pipeline.reconstruct(
        selected_seg,
        hard_clusters,
        count,
    )
    # convert to annotation
    to_annotation = Binarize(
        onset=0.5,
        offset=0.5,
        min_duration_on=pipeline.segmentation.min_duration_on,
        min_duration_off=pipeline.segmentation.min_duration_off,
        pad_onset=pipeline.segmentation.pad_onset,
        pad_offset=pipeline.segmentation.pad_offset,
    )
    result = to_annotation(discrete_diarization)
    # shift segments back to absolute session time (UEM start offset)
    offset = uem_boundaries[0] / fs
    new_annotation = Annotation(uri=sess_name)  # new annotation
    speakers = result.labels()
    for spk in speakers:
        for seg in result.label_timeline(spk):
            new_annotation[Segment(seg.start + offset, seg.end + offset)] = spk
    new_annotation = merge_closer(
        new_annotation, delta=merge_closer_delta, max_len=max_length_merged, min_len=10
    )
    return new_annotation
def read_uem(uem_file):
    """Parse a UEM file into {session_id: (start_sec, stop_sec)}.

    Each line has four space-separated fields: session, channel, start, stop.
    """
    uem2sess = {}
    with open(uem_file, "r") as f:
        for raw in f.readlines():
            sess_id, _, start, stop = raw.rstrip("\n").split(" ")
            uem2sess[sess_id] = (float(start), float(stop))
    return uem2sess
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        "This script performs diarization using "
        "Pyannote audio diarization pipeline "
        "extended to handle multiple microphones.",
        add_help=True,
        usage="%(prog)s [options]",
    )
    parser.add_argument(
        "-i,--in_dir",
        type=str,
        help="Folder containing the audio files which will be fed to the "
        "diarization pipeline.",
        metavar="STR",
        dest="in_dir",
    )
    parser.add_argument(
        "-o,--out_folder",
        type=str,
        default="",
        required=False,
        help="Path to output folder.",
        metavar="STR",
        dest="out_dir",
    )
    parser.add_argument(
        "-u,--uem",
        type=str,
        default="",
        required=False,
        help="Path to uem file.",
        metavar="STR",
        dest="uem_file",
    )
    parser.add_argument(
        "--token",
        type=str,
        help="Access token for HuggingFace Pyannote model."
        "see https://github.com/pyannote/pyannote-audio"
        "/blob/develop/tutorials/applying_a_pipeline.ipynb",
        metavar="STR",
        dest="token",
    )
    parser.add_argument(
        "--mic_regex",
        type=str,
        help="Regular expression to extract the microphone "
        "channel from audio filename.",
        metavar="STR",
        dest="mic_regex",
    )
    parser.add_argument(
        "--sess_regex",
        type=str,
        help="Regular expression to extract the session" " from audio filename.",
        metavar="STR",
        dest="sess_regex",
    )
    parser.add_argument(
        "--segmentation_model",
        required=False,
        default="popcornell/pyannote-segmentation-chime6-mixer6",
        type=str,
        help="Pre-trained segmentation model used.",
        metavar="STR",
        dest="segmentation_model",
    )
    parser.add_argument(
        "--max_speakers",
        type=int,
        default=4,
        help="Max number of speakers in each session.",
        metavar="INT",
        dest="max_speakers",
    )
    parser.add_argument(
        "--max_batch_size",
        type=int,
        default=256,
        help="Max batch size used for segmentation and embeddings extraction.",
        metavar="INT",
        dest="max_batch_size",
    )
    parser.add_argument(
        "--max_length_merged",
        type=str,
        default="60",
        help="Max length of segments that will be merged together. "
        "Reduce to reduce GSS GPU memory occupation later in the recipe.",
        metavar="STR",
        dest="max_length_merged",
    )
    parser.add_argument(
        "--merge_closer",
        type=str,
        default="0.5",
        help="Merge segments from same speakers that "
        "are less than this value apart.",
        metavar="STR",
        dest="merge_closer",
    )
    args = parser.parse_args()
    # the base pipeline supplies the embedding model, clustering method and
    # tuned hyper-parameters; only the segmentation model may be swapped below
    pretrained_pipeline = Pipeline.from_pretrained(
        "pyannote/speaker-diarization",
        use_auth_token=args.token,
    )
    if len(args.segmentation_model):
        # use local segmentation model or pre-trained one in
        # https://huggingface.co/popcornell/pyannote-segmentation-chime6-mixer6
        segmentation = Model.from_pretrained(args.segmentation_model)
    else:
        segmentation = Model.from_pretrained(
            "pyannote/segmentation",
            use_auth_token=args.token,
        )
    diarization_pipeline = SpeakerDiarization(
        segmentation=segmentation,
        embedding=pretrained_pipeline.embedding,
        embedding_exclude_overlap=pretrained_pipeline.embedding_exclude_overlap,
        clustering=pretrained_pipeline.klustering,
    )
    # we do not change the hyper-parameters of the original
    # pyannote model
    pretrained_hyperparameters = pretrained_pipeline.parameters(instantiated=True)
    diarization_pipeline.segmentation.threshold = pretrained_hyperparameters[
        "segmentation"
    ]["threshold"]
    diarization_pipeline.segmentation.min_duration_off = 0.0
    diarization_pipeline.segmentation.min_duration_on = 0.0  # 0.5
    diarization_pipeline.segmentation.pad_onset = 0.0  # 0.2
    diarization_pipeline.segmentation.pad_offset = 0.0  # 0.2
    diarization_pipeline.clustering.threshold = pretrained_hyperparameters[
        "clustering"
    ]["threshold"]
    diarization_pipeline.clustering.min_cluster_size = (
        15  # higher than pre-trained, which was 15
    )
    diarization_pipeline.clustering.method = pretrained_hyperparameters["clustering"][
        "method"
    ]
    Path(args.out_dir).mkdir(exist_ok=True, parents=True)
    # collect candidate audio and keep only files whose stem matches the mic regex
    audio_f = glob.glob(os.path.join(args.in_dir, "*.wav")) + glob.glob(
        os.path.join(args.in_dir, "*.flac")
    )
    audio_f = [x for x in audio_f if re.search(args.mic_regex, Path(x).stem)]
    if args.uem_file:
        uem_map = read_uem(args.uem_file)
    # joint diarization of all mics
    sess2audio = {}
    for audio_file in audio_f:
        filename = Path(audio_file).stem
        sess_name = re.search(args.sess_regex, filename).group()
        if sess_name not in sess2audio.keys():
            sess2audio[sess_name] = []
        sess2audio[sess_name].append(audio_file)
    # now for each session
    for sess in sess2audio.keys():
        print("Diarizing Session {}".format(sess))
        if args.uem_file:
            c_uem = uem_map[sess]
        else:
            c_uem = None
        c_result = diarize_session(
            sess,
            diarization_pipeline,
            sess2audio[sess],
            c_uem,
            float(args.merge_closer),
            float(args.max_length_merged),
            args.max_speakers,
        )
        # write the RTTM plus a JSON mirror for downstream tools
        c_rttm_out = os.path.join(args.out_dir, sess + ".rttm")
        with open(c_rttm_out, "w") as f:
            f.write(c_result.to_rttm())
        rttm2json(c_rttm_out)
| 14,818 | 32.603175 | 87 | py |
espnet | espnet-master/egs2/chime7_task1/diar_asr1/local/pyannote_finetune.py | import argparse
import os.path
import shutil
from pathlib import Path
from types import MethodType
from pyannote.audio import Inference, Model
from pyannote.audio.tasks import Segmentation
from pyannote.database import FileFinder, get_protocol
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint, RichProgressBar
from torch.optim import Adam
from torch_audiomentations import (
AddColoredNoise,
Compose,
Gain,
PeakNormalization,
PolarityInversion,
)
def finetune_segmentation(
    n_gpus=1,
    max_epochs=40,
    batch_size=64,
    learning_rate=1e-5,
    gradient_clip=0.5,
    patience=10,
    num_workers=8,
    auth_token=None,
    exp_folder="./exp/pyannote_diarization_finetuned",
    ft_protocol="chime7_finetune.SpeakerDiarization.only_words",
):
    """Fine-tune the pre-trained pyannote segmentation model.

    Loads "pyannote/segmentation", trains it on ``ft_protocol`` with waveform
    augmentation, keeps the single best checkpoint by the task's validation
    monitor (with early stopping), and copies it to ``.../best.ckpt``.
    """
    model = Model.from_pretrained("pyannote/segmentation", use_auth_token=auth_token)
    dataset = get_protocol(ft_protocol, {"audio": FileFinder()})
    augmentation = Compose(
        transforms=[  # using pitch-shifting and bandstopfiltering
            # slows significantly training, but you can try
            # as it will likely improve results a bit.
            Gain(
                min_gain_in_db=-6.0,
                max_gain_in_db=6.0,
                p=0.5,
            ),
            PeakNormalization(apply_to="only_too_loud_sounds"),
            PolarityInversion(p=0.5),
            AddColoredNoise(p=0.2),
        ],
        output_type="dict",
    )
    # keep the pre-trained model's chunk duration and speaker capacity
    task = Segmentation(
        dataset,
        duration=model.specifications.duration,
        max_num_speakers=len(model.specifications.classes),
        batch_size=batch_size,
        num_workers=num_workers,
        loss="bce",
        vad_loss="bce",
        augmentation=augmentation,
    )
    model.task = task
    model.setup(stage="fit")

    # bind a custom optimizer factory onto the loaded model instance
    def configure_optimizers(self):
        return Adam(self.parameters(), lr=learning_rate)

    model.configure_optimizers = MethodType(configure_optimizers, model)
    # we monitor diarization error rate on the validation set
    # and use to keep the best checkpoint and stop early
    monitor, direction = task.val_monitor
    checkpoint = ModelCheckpoint(
        monitor=monitor,
        mode=direction,
        save_top_k=1,
        every_n_epochs=1,
        save_last=False,
        save_weights_only=False,
        filename="{epoch}",
        verbose=False,
    )
    early_stopping = EarlyStopping(
        monitor=monitor,
        mode=direction,
        min_delta=0.0,
        patience=patience,
        strict=True,
        verbose=False,
    )
    callbacks = [RichProgressBar(), checkpoint, early_stopping]
    Path(exp_folder).mkdir(parents=True, exist_ok=True)
    trainer = Trainer(
        accelerator="gpu",
        devices=list(range(n_gpus)),
        callbacks=callbacks,
        max_epochs=max_epochs,
        gradient_clip_val=gradient_clip,
        default_root_dir=exp_folder,
    )
    trainer.fit(model)
    # expose the winning checkpoint under a stable, predictable name
    shutil.copyfile(
        checkpoint.best_model_path,
        os.path.join(Path(checkpoint.best_model_path).parent, "best.ckpt"),
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        "Fine-tuning script for Pyannote segmentation model.",
        add_help=True,
        usage="%(prog)s [options]",
    )
    parser.add_argument(
        "--token",
        type=str,
        help="Access token for HuggingFace Pyannote model."
        "see https://github.com/pyannote/pyannote-audio"
        "/blob/develop/tutorials/applying_a_pipeline.ipynb",
        metavar="STR",
        dest="auth_token",
    )
    parser.add_argument(
        "--ngpus",
        type=int,
        required=False,
        default=1,
        help="Number of GPUs to use in fine-tuning.",
        metavar="INT",
        dest="ngpus",
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        required=False,
        default=64,
        help="Batch size to use in fine-tuning.",
        metavar="INT",
        dest="batch_size",
    )
    parser.add_argument(
        "--learning_rate",
        type=str,
        required=False,
        default="1e-5",
        help="Learning rate to use in fine-tuning.",
        metavar="STR",
        dest="learning_rate",
    )
    parser.add_argument(
        "--num_workers",
        type=int,
        required=False,
        default=8,
        help="Num workers for dataloading.",
        metavar="INT",
        dest="num_workers",
    )
    parser.add_argument(
        "--exp_folder",
        type=str,
        required=False,
        default="./exp/pyannote_diarization_finetuned",
        help="Folder where to save the results and " "logs for the fine-tuning.",
        metavar="STR",
        dest="exp_folder",
    )
    parser.add_argument(
        "--protocol",
        type=str,
        required=False,
        default="chime7_finetune.SpeakerDiarization.only_words",
        help="Dataset to use in fine-tuning, it must match an "
        "entry in the database.yml in this folder.",
        metavar="STR",
        dest="protocol",
    )
    args = parser.parse_args()
    # learning rate arrives as a string so scientific notation parses cleanly
    finetune_segmentation(
        batch_size=args.batch_size,
        n_gpus=args.ngpus,
        num_workers=args.num_workers,
        auth_token=args.auth_token,
        learning_rate=float(args.learning_rate),
        exp_folder=args.exp_folder,
        ft_protocol=args.protocol,
    )
| 5,429 | 27.429319 | 87 | py |
espnet | espnet-master/egs2/chime7_task1/asr1/local/gss_micrank.py | import argparse
import os
from copy import deepcopy
from pathlib import Path
import lhotse
import soundfile as sf
import torch
import torchaudio
import tqdm
from torch.utils.data import DataLoader, Dataset
class EnvelopeVariance(torch.nn.Module):
    """
    Envelope Variance Channel Selection method with
    (optionally) learnable per mel-band weights.
    """

    def __init__(
        self,
        n_mels=40,
        n_fft=400,
        hop_length=200,
        samplerate=16000,
        eps=1e-6,
        chunk_size=4,
        chunk_stride=2,
    ):
        # chunk_size / chunk_stride are given in seconds and converted to
        # mel-frame units below
        super(EnvelopeVariance, self).__init__()
        self.mels = torchaudio.transforms.MelSpectrogram(
            sample_rate=samplerate,
            n_fft=n_fft,
            hop_length=hop_length,
            n_mels=n_mels,
            power=2,
        )
        self.eps = eps
        # one learnable weight per mel band (used via abs() in _single_window)
        self.subband_weights = torch.nn.Parameter(torch.ones(n_mels))
        self.chunk_size = int(chunk_size * samplerate / hop_length)
        self.chunk_stride = int(chunk_stride * samplerate / hop_length)

    def _single_window(self, mels):
        """Score channels on one window of mel frames (higher = better)."""
        logmels = torch.log(mels + self.eps)
        # mean-normalize the log envelope per band before taking the variance
        mels = torch.exp(logmels - torch.mean(logmels, -1, keepdim=True))
        var = torch.var(mels ** (1 / 3), dim=-1)  # channels, subbands
        var = var / torch.amax(var, 1, keepdim=True)
        subband_weights = torch.abs(self.subband_weights)
        ranking = torch.sum(var * subband_weights, -1)
        return ranking

    def _count_chunks(self, inlen, chunk_size, chunk_stride):
        # number of full chunks that fit into inlen frames
        return int((inlen - chunk_size + chunk_stride) / chunk_stride)

    def _get_chunks_indx(self, in_len, chunk_size, chunk_stride, discard_last=False):
        """Yield (start, stop) frame indices of the analysis chunks."""
        i = -1
        for i in range(self._count_chunks(in_len, chunk_size, chunk_stride)):
            yield i * chunk_stride, i * chunk_stride + chunk_size
        # optionally emit a final, shorter chunk covering the tail
        if not discard_last and i * chunk_stride + chunk_size < in_len:
            if in_len - (i + 1) * chunk_stride > 0:
                yield (i + 1) * chunk_stride, in_len

    def forward(self, channels):
        """Rank the channels of a 3-D input tensor.

        Assumes input of shape (batch, channels, samples) — only the 3-D
        requirement is enforced here; confirm against callers. For inputs
        longer than one chunk the score is averaged over sliding chunks.
        """
        assert channels.ndim == 3
        mels = self.mels(channels)
        if mels.shape[-1] > (self.chunk_size + self.chunk_stride):
            # using for because i am too lazy of taking care of padded
            # values in stats computation, but this is fast
            indxs = self._get_chunks_indx(
                mels.shape[-1], self.chunk_size, self.chunk_stride
            )
            all_win_ranks = [self._single_window(mels[..., s:t]) for s, t in indxs]
            return torch.stack(all_win_ranks).mean(0)
        else:
            return self._single_window(mels)
class MicRanking(Dataset):
    """Dataset that, per supervision segment, scores all channels with the
    ranker and keeps only the best ``top_k`` fraction.

    ``__getitem__`` returns a copy of the supervision whose ``channel`` list
    holds just the selected channel indices, ordered best-first.
    """

    def __init__(self, recordings, supervisions, ranker, top_k):
        super().__init__()
        self.recordings = recordings
        self.supervisions = supervisions
        self.ranker = ranker
        self.top_k = top_k  # fraction in (0, 1] of channels to keep

    def __len__(self):
        return len(self.supervisions)

    def _get_read_chans(self, c_supervision, c_recordings, start, duration, fs=16000):
        """Read this segment from every channel; returns (tensor, channel ids).

        Channels whose read length differs from the first one (e.g. a shorter
        recording) are reported and discarded.
        """
        to_tensor = []
        chan_indx = []
        for recording in c_recordings.sources:
            c_wav, _ = sf.read(
                recording.source,
                start=int(start * fs),
                stop=int(start * fs) + int(duration * fs),
            )
            c_wav = torch.from_numpy(c_wav).float().unsqueeze(0)
            assert (
                c_wav.shape[0] == 1
            ), "Input audio should be mono for channel selection in this script."
            if len(to_tensor) > 0:
                if c_wav.shape[-1] != to_tensor[0].shape[-1]:
                    print(
                        "Discarded recording {} from {} supervision start {:.3f} "
                        "stop {:.3f} . There is a difference of length of {} (samples)"
                        " with the other recordings. It may be that one recording "
                        "is shorter than the others and this segment exceed "
                        "the length of the shorter one."
                        "".format(
                            recording,
                            c_supervision.id,
                            c_supervision.start,
                            c_supervision.end,
                            abs(c_wav.shape[-1] - to_tensor[0].shape[-1]),
                        )
                    )
                    continue
            to_tensor.append(c_wav)
            chan_indx.append(recording.channels[0])
        all_channels = torch.stack(to_tensor).transpose(0, 1)
        return all_channels, chan_indx

    def __getitem__(self, item):
        c_supervision = self.supervisions[item]
        start = c_supervision.start
        duration = c_supervision.duration
        # FIX: use this dataset's own manifests/ranker instead of reading
        # same-named globals that only the driving script happens to define.
        c_recordings = self.recordings[c_supervision.recording_id]
        fs = c_recordings.sampling_rate
        all_channels, chan_indx = self._get_read_chans(
            c_supervision, c_recordings, start, duration, fs
        )
        assert all_channels.ndim == 3
        assert (
            all_channels.shape[0] == 1
        ), "If batch size is more than one here something went wrong."
        with torch.inference_mode():
            c_scores = self.ranker(all_channels)
        c_scores = c_scores[0].numpy().tolist()
        c_scores = [(x, y) for x, y in zip(c_scores, chan_indx)]
        # highest score first, keep only the requested fraction
        c_scores = sorted(c_scores, key=lambda x: x[0], reverse=True)
        c_scores = c_scores[: int(len(c_scores) * self.top_k)]
        new_sup = deepcopy(c_supervision)
        new_sup.channel = [x[-1] for x in c_scores]
        return new_sup
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        # FIX: missing space between the two implicitly-joined fragments
        "We use this script to select a subset of "
        "microphones to feed to GSS."
    )
    parser.add_argument(
        "-r,--recordings",
        type=str,
        metavar="STR",
        dest="recordings",
        help="Input recordings lhotse manifest",
    )
    parser.add_argument(
        "-s,--supervisions",
        type=str,
        metavar="STR",
        dest="supervisions",
        help="Input supervisions lhotse manifest",
    )
    parser.add_argument(
        "-o, --out_name",
        type=str,
        metavar="STR",
        dest="out_name",
        help="Name and path for the new output manifests with the reduced"
        "channels. E.g. /tmp/chime6_selected --> will create "
        "chime6_selected_recordings.jsonl.gz "
        "and chime6_selected_supervisions.jsonl.gz",
    )
    parser.add_argument(
        "-k, --top_k",
        default=25,
        type=int,
        metavar="INT",
        dest="top_k",
        help="Percentage of best microphones to keep "
        "(e.g. 20 -> 20% of all microphones)",
    )
    parser.add_argument(
        "--nj",
        default=8,
        type=int,
        metavar="INT",
        dest="nj",
        help="Number of parallel jobs",
    )
    args = parser.parse_args()
    recordings = lhotse.load_manifest(args.recordings)
    supervisions = lhotse.load_manifest(args.supervisions)
    output_filename = args.out_name
    ranker = EnvelopeVariance(samplerate=recordings[0].sampling_rate)
    single_thread = MicRanking(recordings, supervisions, ranker, args.top_k / 100)
    # batch_size=1: each item already carries a full multi-channel segment
    dataloader = DataLoader(
        single_thread,
        shuffle=False,
        batch_size=1,
        num_workers=args.nj,
        drop_last=False,
        collate_fn=lambda batch: [x for x in batch],
    )
    new_supervisions = []
    for i_batch, elem in enumerate(tqdm.tqdm(dataloader)):
        new_supervisions.extend(elem)
    recording_set, supervision_set = lhotse.fix_manifests(
        lhotse.RecordingSet.from_recordings(recordings),
        lhotse.SupervisionSet.from_segments(new_supervisions),
    )
    # Fix manifests
    lhotse.validate_recordings_and_supervisions(recording_set, supervision_set)
    Path(output_filename).parent.mkdir(exist_ok=True, parents=True)
    filename = Path(output_filename).stem
    # FIX: interpolate the computed stem into the output names; the previous
    # f-strings contained a literal placeholder and `filename` was unused.
    supervision_set.to_file(
        os.path.join(Path(output_filename).parent, f"{filename}_supervisions.jsonl.gz")
    )
    recording_set.to_file(
        os.path.join(Path(output_filename).parent, f"{filename}_recordings.jsonl.gz")
    )
FixMatch-pytorch | FixMatch-pytorch-master/train.py | import argparse
import logging
import math
import os
import random
import shutil
import time
from collections import OrderedDict
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from dataset.cifar import DATASET_GETTERS
from utils import AverageMeter, accuracy
logger = logging.getLogger(__name__)
# best top-1 accuracy observed so far (module-level, updated during training)
best_acc = 0
def save_checkpoint(state, is_best, checkpoint, filename='checkpoint.pth.tar'):
    """Persist training state under ``checkpoint``; when ``is_best`` is true,
    also copy it to ``model_best.pth.tar``."""
    filepath = os.path.join(checkpoint, filename)
    torch.save(state, filepath)
    if not is_best:
        return
    best_path = os.path.join(checkpoint, 'model_best.pth.tar')
    shutil.copyfile(filepath, best_path)
def set_seed(args):
    """Seed the python, numpy and torch RNGs (and CUDA, when GPUs are used)."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def get_cosine_schedule_with_warmup(optimizer,
                                    num_warmup_steps,
                                    num_training_steps,
                                    num_cycles=7./16.,
                                    last_epoch=-1):
    """LR schedule: linear warmup, then cosine decay toward zero.

    The multiplier ramps 0 -> 1 over ``num_warmup_steps`` and then follows
    cos(pi * num_cycles * progress), clamped at 0.
    """
    def _lr_lambda(current_step):
        if current_step < num_warmup_steps:
            # linear warmup phase
            return float(current_step) / float(max(1, num_warmup_steps))
        # fraction of post-warmup training completed, in [0, 1]
        no_progress = float(current_step - num_warmup_steps) / \
            float(max(1, num_training_steps - num_warmup_steps))
        return max(0., math.cos(math.pi * num_cycles * no_progress))

    return LambdaLR(optimizer, _lr_lambda, last_epoch)
def interleave(x, size):
    """Rearrange dim 0 by splitting it into groups of ``size`` and transposing."""
    tail = list(x.shape)[1:]
    grouped = x.reshape([-1, size] + tail)
    return grouped.transpose(0, 1).reshape([-1] + tail)
def de_interleave(x, size):
    """Inverse of ``interleave``: regroup dim 0 back into its original order."""
    tail = list(x.shape)[1:]
    grouped = x.reshape([size, -1] + tail)
    return grouped.transpose(0, 1).reshape([-1] + tail)
def main():
    """Parse CLI arguments, build data/model/optimizer, and launch training.

    Handles both single-GPU (``local_rank == -1``) and DistributedDataParallel
    setups; rank-0 owns the TensorBoard writer and checkpoint directory.
    """
    parser = argparse.ArgumentParser(description='PyTorch FixMatch Training')
    parser.add_argument('--gpu-id', default='0', type=int,
                        help='id(s) for CUDA_VISIBLE_DEVICES')
    parser.add_argument('--num-workers', type=int, default=4,
                        help='number of workers')
    parser.add_argument('--dataset', default='cifar10', type=str,
                        choices=['cifar10', 'cifar100'],
                        help='dataset name')
    parser.add_argument('--num-labeled', type=int, default=4000,
                        help='number of labeled data')
    parser.add_argument("--expand-labels", action="store_true",
                        help="expand labels to fit eval steps")
    parser.add_argument('--arch', default='wideresnet', type=str,
                        choices=['wideresnet', 'resnext'],
                        help='dataset name')
    parser.add_argument('--total-steps', default=2**20, type=int,
                        help='number of total steps to run')
    parser.add_argument('--eval-step', default=1024, type=int,
                        help='number of eval steps to run')
    parser.add_argument('--start-epoch', default=0, type=int,
                        help='manual epoch number (useful on restarts)')
    parser.add_argument('--batch-size', default=64, type=int,
                        help='train batchsize')
    parser.add_argument('--lr', '--learning-rate', default=0.03, type=float,
                        help='initial learning rate')
    parser.add_argument('--warmup', default=0, type=float,
                        help='warmup epochs (unlabeled data based)')
    parser.add_argument('--wdecay', default=5e-4, type=float,
                        help='weight decay')
    parser.add_argument('--nesterov', action='store_true', default=True,
                        help='use nesterov momentum')
    parser.add_argument('--use-ema', action='store_true', default=True,
                        help='use EMA model')
    parser.add_argument('--ema-decay', default=0.999, type=float,
                        help='EMA decay rate')
    parser.add_argument('--mu', default=7, type=int,
                        help='coefficient of unlabeled batch size')
    parser.add_argument('--lambda-u', default=1, type=float,
                        help='coefficient of unlabeled loss')
    parser.add_argument('--T', default=1, type=float,
                        help='pseudo label temperature')
    parser.add_argument('--threshold', default=0.95, type=float,
                        help='pseudo label threshold')
    parser.add_argument('--out', default='result',
                        help='directory to output the result')
    parser.add_argument('--resume', default='', type=str,
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--seed', default=None, type=int,
                        help="random seed")
    parser.add_argument("--amp", action="store_true",
                        help="use 16-bit (mixed) precision through NVIDIA apex AMP")
    parser.add_argument("--opt_level", type=str, default="O1",
                        help="apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                        "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument('--no-progress', action='store_true',
                        help="don't use progress bar")
    args = parser.parse_args()
    global best_acc

    def create_model(args):
        # Builds the backbone selected by --arch; the model_* hyper-params
        # are filled in below based on the dataset.
        if args.arch == 'wideresnet':
            import models.wideresnet as models
            model = models.build_wideresnet(depth=args.model_depth,
                                            widen_factor=args.model_width,
                                            dropout=0,
                                            num_classes=args.num_classes)
        elif args.arch == 'resnext':
            import models.resnext as models
            model = models.build_resnext(cardinality=args.model_cardinality,
                                         depth=args.model_depth,
                                         width=args.model_width,
                                         num_classes=args.num_classes)
        logger.info("Total params: {:.2f}M".format(
            sum(p.numel() for p in model.parameters())/1e6))
        return model
    # Device / process-group setup: local_rank == -1 means single process.
    if args.local_rank == -1:
        device = torch.device('cuda', args.gpu_id)
        args.world_size = 1
        args.n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device('cuda', args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.world_size = torch.distributed.get_world_size()
        args.n_gpu = 1
    args.device = device
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning(
        f"Process rank: {args.local_rank}, "
        f"device: {args.device}, "
        f"n_gpu: {args.n_gpu}, "
        f"distributed training: {bool(args.local_rank != -1)}, "
        f"16-bits training: {args.amp}",)
    logger.info(dict(args._get_kwargs()))
    if args.seed is not None:
        set_seed(args)
    # Only rank 0 writes TensorBoard logs / creates the output dir.
    if args.local_rank in [-1, 0]:
        os.makedirs(args.out, exist_ok=True)
        args.writer = SummaryWriter(args.out)
    # Per-dataset architecture hyper-parameters (paper configurations).
    if args.dataset == 'cifar10':
        args.num_classes = 10
        if args.arch == 'wideresnet':
            args.model_depth = 28
            args.model_width = 2
        elif args.arch == 'resnext':
            args.model_cardinality = 4
            args.model_depth = 28
            args.model_width = 4
    elif args.dataset == 'cifar100':
        args.num_classes = 100
        if args.arch == 'wideresnet':
            args.model_depth = 28
            args.model_width = 8
        elif args.arch == 'resnext':
            args.model_cardinality = 8
            args.model_depth = 29
            args.model_width = 64
    # Barrier pattern: rank 0 downloads the dataset while others wait.
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()
    labeled_dataset, unlabeled_dataset, test_dataset = DATASET_GETTERS[args.dataset](
        args, './data')
    if args.local_rank == 0:
        torch.distributed.barrier()
    train_sampler = RandomSampler if args.local_rank == -1 else DistributedSampler
    labeled_trainloader = DataLoader(
        labeled_dataset,
        sampler=train_sampler(labeled_dataset),
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        drop_last=True)
    # Unlabeled batches are mu times larger than labeled ones (FixMatch).
    unlabeled_trainloader = DataLoader(
        unlabeled_dataset,
        sampler=train_sampler(unlabeled_dataset),
        batch_size=args.batch_size*args.mu,
        num_workers=args.num_workers,
        drop_last=True)
    test_loader = DataLoader(
        test_dataset,
        sampler=SequentialSampler(test_dataset),
        batch_size=args.batch_size,
        num_workers=args.num_workers)
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()
    model = create_model(args)
    if args.local_rank == 0:
        torch.distributed.barrier()
    model.to(args.device)
    # No weight decay on biases and batch-norm parameters.
    no_decay = ['bias', 'bn']
    grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(
            nd in n for nd in no_decay)], 'weight_decay': args.wdecay},
        {'params': [p for n, p in model.named_parameters() if any(
            nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = optim.SGD(grouped_parameters, lr=args.lr,
                          momentum=0.9, nesterov=args.nesterov)
    args.epochs = math.ceil(args.total_steps / args.eval_step)
    scheduler = get_cosine_schedule_with_warmup(
        optimizer, args.warmup, args.total_steps)
    if args.use_ema:
        from models.ema import ModelEMA
        ema_model = ModelEMA(args, model, args.ema_decay)
    args.start_epoch = 0
    if args.resume:
        logger.info("==> Resuming from checkpoint..")
        assert os.path.isfile(
            args.resume), "Error: no checkpoint directory found!"
        args.out = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        args.start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        if args.use_ema:
            ema_model.ema.load_state_dict(checkpoint['ema_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
    if args.amp:
        from apex import amp
        model, optimizer = amp.initialize(
            model, optimizer, opt_level=args.opt_level)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank],
            output_device=args.local_rank, find_unused_parameters=True)
    logger.info("***** Running training *****")
    logger.info(f"  Task = {args.dataset}@{args.num_labeled}")
    logger.info(f"  Num Epochs = {args.epochs}")
    logger.info(f"  Batch size per GPU = {args.batch_size}")
    logger.info(
        f"  Total train batch size = {args.batch_size*args.world_size}")
    logger.info(f"  Total optimization steps = {args.total_steps}")
    model.zero_grad()
    train(args, labeled_trainloader, unlabeled_trainloader, test_loader,
          model, optimizer, ema_model, scheduler)
def train(args, labeled_trainloader, unlabeled_trainloader, test_loader,
          model, optimizer, ema_model, scheduler):
    """Run the FixMatch training loop.

    Each "epoch" is ``args.eval_step`` optimization steps that draw one
    labeled and one (mu-times larger) unlabeled batch; rank 0 then evaluates
    and checkpoints.  Loaders are re-wound when exhausted.
    """
    if args.amp:
        from apex import amp
    global best_acc
    test_accs = []
    end = time.time()
    if args.world_size > 1:
        labeled_epoch = 0
        unlabeled_epoch = 0
        labeled_trainloader.sampler.set_epoch(labeled_epoch)
        unlabeled_trainloader.sampler.set_epoch(unlabeled_epoch)
    labeled_iter = iter(labeled_trainloader)
    unlabeled_iter = iter(unlabeled_trainloader)
    model.train()
    for epoch in range(args.start_epoch, args.epochs):
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        losses_x = AverageMeter()
        losses_u = AverageMeter()
        mask_probs = AverageMeter()
        if not args.no_progress:
            p_bar = tqdm(range(args.eval_step),
                         disable=args.local_rank not in [-1, 0])
        for batch_idx in range(args.eval_step):
            # Fix: use the builtin next() instead of the Python-2 style
            # iterator.next(), which modern DataLoader iterators no longer
            # provide, and catch only StopIteration so genuine errors are
            # not silently treated as loader exhaustion.
            try:
                inputs_x, targets_x = next(labeled_iter)
            except StopIteration:
                if args.world_size > 1:
                    labeled_epoch += 1
                    labeled_trainloader.sampler.set_epoch(labeled_epoch)
                labeled_iter = iter(labeled_trainloader)
                inputs_x, targets_x = next(labeled_iter)
            try:
                (inputs_u_w, inputs_u_s), _ = next(unlabeled_iter)
            except StopIteration:
                if args.world_size > 1:
                    unlabeled_epoch += 1
                    unlabeled_trainloader.sampler.set_epoch(unlabeled_epoch)
                unlabeled_iter = iter(unlabeled_trainloader)
                (inputs_u_w, inputs_u_s), _ = next(unlabeled_iter)
            data_time.update(time.time() - end)
            batch_size = inputs_x.shape[0]
            # Interleave so a single forward pass mixes labeled/unlabeled
            # samples (keeps batch-norm statistics consistent).
            inputs = interleave(
                torch.cat((inputs_x, inputs_u_w, inputs_u_s)), 2*args.mu+1).to(args.device)
            targets_x = targets_x.to(args.device)
            logits = model(inputs)
            logits = de_interleave(logits, 2*args.mu+1)
            logits_x = logits[:batch_size]
            logits_u_w, logits_u_s = logits[batch_size:].chunk(2)
            del logits
            Lx = F.cross_entropy(logits_x, targets_x, reduction='mean')
            # Pseudo-labels come from the weakly augmented view; only
            # confident predictions (>= threshold) enter the unlabeled loss.
            pseudo_label = torch.softmax(logits_u_w.detach()/args.T, dim=-1)
            max_probs, targets_u = torch.max(pseudo_label, dim=-1)
            mask = max_probs.ge(args.threshold).float()
            Lu = (F.cross_entropy(logits_u_s, targets_u,
                                  reduction='none') * mask).mean()
            loss = Lx + args.lambda_u * Lu
            if args.amp:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            losses.update(loss.item())
            losses_x.update(Lx.item())
            losses_u.update(Lu.item())
            optimizer.step()
            scheduler.step()
            if args.use_ema:
                ema_model.update(model)
            model.zero_grad()
            batch_time.update(time.time() - end)
            end = time.time()
            mask_probs.update(mask.mean().item())
            if not args.no_progress:
                p_bar.set_description("Train Epoch: {epoch}/{epochs:4}. Iter: {batch:4}/{iter:4}. LR: {lr:.4f}. Data: {data:.3f}s. Batch: {bt:.3f}s. Loss: {loss:.4f}. Loss_x: {loss_x:.4f}. Loss_u: {loss_u:.4f}. Mask: {mask:.2f}. ".format(
                    epoch=epoch + 1,
                    epochs=args.epochs,
                    batch=batch_idx + 1,
                    iter=args.eval_step,
                    lr=scheduler.get_last_lr()[0],
                    data=data_time.avg,
                    bt=batch_time.avg,
                    loss=losses.avg,
                    loss_x=losses_x.avg,
                    loss_u=losses_u.avg,
                    mask=mask_probs.avg))
                p_bar.update()
        if not args.no_progress:
            p_bar.close()
        # Evaluate the EMA weights when enabled, else the raw model.
        if args.use_ema:
            test_model = ema_model.ema
        else:
            test_model = model
        if args.local_rank in [-1, 0]:
            test_loss, test_acc = test(args, test_loader, test_model, epoch)
            args.writer.add_scalar('train/1.train_loss', losses.avg, epoch)
            args.writer.add_scalar('train/2.train_loss_x', losses_x.avg, epoch)
            args.writer.add_scalar('train/3.train_loss_u', losses_u.avg, epoch)
            args.writer.add_scalar('train/4.mask', mask_probs.avg, epoch)
            args.writer.add_scalar('test/1.test_acc', test_acc, epoch)
            args.writer.add_scalar('test/2.test_loss', test_loss, epoch)
            is_best = test_acc > best_acc
            best_acc = max(test_acc, best_acc)
            model_to_save = model.module if hasattr(model, "module") else model
            if args.use_ema:
                ema_to_save = ema_model.ema.module if hasattr(
                    ema_model.ema, "module") else ema_model.ema
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model_to_save.state_dict(),
                'ema_state_dict': ema_to_save.state_dict() if args.use_ema else None,
                'acc': test_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
                'scheduler': scheduler.state_dict(),
            }, is_best, args.out)
            test_accs.append(test_acc)
            logger.info('Best top-1 acc: {:.2f}'.format(best_acc))
            logger.info('Mean top-1 acc: {:.2f}\n'.format(
                np.mean(test_accs[-20:])))
    if args.local_rank in [-1, 0]:
        args.writer.close()
def test(args, test_loader, model, epoch):
    """Evaluate *model* on *test_loader*; return (avg loss, top-1 accuracy)."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()
    if not args.no_progress:
        # NOTE: test_loader is deliberately rebound to a tqdm wrapper; the
        # len()/close() calls below rely on tqdm proxying the loader.
        test_loader = tqdm(test_loader,
                           disable=args.local_rank not in [-1, 0])
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(test_loader):
            data_time.update(time.time() - end)
            model.eval()
            inputs = inputs.to(args.device)
            targets = targets.to(args.device)
            outputs = model(inputs)
            loss = F.cross_entropy(outputs, targets)
            prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
            # Weight every running average by the batch size so partial
            # final batches do not skew the epoch statistics.
            losses.update(loss.item(), inputs.shape[0])
            top1.update(prec1.item(), inputs.shape[0])
            top5.update(prec5.item(), inputs.shape[0])
            batch_time.update(time.time() - end)
            end = time.time()
            if not args.no_progress:
                test_loader.set_description("Test Iter: {batch:4}/{iter:4}. Data: {data:.3f}s. Batch: {bt:.3f}s. Loss: {loss:.4f}. top1: {top1:.2f}. top5: {top5:.2f}. ".format(
                    batch=batch_idx + 1,
                    iter=len(test_loader),
                    data=data_time.avg,
                    bt=batch_time.avg,
                    loss=losses.avg,
                    top1=top1.avg,
                    top5=top5.avg,
                ))
        if not args.no_progress:
            test_loader.close()
    logger.info("top-1 acc: {:.2f}".format(top1.avg))
    logger.info("top-5 acc: {:.2f}".format(top5.avg))
    return losses.avg, top1.avg
# Script entry point: run training when executed directly.
if __name__ == '__main__':
    main()
| 19,677 | 38.199203 | 238 | py |
FixMatch-pytorch | FixMatch-pytorch-master/dataset/cifar.py | import logging
import math
import numpy as np
from PIL import Image
from torchvision import datasets
from torchvision import transforms
from .randaugment import RandAugmentMC
logger = logging.getLogger(__name__)
cifar10_mean = (0.4914, 0.4822, 0.4465)
cifar10_std = (0.2471, 0.2435, 0.2616)
cifar100_mean = (0.5071, 0.4867, 0.4408)
cifar100_std = (0.2675, 0.2565, 0.2761)
normal_mean = (0.5, 0.5, 0.5)
normal_std = (0.5, 0.5, 0.5)
def get_cifar10(args, root):
    """Build the three CIFAR-10 splits FixMatch trains on.

    Returns (labeled_train, unlabeled_train, test).  The labeled split uses
    standard flip+crop augmentation; the unlabeled split yields a
    (weak, strong) augmentation pair via TransformFixMatch.  Downloads the
    dataset into *root* on first use.
    """
    transform_labeled = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(size=32,
                              padding=int(32*0.125),
                              padding_mode='reflect'),
        transforms.ToTensor(),
        transforms.Normalize(mean=cifar10_mean, std=cifar10_std)
    ])
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=cifar10_mean, std=cifar10_std)
    ])
    base_dataset = datasets.CIFAR10(root, train=True, download=True)
    # Split indices class-balanced; every index stays in the unlabeled pool.
    train_labeled_idxs, train_unlabeled_idxs = x_u_split(
        args, base_dataset.targets)
    train_labeled_dataset = CIFAR10SSL(
        root, train_labeled_idxs, train=True,
        transform=transform_labeled)
    train_unlabeled_dataset = CIFAR10SSL(
        root, train_unlabeled_idxs, train=True,
        transform=TransformFixMatch(mean=cifar10_mean, std=cifar10_std))
    test_dataset = datasets.CIFAR10(
        root, train=False, transform=transform_val, download=False)
    return train_labeled_dataset, train_unlabeled_dataset, test_dataset
def get_cifar100(args, root):
    """CIFAR-100 counterpart of :func:`get_cifar10`; same split logic,
    different normalization statistics."""
    transform_labeled = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(size=32,
                              padding=int(32*0.125),
                              padding_mode='reflect'),
        transforms.ToTensor(),
        transforms.Normalize(mean=cifar100_mean, std=cifar100_std)])
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=cifar100_mean, std=cifar100_std)])
    base_dataset = datasets.CIFAR100(
        root, train=True, download=True)
    train_labeled_idxs, train_unlabeled_idxs = x_u_split(
        args, base_dataset.targets)
    train_labeled_dataset = CIFAR100SSL(
        root, train_labeled_idxs, train=True,
        transform=transform_labeled)
    train_unlabeled_dataset = CIFAR100SSL(
        root, train_unlabeled_idxs, train=True,
        transform=TransformFixMatch(mean=cifar100_mean, std=cifar100_std))
    test_dataset = datasets.CIFAR100(
        root, train=False, transform=transform_val, download=False)
    return train_labeled_dataset, train_unlabeled_dataset, test_dataset
def x_u_split(args, labels):
    """Select a class-balanced labeled subset of *labels*.

    Returns (labeled_idx, unlabeled_idx).  The unlabeled pool contains every
    index (the labeled samples are reused as unlabeled data).  When
    ``args.expand_labels`` is set, or fewer labels than one batch exist, the
    labeled indices are tiled so one eval cycle can always be filled.
    """
    per_class = args.num_labeled // args.num_classes
    labels = np.array(labels)
    # unlabeled data: all data (https://github.com/kekmodel/FixMatch-pytorch/issues/10)
    unlabeled_idx = np.array(range(len(labels)))
    chosen = []
    for cls in range(args.num_classes):
        cls_pool = np.where(labels == cls)[0]
        chosen.append(np.random.choice(cls_pool, per_class, False))
    labeled_idx = np.concatenate(chosen)
    assert len(labeled_idx) == args.num_labeled
    if args.expand_labels or args.num_labeled < args.batch_size:
        num_expand_x = math.ceil(
            args.batch_size * args.eval_step / args.num_labeled)
        labeled_idx = np.hstack([labeled_idx for _ in range(num_expand_x)])
    np.random.shuffle(labeled_idx)
    return labeled_idx, unlabeled_idx
class TransformFixMatch(object):
    """Produce the FixMatch (weak, strong) augmentation pair for one image.

    Weak = flip + pad/crop; strong = the same plus RandAugment.  Both views
    are normalized with the supplied per-channel mean/std.
    """

    def __init__(self, mean, std):
        def _base_ops():
            # Fresh transform instances per pipeline.
            return [transforms.RandomHorizontalFlip(),
                    transforms.RandomCrop(size=32,
                                          padding=int(32*0.125),
                                          padding_mode='reflect')]
        self.weak = transforms.Compose(_base_ops())
        self.strong = transforms.Compose(_base_ops() + [RandAugmentMC(n=2, m=10)])
        self.normalize = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)])

    def __call__(self, x):
        return self.normalize(self.weak(x)), self.normalize(self.strong(x))
class CIFAR10SSL(datasets.CIFAR10):
    """CIFAR-10 restricted to a subset of indices (labeled or unlabeled split)."""

    def __init__(self, root, indexs, train=True,
                 transform=None, target_transform=None,
                 download=False):
        super().__init__(root, train=train,
                         transform=transform,
                         target_transform=target_transform,
                         download=download)
        if indexs is not None:
            # Keep only the requested samples; targets become an ndarray so
            # fancy indexing works.
            self.data = self.data[indexs]
            self.targets = np.array(self.targets)[indexs]

    def __getitem__(self, index):
        img = Image.fromarray(self.data[index])
        target = self.targets[index]
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
class CIFAR100SSL(datasets.CIFAR100):
    """CIFAR-100 restricted to a subset of indices (labeled or unlabeled split)."""

    def __init__(self, root, indexs, train=True,
                 transform=None, target_transform=None,
                 download=False):
        super().__init__(root, train=train,
                         transform=transform,
                         target_transform=target_transform,
                         download=download)
        if indexs is not None:
            # Keep only the requested samples; targets become an ndarray so
            # fancy indexing works.
            self.data = self.data[indexs]
            self.targets = np.array(self.targets)[indexs]

    def __getitem__(self, index):
        img = Image.fromarray(self.data[index])
        target = self.targets[index]
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
# Registry mapping --dataset names to their split-builder factories.
DATASET_GETTERS = {'cifar10': get_cifar10,
                   'cifar100': get_cifar100}
| 6,239 | 33.098361 | 87 | py |
FixMatch-pytorch | FixMatch-pytorch-master/dataset/randaugment.py | # code in this file is adpated from
# https://github.com/ildoonet/pytorch-randaugment/blob/master/RandAugment/augmentations.py
# https://github.com/google-research/fixmatch/blob/master/third_party/auto_augment/augmentations.py
# https://github.com/google-research/fixmatch/blob/master/libml/ctaugment.py
import logging
import random
import numpy as np
import PIL
import PIL.ImageOps
import PIL.ImageEnhance
import PIL.ImageDraw
from PIL import Image
logger = logging.getLogger(__name__)
PARAMETER_MAX = 10
# --- PIL-based augmentation primitives -------------------------------------
# Each op takes a PIL image plus a discrete magnitude v in [0, PARAMETER_MAX]
# that is rescaled to the op's own range via _float_parameter/_int_parameter;
# extra **kwarg lets parameterless ops share the common call signature.
def AutoContrast(img, **kwarg):
    return PIL.ImageOps.autocontrast(img)
def Brightness(img, v, max_v, bias=0):
    v = _float_parameter(v, max_v) + bias
    return PIL.ImageEnhance.Brightness(img).enhance(v)
def Color(img, v, max_v, bias=0):
    v = _float_parameter(v, max_v) + bias
    return PIL.ImageEnhance.Color(img).enhance(v)
def Contrast(img, v, max_v, bias=0):
    v = _float_parameter(v, max_v) + bias
    return PIL.ImageEnhance.Contrast(img).enhance(v)
def Cutout(img, v, max_v, bias=0):
    # v is a fraction of the shorter image side; v == 0 is a no-op.
    if v == 0:
        return img
    v = _float_parameter(v, max_v) + bias
    v = int(v * min(img.size))
    return CutoutAbs(img, v)
def CutoutAbs(img, v, **kwarg):
    # Erase a v x v square (clipped at the borders) at a random position.
    w, h = img.size
    x0 = np.random.uniform(0, w)
    y0 = np.random.uniform(0, h)
    x0 = int(max(0, x0 - v / 2.))
    y0 = int(max(0, y0 - v / 2.))
    x1 = int(min(w, x0 + v))
    y1 = int(min(h, y0 + v))
    xy = (x0, y0, x1, y1)
    # gray
    color = (127, 127, 127)
    img = img.copy()
    PIL.ImageDraw.Draw(img).rectangle(xy, color)
    return img
def Equalize(img, **kwarg):
    return PIL.ImageOps.equalize(img)
def Identity(img, **kwarg):
    return img
def Invert(img, **kwarg):
    return PIL.ImageOps.invert(img)
def Posterize(img, v, max_v, bias=0):
    v = _int_parameter(v, max_v) + bias
    return PIL.ImageOps.posterize(img, v)
def Rotate(img, v, max_v, bias=0):
    v = _int_parameter(v, max_v) + bias
    # Random sign: rotate clockwise or counter-clockwise with equal chance.
    if random.random() < 0.5:
        v = -v
    return img.rotate(v)
def Sharpness(img, v, max_v, bias=0):
    v = _float_parameter(v, max_v) + bias
    return PIL.ImageEnhance.Sharpness(img).enhance(v)
def ShearX(img, v, max_v, bias=0):
    v = _float_parameter(v, max_v) + bias
    if random.random() < 0.5:
        v = -v
    return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))
def ShearY(img, v, max_v, bias=0):
    v = _float_parameter(v, max_v) + bias
    if random.random() < 0.5:
        v = -v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))
def Solarize(img, v, max_v, bias=0):
    # Larger v inverts more of the intensity range (threshold 256 - v).
    v = _int_parameter(v, max_v) + bias
    return PIL.ImageOps.solarize(img, 256 - v)
def SolarizeAdd(img, v, max_v, bias=0, threshold=128):
    """Shift all pixels by a random-signed offset, then solarize at *threshold*.

    Fix: ``np.int`` was a deprecated alias for the builtin ``int`` and was
    removed in NumPy 1.24, so the original raised AttributeError on current
    NumPy; a concrete integer dtype gives identical results with headroom
    for the offset before clipping.
    """
    v = _int_parameter(v, max_v) + bias
    if random.random() < 0.5:
        v = -v
    img_np = np.array(img).astype(np.int64)
    img_np = img_np + v
    img_np = np.clip(img_np, 0, 255)
    img_np = img_np.astype(np.uint8)
    img = Image.fromarray(img_np)
    return PIL.ImageOps.solarize(img, threshold)
def TranslateX(img, v, max_v, bias=0):
    # v is a fraction of the image width; sign chosen at random.
    v = _float_parameter(v, max_v) + bias
    if random.random() < 0.5:
        v = -v
    v = int(v * img.size[0])
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateY(img, v, max_v, bias=0):
    # v is a fraction of the image height; sign chosen at random.
    v = _float_parameter(v, max_v) + bias
    if random.random() < 0.5:
        v = -v
    v = int(v * img.size[1])
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def _float_parameter(v, max_v):
    """Rescale discrete level *v* in [0, PARAMETER_MAX] to [0, max_v]."""
    return float(v) * max_v / PARAMETER_MAX
def _int_parameter(v, max_v):
    """Rescale discrete level *v* in [0, PARAMETER_MAX] to an int in [0, max_v]."""
    return int(v * max_v / PARAMETER_MAX)
def fixmatch_augment_pool():
    """Op pool from the FixMatch paper: (op, max_v, bias) triples; ``None``
    entries mark parameterless ops."""
    # FixMatch paper
    augs = [(AutoContrast, None, None),
            (Brightness, 0.9, 0.05),
            (Color, 0.9, 0.05),
            (Contrast, 0.9, 0.05),
            (Equalize, None, None),
            (Identity, None, None),
            (Posterize, 4, 4),
            (Rotate, 30, 0),
            (Sharpness, 0.9, 0.05),
            (ShearX, 0.3, 0),
            (ShearY, 0.3, 0),
            (Solarize, 256, 0),
            (TranslateX, 0.3, 0),
            (TranslateY, 0.3, 0)]
    return augs
def my_augment_pool():
    """Wider experimental pool (adds Cutout/Invert/SolarizeAdd, larger ranges)."""
    # Test
    augs = [(AutoContrast, None, None),
            (Brightness, 1.8, 0.1),
            (Color, 1.8, 0.1),
            (Contrast, 1.8, 0.1),
            (Cutout, 0.2, 0),
            (Equalize, None, None),
            (Invert, None, None),
            (Posterize, 4, 4),
            (Rotate, 30, 0),
            (Sharpness, 1.8, 0.1),
            (ShearX, 0.3, 0),
            (ShearY, 0.3, 0),
            (Solarize, 256, 0),
            (SolarizeAdd, 110, 0),
            (TranslateX, 0.45, 0),
            (TranslateY, 0.45, 0)]
    return augs
class RandAugmentPC(object):
    """RandAugment with a fixed magnitude *m*: each of *n* sampled ops fires
    with a random probability, then a fixed 16px cutout is applied."""

    def __init__(self, n, m):
        assert n >= 1
        assert 1 <= m <= 10
        self.n = n
        self.m = m
        self.augment_pool = my_augment_pool()

    def __call__(self, img):
        for op, max_v, bias in random.choices(self.augment_pool, k=self.n):
            prob = np.random.uniform(0.2, 0.8)
            if random.random() + prob >= 1:
                img = op(img, v=self.m, max_v=max_v, bias=bias)
        return CutoutAbs(img, int(32*0.5))
class RandAugmentMC(object):
    """RandAugment variant used by FixMatch: *n* sampled ops, each applied
    with probability 0.5 at a random magnitude in [1, m), then a fixed
    16px cutout."""

    def __init__(self, n, m):
        assert n >= 1
        assert 1 <= m <= 10
        self.n = n
        self.m = m
        self.augment_pool = fixmatch_augment_pool()

    def __call__(self, img):
        for op, max_v, bias in random.choices(self.augment_pool, k=self.n):
            v = np.random.randint(1, self.m)
            if random.random() < 0.5:
                img = op(img, v=v, max_v=max_v, bias=bias)
        return CutoutAbs(img, int(32*0.5))
| 5,821 | 25.343891 | 99 | py |
FixMatch-pytorch | FixMatch-pytorch-master/models/resnext.py | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
def mish(x):
    """Mish: A Self Regularized Non-Monotonic Neural Activation Function (https://arxiv.org/abs/1908.08681)"""
    softplus_x = F.softplus(x)
    return x * torch.tanh(softplus_x)
class PSBatchNorm2d(nn.BatchNorm2d):
    """How Does BN Increase Collapsed Neural Network Filters? (https://arxiv.org/abs/2001.11216)

    Fix: the class header was garbled as ``class nn.BatchNorm2d(nn.BatchNorm2d)``,
    which is a SyntaxError (a dotted name cannot be a class name) and would
    shadow torch's BatchNorm2d; restored the ``PSBatchNorm2d`` name used by the
    matching definition in wideresnet.py.  Adds a constant *alpha* to the
    normalized output so filters cannot collapse to exactly zero.
    """
    def __init__(self, num_features, alpha=0.1, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True):
        super().__init__(num_features, eps, momentum, affine, track_running_stats)
        self.alpha = alpha

    def forward(self, x):
        # Standard BN, shifted by the positive constant alpha.
        return super().forward(x) + self.alpha
class ResNeXtBottleneck(nn.Module):
    """
    RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
    """
    def __init__(self, in_channels, out_channels, stride,
                 cardinality, base_width, widen_factor):
        """ Constructor
        Args:
            in_channels: input channel dimensionality
            out_channels: output channel dimensionality
            stride: conv stride. Replaces pooling layer.
            cardinality: num of convolution groups.
            base_width: base number of channels in each group.
            widen_factor: factor to reduce the input dimensionality before convolution.
        """
        super().__init__()
        # D = per-bottleneck width, scaled so wider blocks keep the same
        # channels-per-group ratio.
        width_ratio = out_channels / (widen_factor * 64.)
        D = cardinality * int(base_width * width_ratio)
        self.conv_reduce = nn.Conv2d(
            in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn_reduce = nn.BatchNorm2d(D, momentum=0.001)
        # Grouped 3x3 convolution carries the "cardinality" of ResNeXt.
        self.conv_conv = nn.Conv2d(D, D,
                                   kernel_size=3, stride=stride, padding=1,
                                   groups=cardinality, bias=False)
        self.bn = nn.BatchNorm2d(D, momentum=0.001)
        self.act = mish
        self.conv_expand = nn.Conv2d(
            D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn_expand = nn.BatchNorm2d(out_channels, momentum=0.001)
        # Projection shortcut only when the channel count changes.
        self.shortcut = nn.Sequential()
        if in_channels != out_channels:
            self.shortcut.add_module('shortcut_conv',
                                     nn.Conv2d(in_channels, out_channels,
                                               kernel_size=1,
                                               stride=stride,
                                               padding=0,
                                               bias=False))
            self.shortcut.add_module(
                'shortcut_bn', nn.BatchNorm2d(out_channels, momentum=0.001))

    def forward(self, x):
        # reduce (1x1) -> grouped 3x3 -> expand (1x1), activation after the sum.
        bottleneck = self.conv_reduce.forward(x)
        bottleneck = self.act(self.bn_reduce.forward(bottleneck))
        bottleneck = self.conv_conv.forward(bottleneck)
        bottleneck = self.act(self.bn.forward(bottleneck))
        bottleneck = self.conv_expand.forward(bottleneck)
        bottleneck = self.bn_expand.forward(bottleneck)
        residual = self.shortcut.forward(x)
        return self.act(residual + bottleneck)
class CifarResNeXt(nn.Module):
    """
    ResNext optimized for the Cifar dataset, as specified in
    https://arxiv.org/pdf/1611.05431.pdf
    """
    def __init__(self, cardinality, depth, num_classes,
                 base_width, widen_factor=4):
        """ Constructor
        Args:
            cardinality: number of convolution groups.
            depth: number of layers.
            nlabels: number of classes
            base_width: base number of channels in each group.
            widen_factor: factor to adjust the channel dimensionality
        """
        super().__init__()
        self.cardinality = cardinality
        self.depth = depth
        # 3 stages x 3 convs per bottleneck, plus stem and classifier.
        self.block_depth = (self.depth - 2) // 9
        self.base_width = base_width
        self.widen_factor = widen_factor
        self.nlabels = num_classes
        self.output_size = 64
        self.stages = [64, 64 * self.widen_factor, 128 *
                       self.widen_factor, 256 * self.widen_factor]
        self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
        self.bn_1 = nn.BatchNorm2d(64, momentum=0.001)
        self.act = mish
        # Stage 1 keeps resolution; stages 2/3 halve it via stride 2.
        self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
        self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
        self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
        self.classifier = nn.Linear(self.stages[3], num_classes)
        # Kaiming init for convs, constant init for BN, Xavier for the head.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='leaky_relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1.0)
                nn.init.constant_(m.bias, 0.0)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)
                nn.init.constant_(m.bias, 0.0)

    def block(self, name, in_channels, out_channels, pool_stride=2):
        """ Stack n bottleneck modules where n is inferred from the depth of the network.
        Args:
            name: string name of the current block.
            in_channels: number of input channels
            out_channels: number of output channels
            pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
        Returns: a Module consisting of n sequential bottlenecks.
        """
        block = nn.Sequential()
        for bottleneck in range(self.block_depth):
            name_ = '%s_bottleneck_%d' % (name, bottleneck)
            # Only the first bottleneck changes channels / downsamples.
            if bottleneck == 0:
                block.add_module(name_, ResNeXtBottleneck(in_channels,
                                                          out_channels,
                                                          pool_stride,
                                                          self.cardinality,
                                                          self.base_width,
                                                          self.widen_factor))
            else:
                block.add_module(name_,
                                 ResNeXtBottleneck(out_channels,
                                                   out_channels,
                                                   1,
                                                   self.cardinality,
                                                   self.base_width,
                                                   self.widen_factor))
        return block

    def forward(self, x):
        # stem -> 3 stages -> global average pool -> linear classifier
        x = self.conv_1_3x3.forward(x)
        x = self.act(self.bn_1.forward(x))
        x = self.stage_1.forward(x)
        x = self.stage_2.forward(x)
        x = self.stage_3.forward(x)
        x = F.adaptive_avg_pool2d(x, 1)
        x = x.view(-1, self.stages[3])
        return self.classifier(x)
def build_resnext(cardinality, depth, width, num_classes):
    """Factory for :class:`CifarResNeXt`; logs the configuration first."""
    logger.info(f"Model: ResNeXt {depth+1}x{width}")
    model = CifarResNeXt(cardinality=cardinality,
                         depth=depth,
                         base_width=width,
                         num_classes=num_classes)
    return model
| 7,390 | 41.97093 | 112 | py |
FixMatch-pytorch | FixMatch-pytorch-master/models/ema.py | from copy import deepcopy
import torch
class ModelEMA(object):
    """Exponential moving average of a model's parameters.

    Keeps a frozen deep copy on ``self.ema``; :meth:`update` blends new
    parameter values in with factor ``1 - decay`` and copies buffers
    (e.g. BN running stats) verbatim.
    """

    def __init__(self, args, model, decay):
        self.ema = deepcopy(model)
        self.ema.to(args.device)
        self.ema.eval()
        self.decay = decay
        self.ema_has_module = hasattr(self.ema, 'module')
        # Fix EMA. https://github.com/valencebond/FixMatch_pytorch  thank you!
        self.param_keys = [k for k, _ in self.ema.named_parameters()]
        self.buffer_keys = [k for k, _ in self.ema.named_buffers()]
        for p in self.ema.parameters():
            p.requires_grad_(False)

    def update(self, model):
        # When the live model is DDP-wrapped but the EMA copy is not, its
        # state-dict keys carry a 'module.' prefix that must be stripped.
        needs_module = hasattr(model, 'module') and not self.ema_has_module

        def _src_key(k):
            return 'module.' + k if needs_module else k

        with torch.no_grad():
            msd = model.state_dict()
            esd = self.ema.state_dict()
            for k in self.param_keys:
                model_v = msd[_src_key(k)].detach()
                ema_v = esd[k]
                esd[k].copy_(ema_v * self.decay + (1. - self.decay) * model_v)
            for k in self.buffer_keys:
                esd[k].copy_(msd[_src_key(k)])
| 1,297 | 32.282051 | 78 | py |
FixMatch-pytorch | FixMatch-pytorch-master/models/wideresnet.py | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
def mish(x):
    """Mish: A Self Regularized Non-Monotonic Neural Activation Function (https://arxiv.org/abs/1908.08681)"""
    return torch.tanh(F.softplus(x)).mul(x)
class PSBatchNorm2d(nn.BatchNorm2d):
    """How Does BN Increase Collapsed Neural Network Filters? (https://arxiv.org/abs/2001.11216)"""

    def __init__(self, num_features, alpha=0.1, eps=1e-05, momentum=0.001, affine=True, track_running_stats=True):
        super().__init__(num_features, eps, momentum, affine, track_running_stats)
        # Constant positive shift added after normalization so filters
        # cannot collapse to exactly zero output.
        self.alpha = alpha

    def forward(self, x):
        normalized = super().forward(x)
        return normalized + self.alpha
class BasicBlock(nn.Module):
    """Pre-activation WideResNet basic block: BN-ReLU-conv x2 + residual."""
    def __init__(self, in_planes, out_planes, stride, drop_rate=0.0, activate_before_residual=False):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes, momentum=0.001)
        self.relu1 = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes, momentum=0.001)
        self.relu2 = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.drop_rate = drop_rate
        # With equal channel counts the identity shortcut is used; otherwise
        # a 1x1 conv projects the residual.
        self.equalInOut = (in_planes == out_planes)
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None
        self.activate_before_residual = activate_before_residual

    def forward(self, x):
        # When activating before the residual, the shortcut also sees the
        # pre-activated tensor (x is rebound); otherwise only the conv path
        # is pre-activated via `out`.
        if not self.equalInOut and self.activate_before_residual == True:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.drop_rate > 0:
            out = F.dropout(out, p=self.drop_rate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """A stage of ``nb_layers`` BasicBlocks; only the first block may change
    channel count or stride."""
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, drop_rate=0.0, activate_before_residual=False):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(
            block, in_planes, out_planes, nb_layers, stride, drop_rate, activate_before_residual)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, drop_rate, activate_before_residual):
        layers = []
        for i in range(int(nb_layers)):
            # `a and x or y` idiom: first block gets in_planes/stride, the
            # rest get out_planes/stride 1.
            layers.append(block(i == 0 and in_planes or out_planes, out_planes,
                                i == 0 and stride or 1, drop_rate, activate_before_residual))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide Residual Network (WRN-``depth``-``widen_factor``) for small images.

    Three residual stages (strides 1, 2, 2) of ``BasicBlock``s built with
    ``NetworkBlock``, followed by BN-LeakyReLU, global average pooling and a
    linear classifier. Requires ``(depth - 4) % 6 == 0``.
    """

    def __init__(self, num_classes, depth=28, widen_factor=2, drop_rate=0.0):
        super(WideResNet, self).__init__()
        channels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        assert((depth - 4) % 6 == 0)
        # FIX: integer division -- the number of blocks per stage is a whole
        # number; the old float value only worked via int() downstream.
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, channels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(
            n, channels[0], channels[1], block, 1, drop_rate, activate_before_residual=True)
        # 2nd block
        self.block2 = NetworkBlock(
            n, channels[1], channels[2], block, 2, drop_rate)
        # 3rd block
        self.block3 = NetworkBlock(
            n, channels[2], channels[3], block, 2, drop_rate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(channels[3], momentum=0.001)
        self.relu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        self.fc = nn.Linear(channels[3], num_classes)
        self.channels = channels[3]

        # He init for convs, constant for BN, Xavier for the classifier.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='leaky_relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1.0)
                nn.init.constant_(m.bias, 0.0)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)
                nn.init.constant_(m.bias, 0.0)

    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.adaptive_avg_pool2d(out, 1)
        out = out.view(-1, self.channels)
        return self.fc(out)
def build_wideresnet(depth, widen_factor, dropout, num_classes):
    """Factory for a WideResNet-``depth``x``widen_factor`` classifier."""
    logger.info(f"Model: WideResNet {depth}x{widen_factor}")
    model = WideResNet(depth=depth,
                       widen_factor=widen_factor,
                       drop_rate=dropout,
                       num_classes=num_classes)
    return model
| 5,338 | 41.373016 | 119 | py |
FixMatch-pytorch | FixMatch-pytorch-master/utils/misc.py | '''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
'''
import logging
import torch
logger = logging.getLogger(__name__)
__all__ = ['get_mean_and_std', 'accuracy', 'AverageMeter']
def get_mean_and_std(dataset):
    '''Compute the mean and std value of dataset.'''
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=1, shuffle=False, num_workers=4)
    channel_mean = torch.zeros(3)
    channel_std = torch.zeros(3)
    logger.info('==> Computing mean and std..')
    # Accumulate per-channel statistics one sample at a time (batch size 1),
    # then normalise by the dataset length.
    for inputs, _targets in loader:
        for channel in range(3):
            channel_mean[channel] += inputs[:, channel, :, :].mean()
            channel_std[channel] += inputs[:, channel, :, :].std()
    channel_mean.div_(len(dataset))
    channel_std.div_(len(dataset))
    return channel_mean, channel_std
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    k_max = max(topk)
    num_samples = target.size(0)

    # Top-k class indices per sample, transposed to (k, batch).
    _, predictions = output.topk(k_max, 1, True, True)
    predictions = predictions.t()
    hits = predictions.eq(target.reshape(1, -1).expand_as(predictions))

    # One percentage per requested k.
    return [hits[:k].reshape(-1).float().sum(0).mul_(100.0 / num_samples)
            for k in topk]
class AverageMeter(object):
    """Tracks the most recent value and a running average.

    Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = self.avg = self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
| 1,726 | 25.569231 | 95 | py |
Deep-xVA-Solver | Deep-xVA-Solver-master/basketCallWithCVA.py | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import xvaEquation as eqn
from solver import BSDESolver
from XvaSolver import XvaSolver
import RecursiveEquation as receqn
import munch
import pandas as pd
# Driver script: price a 100-dimensional basket call with the deep BSDE
# solver (Algorithm 1), plot/export the exposure profiles, then compute the
# bilateral CVA (BCVA) on top of it with the recursive solver (Algorithm 3).
if __name__ == "__main__":
    dim = 100  # dimension of brownian motion
    P = 2048  # number of outer Monte Carlo Loops
    batch_size = 64
    total_time = 1.0
    num_time_interval = 100
    r = 0.01  # risk-free rate
    sigma = 0.25  # volatility
    x_init = 100  # initial spot per asset
    strike = x_init*dim
    exact = 157.99  # reference price used for the exposure plots

    config = {
        "eqn_config": {
            "_comment": "a basket call option",
            "eqn_name": "BasketOption",
            "total_time": total_time,
            "dim": dim,
            "num_time_interval": num_time_interval,
            "strike": strike,
            "r": r,
            "sigma": sigma,
            "x_init": x_init
        },
        "net_config": {
            "y_init_range": [150, 170],  # [154.37,165.41]; set to None when not sure
            "num_hiddens": [dim+10, dim+10],
            "lr_values": [5e-2, 5e-3],  # [5e-1,5e-2, 5e-3]
            "lr_boundaries": [2000],  # [1000,2000]
            "num_iterations": 4000,
            "batch_size": batch_size,
            "valid_size": 128,
            "logging_frequency": 100,
            "dtype": "float64",
            "verbose": True
        }
    }
    config = munch.munchify(config)
    bsde = getattr(eqn, config.eqn_config.eqn_name)(config.eqn_config)
    tf.keras.backend.set_floatx(config.net_config.dtype)

    # apply algorithm 1
    bsde_solver = BSDESolver(config, bsde)
    training_history = bsde_solver.train()

    # Simulate the BSDE after training - MtM scenarios
    simulations = bsde_solver.model.simulate_path(bsde.sample(P))

    # estimated expected positive and negative exposure (discounted)
    time_stamp = np.linspace(0, 1, num_time_interval+1)
    epe = np.mean(np.exp(-r*time_stamp)*np.maximum(simulations, 0), axis=0)
    ene = np.mean(np.exp(-r*time_stamp)*np.minimum(simulations, 0), axis=0)

    # Flat reference profiles for comparison in the plot.
    epe_exact = np.array([exact for s in time_stamp[1:]])
    ene_exact = np.array([0.0 for s in time_stamp[1:]])

    fig = plt.figure()
    plt.plot(time_stamp, [exact] + list(epe_exact), 'b--', label='DEPE = exact solution',)
    plt.plot(time_stamp, np.transpose(epe), 'b', label='DEPE = deep solver approximation')
    plt.plot(time_stamp, [0.0] + list(ene_exact), 'r--', label='DNPE = exact solution',)
    plt.plot(time_stamp, np.transpose(ene), 'r', label='DNPE = deep solver approximation')
    plt.xlabel('t')
    plt.legend()
    plt.show()
    fig.savefig(config.eqn_config.eqn_name + '.pdf', format='pdf')

    # Export the simulated exposure paths for external analysis.
    df = pd.DataFrame(simulations[:, 0, :])
    filepath = 'exposure' + config.eqn_config.eqn_name + '.xlsx'
    df.to_excel(filepath, index=False)

    # BCVA configuration: reuses the trained clean-value model above.
    configBCVA = {
        "eqn_config": {
            "_comment": "BCVA on a basket call",
            "eqn_name": "BCVA",
            "dim": dim,
            "total_time": total_time,
            "num_time_interval": num_time_interval,
            "r": r,
            "recoveryC": 0.3,
            "lambdaC": 0.1,
            "recoveryB": 0.4,
            "lambdaB": 0.01,
            "clean_value": bsde,
            "clean_value_model": bsde_solver.model
        },
        "net_config": {
            "y_init_range": [0, 20],
            "num_hiddens": [dim+10, dim+10],
            "lr_values": [5e-2, 5e-3],
            "lr_boundaries": [2000],
            "num_iterations": 4000,
            "batch_size": batch_size,
            "valid_size": 128,
            "logging_frequency": 100,
            "dtype": "float64",
            "verbose": True
        }
    }
    configBCVA = munch.munchify(configBCVA)
    bcvabsde = getattr(receqn, configBCVA.eqn_config.eqn_name)(configBCVA.eqn_config)
    tf.keras.backend.set_floatx(configBCVA.net_config.dtype)

    # apply algorithm 3
    bcva_solver = XvaSolver(configBCVA, bcvabsde)
    # observed training result: loss: 1.7611e-01, Y0: 6.9664e-01
    bcva_training_history = bcva_solver.train()

    bcva_simulations = bcva_solver.model.simulate_path(bcvabsde.sample(P))

    # observed reference output: (0.699395244753698, [0.6903630282972714, 0.7084274612101246])
    print(bcvabsde.monte_carlo(100000))
Deep-xVA-Solver | Deep-xVA-Solver-master/callOption.py | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from solver import BSDESolver
import xvaEquation as eqn
import munch
import pandas as pd
# Driver script: price a 1-d European call with the deep BSDE solver and
# plot the resulting discounted exposure profiles against the analytic price.
if __name__ == "__main__":
    dim = 1  # dimension of brownian motion
    P = 2048  # number of outer Monte Carlo loops
    batch_size = 64
    total_time = 1.0
    num_time_interval = 200
    strike = 100
    r = 0.01
    sigma = 0.25
    x_init = 100

    config = {
        "eqn_config": {
            "_comment": "a basket call option",
            "eqn_name": "CallOption",
            "total_time": total_time,
            "dim": dim,
            "num_time_interval": num_time_interval,
            # FIX: "strike" was listed twice in this dict; the redundant
            # duplicate (last-one-wins, same value) has been removed.
            "strike": strike,
            "r": r,
            "sigma": sigma,
            "x_init": x_init
        },
        "net_config": {
            "y_init_range": [9, 11],
            "num_hiddens": [dim+20, dim+20],
            "lr_values": [5e-2, 5e-3],
            "lr_boundaries": [2000],
            "num_iterations": 4000,
            "batch_size": batch_size,
            "valid_size": 1024,
            "logging_frequency": 100,
            "dtype": "float64",
            "verbose": True
        }
    }
    config = munch.munchify(config)
    bsde = getattr(eqn, config.eqn_config.eqn_name)(config.eqn_config)
    tf.keras.backend.set_floatx(config.net_config.dtype)

    # apply algorithm 1
    bsde_solver = BSDESolver(config, bsde)
    training_history = bsde_solver.train()

    # Simulate the BSDE after training - MtM scenarios.
    # BUGFIX: the model exposes `simulate_path` (see solver.py); the
    # previously called `simulate` method does not exist.
    simulations = bsde_solver.model.simulate_path(bsde.sample(P))

    # estimated expected positive and negative exposure (discounted)
    time_stamp = np.linspace(0, 1, num_time_interval + 1)
    epe = np.mean(np.exp(-r * time_stamp) * np.maximum(simulations, 0), axis=0)
    ene = np.mean(np.exp(-r * time_stamp) * np.minimum(simulations, 0), axis=0)

    exact = bsde.SolExact(0, x_init)
    epe_exact = np.array([exact for s in time_stamp[1:]])
    ene_exact = np.array([0.0 for s in time_stamp[1:]])

    fig = plt.figure()
    plt.plot(time_stamp, [exact] + list(epe_exact), 'b--', label='DEPE = exact solution',)
    # BUGFIX: epe/ene have shape (1, N+1); transpose them (as the other
    # driver scripts do) so matplotlib sees matching first dimensions.
    plt.plot(time_stamp, np.transpose(epe), 'b', label='DEPE = deep solver approximation')
    plt.plot(time_stamp, [0.0] + list(ene_exact), 'r--', label='DNPE = exact solution',)
    plt.plot(time_stamp, np.transpose(ene), 'r', label='DNPE = deep solver approximation')
    plt.xlabel('t')
    plt.legend()
    plt.show()
    fig.savefig(config.eqn_config.eqn_name + '.pdf', format='pdf')

    # Export the simulated exposure paths for external analysis.
    df = pd.DataFrame(simulations[:, 0, :])
    filepath = 'exposure' + config.eqn_config.eqn_name + '.xlsx'
    df.to_excel(filepath, index=False)
| 2,853 | 32.186047 | 87 | py |
Deep-xVA-Solver | Deep-xVA-Solver-master/fvaForward.py | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from solver import BSDESolver
from XvaSolver import XvaSolver
import xvaEquation as eqn
import RecursiveEquation as receqn
import munch
from scipy.stats import norm
# Driver script: price a forward contract with the deep BSDE solver, then
# compute its funding valuation adjustment (FVA) with the recursive solver
# (Algorithm 3) and compare against the analytic result.
if __name__ == "__main__":
    dim = 1  # dimension of brownian motion
    P = 2048  # number of outer Monte Carlo loops
    batch_size = 64
    total_time = 1.0
    num_time_interval = 100
    strike = 100
    r = 0.02
    sigma = 0.25
    x_init = 100

    config = {
        "eqn_config": {
            "_comment": "a forward contract",
            "eqn_name": "PricingForward",
            "total_time": total_time,
            "dim": dim,
            "num_time_interval": num_time_interval,
            "strike": strike,
            "r": r,
            "sigma": sigma,
            "x_init": x_init
        },
        "net_config": {
            "y_init_range": [-5, 5],
            "num_hiddens": [dim+20, dim+20],
            "lr_values": [5e-2, 5e-3],
            "lr_boundaries": [2000],
            "num_iterations": 4000,
            "batch_size": batch_size,
            "valid_size": 256,
            "logging_frequency": 100,
            "dtype": "float64",
            "verbose": True
        }
    }
    config = munch.munchify(config)
    bsde = getattr(eqn, config.eqn_config.eqn_name)(config.eqn_config)
    tf.keras.backend.set_floatx(config.net_config.dtype)

    # apply algorithm 1
    bsde_solver = BSDESolver(config, bsde)
    training_history = bsde_solver.train()

    # apply trained model to evaluate value of the forward contract via Monte Carlo
    simulations = bsde_solver.model.simulate_path(bsde.sample(P))

    # estimated expected positive and negative exposure (discounted)
    time_stamp = np.linspace(0, 1, num_time_interval + 1)
    epe = np.mean(np.exp(-r * time_stamp) * np.maximum(simulations, 0), axis=0)
    ene = np.mean(np.exp(-r * time_stamp) * np.minimum(simulations, 0), axis=0)

    # exact solution (Black-Scholes style d1/d2 terms per time point)
    rv = norm()
    d1 = np.array([(-r * s + np.log(x_init/strike) + (r + sigma**2/2)*s)/sigma/np.sqrt(s)
                   for s in time_stamp[1:]])
    d2 = np.array([d1[i] - sigma*np.sqrt(s) for i, s in enumerate(time_stamp[1:])])
    epe_exact = x_init*rv.cdf(d1) - strike*np.exp(-r)*rv.cdf(d2)
    ene_exact = x_init*rv.cdf(-d1) - strike*np.exp(-r)*rv.cdf(-d2)

    plt.figure()
    plt.plot(time_stamp, [epe_exact[0]] + list(epe_exact), 'b--', label='DEPE = exact solution')
    plt.plot(time_stamp, epe[0], 'b', label='DEPE = deep solver approximation')
    plt.plot(time_stamp, [ene_exact[0]] + list(ene_exact), 'r--', label='DNPE = exact solution')
    plt.plot(time_stamp, ene[0], 'r', label='DNPE = deep solver approximation')
    plt.xlabel('t')
    plt.legend()
    plt.show()

    # bsde_solver.model.save('testmodel.tf',save_format='tf')

    # XVA computation step.
    r_f = 0.04
    configFVA = {
        "eqn_config": {
            "_comment": "XVA on a forward",
            "eqn_name": "FVA",
            # BUGFIX: "dim" was missing; the XVA solver's model reads
            # eqn_config.dim (cf. the BCVA driver, which sets it).
            "dim": dim,
            "total_time": total_time,
            "num_time_interval": num_time_interval,
            "r": r,
            "r_fl": r_f,
            "r_fb": r_f,
            "r_cl": 0.00,
            # BUGFIX: "r_cl" appeared twice; by symmetry with r_fl/r_fb the
            # second entry was presumably meant to be the borrowing rate
            # "r_cb" -- verify against the FVA equation class.
            "r_cb": 0.00,
            "clean_value": bsde,
            "clean_value_model": bsde_solver.model
        },
        "net_config": {
            "y_init_range": [-5, 5],
            "num_hiddens": [dim+20, dim+20],
            "lr_values": [5e-2, 5e-3],
            "lr_boundaries": [2000],
            "num_iterations": 4000,
            "batch_size": batch_size,
            "valid_size": 256,
            "logging_frequency": 100,
            "dtype": "float64",
            "verbose": True
        }
    }
    configFVA = munch.munchify(configFVA)
    fvabsde = getattr(receqn, configFVA.eqn_config.eqn_name)(configFVA.eqn_config)
    tf.keras.backend.set_floatx(configFVA.net_config.dtype)

    # apply algorithm 3
    # BUGFIX: pass configFVA (not the clean-pricing config) to the XVA solver.
    xva_solver = XvaSolver(configFVA, fvabsde)
    xva_training_history = xva_solver.train()

    fva_simulations = xva_solver.model.simulate_path(fvabsde.sample(P))

    print("Exact Values from analytic formulas")
    exactVhat = x_init - strike*np.exp(-r * total_time)
    exactV = np.exp(-(r_f - r) * total_time)*x_init - strike*np.exp(-r_f * total_time)
    exactFVA = exactVhat - exactV
    print("exactV = " + str(exactV))
    print("exactVhat = " + str(exactVhat))
    print("exactFVA = " + str(exactFVA))

    print("FVA from Algorithm 3")
    fvaFromSolver = fva_simulations[0, 0, 0]
    print("fvaFromSolver = " + str(fvaFromSolver))
    fvaError = fva_simulations[0, 0, 0] - exactFVA
    print("error = " + str(fvaError))
| 5,069 | 33.726027 | 91 | py |
Deep-xVA-Solver | Deep-xVA-Solver-master/XvaSolver.py | import logging
import time
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import tensorflow.keras.layers as layers
# Loss clipping threshold: residuals below DELTA_CLIP are penalised
# quadratically, larger ones with a linear continuation (Huber-style).
DELTA_CLIP = 50.0
class XvaSolver(object):
    """The fully connected neural network model.

    Trains a NonsharedModel on the XVA (recursive) BSDE: wraps model
    construction, an Adam optimizer with a piecewise-constant learning-rate
    schedule, and the SGD training loop.
    """

    def __init__(self, config, bsde):
        # config: munch object with `eqn_config` and `net_config` sections.
        # bsde: recursive-equation object providing sample()/f_tf()/g_tf().
        self.eqn_config = config.eqn_config
        self.net_config = config.net_config
        self.bsde = bsde
        self.model = NonsharedModel(config, bsde)
        #self.y_init = self.model.y_init

        # Fall back to a piecewise-constant schedule when the config does
        # not supply an explicit `lr_schedule`.
        try:
            lr_schedule = config.net_config.lr_schedule
        except AttributeError:
            lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
                self.net_config.lr_boundaries, self.net_config.lr_values)
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule, epsilon=1e-8)

    def train(self):
        """Run SGD and return history rows [step, loss, y_init, elapsed_time]."""
        start_time = time.time()
        training_history = []
        # Fixed validation sample reused for every logged loss evaluation.
        valid_data = self.bsde.sample(self.net_config.valid_size)

        # begin sgd iteration
        for step in tqdm(range(self.net_config.num_iterations+1)):
            if step % self.net_config.logging_frequency == 0:
                loss = self.loss_fn(valid_data, training=False).numpy()
                y_init = self.model.y_init.numpy()[0]
                elapsed_time = time.time() - start_time
                training_history.append([step, loss, y_init, elapsed_time])
                if self.net_config.verbose:
                    #logging.info("step: %5u, loss: %.4e, Y0: %.4e, elapsed time: %3u" % (
                    #    step, loss, y_init, elapsed_time))
                    print("step: %5u, loss: %.4e, Y0: %.4e, elapsed time: %3u" % (
                        step, loss, y_init, elapsed_time))
            self.train_step(self.bsde.sample(self.net_config.batch_size))
        return np.array(training_history)

    def loss_fn(self, inputs, training):
        """Clipped quadratic loss between simulated terminal Y and payoff g_tf."""
        dw, x, v_clean, coll = inputs
        y_terminal = self.model(inputs, training)
        delta = y_terminal - self.bsde.g_tf(self.bsde.total_time, x[:, :, -1], v_clean[:, :, -1], coll[:, :, -1])
        # use linear approximation outside the clipped range
        loss = tf.reduce_mean(tf.where(tf.abs(delta) < DELTA_CLIP, tf.square(delta),
                                       2 * DELTA_CLIP * tf.abs(delta) - DELTA_CLIP ** 2))
        return loss

    def grad(self, inputs, training):
        """Gradient of loss_fn w.r.t. all trainable model variables."""
        with tf.GradientTape(persistent=True) as tape:
            loss = self.loss_fn(inputs, training)
        grad = tape.gradient(loss, self.model.trainable_variables)
        del tape
        return grad

    @tf.function
    def train_step(self, train_data):
        # Compiled single optimisation step.
        grad = self.grad(train_data, training=True)
        self.optimizer.apply_gradients(zip(grad, self.model.trainable_variables))
class NonsharedModel(tf.keras.Model):
    """Deep BSDE model with one feed-forward subnet per time step (XVA variant).

    Trainable variables: the scalar initial value `y_init`, the initial
    gradient `z_init`, and `num_time_interval - 1` subnets producing Z at the
    intermediate time points.
    """

    def __init__(self, config, bsde):
        super(NonsharedModel, self).__init__()
        self.config = config
        self.eqn_config = config.eqn_config
        self.net_config = config.net_config
        self.bsde = bsde
        self.dim = bsde.dim
        # Y0 initialised uniformly inside the user-supplied bracket.
        self.y_init = tf.Variable(np.random.uniform(low=self.net_config.y_init_range[0],
                                                    high=self.net_config.y_init_range[1],
                                                    size=[1]), dtype=self.net_config.dtype
                                  )
        self.z_init = tf.Variable(np.random.uniform(low=-.1, high=.1,
                                                    size=[1, self.eqn_config.dim]), dtype=self.net_config.dtype
                                  )
        # One subnet per intermediate time point (weights are not shared).
        self.subnet = [FeedForwardSubNet(config, bsde.dim) for _ in range(self.bsde.num_time_interval-1)]

    def call(self, inputs, training):
        """Roll the Euler scheme forward; return terminal Y of shape (batch, 1)."""
        dw, x, v_clean, coll = inputs
        time_stamp = np.arange(0, self.eqn_config.num_time_interval) * self.bsde.delta_t
        all_one_vec = tf.ones(shape=tf.stack([tf.shape(dw)[0], 1]), dtype=self.net_config.dtype)
        y = all_one_vec * self.y_init
        z = tf.matmul(all_one_vec, self.z_init)

        for t in range(0, self.bsde.num_time_interval-1):
            # One Euler step of the backward SDE.
            y = y - self.bsde.delta_t * (
                self.bsde.f_tf(time_stamp[t], x[:, :, t], y, z, v_clean[:, :, t], coll[:, :, t])
            ) + tf.reduce_sum(z * dw[:, :, t], 1, keepdims=True)
            # `subnet` is either a per-step list (indexable) or a single
            # universal network taking time as extra input (TypeError branch).
            try:
                z = self.subnet[t](x[:, :, t + 1], training) / self.bsde.dim
            except TypeError:
                z = self.subnet(tf.concat([time_stamp[t+1]*all_one_vec, x[:, :, t + 1]], axis=1), training=training) / self.bsde.dim
        # terminal time
        y = y - self.bsde.delta_t * self.bsde.f_tf(time_stamp[-1], x[:, :, -2], y, z, v_clean[:, :, -2], coll[:, :, -2]) + \
            tf.reduce_sum(z * dw[:, :, -1], 1, keepdims=True)
        return y

    def predict_step(self, data):
        """Same roll-forward as call(), but also records the whole Y path.

        Returns (dw, x, v_clean, coll, history) where history has shape
        (batch, 1, num_time_interval + 1).
        """
        dw, x, v_clean, coll = data[0]
        time_stamp = np.arange(0, self.eqn_config.num_time_interval) * self.bsde.delta_t
        all_one_vec = tf.ones(shape=tf.stack([tf.shape(dw)[0], 1]), dtype=self.net_config.dtype)
        y = all_one_vec * self.y_init
        z = tf.matmul(all_one_vec, self.z_init)
        history = tf.TensorArray(self.net_config.dtype, size=self.bsde.num_time_interval+1)
        history = history.write(0, y)

        for t in range(0, self.bsde.num_time_interval-1):
            y = y - self.bsde.delta_t * (
                self.bsde.f_tf(time_stamp[t], x[:, :, t], y, z, v_clean[:, :, t], coll[:, :, t])
            ) + tf.reduce_sum(z * dw[:, :, t], 1, keepdims=True)
            history = history.write(t+1, y)
            try:
                z = self.subnet[t](x[:, :, t + 1], training=False) / self.bsde.dim
            except TypeError:
                z = self.subnet(tf.concat([time_stamp[t+1]*all_one_vec, x[:, :, t + 1]], axis=1), training=False) / self.bsde.dim
        # terminal time
        y = y - self.bsde.delta_t * self.bsde.f_tf(time_stamp[-1], x[:, :, -2], y, z, v_clean[:, :, -2], coll[:, :, -2]) + \
            tf.reduce_sum(z * dw[:, :, -1], 1, keepdims=True)
        history = history.write(self.bsde.num_time_interval, y)
        # (batch, 1, time) layout for downstream exposure computations.
        history = tf.transpose(history.stack(), perm=[1, 2, 0])
        return dw, x, v_clean, coll, history

    def simulate_path(self, num_sample):
        """Return the simulated Y paths only.

        NOTE(review): despite the name, `num_sample` is the tuple produced
        by bsde.sample(...), not an integer.
        """
        return self.predict(num_sample)[4]
class FeedForwardSubNet(tf.keras.Model):
    """Feed-forward net mapping the state x to Z at one time point.

    Architecture: BN -> (Dense -> BN -> ReLU) x len(num_hiddens) -> Dense(dim).
    """

    def __init__(self, config, dim):
        super(FeedForwardSubNet, self).__init__()
        num_hiddens = config.net_config.num_hiddens
        # NOTE(review): len(num_hiddens) + 2 BN layers are created but only
        # len(num_hiddens) + 1 are used in call() -- the last is unused.
        self.bn_layers = [
            tf.keras.layers.BatchNormalization(
                momentum=0.99,
                epsilon=1e-6,
                beta_initializer=tf.random_normal_initializer(0.0, stddev=0.1),
                gamma_initializer=tf.random_uniform_initializer(0.1, 0.5)
            )
            for _ in range(len(num_hiddens) + 2)]
        # Hidden layers are linear (bias-free); the ReLU is applied in call().
        self.dense_layers = [tf.keras.layers.Dense(num_hiddens[i],
                                                   use_bias=False,
                                                   activation=None,)
                             for i in range(len(num_hiddens))]
        # final output should be gradient of size dim
        self.dense_layers.append(tf.keras.layers.Dense(dim, activation=None))

    def call(self, x, training):
        """structure: bn -> (dense -> bn -> relu) * len(num_hiddens) -> dense """
        x = self.bn_layers[0](x, training)
        for i in range(len(self.dense_layers) - 1):
            x = self.dense_layers[i](x)
            x = self.bn_layers[i+1](x, training)
            x = tf.nn.relu(x)
        x = self.dense_layers[-1](x)
        return x
### univeral neural networks instead of one neural network at each time point
def get_universal_neural_network(input_dim):
    """Build a single (time, state) -> Z network shared across time steps.

    Architecture: BN -> (Dense(input_dim + 10, relu, no bias) -> BN) x 5
    -> Dense(input_dim - 1, relu).
    """
    inp = layers.Input(shape=(input_dim,))
    hidden = layers.BatchNormalization()(inp)
    for _ in range(5):
        hidden = layers.Dense(input_dim + 10, 'relu', False)(hidden)
        hidden = layers.BatchNormalization()(hidden)
    out = layers.Dense(input_dim - 1, 'relu')(hidden)
    return tf.keras.Model(inp, out)
'''
def get_universal_neural_network(input_dim,num_neurons=20,num_hidden_blocks=4):
input = tf.keras.Input(shape=(input_dim,))
x = layers.BatchNormalization()(input)
s = layers.Dense(num_neurons,activation='relu',use_bias=False)(x)
s = layers.BatchNormalization()(s)
for i in range(num_hidden_blocks-1):
z = layers.add([layers.Dense(num_neurons,None,False)(x),layers.Dense(num_neurons,None,False)(s)])
z = Add_bias(num_neurons)(z)
z = layers.Activation(tf.nn.sigmoid)(z)
g = layers.add([layers.Dense(num_neurons,None,False)(x),layers.Dense(num_neurons,None,False)(s)])
g = Add_bias(num_neurons)(g)
g = layers.Activation(tf.nn.sigmoid)(g)
r = layers.add([layers.Dense(num_neurons,None,False)(x),layers.Dense(num_neurons,None,False)(s)])
r = Add_bias(num_neurons)(r)
r = layers.Activation(tf.nn.sigmoid)(r)
h = layers.add([layers.Dense(num_neurons,None,False)(x),layers.Dense(num_neurons,None,False)(layers.multiply([s,r]))])
h = Add_bias(num_neurons)(h)
h = layers.Activation(tf.nn.relu)(h)
s = layers.add([layers.multiply([1-g,h]),layers.multiply([z,s])])
s = layers.BatchNormalization()(s)
output = layers.Dense(input_dim-1,None)(s)
return tf.keras.Model(input,output)
'''
class Add_bias(tf.keras.layers.Layer):
    """Keras layer adding a trainable, zero-initialised bias vector."""

    def __init__(self, units):
        super(Add_bias, self).__init__()
        self.units = units  # length of the bias vector

    def build(self, input_shape):
        self.b = self.add_weight(
            shape=(self.units,), initializer='zeros', trainable=True)

    def call(self, inputs):
        return inputs + self.b
| 10,030 | 43.982063 | 130 | py |
Deep-xVA-Solver | Deep-xVA-Solver-master/basketCall.py | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from solver import BSDESolver
import xvaEquation as eqn
import munch
import pandas as pd
# Driver script: price a 100-dimensional basket call with the deep BSDE
# solver, plot/export exposures, and cross-check the price with plain
# Monte Carlo on the sampled paths.
if __name__ == "__main__":
    dim = 100  # dimension of brownian motion
    P = 2048  # number of outer Monte Carlo loops
    batch_size = 64
    total_time = 1.0
    num_time_interval = 200
    strike = 100
    r = 0.01
    sigma = 0.25
    x_init = 100

    config = {
        "eqn_config": {
            "_comment": "a basket call option",
            "eqn_name": "BasketOption",
            "total_time": total_time,
            "dim": dim,
            "num_time_interval": num_time_interval,
            # FIX: "strike" was listed twice in this dict; the redundant
            # duplicate (last-one-wins, same value) has been removed.
            "strike": strike,
            "r": r,
            "sigma": sigma,
            "x_init": x_init
        },
        "net_config": {
            "y_init_range": [150, 170],
            "num_hiddens": [dim+10, dim+10],
            "lr_values": [5e-2, 5e-3],
            "lr_boundaries": [2000],
            "num_iterations": 4000,
            "batch_size": batch_size,
            "valid_size": 256,
            "logging_frequency": 100,
            "dtype": "float64",
            "verbose": True
        }
    }
    config = munch.munchify(config)
    bsde = getattr(eqn, config.eqn_config.eqn_name)(config.eqn_config)
    tf.keras.backend.set_floatx(config.net_config.dtype)

    # apply algorithm 1
    bsde_solver = BSDESolver(config, bsde)
    training_history = bsde_solver.train()

    # Simulate the BSDE after training - MtM scenarios
    simulations = bsde_solver.model.simulate_path(bsde.sample(P))

    # estimated expected positive and negative exposure (discounted)
    time_stamp = np.linspace(0, 1, num_time_interval + 1)
    epe = np.mean(np.exp(-r * time_stamp) * np.maximum(simulations, 0), axis=0)
    ene = np.mean(np.exp(-r * time_stamp) * np.minimum(simulations, 0), axis=0)

    exact = 157.9914  # reference price (other configs: 101.0721 / 398.08)
    epe_exact = np.array([exact for s in time_stamp[1:]])
    ene_exact = np.array([0.0 for s in time_stamp[1:]])

    fig = plt.figure()
    plt.plot(time_stamp, [exact] + list(epe_exact), 'b--', label='DEPE = exact solution',)
    plt.plot(time_stamp, np.transpose(epe), 'b', label='DEPE = deep solver approximation')
    plt.plot(time_stamp, [0.0] + list(ene_exact), 'r--', label='DNPE = exact solution',)
    plt.plot(time_stamp, np.transpose(ene), 'r', label='DNPE = deep solver approximation')
    plt.xlabel('t')
    plt.legend()
    plt.show()
    fig.savefig(config.eqn_config.eqn_name + '.pdf', format='pdf')

    # Export the simulated exposure paths for external analysis.
    df = pd.DataFrame(simulations[:, 0, :])
    filepath = 'exposure' + config.eqn_config.eqn_name + '.xlsx'
    df.to_excel(filepath, index=False)

    # Check Monte Carlo price
    dw, x = bsde.sample(10000)
    portfolio = tf.reduce_sum(x, 1, keepdims=True)
    payoff = tf.math.maximum(portfolio - dim * strike, 0)
    average = tf.reduce_mean(payoff, 0, keepdims=True)
    mcprice = np.exp(-r * total_time) * average[:, :, num_time_interval]
    print(mcprice)
Deep-xVA-Solver | Deep-xVA-Solver-master/solver.py | import logging
import time
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import tensorflow.keras.layers as layers
# Loss clipping threshold: residuals below DELTA_CLIP are penalised
# quadratically, larger ones with a linear continuation (Huber-style).
DELTA_CLIP = 50.0
class BSDESolver(object):
    """The fully connected neural network model.

    Trains a NonsharedModel on the pricing BSDE (Algorithm 1): wraps model
    construction, an Adam optimizer with a piecewise-constant learning-rate
    schedule, and the SGD training loop.
    """

    def __init__(self, config, bsde):
        # config: munch object with `eqn_config` and `net_config` sections.
        # bsde: equation object providing sample()/f_tf()/g_tf().
        self.eqn_config = config.eqn_config
        self.net_config = config.net_config
        self.bsde = bsde
        self.model = NonsharedModel(config, bsde)
        #self.y_init = self.model.y_init

        # Fall back to a piecewise-constant schedule when the config does
        # not supply an explicit `lr_schedule`.
        try:
            lr_schedule = config.net_config.lr_schedule
        except AttributeError:
            lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
                self.net_config.lr_boundaries, self.net_config.lr_values)
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule, epsilon=1e-8)

    def train(self):
        """Run SGD and return history rows [step, loss, y_init, elapsed_time]."""
        start_time = time.time()
        training_history = []
        # Fixed validation sample reused for every logged loss evaluation.
        valid_data = self.bsde.sample(self.net_config.valid_size)
        # begin sgd iteration
        for step in tqdm(range(self.net_config.num_iterations+1)):
            if step % self.net_config.logging_frequency == 0:
                loss = self.loss_fn(valid_data, training=False).numpy()
                y_init = self.model.y_init.numpy()[0]
                elapsed_time = time.time() - start_time
                training_history.append([step, loss, y_init, elapsed_time])
                if self.net_config.verbose:
                    #logging.info("step: %5u, loss: %.4e, Y0: %.4e, elapsed time: %3u" % (
                    #    step, loss, y_init, elapsed_time))
                    print("step: %5u, loss: %.4e, Y0: %.4e, elapsed time: %3u" % (
                        step, loss, y_init, elapsed_time))
            self.train_step(self.bsde.sample(self.net_config.batch_size))
        return np.array(training_history)

    def loss_fn(self, inputs, training):
        """Clipped quadratic loss between terminal Y and payoff g_tf, plus a
        soft penalty keeping Y0 inside the configured `y_init_range`."""
        dw, x = inputs
        y_terminal = self.model(inputs, training)
        delta = y_terminal - self.bsde.g_tf(self.bsde.total_time, x[:, :, -1])
        # use linear approximation outside the clipped range
        loss = tf.reduce_mean(tf.where(tf.abs(delta) < DELTA_CLIP, tf.square(delta),
                                       2 * DELTA_CLIP * tf.abs(delta) - DELTA_CLIP ** 2))
        # Soft barrier: penalise Y0 estimates that leave [lo, hi].
        loss += 1000*(tf.maximum(self.model.y_init[0]-self.net_config.y_init_range[1], 0)+tf.maximum(self.net_config.y_init_range[0]-self.model.y_init[0], 0))
        return loss

    def grad(self, inputs, training):
        """Gradient of loss_fn w.r.t. all trainable model variables."""
        with tf.GradientTape(persistent=True) as tape:
            loss = self.loss_fn(inputs, training)
        grad = tape.gradient(loss, self.model.trainable_variables)
        del tape
        return grad

    @tf.function
    def train_step(self, train_data):
        # Compiled single optimisation step.
        grad = self.grad(train_data, training=True)
        self.optimizer.apply_gradients(zip(grad, self.model.trainable_variables))
class NonsharedModel(tf.keras.Model):
    """Deep BSDE model with one feed-forward subnet per time step.

    Trainable variables: the scalar initial value `y_init`, the initial
    gradient `z_init`, and `num_time_interval - 1` subnets producing Z at the
    intermediate time points.
    """

    def __init__(self, config, bsde):
        super(NonsharedModel, self).__init__()
        self.config = config
        self.eqn_config = config.eqn_config
        self.net_config = config.net_config
        self.bsde = bsde
        self.dim = bsde.dim
        # Y0 initialised uniformly inside the user-supplied bracket.
        self.y_init = tf.Variable(np.random.uniform(low=self.net_config.y_init_range[0],
                                                    high=self.net_config.y_init_range[1],
                                                    size=[1]), dtype=self.net_config.dtype
                                  )
        self.z_init = tf.Variable(np.random.uniform(low=-.1, high=.1,
                                                    size=[1, self.eqn_config.dim]), dtype=self.net_config.dtype
                                  )
        # One subnet per intermediate time point (weights are not shared).
        self.subnet = [FeedForwardSubNet(config, bsde.dim) for _ in range(self.bsde.num_time_interval-1)]

    def call(self, inputs, training):
        """Roll the Euler scheme forward; return terminal Y of shape (batch, 1)."""
        dw, x = inputs
        time_stamp = np.arange(0, self.eqn_config.num_time_interval) * self.bsde.delta_t
        all_one_vec = tf.ones(shape=tf.stack([tf.shape(dw)[0], 1]), dtype=self.net_config.dtype)
        y = all_one_vec * self.y_init
        z = tf.matmul(all_one_vec, self.z_init)

        for t in range(0, self.bsde.num_time_interval-1):
            # One Euler step of the backward SDE.
            y = y - self.bsde.delta_t * (
                self.bsde.f_tf(time_stamp[t], x[:, :, t], y, z)
            ) + tf.reduce_sum(z * dw[:, :, t], 1, keepdims=True)
            # `subnet` is either a per-step list (indexable) or a single
            # universal network taking time as extra input (TypeError branch).
            try:
                z = self.subnet[t](x[:, :, t + 1], training) / self.bsde.dim
            except TypeError:
                z = self.subnet(tf.concat([time_stamp[t+1]*all_one_vec, x[:, :, t + 1]], axis=1), training=training) / self.bsde.dim
        # terminal time
        y = y - self.bsde.delta_t * self.bsde.f_tf(time_stamp[-1], x[:, :, -2], y, z) + \
            tf.reduce_sum(z * dw[:, :, -1], 1, keepdims=True)
        return y

    def predict_step(self, data):
        """Same roll-forward as call(), but also records the whole Y path.

        Returns (dw, x, history) where history has shape
        (batch, 1, num_time_interval + 1).
        """
        dw, x = data[0]
        time_stamp = np.arange(0, self.eqn_config.num_time_interval) * self.bsde.delta_t
        all_one_vec = tf.ones(shape=tf.stack([tf.shape(dw)[0], 1]), dtype=self.net_config.dtype)
        y = all_one_vec * self.y_init
        z = tf.matmul(all_one_vec, self.z_init)
        history = tf.TensorArray(self.net_config.dtype, size=self.bsde.num_time_interval+1)
        history = history.write(0, y)

        for t in range(0, self.bsde.num_time_interval-1):
            y = y - self.bsde.delta_t * (
                self.bsde.f_tf(time_stamp[t], x[:, :, t], y, z)
            ) + tf.reduce_sum(z * dw[:, :, t], 1, keepdims=True)
            history = history.write(t+1, y)
            try:
                z = self.subnet[t](x[:, :, t + 1], training=False) / self.bsde.dim
            except TypeError:
                z = self.subnet(tf.concat([time_stamp[t+1]*all_one_vec, x[:, :, t + 1]], axis=1), training=False) / self.bsde.dim
        # terminal time
        y = y - self.bsde.delta_t * self.bsde.f_tf(time_stamp[-1], x[:, :, -2], y, z) + \
            tf.reduce_sum(z * dw[:, :, -1], 1, keepdims=True)
        history = history.write(self.bsde.num_time_interval, y)
        # (batch, 1, time) layout for downstream exposure computations.
        history = tf.transpose(history.stack(), perm=[1, 2, 0])
        return dw, x, history

    def simulate_path(self, num_sample):
        """Return the simulated Y paths only.

        NOTE(review): despite the name, `num_sample` is the tuple produced
        by bsde.sample(...), not an integer.
        """
        return self.predict(num_sample)[2]
class FeedForwardSubNet(tf.keras.Model):
    """Feed-forward net mapping the state x to Z at one time point.

    Architecture: BN -> (Dense -> BN -> ReLU) x len(num_hiddens) -> Dense(dim).
    """

    def __init__(self, config, dim):
        super(FeedForwardSubNet, self).__init__()
        num_hiddens = config.net_config.num_hiddens
        # NOTE(review): len(num_hiddens) + 2 BN layers are created but only
        # len(num_hiddens) + 1 are used in call() -- the last is unused.
        self.bn_layers = [
            tf.keras.layers.BatchNormalization(
                momentum=0.99,
                epsilon=1e-6,
                beta_initializer=tf.random_normal_initializer(0.0, stddev=0.1),
                gamma_initializer=tf.random_uniform_initializer(0.1, 0.5)
            )
            for _ in range(len(num_hiddens) + 2)]
        # Hidden layers are linear (bias-free); the ReLU is applied in call().
        self.dense_layers = [tf.keras.layers.Dense(num_hiddens[i],
                                                   use_bias=False,
                                                   activation=None,)
                             for i in range(len(num_hiddens))]
        # final output should be gradient of size dim
        self.dense_layers.append(tf.keras.layers.Dense(dim, activation=None))

    def call(self, x, training):
        """structure: bn -> (dense -> bn -> relu) * len(num_hiddens) -> dense """
        x = self.bn_layers[0](x, training)
        for i in range(len(self.dense_layers) - 1):
            x = self.dense_layers[i](x)
            x = self.bn_layers[i+1](x, training)
            x = tf.nn.relu(x)
        x = self.dense_layers[-1](x)
        return x
### univeral neural networks instead of one neural network at each time point
def get_universal_neural_network(input_dim):
    """Build a single (time, state) -> Z network shared across time steps.

    Architecture: BN -> (Dense(input_dim + 10, relu, no bias) -> BN) x 5
    -> Dense(input_dim - 1, relu).
    """
    inp = layers.Input(shape=(input_dim,))
    hidden = layers.BatchNormalization()(inp)
    for _ in range(5):
        hidden = layers.Dense(input_dim + 10, 'relu', False)(hidden)
        hidden = layers.BatchNormalization()(hidden)
    out = layers.Dense(input_dim - 1, 'relu')(hidden)
    return tf.keras.Model(inp, out)
'''
def get_universal_neural_network(input_dim,num_neurons=20,num_hidden_blocks=4):
input = tf.keras.Input(shape=(input_dim,))
x = layers.BatchNormalization()(input)
s = layers.Dense(num_neurons,activation='relu',use_bias=False)(x)
s = layers.BatchNormalization()(s)
for i in range(num_hidden_blocks-1):
z = layers.add([layers.Dense(num_neurons,None,False)(x),layers.Dense(num_neurons,None,False)(s)])
z = Add_bias(num_neurons)(z)
z = layers.Activation(tf.nn.sigmoid)(z)
g = layers.add([layers.Dense(num_neurons,None,False)(x),layers.Dense(num_neurons,None,False)(s)])
g = Add_bias(num_neurons)(g)
g = layers.Activation(tf.nn.sigmoid)(g)
r = layers.add([layers.Dense(num_neurons,None,False)(x),layers.Dense(num_neurons,None,False)(s)])
r = Add_bias(num_neurons)(r)
r = layers.Activation(tf.nn.sigmoid)(r)
h = layers.add([layers.Dense(num_neurons,None,False)(x),layers.Dense(num_neurons,None,False)(layers.multiply([s,r]))])
h = Add_bias(num_neurons)(h)
h = layers.Activation(tf.nn.relu)(h)
s = layers.add([layers.multiply([1-g,h]),layers.multiply([z,s])])
s = layers.BatchNormalization()(s)
output = layers.Dense(input_dim-1,None)(s)
return tf.keras.Model(input,output)
'''
class Add_bias(tf.keras.layers.Layer):
    """Keras layer adding a trainable, zero-initialised bias vector."""

    def __init__(self, units):
        super(Add_bias, self).__init__()
        self.units = units  # length of the bias vector

    def build(self, input_shape):
        self.b = self.add_weight(
            shape=(self.units,), initializer='zeros', trainable=True)

    def call(self, inputs):
        return inputs + self.b
| 9,986 | 43.584821 | 156 | py |
Deep-xVA-Solver | Deep-xVA-Solver-master/forward.py | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from solver import BSDESolver
import xvaEquation as eqn
import munch
from scipy.stats import norm
import pandas as pd
if __name__ == "__main__":
    # --- market / discretization parameters --------------------------------
    dim = 1                    # dimension of brownian motion
    P = 2048                   # number of outer Monte Carlo loops (exposure paths)
    batch_size = 64
    total_time = 1.0
    num_time_interval = 200
    strike = 100
    r = 0.0                    # risk-free rate
    mu = 0.0                   # drift; unused in the visible code below
    sigma = 0.25               # volatility
    x_init = 100               # initial value of the underlying

    # Configuration consumed by xvaEquation.PricingForward and the BSDE solver.
    config = {
        "eqn_config": {
            "_comment": "a forward contract",
            "eqn_name": "PricingForward",
            "total_time": total_time,
            "dim": dim,
            "num_time_interval": num_time_interval,
            "strike": strike,
            "r": r,
            "sigma": sigma,
            "x_init": x_init
        },
        "net_config": {
            "y_init_range": [-5, 5],
            "num_hiddens": [dim+20, dim+20],
            "lr_values": [5e-2, 5e-3],
            "lr_boundaries": [2000],
            "num_iterations": 4000,
            "batch_size": batch_size,
            "valid_size": 256,
            "logging_frequency": 100,
            "dtype": "float64",
            "verbose": True
        }
    }
    config = munch.munchify(config)
    # Instantiate the equation class named in the config.
    bsde = getattr(eqn, config.eqn_config.eqn_name)(config.eqn_config)
    tf.keras.backend.set_floatx(config.net_config.dtype)

    # apply algorithm 1 (train the deep BSDE solver)
    bsde_solver = BSDESolver(config, bsde)
    training_history = bsde_solver.train()

    # Simulate the BSDE after training - MtM scenarios
    simulations = bsde_solver.model.simulate_path(bsde.sample(P))

    # estimated expected positive and negative exposure (discounted at rate r)
    time_stamp = np.linspace(0, 1, num_time_interval+1)
    epe = np.mean(np.exp(-r*time_stamp)*np.maximum(simulations, 0), axis=0)
    ene = np.mean(np.exp(-r*time_stamp)*np.minimum(simulations, 0), axis=0)

    # exact solution: Black-Scholes style d1/d2 terms evaluated per time step
    rv = norm()
    d1 = np.array([(-r * s + np.log(x_init/strike) + (r+sigma**2/2)*s)/sigma/np.sqrt(s)
                   for s in time_stamp[1:]])
    d2 = np.array([d1[i]-sigma*np.sqrt(s) for i, s in enumerate(time_stamp[1:])])
    epe_exact = x_init*rv.cdf(d1) - strike*np.exp(-r)*rv.cdf(d2)
    ene_exact = x_init*rv.cdf(-d1) - strike*np.exp(-r)*rv.cdf(-d2)

    # plot estimated vs exact exposure profiles
    fig = plt.figure()
    plt.plot(time_stamp, [0.0]+list(epe_exact), 'b--', label='DEPE = exact solution')
    plt.plot(time_stamp, np.transpose(epe), 'b', label='DEPE = deep solver approximation')
    plt.plot(time_stamp, [0.0]+list(ene_exact), 'r--', label='DNPE = exact solution')
    plt.plot(time_stamp, np.transpose(ene), 'r', label='DNPE = deep solver approximation')
    plt.xlabel('t')
    plt.legend()
    plt.show()
    fig.savefig(config.eqn_config.eqn_name + '.pdf', format='pdf')

    # dump the simulated exposure paths (first dimension of the state) to Excel
    df = pd.DataFrame(simulations[:, 0, :])
    filepath = 'exposure' + config.eqn_config.eqn_name + '.xlsx'
    df.to_excel(filepath, index=False)
| 3,107 | 33.153846 | 88 | py |
Deep-xVA-Solver | Deep-xVA-Solver-master/callOptionStabilityTests.py | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from solver import BSDESolver
import xvaEquation as eqn
import munch
import pandas as pd
if __name__ == "__main__":
    # Price a European call option with the deep BSDE solver and estimate
    # the discounted expected positive/negative exposure profiles.
    dim = 1                    # dimension of brownian motion
    P = 2048                   # number of outer Monte Carlo loops (exposure paths)
    batch_size = 64
    total_time = 1.0
    num_time_interval = 200
    strike = 100
    r = 0.01                   # risk-free rate
    sigma = 0.25               # volatility
    x_init = 100               # initial value of the underlying

    config = {
        "eqn_config": {
            "_comment": "a basket call option",
            "eqn_name": "CallOption",
            "total_time": total_time,
            "dim": dim,
            "num_time_interval": num_time_interval,
            # NOTE(fix): "strike" was listed twice in this dict literal;
            # the duplicate entry has been removed (value is unchanged).
            "strike": strike,
            "r": r,
            "sigma": sigma,
            "x_init": x_init
        },
        "net_config": {
            "y_init_range": [2, 20],
            "num_hiddens": [dim + 80, dim + 80, dim + 80, dim + 80, dim + 80, dim + 80, dim + 80, dim + 80],
            "lr_values": [5e-3, 5e-3],
            "lr_boundaries": [2000],
            "num_iterations": 4000,
            "batch_size": batch_size,
            "valid_size": 1024,
            "logging_frequency": 100,
            "dtype": "float64",
            "verbose": True
        }
    }
    config = munch.munchify(config)
    # Instantiate the equation class named in the config.
    bsde = getattr(eqn, config.eqn_config.eqn_name)(config.eqn_config)
    tf.keras.backend.set_floatx(config.net_config.dtype)

    # apply algorithm 1 (train the deep BSDE solver)
    bsde_solver = BSDESolver(config, bsde)
    training_history = bsde_solver.train()

    # Simulate the BSDE after training - MtM scenarios
    simulations = bsde_solver.model.simulate_path(bsde.sample(P))

    # estimated expected positive and negative exposure (discounted at rate r)
    time_stamp = np.linspace(0, 1, num_time_interval+1)
    epe = np.mean(np.exp(-r*time_stamp)*np.maximum(simulations, 0), axis=0)
    ene = np.mean(np.exp(-r*time_stamp)*np.minimum(simulations, 0), axis=0)

    # reference exposure from the closed-form price at t=0
    exact = bsde.SolExact(0, x_init)
    epe_exact = np.array([exact for s in time_stamp[1:]])
    ene_exact = np.array([0.0 for s in time_stamp[1:]])

    # plot estimated vs exact exposure profiles
    fig = plt.figure()
    plt.plot(time_stamp, [exact]+list(epe_exact), 'b--', label='DEPE = exact solution')
    plt.plot(time_stamp, np.transpose(epe), 'b', label='DEPE = deep solver approximation')
    plt.plot(time_stamp, [0.0]+list(ene_exact), 'r--', label='DNPE = exact solution')
    plt.plot(time_stamp, np.transpose(ene), 'r', label='DNPE = deep solver approximation')
    plt.xlabel('t')
    plt.legend()
    plt.show()
    fig.savefig(config.eqn_config.eqn_name + '.pdf', format='pdf')

    # dump the simulated exposure paths (first dimension of the state) to Excel
    df = pd.DataFrame(simulations[:, 0, :])
    filepath = 'exposure' + config.eqn_config.eqn_name + '.xlsx'
    df.to_excel(filepath, index=False)
| 2,953 | 32.954023 | 116 | py |
inferno | inferno-master/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
import runpy
# Read the package version without importing the package.
__version__ = runpy.run_path('inferno/version.py')['__version__']

# Long description: README followed by the changelog.
with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read()

# Runtime dependencies.
requirements = [
    # TODO: put package requirements here
    "pip>=8.1.2",
    "torch>=0.1.12",
    "dill",
    "pyyaml",
    "scipy>=0.13.0",
    "h5py",
    "numpy>=1.8",
    "scikit-image",
    "torchvision",
    "tqdm"
]

setup_requirements = [
    'pytest-runner'
]

# NOTE(fix): removed 'unittest' -- it is part of the Python standard library;
# the PyPI package of that name is an unrelated Python-2 shim and pip-installing
# it as a test dependency breaks Python-3 environments.
test_requirements = [
    'pytest'
]

# NOTE(review): dependency_links is deprecated and ignored by modern pip;
# kept only for compatibility with old tooling.
dependency_links = [
    'http://download.pytorch.org/whl/cu75/torch-0.2.0.post1-cp35-cp35m-manylinux1_x86_64.whl#egg=torch-0.2.0'
]

setup(
    name='inferno-pytorch',
    version=__version__,
    description="Inferno is a little library providing utilities and convenience functions/classes around PyTorch.",
    long_description=readme + '\n\n' + history,
    author="Nasim Rahaman",
    author_email='nasim.rahaman@iwr.uni-heidelberg.de',
    url='https://github.com/inferno-pytorch/inferno',
    packages=find_packages(where='.', exclude=["*.tests", "*.tests.*",
                                               "tests.*", "tests",
                                               "__pycache__", "*.pyc"]),
    dependency_links=dependency_links,
    include_package_data=True,
    install_requires=requirements,
    license="Apache Software License 2.0",
    zip_safe=False,
    keywords='inferno pytorch torch deep learning cnn deep-pyromania',
    classifiers=[
        # How mature is this project? Common values are
        # 2 - Pre-Alpha, 3 - Alpha, 4 - Beta, 5 - Production/Stable
        'Development Status :: 2 - Pre-Alpha',
        # Indicate who your project is intended for
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6'
    ],
    test_suite='test',
    tests_require=test_requirements,
    setup_requires=setup_requirements,
)
| 2,255 | 27.556962 | 116 | py |
inferno | inferno-master/examples/regularized_mnist.py | """
Regularized MNIST Example
================================
This example demonstrates adding and logging arbitrary regularization losses, in this case,
L2 activity regularization and L1 weight regularization.
- Add a `_losses` dictionary to any module containing loss names and values
- Use a criterion from `inferno.extensions.criteria.regularized` that will collect and add those losses
- Call `Trainer.observe_training_and_validation_states` to log the losses as well
"""
import argparse
import sys
import torch
import torch.nn as nn
from torchvision import datasets, transforms
from inferno.extensions.layers.reshape import Flatten
from inferno.trainers.basic import Trainer
from inferno.trainers.callbacks.logging.tensorboard import TensorboardLogger
class RegularizedLinear(nn.Linear):
    """Linear layer that records L2 activity and L1 weight penalties.

    On every forward pass the two penalty terms are stored in ``self._losses``
    so that a regularization-aware criterion can collect and add them.
    """

    def __init__(self, *args, ar_weight=1e-3, l1_weight=1e-3, **kwargs):
        super().__init__(*args, **kwargs)
        self.ar_weight = ar_weight      # scale of the L2 activity penalty
        self.l1_weight = l1_weight      # scale of the L1 weight penalty
        self._losses = {}               # name -> scalar tensor, refreshed each forward

    def forward(self, input):
        output = super().forward(input)
        activity_penalty = output.pow(2).sum() * self.ar_weight
        weight_penalty = self.weight.abs().sum() * self.l1_weight
        self._losses['activity_regularization'] = activity_penalty
        self._losses['l1_weight_regularization'] = weight_penalty
        return output
def model_fn():
    """Build the MNIST classifier: flatten, then three regularized linear layers."""
    stages = [
        Flatten(),
        RegularizedLinear(in_features=784, out_features=256),
        nn.LeakyReLU(),
        RegularizedLinear(in_features=256, out_features=128),
        nn.LeakyReLU(),
        RegularizedLinear(in_features=128, out_features=10),
    ]
    return nn.Sequential(*stages)
def mnist_data_loaders(args):
    """Return (train_loader, test_loader) for MNIST with standard normalization."""
    loader_kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    # Canonical MNIST mean/std normalization (stateless, shared by both loaders).
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./data', train=True, download=True, transform=transform),
        batch_size=args.batch_size, shuffle=True, **loader_kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./data', train=False, transform=transform),
        batch_size=args.test_batch_size, shuffle=True, **loader_kwargs)
    return train_loader, test_loader
def train_model(args):
    """Assemble model, trainer and loaders, then run the training loop."""
    model = model_fn()
    train_loader, validate_loader = mnist_data_loaders(args)

    # Configure the trainer: regularization-aware loss, per-epoch
    # validation/checkpointing and tensorboard scalar logging.
    trainer = Trainer(model)
    trainer.build_criterion('RegularizedCrossEntropyLoss')
    trainer.build_metric('CategoricalError')
    trainer.build_optimizer('Adam')
    trainer.validate_every((1, 'epochs'))
    trainer.save_every((1, 'epochs'))
    trainer.save_to_directory(args.save_directory)
    trainer.set_max_num_epochs(args.epochs)
    trainer.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                           log_images_every='never'),
                         log_directory=args.save_directory)

    # Record the individual regularization losses alongside the main loss.
    trainer.logger.observe_training_and_validation_states([
        'main_loss',
        'total_regularization_loss',
        'activity_regularization',
        'l1_weight_regularization'
    ])

    trainer.bind_loader('train', train_loader)
    trainer.bind_loader('validate', validate_loader)

    if args.cuda:
        trainer.cuda()

    trainer.fit()
def main(argv):
    """Parse command-line options and start MNIST training."""
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--save-directory', type=str, default='output/mnist/v1',
                        help='output directory')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=20, metavar='N',
                        help='number of epochs to train (default: 20)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    args = parser.parse_args(argv)
    # Use CUDA only when available and not explicitly disabled.
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    train_model(args)


if __name__ == '__main__':
    # Drop the program name before parsing.
    main(sys.argv[1:])
| 4,545 | 35.368 | 103 | py |
inferno | inferno-master/examples/plot_cheap_unet.py | """
UNet Tutorial
================================
A unet example which can be run without a gpu
"""
##############################################################################
# Preface
# --------------
# We start with some unspectacular multi purpose imports needed for this example
import matplotlib.pyplot as plt
import torch
from torch import nn
import numpy
##############################################################################
# determine whether we have a gpu
# and should use cuda
USE_CUDA = torch.cuda.is_available()
##############################################################################
# Dataset
# --------------
# For simplicity we will use a toy dataset where we need to perform
# a binary segmentation task.
from inferno.io.box.binary_blobs import get_binary_blob_loaders
# convert labels from long to float as needed by
# binary cross entropy loss
def label_transform(x):
    """Convert a numpy label array to a float32 torch tensor (BCE loss needs floats)."""
    tensor = torch.from_numpy(x)
    return tensor.to(torch.float32)
#label_transform = lambda x : torch.from_numpy(x).float()
# Binary-blob toy segmentation data: small train/test/validate loaders.
train_loader, test_loader, validate_loader = get_binary_blob_loaders(
    size=8,  # how many images per {train,test,validate}
    train_batch_size=2,
    length=256,  # <= size of the images
    gaussian_noise_sigma=1.4,  # <= how noisy the images are
    train_label_transform=label_transform,
    validate_label_transform=label_transform
)

image_channels = 1   # <-- number of channels of the image
pred_channels = 1    # <-- number of channels needed for the prediction

# Visualization is disabled by default; flip to True to preview one batch.
if False:
    ##############################################################################
    # Visualize Dataset
    # ~~~~~~~~~~~~~~~~~~~~~~
    fig = plt.figure()

    for i, (image, target) in enumerate(train_loader):
        ax = fig.add_subplot(1, 2, 1)
        ax.imshow(image[0, 0, ...])
        ax.set_title('raw data')

        ax = fig.add_subplot(1, 2, 2)
        ax.imshow(target[0, ...])
        ax.set_title('ground truth')
        break  # only the first batch is shown

    fig.tight_layout()
    plt.show()
##############################################################################
# Training
# ----------------------------
# To train the unet, we use the infernos Trainer class of inferno.
# Since we train many models later on in this example we encapsulate
# the training in a function (see :ref:`sphx_glr_auto_examples_trainer.py` for
# an example dedicated to the trainer itself).
from inferno.trainers import Trainer
from inferno.utils.python_utils import ensure_dir
def train_model(model, loaders, **kwargs):
    """Train ``model`` with BCE-with-logits loss and return the fitted Trainer.

    Recognised kwargs: ``lr`` (default 1e-4) and ``max_num_epochs`` (default 20);
    other keys are accepted and ignored.
    """
    learning_rate = kwargs.get('lr', 0.0001)
    max_num_epochs = kwargs.get('max_num_epochs', 20)
    train_loader, validate_loader = loaders[0], loaders[1]

    trainer = Trainer(model)
    trainer.build_criterion('BCEWithLogitsLoss')
    trainer.build_optimizer('Adam', lr=learning_rate)
    trainer.set_max_num_epochs(max_num_epochs)

    # bind the loaders
    trainer.bind_loader('train', train_loader)
    trainer.bind_loader('validate', validate_loader)

    if USE_CUDA:
        trainer.cuda()

    # do the training
    trainer.fit()
    return trainer
##############################################################################
# Prediction
# ----------------------------
# The trainer contains the trained model and we can do predictions.
# We use :code:`unwrap` to convert the results to numpy arrays.
# Since we want to do many prediction we encapsulate the
# the prediction in a function
from inferno.utils.torch_utils import unwrap
def predict(trainer, test_loader, save_dir=None):
    """Run the trained model on ``test_loader`` and plot raw/target/prediction.

    Parameters
    ----------
    trainer : Trainer holding the trained model.
    test_loader : iterable of (image, target) batches.
    save_dir : unused; kept for interface compatibility.
    """
    trainer.eval_mode()
    for image, target in test_loader:
        # transfer image to gpu
        image = image.cuda() if USE_CUDA else image
        # get batch size from image
        batch_size = image.size()[0]

        # BUGFIX: the model was previously applied (and the tensors unwrapped
        # to numpy) INSIDE the per-sample loop, so from the second sample on
        # `apply_model` received a numpy array instead of a tensor.
        # Predict and unwrap once per batch instead.
        prediction = trainer.apply_model(image)
        # torch.nn.functional.sigmoid is deprecated; torch.sigmoid is equivalent.
        prediction = torch.sigmoid(prediction)

        image = unwrap(image, as_numpy=True, to_cpu=True)
        prediction = unwrap(prediction, as_numpy=True, to_cpu=True)
        target = unwrap(target, as_numpy=True, to_cpu=True)

        for b in range(batch_size):
            fig = plt.figure()

            ax = fig.add_subplot(2, 2, 1)
            ax.imshow(image[b, 0, ...])
            ax.set_title('raw data')

            ax = fig.add_subplot(2, 2, 2)
            ax.imshow(target[b, ...])
            ax.set_title('ground truth')

            ax = fig.add_subplot(2, 2, 4)
            ax.imshow(prediction[b, ...])
            ax.set_title('prediction')

            fig.tight_layout()
            plt.show()
##############################################################################
# Custom UNet
# ----------------------------
# Often one needs to have a UNet with custom layers.
# Here we show how to implement such a customized UNet.
# To this end we derive from :code:`UNetBase`.
# For the sake of this example we will create
# a Unet which uses depthwise convolutions and might be trained on a CPU
from inferno.extensions.models import UNetBase
from inferno.extensions.layers import ConvSELU2D, ConvReLU2D, ConvELU2D, ConvSigmoid2D,Conv2D,ConvActivation
class CheapConv(nn.Module):
    """Depthwise 3x3 convolution followed by a 1x1 pointwise convolution.

    A cheap separable replacement for a full 3x3 conv; ``activated`` decides
    whether the pointwise conv carries a ReLU.
    """

    def __init__(self, in_channels, out_channels, activated):
        super(CheapConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Spatial (depthwise) part, shared by both variants.
        depthwise = ConvActivation(in_channels=in_channels, out_channels=in_channels,
                                   depthwise=True, kernel_size=(3, 3),
                                   activation='ReLU', dim=2)
        # Channel-mixing (pointwise) part, with or without activation.
        if activated:
            pointwise = ConvReLU2D(in_channels=in_channels, out_channels=out_channels,
                                   kernel_size=(1, 1))
        else:
            pointwise = Conv2D(in_channels=in_channels, out_channels=out_channels,
                               kernel_size=(1, 1))
        self.convs = torch.nn.Sequential(depthwise, pointwise)

    def forward(self, x):
        assert x.shape[1] == self.in_channels, "input has wrong number of channels"
        x = self.convs(x)
        assert x.shape[1] == self.out_channels, "output has wrong number of channels"
        return x
class CheapConvBlock(nn.Module):
    """Residual block built from two CheapConv layers.

    When the channel count changes, a 1x1 ConvReLU2D first projects the input;
    ``activated`` controls a final ReLU after the residual addition.
    """

    def __init__(self, in_channels, out_channels, activated):
        super(CheapConvBlock, self).__init__()
        self.activated = activated
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Project the input only when in/out channel counts differ.
        if in_channels != out_channels:
            self.start = ConvReLU2D(in_channels=in_channels,
                                    out_channels=out_channels,
                                    kernel_size=(1, 1))
        else:
            self.start = None
        self.conv_a = CheapConv(in_channels=out_channels, out_channels=out_channels,
                                activated=True)
        self.conv_b = CheapConv(in_channels=out_channels, out_channels=out_channels,
                                activated=False)
        self.activation = torch.nn.ReLU()

    def forward(self, x):
        # Residual input (projected if necessary).
        residual = x if self.start is None else self.start(x)
        out = self.conv_b(self.conv_a(residual))
        out = out + residual
        return self.activation(out) if self.activated else out
class MySimple2DCpUnet(UNetBase):
    # 2D UNet whose down/bottom/up blocks are residual CheapConvBlocks.
    # NOTE(review): the `residual` argument is accepted but never used in the
    # visible body -- confirm before relying on it.
    def __init__(self, in_channels, out_channels, depth=3, residual=False, **kwargs):
        super(MySimple2DCpUnet, self).__init__(in_channels=in_channels, out_channels=out_channels,
                                               dim=2, depth=depth, **kwargs)

    def conv_op_factory(self, in_channels, out_channels, part, index):
        # The very last up-block gets no final activation; all others do.
        # The trailing False means "do not return side output" for this block.
        last = part == 'up' and index == 0
        return CheapConvBlock(in_channels=in_channels, out_channels=out_channels,
                              activated=not last), False
from inferno.extensions.layers import RemoveSingletonDimension

# Lift the single input channel to 4 features, run the cheap UNet, then
# squeeze the singleton channel dimension from the prediction.
model_b = torch.nn.Sequential(
    CheapConv(in_channels=image_channels, out_channels=4, activated=True),
    MySimple2DCpUnet(in_channels=4, out_channels=pred_channels),
    RemoveSingletonDimension(dim=1)
)

###################################################
# do the training (with the same functions as before)
trainer = train_model(model=model_b, loaders=[train_loader, validate_loader], save_dir='model_b', lr=0.001)

###################################################
# do the prediction (with the same functions as before)
predict(trainer=trainer, test_loader=test_loader)
| 8,435 | 33.859504 | 144 | py |
inferno | inferno-master/examples/plot_unet_tutorial.py | """
UNet Tutorial
================================
A tentative tutorial on the usage
of the unet framework in inferno
"""
##############################################################################
# Preface
# --------------
# We start with some unspectacular multi purpose imports needed for this example
import matplotlib.pyplot as plt
import torch
import numpy
##############################################################################
# determine whether we have a gpu
# and should use cuda
USE_CUDA = torch.cuda.is_available()
##############################################################################
# Dataset
# --------------
# For simplicity we will use a toy dataset where we need to perform
# a binary segmentation task.
from inferno.io.box.binary_blobs import get_binary_blob_loaders
# convert labels from long to float as needed by
# binary cross entropy loss
def label_transform(x):
    """Turn a numpy ground-truth array into a float tensor, as BCE loss requires."""
    return torch.from_numpy(x).to(dtype=torch.float32)
#label_transform = lambda x : torch.from_numpy(x).float()
# Binary-blob toy segmentation data: small train/test/validate loaders.
train_loader, test_loader, validate_loader = get_binary_blob_loaders(
    size=8,  # how many images per {train,test,validate}
    train_batch_size=2,
    length=256,  # <= size of the images
    gaussian_noise_sigma=1.4,  # <= how noisy the images are
    train_label_transform=label_transform,
    validate_label_transform=label_transform
)

image_channels = 1   # <-- number of channels of the image
pred_channels = 1    # <-- number of channels needed for the prediction

##############################################################################
# Visualize Dataset
# ~~~~~~~~~~~~~~~~~~~~~~
fig = plt.figure()

for i, (image, target) in enumerate(train_loader):
    ax = fig.add_subplot(1, 2, 1)
    ax.imshow(image[0, 0, ...])
    ax.set_title('raw data')

    ax = fig.add_subplot(1, 2, 2)
    ax.imshow(target[0, ...])
    ax.set_title('ground truth')
    break  # only the first batch is shown

fig.tight_layout()
plt.show()
##############################################################################
# Simple UNet
# ----------------------------
# We start with a very simple predefined
# res block UNet. By default, this UNet uses ReLUs (in conjunction with batchnorm) as nonlinearities
# With :code:`activated=False` we make sure that the last layer
# is not activated since we chain the UNet with a sigmoid
# activation function.
from inferno.extensions.models import ResBlockUNet
from inferno.extensions.layers import RemoveSingletonDimension

# Simple variant: single-channel input straight into the res-block UNet,
# unactivated last layer followed by an explicit sigmoid.
model = torch.nn.Sequential(
    ResBlockUNet(dim=2, in_channels=image_channels, out_channels=pred_channels, activated=False),
    RemoveSingletonDimension(dim=1),
    torch.nn.Sigmoid()
)

##############################################################################
# while the model above will work in principal, it has some drawbacks.
# Within the UNet, the number of features is increased by a multiplicative
# factor while going down, the so-called gain. The default value for the gain is 2.
# Since we start with only a single channel we could either increase the gain,
# or use a some convolutions to increase the number of channels
# before the the UNet.
from inferno.extensions.layers import ConvReLU2D

# Preferred variant: lift the input to 5 channels first; no sigmoid here
# because training uses BCEWithLogitsLoss.
model_a = torch.nn.Sequential(
    ConvReLU2D(in_channels=image_channels, out_channels=5, kernel_size=3),
    ResBlockUNet(dim=2, in_channels=5, out_channels=pred_channels, activated=False,
                 res_block_kwargs=dict(batchnorm=True, size=2)),
    RemoveSingletonDimension(dim=1)
    # torch.nn.Sigmoid()
)
##############################################################################
# Training
# ----------------------------
# To train the unet, we use the infernos Trainer class of inferno.
# Since we train many models later on in this example we encapsulate
# the training in a function (see :ref:`sphx_glr_auto_examples_trainer.py` for
# an example dedicated to the trainer itself).
from inferno.trainers import Trainer
from inferno.utils.python_utils import ensure_dir
def train_model(model, loaders, **kwargs):
    """Train ``model`` with BCE-with-logits loss and return the fitted Trainer.

    Recognised kwargs: ``lr`` (default 1e-4) and ``max_num_epochs`` (default 200);
    other keys (e.g. save_dir) are accepted and currently ignored.
    """
    trainer = Trainer(model)
    trainer.build_criterion('BCEWithLogitsLoss')
    trainer.build_optimizer('Adam', lr=kwargs.get('lr', 0.0001))
    #trainer.validate_every((kwargs.get('validate_every', 10), 'epochs'))
    #trainer.save_every((kwargs.get('save_every', 10), 'epochs'))
    #trainer.save_to_directory(ensure_dir(kwargs.get('save_dir', 'save_dor')))
    trainer.set_max_num_epochs(kwargs.get('max_num_epochs', 200))

    # bind the loaders
    trainer.bind_loader('train', loaders[0])
    trainer.bind_loader('validate', loaders[1])

    if USE_CUDA:
        trainer.cuda()

    # do the training
    trainer.fit()
    return trainer


trainer = train_model(model=model_a, loaders=[train_loader, validate_loader], save_dir='model_a', lr=0.01)
##############################################################################
# Prediction
# ----------------------------
# The trainer contains the trained model and we can do predictions.
# We use :code:`unwrap` to convert the results to numpy arrays.
# Since we want to do many prediction we encapsulate the
# the prediction in a function
from inferno.utils.torch_utils import unwrap
def predict(trainer, test_loader, save_dir=None):
    """Run the trained model on ``test_loader`` and plot raw/target/prediction.

    Parameters
    ----------
    trainer : Trainer holding the trained model.
    test_loader : iterable of (image, target) batches.
    save_dir : unused; kept for interface compatibility.
    """
    trainer.eval_mode()
    for image, target in test_loader:
        # transfer image to gpu
        image = image.cuda() if USE_CUDA else image
        # get batch size from image
        batch_size = image.size()[0]

        # BUGFIX: the model was previously applied (and the tensors unwrapped
        # to numpy) INSIDE the per-sample loop, so from the second sample on
        # `apply_model` received a numpy array instead of a tensor.
        # Predict and unwrap once per batch instead.
        prediction = trainer.apply_model(image)
        # torch.nn.functional.sigmoid is deprecated; torch.sigmoid is equivalent.
        prediction = torch.sigmoid(prediction)

        image = unwrap(image, as_numpy=True, to_cpu=True)
        prediction = unwrap(prediction, as_numpy=True, to_cpu=True)
        target = unwrap(target, as_numpy=True, to_cpu=True)

        for b in range(batch_size):
            fig = plt.figure()

            ax = fig.add_subplot(2, 2, 1)
            ax.imshow(image[b, 0, ...])
            ax.set_title('raw data')

            ax = fig.add_subplot(2, 2, 2)
            ax.imshow(target[b, ...])
            ax.set_title('ground truth')

            ax = fig.add_subplot(2, 2, 4)
            ax.imshow(prediction[b, ...])
            ax.set_title('prediction')

            fig.tight_layout()
            plt.show()
###################################################
# do the prediction
predict(trainer=trainer, test_loader=test_loader)
##############################################################################
# Custom UNet
# ----------------------------
# Often one needs to have a UNet with custom layers.
# Here we show how to implement such a customized UNet.
# To this end we derive from :code:`UNetBase`.
# For the sake of this example we will create
# a rather exotic UNet which uses different types
# of convolutions/non-linearities in the different branches
# of the unet
from inferno.extensions.models import UNetBase
from inferno.extensions.layers import ConvSELU2D, ConvReLU2D, ConvELU2D, ConvSigmoid2D,Conv2D
from inferno.extensions.layers.sampling import Upsample
class MySimple2DUnet(UNetBase):
    # Custom 2D UNet demonstrating different conv/nonlinearity choices per part:
    # ELU pairs on the way down, ReLU pairs at the bottom, and mixed blocks on
    # the way up (the final up-block ends in an unactivated Conv2D).
    def __init__(self, in_channels, out_channels, depth=3, **kwargs):
        super(MySimple2DUnet, self).__init__(in_channels=in_channels, out_channels=out_channels,
                                             dim=2, depth=depth, **kwargs)

    def conv_op_factory(self, in_channels, out_channels, part, index):
        # Returns (module, return_side_output); side outputs are disabled here.
        if part == 'down':
            return torch.nn.Sequential(
                ConvELU2D(in_channels=in_channels, out_channels=out_channels, kernel_size=3),
                ConvELU2D(in_channels=out_channels, out_channels=out_channels, kernel_size=3)
            ), False
        elif part == 'bottom':
            return torch.nn.Sequential(
                ConvReLU2D(in_channels=in_channels, out_channels=out_channels, kernel_size=3),
                ConvReLU2D(in_channels=out_channels, out_channels=out_channels, kernel_size=3),
            ), False
        elif part == 'up':
            # are we in the very last block?
            if index == 0:
                # No activation on the final conv: the model output is logits.
                return torch.nn.Sequential(
                    ConvELU2D(in_channels=in_channels, out_channels=out_channels, kernel_size=3),
                    Conv2D(in_channels=out_channels, out_channels=out_channels, kernel_size=3)
                ), False
            else:
                return torch.nn.Sequential(
                    ConvELU2D(in_channels=in_channels, out_channels=out_channels, kernel_size=3),
                    ConvReLU2D(in_channels=out_channels, out_channels=out_channels, kernel_size=3)
                ), False
        else:
            raise RuntimeError("something is wrong")

    # this function CAN be implemented, if not, MaxPooling is used by default
    def downsample_op_factory(self, index):
        return torch.nn.MaxPool2d(kernel_size=2, stride=2)

    # this function CAN be implemented, if not, Upsampling is used by default
    def upsample_op_factory(self, index):
        return Upsample(mode='bilinear', align_corners=False, scale_factor=2)
# Lift the single input channel to 5 features, run the custom UNet, then
# squeeze the singleton channel dimension from the prediction.
model_b = torch.nn.Sequential(
    ConvReLU2D(in_channels=image_channels, out_channels=5, kernel_size=3),
    MySimple2DUnet(in_channels=5, out_channels=pred_channels),
    RemoveSingletonDimension(dim=1)
)

###################################################
# do the training (with the same functions as before)
trainer = train_model(model=model_b, loaders=[train_loader, validate_loader], save_dir='model_b', lr=0.001)

###################################################
# do the prediction (with the same functions as before)
predict(trainer=trainer, test_loader=test_loader)
| 9,582 | 35.299242 | 107 | py |
inferno | inferno-master/examples/plot_train_side_loss_unet.py | """
Train Side Loss UNet Example
================================
In this example a UNet with side supervision
and auxiliary loss implemented
"""
##############################################################################
# Imports needed for this example
import torch
import torch.nn as nn
from inferno.io.box.binary_blobs import get_binary_blob_loaders
from inferno.trainers.basic import Trainer
from inferno.extensions.layers.convolutional import Conv2D
from inferno.extensions.models.res_unet import _ResBlock as ResBlock
from inferno.extensions.models import ResBlockUNet
from inferno.utils.torch_utils import unwrap
from inferno.utils.python_utils import ensure_dir
import pylab
##############################################################################
# To create a UNet with side loss we create a new nn.Module class
# which has a ResBlockUNet as member.
# The ResBlockUNet is configured such that the results of the
# bottom convolution and all the results of the up-stream
# convolutions are returned as (side)-output.
# a 1x1 convolutions is used to give the side outputs
# the right number of out_channels and UpSampling is
# used to resize all side-outputs to the full resolution
# of the input. These side `side-predictions` are
# returned by our MySideLossUNet.
# Furthermore, all `side-predictions` are concatenated
# and feed trough another two residual blocks to make
# the final prediction.
class MySideLossUNet(nn.Module):
    # UNet with deep supervision: the bottom block and every up-stream block
    # emit a full-resolution side prediction; all side predictions plus the
    # main UNet output are concatenated and refined into a final prediction.
    def __init__(self, in_channels, out_channels, depth=3):
        super(MySideLossUNet, self).__init__()
        self.depth = depth
        # side_out_parts makes the UNet return bottom + all up-block outputs.
        self.unet = ResBlockUNet(in_channels=in_channels, out_channels=in_channels*2,
                                 dim=2, unet_kwargs=dict(depth=depth),
                                 side_out_parts=['bottom', 'up'])

        # number of out channels of each side output
        self.n_channels_per_output = self.unet.n_channels_per_output

        # 1x1 conv to give the side outs of the unet the right number of
        # channels, and an Upsample to give them the full input resolution.
        # The bottom output is 2**depth times smaller; each up-block halves
        # the required upscale factor.
        upscale_factor = 2**self.depth
        conv_and_scale = []
        for n_channels in self.n_channels_per_output:
            # conv blocks
            conv = Conv2D(in_channels=n_channels, out_channels=out_channels, kernel_size=1)
            if upscale_factor > 1:
                upsample = nn.Upsample(scale_factor=upscale_factor)
                conv_and_scale.append(nn.Sequential(conv, upsample))
            else:
                conv_and_scale.append(conv)
            upscale_factor //= 2
        self.conv_and_scale = nn.ModuleList(conv_and_scale)

        # combined number of channels after concatenating all (depth+1) side
        # predictions with the main UNet output (in_channels*2 channels).
        self.n_channels_combined = (self.depth + 1)*out_channels + in_channels*2

        # two residual blocks turn the concatenation into the final prediction
        # (last block unactivated: the output is logits).
        self.final_block = nn.Sequential(
            ResBlock(dim=2, in_channels=self.n_channels_combined, out_channels=self.n_channels_combined),
            ResBlock(in_channels=self.n_channels_combined, out_channels=out_channels,
                     dim=2, activated=False),
        )

    def forward(self, input):
        outs = self.unet(input)
        assert len(outs) == len(self.n_channels_per_output)

        # convert each unet (side) output into an out_channels-wide prediction
        preds = [None] * len(outs)
        for i, out in enumerate(outs):
            preds[i] = self.conv_and_scale[i](out)

        # this is the side output
        preds = tuple(preds)

        # concat side output predictions with main output of unet
        combined = torch.cat(preds + (outs[-1],), 1)
        final_res = self.final_block(combined)

        # return all side predictions followed by the final prediction
        return preds + (final_res,)
##############################################################################
# We use a custom loss functions which applied CrossEntropyLoss
# to all side outputs.
# The side outputs are weighted in a quadratic fashion and added up
# into a single value
class MySideLoss(nn.Module):
    """Cross-entropy applied to every side prediction.

    The i-th prediction contributes ``CrossEntropyLoss(pred, target) * 2**i``,
    so later (higher-resolution / final) predictions weigh more heavily.
    ``forward`` returns the weighted sum.
    """

    def __init__(self):
        super(MySideLoss, self).__init__()
        # 'reduce=True' is deprecated; reduction='mean' is the equivalent default.
        self.criterion = nn.CrossEntropyLoss(reduction='mean')
        # NOTE(fix): removed the stray class attributes `w` and `l` -- they were
        # dead leftovers, shadowed by the locals in forward().

    def forward(self, predictions, target):
        weight = 1.0
        total = None
        for pred in predictions:
            term = self.criterion(pred, target) * weight
            total = term if total is None else total + term
            weight *= 2
        return total
##############################################################################
# Training boilerplate (see :ref:`sphx_glr_auto_examples_trainer.py`)
# Working directories (created on demand).
LOG_DIRECTORY = ensure_dir('log')
SAVE_DIRECTORY = ensure_dir('save')
DATASET_DIRECTORY = ensure_dir('dataset')

USE_CUDA = torch.cuda.is_available()

# Build a residual unet where the last layer is not activated
sl_unet = MySideLossUNet(in_channels=5, out_channels=2)

# Lift the single input channel to 5 features before the side-loss UNet.
model = nn.Sequential(
    ResBlock(dim=2, in_channels=1, out_channels=5),
    sl_unet
)

train_loader, test_loader, validate_loader = get_binary_blob_loaders(
    train_batch_size=3,
    length=512,  # <= size of the images
    gaussian_noise_sigma=1.5  # <= how noisy the images are
)

# Build trainer
trainer = Trainer(model)
trainer.build_criterion(MySideLoss())
trainer.build_optimizer('Adam')
trainer.validate_every((10, 'epochs'))
#trainer.save_every((10, 'epochs'))
#trainer.save_to_directory(SAVE_DIRECTORY)
trainer.set_max_num_epochs(40)

# Bind loaders
trainer \
    .bind_loader('train', train_loader)\
    .bind_loader('validate', validate_loader)

if USE_CUDA:
    trainer.cuda()

# Go!
trainer.fit()

##############################################################################
# Predict with the trained network and visualize the results

# predict:
#trainer.load(best=True)
trainer.bind_loader('train', train_loader)
trainer.bind_loader('validate', validate_loader)
trainer.eval_mode()
if USE_CUDA:
    trainer.cuda()

# look at an example (only the first test batch is shown)
for img, target in test_loader:
    if USE_CUDA:
        img = img.cuda()

    # softmax on each of the predictions (side outputs + final output)
    preds = trainer.apply_model(img)
    preds = [nn.functional.softmax(pred, dim=1) for pred in preds]
    preds = [unwrap(pred, as_numpy=True, to_cpu=True) for pred in preds]
    img = unwrap(img, as_numpy=True, to_cpu=True)
    target = unwrap(target, as_numpy=True, to_cpu=True)

    n_plots = len(preds) + 2
    batch_size = preds[0].shape[0]

    for b in range(batch_size):
        fig = pylab.figure()

        ax1 = fig.add_subplot(2, 4, 1)
        ax1.set_title('image')
        ax1.imshow(img[b, 0, ...])

        ax2 = fig.add_subplot(2, 4, 2)
        ax2.set_title('ground truth')
        ax2.imshow(target[b, ...])

        # channel 1 is the foreground probability
        for i, pred in enumerate(preds):
            axn = fig.add_subplot(2, 4, 3 + i)
            axn.imshow(pred[b, 1, ...])

            if i + 1 < len(preds):
                axn.set_title('side prediction %d' % i)
            else:
                axn.set_title('combined prediction')

        pylab.show()
    break
| 7,112 | 30.613333 | 104 | py |
inferno | inferno-master/examples/trainer.py | """
Trainer Example
================================
This example should illustrate how to use the trainer class.
"""
import torch.nn as nn
from inferno.io.box.cifar import get_cifar10_loaders
from inferno.trainers.basic import Trainer
from inferno.trainers.callbacks.logging.tensorboard import TensorboardLogger
from inferno.extensions.layers import ConvELU2D
from inferno.extensions.layers import Flatten
from inferno.utils.python_utils import ensure_dir
from inferno.extensions.layers import SELU
##################################################
# change directories to your needs
LOG_DIRECTORY = ensure_dir('log')
SAVE_DIRECTORY = ensure_dir('save')
DATASET_DIRECTORY = ensure_dir('dataset')
##################################################
# whether the CIFAR10 data should be downloaded
DOWNLOAD_CIFAR = True
USE_CUDA = True
##################################################
# Build torch model
model = nn.Sequential(
    ConvELU2D(in_channels=3, out_channels=256, kernel_size=3),
    nn.MaxPool2d(kernel_size=2, stride=2),
    ConvELU2D(in_channels=256, out_channels=256, kernel_size=3),
    nn.MaxPool2d(kernel_size=2, stride=2),
    ConvELU2D(in_channels=256, out_channels=256, kernel_size=3),
    nn.MaxPool2d(kernel_size=2, stride=2),
    Flatten(),
    nn.Linear(in_features=(256 * 4 * 4), out_features=10),
    nn.Softmax()  # NOTE(review): no `dim` given - implicit-dim Softmax is deprecated; confirm torch version
)
##################################################
# data loaders
train_loader, validate_loader = get_cifar10_loaders(DATASET_DIRECTORY,
                                                    download=DOWNLOAD_CIFAR)
##################################################
# Build trainer
trainer = Trainer(model)
trainer.build_criterion('CrossEntropyLoss')
trainer.build_metric('CategoricalError')
trainer.build_optimizer('Adam')
trainer.validate_every((2, 'epochs'))
trainer.save_every((5, 'epochs'))
trainer.save_to_directory(SAVE_DIRECTORY)
trainer.set_max_num_epochs(10)
# log scalars every iteration; image logging is disabled
trainer.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
                                       log_images_every='never'),
                     log_directory=LOG_DIRECTORY)
##################################################
# Bind loaders
trainer.bind_loader('train', train_loader)
trainer.bind_loader('validate', validate_loader)
##################################################
# activate cuda
if USE_CUDA:
    trainer.cuda()
##################################################
# fit
trainer.fit()
| 2,399 | 30.578947 | 76 | py |
inferno | inferno-master/inferno/io/core/base.py | from torch.utils.data.dataset import Dataset
class SyncableDataset(Dataset):
    """A dataset whose length is defined by a shared ``base_sequence``.

    Datasets referring to the same base sequence stay in sync:
    ``sync_with`` adopts the base sequence of another dataset whenever
    that dataset exposes one.
    """

    def __init__(self, base_sequence=None):
        self.base_sequence = base_sequence

    def sync_with(self, dataset):
        """Adopt ``dataset.base_sequence`` if it exists; return self."""
        try:
            self.base_sequence = dataset.base_sequence
        except AttributeError:
            # `dataset` exposes no base sequence - keep our own.
            pass
        return self

    def __len__(self):
        if self.base_sequence is None:
            raise RuntimeError(
                "Class {} does not specify a base sequence. Either specify "
                "one by assigning to self.base_sequence or override the "
                "__len__ method.".format(self.__class__.__name__))
        return len(self.base_sequence)
class IndexSpec(object):
    """Carrier for extra index information a `Dataset` may send back.

    Useful e.g. during inference, where one may wish to (asynchronously)
    learn more about the current input.
    """

    def __init__(self, index=None, base_sequence_at_index=None):
        # Integer position in the dataset.
        self.index = index
        # Element of the base sequence located at `index`, if any.
        self.base_sequence_at_index = base_sequence_at_index

    def __int__(self):
        # Allow `int(index_spec)` to recover the raw index.
        return int(self.index)
| 1,170 | 33.441176 | 91 | py |
inferno | inferno-master/inferno/io/core/zip.py | from torch.utils.data.dataset import Dataset
import torch.multiprocessing as mp
import numpy as np
from . import data_utils as du
from .base import SyncableDataset
from ...utils.exceptions import assert_
from ...utils import python_utils as pyu
import random
class Zip(SyncableDataset):
    """
    Zip two or more datasets to one dataset. If the datasets implement synchronization primitives,
    they are all synchronized with the first dataset.
    """
    def __init__(self, *datasets, sync=False, transforms=None):
        """
        Parameters
        ----------
        datasets : torch.utils.data.dataset.Dataset
            One or more datasets to zip together.
        sync : bool
            Whether to synchronize all datasets with the first one.
        transforms : callable
            Optional transform applied to the tuple of fetched samples.
        """
        super(Zip, self).__init__()
        assert_(len(datasets) >= 1, "Expecting one or more datasets, got none.", ValueError)
        for dataset_index, dataset in enumerate(datasets):
            assert_(isinstance(dataset, Dataset),
                    "Object at position {} of type {} is not a subclass of "
                    "`torch.utils.data.dataset.Dataset`"
                    .format(dataset_index, type(dataset).__name__),
                    TypeError)
        assert_(transforms is None or callable(transforms),
                "Given `transforms` is not callable.",
                TypeError)
        self.datasets = datasets
        self.sync = sync
        self.transforms = transforms
        if self.sync:
            self.sync_datasets()
        # Inherit base sequence if sync'ing
        if self.sync and all([du.defines_base_sequence(dataset) for dataset in self.datasets]):
            self.base_sequence = list(zip(*[dataset.base_sequence for dataset in self.datasets]))
        else:
            self.base_sequence = None
    def sync_datasets(self):
        """Synchronize every other dataset with the first (master) dataset,
        provided it implements the sync primitives."""
        master_dataset = self.datasets[0]
        for dataset in self.datasets[1:]:
            if du.implements_sync_primitives(dataset):
                dataset.sync_with(master_dataset)
    def sync_with(self, dataset):
        """Sync the master dataset with an external `dataset`, then propagate
        the result to all remaining zipped datasets."""
        master_dataset = self.datasets[0]
        if du.implements_sync_primitives(master_dataset):
            master_dataset.sync_with(dataset)
        # Sync all other datasets
        self.sync_datasets()
    def __getitem__(self, index):
        assert_(index < len(self), exception_type=IndexError)
        # Fetch the sample at `index` from every zipped dataset.
        fetched = [dataset[index] for dataset in self.datasets]
        if self.transforms is None:
            return fetched
        elif callable(self.transforms):
            return self.transforms(*fetched)
        else:
            raise RuntimeError
    def __len__(self):
        # Defer to the base sequence when one is defined; otherwise the
        # shortest zipped dataset bounds the length.
        if du.defines_base_sequence(self):
            return super(Zip, self).__len__()
        else:
            return min([len(dataset) for dataset in self.datasets])
    def __repr__(self):
        if len(self.datasets) > 3:
            return "{}({}xDatasets)".format(type(self).__name__, len(self.datasets))
        else:
            return "{}(".format(type(self).__name__) + \
                   ", ".join([dataset.__repr__() for dataset in self.datasets[:-1]]) + ", " + \
                   self.datasets[-1].__repr__() + \
                   ')'
class ZipReject(Zip):
    """
    Extends `Zip` by the functionality of rejecting samples that don't fulfill
    a specified rejection criterion.
    """
    def __init__(self, *datasets, sync=False, transforms=None,
                 rejection_dataset_indices, rejection_criterion,
                 random_jump_after_reject=True):
        """
        Parameters
        ----------
        datasets : list or tuple
            Datasets to zip.
        sync : bool
            Whether to synchronize zipped datasets if a synchronization primitive is available.
        transforms : callable
            Transforms to apply on the fetched batch.
        rejection_dataset_indices : int or list or tuple
            Indices (or index) corresponding to the datasets which are used to determine whether
            a batch should be rejected.
        rejection_criterion : callable
            Criterion for rejection of batch. Must be a callable that accepts one or more
            arrays / tensors and returns True if the corresponding batch should be rejected,
            False otherwise. Should accept as many inputs as the number of elements in
            `rejection_dataset_indices` if the latter is a list, and 1 otherwise. Note that
            the order of the inputs to the `rejection_criterion` is the same as the order of
            the indices in `rejection_dataset_indices`.
        random_jump_after_reject: bool
            Whether to try a random index or the rejected index incremented by one after rejection.
        """
        super(ZipReject, self).__init__(*datasets, sync=sync, transforms=transforms)
        for rejection_dataset_index in pyu.to_iterable(rejection_dataset_indices):
            assert_(rejection_dataset_index < len(datasets),
                    "Index of the dataset to be used for rejection (= {}) is larger "
                    "than the number of datasets (= {}) minus one."
                    .format(rejection_dataset_index, len(datasets)),
                    IndexError)
        self.rejection_dataset_indices = pyu.to_iterable(rejection_dataset_indices)
        assert_(callable(rejection_criterion),
                "Rejection criterion is not callable as it should be.",
                TypeError)
        # return true if fetched should be rejected
        self.rejection_criterion = rejection_criterion
        # Array shared over processes to keep track of which indices have been rejected
        self.rejected = mp.Array('b', len(self))
        # Per-process cache of not-yet-rejected indices; rebuilt lazily from
        # the shared array (see __getitem__).
        self.available_indices = None
        # optional index mapping to exclude rejected indices, reducing dataset size (see remove_rejected())
        self.index_mapping = None
        self.random_jump_after_reject = random_jump_after_reject
    def remove_rejected(self):
        """Permanently drop all indices rejected so far; shrinks the dataset."""
        # remove the indices belonging to samples that were rejected from the dataset
        # this changes the length of the dataset
        rejected = np.array(self.rejected[:])
        self.index_mapping = np.argwhere(1 - rejected)[:, 0]
        # Fresh all-False shared array, sized to the *new* (reduced) length.
        self.rejected = mp.Array('b', len(self))
        # just in case of num_workers == 0
        self.available_indices = None
    def __len__(self):
        # After remove_rejected(), only the surviving indices count.
        if hasattr(self, 'index_mapping') and self.index_mapping is not None:
            return len(self.index_mapping)
        else:
            return super(ZipReject, self).__len__()
    def next_index_to_try(self, index):
        """Return the next candidate index after a rejection: a random index,
        or the current one incremented by one (wrapping around)."""
        if self.random_jump_after_reject:
            return np.random.randint(len(self))
        else:
            return (index + 1) % len(self)
    def fetch_from_rejection_datasets(self, index):
        """Fetch samples only from the datasets the rejection criterion uses."""
        rejection_fetched = [self.datasets[rejection_dataset_index][index]
                             for rejection_dataset_index in self.rejection_dataset_indices]
        return rejection_fetched
    def __getitem__(self, index):
        # we increase the index until a valid batch of 'rejection_dataset' is found
        assert_(index < len(self), exception_type=IndexError)
        index_ = index
        # if we have a rejection dataset, check if the rejection criterion is fulfilled
        # and update the index
        if self.rejection_dataset_indices is not None:
            # at the start of each epoch, compute the available indices from the shared variable
            if self.available_indices is None:
                self.available_indices = set(np.argwhere(1 - np.array(self.rejected[:]))[:, 0])
            reject = True
            while reject:
                # check if there are no potentially valid indices left
                if not self.available_indices:
                    raise RuntimeError("ZipReject: No valid batch was found!")
                # check if this index was marked as rejected before
                if index_ not in self.available_indices:
                    index_ = self.next_index_to_try(index_)
                    continue
                # check if this index was marked as rejected in any process
                if self.rejected[index_]:
                    self.available_indices.remove(index_)
                    continue
                # map the index, if an index_mapping has been defined (see remove_rejected())
                mapped_index_ = index_ if self.index_mapping is None else self.index_mapping[index_]
                # we only fetch the dataset which has the rejection criterion
                # and only fetch all datasets when a valid index is found
                rejection_fetched = self.fetch_from_rejection_datasets(mapped_index_)
                # check if this batch is to be rejected
                reject = self.rejection_criterion(*rejection_fetched)
                # if so, increase the index and add it
                if reject:
                    self.rejected[index_] = True
                    self.available_indices.remove(index_)
            # fetch all other datasets and concatenate them with the valid rejection_fetch
            fetched = []
            for dataset_index, dataset in enumerate(self.datasets):
                if dataset_index in self.rejection_dataset_indices:
                    # Find the index in `rejection_fetched` corresponding to this dataset_index
                    index_in_rejection_fetched = self.rejection_dataset_indices.index(dataset_index)
                    # ... and append to fetched
                    fetched.append(rejection_fetched[index_in_rejection_fetched])
                else:
                    # Fetch and append to fetched
                    fetched.append(dataset[mapped_index_])
        else:
            # map the index, if an index_mapping has been defined (see remove_rejected())
            mapped_index_ = index_ if self.index_mapping is None else self.index_mapping[index_]
            fetched = [dataset[mapped_index_] for dataset in self.datasets]
        # apply transforms if present
        if self.transforms is not None:
            assert_(callable(self.transforms), "`self.transforms` is not callable.", TypeError)
            fetched = self.transforms(*fetched)
        return fetched
| 10,048 | 45.308756 | 107 | py |
inferno | inferno-master/inferno/io/core/concatenate.py | import numpy as np
from torch.utils.data.dataset import Dataset
from ...utils import python_utils as pyu
class Concatenate(Dataset):
    """Chain multiple datasets into a single one.

    Indices ``0 .. len(d0) - 1`` address the first dataset, the following
    ``len(d1)`` indices the second one, and so on. This class does not
    implement synchronization primitives.
    """

    def __init__(self, *datasets, transforms=None):
        assert len(datasets) >= 1
        assert all(isinstance(dataset, Dataset) for dataset in datasets)
        assert transforms is None or callable(transforms)
        self.datasets = datasets
        self.transforms = transforms

    def map_index(self, index):
        """Translate a global index to ``(dataset_index, index_in_dataset)``."""
        # Cumulative dataset sizes, e.g. lengths [4, 3, 3] -> [4, 7, 10].
        cumulative_sizes = np.cumsum([len(dataset) for dataset in self.datasets])
        # The target dataset is the first one whose cumulative size exceeds
        # the index; searchsorted with side='right' finds exactly that slot.
        dataset_index = int(np.searchsorted(cumulative_sizes, index, side='right'))
        # Subtract the sizes of all preceding datasets to localize the index.
        preceding = cumulative_sizes[dataset_index - 1] if dataset_index > 0 else 0
        return dataset_index, index - preceding

    def __getitem__(self, index):
        assert index < len(self)
        dataset_index, index_in_dataset = self.map_index(index)
        sample = self.datasets[dataset_index][index_in_dataset]
        if self.transforms is None:
            return sample
        if callable(self.transforms):
            return self.transforms(*pyu.to_iterable(sample))
        raise NotImplementedError

    def __len__(self):
        return sum(len(dataset) for dataset in self.datasets)

    def __repr__(self):
        if len(self.datasets) >= 3:
            return "Concatenate({}xDatasets)".format(len(self.datasets))
        reprs = [dataset.__repr__() for dataset in self.datasets]
        return "Concatenate(" + ", ".join(reprs[:-1]) + ", " + reprs[-1] + ")"
| 2,559 | 40.290323 | 95 | py |
inferno | inferno-master/inferno/io/box/cityscapes.py | import zipfile
import io
import os
import torch.utils.data as data
from PIL import Image
from os.path import join, relpath, abspath
from ...utils.exceptions import assert_
from ..transform.base import Compose
from ..transform.generic import \
Normalize, NormalizeRange, Cast, AsTorchBatch, Project, Label2OneHot
from ..transform.image import \
RandomSizedCrop, RandomGammaCorrection, RandomFlip, Scale, PILImage2NumPyArray
from ..core import Concatenate
CITYSCAPES_CLASSES = {
0: 'unlabeled',
1: 'ego vehicle',
2: 'rectification border',
3: 'out of roi',
4: 'static',
5: 'dynamic',
6: 'ground',
7: 'road',
8: 'sidewalk',
9: 'parking',
10: 'rail track',
11: 'building',
12: 'wall',
13: 'fence',
14: 'guard rail',
15: 'bridge',
16: 'tunnel',
17: 'pole',
18: 'polegroup',
19: 'traffic light',
20: 'traffic sign',
21: 'vegetation',
22: 'terrain',
23: 'sky',
24: 'person',
25: 'rider',
26: 'car',
27: 'truck',
28: 'bus',
29: 'caravan',
30: 'trailer',
31: 'train',
32: 'motorcycle',
33: 'bicycle',
-1: 'license plate'
}
IGNORE_CLASS_LABEL = 19
# Class labels to use for training, found here:
# https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py#L61
CITYSCAPES_CLASSES_TO_LABELS = {
0: IGNORE_CLASS_LABEL,
1: IGNORE_CLASS_LABEL,
2: IGNORE_CLASS_LABEL,
3: IGNORE_CLASS_LABEL,
4: IGNORE_CLASS_LABEL,
5: IGNORE_CLASS_LABEL,
6: IGNORE_CLASS_LABEL,
7: 0,
8: 1,
9: IGNORE_CLASS_LABEL,
10: IGNORE_CLASS_LABEL,
11: 2,
12: 3,
13: 4,
14: IGNORE_CLASS_LABEL,
15: IGNORE_CLASS_LABEL,
16: IGNORE_CLASS_LABEL,
17: 5,
18: IGNORE_CLASS_LABEL,
19: 6,
20: 7,
21: 8,
22: 9,
23: 10,
24: 11,
25: 12,
26: 13,
27: 14,
28: 15,
29: IGNORE_CLASS_LABEL,
30: IGNORE_CLASS_LABEL,
31: 16,
32: 17,
33: 18,
-1: IGNORE_CLASS_LABEL
}
# Map classes to official cityscapes colors
CITYSCAPES_CLASS_COLOR_MAPPING = {
0: (0, 0, 0),
1: (0, 0, 0),
2: (0, 0, 0),
3: (0, 0, 0),
4: (0, 0, 0),
5: (111, 74, 0),
6: (81, 0, 81),
7: (128, 64, 128),
8: (244, 35, 232),
9: (250, 170, 160),
10: (230, 150, 140),
11: (70, 70, 70),
12: (102, 102, 156),
13: (190, 153, 153),
14: (180, 165, 180),
15: (150, 100, 100),
16: (150, 120, 90),
17: (153, 153, 153),
18: (153, 153, 153),
19: (250, 170, 30),
20: (220, 220, 0),
21: (107, 142, 35),
22: (152, 251, 152),
23: (70, 130, 180),
24: (220, 20, 60),
25: (255, 0, 0),
26: (0, 0, 142),
27: (0, 0, 70),
28: (0, 60, 100),
29: (0, 0, 90),
30: (0, 0, 110),
31: (0, 80, 100),
32: (0, 0, 230),
33: (119, 11, 32),
-1: (0, 0, 142),
}
# Weights corresponding to the outputs
CITYSCAPES_LABEL_WEIGHTS = {
0: 1.,
1: 1.,
2: 1.,
3: 1.,
4: 1.,
5: 1.,
6: 1.,
7: 1.,
8: 1.,
9: 1.,
10: 1.,
11: 1.,
12: 1.,
13: 1.,
14: 1.,
15: 1.,
16: 1.,
17: 1.,
18: 1.,
19: 0.
}
# 0:void 1:flat 2:construction 3:object 4:nature 5:sky 6:human 7:vehicle
CITYSCAPES_CATEGORIES = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 4, 4, 5, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7]
CITYSCAPES_IGNORE_IN_EVAL = [True, True, True, True, True, True, True, False, False, True, True,
False, False, False, True, True, True, False, True, False, False,
False, False, False, False,
False, False, False, False, True, True, False, False, False, True]
# mean and std
CITYSCAPES_MEAN = [0.28689554, 0.32513303, 0.28389177]
CITYSCAPES_STD = [0.18696375, 0.19017339, 0.18720214]
def get_matching_labelimage_file(f, groundtruth):
    """Return the path of the label image corresponding to image path ``f``.

    The leading path component is swapped for the ground-truth folder name
    and the 'leftImg8bit' token in the file name is replaced by
    '<groundtruth>_labelIds'.
    """
    label_token = groundtruth + '_labelIds'
    parts = f.split('/')
    parts[0] = groundtruth
    parts[-1] = parts[-1].replace('leftImg8bit', label_token)
    return '/'.join(parts)
def get_filelist(path):
    """Return the list of files contained in ``path``.

    Parameters
    ----------
    path : str
        Either a ``.zip`` archive or a directory.

    Returns
    -------
    list
        For a zip archive, a list of ``zipfile.ZipInfo`` objects; for a
        directory, file paths relative to the *parent* of ``path``.

    Raises
    ------
    NotImplementedError
        If ``path`` is neither a zip archive nor a directory.
    """
    if path.endswith('.zip'):
        # Context manager closes the archive's file handle (the original
        # leaked it); infolist() is the documented accessor for the same
        # member list as the `filelist` attribute.
        with zipfile.ZipFile(path, 'r') as archive:
            return archive.infolist()
    elif os.path.isdir(path):
        return [relpath(join(root, filename), abspath(join(path, '..')))
                for root, _, filenames in os.walk(path) for filename in filenames]
    else:
        raise NotImplementedError("Path must be a zip archive or a directory.")
def make_dataset(path, split):
    """Collect ``(image_entry, label_path)`` pairs for the given split.

    ``path`` may be a zip archive or a directory, so entries from
    ``get_filelist`` are either plain strings or ``zipfile.ZipInfo``
    objects. Only ``.png`` files whose second path component matches
    ``split`` are kept; the first component identifies the image folder.
    """
    # 'train_extra' carries coarse annotations, everything else fine ones.
    groundtruth = 'gtCoarse' if split == 'train_extra' else 'gtFine'
    pairs = []
    for entry in get_filelist(path):
        name = entry if isinstance(entry, str) else entry.filename
        components = name.split('/')
        if not components[-1].endswith('.png') or components[1] != split:
            continue
        label_file = get_matching_labelimage_file(name, groundtruth)
        # Keep the original entry object (string or ZipInfo) for the image.
        pairs.append((entry, label_file))
    return pairs
def extract_image(path, image_path):
    """Open the image ``image_path`` stored under ``path``.

    Parameters
    ----------
    path : str
        Either a ``.zip`` archive containing the image, or a directory root
        (the image path is then resolved relative to that root's parent).
    image_path : str
        Path of the image inside the archive / relative path on disk.

    Returns
    -------
    PIL.Image.Image
    """
    if path.endswith('.zip'):
        # Read the member fully into memory inside a context manager so the
        # archive's file handle is closed (the original leaked it); the
        # BytesIO copy keeps the returned image usable after closing.
        with zipfile.ZipFile(path, 'r') as archive:
            return Image.open(io.BytesIO(archive.read(image_path)))
    else:
        return Image.open(join(abspath(join(path, '..')), image_path), 'r')
class Cityscapes(data.Dataset):
    """Cityscapes semantic segmentation dataset (image / label-image pairs)."""
    # Map user-facing split aliases onto the canonical split names.
    SPLIT_NAME_MAPPING = {'train': 'train',
                          'training': 'train',
                          'validate': 'val',
                          'val': 'val',
                          'validation': 'val',
                          'test': 'test',
                          'testing': 'test',
                          'training_extra': 'train_extra',
                          'train_extra': 'train_extra'}
    # Dataset statistics
    CLASSES = CITYSCAPES_CLASSES
    MEAN = CITYSCAPES_MEAN
    STD = CITYSCAPES_STD
    # Known-bad file(s) skipped by __getitem__.
    BLACKLIST = ['leftImg8bit/train_extra/troisdorf/troisdorf_000000_000073_leftImg8bit.png']
    def __init__(self, root_folder, split='train', read_from_zip_archive=True,
                 image_transform=None, label_transform=None, joint_transform=None):
        """
        Parameters:
        root_folder: folder that contains both leftImg8bit_trainvaltest.zip and
            gtFine_trainvaltest.zip archives.
        split: name of dataset split (i.e. 'train_extra', 'train', 'val' or 'test')
        read_from_zip_archive: read images straight from the zip archives
            (True) or from unpacked directories (False).
        image_transform/label_transform/joint_transform: optional callables
            applied to the image, the label, and the (image, label) pair.
        """
        assert_(split in self.SPLIT_NAME_MAPPING.keys(),
                "`split` must be one of {}".format(set(self.SPLIT_NAME_MAPPING.keys())),
                KeyError)
        self.split = self.SPLIT_NAME_MAPPING.get(split)
        self.read_from_zip_archive = read_from_zip_archive
        # Get roots
        self.image_root, self.label_root = [join(root_folder, groot)
                                            for groot in self.get_image_and_label_roots()]
        # Transforms
        self.image_transform = image_transform
        self.label_transform = label_transform
        self.joint_transform = joint_transform
        # Make list with paths to the images
        self.image_paths = make_dataset(self.image_root, self.split)
    def __getitem__(self, index):
        pi, pl = self.image_paths[index]
        if pi in self.BLACKLIST:
            # Select the next image if the current image is bad
            # NOTE(review): when reading from zip archives `pi` is a ZipInfo,
            # not a string, so this membership test only matches in directory
            # mode; `index + 1` can also run past the end for the last sample
            # - confirm both.
            return self[index + 1]
        image = extract_image(self.image_root, pi)
        label = extract_image(self.label_root, pl)
        try:
            # Apply transforms
            if self.image_transform is not None:
                image = self.image_transform(image)
            if self.label_transform is not None:
                label = self.label_transform(label)
            if self.joint_transform is not None:
                image, label = self.joint_transform(image, label)
        except Exception:
            print("[!] An Exception occurred while applying the transforms at "
                  "index {} of split '{}'.".format(index, self.split))
            raise
        return image, label
    def __len__(self):
        return len(self.image_paths)
    def download(self):
        # TODO: please download the dataset from
        # https://www.cityscapes-dataset.com/
        raise NotImplementedError
    def get_image_and_label_roots(self):
        """Return (image_root, label_root) names for the current split,
        depending on whether zipped or unpacked data is read."""
        all_roots = {
            'zipped':
                {
                    'train': ('leftImg8bit_trainvaltest.zip', 'gtFine_trainvaltest.zip'),
                    'val': ('leftImg8bit_trainvaltest.zip', 'gtFine_trainvaltest.zip'),
                    'train_extra': ('leftImg8bit_trainextra.zip', 'gtCoarse.zip')
                },
            'unzipped':
                {
                    'train': ('leftImg8bit', 'gtFine'),
                    'val': ('leftImg8bit', 'gtFine'),
                    'train_extra': ('leftImg8bit', 'gtCoarse')
                }
        }
        image_and_label_roots = all_roots\
            .get('zipped' if self.read_from_zip_archive else 'unzipped').get(self.split)
        return image_and_label_roots
def make_transforms(image_shape, labels_as_onehot):
    """Build image/label/joint transform pipelines for Cityscapes.

    Parameters
    ----------
    image_shape : tuple
        Target (height, width) that crops are scaled back to.
    labels_as_onehot : bool
        If True, labels become float one-hot tensors; otherwise they are
        cast to long class indices.

    Returns
    -------
    dict
        Keyword arguments ('image_transform', 'label_transform',
        'joint_transform') for the `Cityscapes` constructor.
    """
    # Make transforms
    image_transforms = Compose(PILImage2NumPyArray(),
                               NormalizeRange(),
                               RandomGammaCorrection(),
                               Normalize(mean=CITYSCAPES_MEAN, std=CITYSCAPES_STD))
    label_transforms = Compose(PILImage2NumPyArray(),
                               Project(projection=CITYSCAPES_CLASSES_TO_LABELS))
    joint_transforms = Compose(RandomSizedCrop(ratio_between=(0.6, 1.0),
                                               preserve_aspect_ratio=True),
                               # Scale raw image back to the original shape
                               Scale(output_image_shape=image_shape,
                                     interpolation_order=3, apply_to=[0]),
                               # Scale segmentation back to the original shape
                               # (without interpolation)
                               Scale(output_image_shape=image_shape,
                                     interpolation_order=0, apply_to=[1]),
                               RandomFlip(allow_ud_flips=False),
                               # Cast raw image to float
                               Cast('float', apply_to=[0]))
    if labels_as_onehot:
        # Applying Label2OneHot on the full label image makes it unnecessarily expensive,
        # because we're throwing it away with RandomSizedCrop and Scale. Tests show that it's
        # ~1 sec faster per image.
        joint_transforms \
            .add(Label2OneHot(num_classes=len(CITYSCAPES_LABEL_WEIGHTS), dtype='bool',
                              apply_to=[1])) \
            .add(Cast('float', apply_to=[1]))
    else:
        # Cast label image to long
        joint_transforms.add(Cast('long', apply_to=[1]))
    # Batchify
    joint_transforms.add(AsTorchBatch(2, add_channel_axis_if_necessary=False))
    # Return as kwargs
    return {'image_transform': image_transforms,
            'label_transform': label_transforms,
            'joint_transform': joint_transforms}
def get_cityscapes_loaders(root_directory, image_shape=(1024, 2048), labels_as_onehot=False,
                           include_coarse_dataset=False, read_from_zip_archive=True,
                           train_batch_size=1, validate_batch_size=1, num_workers=2):
    """Build train and validation `DataLoader`s for Cityscapes.

    Returns a `(train_loader, validate_loader)` tuple. When
    `include_coarse_dataset` is True, the coarsely annotated 'train_extra'
    split is concatenated with the finely annotated training split.
    """
    # Build datasets
    train_dataset = Cityscapes(root_directory, split='train',
                               read_from_zip_archive=read_from_zip_archive,
                               **make_transforms(image_shape, labels_as_onehot))
    if include_coarse_dataset:
        # Build coarse dataset
        coarse_dataset = Cityscapes(root_directory, split='train_extra',
                                    read_from_zip_archive=read_from_zip_archive,
                                    **make_transforms(image_shape, labels_as_onehot))
        # ... and concatenate with train_dataset
        train_dataset = Concatenate(coarse_dataset, train_dataset)
    validate_dataset = Cityscapes(root_directory, split='validate',
                                  read_from_zip_archive=read_from_zip_archive,
                                  **make_transforms(image_shape, labels_as_onehot))
    # Build loaders
    train_loader = data.DataLoader(train_dataset, batch_size=train_batch_size,
                                   shuffle=True, num_workers=num_workers, pin_memory=True)
    validate_loader = data.DataLoader(validate_dataset, batch_size=validate_batch_size,
                                      shuffle=True, num_workers=num_workers, pin_memory=True)
    return train_loader, validate_loader
| 12,804 | 33.146667 | 98 | py |
inferno | inferno-master/inferno/io/box/camvid.py | # Adapted from felixgwu's PR here:
# https://github.com/felixgwu/vision/blob/cf491d301f62ae9c77ff7250fb7def5cd55ec963/torchvision/datasets/camvid.py
import os
import torch
import torch.utils.data as data
import numpy as np
from PIL import Image
from torchvision.datasets.folder import default_loader
from ...utils.exceptions import assert_
from ..transform.base import Compose
from ..transform.generic import Normalize, NormalizeRange, Cast, AsTorchBatch, Label2OneHot
from ..transform.image import \
RandomSizedCrop, RandomGammaCorrection, RandomFlip, Scale, PILImage2NumPyArray
# Compatibility shim: `is_image_file` is not exported by newer torchvision
# versions, so rebuild it from `has_file_allowed_extension` when the import
# fails.
try:
    from torchvision.datasets.folder import is_image_file
except ImportError:
    from torchvision.datasets.folder import IMG_EXTENSIONS, has_file_allowed_extension
    def is_image_file(filename):
        # True iff `filename` ends with one of torchvision's image extensions.
        return has_file_allowed_extension(filename, IMG_EXTENSIONS)
CAMVID_CLASSES = ['Sky',
'Building',
'Column-Pole',
'Road',
'Sidewalk',
'Tree',
'Sign-Symbol',
'Fence',
'Car',
'Pedestrain',
'Bicyclist',
'Void']
# weights when using median frequency balancing used in SegNet paper
# https://arxiv.org/pdf/1511.00561.pdf
# The numbers were generated by:
# https://github.com/yandex/segnet-torch/blob/master/datasets/camvid-gen.lua
CAMVID_CLASS_WEIGHTS = [0.58872014284134,
0.51052379608154,
2.6966278553009,
0.45021694898605,
1.1785038709641,
0.77028578519821,
2.4782588481903,
2.5273461341858,
1.0122526884079,
3.2375309467316,
4.1312313079834,
0]
# mean and std
CAMVID_MEAN = [0.41189489566336, 0.4251328133025, 0.4326707089857]
CAMVID_STD = [0.27413549931506, 0.28506257482912, 0.28284674400252]
CAMVID_CLASS_COLORS = [
(128, 128, 128),
(128, 0, 0),
(192, 192, 128),
(128, 64, 128),
(0, 0, 192),
(128, 128, 0),
(192, 128, 128),
(64, 64, 128),
(64, 0, 128),
(64, 64, 0),
(0, 128, 192),
(0, 0, 0),
]
def make_dataset(dir):
    """Recursively collect the paths of all image files below ``dir``.

    Directories are visited in sorted order, so the result is deterministic
    for a given directory tree.
    """
    collected = []
    for root, _, fnames in sorted(os.walk(dir)):
        collected.extend(
            os.path.join(root, fname) for fname in fnames if is_image_file(fname))
    return collected
def label_to_long_tensor(pic):
    """Convert a PIL label image into a 2-D long tensor of class indices.

    Assumes one byte per pixel (e.g. PIL mode 'L' or 'P') - TODO confirm.
    """
    # Wrap the raw image bytes in a byte tensor (no per-pixel Python loop).
    label = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
    # PIL's size is (width, height); view the buffer as (H, W, 1).
    label = label.view(pic.size[1], pic.size[0], 1)
    # Permute to (C, H, W), drop the channel axis and cast to long.
    label = label.transpose(0, 1).transpose(0, 2).squeeze().contiguous().long()
    return label
def label_to_pil_image(label):
    """Colorize a 2-D (H, W) class-index tensor into an RGB PIL image using
    the official CamVid class colors."""
    label = label.unsqueeze(0)
    # RGB canvas of shape (3, H, W), initialized to black.
    colored_label = torch.zeros(3, label.size(1), label.size(2)).byte()
    for i, color in enumerate(CAMVID_CLASS_COLORS):
        # Paint every pixel of class `i` with its color, channel by channel.
        mask = label.eq(i)
        for j in range(3):
            colored_label[j].masked_fill_(mask, color[j])
    npimg = colored_label.numpy()
    # Torch layout (C, H, W) -> PIL layout (H, W, C).
    npimg = np.transpose(npimg, (1, 2, 0))
    mode = None
    if npimg.shape[2] == 1:
        npimg = npimg[:, :, 0]
        mode = "L"
    return Image.fromarray(npimg, mode=mode)
class CamVid(data.Dataset):
    """CamVid road-scene segmentation dataset.

    Expects the directory layout of the SegNet tutorial: images live in
    '<root>/<split>' and label images in '<root>/<split>annot'.
    """
    # Map user-facing split aliases onto the canonical folder names.
    SPLIT_NAME_MAPPING = {'train': 'train',
                          'training': 'train',
                          'validate': 'val',
                          'val': 'val',
                          'validation': 'val',
                          'test': 'test',
                          'testing': 'test'}
    # Dataset statistics
    CLASS_WEIGHTS = CAMVID_CLASS_WEIGHTS
    CLASSES = CAMVID_CLASSES
    MEAN = CAMVID_MEAN
    STD = CAMVID_STD
    def __init__(self, root, split='train',
                 image_transform=None, label_transform=None, joint_transform=None,
                 download=False, loader=default_loader):
        """
        Parameters:
        root: directory containing the '<split>' and '<split>annot' folders.
        split: name of the dataset split; any key of SPLIT_NAME_MAPPING.
        image_transform/label_transform/joint_transform: optional callables
            applied to the image, the label, and the (image, label) pair.
        download: downloading is not implemented; True raises NotImplementedError.
        loader: callable used to load an image from its path.
        """
        # Validate
        assert_(split in self.SPLIT_NAME_MAPPING.keys(),
                "`split` must be one of {}".format(set(self.SPLIT_NAME_MAPPING.keys())),
                KeyError)
        # Root directory and split
        self.root_directory = root
        self.split = self.SPLIT_NAME_MAPPING.get(split)
        # Utils
        self.image_loader = loader
        # Transforms
        self.image_transform = image_transform
        self.label_transform = label_transform
        self.joint_transform = joint_transform
        # For when we implement download:
        if download:
            self.download()
        # Make dataset with paths to the image
        self.image_paths = make_dataset(os.path.join(self.root_directory, self.split))
    def __getitem__(self, index):
        path = self.image_paths[index]
        image = self.image_loader(path)
        # NOTE(review): this replaces the *first* occurrence of the split name
        # anywhere in the path; it assumes the split folder name does not also
        # appear earlier in `root` - confirm for unusual root paths.
        label = Image.open(path.replace(self.split, self.split + 'annot'))
        # Apply transforms
        if self.image_transform is not None:
            image = self.image_transform(image)
        if self.label_transform is not None:
            label = self.label_transform(label)
        if self.joint_transform is not None:
            image, label = self.joint_transform(image, label)
        return image, label
    def __len__(self):
        return len(self.image_paths)
    def download(self):
        # TODO: please download the dataset from
        # https://github.com/alexgkendall/SegNet-Tutorial/tree/master/CamVid
        raise NotImplementedError
# noinspection PyTypeChecker
def get_camvid_loaders(root_directory, image_shape=(360, 480), labels_as_onehot=False,
                       train_batch_size=1, validate_batch_size=1, test_batch_size=1,
                       num_workers=2):
    """Build train, validation and test `DataLoader`s for CamVid.

    Returns a `(train_loader, validate_loader, test_loader)` tuple; all
    three share the same augmentation / normalization pipeline.
    """
    # Make transforms
    image_transforms = Compose(PILImage2NumPyArray(),
                               NormalizeRange(),
                               RandomGammaCorrection(),
                               Normalize(mean=CAMVID_MEAN, std=CAMVID_STD))
    label_transforms = PILImage2NumPyArray()
    joint_transforms = Compose(RandomSizedCrop(ratio_between=(0.6, 1.0),
                                               preserve_aspect_ratio=True),
                               # Scale raw image back to the original shape
                               Scale(output_image_shape=image_shape,
                                     interpolation_order=3, apply_to=[0]),
                               # Scale segmentation back to the original shape
                               # (without interpolation)
                               Scale(output_image_shape=image_shape,
                                     interpolation_order=0, apply_to=[1]),
                               RandomFlip(allow_ud_flips=False),
                               # Cast raw image to float
                               Cast('float', apply_to=[0]))
    if labels_as_onehot:
        # See cityscapes loader to understand why this is here.
        joint_transforms\
            .add(Label2OneHot(num_classes=len(CAMVID_CLASS_WEIGHTS), dtype='bool',
                              apply_to=[1]))\
            .add(Cast('float', apply_to=[1]))
    else:
        # Cast label image to long
        joint_transforms.add(Cast('long', apply_to=[1]))
    # Batchify
    joint_transforms.add(AsTorchBatch(2, add_channel_axis_if_necessary=False))
    # Build datasets
    train_dataset = CamVid(root_directory, split='train',
                           image_transform=image_transforms,
                           label_transform=label_transforms,
                           joint_transform=joint_transforms)
    validate_dataset = CamVid(root_directory, split='validate',
                              image_transform=image_transforms,
                              label_transform=label_transforms,
                              joint_transform=joint_transforms)
    test_dataset = CamVid(root_directory, split='test',
                          image_transform=image_transforms,
                          label_transform=label_transforms,
                          joint_transform=joint_transforms)
    # Build loaders
    train_loader = data.DataLoader(train_dataset, batch_size=train_batch_size,
                                   shuffle=True, num_workers=num_workers, pin_memory=True)
    validate_loader = data.DataLoader(validate_dataset, batch_size=validate_batch_size,
                                      shuffle=True, num_workers=num_workers, pin_memory=True)
    test_loader = data.DataLoader(test_dataset, batch_size=test_batch_size,
                                  shuffle=True, num_workers=num_workers, pin_memory=True)
    return train_loader, validate_loader, test_loader
| 8,776 | 38.714932 | 113 | py |
inferno | inferno-master/inferno/io/box/binary_blobs.py | import torch.utils.data as data
from functools import reduce
from operator import mul

import numpy
import skimage.data
# Explicit import: `import skimage.data` alone does not guarantee the
# `skimage.transform` submodule (used for `resize` below) is loaded.
import skimage.transform
class BinaryBlobs(data.Dataset):
    """Synthetic segmentation dataset: binary-blob label images paired with a
    noisy rendering of the labels as the raw input image.

    The label image at a given index is deterministic (seeded per split and
    index); the additive noise is drawn from the global numpy RNG.
    """
    def __init__(self, size=20, length=512, blob_size_fraction=0.1,
                 n_dim=2, volume_fraction=0.5,split='train',
                 uniform_noise_range=(-1.2, 1.2),
                 gaussian_noise_sigma=1.2,
                 noise_scale_factor=8,
                 image_transform=None,
                 label_transform=None,
                 joint_transform=None):
        """
        Parameters:
        size: number of samples in the dataset.
        length: edge length of the generated images.
        blob_size_fraction/n_dim/volume_fraction: forwarded to
            skimage.data.binary_blobs.
        split: one of 'train', 'test', 'validate'; offsets the label seed.
        uniform_noise_range/gaussian_noise_sigma: noise parameters.
        noise_scale_factor: downscaling factor for the low-frequency noise.
        image_transform/label_transform/joint_transform: optional callables.
        """
        # how many images are in the dataset
        self.size = size
        # blob related members
        self.length = length
        self.blob_size_fraction = blob_size_fraction
        self.n_dim = n_dim
        self.volume_fraction = volume_fraction
        # which split {'train', 'test', 'validate'}
        self.split = split
        # noise related members
        self.uniform_noise_range = uniform_noise_range
        self.gaussian_noise_sigma = float(gaussian_noise_sigma)
        self.noise_scale_factor = noise_scale_factor
        # transforms
        self.image_transform = image_transform
        self.label_transform = label_transform
        self.joint_transform = joint_transform
        # internal
        # Disjoint seed ranges per split so splits never share label images.
        split_to_seed = dict(train=0, test=1, validate=2)
        self.master_seed = split_to_seed[self.split]*self.size
    def __getitem__(self, index):
        # generate the labels
        label = skimage.data.binary_blobs(
            length=self.length,
            blob_size_fraction=self.blob_size_fraction,
            n_dim=self.n_dim,
            volume_fraction=self.volume_fraction,
            seed=self.master_seed + index)
        # make the raw image [-1,1]
        image = label.astype('float32')*2
        image -= 1
        # add uniform noise
        # NOTE(review): all noise below uses the *unseeded* global numpy RNG,
        # so the raw image is not reproducible across runs even though the
        # label is - confirm this is intended.
        low, high = self.uniform_noise_range
        uniform_noise = numpy.random.uniform(low=low, high=high,
                                             size=image.size)
        image += uniform_noise.reshape(image.shape)
        # add gaussian noise
        gaussian_noise = numpy.random.normal(scale=self.gaussian_noise_sigma,
                                             size=image.size)
        image += gaussian_noise.reshape(image.shape)
        # generate noise at lower scales
        small_shape = [s//self.noise_scale_factor for s in label.shape]
        small_size = reduce(mul, small_shape, 1)
        small_noise_img = numpy.random.uniform(low=low, high=high,
                                               size=small_size)
        small_noise_img = small_noise_img.reshape(small_shape)
        gaussian_noise = numpy.random.normal(scale=self.gaussian_noise_sigma,
                                             size=small_size)
        small_noise_img += gaussian_noise.reshape(small_shape)
        # upsample the coarse noise to image resolution (low-frequency noise)
        noise_img = skimage.transform.resize(image = small_noise_img,
                                             output_shape=image.shape, mode='reflect')
        image += noise_img
        # standardize to zero mean / unit variance
        image -= image.mean()
        image /= image.std()
        # NOTE(review): 'long' is a numpy dtype alias that newer numpy
        # versions may not accept - confirm ('int64' is the portable form).
        label = label.astype('long')
        try:
            # Apply transforms
            if self.image_transform is not None:
                image = self.image_transform(image)
            if self.label_transform is not None:
                label = self.label_transform(label)
            if self.joint_transform is not None:
                image, label = self.joint_transform(image, label)
        except Exception:
            print("[!] An Exception occurred while applying the transforms at "
                  "index {} of split '{}'.".format(index, self.split))
            raise
        # add a singleton channel axis: (H, W) -> (1, H, W)
        image = image[None,...]
        return image, label
    def __len__(self):
        return self.size
def get_binary_blob_loaders(train_batch_size=1, test_batch_size=1,
                            num_workers=1,
                            train_image_transform=None,
                            train_label_transform=None,
                            train_joint_transform=None,
                            validate_image_transform=None,
                            validate_label_transform=None,
                            validate_joint_transform=None,
                            test_image_transform=None,
                            test_label_transform=None,
                            test_joint_transform=None,
                            **kwargs):
    """Build DataLoaders over the train/test/validate splits of `BinaryBlobs`.

    Extra `kwargs` are forwarded to the `BinaryBlobs` constructor (size,
    length, noise settings, ...).

    Returns
    -------
    tuple
        (trainloader, testloader, validloader).

    NOTE(review): the validation loader reuses `test_batch_size`; there is
    no separate validation batch-size parameter.
    """
    trainset = BinaryBlobs(split='train', image_transform=train_image_transform,
                           label_transform=train_label_transform, joint_transform=train_joint_transform, **kwargs)
    testset = BinaryBlobs(split='test', image_transform=test_image_transform,
                          label_transform=test_label_transform, joint_transform=test_joint_transform, **kwargs)
    validset = BinaryBlobs(split='validate',image_transform=validate_image_transform,
                           label_transform=validate_label_transform, joint_transform=validate_joint_transform, **kwargs)
    trainloader = data.DataLoader(trainset, batch_size=train_batch_size,
                                  num_workers=num_workers)
    testloader = data.DataLoader(testset, batch_size=test_batch_size,
                                 num_workers=num_workers)
    validloader = data.DataLoader(validset, batch_size=test_batch_size,
                                  num_workers=num_workers)
    return trainloader, testloader, validloader
# Smoke test: build the dataset with defaults and draw a single sample.
if __name__ == "__main__":
    ds = BinaryBlobs()
    ds[0]
inferno | inferno-master/inferno/io/box/cifar.py | import os
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
def get_cifar10_loaders(root_directory, train_batch_size=128, test_batch_size=256,
                        download=False, augment=False, validation_dataset_size=None):
    """Build CIFAR10 train/test (and optionally validation) DataLoaders.

    Parameters
    ----------
    root_directory : str
        Directory under which a 'data' subdirectory is used for the dataset.
    train_batch_size : int
        Batch size for the training loader.
    test_batch_size : int
        Batch size for the test loader (also used for the validation loader).
    download : bool
        Whether to download the dataset if it is not present.
    augment : bool
        Whether to apply random-crop + horizontal-flip augmentation to the
        training set (normalization is always applied).
    validation_dataset_size : int or None
        If set, this many training samples are held out as a validation set
        (using the test-time transform) via `SubsetRandomSampler`.

    Returns
    -------
    tuple
        (trainloader, testloader), or
        (trainloader, validloader, testloader) when a validation size is given.
    """
    # Data preparation for CIFAR10.
    if augment:
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.247, 0.2435, 0.2616)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.247, 0.2435, 0.2616)),
        ])
    else:
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.247, 0.2435, 0.2616)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.247, 0.2435, 0.2616)),
        ])
    trainset = torchvision.datasets.CIFAR10(root=os.path.join(root_directory, 'data'),
                                            train=True, download=download,
                                            transform=transform_train)
    if validation_dataset_size:
        # Split off the last `validation_dataset_size` indices of a random
        # permutation; the validation copy uses the test-time transform.
        indices = torch.randperm(len(trainset))
        train_indices = indices[:(len(indices) - validation_dataset_size)]
        valid_indices = indices[(len(indices) - validation_dataset_size):]
        validset = torchvision.datasets.CIFAR10(root=os.path.join(root_directory, 'data'),
                                                train=True, download=download,
                                                transform=transform_test)
        trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size,
                                                  pin_memory=True, num_workers=1,
                                                  sampler=SubsetRandomSampler(train_indices))
        validloader = torch.utils.data.DataLoader(validset, batch_size=test_batch_size,
                                                  pin_memory=True, num_workers=1,
                                                  sampler=SubsetRandomSampler(valid_indices))
    else:
        trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size,
                                                  shuffle=True, pin_memory=True, num_workers=1)
    testset = torchvision.datasets.CIFAR10(root=os.path.join(root_directory, 'data'),
                                           train=False, download=download,
                                           transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size,
                                             shuffle=False, pin_memory=True, num_workers=1)
    if validation_dataset_size:
        return trainloader, validloader, testloader
    else:
        return trainloader, testloader
def get_cifar100_loaders(root_directory, train_batch_size=128, test_batch_size=100,
                         download=False, augment=False, validation_dataset_size=None):
    """Build CIFAR100 train/test (and optionally validation) DataLoaders.

    Mirrors `get_cifar10_loaders` but with CIFAR100 data and its
    normalization statistics.

    Parameters
    ----------
    root_directory : str
        Directory under which a 'data' subdirectory is used for the dataset.
    train_batch_size : int
        Batch size for the training loader.
    test_batch_size : int
        Batch size for the test loader (also used for the validation loader).
    download : bool
        Whether to download the dataset if it is not present.
    augment : bool
        Whether to apply random-crop + horizontal-flip augmentation to the
        training set (normalization is always applied).
    validation_dataset_size : int or None
        If set, this many training samples are held out as a validation set
        (using the test-time transform) via `SubsetRandomSampler`.

    Returns
    -------
    tuple
        (trainloader, testloader), or
        (trainloader, validloader, testloader) when a validation size is given.
    """
    # Data preparation for CIFAR100. Adapted from
    # https://github.com/kuangliu/pytorch-cifar/blob/master/main.py
    if augment:
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5071, 0.4865, 0.4409), std=(0.2673, 0.2564, 0.2762)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5071, 0.4865, 0.4409), std=(0.2673, 0.2564, 0.2762)),
        ])
    else:
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5071, 0.4865, 0.4409), std=(0.2673, 0.2564, 0.2762)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5071, 0.4865, 0.4409), std=(0.2673, 0.2564, 0.2762)),
        ])
    trainset = torchvision.datasets.CIFAR100(root=os.path.join(root_directory, 'data'),
                                             train=True, download=download,
                                             transform=transform_train)
    if validation_dataset_size:
        # Split off the last `validation_dataset_size` indices of a random
        # permutation; the validation copy uses the test-time transform.
        indices = torch.randperm(len(trainset))
        train_indices = indices[:(len(indices) - validation_dataset_size)]
        valid_indices = indices[(len(indices) - validation_dataset_size):]
        validset = torchvision.datasets.CIFAR100(root=os.path.join(root_directory, 'data'),
                                                 train=True, download=download,
                                                 transform=transform_test)
        trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size,
                                                  pin_memory=True, num_workers=1,
                                                  sampler=SubsetRandomSampler(train_indices))
        validloader = torch.utils.data.DataLoader(validset, batch_size=test_batch_size,
                                                  pin_memory=True, num_workers=1,
                                                  sampler=SubsetRandomSampler(valid_indices))
    else:
        trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size,
                                                  shuffle=True, pin_memory=True, num_workers=1)
    testset = torchvision.datasets.CIFAR100(root=os.path.join(root_directory, 'data'),
                                            train=False, download=download,
                                            transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size,
                                             shuffle=False, pin_memory=True, num_workers=1)
    if validation_dataset_size:
        return trainloader, validloader, testloader
    else:
        return trainloader, testloader
| 6,337 | 52.260504 | 96 | py |
inferno | inferno-master/inferno/io/transform/image.py | import numpy as np
from scipy.ndimage import zoom
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates, rotate
from scipy.ndimage.morphology import binary_dilation, binary_erosion
from skimage.exposure import adjust_gamma
from warnings import catch_warnings, simplefilter
from .base import Transform
from ...utils.exceptions import assert_, ShapeError
class PILImage2NumPyArray(Transform):
    """Convert a PIL Image object to a numpy array.

    For images with multiple channels (say RGB), the channel axis is moved to front. Therefore,
    a (100, 100, 3) RGB image becomes an array of shape (3, 100, 100).
    """
    def tensor_function(self, tensor):
        array = np.asarray(tensor)
        ndim = array.ndim
        if ndim not in (2, 3):
            raise NotImplementedError("Expected tensor to be a 2D or 3D "
                                      "numpy array, got a {}D array instead."
                                      .format(ndim))
        # Channel-last --> channel-first for 3D inputs; 2D images pass through.
        return np.moveaxis(array, source=-1, destination=0) if ndim == 3 else array
class Scale(Transform):
    """Scales an image to a given size with spline interpolation of requested order.

    Unlike torchvision.transforms.Scale, this does not depend on PIL and therefore works
    with numpy arrays. If you do have a PIL image and wish to use this transform, consider
    applying `PILImage2NumPyArray` first.

    Warnings
    --------
    This transform uses `scipy.ndimage.zoom` and requires scipy >= 0.13.0 to work correctly.
    """
    def __init__(self, output_image_shape, interpolation_order=3, zoom_kwargs=None, **super_kwargs):
        """
        Parameters
        ----------
        output_image_shape : list or tuple or int or None
            Target size of the output image. Aspect ratio may not be preserved.
            If output_image_shape is None, image input size will be preserved
        interpolation_order : int
            Interpolation order for the spline interpolation.
        zoom_kwargs : dict
            Keyword arguments for `scipy.ndimage.zoom`.
        super_kwargs : dict
            Keyword arguments for the superclass.
        """
        super(Scale, self).__init__(**super_kwargs)
        if output_image_shape is not None:
            output_image_shape = (output_image_shape, output_image_shape) \
                if isinstance(output_image_shape, int) else tuple(output_image_shape)
            assert_(len(output_image_shape) == 2,
                    "`output_image_shape` must be an integer or a tuple of length 2.",
                    ValueError)
        self.output_image_shape = output_image_shape
        self.interpolation_order = interpolation_order
        self.zoom_kwargs = {} if zoom_kwargs is None else dict(zoom_kwargs)
    def image_function(self, image):
        # BUGFIX: `output_image_shape=None` is documented to preserve the input
        # size, but this method used to tuple-unpack None and raise a TypeError.
        if self.output_image_shape is None:
            return image
        source_height, source_width = image.shape
        target_height, target_width = self.output_image_shape
        # Per-axis zoom factors (aspect ratio is not necessarily preserved).
        zoom_height, zoom_width = (target_height / source_height), (target_width / source_width)
        with catch_warnings():
            # Ignore warning that scipy should be > 0.13 (it's 0.19 these days)
            simplefilter('ignore')
            rescaled_image = zoom(image, (zoom_height, zoom_width),
                                  order=self.interpolation_order, **self.zoom_kwargs)
        # This should never happen
        assert_(rescaled_image.shape == (target_height, target_width),
                "Shape mismatch that shouldn't have happened if you were on scipy > 0.13.0. "
                "Are you on scipy > 0.13.0?",
                ShapeError)
        return rescaled_image
class RandomCrop(Transform):
    """Crop input to a given size.

    This is similar to torchvision.transforms.RandomCrop, except that it operates on
    numpy arrays instead of PIL images. If you do have a PIL image and wish to use this
    transform, consider applying `PILImage2NumPyArray` first.

    Warnings
    --------
    If `output_image_shape` is larger than the image itself, the image is not cropped
    (along the relevant dimensions).
    """
    def __init__(self, output_image_shape, **super_kwargs):
        """
        Parameters
        ----------
        output_image_shape : tuple or list or int
            Expected shape of the output image. Could be an integer, (say) 100, in
            which case it's interpreted as `(100, 100)`. Note that if the image shape
            along some (or all) dimension is smaller, say `(50, 200)`, the resulting
            output images will have the shape `(50, 100)`.
        super_kwargs : dict
            Keywords to the super class.
        """
        super(RandomCrop, self).__init__(**super_kwargs)
        # Privates
        # Shape of the first image seen since the last reset; used to enforce
        # that all images sharing the same random crop have the same shape.
        self._image_shape_cache = None
        # Publics
        output_image_shape = (output_image_shape, output_image_shape) \
            if isinstance(output_image_shape, int) else tuple(output_image_shape)
        assert_(len(output_image_shape) == 2,
                "`output_image_shape` must be an integer or a tuple of length 2.",
                ValueError)
        self.output_image_shape = output_image_shape
    def clear_random_variables(self):
        # Also drop the cached shape so the next batch may have a new shape.
        self._image_shape_cache = None
        super(RandomCrop, self).clear_random_variables()
    def build_random_variables(self, height_leeway, width_leeway):
        # Leeway = (source size - crop size); only sample an offset if there is
        # room to crop along that axis.
        if height_leeway > 0:
            self.set_random_variable('height_location',
                                     np.random.randint(low=0, high=height_leeway + 1))
        if width_leeway > 0:
            self.set_random_variable('width_location',
                                     np.random.randint(low=0, high=width_leeway + 1))
    def image_function(self, image):
        # Validate image shape
        if self._image_shape_cache is not None:
            assert_(self._image_shape_cache == image.shape,
                    "RandomCrop works on multiple images simultaneously only "
                    "if they have the same shape. Was expecting an image of "
                    "shape {}, got one of shape {} instead."
                    .format(self._image_shape_cache, image.shape),
                    ShapeError)
        else:
            self._image_shape_cache = image.shape
        source_height, source_width = image.shape
        crop_height, crop_width = self.output_image_shape
        height_leeway = source_height - crop_height
        width_leeway = source_width - crop_width
        if height_leeway > 0:
            # Crop height
            height_location = self.get_random_variable('height_location',
                                                       height_leeway=height_leeway,
                                                       width_leeway=width_leeway)
            cropped = image[height_location:(height_location + crop_height), :]
            assert cropped.shape[0] == self.output_image_shape[0], "Well, shit."
        else:
            # No room to crop vertically - keep the full height.
            cropped = image
        if width_leeway > 0:
            # Crop width
            width_location = self.get_random_variable('width_location',
                                                      height_leeway=height_leeway,
                                                      width_leeway=width_leeway)
            cropped = cropped[:, width_location:(width_location + crop_width)]
            assert cropped.shape[1] == self.output_image_shape[1], "Well, shit."
        return cropped
class RandomSizedCrop(Transform):
    """Extract a randomly sized crop from the image.

    The ratio of the sizes of the cropped and the original image can be limited within
    specified bounds along both axes. To resize back to a constant sized image, compose
    with `Scale`.
    """
    def __init__(self, ratio_between=None, height_ratio_between=None, width_ratio_between=None,
                 preserve_aspect_ratio=False, relative_target_aspect_ratio=None, **super_kwargs):
        """
        Parameters
        ----------
        ratio_between : tuple
            Specify the bounds between which to sample the crop ratio. This applies to
            both height and width if not overriden. Can be None if both height and width
            ratios are specified individually.
        height_ratio_between : tuple
            Specify the bounds between which to sample the vertical crop ratio.
            Can be None if `ratio_between` is not None.
        width_ratio_between : tuple
            Specify the bounds between which to sample the horizontal crop ratio.
            Can be None if `ratio_between` is not None.
        preserve_aspect_ratio : bool
            Whether to preserve aspect ratio. If both `height_ratio_between`
            and `width_ratio_between` are specified, the former is used if this
            is set to True.
        relative_target_aspect_ratio : float
            Specify the target aspect ratio (W x H) relative to the input image
            (i.e. by mapping the input image ratio to 1:1). For instance, if an image
            has the size 1024 (H) x 2048 (W), a relative target aspect ratio of 0.5
            might yield images of size 1024 x 1024. Note that this only applies if
            `preserve_aspect_ratio` is set to False.
        super_kwargs : dict
            Keyword arguments for the super class.
        """
        super(RandomSizedCrop, self).__init__(**super_kwargs)
        # Privates
        self._image_shape_cache = None
        # Publics
        # BUGFIX: validate before converting. Previously `tuple(ratio_between)`
        # was evaluated first, so omitting all ratios raised a bare TypeError
        # (tuple(None)) and the ValueError asserts below were unreachable.
        if height_ratio_between is None:
            height_ratio_between = ratio_between
        if width_ratio_between is None:
            width_ratio_between = ratio_between
        assert_(height_ratio_between is not None,
                "`height_ratio_between` is not specified.",
                ValueError)
        assert_(width_ratio_between is not None,
                "`width_ratio_between` is not specified.",
                ValueError)
        self.height_ratio_between = tuple(height_ratio_between)
        self.width_ratio_between = tuple(width_ratio_between)
        self.preserve_aspect_ratio = preserve_aspect_ratio
        self.relative_target_aspect_ratio = relative_target_aspect_ratio
    def build_random_variables(self, image_shape):
        # Seed RNG
        np.random.seed()
        # Compute random variables
        source_height, source_width = image_shape
        height_ratio = np.random.uniform(low=self.height_ratio_between[0],
                                         high=self.height_ratio_between[1])
        if self.preserve_aspect_ratio:
            width_ratio = height_ratio
        elif self.relative_target_aspect_ratio is not None:
            width_ratio = height_ratio * self.relative_target_aspect_ratio
        else:
            width_ratio = np.random.uniform(low=self.width_ratio_between[0],
                                            high=self.width_ratio_between[1])
        crop_height = int(np.round(height_ratio * source_height))
        crop_width = int(np.round(width_ratio * source_width))
        height_leeway = source_height - crop_height
        width_leeway = source_width - crop_width
        # Set random variables
        if height_leeway > 0:
            self.set_random_variable('height_location',
                                     np.random.randint(low=0, high=height_leeway + 1))
        if width_leeway > 0:
            self.set_random_variable('width_location',
                                     np.random.randint(low=0, high=width_leeway + 1))
        self.set_random_variable('crop_height', crop_height)
        self.set_random_variable('crop_width', crop_width)
        self.set_random_variable('height_leeway', height_leeway)
        self.set_random_variable('width_leeway', width_leeway)
    def image_function(self, image):
        # Validate image shape
        if self._image_shape_cache is not None:
            assert_(self._image_shape_cache == image.shape,
                    "RandomCrop works on multiple images simultaneously only "
                    "if they have the same shape. Was expecting an image of "
                    "shape {}, got one of shape {} instead."
                    .format(self._image_shape_cache, image.shape),
                    ShapeError)
        else:
            self._image_shape_cache = image.shape
        height_leeway = self.get_random_variable('height_leeway', image_shape=image.shape)
        width_leeway = self.get_random_variable('width_leeway', image_shape=image.shape)
        if height_leeway > 0:
            height_location = self.get_random_variable('height_location',
                                                       image_shape=image.shape)
            crop_height = self.get_random_variable('crop_height',
                                                   image_shape=image.shape)
            cropped = image[height_location:(height_location + crop_height), :]
        else:
            cropped = image
        if width_leeway > 0:
            width_location = self.get_random_variable('width_location',
                                                      image_shape=image.shape)
            crop_width = self.get_random_variable('crop_width',
                                                  image_shape=image.shape)
            cropped = cropped[:, width_location:(width_location + crop_width)]
        return cropped
class RandomGammaCorrection(Transform):
    """Applies gamma correction [1] with a random gamma.

    This transform uses `skimage.exposure.adjust_gamma`, which requires the input be positive.

    References
    ----------
    [1] https://en.wikipedia.org/wiki/Gamma_correction
    """
    def __init__(self, gamma_between=(0.5, 2.), gain=1, **super_kwargs):
        """
        Parameters
        ----------
        gamma_between : tuple or list
            Specifies the range within which to sample gamma (uniformly).
        gain : int or float
            The resulting gamma corrected image is multiplied by this `gain`.
        super_kwargs : dict
            Keyword arguments for the superclass.
        """
        super(RandomGammaCorrection, self).__init__(**super_kwargs)
        self.gamma_between = list(gamma_between)
        self.gain = gain
    def build_random_variables(self):
        np.random.seed()
        low, high = self.gamma_between
        # One gamma per batch, shared by all images until the next reset.
        self.set_random_variable('gamma', np.random.uniform(low=low, high=high))
    def image_function(self, image):
        # Delegate the actual correction to skimage.
        return adjust_gamma(image,
                            gamma=self.get_random_variable('gamma'),
                            gain=self.gain)
class ElasticTransform(Transform):
    """Random Elastic Transformation.

    Builds a pair of smoothed random displacement fields once per batch and
    warps images by resampling along the displaced coordinates.
    """
    # Dtypes scipy's map_coordinates is applied to directly; everything else
    # is cast to PREFERRED_DTYPE first and cast back afterwards.
    NATIVE_DTYPES = {'float32', 'float64'}
    PREFERRED_DTYPE = 'float32'
    def __init__(self, alpha, sigma, order=1, invert=False, **super_kwargs):
        """
        Parameters
        ----------
        alpha : float
            Scale of the random displacement fields (larger = stronger warp).
        sigma : float
            Gaussian smoothing sigma applied to the displacement fields.
        order : int
            Spline interpolation order for `map_coordinates`.
        invert : bool
            If True, the displacement is applied with opposite sign.
        super_kwargs : dict
            Keyword arguments for the superclass.
        """
        self._initial_dtype = None
        super(ElasticTransform, self).__init__(**super_kwargs)
        self.alpha = alpha
        self.sigma = sigma
        self.order = order
        self.invert = invert
    def build_random_variables(self, **kwargs):
        # All this is done just once per batch (i.e. until `clear_random_variables` is called)
        np.random.seed()
        imshape = kwargs.get('imshape')
        # Build and scale random fields
        random_field_x = np.random.uniform(-1, 1, imshape) * self.alpha
        random_field_y = np.random.uniform(-1, 1, imshape) * self.alpha
        # Smooth random field (this has to be done just once per reset)
        sdx = gaussian_filter(random_field_x, self.sigma, mode='reflect')
        sdy = gaussian_filter(random_field_y, self.sigma, mode='reflect')
        # Make meshgrid
        x, y = np.meshgrid(np.arange(imshape[1]), np.arange(imshape[0]))
        # Make inversion coefficient
        _inverter = 1. if not self.invert else -1.
        # Distort meshgrid indices (invert if required)
        flow_y, flow_x = (y + _inverter * sdy).reshape(-1, 1), (x + _inverter * sdx).reshape(-1, 1)
        # Set random states
        self.set_random_variable('flow_x', flow_x)
        self.set_random_variable('flow_y', flow_y)
    def cast(self, image):
        # Remember the incoming dtype so `uncast` can restore it later.
        if image.dtype not in self.NATIVE_DTYPES:
            self._initial_dtype = image.dtype
            image = image.astype(self.PREFERRED_DTYPE)
        return image
    def uncast(self, image):
        # Restore the dtype recorded by `cast` (no-op if none was recorded).
        if self._initial_dtype is not None:
            image = image.astype(self._initial_dtype)
            self._initial_dtype = None
        return image
    def image_function(self, image):
        # Cast image to one of the native dtypes (one which that is supported by scipy)
        image = self.cast(image)
        # Take measurements
        imshape = image.shape
        # Obtain flows
        flows = self.get_random_variable('flow_y', imshape=imshape), \
                self.get_random_variable('flow_x', imshape=imshape)
        # Map cooordinates from image to distorted index set
        transformed_image = map_coordinates(image, flows,
                                            mode='reflect', order=self.order).reshape(imshape)
        # Uncast image to the original dtype
        transformed_image = self.uncast(transformed_image)
        return transformed_image
class AdditiveGaussianNoise(Transform):
    """Add gaussian noise to the input."""
    def __init__(self, sigma, **super_kwargs):
        super(AdditiveGaussianNoise, self).__init__(**super_kwargs)
        self.sigma = sigma
    def build_random_variables(self, **kwargs):
        np.random.seed()
        shape = kwargs.get('imshape')
        # One noise field per batch; cleared via `clear_random_variables`.
        self.set_random_variable('noise',
                                 np.random.normal(loc=0, scale=self.sigma, size=shape))
    def image_function(self, image):
        noise = self.get_random_variable('noise', imshape=image.shape)
        return image + noise
class RandomRotate(Transform):
    """Random 90-degree rotations."""
    def __init__(self, **super_kwargs):
        super(RandomRotate, self).__init__(**super_kwargs)
    def build_random_variables(self, **kwargs):
        np.random.seed()
        # Number of quarter turns: 0, 1, 2 or 3.
        self.set_random_variable('k', np.random.randint(0, 4))
    def image_function(self, image):
        num_quarter_turns = self.get_random_variable('k')
        return np.rot90(image, k=num_quarter_turns)
class RandomTranspose(Transform):
    """Random 2d transpose."""
    def __init__(self, **super_kwargs):
        super(RandomTranspose, self).__init__(**super_kwargs)
    def build_random_variables(self, **kwargs):
        np.random.seed()
        # Transpose with probability 0.5.
        self.set_random_variable('do_transpose', np.random.uniform() > 0.5)
    def image_function(self, image):
        transpose = self.get_random_variable('do_transpose')
        return np.transpose(image) if transpose else image
class RandomFlip(Transform):
    """Random left-right or up-down flips."""
    def __init__(self, allow_lr_flips=True, allow_ud_flips=True, **super_kwargs):
        super(RandomFlip, self).__init__(**super_kwargs)
        self.allow_lr_flips = allow_lr_flips
        self.allow_ud_flips = allow_ud_flips
    def build_random_variables(self, **kwargs):
        np.random.seed()
        # Both coins are always tossed (in this order) so the RNG stream is
        # identical regardless of which flips are allowed.
        self.set_random_variable('flip_lr', np.random.uniform() > 0.5)
        self.set_random_variable('flip_ud', np.random.uniform() > 0.5)
    def image_function(self, image):
        flipped = image
        if self.allow_lr_flips and self.get_random_variable('flip_lr'):
            flipped = np.fliplr(flipped)
        if self.allow_ud_flips and self.get_random_variable('flip_ud'):
            flipped = np.flipud(flipped)
        return flipped
class CenterCrop(Transform):
    """ Crop patch of size `size` from the center of the image """
    def __init__(self, size, **super_kwargs):
        super(CenterCrop, self).__init__(**super_kwargs)
        assert isinstance(size, (int, tuple))
        # An int means a square crop.
        self.size = (size, size) if isinstance(size, int) else size
    def image_function(self, image):
        height, width = image.shape
        target_h, target_w = self.size
        cropped = image
        # Only crop along axes where the image is larger than the target.
        if height > target_h:
            top = int(round((height - target_h) / 2.))
            cropped = cropped[top:top + target_h, :]
        if width > target_w:
            left = int(round((width - target_w) / 2.))
            cropped = cropped[:, left:left + target_w]
        return cropped
class BinaryMorphology(Transform):
    """
    Apply a binary morphology operation on an image. Supported operations are dilation
    and erosion.
    """
    def __init__(self, mode, num_iterations=1, morphology_kwargs=None, **super_kwargs):
        """
        Parameters
        ----------
        mode : {'dilate', 'erode'}
            Whether to dilate or erode.
        num_iterations : int
            Number of iterations to apply the operation for.
        morphology_kwargs: dict
            Keyword arguments to the morphology function
            (i.e. `scipy.ndimage.morphology.binary_erosion` or
            `scipy.ndimage.morphology.binary_erosion`)
        super_kwargs : dict
            Keyword arguments to the superclass.
        """
        super(BinaryMorphology, self).__init__(**super_kwargs)
        # Validate and assign mode
        assert_(mode in ['dilate', 'erode'],
                "Mode must be one of ['dilate', 'erode']. Got {} instead.".format(mode),
                ValueError)
        self.mode = mode
        self.num_iterations = num_iterations
        self.morphology_kwargs = {} if morphology_kwargs is None else dict(morphology_kwargs)
    def image_function(self, image):
        """Apply the configured morphology op; output keeps the input dtype."""
        if self.mode == 'dilate':
            transformed_image = binary_dilation(image, iterations=self.num_iterations,
                                                **self.morphology_kwargs)
        elif self.mode == 'erode':
            transformed_image = binary_erosion(image, iterations=self.num_iterations,
                                               **self.morphology_kwargs)
        else:
            # Unreachable: mode is validated in __init__.
            raise ValueError
        # Cast transformed image to the right dtype and return
        return transformed_image.astype(image.dtype)
class BinaryDilation(BinaryMorphology):
    """Apply a binary dilation operation on an image."""
    def __init__(self, num_iterations=1, morphology_kwargs=None, **super_kwargs):
        # Thin wrapper: pin the mode to 'dilate' and defer everything else.
        super(BinaryDilation, self).__init__(mode='dilate',
                                             num_iterations=num_iterations,
                                             morphology_kwargs=morphology_kwargs,
                                             **super_kwargs)
class BinaryErosion(BinaryMorphology):
    """Apply a binary erosion operation on an image."""
    def __init__(self, num_iterations=1, morphology_kwargs=None, **super_kwargs):
        # Thin wrapper: pin the mode to 'erode' and defer everything else.
        super(BinaryErosion, self).__init__(mode='erode',
                                            num_iterations=num_iterations,
                                            morphology_kwargs=morphology_kwargs,
                                            **super_kwargs)
class FineRandomRotations(Transform):
    """ Random Rotation with random uniform angle distribution

    batch_function applies to rotation of input and label image

    Parameters
    ----------
    angle_range : int
        maximum angle of rotation
    axes : tuple, default (1,2) assuming that channel axis is 0
        pair of axis that define the 2d-plane of rotation
    mask_label : constant value that is used to pad the label images
    """
    def __init__(self, angle_range, axes=(1,2), mask_label=0, **super_kwargs):
        super(FineRandomRotations, self).__init__(**super_kwargs)
        self.angle_range = angle_range
        self.axes = axes
        self.ml = mask_label
    def build_random_variables(self):
        np.random.seed()
        # One angle per batch, drawn uniformly from [-angle_range, angle_range].
        self.set_random_variable('angle',
                                 np.random.uniform(low=-self.angle_range,
                                                   high=self.angle_range))
    def batch_function(self, image):
        """Rotate image[0] (input) and image[1] (label) by the same angle.

        The label is rotated with order=0 (nearest neighbour) to keep label
        ids intact, and padded with the configured mask label.
        """
        angle = self.get_random_variable('angle')
        return rotate(image[0], angle, axes=self.axes, reshape=False), \
               rotate(image[1], angle, axes=self.axes, order=0, cval=self.ml, reshape=False)
class RandomScaleSegmentation(Transform):
    """ Random Scale input and label image

    Parameters
    ----------
    scale_range : tuple of floats defining (min, max) scales
        maximum angle of rotation
    resize : if True, image is cropped or padded to the original size
    pad_const: value used for constant padding
    """
    def __init__(self, scale_range, resize=True, pad_const=0, **super_kwargs):
        super(RandomScaleSegmentation, self).__init__(**super_kwargs)
        self.scale_range = scale_range
        self.resize = resize
        self.pad_const = pad_const
    def build_random_variables(self):
        np.random.seed()
        # One scale factor per batch, shared by input and label.
        self.set_random_variable('seg_scale',
                                 np.random.uniform(low=self.scale_range[0],
                                                   high=self.scale_range[1]))
    def batch_function(self, image):
        """Zoom (input, segmentation) by the sampled scale.

        The input is interpolated with cubic splines, the segmentation with
        nearest neighbour (order=0) to keep label ids intact. If `resize` is
        set, the result is center-cropped or constant-padded back to the
        original spatial size.
        """
        scale = self.get_random_variable('seg_scale')
        input_image, segmentation = image
        image_shape = np.array(input_image.shape[1:])
        if input_image.ndim == segmentation.ndim + 1:
            # Give the segmentation a channel axis to match the input layout.
            segmentation = segmentation[None]
        with catch_warnings():
            simplefilter('ignore')
            img = np.stack([zoom(x, scale, order=3) for x in input_image])
            seg = np.stack([zoom(x, scale, order=0) for x in segmentation])
        new_shape = np.array(img.shape[1:])
        if self.resize:
            if scale > 1.:
                # Upscaled: center-crop back to the original size.
                # (The old comment claimed this branch pads - it crops.)
                crop_l = (new_shape - image_shape) // 2
                crop_r = new_shape - image_shape - crop_l
                # BUGFIX: index with a tuple of slices - indexing with a list
                # of slices is an error in modern numpy.
                cropping = tuple([slice(None)] +
                                 [slice(c[0] if c[0] > 0 else None,
                                        -c[1] if c[1] > 0 else None)
                                  for c in zip(crop_l, crop_r)])
                img = img[cropping]
                seg = seg[cropping]
            else:
                # Downscaled: constant-pad back to the original size.
                pad_l = (image_shape - new_shape) // 2
                pad_r = image_shape - new_shape - pad_l
                padding = [(0, 0)] + list(zip(pad_l, pad_r))
                img = np.pad(img, padding, 'constant', constant_values=self.pad_const)
                seg = np.pad(seg, padding, 'constant', constant_values=self.pad_const)
        return img, seg
| 26,648 | 43.341098 | 107 | py |
inferno | inferno-master/inferno/io/transform/volume.py | import numpy as np
import scipy
from scipy.ndimage import zoom
from scipy.ndimage.morphology import binary_dilation, binary_erosion
from .base import Transform
from ...utils.exceptions import assert_
class RandomFlip3D(Transform):
    """Randomly mirror a 3d volume along each of its three axes."""
    def __init__(self, **super_kwargs):
        super(RandomFlip3D, self).__init__(**super_kwargs)
    def build_random_variables(self, **kwargs):
        np.random.seed()
        # Independent coin toss per axis; draw order fixed for RNG stability.
        self.set_random_variable('flip_lr', np.random.uniform() > 0.5)
        self.set_random_variable('flip_ud', np.random.uniform() > 0.5)
        self.set_random_variable('flip_z', np.random.uniform() > 0.5)
    def volume_function(self, volume):
        flipped = volume
        # Copies keep the output contiguous with positive strides.
        if self.get_random_variable('flip_lr'):
            flipped = flipped[:, :, ::-1].copy()
        if self.get_random_variable('flip_ud'):
            flipped = flipped[:, ::-1, :].copy()
        if self.get_random_variable('flip_z'):
            flipped = flipped[::-1, :, :].copy()
        return flipped
class RandomRot3D(Transform):
    """Randomly rotate a 3d volume around each of its three axes.

    Each axis is rotated independently with probability `p`, by an angle drawn
    uniformly from [-rot_range, rot_range] degrees.
    """
    def __init__(self, rot_range, p=0.125, reshape=False, order=0, mode='nearest', **super_kwargs):
        """
        Parameters
        ----------
        rot_range : float
            Maximum absolute rotation angle (degrees) per axis.
        p : float
            Per-axis probability of applying the rotation.
        reshape : bool
            Passed to `scipy.ndimage.interpolation.rotate`; if True the output
            is enlarged to hold the whole rotated volume.
        order : int
            Spline interpolation order.
        mode : str
            Boundary mode for interpolation.
        super_kwargs : dict
            Keyword arguments for the superclass.
        """
        super(RandomRot3D, self).__init__(**super_kwargs)
        self.rot_range = rot_range
        self.p = p
        self.reshape = reshape
        self.order = order
        self.mode = mode
    def build_random_variables(self, **kwargs):
        np.random.seed()
        self.set_random_variable('do_z', np.random.uniform() < self.p)
        self.set_random_variable('do_y', np.random.uniform() < self.p)
        self.set_random_variable('do_x', np.random.uniform() < self.p)
        self.set_random_variable('angle_z', np.random.uniform(-self.rot_range, self.rot_range))
        self.set_random_variable('angle_y', np.random.uniform(-self.rot_range, self.rot_range))
        self.set_random_variable('angle_x', np.random.uniform(-self.rot_range, self.rot_range))
    def volume_function(self, volume):
        angle_z = self.get_random_variable('angle_z')
        angle_y = self.get_random_variable('angle_y')
        angle_x = self.get_random_variable('angle_x')
        # rotate along z-axis
        if self.get_random_variable('do_z'):
            volume = scipy.ndimage.interpolation.rotate(volume, angle_z,
                                                        order=self.order, mode=self.mode,
                                                        axes=(0, 1), reshape=self.reshape)
        # rotate along y-axis
        if self.get_random_variable('do_y'):
            volume = scipy.ndimage.interpolation.rotate(volume, angle_y,
                                                        order=self.order, mode=self.mode,
                                                        axes=(0, 2), reshape=self.reshape)
        # rotate along x-axis
        # BUGFIX: this used to re-check 'do_y', so the x-rotation was gated on
        # the y draw and the 'do_x' variable was never consulted.
        if self.get_random_variable('do_x'):
            volume = scipy.ndimage.interpolation.rotate(volume, angle_x,
                                                        order=self.order, mode=self.mode,
                                                        axes=(1, 2), reshape=self.reshape)
        return volume
# TODO this is obsolete
class AdditiveRandomNoise3D(Transform):
    """ Add gaussian noise to 3d volume

    Need to know input shape before application, but can be
    synchronized between different inputs (cf. `AdditiveNoise`)

    Arguments:
        shape: shape of input volumes
        std: standard deviation of gaussian
        super_kwargs: keyword arguments for `Transform` base class
    """
    def __init__(self, shape, std, **super_kwargs):
        super(AdditiveRandomNoise3D, self).__init__(**super_kwargs)
        self.shape = shape
        self.std = float(std)
    def build_random_variables(self, **kwargs):
        np.random.seed()
        # The full noise volume is drawn once per batch, so all tensors that
        # share this transform instance receive identical noise.
        self.set_random_variable('noise_vol',
                                 np.random.normal(loc=0.0, scale=self.std, size=self.shape))
    def volume_function(self, volume):
        noise_vol = self.get_random_variable('noise_vol')
        # Out-of-place addition: the input volume is left untouched.
        return volume + noise_vol
# TODO different options than gaussian
class AdditiveNoise(Transform):
    """ Add noise to a tensor of any shape.

    Does NOT need to know the input shape before application, but CANNOT be
    synchronized between different inputs (cf. `AdditiveRandomNoise`).

    Arguments:
        sigma: standard deviation of the noise
        mode: mode of distribution (only gaussian supported for now)
        super_kwargs: keyword arguments for `Transform` base class
    """
    def __init__(self, sigma, mode='gaussian', **super_kwargs):
        assert mode == 'gaussian'
        super().__init__(**super_kwargs)
        self.sigma = sigma

    # TODO check if volume is tensor and use torch functions in that case
    def tensor_function(self, volume):
        noise = np.random.normal(loc=0, scale=self.sigma, size=volume.shape)
        volume += noise
        return volume
class CentralSlice(Transform):
    """Extract the central slice along the leading (z) axis of a volume."""
    def volume_function(self, volume):
        center = volume.shape[0] // 2
        # keep the axis so the result is still 3d with a singleton z-axis
        return volume[center:center + 1, ...]
class VolumeCenterCrop(Transform):
    """ Crop patch of size `size` from the center of the volume.

    Arguments:
        size: target patch size; either a single int (used for all three
            axes) or a 3-tuple of ints.
        super_kwargs: keyword arguments for the `Transform` base class.
    """
    def __init__(self, size, **super_kwargs):
        super().__init__(**super_kwargs)
        assert isinstance(size, (int, tuple))
        self.size = (size, size, size) if isinstance(size, int) else size
        # BUG FIX: previously asserted `len(size) == 3`, which raises a
        # TypeError when `size` is given as an int; check the normalized size.
        assert len(self.size) == 3

    def volume_function(self, volume):
        # BUG FIX: the start offsets were previously computed from mismatched
        # axes (axis 0 was cropped with an offset derived from axis 1, etc.),
        # which produced wrong crops for non-cubic sizes.
        starts = [int(round((sh - target) / 2.))
                  for sh, target in zip(volume.shape, self.size)]
        s0, s1, s2 = starts
        t0, t1, t2 = self.size
        return volume[s0:s0 + t0, s1:s1 + t1, s2:s2 + t2]
class VolumeAsymmetricCrop(Transform):
    """ Crop `crop_left` from the left borders and `crop_right` from the right borders """
    def __init__(self, crop_left, crop_right, **super_kwargs):
        super(VolumeAsymmetricCrop, self).__init__(**super_kwargs)
        assert isinstance(crop_left, (list, tuple))
        assert isinstance(crop_right, (list, tuple))
        assert len(crop_left) == 3
        assert len(crop_right) == 3
        self.crop_left = crop_left
        self.crop_right = crop_right

    def volume_function(self, volume):
        # the left crops are the start indices ...
        start_0, start_1, start_2 = self.crop_left
        # ... and the stops are the shape minus the right crops
        stops = (np.array(volume.shape) - np.array(self.crop_right)).astype('uint32')
        stop_0, stop_1, stop_2 = stops
        return volume[start_0:stop_0, start_1:stop_1, start_2:stop_2]
class Slices2Channels(Transform):
    """ Needed for training a 2D network with slices above/below as additional channels.

    For the input data, turns one dimension (x, y or z) into channels.
    For the target data, keeps only the central slice and discards the rest.
    """
    def __init__(self, num_channels, downsampling=1, **super_kwargs):
        super(Slices2Channels, self).__init__(**super_kwargs)
        self.channels = num_channels
        self.downsampling = downsampling

    def batch_function(self, batch):
        # BUG FIX: a failed lookup previously only *printed* a message and
        # fell through, leaving `axis` undefined (confusing NameError later);
        # raise a descriptive error instead.
        try:
            axis = batch[0].shape.index(self.channels)
        except ValueError:
            raise ValueError("No axis of the input has the desired channel number %d "
                             "(input shape is %s)" % (self.channels, str(batch[0].shape)))
        half = int(self.channels/2)
        new_input = np.moveaxis(batch[0], axis, 0)
        # take every nth slice in both directions around the central slice
        indices = [i for i in range(self.channels)
                   if i % self.downsampling == half % self.downsampling]
        # num_chan after = int(num_chan / (2 * downsample)) * 2 + 1
        new_input = new_input[indices]
        new_target = np.moveaxis(batch[1], axis, 0)
        new_target = new_target[half]
        return (new_input, new_target)
class RandomScale3D(Transform):
    """Scales a volume by a random zoom factor with spline interpolation of requested order"""
    def __init__(self, zoom_factor_range, interpolation_order=0, p=0.5,
                 same_zoom=True, zoom_kwargs=None, **super_kwargs):
        """
        Parameters
        ----------
        zoom_factor_range : list or tuple
            The allowed range to sample zoom factors along the axes.
        interpolation_order : int
            Interpolation order for the spline interpolation.
        p : float
            Probability that the axis gets zoomed
        same_zoom: bool
            Apply the same zoom factor to all the axes
        zoom_kwargs : dict
            Keyword arguments for `scipy.ndimage.zoom`.
        super_kwargs : dict
            Keyword arguments for the superclass.
        """
        super(RandomScale3D, self).__init__(**super_kwargs)
        assert_(len(zoom_factor_range) == 2,
                "`zoom_factor_range` must be a list or a tuple of length 2.",
                ValueError)
        self.min, self.max = zoom_factor_range[0], zoom_factor_range[1]
        self.interpolation_order = interpolation_order
        self.p = p
        self.same_zoom = same_zoom
        self.zoom_kwargs = {} if zoom_kwargs is None else dict(zoom_kwargs)

    def build_random_variables(self, **kwargs):
        # re-seed so that data-loader worker processes do not share RNG state
        np.random.seed()
        for axis in ('z', 'y', 'x'):
            self.set_random_variable('do_' + axis, np.random.uniform() < self.p)
        for axis in ('z', 'y', 'x'):
            self.set_random_variable('zoom_' + axis,
                                     np.random.uniform(self.min, self.max))

    def volume_function(self, volume):
        # an axis whose `do_*` flag is False keeps a zoom factor of 1
        factors = []
        for axis in ('z', 'y', 'x'):
            zoom_axis = self.get_random_variable('zoom_' + axis) \
                if self.get_random_variable('do_' + axis) else 1
            factors.append(zoom_axis)
        if self.same_zoom:
            # reuse the z factor for all three axes
            factors = [factors[0]] * 3
        zoomed_volume = zoom(volume, tuple(factors),
                             order=self.interpolation_order, **self.zoom_kwargs)
        return zoomed_volume
class RandomBinaryMorphology3D(Transform):
    """
    Apply a random binary morphology operation (dilation or erosion).
    Allowed range of iteration number can be set.
    """
    def __init__(self, p=0.5, num_iter_range=(1, 5), morphology_kwargs=None, **super_kwargs):
        """
        Parameters
        ----------
        p : float
            Probability that any operation is applied
        num_iter_range : list or tuple
            The allowed (inclusive) range of iteration number to apply the operation for.
        morphology_kwargs: dict
            Keyword arguments to the morphology function
            (i.e. `scipy.ndimage.morphology.binary_erosion` or
            `scipy.ndimage.morphology.binary_dilation`)
        super_kwargs : dict
            Keyword arguments to the superclass.
        """
        super(RandomBinaryMorphology3D, self).__init__(**super_kwargs)
        assert_(len(num_iter_range) == 2,
                "`num_iter_range` must be a list or a tuple of length 2.",
                ValueError)
        self.p = p
        self.min_iter = num_iter_range[0]
        # +1 because np.random.randint excludes the upper bound
        self.max_iter = num_iter_range[1] + 1
        self.morphology_kwargs = {} if morphology_kwargs is None else dict(morphology_kwargs)

    def build_random_variables(self, **kwargs):
        # re-seed so that data-loader worker processes do not share RNG state
        np.random.seed()
        self.set_random_variable('do', np.random.uniform() < self.p)
        self.set_random_variable('erode', np.random.uniform() < 0.5)
        self.set_random_variable('iter_num', np.random.randint(self.min_iter, self.max_iter))

    def volume_function(self, volume):
        if not self.get_random_variable('do'):
            return volume
        iterations = self.get_random_variable('iter_num')
        morphology_op = binary_erosion if self.get_random_variable('erode') \
            else binary_dilation
        result = morphology_op(volume, iterations=iterations, **self.morphology_kwargs)
        # the morphology ops return a bool array; restore the input dtype
        return result.astype(volume.dtype)
class CropPad2Divisible(Transform):
    """
    Given the number, symmetrically crops/pads the volume
    for all dimensions to be divisible by this number.
    Used e.g. to feed input with any shape to models with pooling layers.
    The threshold of cropping vs padding can be specified.
    """
    def __init__(self, divisor=16, crop_pad_threshold=0.2,
                 mode='constant', padding_kwargs=None, **super_kwargs):
        """
        Parameters
        ----------
        divisor : int
            A number that all dimensions should be divisible by
        crop_pad_threshold : float
            When "division remainder to divisor" ratio is lower then this number,
            input volume will be cropped, otherwise - padded.
            Set to 0 to only pad and 1 to only crop.
        mode: 'constant', 'edge', 'symmetric', etc
            See all the possible modes in numpy.pad doc
        padding_kwargs: dict
            Keyword arguments to numpy.pad
        super_kwargs : dict
            Keyword arguments to the superclass.
        """
        super(CropPad2Divisible, self).__init__(**super_kwargs)
        assert_(0 <= crop_pad_threshold <= 1,
                "threshold must be between 0 and 1 inclusive",
                ValueError)
        assert_(divisor % 2 == 0, "divisor must be an even number", ValueError)
        self.divisor = divisor
        self.crop_pad_threshold = crop_pad_threshold
        self.mode = mode
        self.padding_kwargs = {} if padding_kwargs is None else dict(padding_kwargs)

    def volume_function(self, volume):
        half_div = int(self.divisor/2)
        remainders = [axis % self.divisor for axis in volume.shape]
        # decide per axis whether to pad up or crop down
        to_pad = [remainder/self.divisor >= self.crop_pad_threshold
                  for remainder in remainders]
        diffs = [(int(np.floor(remainder/2)), int(np.ceil(remainder/2)))
                 for remainder in remainders]
        padding = [(half_div - diff[0], half_div - diff[1])
                   if pad else (0, 0)
                   for diff, pad in zip(diffs, to_pad)]
        # BUG FIX: indexing with a *list* of slices is rejected by modern
        # numpy (deprecated since 1.15); index with a tuple of slices instead.
        cropping = tuple(slice(diff[0], -diff[1])
                         if not (pad or diff[1] == 0) else slice(None, None)
                         for diff, pad in zip(diffs, to_pad))
        volume = np.pad(volume, pad_width=padding, mode=self.mode, **self.padding_kwargs)
        volume = volume[cropping]
        return volume
class CropPad2Size(Transform):
    """
    Adjust the input volume to the given size:
    Symmetrically crops if input > size, symmetrically pads if input < size.
    """
    def __init__(self, output_size, mode='constant',
                 padding_kwargs=None, **super_kwargs):
        """
        Parameters
        ----------
        output_size : int, tuple or list
            The output size. If int, the same value is used for all axes
        mode: `constant`, `edge`, `symmetric`, etc
            See all the possible modes in numpy.pad doc
        padding_kwargs: dict
            Keyword arguments to numpy.pad
        super_kwargs : dict
            Keyword arguments to the superclass.
        """
        super(CropPad2Size, self).__init__(**super_kwargs)
        self.output_size = output_size if isinstance(output_size, (list, tuple)) \
            else (output_size, ) * 3
        assert len(self.output_size) == 3, 'The size should be given for all the dimensions'
        self.mode = mode
        self.padding_kwargs = {} if padding_kwargs is None else dict(padding_kwargs)

    def volume_function(self, volume):
        difference = [inp - outp for inp, outp in zip(volume.shape, self.output_size)]
        to_pad = [diff < 0 for diff in difference]
        to_crop = [diff > 0 for diff in difference]
        diffs = [(int(np.floor(diff/2)), int(np.ceil(diff/2)))
                 for diff in np.abs(difference)]
        padding = [(diff[0], diff[1]) if pad else (0, 0)
                   for diff, pad in zip(diffs, to_pad)]
        # BUG FIX: indexing with a *list* of slices is rejected by modern
        # numpy (deprecated since 1.15); index with a tuple of slices instead.
        cropping = tuple(slice(diff[0], -diff[1]) if crop else slice(None, None)
                         for diff, crop in zip(diffs, to_crop))
        volume = np.pad(volume, pad_width=padding, mode=self.mode, **self.padding_kwargs)
        volume = volume[cropping]
        return volume
| 16,485 | 41.380463 | 99 | py |
inferno | inferno-master/inferno/io/transform/generic.py | import numpy as np
import torch
from .base import Transform, DTypeMapping
from ...utils.exceptions import assert_, DTypeError
class Normalize(Transform):
    """Normalizes input to zero mean unit variance."""
    def __init__(self, eps=1e-4, mean=None, std=None, ignore_value=None, **super_kwargs):
        """
        Parameters
        ----------
        eps : float
            A small epsilon for numerical stability.
        mean : list or float or numpy.ndarray
            Global dataset mean for all channels.
        std : list or float or numpy.ndarray
            Global dataset std for all channels.
        ignore_value : scalar
            Value (e.g. background) that is excluded from the statistics
            and left unnormalized.
        super_kwargs : dict
            Kwargs to the superclass `inferno.io.transform.base.Transform`.
        """
        super(Normalize, self).__init__(**super_kwargs)
        self.eps = eps
        self.mean = None if mean is None else np.asarray(mean)
        self.std = None if std is None else np.asarray(std)
        self.ignore_value = ignore_value

    def tensor_function(self, tensor):
        # optionally mask a value (e.g. background) we don't want to normalize
        if self.ignore_value is None:
            mask = None
            values = tensor
        else:
            mask = tensor != self.ignore_value
            values = tensor[mask]
        mean = self.mean if self.mean is not None else np.asarray(values.mean())
        std = self.std if self.std is not None else np.asarray(values.std())
        # broadcast mean and std along the leading (channel) axis
        broadcast_shape = [-1] + [1] * (tensor.ndim - 1)
        mean = mean.reshape(*broadcast_shape)
        std = std.reshape(*broadcast_shape)
        if mask is None:
            return (tensor - mean) / (std + self.eps)
        # an int tensor would stay int after the masked assignment,
        # so cast to float first
        tensor = tensor.astype('float64')
        tensor[mask] = ((tensor - mean) / (std + self.eps))[mask]
        return tensor
class NormalizeRange(Transform):
    """Normalizes input by a constant."""
    def __init__(self, normalize_by=255., **super_kwargs):
        """
        Parameters
        ----------
        normalize_by : float or int
            Scalar to divide the input by.
        super_kwargs : dict
            Kwargs to the superclass `inferno.io.transform.base.Transform`.
        """
        super(NormalizeRange, self).__init__(**super_kwargs)
        self.normalize_by = float(normalize_by)

    def tensor_function(self, tensor):
        normalized = tensor / self.normalize_by
        return normalized
class Project(Transform):
    """
    Given a projection mapping (i.e. a dict) and an input tensor, this transform
    replaces all values in the tensor that equal a key in the mapping with the
    value corresponding to the key. Values not covered by the mapping become 0.
    """
    def __init__(self, projection, **super_kwargs):
        """
        Parameters
        ----------
        projection : dict
            The projection mapping.
        super_kwargs : dict
            Keywords to the super class.
        """
        super(Project, self).__init__(**super_kwargs)
        self.projection = dict(projection)

    def tensor_function(self, tensor):
        # start from zeros: anything not covered by the mapping maps to 0
        projected = np.zeros_like(tensor)
        for key, value in self.projection.items():
            projected[tensor == key] = value
        return projected
class Label2OneHot(Transform, DTypeMapping):
    """Convert integer labels to one-hot vectors for arbitrary dimensional data."""
    def __init__(self, num_classes, dtype='float', **super_kwargs):
        """
        Parameters
        ----------
        num_classes : int
            Number of classes.
        dtype : str
            Datatype of the output.
        super_kwargs : dict
            Keyword arguments to the superclass.
        """
        super(Label2OneHot, self).__init__(**super_kwargs)
        self.num_classes = num_classes
        self.dtype = self.DTYPE_MAPPING.get(dtype)

    def tensor_function(self, tensor):
        # compare a class-index arange (broadcast over all tensor axes) with
        # the label tensor; this yields the one-hot encoding with the class
        # axis leading
        class_axis_shape = (-1,) + (1,) * tensor.ndim
        class_indices = np.arange(self.num_classes).reshape(class_axis_shape)
        return np.equal(class_indices, tensor).astype(self.dtype)
class Cast(Transform, DTypeMapping):
    """Casts inputs to a specified datatype."""
    def __init__(self, dtype='float', **super_kwargs):
        """
        Parameters
        ----------
        dtype : {'float16', 'float32', 'float64', 'half', 'float', 'double'}
            Datatype to cast to.
        super_kwargs : dict
            Kwargs to the superclass `inferno.io.transform.base.Transform`.
        """
        super(Cast, self).__init__(**super_kwargs)
        assert dtype in self.DTYPE_MAPPING.keys()
        self.dtype = self.DTYPE_MAPPING.get(dtype)

    def tensor_function(self, tensor):
        # look up the numpy casting function (e.g. np.float32) by name
        cast = getattr(np, self.dtype)
        return cast(tensor)
class AsTorchBatch(Transform):
    """Converts a given numpy array to a torch batch tensor.

    The result is a torch tensor __without__ the leading batch axis. For example,
    if the input is an image of shape `(100, 100)`, the output is a batch of shape
    `(1, 100, 100)`. The collate function will add the leading batch axis to obtain
    a tensor of shape `(N, 1, 100, 100)`, where `N` is the batch-size.
    """
    def __init__(self, dimensionality, add_channel_axis_if_necessary=True, **super_kwargs):
        """
        Parameters
        ----------
        dimensionality : {1, 2, 3}
            Dimensionality of the data: 1 if vector, 2 if image, 3 if volume.
        add_channel_axis_if_necessary : bool
            Whether to add a channel axis where necessary. For example, if
            `dimensionality = 2` and the input tensor has 2 dimensions (i.e. an
            image), setting `add_channel_axis_if_necessary` to True results in
            the output being a 3 dimensional tensor, where the leading dimension
            is a singleton and corresponds to `channel`.
        super_kwargs : dict
            Kwargs to the superclass `inferno.io.transform.base.Transform`.
        """
        super(AsTorchBatch, self).__init__(**super_kwargs)
        assert dimensionality in [1, 2, 3]
        self.dimensionality = dimensionality
        self.add_channel_axis_if_necessary = bool(add_channel_axis_if_necessary)

    def _to_batch(self, tensor):
        assert_(isinstance(tensor, np.ndarray),
                "Expected numpy array, got %s" % type(tensor),
                DTypeError)
        dim = self.dimensionality
        if dim == 1:
            # vectors must be exactly 1D; no channel axis is added
            assert tensor.ndim == 1
            return torch.from_numpy(tensor)
        elif dim in (2, 3):
            # images may be 2D (no channel axis yet) or 3D (channel axis in);
            # volumes may be 3D or 4D respectively
            assert tensor.ndim in [dim, dim + 1]
            if tensor.ndim == dim and self.add_channel_axis_if_necessary:
                # prepend a singleton channel axis
                return torch.from_numpy(tensor[None, ...])
            # channel axis is in already (or we were told not to add one)
            return torch.from_numpy(tensor)
        else:
            raise NotImplementedError

    def tensor_function(self, tensor):
        assert_(isinstance(tensor, (list, np.ndarray)),
                "Expected numpy array or list, got %s" % type(tensor),
                DTypeError)
        if isinstance(tensor, np.ndarray):
            return self._to_batch(tensor)
        return [self._to_batch(elem) for elem in tensor]
| 8,277 | 39.578431 | 97 | py |
inferno | inferno-master/inferno/extensions/initializers/base.py | import torch.nn.init as init
# Names exported via `from <module> import *`.
__all__ = ['Initializer',
           'Initialization',
           'WeightInitFunction',
           'BiasInitFunction',
           'TensorInitFunction']
class Initializer(object):
    """
    Base class for all initializers.

    Calling an instance on an ``nn.Module`` initializes the module's weight
    and/or bias tensors in-place, provided the module type is supported and
    the deriving class implements the respective ``call_on_*`` method.
    """
    # TODO Support LSTMs and GRUs
    VALID_LAYERS = {'Conv1d', 'Conv2d', 'Conv3d',
                    'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d',
                    'Linear', 'Bilinear',
                    'Embedding'}

    def __call__(self, module):
        module_class_name = module.__class__.__name__
        if module_class_name in self.VALID_LAYERS:
            # Apply to weight and bias
            # BUG FIX: modules built with `bias=False` still *have* the
            # attribute (set to None), so `hasattr` alone is not enough and
            # `.data` on None raised an AttributeError. Check for None, too.
            try:
                if getattr(module, 'weight', None) is not None:
                    self.call_on_weight(module.weight.data)
            except NotImplementedError:
                # Don't cry if it's not implemented
                pass

            try:
                if getattr(module, 'bias', None) is not None:
                    self.call_on_bias(module.bias.data)
            except NotImplementedError:
                pass
        return module

    def call_on_bias(self, tensor):
        # by default, fall back to the generic tensor initializer
        return self.call_on_tensor(tensor)

    def call_on_weight(self, tensor):
        # by default, fall back to the generic tensor initializer
        return self.call_on_tensor(tensor)

    def call_on_tensor(self, tensor):
        # deriving classes implement the actual initialization
        raise NotImplementedError

    @classmethod
    def initializes_weight(cls):
        """Whether this class overrides weight (or generic tensor) init."""
        return 'call_on_tensor' in cls.__dict__ or 'call_on_weight' in cls.__dict__

    @classmethod
    def initializes_bias(cls):
        """Whether this class overrides bias (or generic tensor) init."""
        return 'call_on_tensor' in cls.__dict__ or 'call_on_bias' in cls.__dict__
class Initialization(Initializer):
    """
    Combines a weight initializer and a bias initializer in one object.

    Each argument may be an `Initializer` instance, the name of a function
    in `torch.nn.init`, or any callable.
    """
    def __init__(self, weight_initializer=None, bias_initializer=None):
        if weight_initializer is None:
            self.weight_initializer = Initializer()
        else:
            if isinstance(weight_initializer, Initializer):
                assert weight_initializer.initializes_weight()
                self.weight_initializer = weight_initializer
            elif isinstance(weight_initializer, str):
                init_function = getattr(init, weight_initializer, None)
                assert init_function is not None
                self.weight_initializer = WeightInitFunction(init_function=init_function)
            else:
                # Provision for weight_initializer to be a function
                assert callable(weight_initializer)
                self.weight_initializer = WeightInitFunction(init_function=weight_initializer)
        if bias_initializer is None:
            self.bias_initializer = Initializer()
        else:
            if isinstance(bias_initializer, Initializer):
                # BUG FIX: `initializes_bias` was previously referenced
                # without parentheses, so the assert was always truthy.
                assert bias_initializer.initializes_bias()
                self.bias_initializer = bias_initializer
            elif isinstance(bias_initializer, str):
                init_function = getattr(init, bias_initializer, None)
                assert init_function is not None
                self.bias_initializer = BiasInitFunction(init_function=init_function)
            else:
                assert callable(bias_initializer)
                self.bias_initializer = BiasInitFunction(init_function=bias_initializer)

    def call_on_weight(self, tensor):
        return self.weight_initializer.call_on_weight(tensor)

    def call_on_bias(self, tensor):
        return self.bias_initializer.call_on_bias(tensor)
class WeightInitFunction(Initializer):
    """Wraps an arbitrary init function so it is applied to weights only."""
    def __init__(self, init_function, *init_function_args, **init_function_kwargs):
        super(WeightInitFunction, self).__init__()
        assert callable(init_function)
        self.init_function = init_function
        # extra positional / keyword arguments are forwarded on every call
        self.init_function_args = init_function_args
        self.init_function_kwargs = init_function_kwargs

    def call_on_weight(self, tensor):
        return self.init_function(tensor,
                                  *self.init_function_args,
                                  **self.init_function_kwargs)
class BiasInitFunction(Initializer):
    """Wraps an arbitrary init function so it is applied to biases only."""
    def __init__(self, init_function, *init_function_args, **init_function_kwargs):
        super(BiasInitFunction, self).__init__()
        assert callable(init_function)
        self.init_function = init_function
        # extra positional / keyword arguments are forwarded on every call
        self.init_function_args = init_function_args
        self.init_function_kwargs = init_function_kwargs

    def call_on_bias(self, tensor):
        return self.init_function(tensor,
                                  *self.init_function_args,
                                  **self.init_function_kwargs)
class TensorInitFunction(Initializer):
    """Wraps an arbitrary init function applied to both weights and biases."""
    def __init__(self, init_function, *init_function_args, **init_function_kwargs):
        super(TensorInitFunction, self).__init__()
        assert callable(init_function)
        self.init_function = init_function
        # extra positional / keyword arguments are forwarded on every call
        self.init_function_args = init_function_args
        self.init_function_kwargs = init_function_kwargs

    def call_on_tensor(self, tensor):
        return self.init_function(tensor,
                                  *self.init_function_args,
                                  **self.init_function_kwargs)
| 4,904 | 36.159091 | 96 | py |
inferno | inferno-master/inferno/extensions/initializers/presets.py | import numpy as np
import torch.nn.init as init
from functools import partial
from .base import Initialization, Initializer
# Names exported via `from <module> import *`.
__all__ = ['Constant', 'NormalWeights',
           'SELUWeightsZeroBias',
           'ELUWeightsZeroBias',
           'OrthogonalWeightsZeroBias',
           'KaimingNormalWeightsZeroBias']
class Constant(Initializer):
    """Initialize with a constant."""
    def __init__(self, constant):
        self.constant = constant

    def call_on_tensor(self, tensor):
        # fill in-place, then return the tensor for chaining
        tensor.fill_(self.constant)
        return tensor
class NormalWeights(Initializer):
    """
    Initialize weights with random numbers drawn from the normal distribution at
    `mean` and `stddev`.

    If `sqrt_gain_over_fan_in` is given, the standard deviation is additionally
    scaled by sqrt(gain / fan_in).
    """
    def __init__(self, mean=0., stddev=1., sqrt_gain_over_fan_in=None):
        self.mean = mean
        self.stddev = stddev
        self.sqrt_gain_over_fan_in = sqrt_gain_over_fan_in

    def compute_fan_in(self, tensor):
        # for linear weights the fan-in is the input dimension; for conv
        # weights it is in_channels times the kernel volume
        if tensor.dim() == 2:
            return tensor.size(1)
        return np.prod(list(tensor.size())[1:])

    def call_on_weight(self, tensor):
        stddev = self.stddev
        # scale stddev by sqrt(gain / fan_in) if requested
        if self.sqrt_gain_over_fan_in is not None:
            stddev = stddev * np.sqrt(self.sqrt_gain_over_fan_in /
                                      self.compute_fan_in(tensor))
        # initialize in-place
        tensor.normal_(self.mean, stddev)
class OrthogonalWeightsZeroBias(Initialization):
    """Orthogonal weight initialization (with gain) and zero bias."""
    def __init__(self, orthogonal_gain=1.):
        # Prefer `orthogonal_` to avoid a deprecated warning in Pytorch 0.4+
        orthogonal = getattr(init, 'orthogonal_', init.orthogonal)
        weight_init = partial(orthogonal, gain=orthogonal_gain)
        super(OrthogonalWeightsZeroBias, self).__init__(
            weight_initializer=weight_init,
            bias_initializer=Constant(0.))
class KaimingNormalWeightsZeroBias(Initialization):
    """Kaiming-normal weight initialization and zero bias."""
    def __init__(self, relu_leakage=0):
        # Prefer `kaiming_normal_` to avoid a deprecated warning in Pytorch 0.4+
        kaiming_normal = getattr(init, 'kaiming_normal_', init.kaiming_normal)
        weight_init = partial(kaiming_normal, a=relu_leakage)
        super(KaimingNormalWeightsZeroBias, self).__init__(
            weight_initializer=weight_init,
            bias_initializer=Constant(0.))
class SELUWeightsZeroBias(Initialization):
    """Fan-in scaled normal weights (gain 1) and zero bias."""
    def __init__(self):
        weight_init = NormalWeights(sqrt_gain_over_fan_in=1.)
        super(SELUWeightsZeroBias, self).__init__(weight_initializer=weight_init,
                                                  bias_initializer=Constant(0.))
class ELUWeightsZeroBias(Initialization):
    """Fan-in scaled normal weights (ELU gain) and zero bias."""
    def __init__(self):
        weight_init = NormalWeights(sqrt_gain_over_fan_in=1.5505188080679277)
        super(ELUWeightsZeroBias, self).__init__(weight_initializer=weight_init,
                                                 bias_initializer=Constant(0.))
| 2,751 | 32.560976 | 97 | py |
inferno | inferno-master/inferno/extensions/models/unet.py | import torch
import torch.nn as nn
from ..layers.identity import Identity
from ..layers.convolutional import ConvELU2D, ConvELU3D, Conv2D, Conv3D
from ..layers.sampling import Upsample as InfernoUpsample
from ...utils.math_utils import max_allowed_ds_steps
# Names exported via `from <module> import *`.
__all__ = ['UNetBase', 'UNet', 'ResBlockUNet']
# NOTE(review): `_all` looks like a legacy alias of `__all__` — confirm
# there are no remaining users before removing it.
_all = __all__
class UNetBase(nn.Module):
    """ Base class for implementing UNets.

        The depth and dimension of the UNet is flexible.
        The deriving classes must implement `conv_op_factory` and can
        implement `upsample_op_factory` and `downsample_op_factory`.

    Attributes:
        in_channels (int): Number of input channels.
        dim (int): Spatial dimension of data (must be 2 or 3).
        out_channels (int): Number of output channels. Set to None by default,
            which sets the number of out channels to the number of input channels
            to preserve symmetry of feature channels (default: None).
        depth (int): How many down-sampling / up-sampling steps
            shall be performed (default: 3).
        gain (int): Multiplicative increase of channels while going down in the UNet.
            The same factor is used to decrease the number of channels while
            going up in the UNet (default: 2).
        residual (bool): If residual is true, the output of the down-streams
            are added to the up-stream results.
            Otherwise the results are concatenated (default: False).
    """

    def __init__(self, in_channels, dim, out_channels=None, depth=3,
                 gain=2, residual=False, upsample_mode=None, p_dropout=None):

        super(UNetBase, self).__init__()

        # early sanity check
        if dim not in [2, 3]:
            raise RuntimeError("UNetBase is only implemented for 2D and 3D")

        # settings related members
        self.in_channels = int(in_channels)
        self.dim = int(dim)
        self.out_channels = self.in_channels if out_channels is \
            None else int(out_channels)
        self.depth = int(depth)
        self.gain = int(gain)
        self.residual = bool(residual)
        self.p_dropout = p_dropout

        # members to remember what to store as side output
        self._store_conv_down = []
        self._store_conv_bottom = False
        self._store_conv_up = []

        # number of channels per side output
        self.n_channels_per_output = []

        # members to hold actual nn.Modules / nn.ModuleLists
        self._pre_conv_down_ops = None
        self._post_conv_down_ops = None
        self._conv_down_ops = None

        self._pre_conv_up_ops = None
        self._post_conv_up_ops = None
        self._conv_up_ops = None

        self._upsample_ops = None
        self._downsample_ops = None

        self._pre_conv_bottom_ops = None
        self._post_conv_bottom_ops = None
        self._conv_bottom_op = None

        # upsample kwargs
        self._upsample_kwargs = self._make_upsample_kwargs(upsample_mode=upsample_mode)

        ########################################
        # default dropout
        ########################################
        if self.p_dropout is not None:
            self.use_dropout = True
            # BUG FIX: this previously read `self.torch.nn.Dropout2d/3d`,
            # which raised an AttributeError whenever p_dropout was given.
            if self.dim == 2:
                self._channel_dropout_op = nn.Dropout2d(p=float(self.p_dropout),
                                                        inplace=False)
            else:
                self._channel_dropout_op = nn.Dropout3d(p=float(self.p_dropout),
                                                        inplace=False)
        else:
            self.use_dropout = False

        # down-stream convolution blocks
        self._init__downstream()

        # pooling / downsample operators
        self._downsample_ops = nn.ModuleList([
            self.downsample_op_factory(i) for i in range(depth)
        ])

        # upsample operators
        # we flip the index that is given as argument to index consistently in up and
        # downstream sampling factories
        self._upsample_ops = nn.ModuleList([
            self.upsample_op_factory(depth - i - 1) for i in range(depth)
        ])

        # bottom block of the unet
        self._init__bottom()

        # up-stream convolution blocks
        self._init__upstream()

        assert len(self.n_channels_per_output) == self._store_conv_down.count(True) + \
            self._store_conv_up.count(True) + int(self._store_conv_bottom)

    def _get_num_channels(self, depth):
        """Number of feature channels at the given depth of the UNet."""
        assert depth > 0
        return self.in_channels * self.gain**depth

    def _init__downstream(self):
        """Build the encoder (down-stream) convolution blocks."""
        conv_down_ops = []
        self._store_conv_down = []

        current_in_channels = self.in_channels

        for i in range(self.depth):
            out_channels = self._get_num_channels(i + 1)
            op, return_op_res = self.conv_op_factory(in_channels=current_in_channels,
                                                     out_channels=out_channels,
                                                     part='down', index=i)
            conv_down_ops.append(op)
            if return_op_res:
                self.n_channels_per_output.append(out_channels)
                self._store_conv_down.append(True)
            else:
                self._store_conv_down.append(False)

            # increase the number of channels
            current_in_channels = out_channels

        # store as proper torch ModuleList
        self._conv_down_ops = nn.ModuleList(conv_down_ops)

        return current_in_channels

    def _init__bottom(self):
        """Build the bottleneck convolution block."""
        current_in_channels = self._get_num_channels(self.depth)

        factory_res = self.conv_op_factory(in_channels=current_in_channels,
                                           out_channels=current_in_channels,
                                           part='bottom', index=0)
        if isinstance(factory_res, tuple):
            self._conv_bottom_op, self._store_conv_bottom = factory_res
            if self._store_conv_bottom:
                self.n_channels_per_output.append(current_in_channels)
        else:
            self._conv_bottom_op = factory_res
            self._store_conv_bottom = False

    def _init__upstream(self):
        """Build the decoder (up-stream) convolution blocks."""
        conv_up_ops = []
        current_in_channels = self._get_num_channels(self.depth)

        for i in range(self.depth):
            # the number of out channels (set to self.out_channels for last decoder)
            out_channels = self.out_channels if i + 1 == self.depth else \
                self._get_num_channels(self.depth - i - 1)

            # if not residual we concat which needs twice as many channels
            fac = 1 if self.residual else 2

            # we flip the index that is given as argument to index consistently in up and
            # downstream conv factories
            op, return_op_res = self.conv_op_factory(in_channels=fac*current_in_channels,
                                                     out_channels=out_channels,
                                                     part='up', index=self.depth - i - 1)
            conv_up_ops.append(op)
            if return_op_res:
                self.n_channels_per_output.append(out_channels)
                self._store_conv_up.append(True)
            else:
                self._store_conv_up.append(False)

            # decrease the number of input_channels
            current_in_channels = out_channels

        # store as proper torch ModuleList
        self._conv_up_ops = nn.ModuleList(conv_up_ops)

        # the last block needs to be stored in any case
        if not self._store_conv_up[-1]:
            self._store_conv_up[-1] = True
            self.n_channels_per_output.append(out_channels)

    def _make_upsample_kwargs(self, upsample_mode):
        """To avoid some warning from pytorch, and some missing implementations,
        the arguments need to be handled carefully in this helper function.

        Args:
            upsample_mode (str): users choice for upsampling interpolation style.
        """
        if upsample_mode is None:
            upsample_mode = 'bilinear' if self.dim == 2 else 'trilinear'

        upsample_kwargs = dict(scale_factor=2, mode=upsample_mode)
        if upsample_mode in ('bilinear', 'trilinear'):
            # align_corners must be set explicitly to silence pytorch warnings
            upsample_kwargs['align_corners'] = False
        return upsample_kwargs

    def _forward_sanity_check(self, input):
        """Validate channel count, dimensionality and downsamplability."""
        if isinstance(input, tuple):
            raise RuntimeError("tuples of tensors are not supported")
        shape = input.shape

        if shape[1] != self.in_channels:
            raise RuntimeError("wrong number of channels: expected %d, got %d" %
                               (self.in_channels, input.size(1)))

        if input.dim() != self.dim + 2:
            raise RuntimeError("wrong number of dim: expected %d, got %d" %
                               (self.dim+2, input.dim()))

        self._check_scaling(input)

    # override if model has different scaling
    def _check_scaling(self, input):
        shape = input.shape
        mx = max_allowed_ds_steps(shape=shape[2:2+self.dim], factor=2)
        if mx < self.depth:
            raise RuntimeError("cannot downsample %d times, with shape %s" %
                               (self.depth, str(input.size())))

    def forward(self, input):
        # check if input is suitable
        self._forward_sanity_check(input=input)

        # collect all desired outputs
        side_out = []

        # remember all conv-block results of the downward part
        # of the UNet
        down_res = []

        #################################
        # downwards part
        #################################
        out = input
        for d in range(self.depth):

            out = self._conv_down_ops[d](out)
            down_res.append(out)

            if self._store_conv_down[d]:
                side_out.append(out)

            out = self._downsample_ops[d](out)

        #################################
        # bottom part
        #################################
        out = self._conv_bottom_op(out)
        if self._store_conv_bottom:
            side_out.append(out)

        #################################
        # upward part
        #################################
        down_res = list(reversed(down_res))  # <- eases indexing
        for d in range(self.depth):

            # upsample
            out = self._upsample_ops[d](out)

            # the result of the downward part
            a = down_res[d]

            # add or concat?
            if self.residual:
                out = a + out
            else:
                out = torch.cat([a, out], 1)

            # the convolutional block
            out = self._conv_up_ops[d](out)

            if self._store_conv_up[d]:
                side_out.append(out)

        # if len(side_out) == 1 we actually have no side output
        # just the main output
        if len(side_out) == 1:
            return side_out[0]
        else:
            return tuple(side_out)

    def downsample_op_factory(self, index):
        C = nn.MaxPool2d if self.dim == 2 else nn.MaxPool3d
        return C(kernel_size=2, stride=2)

    def upsample_op_factory(self, index):
        return InfernoUpsample(**self._upsample_kwargs)

    def conv_op_factory(self, in_channels, out_channels, part, index):
        raise NotImplementedError("conv_op_factory need to be implemented by deriving class")

    def _dropout(self, x):
        """Apply channel dropout if `p_dropout` was given, else pass through."""
        if self.use_dropout:
            return self._channel_dropout_op(x)
        else:
            return x
# TODO implement function to load a pretrained unet
class UNet(UNetBase):
    """
    Default 2d / 3d U-Net implementation following:
    https://arxiv.org/abs/1505.04597
    """
    def __init__(self, in_channels, out_channels, dim,
                 depth=4, initial_features=64, gain=2,
                 final_activation=None, p_dropout=None):
        # convolutional types for inner convolutions and output convolutions
        # NOTE: set before super().__init__ — presumably because the base
        # constructor builds blocks via conv_op_factory (below), which reads
        # self.default_conv; confirm against UNetBase.__init__.
        self.default_conv = ConvELU2D if dim == 2 else ConvELU3D
        last_conv = Conv2D if dim == 2 else Conv3D
        # init the base class; it receives initial_features as its in_channels
        # because _initial_conv (below) already maps in_channels -> initial_features
        super(UNet, self).__init__(in_channels=initial_features, dim=dim,
                                   depth=depth, gain=gain, p_dropout=p_dropout)
        # initial conv layer to go from the number of input channels, which are defined by the data
        # (usually 1 or 3) to the initial number of feature maps
        self._initial_conv = self.default_conv(in_channels, initial_features, 3)
        # get the final output conv and activation
        if isinstance(final_activation, str):
            activation = getattr(nn, final_activation)()
        elif isinstance(final_activation, nn.Module):
            activation = final_activation
        elif final_activation is None:
            activation = None
        else:
            raise NotImplementedError("Activation of type %s is not supported" % type(final_activation))
        # override the unet base attributes for out_channels
        self.out_channels = int(out_channels)
        if activation is None:
            self._output = last_conv(initial_features, self.out_channels, 1)
        else:
            self._output = nn.Sequential(last_conv(initial_features, self.out_channels, 1),
                                         activation)
    def forward(self, input):
        # TODO implement 2d from 3d input (see neurofire)
        x = self._initial_conv(input)
        x = super(UNet, self).forward(x)
        return self._output(x)
    def conv_op_factory(self, in_channels, out_channels, part, index):
        # is this the first convolutional block?
        first = (part == 'down' and index == 0)
        # if this is the first conv block, we just need
        # a single convolution, because we have the `_initial_conv` already
        if first:
            conv = self.default_conv(in_channels, out_channels, 3)
        else:
            conv = nn.Sequential(self.default_conv(in_channels, out_channels, 3),
                                 self.default_conv(out_channels, out_channels, 3))
        # second element: this block is never used as a side output
        return conv, False
| 14,315 | 36.37859 | 104 | py |
inferno | inferno-master/inferno/extensions/models/res_unet.py | import torch
import torch.nn as nn
from ..layers.convolutional import ConvActivation
from .unet import UNetBase
from ...utils.python_utils import require_dict_kwargs
__all__ = ['ResBlockUNet']
_all = __all__
# We only use this for the u-net implementation here
# in favor of less code duplication it might be a
# good ideat to replace this with 'ResidualBlock' from layers.convolutional_blocks
class _ResBlockBase(nn.Module):
    """Base residual block: a skip path plus ``size`` convolutions whose sum
    is optionally activated. Deriving classes supply the ops via the three
    factory hooks below.
    """
    def __init__(self, in_channels, out_channels, dim,
                 size=2, force_skip_op=False, activated=True):
        super(_ResBlockBase, self).__init__()
        self.in_channels = int(in_channels)
        self.out_channels = int(out_channels)
        self.size = int(size)
        self.activated = bool(activated)
        self.force_skip_op = bool(force_skip_op)
        self.dim = int(dim)
        # A learned skip op is only needed when the channel count changes
        # (or when explicitly forced); otherwise the skip path is the identity.
        if self.in_channels != self.out_channels or self.force_skip_op:
            self.activated_skip_op = self.activated_skip_op_factory(in_channels=self.in_channels,
                                                                    out_channels=self.out_channels)
        conv_ops = []
        activation_ops = []
        for i in range(self.size):
            # the convolutions
            # NOTE(review): both branches are identical. The first conv also
            # consumes out_channels because the (possibly identity) skip path
            # has already mapped the input to out_channels before the convs run
            # (see forward) — so this is redundant but not wrong.
            if i == 0:
                op = self.nonactivated_conv_op_factory(in_channels=self.out_channels,
                                                       out_channels=self.out_channels, index=i)
            else:
                op = self.nonactivated_conv_op_factory(in_channels=self.out_channels,
                                                       out_channels=self.out_channels, index=i)
            conv_ops.append(op)
            # the activations
            # NOTE(review): `i < self.size` is always true inside this loop, so
            # an activation is appended for every conv regardless of
            # self.activated — confirm whether `i + 1 < self.size` was intended
            # (forward only uses activation_ops[-1] when self.activated).
            if i < self.size or self.activated:
                activation_ops.append(self.activation_op_factory(index=i))
        self.conv_ops = nn.ModuleList(conv_ops)
        self.activation_ops = nn.ModuleList(activation_ops)
    def activated_skip_op_factory(self, in_channels, out_channels):
        # Hook: build the (activated) skip-path op mapping in -> out channels.
        raise NotImplementedError("activated_skip_op_factory need to be implemented by deriving class")
    def nonactivated_conv_op_factory(self, in_channels, out_channels, index):
        # Hook: build conv number `index` of the residual path (no activation).
        raise NotImplementedError("conv_op_factory need to be implemented by deriving class")
    def activation_op_factory(self, index):
        # Hook: default activation; deriving classes may override.
        return nn.ReLU()
    def forward(self, input):
        """Compute ``skip(input) + convs(skip(input))``, optionally activated."""
        if input.size(1) != self.in_channels:
            raise RuntimeError("wrong number of channels: expected %d, got %d"%
                (self.in_channels, input.size(1)))
        if input.dim() != self.dim + 2:
            raise RuntimeError("wrong number of dim: expected %d, got %d"%
                (self.dim+2, input.dim()))
        if self.in_channels != self.out_channels or self.force_skip_op:
            skip_res = self.activated_skip_op(input)
        else:
            skip_res = input
        assert skip_res.size(1) == self.out_channels
        res = skip_res
        for i in range(self.size):
            res = self.conv_ops[i](res)
            assert res.size(1) == self.out_channels
            # activate between convs, but not after the last one (handled below)
            if i + 1 < self.size:
                res = self.activation_ops[i](res)
        non_activated = skip_res + res
        if self.activated:
            return self.activation_ops[-1](non_activated)
        else:
            return non_activated
class _ResBlock(_ResBlockBase):
    """Concrete residual block built from ``ConvActivation`` convolutions with
    optional batchnorm; the skip path is a 1x1 convolution.
    """
    def __init__(self, in_channels, out_channels, dim, size=2, activated=True,
                 activation='ReLU', batchnorm=True, force_skip_op=False, conv_kwargs=None):
        # trick to store nn-module before call of super
        # => we put it in a list (a bare module attribute would be rejected /
        #    registered by nn.Module machinery before __init__ ran)
        if isinstance(activation, str):
            self.activation_op = [getattr(torch.nn, activation)()]
        elif isinstance(activation, nn.Module):
            self.activation_op = [activation]
        else:
            raise RuntimeError("activation must be a striong or a torch.nn.Module")
        # keywords for conv
        if conv_kwargs is None:
            conv_kwargs = dict(
                kernel_size=3, dim=dim, activation=None,
                stride=1, dilation=1, groups=None, depthwise=False, bias=True,
                deconv=False, initialization=None
            )
        elif isinstance(conv_kwargs, dict):
            # activation is handled by this block itself, never inside the conv
            conv_kwargs['activation'] = None
        else:
            raise RuntimeError("conv_kwargs must be either None or a dict")
        # these must exist before super().__init__, which calls the factory
        # methods below during construction
        self.conv_kwargs = conv_kwargs
        self.dim = dim
        self.batchnorm = batchnorm
        self.conv_1x1_kwargs = dict(kernel_size=1, dim=dim, activation=None,
                                    stride=1, dilation=1, groups=None,
                                    depthwise=False, bias=True, deconv=False,
                                    initialization=None)
        super(_ResBlock, self).__init__(in_channels=in_channels,
                                        out_channels=out_channels,
                                        dim=dim, size=size,
                                        force_skip_op=force_skip_op,
                                        activated=activated)
    def activated_skip_op_factory(self, in_channels, out_channels):
        # 1x1 conv (+ optional BN) + activation maps the skip path in -> out.
        conv_op = ConvActivation(in_channels=in_channels,
                                 out_channels=out_channels, **self.conv_1x1_kwargs)
        if self.batchnorm:
            batchnorm_op = self.batchnorm_op_factory(in_channels=out_channels)
            return torch.nn.Sequential(conv_op, batchnorm_op, self.activation_op[0])
        else:
            return torch.nn.Sequential(conv_op, self.activation_op[0])
    def nonactivated_conv_op_factory(self, in_channels, out_channels, index):
        # Residual-path conv (+ optional BN); activation is applied by the base.
        conv_op = ConvActivation(in_channels=in_channels,
                                 out_channels=out_channels, **self.conv_kwargs)
        if self.batchnorm:
            batchnorm_op = self.batchnorm_op_factory(in_channels=out_channels)
            return torch.nn.Sequential(conv_op, batchnorm_op)
        else:
            return conv_op
    def activation_op_factory(self, index):
        # The same activation module instance is shared by all positions.
        return self.activation_op[0]
    def batchnorm_op_factory(self, in_channels):
        # Resolve BatchNorm1d/2d/3d by dimensionality.
        bn_cls_name = 'BatchNorm{}d'.format(int(self.dim))
        bn_op_cls = getattr(torch.nn, bn_cls_name)
        return bn_op_cls(in_channels)
# TODO not sure how to handle out-channels properly.
# For now, we just force the corrcect number in the last decoder layer
class ResBlockUNet(UNetBase):
    """U-Net whose convolutional blocks are residual blocks (``_ResBlock``).

    Attributes
    ----------
    activated : bool
        Whether the very last residual block (up path, index 0) is activated.
    dim : int
        Spatial dimensionality.
    res_block_kwargs : dict
        Extra keyword arguments forwarded to every ``_ResBlock``.
    side_out_parts : set
        Parts ('down' / 'bottom' / 'up') whose block outputs are returned as
        side outputs by the U-Net forward pass.
    unet_kwargs : dict
        Extra keyword arguments forwarded to ``UNetBase``.
    """
    def __init__(self, in_channels, dim, out_channels, unet_kwargs=None,
                 res_block_kwargs=None, activated=True,
                 side_out_parts=None):
        # all attributes below must be set before super().__init__, since the
        # base constructor calls conv_op_factory (which reads them)
        self.dim = dim
        self.unet_kwargs = require_dict_kwargs(unet_kwargs, "unet_kwargs must be a dict or None")
        self.res_block_kwargs = require_dict_kwargs(res_block_kwargs,
                                                    "res_block_kwargs must be a dict or None")
        self.activated = activated
        # normalize side_out_parts to a set (str, sequence, or nothing)
        if isinstance(side_out_parts, str):
            self.side_out_parts = set([side_out_parts])
        elif isinstance(side_out_parts, (tuple,list)):
            self.side_out_parts = set(side_out_parts)
        else:
            self.side_out_parts = set()
        super(ResBlockUNet, self).__init__(in_channels=in_channels,
                                           out_channels=out_channels,
                                           dim=dim,
                                           **self.unet_kwargs)
    def conv_op_factory(self, in_channels, out_channels, part, index):
        # is this the very last convolutional block?
        very_last = (part == 'up' and index == 0)
        # should the residual block be activated?
        activated = not very_last or self.activated
        # should the output be part of the overall
        # return-list in the forward pass of the UNet
        use_as_output = part in self.side_out_parts
        # residual block used within the UNet
        return _ResBlock(in_channels=in_channels, out_channels=out_channels,
                         dim=self.dim, activated=activated,
                         **self.res_block_kwargs), use_as_output
| 8,404 | 39.408654 | 103 | py |
inferno | inferno-master/inferno/extensions/metrics/categorical.py | import torch
from .base import Metric
from ...utils.torch_utils import flatten_samples, is_label_tensor
from ...utils.exceptions import assert_, DTypeError, ShapeError
class CategoricalError(Metric):
    """Classification error rate (fraction or count of misclassified samples)."""
    def __init__(self, aggregation_mode='mean'):
        assert aggregation_mode in ['mean', 'sum']
        self.aggregation_mode = aggregation_mode
    def forward(self, prediction, target):
        """Compare ``prediction`` against integer ``target`` labels.

        A prediction with no channel axis (or a single channel) is treated as
        binary with a 0.5 threshold; otherwise the argmax over channels is used.
        """
        binary = prediction.dim() == 1 or prediction.size(1) == 1
        # Collapse a trailing singleton channel axis on the target, if any.
        if target.dim() > 1:
            target = target.squeeze(1)
        assert target.dim() == 1
        if binary:
            mismatch = (prediction > 0.5).type_as(target).ne(target).float()
        else:
            _, predicted_class = torch.max(prediction, 1)
            if predicted_class.dim() == prediction.dim():
                # Older pytorch (0.1.12) keeps the reduced axis; drop it.
                predicted_class = predicted_class.squeeze(1)
            mismatch = predicted_class.type_as(target).ne(target).float()
        if self.aggregation_mode == 'mean':
            return mismatch.mean()
        return mismatch.sum()
class IOU(Metric):
    """Intersection over Union.

    ``ignore_class`` excludes one class (index, or -1 for the last) from the
    mean; ``sharpen_prediction`` replaces the soft prediction with a one-hot
    argmax; ``eps`` guards against division by zero.
    """
    def __init__(self, ignore_class=None, sharpen_prediction=False, eps=1e-6):
        super(IOU, self).__init__()
        self.eps = eps
        self.ignore_class = ignore_class
        self.sharpen_prediction = sharpen_prediction
    def forward(self, prediction, target):
        # Assume that is one of:
        # prediction.shape = (N, C, H, W)
        # prediction.shape = (N, C, D, H, W)
        # prediction.shape = (N, C)
        # The corresponding target shapes are either:
        # target.shape = (N, H, W)
        # target.shape = (N, D, H, W)
        # target.shape = (N,)
        # Or:
        # target.shape = (N, C, H, W)
        # target.shape = (N, C, D, H, W)
        # target.shape = (N, C)
        # First, reshape prediction to (C, -1)
        flattened_prediction = flatten_samples(prediction)
        # Take measurements
        num_classes, num_samples = flattened_prediction.size()
        # We need to figure out if the target is a int label tensor or a onehot tensor.
        # The former always has one dimension less, so
        if target.dim() == (prediction.dim() - 1):
            # Labels, we need to go one hot
            # Make sure it's a label
            assert_(is_label_tensor(target),
                    "Target must be a label tensor (of dtype long) if it has one "
                    "dimension less than the prediction.",
                    DTypeError)
            # Reshape target to (1, -1) for it to work with scatter
            flattened_target = target.view(1, -1)
            # Convert target to onehot with shape (C, -1)
            # Make sure the target is consistent
            assert_(target.max() < num_classes)
            onehot_targets = flattened_prediction \
                .new(num_classes, num_samples) \
                .zero_() \
                .scatter_(0, flattened_target, 1)
        elif target.dim() == prediction.dim():
            # Onehot, nothing to do except flatten
            onehot_targets = flatten_samples(target)
        else:
            raise ShapeError("Target must have the same number of dimensions as the "
                             "prediction, or one less. Got target.dim() = {} but "
                             "prediction.dim() = {}.".format(target.dim(), prediction.dim()))
        # Cast onehot_targets to float if required (this is a no-op if it's already float)
        onehot_targets = onehot_targets.float()
        # Sharpen prediction if required to. Sharpening in this sense means to replace
        # the max predicted probability with 1.
        if self.sharpen_prediction:
            _, predicted_classes = torch.max(flattened_prediction, 0)
            # Case for pytorch 0.2, where predicted_classes is (N,) instead of (1, N)
            if predicted_classes.dim() == 1:
                predicted_classes = predicted_classes.view(1, -1)
            # Scatter
            flattened_prediction = flattened_prediction\
                .new(num_classes, num_samples).zero_().scatter_(0, predicted_classes, 1)
        # Now to compute the IOU = (a * b).sum()/(a**2 + b**2 - a * b).sum()
        # We sum over all samples to obtain a classwise iou
        numerator = (flattened_prediction * onehot_targets).sum(-1)
        # denominator = (a - b)^2 + a*b = a^2 + b^2 - a*b, clamped at eps.
        # NOTE(review): sub_/pow_/clamp_ mutate flattened_prediction in place —
        # if flatten_samples returns a *view* of `prediction`, the caller's
        # tensor is modified as a side effect; confirm flatten_samples copies.
        denominator = \
            flattened_prediction.sub_(onehot_targets).pow_(2).clamp_(min=self.eps).sum(-1) + \
            numerator
        classwise_iou = numerator.div_(denominator)
        # If we're ignoring a class, don't count its contribution to the mean
        if self.ignore_class is not None:
            ignore_class = self.ignore_class \
                if self.ignore_class != -1 else onehot_targets.size(0) - 1
            assert_(ignore_class < onehot_targets.size(0),
                    "`ignore_class` = {} must be at least one less than the number "
                    "of classes = {}.".format(ignore_class, onehot_targets.size(0)),
                    ValueError)
            num_classes = onehot_targets.size(0)
            dont_ignore_class = list(range(num_classes))
            dont_ignore_class.pop(ignore_class)
            if classwise_iou.is_cuda:
                dont_ignore_class = \
                    torch.LongTensor(dont_ignore_class).cuda(classwise_iou.get_device())
            else:
                dont_ignore_class = torch.LongTensor(dont_ignore_class)
            iou = classwise_iou[dont_ignore_class].mean()
        else:
            iou = classwise_iou.mean()
        return iou
class NegativeIOU(IOU):
    """Negated intersection-over-union (e.g. for use as a minimizable objective)."""
    def forward(self, prediction, target):
        iou = super(NegativeIOU, self).forward(prediction, target)
        return -1 * iou
| 6,159 | 44.294118 | 94 | py |
inferno | inferno-master/inferno/extensions/containers/graph.py | from collections import OrderedDict
import sys
import threading
import multiprocessing as mp
import copy
import gc
import networkx as nx
from networkx import is_directed_acyclic_graph, topological_sort
from torch import nn as nn
from ...utils import python_utils as pyu
from ...utils.exceptions import assert_
from ..layers.device import OnDevice
from ..layers.identity import Identity
__all__ = ['NNGraph', 'Graph']
class NNGraph(nx.DiGraph):
    """A NetworkX DiGraph, except that node and edge ordering matters."""
    # We don't copy torch tensors, only to have them deleted.
    ATTRIBUTES_TO_NOT_COPY = {'payload'}
    # OrderedDict factories preserve insertion order of nodes and adjacencies.
    node_dict_factory = OrderedDict
    adjlist_dict_factory = OrderedDict
    def copy(self, **init_kwargs):
        """Structural copy: nodes, edges and deep-copied attributes, minus
        everything in ``ATTRIBUTES_TO_NOT_COPY``.

        NOTE(review): ``edges_iter`` and the ``.node`` attribute are
        NetworkX 1.x APIs (removed in networkx >= 2.0), while other code in
        this file uses the 2.x-style ``graph.nodes`` mapping — confirm the
        pinned networkx version. Also, isolated nodes (without any edge) are
        not copied, since only edges are iterated here.
        """
        new = type(self)(**init_kwargs)
        # Remove all attributes and copy only the graph structure
        for source, target in self.edges_iter():
            # Add new nodes
            new.add_node(source)
            new.add_node(target)
            # Copy attributes
            new.node[source].update(copy.deepcopy({key: value
                                                   for key, value in self.node[source].items()
                                                   if key not in self.ATTRIBUTES_TO_NOT_COPY}))
            new.node[target].update(copy.deepcopy({key: value
                                                   for key, value in self.node[target].items()
                                                   if key not in self.ATTRIBUTES_TO_NOT_COPY}))
            # Add new edge
            new.add_edge(copy.deepcopy(source), copy.deepcopy(target))
            old_edge_attributes = self[source][target]
            new_edge_attributes = {key: value for key, value in old_edge_attributes.items()
                                   if key not in self.ATTRIBUTES_TO_NOT_COPY}
            new_edge_attributes = copy.deepcopy(new_edge_attributes)
            new[source][target].update(new_edge_attributes)
        return new
class Graph(nn.Module):
"""
A graph structure to build networks with complex architectures. The resulting graph model
can be used like any other `torch.nn.Module`. The graph structure used behind the scenes
is a `networkx.DiGraph`. This internal graph is exposed by the `apply_on_graph` method,
which can be used with any NetworkX function (e.g. for plotting with matplotlib or GraphViz).
Examples
--------
The naive inception module (without the max-pooling for simplicity) with ELU-layers of 64 units
can be built as following, (assuming 64 input channels):
>>> from inferno.extensions.layers.reshape import Concatenate
>>> from inferno.extensions.layers.convolutional import ConvELU2D
>>> import torch
>>> # Build the model
>>> inception_module = Graph()
>>> inception_module.add_input_node('input')
>>> inception_module.add_node('conv1x1', ConvELU2D(64, 64, 3), previous='input')
>>> inception_module.add_node('conv3x3', ConvELU2D(64, 64, 3), previous='input')
>>> inception_module.add_node('conv5x5', ConvELU2D(64, 64, 3), previous='input')
>>> inception_module.add_node('cat', Concatenate(),
>>> previous=['conv1x1', 'conv3x3', 'conv5x5'])
>>> inception_module.add_output_node('output', 'cat')
>>> # Build dummy variable
>>> input = torch.rand(1, 64, 100, 100)
>>> # Get output
>>> output = inception_module(input)
"""
    def __init__(self, graph=None):
        """
        Construct the graph object.
        Parameters
        ----------
        graph : networkx.DiGraph or NNGraph
            Graph to build the object from (optional).
        """
        super(Graph, self).__init__()
        # Privates
        # Each thread gets its own graph copy (see the `graph` property),
        # keyed by the thread identifier.
        self._thread_to_graph_mapping = {}
        self._creator_thread = threading.get_ident()
        # NOTE(review): the creator pid is recorded but never read anywhere in
        # this file — confirm whether it is still needed.
        self._creator_pid = mp.current_process().pid
        # Publics
        if graph is not None:
            self.graph = graph
        else:
            self.graph = NNGraph()
    @property
    def graph(self):
        """The calling thread's ``NNGraph``; lazily copied from the creator thread's graph."""
        # `graph` needs to be different for every thread, because torch.nn.parallel.replicate does
        # not make a copy.
        graph = self._thread_to_graph_mapping.get(threading.get_ident())
        if graph is None:
            creator_thread_graph = self._thread_to_graph_mapping.get(self._creator_thread)
            assert creator_thread_graph is not None
            graph = creator_thread_graph.copy()
            # We don't need to clear payloads because the copy method of NNGraph copies only the
            # graph structure and not the attributes
            self._thread_to_graph_mapping.update({threading.get_ident(): graph})
        return graph
@graph.setter
def graph(self, value):
assert_(isinstance(value, NNGraph), exception_type=TypeError)
self._thread_to_graph_mapping.update({threading.get_ident(): value})
def is_node_in_graph(self, name):
"""
Checks whether a node is in the graph.
Parameters
----------
name : str
Name of the node.
Returns
-------
bool
"""
return name in self.graph.nodes
def is_source_node(self, name):
"""
Checks whether a given node (by name) is a source node.
A source node has no incoming edges.
Parameters
----------
name : str
Name of the node.
Returns
-------
bool
Raises
------
AssertionError
if node is not found in the graph.
"""
assert self.is_node_in_graph(name)
return self.graph.in_degree(name) == 0
def is_sink_node(self, name):
"""
Checks whether a given node (by name) is a sink node.
A sink node has no outgoing edges.
Parameters
----------
name : str
Name of the node.
Returns
-------
bool
Raises
------
AssertionError
if node is not found in the graph.
"""
assert self.is_node_in_graph(name)
return self.graph.out_degree(name) == 0
@property
def output_nodes(self):
"""
Gets a list of output nodes. The order is relevant and is the same as that
in which the forward method returns its outputs.
Returns
-------
list
A list of names (str) of the output nodes.
"""
return [name for name, node_attributes in self.graph.nodes.items()
if node_attributes.get('is_output_node', False)]
@property
def input_nodes(self):
"""
Gets a list of input nodes. The order is relevant and is the same as that
in which the forward method accepts its inputs.
Returns
-------
list
A list of names (str) of the input nodes.
"""
return [name for name, node_attributes in self.graph.nodes.items()
if node_attributes.get('is_input_node', False)]
@property
def graph_is_valid(self):
"""Checks if the graph is valid."""
# Check if the graph is a DAG
is_dag = is_directed_acyclic_graph(self.graph)
# Check if output nodes are sinks
output_nodes_are_sinks = all([self.is_sink_node(name) for name in self.output_nodes])
# Check inf input nodes are sources
input_nodes_are_sources = all([self.is_source_node(name) for name in self.input_nodes])
# TODO Check whether only input nodes are sources and only output nodes are sinks
# Conclude
is_valid = is_dag and output_nodes_are_sinks and input_nodes_are_sources
return is_valid
    def assert_graph_is_valid(self):
        """Asserts that the graph is valid: a DAG in which every output node is
        a connected sink and every input node a connected source."""
        assert is_directed_acyclic_graph(self.graph), "Graph is not a DAG."
        for name in self.output_nodes:
            assert self.is_sink_node(name), "Output node {} is not a sink.".format(name)
            assert not self.is_source_node(name), "Output node {} is a source node. " \
                                                  "Make sure it's connected.".format(name)
        for name in self.input_nodes:
            assert self.is_source_node(name), "Input node {} is not a source.".format(name)
            assert not self.is_sink_node(name), "Input node {} is a sink node. " \
                                                "Make sure it's connected.".format(name)
def add_node(self, name, module, previous=None):
"""
Add a node to the graph.
Parameters
----------
name : str
Name of the node. Nodes are identified by their names.
module : torch.nn.Module
Torch module for this node.
previous : str or list of str
(List of) name(s) of the previous node(s).
Returns
-------
Graph
self
"""
assert isinstance(module, nn.Module)
self.add_module(name, module)
self.graph.add_node(name)
if previous is not None:
for _previous in pyu.to_iterable(previous):
self.add_edge(_previous, name)
return self
def add_input_node(self, name):
"""
Add an input to the graph. The order in which input nodes are added is the
order in which the forward method accepts its inputs.
Parameters
----------
name : str
Name of the input node.
Returns
-------
Graph
self
"""
self.add_module(name, Identity())
self.graph.add_node(name, is_input_node=True)
return self
def add_output_node(self, name, previous=None):
"""
Add an output to the graph. The order in which output nodes are added is the
order in which the forward method returns its outputs.
Parameters
----------
name : str
Name of the output node.
Returns
-------
Graph
self
"""
self.graph.add_node(name, is_output_node=True)
if previous is not None:
for _previous in pyu.to_iterable(previous):
self.add_edge(_previous, name)
return self
def add_edge(self, from_node, to_node):
"""
Add an edge between two nodes.
Parameters
----------
from_node : str
Name of the source node.
to_node : str
Name of the target node.
Returns
-------
Graph
self
Raises
------
AssertionError
if either of the two nodes is not in the graph,
or if the edge is not 'legal'.
"""
assert self.is_node_in_graph(from_node)
assert self.is_node_in_graph(to_node)
self.graph.add_edge(from_node, to_node)
assert self.graph_is_valid
return self
def apply_on_graph(self, function, *args, **kwargs):
"""Applies a `function` on the internal graph."""
return function(self, *args, **kwargs)
def get_module_for_nodes(self, names):
"""
Gets the `torch.nn.Module` object for nodes corresponding to `names`.
Parameters
----------
names : str or list of str
Names of the nodes to fetch the modules of.
Returns
-------
list or torch.nn.Module
Module or a list of modules corresponding to `names`.
"""
names = pyu.to_iterable(names)
modules = []
for name in names:
assert self.is_node_in_graph(name), "Node '{}' is not in graph.".format(name)
module = getattr(self, name, None)
assert module is not None, "Node '{}' is in the graph but could not find a module " \
"corresponding to it.".format(name)
modules.append(module)
return pyu.from_iterable(modules)
def to_device(self, names, target_device, device_ordinal=None, asynchronous=False):
"""Transfer nodes in the network to a specified device."""
names = pyu.to_iterable(names)
for name in names:
assert self.is_node_in_graph(name), "Node '{}' is not in graph.".format(name)
module = getattr(self, name, None)
assert module is not None, "Node '{}' is in the graph but could not find a module " \
"corresponding to it.".format(name)
# Transfer
module_on_device = OnDevice(module, target_device,
device_ordinal=device_ordinal,
asynchronous=asynchronous)
setattr(self, name, module_on_device)
return self
def get_parameters_for_nodes(self, names, named=False):
"""Get parameters of all nodes listed in `names`."""
if not named:
parameters = (parameter
for module in pyu.to_iterable(self.get_module_for_nodes(names))
for parameter in module.parameters())
else:
parameters = ((name, parameter)
for module in pyu.to_iterable(self.get_module_for_nodes(names))
for name, parameter in module.named_parameters())
return parameters
def clear_payloads(self, graph=None):
graph = self.graph if graph is None else graph
for edge in list(graph.edges(data=True)):
source, target, _ = edge
if 'payload' in graph[source][target]:
del graph[source][target]['payload']
    def forward_through_node(self, name, input=None):
        """Evaluate node ``name`` and route its outputs.

        Input is gathered from (and cleared off) the incoming edges' 'payload'
        attributes unless given explicitly; outputs are stored as 'payload' on
        the outgoing edges (replicated if the node yields a single output).
        """
        # If input is a tuple/list, it will NOT be unpacked.
        # Make sure the node is in the graph
        if input is None:
            # Make sure the node is not a source node
            assert not self.is_source_node(name), \
                "Node '{}' did not get an input but is a source node.".format(name)
            # Get input from payload
            incoming_edges = self.graph.in_edges(name)
            input = []
            for incoming, this in incoming_edges:
                # Append to input
                input.append(self.graph[incoming][this]['payload'])
                # Clear reference for the garbage collector to do its thing
                del self.graph[incoming][this]['payload']
        else:
            assert self.is_node_in_graph(name)
            # Convert input to list
            input = [input]
        # Get outputs
        try:
            outputs = pyu.to_iterable(getattr(self, name)(*input))
        except Exception as e:
            # Re-raise with a diagram of the node's incoming edges and shapes.
            input_spec_string = "\n".join(["--[{}]-{}-->[{}]".format(incoming,
                                                                     tuple(_input.size()),
                                                                     this)
                                           for (incoming, this), _input in
                                           zip(self.graph.in_edges(name), input)])
            message = "In node '{}': {}\n" \
                      "Inputs to this node were:\n{}"\
                .format(name, str(e), input_spec_string)
            raise type(e)(message).with_traceback(sys.exc_info()[2])
        # Distribute outputs to outgoing payloads if required
        if not self.is_sink_node(name):
            outgoing_edges = self.graph.out_edges(name)
            if len(outputs) == 1:
                # Support for replication
                outputs *= len(outgoing_edges)
            # Make sure the number of outputs check out
            assert len(outputs) == len(outgoing_edges), \
                "Number of outputs from the model ({}) does not match the number " \
                "of out-edges ({}) in the graph for this node ('{}').".format(len(outputs),
                                                                              len(outgoing_edges),
                                                                              name)
            for (this, outgoing), output in zip(outgoing_edges, outputs):
                self.graph[this][outgoing].update({'payload': output})
        # Collect garbage to free some GPU memory?
        del input
        gc.collect()
        # Return outputs
        return pyu.from_iterable(outputs)
    def forward(self, *inputs):
        """Evaluate the whole graph on ``inputs`` (one per input node, in order)
        and return the output-node payloads (unwrapped if there is only one).
        """
        self.assert_graph_is_valid()
        input_nodes = self.input_nodes
        output_nodes = self.output_nodes
        assert len(inputs) == len(input_nodes), "Was expecting {} " \
                                                "arguments for as many input nodes, got {}."\
            .format(len(input_nodes), len(inputs))
        # Unpack inputs to input nodes
        for input, input_node in zip(inputs, input_nodes):
            self.forward_through_node(input_node, input=input)
        # Toposort the graph
        toposorted = topological_sort(self.graph)
        # Remove all input and output nodes
        toposorted = [name for name in toposorted
                      if name not in input_nodes and name not in output_nodes]
        # Since we'll be clearing payloads anyway, it makes no sense whatsoever
        # to evaluate sink nodes
        toposorted = [name for name in toposorted if not self.is_sink_node(name)]
        # Forward
        for node in toposorted:
            self.forward_through_node(node)
        # Read outputs from output nodes
        outputs = []
        for output_node in output_nodes:
            # Get all incoming edges to output node
            outputs_from_node = [self.graph[incoming][this]['payload']
                                 for incoming, this in self.graph.in_edges(output_node)]
            outputs.append(pyu.from_iterable(outputs_from_node))
        # Clear payloads for next pass
        self.clear_payloads()
        # Done.
        return pyu.from_iterable(outputs)
| 18,135 | 37.020964 | 99 | py |
inferno | inferno-master/inferno/extensions/containers/sequential.py | import torch.nn as nn
from ...utils import python_utils as pyu
__all__ = ['Sequential1', 'Sequential2']
class Sequential1(nn.Sequential):
    """``torch.nn.Sequential`` plus a ``__len__`` reporting the child-module count."""
    def __len__(self):
        # Counting the dict directly is equivalent to counting its values view.
        return len(self._modules)
class Sequential2(Sequential1):
    """Sequential container whose children may consume and produce multiple values.

    Identical to ``torch.nn.Sequential``, except each child is called with the
    (unpacked) outputs of the previous child.
    """
    def forward(self, *input):
        intermediate = input
        for module in self._modules.values():
            intermediate = pyu.to_iterable(module(*pyu.to_iterable(intermediate)))
        return pyu.from_iterable(intermediate)
| 658 | 27.652174 | 90 | py |
inferno | inferno-master/inferno/extensions/layers/identity.py | import torch.nn as nn
# BUGFIX: __all__ previously listed 'identity' (lowercase), a name that does
# not exist in this module, so `from ... import *` raised AttributeError.
# The public name is the class `Identity`.
__all__ = ['Identity']
_all = __all__
class Identity(nn.Module):
    """A no-op module: ``forward`` returns its input unchanged."""
    def __init__(self):
        super(Identity, self).__init__()
    def forward(self, x):
        # Pass-through: no computation, no state.
        return x
inferno | inferno-master/inferno/extensions/layers/convolutional.py | import torch.nn as nn
import sys
import functools
from ..initializers import (
OrthogonalWeightsZeroBias,
KaimingNormalWeightsZeroBias,
SELUWeightsZeroBias,
)
from ..initializers import Initializer
from .normalization import BatchNormND
from .activations import SELU
from ...utils.exceptions import assert_, ShapeError
from ...utils.partial_cls import register_partial_cls
# we append to this later on
__all__ = [
"GlobalConv2D",
]
_all = __all__
register_partial_cls_here = functools.partial(register_partial_cls, module=__name__)
class ConvActivation(nn.Module):
    """Convolutional layer with 'SAME' padding by default followed by an activation.

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int or None or 'auto'
        Number of output channels. For depthwise convolutions this may be
        None or 'auto', in which case it defaults to ``in_channels``.
    kernel_size : int or sequence of int
        Kernel size per spatial axis; must be odd for 'SAME' padding.
    dim : int
        Dimensionality of the convolution; one of {1, 2, 3}.
    activation : str or torch.nn.Module or None
        Activation applied after the convolution. A string is looked up in
        ``torch.nn`` (e.g. 'ReLU'); None disables the activation.
    stride, dilation, bias :
        Forwarded to the underlying ``torch.nn.Conv*d``.
    groups : int or None
        Convolution groups; for depthwise convolutions must be None or equal
        to ``in_channels``.
    depthwise : bool
        If True, build a depthwise convolution (groups == in_channels).
    deconv : bool
        If True, use a transposed convolution (no 'SAME' padding applied).
    initialization : Initializer or None
        Optional initializer applied to the convolution module.
    valid_conv : bool
        If True, build a 'VALID' (unpadded) convolution instead of 'SAME'.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        dim,
        activation,
        stride=1,
        dilation=1,
        groups=None,
        depthwise=False,
        bias=True,
        deconv=False,
        initialization=None,
        valid_conv=False,
    ):
        super(ConvActivation, self).__init__()
        # Validate dim
        assert_(
            dim in [1, 2, 3],
            "`dim` must be one of [1, 2, 3], got {}.".format(dim),
            ShapeError,
        )
        self.dim = dim
        # Check if depthwise
        if depthwise:
            # We know that in_channels == out_channels, but we also want a consistent API.
            # As a compromise, we allow that out_channels be None or 'auto'.
            # BUGFIX: this previously read `else out_channel` (a NameError
            # whenever depthwise=True was used with an explicit out_channels).
            out_channels = in_channels if out_channels in [None, "auto"] else out_channels
            assert_(
                in_channels == out_channels,
                "For depthwise convolutions, number of input channels (given: {}) "
                "must equal the number of output channels (given {}).".format(
                    in_channels, out_channels
                ),
                ValueError,
            )
            assert_(
                groups is None or groups == in_channels,
                "For depthwise convolutions, groups (given: {}) must "
                "equal the number of channels (given: {}).".format(groups, in_channels),
            )
            groups = in_channels
        else:
            groups = 1 if groups is None else groups
        self.depthwise = depthwise
        if valid_conv:
            # 'VALID' convolution: no padding at all.
            self.conv = getattr(nn, "Conv{}d".format(self.dim))(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=stride,
                dilation=dilation,
                groups=groups,
                bias=bias,
            )
        elif not deconv:
            # 'SAME' convolution: compute the padding that preserves the shape.
            padding = self.get_padding(kernel_size, dilation)
            self.conv = getattr(nn, "Conv{}d".format(self.dim))(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                padding=padding,
                stride=stride,
                dilation=dilation,
                groups=groups,
                bias=bias,
            )
        else:
            # Transposed ('de-') convolution.
            self.conv = getattr(nn, "ConvTranspose{}d".format(self.dim))(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=stride,
                dilation=dilation,
                groups=groups,
                bias=bias,
            )
        if initialization is None:
            pass
        elif isinstance(initialization, Initializer):
            self.conv.apply(initialization)
        else:
            raise NotImplementedError
        if isinstance(activation, str):
            self.activation = getattr(nn, activation)()
        elif isinstance(activation, nn.Module):
            self.activation = activation
        elif activation is None:
            self.activation = None
        else:
            raise NotImplementedError

    def forward(self, input):
        """Apply the convolution, then the activation (if any)."""
        conved = self.conv(input)
        if self.activation is not None:
            activated = self.activation(conved)
        else:
            # No activation
            activated = conved
        return activated

    def _pair_or_triplet(self, object_):
        # Broadcast a scalar to a per-axis sequence of length self.dim.
        if isinstance(object_, (list, tuple)):
            assert len(object_) == self.dim
            return object_
        else:
            object_ = [object_] * self.dim
            return object_

    def _get_padding(self, _kernel_size, _dilation):
        # 'SAME' padding for odd kernels: ((k - 1) / 2) * dilation.
        assert isinstance(_kernel_size, int)
        assert isinstance(_dilation, int)
        assert _kernel_size % 2 == 1
        return ((_kernel_size - 1) // 2) * _dilation

    def get_padding(self, kernel_size, dilation):
        """Per-axis 'SAME' padding for the given kernel size and dilation."""
        kernel_size = self._pair_or_triplet(kernel_size)
        dilation = self._pair_or_triplet(dilation)
        padding = [
            self._get_padding(_kernel_size, _dilation)
            for _kernel_size, _dilation in zip(kernel_size, dilation)
        ]
        return tuple(padding)
# for consistency with the generated dimension-agnostic `...ND` class names below
ConvActivationND = ConvActivation
# noinspection PyUnresolvedReferences
class _BNReLUSomeConv(object):
    """Mixin giving pre-activation order: batchnorm -> activation -> conv.

    The `batchnorm`, `activation` and `conv` attributes are supplied by the
    class this is mixed into.
    """
    def forward(self, input):
        activated = self.activation(self.batchnorm(input))
        return self.conv(activated)
class BNReLUConvBaseND(_BNReLUSomeConv, ConvActivation):
    """Pre-activation conv block: BatchNorm -> ReLU -> Conv (or Deconv).

    The forward order comes from the `_BNReLUSomeConv` mixin; the conv layer,
    ReLU activation and Kaiming initialization come from `ConvActivation`.
    """
    def __init__(self, in_channels, out_channels, kernel_size, dim, stride=1, dilation=1, deconv=False):
        super(BNReLUConvBaseND, self).__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            dim=dim,
            stride=stride,
            activation=nn.ReLU(inplace=True),
            dilation=dilation,
            deconv=deconv,
            initialization=KaimingNormalWeightsZeroBias(0),
        )
        # Batchnorm acts on the *input* channels since it is applied first.
        self.batchnorm = BatchNormND(dim, in_channels)
def _register_conv_cls(conv_name, fix=None, default=None):
    """Generate and register `ConvActivation` partial classes.

    For each activation in {ReLU, ELU, Sigmoid, SELU, <none>} this registers a
    dimension-agnostic `<conv_name><activation>ND` class plus fixed-dimension
    `<conv_name><activation>{1,2,3}D` variants, appending every name to
    `__all__`. Weight initialization is chosen per activation (Kaiming for
    ReLU, SELU-specific for SELU, orthogonal otherwise).

    Parameters
    ----------
    conv_name : str
        Stem of the generated class names (e.g. 'Conv', 'Deconv').
    fix : dict or None
        Constructor arguments hard-fixed on the generated classes.
    default : dict or None
        Constructor argument defaults for the generated classes.
    """
    if fix is None:
        fix = {}
    if default is None:
        default = {}
    # simple conv activation
    activations = ["ReLU", "ELU", "Sigmoid", "SELU", ""]
    init_map = {
        "ReLU": KaimingNormalWeightsZeroBias,
        "SELU": SELUWeightsZeroBias
    }
    for activation_str in activations:
        # FIX: was `cls_name = cls_name = ...` (duplicated-assignment typo).
        cls_name = "{}{}ND".format(conv_name, activation_str)
        __all__.append(cls_name)
        initialization_cls = init_map.get(activation_str, OrthogonalWeightsZeroBias)
        if activation_str == "":
            # Activation-free variant: activation stays overridable (a
            # default, not a fix).
            activation = None
            _fix = {**fix}
            # FIX: merge the caller-supplied defaults instead of discarding
            # them (every other branch keeps `default`; this one dropped it,
            # so e.g. the Deconv kernel_size/stride defaults were lost).
            _default = {**default, 'activation': None}
        elif activation_str == "SELU":
            # SELU must be fixed as a module so inplace=True is preserved.
            activation = nn.SELU(inplace=True)
            _fix = {**fix, 'activation': activation}
            _default = {**default}
        else:
            activation = activation_str
            _fix = {**fix, 'activation': activation}
            _default = {**default}
        register_partial_cls_here(ConvActivation, cls_name,
            fix=_fix,
            default={**_default, 'initialization': initialization_cls()}
        )
        for dim in [1, 2, 3]:
            cls_name = "{}{}{}D".format(conv_name, activation_str, dim)
            __all__.append(cls_name)
            register_partial_cls_here(ConvActivation, cls_name,
                fix={**_fix, 'dim': dim},
                default={**_default, 'initialization': initialization_cls()}
            )
def _register_bnr_conv_cls(conv_name, fix=None, default=None):
    """Generate and register `BNReLUConvBaseND` partial classes.

    Registers a dimension-agnostic `BNReLU<conv_name>ND` class and fixed-dim
    `BNReLU<conv_name>{1,2,3}D` variants, appending each name to `__all__`.
    """
    if fix is None:
        fix = {}
    if default is None:
        default = {}
    # Dimension-agnostic variant.
    # FIX: the original registered this inside a `for dim in [1, 2, 3]` loop
    # although the registration does not depend on `dim`, registering the
    # same class (and appending the same `__all__` entry) three times.
    cls_name = "BNReLU{}ND".format(conv_name)
    __all__.append(cls_name)
    register_partial_cls_here(BNReLUConvBaseND, cls_name, fix=fix, default=default)
    # Dimension-specific variants.
    for dim in [1, 2, 3]:
        cls_name = "BNReLU{}{}D".format(conv_name, dim)
        __all__.append(cls_name)
        register_partial_cls_here(BNReLUConvBaseND, cls_name,
                                  fix={**fix, 'dim': dim},
                                  default=default)
# ---------------------------------------------------------------------------
# Generate and register the public conv-layer classes via the helpers above;
# the helpers are deleted afterwards so they don't leak into the module
# namespace.
# ---------------------------------------------------------------------------
# conv classes
_register_conv_cls("Conv")
_register_conv_cls("ValidConv", fix=dict(valid_conv=True))
_register_conv_cls("Deconv", fix=dict(deconv=True), default=dict(kernel_size=2, stride=2))
_register_conv_cls("StridedConv", default=dict(stride=2))
_register_conv_cls("DilatedConv", fix=dict(dilation=2))
_register_conv_cls("DepthwiseConv", fix=dict(deconv=False, depthwise=True), default=dict(out_channels='auto'))
# BatchNormRelu classes
_register_bnr_conv_cls("Conv", fix=dict(deconv=False))
_register_bnr_conv_cls("Deconv", fix=dict(deconv=True))
_register_bnr_conv_cls("StridedConv", default=dict(stride=2))
_register_bnr_conv_cls("DilatedConv", default=dict(dilation=2))
_register_bnr_conv_cls("DepthwiseConv", fix=dict(deconv=False, depthwise=True), default=dict(out_channels='auto'))
del _register_conv_cls
del _register_bnr_conv_cls
class GlobalConv2D(nn.Module):
    """From https://arxiv.org/pdf/1703.02719.pdf
    Main idea: we can have a bigger kernel size computationally acceptable
    if we separate 2D-conv in 2 1D-convs """
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        local_conv_type,
        activation=None,
        use_BN=False,
        **kwargs
    ):
        """
        Parameters
        ----------
        in_channels : int
            Number of input channels.
        out_channels : int
            Number of output channels.
        kernel_size : int or list/tuple of two ints
            Effective 2D kernel size; each branch realizes it as a (kH, 1)
            followed by a (1, kW) convolution (or vice versa).
        local_conv_type : type
            Conv-layer class used for the four 1D sub-convolutions
            (e.g. ``nn.Conv2d``); ``kwargs`` are forwarded to it.
        activation : torch.nn.Module or None
            Optional activation applied to the summed branch outputs.
        use_BN : bool
            If True, batch-normalize the (activated) output.
        """
        super(GlobalConv2D, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        assert isinstance(kernel_size, (int, list, tuple))
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size,) * 2
        self.kwargs = kwargs
        # Branch 1: (kH, 1) then (1, kW).
        self.conv1a = local_conv_type(
            in_channels=self.in_channels,
            out_channels=self.out_channels,
            kernel_size=(kernel_size[0], 1),
            **kwargs
        )
        self.conv1b = local_conv_type(
            in_channels=self.out_channels,
            out_channels=self.out_channels,
            kernel_size=(1, kernel_size[1]),
            **kwargs
        )
        # Branch 2: (1, kW) then (kH, 1).
        self.conv2a = local_conv_type(
            in_channels=self.in_channels,
            out_channels=self.out_channels,
            kernel_size=(1, kernel_size[1]),
            **kwargs
        )
        self.conv2b = local_conv_type(
            in_channels=self.out_channels,
            out_channels=self.out_channels,
            kernel_size=(kernel_size[0], 1),
            **kwargs
        )
        if use_BN:
            self.batchnorm = nn.BatchNorm2d(self.out_channels)
        else:
            self.batchnorm = None
        self.activation = activation
    def forward(self, input_):
        out1 = self.conv1a(input_)
        out1 = self.conv1b(out1)
        out2 = self.conv2a(input_)
        out2 = self.conv2b(out2)
        # FIX: `out1.add(1, out2)` used the long-deprecated (and since
        # removed) `Tensor.add(scalar, tensor)` overload meaning
        # out1 + 1 * out2; write the sum directly.
        out = out1 + out2
        if self.activation is not None:
            out = self.activation(out)
        # NOTE(review): batchnorm is applied *after* the activation here --
        # preserved from the original implementation.
        if self.batchnorm is not None:
            out = self.batchnorm(out)
        return out
| 10,967 | 32.439024 | 114 | py |
inferno | inferno-master/inferno/extensions/layers/convolutional_blocks.py | import torch.nn as nn
from .convolutional import BNReLUConv2D, BNReLUDeconv2D, Conv2D, Deconv2D
from ...utils import python_utils as pyu
from ...utils.exceptions import assert_
__all__ = ['ResidualBlock', 'PreActSimpleResidualBlock']
_all = __all__
class ResidualBlock(nn.Module):
    """Wrap a list of layers with an additive skip connection.

    `resample`, if given, is applied to the input on the skip path (e.g. to
    match resolution or channel count before the addition).
    """
    def __init__(self, layers, resample=None):
        super(ResidualBlock, self).__init__()
        assert pyu.is_listlike(layers)
        self.layers = nn.Sequential(*layers)
        self.resample = resample
    def forward(self, input):
        residual = self.layers(input)
        skip = input if self.resample is None else self.resample(input)
        return residual + skip
class PreActSimpleResidualBlock(ResidualBlock):
    """Pre-activation residual block with two BN-ReLU-conv layers.

    Optionally changes resolution: `downsample` halves the spatial size via a
    strided conv, `upsample` doubles it via a deconv; in either case the skip
    path is resampled (conv/deconv + batchnorm) to match.
    """
    def __init__(self, in_channels, num_hidden_channels, upsample=False, downsample=False):
        if downsample:
            assert_(not upsample, "Both downsample and upsample is set to true.", ValueError)
            first = BNReLUConv2D(in_channels=in_channels,
                                 out_channels=num_hidden_channels,
                                 kernel_size=3,
                                 stride=2)
            resample = nn.Sequential(Conv2D(in_channels=in_channels,
                                            out_channels=in_channels,
                                            kernel_size=1, stride=2),
                                     nn.BatchNorm2d(in_channels))
        elif upsample:
            first = BNReLUDeconv2D(in_channels=in_channels,
                                   out_channels=num_hidden_channels,
                                   kernel_size=2,
                                   stride=2)
            resample = nn.Sequential(Deconv2D(in_channels=in_channels,
                                              out_channels=in_channels,
                                              kernel_size=2, stride=2),
                                     nn.BatchNorm2d(in_channels))
        else:
            first = BNReLUConv2D(in_channels=in_channels,
                                 out_channels=num_hidden_channels,
                                 kernel_size=3)
            resample = None
        second = BNReLUConv2D(in_channels=num_hidden_channels,
                              out_channels=in_channels,
                              kernel_size=3)
        super(PreActSimpleResidualBlock, self).__init__([first, second], resample)
# TODO PreActBottleneckResidualBlock
| 2,616 | 41.901639 | 93 | py |
inferno | inferno-master/inferno/extensions/layers/reshape.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from ...utils.exceptions import assert_, ShapeError
from ...utils import python_utils as pyu
__all__ = ['View', 'AsMatrix', 'Flatten',
'As3D', 'As2D',
'Concatenate', 'Cat',
'ResizeAndConcatenate', 'PoolCat',
'GlobalMeanPooling', 'GlobalMaxPooling',
'Sum', 'SplitChannels','Squeeze', 'RemoveSingletonDimension']
_all = __all__
class View(nn.Module):
    """Reshape the input to `as_shape`.

    Entries of `as_shape` are either ints (the target size along that axis,
    with -1 meaning "infer") or the string 'x', which keeps the input's size
    along that axis. All 'x' entries must precede the int entries.
    """
    def __init__(self, as_shape):
        super(View, self).__init__()
        self.as_shape = self.validate_as_shape(as_shape)
    def validate_as_shape(self, as_shape):
        """Check that entries are ints or 'x', with ints forming a suffix."""
        assert all(isinstance(entry, int) or entry == 'x' for entry in as_shape)
        int_positions = [idx for idx, entry in enumerate(as_shape)
                         if isinstance(entry, int)]
        if int_positions:
            first_int = int_positions[0]
            assert all(isinstance(entry, int) for entry in as_shape[first_int:])
        return as_shape
    def forward(self, input):
        input_shape = list(input.size())
        target_shape = [entry if isinstance(entry, int) else input_shape[idx]
                        for idx, entry in enumerate(self.as_shape)]
        return input.view(*target_shape)
class AsMatrix(View):
    """View the input as a matrix, keeping the sizes of the first two axes.

    NOTE(review): this only succeeds when any trailing axes are singletons,
    since `view` requires the element count to be unchanged -- confirm against
    callers.
    """
    def __init__(self):
        super(AsMatrix, self).__init__(as_shape=['x', 'x'])
class Flatten(View):
    """View the input as (batch, -1): keep the batch axis, flatten the rest."""
    def __init__(self):
        super(Flatten, self).__init__(as_shape=['x', -1])
class As3D(nn.Module):
    """Reshape 2D or 4D batches into 5D (N, C, D, H, W) volume batches.

    5D inputs pass through untouched. For 4D image batches the channel axis
    is split into channels and z-slices (order controlled by `channel_as_z`);
    2D (N, C) inputs gain singleton D, H, W axes.
    """
    def __init__(self, channel_as_z=False, num_channels_or_num_z_slices=1):
        super(As3D, self).__init__()
        self.channel_as_z = channel_as_z
        self.num_channels_or_num_z_slices = num_channels_or_num_z_slices
    def forward(self, input):
        ndim = input.dim()
        if ndim == 5:
            # Already a batch of volumes: nothing to do.
            return input
        if ndim == 4:
            b, c, _0, _1 = list(input.size())
            assert_(c % self.num_channels_or_num_z_slices == 0,
                    "Number of channels of the 4D image tensor (= {}) must be "
                    "divisible by the set number of channels or number of z slices "
                    "of the 5D volume tensor (= {})."
                    .format(c, self.num_channels_or_num_z_slices),
                    ShapeError)
            c //= self.num_channels_or_num_z_slices
            if self.channel_as_z:
                # Channel axis becomes the z axis.
                return input.view(b, self.num_channels_or_num_z_slices, c, _0, _1)
            # Keep the channel axis, insert a z axis after it.
            return input.view(b, c, self.num_channels_or_num_z_slices, _0, _1)
        if ndim == 2:
            # Matrix input: append singleton spatial axes.
            b, c = list(input.size())
            return input.view(b, c, 1, 1, 1)
        raise NotImplementedError
class As2D(nn.Module):
    """Reshape 2D or 5D batches into 4D (N, C, H, W) image batches.

    Parameters
    ----------
    z_as_channel : bool
        If True, fold the z axis of a 5D input into the channel axis;
        otherwise the z axis must be a singleton.
    """
    def __init__(self, z_as_channel=True):
        super(As2D, self).__init__()
        self.z_as_channel = z_as_channel
    def forward(self, input):
        if input.dim() == 5:
            b, c, _0, _1, _2 = list(input.size())
            if not self.z_as_channel:
                # Can only drop the z axis if it's a singleton.
                assert _0 == 1
            # Fold z into the channel axis (a no-op when _0 == 1).
            return input.view(b, c * _0, _1, _2)
        elif input.dim() == 4:
            # Nothing to do here - input is already 2D
            return input
        elif input.dim() == 2:
            # Append singleton spatial axes.
            b, c = list(input.size())
            return input.view(b, c, 1, 1)
        else:
            # FIX: the original fell off the end and silently returned None
            # for unsupported dims; fail loudly instead, consistent with As3D.
            raise NotImplementedError
class Concatenate(nn.Module):
    """Concatenate input tensors along a specified dimension."""
    def __init__(self, dim=1):
        super(Concatenate, self).__init__()
        # Axis along which the positional inputs are concatenated
        # (default 1, the channel axis of NCHW-style tensors).
        self.dim = dim
    def forward(self, *inputs):
        # All inputs must agree on every axis except `self.dim`.
        return torch.cat(inputs, dim=self.dim)
class ResizeAndConcatenate(nn.Module):
    """
    Resize input tensors spatially (to a specified target size) before concatenating
    them along the a given dim (channel, i.e. 1 by default). The down-sampling mode can
    be specified ('average' or 'max'), but the up-sampling is always 'nearest'.
    """
    # Accepted user-facing mode names, mapped to the F.adaptive_*_pool prefix.
    POOL_MODE_MAPPING = {'avg': 'avg',
                         'average': 'avg',
                         'mean': 'avg',
                         'max': 'max'}
    def __init__(self, target_size, pool_mode='average', dim=1):
        super(ResizeAndConcatenate, self).__init__()
        self.target_size = target_size
        assert_(pool_mode in self.POOL_MODE_MAPPING.keys(),
                "`pool_mode` must be one of {}, got {} instead."
                .format(self.POOL_MODE_MAPPING.keys(), pool_mode),
                ValueError)
        self.pool_mode = self.POOL_MODE_MAPPING.get(pool_mode)
        self.dim = dim
    def forward(self, *inputs):
        ndim = inputs[0].dim()
        assert_(ndim in [4, 5],
                'Input tensors must either be 4 or 5 '
                'dimensional, but inputs[0] is {}D.'.format(ndim),
                ShapeError)
        # Pick the adaptive pooling function matching mode and dimensionality.
        num_spatial = {4: 2, 5: 3}[ndim]
        resize_function = getattr(F, 'adaptive_{}_pool{}d'.format(self.pool_mode,
                                                                  num_spatial))
        size = pyu.as_tuple_of_len(self.target_size, num_spatial)
        # Resize every input, validating its dimensionality first.
        resized = []
        for input_num, tensor in enumerate(inputs):
            assert_(tensor.dim() == ndim,
                    "Expected inputs[{}] to be a {}D tensor, got a {}D "
                    "tensor instead.".format(input_num, ndim, tensor.dim()),
                    ShapeError)
            resized.append(resize_function(tensor, size))
        # A single input needs no concatenation.
        if len(resized) == 1:
            return resized[0]
        return torch.cat(tuple(resized), self.dim)
class Cat(Concatenate):
    """An alias for `Concatenate`. Hey, everyone knows who Cat is."""
    # Pure alias: inherits everything, adds nothing.
    pass
class PoolCat(ResizeAndConcatenate):
    """Alias for `ResizeAndConcatenate`, just to annoy snarky web developers."""
    # Pure alias: inherits everything, adds nothing.
    pass
class GlobalMeanPooling(ResizeAndConcatenate):
    """Global mean pooling layer."""
    def __init__(self):
        # Average-pool every channel down to a single (1, 1) spatial cell.
        super(GlobalMeanPooling, self).__init__((1, 1), 'average')
class GlobalMaxPooling(ResizeAndConcatenate):
    """Global max pooling layer."""
    def __init__(self):
        # Max-pool every channel down to a single (1, 1) spatial cell.
        super(GlobalMaxPooling, self).__init__((1, 1), 'max')
class Sum(nn.Module):
    """Elementwise sum of all positional inputs."""
    def forward(self, *inputs):
        stacked = torch.stack(inputs, dim=0)
        return stacked.sum(0)
class SplitChannels(nn.Module):
    """Split input at a given index along the channel axis.

    `channel_index` is either an int (the split position) or the string
    'half' to split at the middle of the channel axis.
    """
    def __init__(self, channel_index):
        super(SplitChannels, self).__init__()
        self.channel_index = channel_index
    def forward(self, input):
        if isinstance(self.channel_index, int):
            split_at = self.channel_index
        elif self.channel_index == 'half':
            split_at = input.size(1) // 2
        else:
            raise NotImplementedError
        assert split_at < input.size(1)
        return input[:, :split_at, ...], input[:, split_at:, ...]
class Squeeze(nn.Module):
    """Remove all singleton dimensions from the input tensor."""
    def __init__(self):
        super(Squeeze, self).__init__()
    def forward(self, x):
        # Equivalent to x.squeeze(): drops every axis of size 1.
        return torch.squeeze(x)
class RemoveSingletonDimension(nn.Module):
    """Remove a singleton dimension at a given position.

    Parameters
    ----------
    dim : int
        Position of the singleton axis to remove (default 1, the channel axis).

    Raises
    ------
    RuntimeError
        If the size along `dim` is not exactly 1.
    """
    def __init__(self, dim=1):
        super(RemoveSingletonDimension, self).__init__()
        # FIX: the original hard-coded `self.dim = 1`, silently ignoring the
        # `dim` argument.
        self.dim = dim
    def forward(self, x):
        size = list(x.size())
        if size[self.dim] != 1:
            raise RuntimeError("RemoveSingletonDimension expects a single channel at dim %d, shape=%s"%(self.dim,str(size)))
        # Select index 0 along `dim`, full slices elsewhere.
        slicing = [slice(0, s) for s in size]
        slicing[self.dim] = 0
        # FIX: index with a tuple; indexing with a plain list of slices is
        # deprecated/removed advanced-indexing behavior.
        return x[tuple(slicing)]
inferno | inferno-master/inferno/extensions/layers/sampling.py | import torch.nn as nn
__all__ = ['AnisotropicUpsample', 'AnisotropicPool', 'Upsample', 'AnisotropicUpsample2D', 'AnisotropicPool2D']
# torch is deprecating nn.Upsample in favor of nn.functional.interpolate
# we wrap interpolate here to still use Upsample as class
class Upsample(nn.Module):
    """Upsampling layer backed by `nn.functional.interpolate`.

    torch deprecated `nn.Upsample` in favor of `nn.functional.interpolate`;
    this wrapper keeps a class interface while preferring `interpolate`.
    """
    def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=None):
        self.size = size
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners
        super(Upsample, self).__init__()
        # `interpolate` only appeared in torch 0.4.1; fall back to the
        # legacy nn.Upsample module on older versions.
        self.have_interpolate = hasattr(nn.functional, 'interpolate')
        if not self.have_interpolate:
            self.sampler = nn.Upsample(size=size, scale_factor=scale_factor,
                                       mode=mode, align_corners=align_corners)
    def forward(self, input):
        if not self.have_interpolate:
            return self.sampler(input)
        return nn.functional.interpolate(input, self.size, self.scale_factor,
                                         self.mode, self.align_corners)
class AnisotropicUpsample(nn.Module):
    """Upsample only the in-plane (H, W) axes of a 5D volume batch."""
    def __init__(self, scale_factor):
        super(AnisotropicUpsample, self).__init__()
        self.upsampler = Upsample(scale_factor=scale_factor)
    def forward(self, input):
        # Input is NCDHW; fold C and D together so a 2D upsampler applies.
        N, C, D, H, W = input.size()
        folded = input.view(N, C * D, H, W)
        upsampled = self.upsampler(folded)
        factor = self.upsampler.scale_factor
        # Unfold C and D again, with the in-plane axes scaled.
        return upsampled.view(N, C, D, factor * H, factor * W)
class AnisotropicPool(nn.MaxPool3d):
    """Max-pool only H and W of a 5D volume, leaving the z axis untouched."""
    def __init__(self, downscale_factor):
        factor = downscale_factor
        super(AnisotropicPool, self).__init__(
            kernel_size=(1, factor + 1, factor + 1),
            stride=(1, factor, factor),
            padding=(0, 1, 1))
class AnisotropicUpsample2D(nn.Module):
    """Upsample only the last axis of a 4D (N, C, D, W) batch."""
    def __init__(self, scale_factor):
        super(AnisotropicUpsample2D, self).__init__()
        self.upsampler = nn.Upsample(scale_factor=scale_factor)
    def forward(self, input):
        # input is 2D of shape NCDW (or NCDH, egal)
        N, C, D, W = input.size()
        # Fold C and D axes in one
        folded = input.view(N, C * D, W)
        # Upsample
        upsampled = self.upsampler(folded)
        # Unfold out the C and D axes.
        # FIX: `self.upsampler.scale_factor * W` breaks `view`, because
        # nn.Upsample coerces scale_factor to float and `view` requires int
        # sizes; read the scaled width off the upsampled tensor instead.
        return upsampled.view(N, C, D, upsampled.size(-1))
class AnisotropicPool2D(nn.MaxPool2d):
    """Max-pool only the last axis of a 4D (N, C, D, W) batch."""
    def __init__(self, downscale_factor):
        factor = downscale_factor
        super(AnisotropicPool2D, self).__init__(
            kernel_size=(1, factor + 1),
            stride=(1, factor),
            padding=(0, 1))
| 3,269 | 37.470588 | 110 | py |
inferno | inferno-master/inferno/extensions/layers/activations.py | import torch.nn.functional as F
import torch.nn as nn
from ...utils.torch_utils import where
__all__ = ['SELU']
_all = __all__
class SELU(nn.Module):
    """Self-normalizing ELU activation (Klambauer et al., 2017)."""
    def forward(self, input):
        return self.selu(input)
    @staticmethod
    def selu(x):
        # Fixed-point constants from the SELU paper.
        alpha = 1.6732632423543772848170429916717
        scale = 1.0507009873554804934193349852946
        # noinspection PyTypeChecker
        negative_branch = alpha * F.elu(x)
        return scale * where(x >= 0, x, negative_branch)
inferno | inferno-master/inferno/extensions/layers/normalization.py | import torch.nn as nn
class BatchNormND(nn.Module):
    """Thin wrapper dispatching to `nn.BatchNorm{1,2,3}d` based on `dim`."""
    def __init__(self, dim, num_features,
                 eps=1e-5, momentum=0.1,
                 affine=True, track_running_stats=True):
        super(BatchNormND, self).__init__()
        assert dim in [1, 2, 3]
        bn_cls = getattr(nn, 'BatchNorm{}d'.format(dim))
        self.bn = bn_cls(num_features=num_features, eps=eps, momentum=momentum,
                         affine=affine, track_running_stats=track_running_stats)
    def forward(self, x):
        return self.bn(x)
inferno | inferno-master/inferno/extensions/layers/device.py | import torch.nn as nn
from ...utils.python_utils import from_iterable, to_iterable
from ...utils.exceptions import assert_, DeviceError
__all__ = ['DeviceTransfer', 'OnDevice']
_all = __all__
class DeviceTransfer(nn.Module):
    """Layer to transfer variables to a specified device."""
    def __init__(self, target_device, device_ordinal=None, asynchronous=False):
        """
        Parameters
        ----------
        target_device : {'cpu', 'cuda'}
            Device to transfer to.
        device_ordinal : int
            Device ordinal if target_device == 'cuda'.
        asynchronous : bool
            Whether to use asynchronous transfers.
        """
        super(DeviceTransfer, self).__init__()
        # Validate arguments
        assert_(target_device in ['cpu', 'cuda'],
                "Target device must either be 'cpu' or 'cuda'.",
                DeviceError)
        if target_device == 'cpu':
            assert_(device_ordinal is None,
                    "'device_ordinal' must be None if target_device is 'cpu'.",
                    DeviceError)
        self.target_device = target_device
        self.device_ordinal = device_ordinal
        # FIX: `asynchronous` was accepted but never stored, so `forward`
        # crashed with an AttributeError on the 'cuda' path.
        self.asynchronous = asynchronous
    def forward(self, *inputs):
        """Move all inputs to the target device; returns a single value if
        only one input was given (via `from_iterable`)."""
        if self.target_device == 'cuda':
            transferred = tuple(input_.cuda(device=self.device_ordinal,
                                            non_blocking=self.asynchronous)
                                for input_ in inputs)
        elif self.target_device == 'cpu':
            transferred = tuple(input_.cpu() for input_ in inputs)
        else:
            raise NotImplementedError
        return from_iterable(transferred)
class OnDevice(nn.Module):
    """
    Moves a module to a device. The advantage of using this over `torch.nn.Module.cuda` is
    that the inputs are transferred to the same device as the module, enabling easy model
    parallelism.
    """
    def __init__(self, module, target_device, device_ordinal=None, asynchronous=False):
        """
        Parameters
        ----------
        module : torch.nn.Module
            Module to transfer to device.
        target_device : {'cuda', 'cpu'}
            The device to move `module` to. Must be either 'cuda' or 'cpu'.
        device_ordinal : int
            Ordinal of the GPU device if `target_device = 'cuda'`.
        asynchronous : bool
            Whether to use asynchronous transfers.
        """
        super(OnDevice, self).__init__()
        # Validate arguments
        assert_(target_device in ['cpu', 'cuda'],
                "Target device must either be 'cpu' or 'cuda'.",
                DeviceError)
        if target_device == 'cpu':
            assert_(device_ordinal is None,
                    "'device_ordinal' must be None if target_device is 'cpu'.",
                    DeviceError)
        self.target_device = target_device
        self.device_ordinal = device_ordinal
        self.asynchronous = asynchronous
        # This is a no-op if the inputs are already on the right device.
        self.device_transfer = DeviceTransfer(self.target_device,
                                              device_ordinal=self.device_ordinal,
                                              asynchronous=self.asynchronous)
        self.module = self.transfer_module(module)
    def transfer_module(self, module):
        """Move `module` to the configured device and return it."""
        if self.target_device == 'cuda':
            # FIX: `Module.cuda` takes `device`, not the long-removed
            # `device_id` keyword, which raises a TypeError on modern torch.
            return module.cuda(device=self.device_ordinal)
        elif self.target_device == 'cpu':
            return module.cpu()
        else:
            raise NotImplementedError
    def forward(self, *inputs):
        # Transfer inputs (no-op if they're already on the right device)
        transferred = to_iterable(self.device_transfer(*inputs))
        output = self.module(*transferred)
        return output
| 3,792 | 38.103093 | 90 | py |
inferno | inferno-master/inferno/extensions/criteria/core.py | import torch.nn as nn
from functools import reduce
from ...utils.exceptions import assert_, ShapeError, NotTorchModuleError
__all__ = ['Criteria', 'As2DCriterion']
class Criteria(nn.Module):
    """Aggregate multiple criteria to one.

    Accepts either several criteria as positional arguments or a single
    list/tuple of criteria. On `forward`, each criterion is applied to its
    corresponding (prediction, target) pair and the losses are summed.
    """
    def __init__(self, *criteria):
        super(Criteria, self).__init__()
        if len(criteria) == 1 and isinstance(criteria[0], (list, tuple)):
            criteria = list(criteria[0])
        else:
            criteria = list(criteria)
        # Validate criteria
        assert all([isinstance(criterion, nn.Module) for criterion in criteria]), \
            "Criterion must be a torch module."
        self.criteria = criteria
    def forward(self, prediction, target):
        """Apply each criterion to its (prediction, target) pair and sum."""
        assert isinstance(prediction, (list, tuple)), \
            "`prediction` must be a list or a tuple, got {} instead."\
            .format(type(prediction).__name__)
        assert isinstance(target, (list, tuple)), \
            "`prediction` must be a list or a tuple, got {} instead." \
            .format(type(target).__name__)
        assert len(prediction) == len(target), \
            "Number of predictions must equal the number of targets. " \
            "Got {} predictions but {} targets.".format(len(prediction), len(target))
        # Compute losses.
        # FIX: the original called `criterion(prediction, target)` with the
        # full lists instead of the per-pair `_prediction, _target` items
        # produced by the zip.
        losses = [criterion(_prediction, _target)
                  for _prediction, _target, criterion in zip(prediction, target, self.criteria)]
        # Aggregate losses
        loss = reduce(lambda x, y: x + y, losses)
        # Done
        return loss
class As2DCriterion(nn.Module):
    """
    Makes a given criterion applicable on (N, C, H, W) prediction and (N, H, W) target tensors,
    if they're applicable to (N, C) prediction and (N,) target tensors .
    """
    def __init__(self, criterion):
        super(As2DCriterion, self).__init__()
        assert_(isinstance(criterion, nn.Module),
                "Criterion must be a module, got a {} instead."
                .format(type(criterion).__name__),
                NotTorchModuleError)
        self.criterion = criterion
    def forward(self, prediction, target):
        # Validate input
        assert_(prediction.dim() == 4, "`prediction` is expected to be a 4D tensor of shape "
                                       "(N, C, H, W), got a {}D "
                                       "tensor instead.".format(prediction.dim()),
                ShapeError)
        assert_(target.dim() == 3, "`target` is expected to be a 3D tensor of shape "
                                   "(N, H, W), got a {}D "
                                   "tensor instead.".format(target.dim()),
                ShapeError)
        # Flatten the (N, H, W) target to (N*H*W,).
        flat_target = target.contiguous().view(-1)
        # Move channels last, then flatten the prediction to (N*H*W, C).
        num_channels = prediction.size(1)
        flat_prediction = prediction.permute(0, 2, 3, 1).contiguous().view(-1, num_channels)
        # The wrapped criterion now sees the (samples, classes) layout it expects.
        return self.criterion(flat_prediction, flat_target)
| 3,139 | 42.013699 | 96 | py |
inferno | inferno-master/inferno/extensions/criteria/set_similarity_measures.py | import torch.nn as nn
from ...utils.torch_utils import flatten_samples
__all__ = ['SorensenDiceLoss', 'GeneralizedDiceLoss']
class SorensenDiceLoss(nn.Module):
    """
    Computes a loss scalar, which when minimized maximizes the Sorensen-Dice similarity
    between the input and the target. For both inputs and targets it must be the case that
    `input_or_target.size(1) = num_channels`.
    """
    def __init__(self, weight=None, channelwise=True, eps=1e-6):
        """
        Parameters
        ----------
        weight : torch.FloatTensor or torch.cuda.FloatTensor
            Class weights. Applies only if `channelwise = True`.
        channelwise : bool
            Whether to apply the loss channelwise and sum the results (True)
            or to apply it on all channels jointly (False).
        """
        super(SorensenDiceLoss, self).__init__()
        self.register_buffer('weight', weight)
        self.channelwise = channelwise
        self.eps = eps
    def forward(self, input, target):
        """
        input: torch.FloatTensor or torch.cuda.FloatTensor
        target: torch.FloatTensor or torch.cuda.FloatTensor
        Expected shape of the inputs: (batch_size, nb_channels, ...)
        """
        assert input.size() == target.size()
        if self.channelwise:
            # Flatten to (C, N) so the dice terms can be computed per channel.
            flat_input = flatten_samples(input)
            flat_target = flatten_samples(target)
            # Sum over the sample axis, leaving the channel axis intact.
            intersection = (flat_input * flat_target).sum(-1)
            cardinality = (flat_input * flat_input).sum(-1) \
                + (flat_target * flat_target).sum(-1)
            per_channel_loss = -2 * (intersection / cardinality.clamp(min=self.eps))
            if self.weight is not None:
                # Legacy pytorch (< 0.2) kept a trailing singleton dim here.
                if per_channel_loss.dim() == 2:
                    per_channel_loss = per_channel_loss.squeeze(1)
                assert self.weight.size() == per_channel_loss.size()
                per_channel_loss = self.weight * per_channel_loss
            return per_channel_loss.sum()
        # Joint (non-channelwise) dice over all elements.
        intersection = (input * target).sum()
        cardinality = (input * input).sum() + (target * target).sum()
        return -2. * (intersection / cardinality.clamp(min=self.eps))
class GeneralizedDiceLoss(nn.Module):
    """
    Computes the scalar Generalized Dice Loss defined in https://arxiv.org/abs/1707.03237
    This version works for multiple classes and expects predictions for every class (e.g. softmax output) and
    one-hot targets for every class.
    """
    def __init__(self, weight=None, channelwise=False, eps=1e-6):
        super(GeneralizedDiceLoss, self).__init__()
        # Registered as a buffer so it moves with the module across devices.
        self.register_buffer('weight', weight)
        self.channelwise = channelwise
        # Numerical floor used to guard the divisions below.
        self.eps = eps
    def forward(self, input, target):
        """
        input: torch.FloatTensor or torch.cuda.FloatTensor
        target: torch.FloatTensor or torch.cuda.FloatTensor
        Expected shape of the inputs:
            - if not channelwise: (batch_size, nb_classes, ...)
            - if channelwise: (batch_size, nb_channels, nb_classes, ...)
        """
        assert input.size() == target.size()
        if not self.channelwise:
            # Flatten input and target to have the shape (nb_classes, N),
            # where N is the number of samples
            input = flatten_samples(input)
            target = flatten_samples(target)
            # Find classes weights: inverse squared class volume, as in
            # the Generalized Dice Loss paper.
            sum_targets = target.sum(-1)
            class_weigths = 1. / (sum_targets * sum_targets).clamp(min=self.eps)
            # Compute generalized Dice loss:
            numer = ((input * target).sum(-1) * class_weigths).sum()
            denom = ((input + target).sum(-1) * class_weigths).sum()
            loss = 1. - 2. * numer / denom.clamp(min=self.eps)
        else:
            def flatten_and_preserve_channels(tensor):
                # Reshape (B, C, K, ...) -> (C, K, N) keeping channel and
                # class axes intact.
                tensor_dim = tensor.dim()
                assert tensor_dim >= 3
                num_channels = tensor.size(1)
                num_classes = tensor.size(2)
                # Permute the channel axis to first
                permute_axes = list(range(tensor_dim))
                permute_axes[0], permute_axes[1], permute_axes[2] = permute_axes[1], permute_axes[2], permute_axes[0]
                permuted = tensor.permute(*permute_axes).contiguous()
                flattened = permuted.view(num_channels, num_classes, -1)
                return flattened
            # Flatten input and target to have the shape (nb_channels, nb_classes, N)
            input = flatten_and_preserve_channels(input)
            target = flatten_and_preserve_channels(target)
            # Find classes weights:
            sum_targets = target.sum(-1)
            class_weigths = 1. / (sum_targets * sum_targets).clamp(min=self.eps)
            # Compute generalized Dice loss (per channel this time; the
            # trailing sum(-1) reduces over classes, leaving channels):
            numer = ((input * target).sum(-1) * class_weigths).sum(-1)
            denom = ((input + target).sum(-1) * class_weigths).sum(-1)
            channelwise_loss = 1. - 2. * numer / denom.clamp(min=self.eps)
            if self.weight is not None:
                # Drop a legacy trailing singleton dim if present.
                if channelwise_loss.dim() == 2:
                    channelwise_loss = channelwise_loss.squeeze(1)
                assert self.weight.size() == channelwise_loss.size(),\
                    """`weight` should have shape (nb_channels, ),
                    `target` should have shape (batch_size, nb_channels, nb_classes, ...)"""
                # Apply channel weights:
                channelwise_loss = self.weight * channelwise_loss
            # Sum over channels to obtain the scalar loss.
            loss = channelwise_loss.sum()
        return loss
| 6,051 | 42.228571 | 117 | py |
inferno | inferno-master/inferno/extensions/criteria/elementwise_measures.py | import torch.nn as nn
from ...utils.exceptions import assert_
class WeightedMSELoss(nn.Module):
    """MSE loss that re-weights elements belonging to the positive class.

    Elements of `target` equal to `positive_class_value` receive weight
    `positive_class_weight`; all other elements receive weight 1. The weights
    are applied inside the squared error by scaling both operands with the
    square root of the weight.
    """
    NEGATIVE_CLASS_WEIGHT = 1.
    def __init__(self, positive_class_weight=1., positive_class_value=1., size_average=True):
        super(WeightedMSELoss, self).__init__()
        assert_(positive_class_weight >= 0,
                "Positive class weight can't be less than zero, got {}."
                .format(positive_class_weight),
                ValueError)
        self.mse = nn.MSELoss(size_average=size_average)
        self.positive_class_weight = positive_class_weight
        self.positive_class_value = positive_class_value
    def forward(self, input, target):
        # Elementwise mask of positive-class locations.
        positive_mask = target.data.eq(self.positive_class_value).type_as(target.data)
        # weight = 1 + (w_pos - 1) * mask, elementwise.
        weights = positive_mask * (self.positive_class_weight - self.NEGATIVE_CLASS_WEIGHT) \
            + self.NEGATIVE_CLASS_WEIGHT
        # Weights are non-negative as long as NEGATIVE_CLASS_WEIGHT stays 1.
        root_weights = weights.sqrt()
        return self.mse(input * root_weights, target * root_weights)
| 1,434 | 46.833333 | 94 | py |
inferno | inferno-master/inferno/extensions/criteria/regularized.py | import warnings
import torch
from torch import nn
from . import set_similarity_measures, core
__all__ = [
'RegularizedLoss',
'RegularizedCrossEntropyLoss',
'RegularizedBCEWithLogitsLoss',
'RegularizedBCELoss',
'RegularizedMSELoss',
'RegularizedNLLLoss'
]
def collect_losses(module):
    """Collect `_losses` dictionaries from module and children

    :param module: a Module to be searched for losses
    :return: dictionary of loss names to values
    """
    gathered = {}
    def _accumulate(m):
        # Modules without a `_losses` attribute contribute nothing.
        for name, value in getattr(m, '_losses', {}).items():
            if name in gathered:
                gathered[name] = gathered[name] + value
            else:
                gathered[name] = value
    module.apply(_accumulate)
    return gathered
def build_criterion(criterion, *args, **kwargs):
    """Build a criterion

    :param criterion: criterion class, name of criterion class, or instance of criterion
    :param args: args for constructor
    :param kwargs: kwargs for constructor
    :return: instance of criterion
    """
    # Already-constructed criteria are passed through untouched.
    if isinstance(criterion, torch.nn.Module):
        return criterion
    if isinstance(criterion, str):
        # Resolve the name against torch.nn first, then the local modules.
        criterion_class = None
        for module in [nn, core, set_similarity_measures]:
            criterion_class = getattr(module, criterion, None)
            if criterion_class is not None:
                break
        assert criterion_class is not None, "Criterion {} not found.".format(criterion)
    elif callable(criterion) and isinstance(criterion, type):
        criterion_class = criterion
    else:
        raise NotImplementedError
    return criterion_class(*args, **kwargs)
class RegularizedLoss(nn.Module):
    """Wrap a criterion. Collect regularization losses from model and combine with wrapped criterion.

    The wrapped criterion computes the main loss; regularization terms are
    gathered from `_losses` dictionaries on the model's submodules (via
    `collect_losses`) and added to it. If a trainer is supplied, the
    individual loss components are recorded in its state.
    """
    def __init__(self, criterion, *args, **kwargs):
        super(RegularizedLoss, self).__init__()
        # `criterion` may be a class, a class name, or an instance.
        self.criterion = build_criterion(criterion, *args, **kwargs)
    def forward(self, *args, trainer=None, model=None, **kwargs):
        # calculate wrapped loss
        main_loss = self.criterion(*args, **kwargs)
        # If no trainer, we cannot record states
        if trainer is None:
            warnings.warn('No trainer parameter provided. Not logging regularization losses.')
        elif model is None:
            # Fall back to the trainer's model when none is given explicitly.
            model = trainer.model
        # If no model or trainer, we cannot record states or collect losses
        if model is None:
            warnings.warn('No model or trainer parameter provided. Not calculating regularization losses.')
            regularization_losses = {}
            total_regularization_loss = None
            total_loss = main_loss
        else:
            regularization_losses = collect_losses(model)
            total_regularization_loss = sum(regularization_losses.values())
            total_loss = main_loss + total_regularization_loss
        # Record losses if trainer provided
        if trainer is not None:
            # prefix depending on mode (nn.Module's training flag)
            if self.training:
                prefix = 'training'
            else:
                prefix = 'validation'
            # main loss
            updates = {'{}_main_loss'.format(prefix): main_loss}
            # total regularization loss
            if total_regularization_loss is not None:
                updates['{}_total_regularization_loss'.format(prefix)] = total_regularization_loss
            # detailed regularization losses
            for k, v in regularization_losses.items():
                updates['{}_{}'.format(prefix, k)] = v
            # record state
            trainer.update_state_from_dictionary(updates)
        return total_loss
# Convenience wrappers for common losses
class RegularizedCrossEntropyLoss(RegularizedLoss):
    """`nn.CrossEntropyLoss` with regularization-loss collection."""
    def __init__(self, *args, **kwargs):
        super().__init__(nn.CrossEntropyLoss, *args, **kwargs)
class RegularizedBCEWithLogitsLoss(RegularizedLoss):
    """BCE-with-logits loss combined with model regularization losses."""

    def __init__(self, *args, **kwargs):
        # Equivalent to RegularizedLoss(nn.BCEWithLogitsLoss, ...).
        super().__init__(nn.BCEWithLogitsLoss, *args, **kwargs)
class RegularizedBCELoss(RegularizedLoss):
    """Binary cross-entropy loss combined with model regularization losses."""

    def __init__(self, *args, **kwargs):
        # Equivalent to RegularizedLoss(nn.BCELoss, ...).
        super().__init__(nn.BCELoss, *args, **kwargs)
class RegularizedMSELoss(RegularizedLoss):
    """Mean-squared-error loss combined with model regularization losses."""

    def __init__(self, *args, **kwargs):
        # Equivalent to RegularizedLoss(nn.MSELoss, ...).
        super().__init__(nn.MSELoss, *args, **kwargs)
class RegularizedNLLLoss(RegularizedLoss):
    """Negative-log-likelihood loss combined with model regularization losses."""

    def __init__(self, *args, **kwargs):
        # Equivalent to RegularizedLoss(nn.NLLLoss, ...).
        super().__init__(nn.NLLLoss, *args, **kwargs)
| 4,589 | 33 | 107 | py |
inferno | inferno-master/inferno/extensions/optimizers/adam.py | import math
from torch.optim import Optimizer
class Adam(Optimizer):
    """Implements the Adam algorithm with the option of adding an L1 penalty.

    It has been proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        lambda_l1 (float, optional): L1 penalty coefficient (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)

    .. _Adam\\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 lambda_l1=0, weight_decay=0, **kwargs):
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        lambda_l1=lambda_l1, weight_decay=weight_decay,
                        **kwargs)
        super(Adam, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving averages of gradient values and of
                    # squared gradient values.  `new_zeros` replaces the
                    # removed `grad.new().resize_as_(grad).zero_()` idiom.
                    state['exp_avg'] = grad.new_zeros(grad.shape)
                    state['exp_avg_sq'] = grad.new_zeros(grad.shape)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Penalties are applied out-of-place so the caller's p.grad
                # buffer is not clobbered (the original in-place add_ mutated
                # p.grad across steps).
                if group['lambda_l1'] != 0:
                    # L1 penalty: subgradient of lambda_l1 * |p| is sign(p).
                    grad = grad.add(p.data.sign(), alpha=group['lambda_l1'])
                if group['weight_decay'] != 0:
                    # Classic (coupled) L2 weight decay added to the gradient.
                    grad = grad.add(p.data, alpha=group['weight_decay'])
                # Decay the first and second moment running average coefficient
                # (keyword alpha/value forms replace the deprecated positional
                # scalar-first overloads removed in modern torch).
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                # Bias-corrected step size, as in the Adam paper.
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
        return loss
| 3,106 | 37.8375 | 88 | py |
inferno | inferno-master/inferno/utils/model_utils.py | import torch
from .exceptions import assert_, NotTorchModuleError, ShapeError
def is_model_cuda(model):
    """Return True if the model's first parameter lives on a CUDA device.

    A network without parameters is assumed not to use CUDA.
    """
    first_param = next(model.parameters(), None)
    return False if first_param is None else first_param.is_cuda
class ModelTester(object):
    """Smoke-test a model: feed a random input of `input_shape` and assert that
    the output has `expected_output_shape` (both shapes include the batch axis)."""

    def __init__(self, input_shape, expected_output_shape):
        self.input_shape = input_shape
        self.expected_output_shape = expected_output_shape
        self._is_cuda = False

    def cuda(self):
        """Request that the test input (and model) be moved to the GPU."""
        self._is_cuda = True
        return self

    def get_input(self):
        """Build a random test input tensor (no autograd)."""
        with torch.no_grad():
            tensor = torch.rand(*self.input_shape, requires_grad=False)
            return tensor.cuda() if self._is_cuda else tensor

    def __call__(self, model):
        # Make sure model is a model
        assert_(isinstance(model, torch.nn.Module),
                "Model is not a torch module.",
                NotTorchModuleError)
        # Transfer to cuda if required
        if self._is_cuda and not is_model_cuda(model):
            model.cuda()
        output = model(self.get_input())
        expected = list(self.expected_output_shape)
        actual = list(output.size())
        assert_(actual == expected,
                "Expected output shape {} for input shape {}, "
                "got output of shape {} instead.".format(expected,
                                                         list(self.input_shape),
                                                         actual),
                ShapeError)
        return model
class MultiscaleModelTester(ModelTester):
    """ModelTester variant for models that return a tuple of outputs, one per
    scale; `expected_output_shape` is then a sequence of shapes."""

    def __call__(self, model):
        # Make sure model is a model
        assert_(isinstance(model, torch.nn.Module),
                "Model is not a torch module.",
                NotTorchModuleError)
        # Transfer to cuda if required
        if self._is_cuda and not is_model_cuda(model):
            model.cuda()
        output = model(self.get_input())
        assert_(isinstance(output, tuple), "Expect tuple output")
        # Check every scale's output shape against its expectation.
        for scale, scale_output in enumerate(output):
            expected = list(self.expected_output_shape[scale])
            actual = list(scale_output.size())
            assert_(actual == expected,
                    "Expected output shape {} for input shape {}, "
                    "got output of shape {} instead.".format(expected,
                                                             list(self.input_shape),
                                                             actual),
                    ShapeError)
        return model
| 2,688 | 37.971014 | 101 | py |
inferno | inferno-master/inferno/utils/train_utils.py | """Utilities for training."""
import numpy as np
from .exceptions import assert_, FrequencyTypeError, FrequencyValueError
class AverageMeter(object):
    """Track the latest value and a running average of a quantity.

    Adapted from https://github.com/pytorch/examples/blob/master/imagenet/main.py
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all running statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val`, observed `n` times, and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
class MovingAverage(object):
    """Exponential moving average of a scalar, exposing its relative change."""

    def __init__(self, momentum=0):
        # momentum == 0 means no smoothing: `val` is just the last update.
        self.momentum = momentum
        self.val = None
        self.previous = None

    def reset(self):
        """Forget the current average (momentum and `previous` are kept)."""
        self.val = None

    def update(self, val):
        """Fold `val` into the moving average and return the new average."""
        self.previous = self.val
        if self.val is None:
            self.val = val
        else:
            self.val = self.momentum * self.val + (1 - self.momentum) * val
        return self.val

    @property
    def relative_change(self):
        """(previous - current) / previous, or None before two updates."""
        if self.val is None or self.previous is None:
            return None
        return (self.previous - self.val) / self.previous
class CLUI(object):
    """Command Line User Interface

    Decorator: wraps a (bound trainer) method so that a KeyboardInterrupt while
    it runs drops into an interactive prompt offering to save, debug with pdb,
    quit, and/or continue.  The wrapped function's return value is discarded.
    """
    def __call__(self, f):
        # `cls` is the instance the decorated method is bound to
        # (presumably the trainer -- see the pdb hint below).
        def decorated(cls, *args, **kwargs):
            try:
                f(cls, *args, **kwargs)
            except KeyboardInterrupt:
                # Several options may be combined in one answer,
                # e.g. 'wq' saves and then quits.
                options_ = input("[!] Interrupted. Please select:\n"
                                 "[w] Save\n"
                                 "[d] Debug with PDB\n"
                                 "[q] Quit\n"
                                 "[c] Continue\n"
                                 "[?] >>> ")
                save_now = 'w' in options_
                quit_now = 'q' in options_
                debug_now = 'd' in options_
                # Continuing is the default unless quitting was requested.
                continue_now = 'c' in options_ or not quit_now
                if save_now:
                    cls.save()
                if debug_now:
                    print("[*] Firing up PDB. The trainer instance might be accessible as 'cls'.")
                    import pdb
                    pdb.set_trace()
                if quit_now:
                    # NOTE(review): SystemExit is raised here, so the
                    # `continue_now` branch below never runs when quitting.
                    cls.print("Exiting.")
                    raise SystemExit
                if continue_now:
                    return
        return decorated
class Frequency(object):
    """A recurrence specification, e.g. "every 10 iterations" or "every 2 epochs".

    A frequency is a (value, units) pair, where units is 'iterations' or
    'epochs'.  A value of np.inf (set via None or 'never') never matches.
    """
    def __init__(self, value=None, units=None):
        # Private
        self._last_match_value = None
        self._value = None
        self._units = None
        # Public (assignment goes through the validating setters below)
        self.value = value
        self.units = units
    @property
    def value(self):
        return self._value
    @value.setter
    def value(self, value):
        # If value is not being set, make the frequency never match:
        # np.inf fails every modulo test in `match`.
        if value is None or value == 'never':
            value = np.inf
        self.assert_value_consistent(value)
        self._value = value
    # Default units assumed when none are given.
    UNIT_PRIORITY = 'iterations'
    # Accepted unit spellings, mapped to their canonical plural form.
    VALID_UNIT_NAME_MAPPING = {'iterations': 'iterations',
                               'iteration': 'iterations',
                               'epochs': 'epochs',
                               'epoch': 'epochs'}
    @property
    def units(self):
        return self._units
    @units.setter
    def units(self, value):
        if value is None:
            value = self.UNIT_PRIORITY
        self.assert_units_consistent(value)
        self._units = self.VALID_UNIT_NAME_MAPPING.get(value)
    def assert_value_consistent(self, value=None):
        """Validate that `value` (or self.value when omitted) is an int or np.inf."""
        # NOTE(review): `value or self.value` falls back to self.value when an
        # explicit 0 is passed (0 is falsy) -- confirm this is intended.
        value = value or self.value
        # Make sure that value is an integer or inf
        assert_(isinstance(value, (int, float)),
                "Value must be an integer or np.inf, got {} instead."
                .format(type(value).__name__),
                FrequencyTypeError)
        if isinstance(value, float):
            assert_(value == np.inf,
                    "Provided value must be numpy.inf if a float, got {}.".format(value),
                    FrequencyValueError)
    def assert_units_consistent(self, units=None):
        """Validate that `units` (or self.units when omitted) is a known unit name."""
        units = units or self.units
        # Map
        # NOTE(review): the message below formats the *mapped* value, which is
        # None whenever the assertion fires, so the offending unit string is
        # lost from the error message.
        units = self.VALID_UNIT_NAME_MAPPING.get(units)
        assert_(units is not None, "Unit '{}' not understood.".format(units),
                FrequencyValueError)
    @property
    def is_consistent(self):
        # True iff both value and units pass validation.
        try:
            self.assert_value_consistent()
            self.assert_units_consistent()
            return True
        except (FrequencyValueError, FrequencyTypeError):
            return False
    def epoch(self):
        # Fluent setter: switch units to epochs.
        self.units = 'epochs'
        return self
    def iteration(self):
        # Fluent setter: switch units to iterations.
        self.units = 'iterations'
        return self
    @property
    def by_epoch(self):
        return self.units == 'epochs'
    @property
    def by_iteration(self):
        return self.units == 'iterations'
    def every(self, value):
        # Fluent setter: set the recurrence value.
        self.value = value
        return self
    def match(self, iteration_count=None, epoch_count=None, persistent=False, match_zero=True):
        """Whether the given progress counter hits this frequency.

        With `persistent=True` a given counter value matches at most once;
        with `match_zero=False` a counter value of 0 never matches.
        """
        # Pick the counter corresponding to this frequency's units.
        match_value = {'iterations': iteration_count, 'epochs': epoch_count}.get(self.units)
        if not match_zero and match_value == 0:
            match = False
        else:
            match = match_value is not None and \
                    self.value != np.inf and \
                    match_value % self.value == 0
        if persistent and match and self._last_match_value == match_value:
            # Last matched value is the current matched value, i.e. we've matched once already,
            # and don't need to match again
            match = False
        if match:
            # Record current match value as the last known match value to maintain persistency
            self._last_match_value = match_value
        return match
    def __str__(self):
        return "{} {}".format(self.value, self.units)
    def __repr__(self):
        return "{}(value={}, units={})".format(type(self).__name__, self.value, self.units)
    @classmethod
    def from_string(cls, string):
        """Parse e.g. '10 iterations', '2 epochs', 'inf iterations', or 'never'."""
        assert_(isinstance(string, str), "`string` must be a string, got {} instead."
                .format(type(string).__name__), TypeError)
        if string == 'never':
            return cls(np.inf, 'iterations')
        else:
            value_and_unit = string.split(' ')
            assert_(len(value_and_unit) == 2,
                    "Was expecting a string 'value units' with one white-space "
                    "between 'value' and 'units'.", ValueError)
            value, unit = value_and_unit
            value = np.inf if value == 'inf' else int(value)
            return cls(value, unit)
    @classmethod
    def build_from(cls, args, priority='iterations'):
        """Coerce an int, (value, units) sequence, string or Frequency to a Frequency."""
        if isinstance(args, int):
            return cls(args, priority)
        elif isinstance(args, (tuple, list)):
            return cls(*args)
        elif isinstance(args, Frequency):
            return args
        elif isinstance(args, str):
            return cls.from_string(args)
        else:
            raise NotImplementedError
class Duration(Frequency):
    """Like frequency, but measures a duration."""

    def match(self, iteration_count=None, epoch_count=None, when_equal_return=False, **_):
        """Whether the given progress counter has reached (or passed) this duration."""
        counters = {'iterations': iteration_count, 'epochs': epoch_count}
        current = counters.get(self.units)
        assert_(current is not None,
                "Could not match duration because {} is not known.".format(self.units),
                ValueError)
        if current == self.value:
            return when_equal_return
        return current > self.value

    def compare(self, iteration_count=None, epoch_count=None):
        """Return a dict with the remaining amount in this duration's units
        (the entry for the other unit is None)."""
        counters = {'iterations': iteration_count, 'epochs': epoch_count}
        current = counters.get(self.units)
        assert_(current is not None,
                "Could not match duration because {} is not known.".format(self.units),
                ValueError)
        remaining = {'iterations': None, 'epochs': None}
        remaining[self.units] = self.value - current
        return remaining

    def __sub__(self, other):
        """Subtract two same-unit Durations, yielding a new Duration."""
        assert_(isinstance(other, Duration),
                "Object of type {} cannot be subtracted from "
                "a Duration object.".format(type(other)),
                TypeError)
        assert_(other.units == self.units,
                "The Duration objects being subtracted must have the same units.",
                ValueError)
        return Duration(value=(self.value - other.value), units=self.units)
class NoLogger(object):
    """A do-nothing stand-in for a logger.

    Lets training code call logging methods unconditionally even when no real
    logger is configured.
    """
    def __init__(self, logdir=None):
        # Kept only for interface compatibility with real loggers.
        self.logdir = logdir

    def log_value(self, *args, **kwargs):
        """Accept and ignore any arguments.

        The original signature was ``log_value(self, *kwargs)``, which (despite
        the name) only accepted positional arguments and raised TypeError on
        keyword calls such as ``log_value('loss', 0.5, step=3)``.
        """
        pass
def set_state(module, key, value):
    """Writes `key`-`value` pair to `module`'s state hook and returns the module."""
    if not hasattr(module, '_state_hooks'):
        # First write: create the hook dictionary on the module.
        setattr(module, '_state_hooks', {key: value})
    else:
        hooks = getattr(module, '_state_hooks')
        assert isinstance(hooks, dict), \
            "State hook (i.e. module._state_hooks) is not a dictionary."
        hooks[key] = value
    return module
def get_state(module, key, default=None):
    """Gets `key` from `module`'s state hooks, or `default` when absent."""
    hooks = getattr(module, '_state_hooks', {})
    return hooks.get(key, default)
| 9,484 | 31.934028 | 98 | py |
inferno | inferno-master/inferno/utils/torch_utils.py | import numpy as np
import torch
from .python_utils import delayed_keyboard_interrupt
from .exceptions import assert_, ShapeError, NotUnwrappableError
def unwrap(input_, to_cpu=True, as_numpy=False, extract_item=False):
    """Recursively convert tensors (and lists/tuples thereof) to detached CPU
    tensors, numpy arrays, or Python scalars; numbers and ndarrays pass through."""
    # Containers: unwrap each element, preserving the container type.
    # (Note: extract_item is deliberately not forwarded into containers.)
    if isinstance(input_, (list, tuple)):
        unwrapped = [unwrap(element, to_cpu=to_cpu, as_numpy=as_numpy)
                     for element in input_]
        return type(input_)(unwrapped)
    # Pass-throughs that need no unwrapping.
    if isinstance(input_, (np.ndarray, float, int)):
        return input_
    if not torch.is_tensor(input_):
        raise NotUnwrappableError("Cannot unwrap a '{}'."
                                  .format(type(input_).__name__))
    tensor = input_
    # Transfer to CPU if required (shielded from KeyboardInterrupt).
    if to_cpu:
        with delayed_keyboard_interrupt():
            tensor = tensor.cpu().detach()
    # Convert to numpy if required
    if as_numpy:
        return tensor.cpu().detach().numpy()
    if extract_item:
        try:
            return tensor.item()
        except AttributeError:
            # Very old torch versions without Tensor.item.
            return tensor[0]
    return tensor
def is_tensor(object_):
    """True for any torch tensor; also covers HalfTensor, which some torch
    versions' `torch.is_tensor` did not recognise."""
    return torch.is_tensor(object_) or isinstance(object_, (torch.HalfTensor,))


def is_label_tensor(object_):
    """True for (CPU or CUDA) LongTensors, i.e. label-typed tensors."""
    if not is_tensor(object_):
        return False
    return object_.type() in ('torch.LongTensor', 'torch.cuda.LongTensor')


def is_image_tensor(object_):
    """True for 4D tensors (treated as batched images in this codebase)."""
    return is_tensor(object_) and object_.dim() == 4


def is_volume_tensor(object_):
    """True for 5D tensors (treated as batched volumes in this codebase)."""
    return is_tensor(object_) and object_.dim() == 5


def is_image_or_volume_tensor(object_):
    """True for 4D (image) or 5D (volume) tensors."""
    return is_image_tensor(object_) or is_volume_tensor(object_)


def is_label_image_tensor(object_):
    """True for 3D label (Long) tensors."""
    return is_label_tensor(object_) and object_.dim() == 3


def is_label_volume_tensor(object_):
    """True for 4D label (Long) tensors."""
    return is_label_tensor(object_) and object_.dim() == 4


def is_label_image_or_volume_tensor(object_):
    """True for 3D or 4D label (Long) tensors."""
    return is_label_image_tensor(object_) or is_label_volume_tensor(object_)


def is_matrix_tensor(object_):
    """True for 2D tensors."""
    return is_tensor(object_) and object_.dim() == 2


def is_scalar_tensor(object_):
    """True for single-element tensors with at most one axis."""
    return is_tensor(object_) and object_.dim() <= 1 and object_.numel() == 1


def is_vector_tensor(object_):
    """True for 1D tensors with more than one element."""
    return is_tensor(object_) and object_.dim() == 1 and object_.numel() > 1
def assert_same_size(tensor_1, tensor_2):
    """Raise ShapeError unless the two tensors have identical shapes."""
    sizes_match = list(tensor_1.size()) == list(tensor_2.size())
    assert_(sizes_match,
            "Tensor sizes {} and {} do not match.".format(tensor_1.size(), tensor_2.size()),
            ShapeError)
def where(condition, if_true, if_false):
    """
    Torch equivalent of numpy.where.

    Parameters
    ----------
    condition : torch.ByteTensor or torch.cuda.ByteTensor
        Condition to check.
    if_true : torch.Tensor or torch.cuda.Tensor
        Output value if condition is true.
    if_false: torch.Tensor or torch.cuda.Tensor
        Output value if condition is false

    Returns
    -------
    torch.Tensor

    Raises
    ------
    AssertionError
        if if_true and if_false don't have the same datatype.
    """
    # noinspection PyArgumentList
    assert if_true.type() == if_false.type(), \
        "Type mismatch: {} and {}".format(if_true.data.type(), if_false.data.type())
    # Use torch.where instead of the original arithmetic trick
    # `cond * if_true + (1 - cond) * if_false`: unlike the multiplication, it
    # does not propagate NaN/inf values from the branch that was NOT selected.
    # `.bool()` accepts the documented ByteTensor condition as well.
    return torch.where(condition.bool(), if_true, if_false)
def flatten_samples(input_):
    """
    Flattens a tensor or a variable such that the channel axis is first and the
    sample axis is second. The shapes are transformed as follows:
        (N, C, H, W) --> (C, N * H * W)
        (N, C, D, H, W) --> (C, N * D * H * W)
        (N, C) --> (C, N)
    The input must be at least 2d.
    """
    assert_(input_.dim() >= 2,
            "Tensor or variable must be atleast 2D. Got one of dim {}."
            .format(input_.dim()),
            ShapeError)
    num_channels = input_.size(1)
    # Bring the channel axis to the front (NC... -> CN...), then collapse
    # everything behind it into a single axis.
    axis_order = [1, 0] + list(range(2, input_.dim()))
    permuted = input_.permute(*axis_order).contiguous()
    return permuted.view(num_channels, -1)
def clip_gradients_(parameters, mode, norm_or_value):
    """Clip gradients in place, either by total norm or element-wise by value."""
    assert_(mode in ['norm', 'value'],
            f"Mode must be 'norm' or 'value', got '{mode}' instead.",
            ValueError)
    # Dispatch to the matching torch utility; invalid modes were rejected above.
    clipper = {'norm': torch.nn.utils.clip_grad_norm_,
               'value': torch.nn.utils.clip_grad_value_}[mode]
    clipper(parameters, norm_or_value)
| 4,749 | 30.045752 | 97 | py |
inferno | inferno-master/inferno/utils/test_utils.py | import torch
from torch.utils.data.dataset import TensorDataset
from torch.utils.data.dataloader import DataLoader
import numpy as np
def generate_random_data(num_samples, shape, num_classes, hardness=0.3, dtype=None):
    """Generate a random dataset with a given hardness and number of classes.

    Each sample is Gaussian noise centred on its integer class label; larger
    `hardness` means a smaller spread, i.e. an easier classification task.
    """
    dataset_input = np.zeros((num_samples,) + shape, dtype=dtype)
    dataset_target = np.random.randint(num_classes, size=num_samples)
    for sample_num, label in enumerate(dataset_target):
        dataset_input[sample_num] = np.random.normal(loc=label,
                                                     scale=(1 - hardness),
                                                     size=shape)
    return dataset_input, dataset_target
def generate_random_dataset(num_samples, shape, num_classes, hardness=0.3, dtype=None):
    """Generate a random dataset with a given hardness and number of classes."""
    # Build numpy arrays, then wrap them as a torch TensorDataset.
    inputs, targets = generate_random_data(num_samples, shape, num_classes,
                                           hardness=hardness, dtype=dtype)
    return TensorDataset(torch.from_numpy(inputs),
                         torch.from_numpy(targets))
def generate_random_dataloader(num_samples, shape, num_classes, hardness=0.3, dtype=None,
                               batch_size=1, shuffle=False, num_workers=0, pin_memory=False):
    """Generate a loader with a random dataset of given hardness and number of classes."""
    # Build the random dataset, then hand it to a DataLoader with the requested
    # batching/worker configuration.
    dataset = generate_random_dataset(num_samples, shape, num_classes,
                                      hardness=hardness, dtype=dtype)
    return DataLoader(dataset,
                      batch_size=batch_size,
                      shuffle=shuffle,
                      num_workers=num_workers,
                      pin_memory=pin_memory)
| 1,901 | 50.405405 | 93 | py |
inferno | inferno-master/inferno/trainers/basic.py | from datetime import datetime
from inspect import signature
import os
import shutil
# These are fetched from globals, they're not unused
# noinspection PyUnresolvedReferences
import dill
# noinspection PyUnresolvedReferences
import pickle
import torch
from numpy import inf
from torch.utils.data import DataLoader
from torch.nn.parallel.data_parallel import data_parallel
from .callbacks.logging.base import Logger
from .callbacks.logging import get_logger
from ..utils import train_utils as tu
from ..utils import python_utils as pyu
from ..utils import torch_utils as thu
from ..extensions import metrics
from ..extensions import optimizers
from ..extensions import criteria
from .callbacks import CallbackEngine
from .callbacks import Console
from ..utils.exceptions import assert_, NotSetError, NotTorchModuleError, DeviceError
# NOTE for distributed training, we might also need
# from apex.parallel import DistributedDataParallel as DDP
# but I don't know where exactly to put it.
try:
from apex import amp
except ImportError:
amp = None
class Trainer(object):
"""A basic trainer.
Given a torch model, this class encapsulates the training and validation loops,
checkpoint creation, logging, CPU <-> GPU transfers and managing data-loaders.
In addition, this class interacts with the callback engine (found at
`inferno.trainers.callbacks.base.CallbackEngine`), which manages callbacks at
certain preset events.
Notes
-----
Logging is implemented as a special callback, in the sense that it's jointly
managed by the this class and the callback engine. This is primarily because
general callbacks are not intended to be serializable, but not being able to
serialize the logger is a nuisance.
"""
def __init__(self, model=None):
"""
Parameters
----------
model : torch.nn.Module
Torch model to bind to.
"""
# Privates
# Core
self._model = None
self._optimizer = None
self._criterion = None
self._retain_graph = False
self._backprop_every = 1
# Metric evaluation
self._metric = None
self._evaluate_metric_every = None
self._metric_evaluation_externally_triggered = False
self._last_metric_evaluated_at_epoch = 0
# Logging
self._logger = None
self._last_logged = {}
self._log_directory = {}
# Data logistics
self._loaders = {}
self._loader_iters = {}
self._loader_specs = {}
# Iteration and epoch book-keeping
self._iteration_count = 0
self._epoch_count = 0
self._batch_count = 0
self._current_mode = 'train'
# GPU and dtype business
self._use_cuda = False
self._dtype = 'float'
self._devices = None
self._base_device_ordinal = None
# Validation
self._save_at_best_validation_score = False
self._best_validation_score = None
self._is_iteration_with_best_validation_score = False
self._validate_every = None
self._num_validation_iterations = None
self._target_batch_dim = 0
self._validation_criterion = None
# We should exclude the zero-th epoch from validation
self._last_validated_at_epoch = 0
self._last_validated_at_iteration = 0
# This is to allow a callback to trigger a validation by setting
# trainer.validate_now = True
self._validation_externally_triggered = False
# Checkpointing
self._save_every = None
self._save_to_directory = None
self._pickle_module = 'pickle'
# Defaults for file names
self._checkpoint_filename = 'checkpoint.pytorch'
self._best_checkpoint_filename = 'best_checkpoint.pytorch'
# Nothing to save at epoch 0
self._last_saved_at_epoch = 0
# This is to allow a callback to trigger a save by setting trainer.save_now = True
self._save_externally_triggered = False
# Stopping conditions
self._max_num_iterations = None
self._max_num_epochs = None
# Callbacks and states
self._callback_engine = CallbackEngine().bind_trainer(self)
self._state = {}
# Print console
self._console = Console()
# Train with mixed precision, only works
# if we have apex
self._mixed_precision = False
self._apex_opt_level = 'O1'
# Public
if model is not None:
self.model = model
@property
def mixed_precision(self):
return self._mixed_precision
# this needs to be called after model and optimizer are set
@mixed_precision.setter
def mixed_precision(self, mp):
if mp:
assert_(amp is not None, "Cannot use mixed precision training without apex library", RuntimeError)
assert_(self.model is not None and self._optimizer is not None,
"Model and optimizer need to be set before activating mixed precision", RuntimeError)
# in order to support BCE loss
amp.register_float_function(torch, 'sigmoid')
# For now, we don't allow to set 'keep_batchnorm' and 'loss_scale'
self.model, self._optimizer = amp.initialize(self.model, self._optimizer,
opt_level=self._apex_opt_level,
keep_batchnorm_fp32=None)
self._mixed_precision = mp
@property
def apex_opt_level(self):
return self._apex_opt_level
@apex_opt_level.setter
def apex_opt_level(self, opt_level):
assert_(opt_level in ('O0', 'O1', 'O2', 'O3'),
"Invalid optimization level", ValueError)
self._apex_opt_level = opt_level
@property
def console(self):
"""Get the current console."""
return self._console
def set_console(self, console):
assert_(isinstance(console, Console), "`console` must be a Console object.", TypeError)
self._console = console
return self
def quiet(self):
self.console.toggle_progress(False)
return self
@property
def callbacks(self):
"""Gets the callback engine."""
return self._callback_engine
def register_callback(self, callback, trigger='auto', **callback_kwargs):
"""
Registers a callback with the internal callback engine.
Parameters
----------
callback : type or callable
Callback to register.
trigger : str
Specify the event that triggers the callback. Leave at 'auto' to have the
callback-engine figure out the triggers. See
`inferno.training.callbacks.base.CallbackEngine` documentation for more on this.
callback_kwargs : dict
If `callback` is a type, initialize an instance with these keywords to the
__init__ method.
Returns
-------
Trainer
self.
"""
if isinstance(callback, type):
callback = callback(**callback_kwargs)
self._callback_engine.register_callback(callback, trigger=trigger)
return self
@property
def model(self):
"""Gets the model."""
assert_(self._model is not None, "Model is not defined yet.", NotSetError)
return self._model
@model.setter
def model(self, value):
self.bind_model(value)
def bind_model(self, model):
"""
Binds a model to the trainer. Equivalent to setting model.
Parameters
----------
model : torch.nn.Module
Model to bind.
Returns
-------
Trainer
self.
"""
assert_(isinstance(model, torch.nn.Module),
"Model must be a torch.nn.Module.",
NotTorchModuleError)
self._model = model
# Transfer model to GPU if required
if self._use_cuda:
self._model.cuda()
return self
@property
def model_is_defined(self):
return self._model is not None
@property
def retain_graph(self):
return self._retain_graph
@retain_graph.setter
def retain_graph(self, value):
assert isinstance(value, bool)
self._retain_graph = value
@property
def backprop_every(self):
return self._backprop_every
@backprop_every.setter
def backprop_every(self, value):
self.set_backprop_every(value)
def set_backprop_every(self, num_steps):
"""
Set frequency of backpropagation.
To use in cases of small batch sizes.
Parameters
----------
num_steps : number of steps (iterations/batches) to backprop after
Returns
-------
Trainer
self
"""
assert isinstance(num_steps, int)
self._backprop_every = num_steps
return self
@property
def optimizer(self):
"""Gets the optimizer."""
assert_(self._optimizer is not None, "Optimizer is not set yet.", NotSetError)
return self._optimizer
@optimizer.setter
def optimizer(self, value):
if isinstance(value, str) or callable(value):
self.build_optimizer(value)
elif isinstance(value, dict):
self.build_optimizer(**value)
else:
raise NotImplementedError
@property
def optimizer_is_defined(self):
return self._optimizer is not None
def build_optimizer(self, method, param_groups=None, **kwargs):
"""
Builds the optimizer for training.
Parameters
----------
method : str or callable or torch.optim.Optimizer
Name of the optimizer when str, handle to the optimizer class when callable,
or a torch.optim.Optimizer instance. If a name is provided, this method looks
for the optimizer in `torch.optim` module first and in
inferno.extensions.optimizers second.
param_groups : list of dict
Specifies the parameter group. Defaults to model.parameters() if None.
kwargs : dict
Keyword arguments to the optimizer.
Returns
-------
Trainer
self.
Raises
------
AssertionError
if optimizer is not found
NotImplementedError
if method is not str or callable.
"""
if isinstance(method, str):
optimizer_class = getattr(torch.optim, method, None)
if optimizer_class is None:
# Look for optimizer in extensions
optimizer_class = getattr(optimizers, method, None)
assert optimizer_class is not None, "Optimizer {} not found.".format(method)
elif callable(method) and isinstance(method, type):
optimizer_class = method
elif isinstance(method, torch.optim.Optimizer):
self._optimizer = method
return self
else:
raise NotImplementedError
param_groups = self.model.parameters() if param_groups is None else param_groups
self._optimizer = optimizer_class(param_groups, **kwargs)
return self
@property
def criterion(self):
"""Gets the loss criterion."""
assert_(self._criterion is not None, "Criterion is not set yet.", NotSetError)
return self._criterion
@criterion.setter
def criterion(self, value):
if isinstance(value, str) or callable(value):
self.build_criterion(value)
elif isinstance(value, dict):
self.build_criterion(**value)
else:
raise RuntimeError(f"Criterion can either be set to a string, callable or a dict. "
f"Got {type(value).__name__} instead.")
def build_criterion(self, method, **kwargs):
"""
Builds the loss criterion for training.
Parameters
----------
method : str or callable or torch.nn.Module
Name of the criterion when str, criterion class when callable, or a
torch.nn.Module instance. If a name is provided, this method looks
for the criterion in `torch.nn`.
kwargs : dict
Keyword arguments to the criterion class' constructor if applicable.
Returns
-------
Trainer
self.
Raises
------
AssertionError
if criterion is not found.
NotImplementedError
if method is neither a str nor a callable.
"""
if isinstance(method, str):
# Look for criteria in torch
criterion_class = getattr(torch.nn, method, None)
if criterion_class is None:
# Look for it in extensions
criterion_class = getattr(criteria, method, None)
assert criterion_class is not None, "Criterion {} not found.".format(method)
elif callable(method) and isinstance(method, type):
criterion_class = method
elif isinstance(method, torch.nn.Module):
self._criterion = method
return self
else:
raise NotImplementedError
self._criterion = criterion_class(**kwargs)
# Transfer criterion to GPU if required. This is necessary for e.g. weighted loss,
# where the weight is registered as a buffer.
# The criterion is to be cuda'ed only if the model is on CUDA (self._use_cuda) and
# the base_device is not CPU (ordinal -1).
if hasattr(self, '_base_device_ordinal'):
# This is to not break old checkpoints
base_device_ordinal = self._base_device_ordinal
else:
base_device_ordinal = None
if self._use_cuda and base_device_ordinal != 1:
self._criterion.cuda()
return self
@property
def criterion_is_defined(self):
return self._criterion is not None
@property
def validation_criterion(self):
if self._validation_criterion is None:
return self.criterion
else:
return self._validation_criterion
@validation_criterion.setter
def validation_criterion(self, value):
if isinstance(value, str) or callable(value):
self.build_validation_criterion(value)
elif isinstance(value, dict):
self.build_validation_criterion(**value)
else:
raise RuntimeError(f"Validation criterion can either be set to a string, callable "
f"or a dict. Got {type(value).__name__} instead.")
def build_validation_criterion(self, method, **kwargs):
"""
Builds the loss criterion for validation.
Parameters
----------
method : str or callable or torch.nn.Module
Name of the criterion when str, criterion class when callable, or a
torch.nn.Module instance. If a name is provided, this method looks
for the criterion in `torch.nn`.
kwargs : dict
Keyword arguments to the criterion class' constructor if applicable.
Returns
-------
Trainer
self.
Raises
------
AssertionError
if criterion is not found.
NotImplementedError
if method is neither a str nor a callable.
"""
if isinstance(method, str):
# Look for criteria in torch
criterion_class = getattr(torch.nn, method, None)
if criterion_class is None:
# Look for it in extensions
criterion_class = getattr(criteria, method, None)
assert criterion_class is not None, "Criterion {} not found.".format(method)
elif callable(method) and isinstance(method, type):
criterion_class = method
elif isinstance(method, torch.nn.Module):
self._validation_criterion = method
return self
else:
raise NotImplementedError
self._validation_criterion = criterion_class(**kwargs)
# Transfer criterion to GPU if required. This is necessary for e.g. weighted loss,
# where the weight is registered as a buffer.
# The criterion is to be cuda'ed only if the model is on CUDA (self._use_cuda) and
# the base_device is not CPU (ordinal -1).
if hasattr(self, '_base_device_ordinal'):
# This is to not break old checkpoints
base_device_ordinal = self._base_device_ordinal
else:
base_device_ordinal = None
if self._use_cuda and base_device_ordinal != 1:
self._validation_criterion.cuda()
return self
def validation_criterion_is_train_criterion(self, yes=True):
if yes:
# This will cause the property to return train criterion
self._validation_criterion = None
return self
@property
def validation_criterion_is_defined(self):
return self._validation_criterion is not None
@property
def metric(self):
    """The evaluation metric (raises NotSetError if unset)."""
    assert_(self._metric is not None, "Metric is not set yet.", NotSetError)
    return self._metric

@metric.setter
def metric(self, value):
    # Strings and callables are routed through `build_metric`;
    # anything else is unsupported.
    if isinstance(value, str) or callable(value):
        self.build_metric(value)
    else:
        raise NotImplementedError
@property
def evaluating_metric_every(self):
    """Frequency at which the metric is evaluated during training (may be None)."""
    return self._evaluate_metric_every

def evaluate_metric_every(self, frequency):
    """
    Set frequency of metric evaluation __during training__ (and not during validation).

    Parameters
    ----------
    frequency : inferno.utils.train_utils.Frequency or str or tuple or list or int
        Metric evaluation frequency, e.g. '10 iterations', (1, 'epoch'), or an
        int (interpreted as a number of iterations).

    Returns
    -------
    Trainer
        self
    """
    parsed_frequency = tu.Frequency.build_from(frequency, priority='iterations')
    assert parsed_frequency.is_consistent
    self._evaluate_metric_every = parsed_frequency
    return self
@property
def evaluate_metric_now(self):
    """Whether the metric should be evaluated at this point in training."""
    if self._metric_evaluation_externally_triggered:
        # Consume the external trigger before reporting it
        self._metric_evaluation_externally_triggered = False
        return True
    frequency = self._evaluate_metric_every
    if frequency is None:
        # No frequency configured: evaluate at every opportunity
        return True
    if frequency.by_epoch:
        # At most one evaluation per epoch
        if self._last_metric_evaluated_at_epoch == self._epoch_count:
            return False
        return frequency.match(epoch_count=self._epoch_count)
    # Frequency is defined and counts iterations
    return frequency.match(iteration_count=self._iteration_count)

@evaluate_metric_now.setter
def evaluate_metric_now(self, value):
    # External trigger: set True to force a metric evaluation at the next check.
    self._metric_evaluation_externally_triggered = bool(value)
def build_metric(self, method, **kwargs):
    """
    Builds the metric for evaluation.

    Parameters
    ----------
    method : callable or str
        Name of the metric when string, metric class or a callable object
        when callable. If a name is provided, this method looks for the metric in
        `inferno.extensions.metrics`.
    kwargs : dict
        Keyword arguments to the metric class' constructor, if applicable.

    Returns
    -------
    Trainer
        self.

    Raises
    ------
    AssertionError: if the metric is not found.
    """
    if isinstance(method, str):
        assert hasattr(metrics, method), \
            "Could not find the metric '{}'.".format(method)
        self._metric = getattr(metrics, method)(**kwargs)
    elif callable(method):
        # Classes get instantiated with kwargs; ready-made callables are adopted as-is
        self._metric = method(**kwargs) if isinstance(method, type) else method
    else:
        raise NotImplementedError
    return self
@property
def metric_is_defined(self):
    """True as soon as a metric has been built or assigned."""
    return self._metric is not None
def _set_component_modes(self, mode_name):
    # Shared machinery for `eval_mode`/`train_mode`: flips the model and,
    # where they are nn.Modules, the criterion and the metric.
    self._current_mode = mode_name
    getattr(self.model, mode_name)()
    if self.criterion_is_defined and isinstance(self.criterion, torch.nn.Module):
        getattr(self.criterion, mode_name)()
    if self.metric_is_defined and isinstance(self.metric, torch.nn.Module):
        getattr(self.metric, mode_name)()
    return self

def eval_mode(self):
    """Set model, criterion and metric to eval mode"""
    return self._set_component_modes('eval')

def train_mode(self):
    """Set model, criterion and metric to train mode"""
    return self._set_component_modes('train')
@property
def train_loader(self):
    """DataLoader bound for training (must have been bound beforehand)."""
    loader = self._loaders.get('train')
    assert loader is not None
    return loader

@train_loader.setter
def train_loader(self, value):
    assert isinstance(value, DataLoader)
    self._loaders['train'] = value

@property
def validate_loader(self):
    """DataLoader bound for validation (must have been bound beforehand)."""
    loader = self._loaders.get('validate')
    assert loader is not None
    return loader

@validate_loader.setter
def validate_loader(self, value):
    assert isinstance(value, DataLoader)
    self._loaders['validate'] = value
@property
def logger(self):
    """The attached logger object (may be None)."""
    return self._logger

@logger.setter
def logger(self, value):
    # Dicts are unpacked as keyword arguments for `build_logger`;
    # everything else is passed through as the logger itself.
    if isinstance(value, dict):
        self.build_logger(**value)
    else:
        self.build_logger(logger=value)

@property
def log_directory(self):
    """Directory the logger writes to (may be None)."""
    return self._log_directory

@log_directory.setter
def log_directory(self, value):
    # Setting to None is a silent no-op.
    if value is not None:
        self.set_log_directory(value)
@property
def pickle_module(self):
    """The module object used for (un)pickling, resolved by its stored name."""
    # The module must already be imported into this file's global namespace.
    resolved = globals().get(self._pickle_module, None)
    assert_(resolved is not None, "Pickle module not found!", ModuleNotFoundError)
    return resolved

# Whitelist of pickling backends that `pickle_module` may name.
_ALLOWED_PICKLE_MODULES = {'pickle', 'dill'}

@pickle_module.setter
def pickle_module(self, value):
    assert_(isinstance(value, str), "`pickle_module` must be set to a string.", TypeError)
    assert_(value in self._ALLOWED_PICKLE_MODULES,
            f"Pickle module must be one of {self._ALLOWED_PICKLE_MODULES}, "
            f"got {value} instead.", ValueError)
    self._pickle_module = value
@property
def saving_every(self):
    """Gets the frequency at which checkpoints are made."""
    return self._save_every

def save_at_best_validation_score(self, yes=True):
    """Sets whether to save when the validation score is the best seen."""
    self._save_at_best_validation_score = yes
    return self
@property
def save_now(self):
    """Whether a checkpoint should be written at this point in training.

    True when externally triggered (the trigger is consumed), when the current
    iteration achieved the best validation score (if that behaviour is enabled
    via `save_at_best_validation_score`), or when the configured save frequency
    matches the current epoch / iteration count.
    """
    if self._save_externally_triggered:
        # Reset trigger
        self._save_externally_triggered = False
        # Save if externally triggered
        return True
    elif self._save_at_best_validation_score and self._is_iteration_with_best_validation_score:
        return True
    else:
        # Check if we're saving by epoch
        if self._save_every is not None and self._save_every.by_epoch:
            # Don't save if we've already saved once this epoch
            if self._epoch_count == self._last_saved_at_epoch:
                return False
            else:
                # If we haven't saved this epoch, check if we should
                return self._save_every.match(epoch_count=self._epoch_count)
        else:
            # We're saving by iterations
            # (when no frequency is configured at all, this evaluates to False)
            return self._save_every is not None and \
                self._save_every.match(iteration_count=self._iteration_count)

@save_now.setter
def save_now(self, value):
    """Can be set to true to trigger a checkpoint creation."""
    self._save_externally_triggered = bool(value)
def save_every(self, frequency, to_directory=None,
               checkpoint_filename=None, best_checkpoint_filename=None):
    """
    Set checkpoint creation frequency.

    Parameters
    ----------
    frequency : inferno.utils.train_utils.Frequency or tuple or str
        Checkpoint creation frequency. Examples: '100 iterations' or '1 epochs'.
    to_directory : str
        Directory where the checkpoints are to be created.
    checkpoint_filename : str
        Name of the checkpoint file.
    best_checkpoint_filename : str
        Name of the best checkpoint file.

    Returns
    -------
    Trainer
        self.
    """
    parsed_frequency = tu.Frequency.build_from(frequency, priority='iterations')
    assert parsed_frequency.is_consistent
    self._save_every = parsed_frequency
    # Directory and filename configuration is delegated:
    self.save_to_directory(to_directory, checkpoint_filename, best_checkpoint_filename)
    return self
@property
def save_directory(self):
    """Directory checkpoints are written to (may be None)."""
    return self._save_to_directory

def save_to_directory(self, to_directory=None, checkpoint_filename=None,
                      best_checkpoint_filename=None):
    """Configure the checkpoint directory and file names.

    Arguments left as `None` keep their current settings. The directory is
    created if it does not exist yet.
    """
    if to_directory is not None:
        assert_(isinstance(to_directory, str), exception_type=TypeError)
        if os.path.exists(to_directory):
            assert os.path.isdir(to_directory)
        else:
            os.makedirs(to_directory)
        self._save_to_directory = to_directory
    if checkpoint_filename is not None:
        assert_(isinstance(checkpoint_filename, str), exception_type=TypeError)
        self._checkpoint_filename = checkpoint_filename
    if best_checkpoint_filename is not None:
        assert_(isinstance(best_checkpoint_filename, str), exception_type=TypeError)
        self._best_checkpoint_filename = best_checkpoint_filename
    return self
@property
def validating_every(self):
    # Frequency object controlling how often validation runs (may be None).
    return self._validate_every

@property
def validate_now(self):
    """Whether validation should run at this point in training.

    True when externally triggered (the trigger is consumed); otherwise
    decided by the configured validation frequency. Validation never runs
    twice for the same epoch / iteration.
    """
    if self._validation_externally_triggered:
        # Reset trigger
        self._validation_externally_triggered = False
        return True
    elif self._validate_every is not None and self._validate_every.by_epoch:
        # Don't validate if we've done so already this epoch
        if self._last_validated_at_epoch == self._epoch_count:
            return False
        else:
            # If we haven't validated this epoch, check if we should
            return self._validate_every.match(epoch_count=self._epoch_count,
                                              match_zero=False)
    else:
        # Don't validate if we've done once already this iteration
        if self._last_validated_at_iteration == self._iteration_count:
            return False
        else:
            # If we haven't validated this iteration, check if we should. The `match_zero` is
            # redundant, but we'll leave it on anyway.
            # (the `is not None` guard also makes this False when no frequency is set)
            return self._validate_every is not None and \
                self._validate_every.match(iteration_count=self._iteration_count,
                                           match_zero=False)

@validate_now.setter
def validate_now(self, value):
    # External trigger: set True to force validation at the next check.
    self._validation_externally_triggered = bool(value)
def validate_every(self, frequency, for_num_iterations=None):
    """
    Set validation frequency.

    Parameters
    ----------
    frequency : inferno.utils.train_utils.Frequency or str or tuple or list or int
        Validation frequency. If str, it could be (say) '10 iterations' or '1 epoch'.
        If tuple (or list), it could be (10, 'iterations') or (1, 'epoch'). If int
        (say 10), it's interpreted as (10, 'iterations').
    for_num_iterations : int
        Number of iterations to validate for. If not set, the model is validated on
        the entire dataset (i.e. till the data loader is exhausted).

    Returns
    -------
    Trainer
        self
    """
    parsed_frequency = tu.Frequency.build_from(frequency, priority='iterations')
    assert parsed_frequency.is_consistent
    self._validate_every = parsed_frequency
    self._num_validation_iterations = for_num_iterations
    return self
@property
def iteration_count(self):
    """Global number of training iterations performed so far."""
    return self._iteration_count

@property
def epoch_count(self):
    """Number of completed epochs."""
    return self._epoch_count

@property
def target_batch_dim(self):
    """Index of the batch dimension in target tensors (0 or 1)."""
    return self._target_batch_dim

@target_batch_dim.setter
def target_batch_dim(self, value):
    assert_(value in [0, 1],
            "target_batch_dim must be either 0 or 1, got {value} instead.".format(value=value),
            ValueError)
    self._target_batch_dim = value

def set_target_batch_dim(self, value):
    """Fluent alias for the `target_batch_dim` setter; returns self."""
    self.target_batch_dim = value
    return self
def build_logger(self, logger=None, log_directory=None, **kwargs):
    """
    Build the logger.

    Parameters
    ----------
    logger : inferno.trainers.callbacks.logging.base.Logger or str or type
        Must either be a Logger object or the name of a logger or the class of a logger.
    log_directory : str
        Path to the directory where the log files are to be stored.
    kwargs : dict
        Keyword arguments to the logger class.

    Returns
    -------
    Trainer
        self
    """
    if logger is None:
        # Nothing to build; only the directory may need updating below.
        pass
    elif isinstance(logger, Logger):
        # Ready-made logger: adopt and register it.
        self._logger = logger
        self.callbacks.register_callback(self._logger)
    elif isinstance(logger, str):
        # Look the logger class up by name and instantiate it.
        self._logger = get_logger(logger)(**kwargs)
        self.callbacks.register_callback(self._logger)
    elif callable(logger):
        # A logger class (or factory): instantiate with the directory passed in.
        kwargs.update({'log_directory': log_directory})
        self._logger = logger(**kwargs)
        self.callbacks.register_callback(self._logger)
    else:
        raise NotImplementedError
    if log_directory is not None:
        self.set_log_directory(log_directory)
    return self
def set_log_directory(self, log_directory):
    """
    Set the directory where the log files are to be stored.

    Parameters
    ----------
    log_directory : str
        Directory where the log files are to be stored.

    Returns
    -------
    Trainer
        self
    """
    self._log_directory = log_directory
    # Keep an already-built logger in sync with the new location
    if self._logger is not None:
        self._logger.set_log_directory(log_directory)
    return self
# States that are fetched dynamically from the trainer object via properties are
# dynamic states. Such states can not be updated.
# The following dictionary maps state keys to the corresponding trainer attribute.
DYNAMIC_STATES = {'learning_rate': 'current_learning_rate'}

def update_state(self, key, value):
    """Write `value` to the state dict under `key`; dynamic states are read-only."""
    assert key not in self.DYNAMIC_STATES, \
        "State at key '{}' cannot be updated because it's dynamic.".format(key)
    self._state[key] = value
    return self
def update_state_from_dictionary(self, dictionary):
    """Bulk-update the state dict, unwrapping tensors/variables first."""
    unwrapped = {state_key: thu.unwrap(state)
                 for state_key, state in dictionary.items()}
    self._state.update(unwrapped)

def update_state_from_model_state_hooks(self):
    """Pull state from the model's `_state_hooks` dict, if it defines one."""
    state_hooks = getattr(self.model, '_state_hooks', None)
    if isinstance(state_hooks, dict):
        self.update_state_from_dictionary(state_hooks)
def get_state(self, key, default=None):
    """Read a state value; dynamic states are resolved via trainer attributes."""
    if key in self.DYNAMIC_STATES:
        return getattr(self, self.DYNAMIC_STATES.get(key), default)
    return self._state.get(key, default)

@property
def current_learning_rate(self):
    """Dynamic state: the optimizer's current learning rate(s)."""
    return self.get_current_learning_rate()
def get_current_learning_rate(self):
    """
    Gets the current learning rate.

    Returns
    -------
    list or float
        List of learning rates if there are multiple parameter groups, or a float
        if there's just one.
    """
    rates = [group.get('lr', -1.) for group in self.optimizer.param_groups]
    # Tensor-valued rates are reduced to their first (scalar) entry
    rates = [rate[0] if thu.is_tensor(rate) else rate for rate in rates]
    return pyu.from_iterable(rates)
def to(self, device):
    """
    Send trainer to device.

    Parameters
    ----------
    device : string or torch.device
        Target device where trainer/model should be sent to.

    Returns
    -------
    Trainer
        self

    Raises
    ------
    NotImplementedError
        if `device` is neither 'cuda', 'cpu' nor a torch.device.
    """
    if device == 'cuda':
        return self.cuda()
    elif device == 'cpu':
        return self.cpu()
    elif isinstance(device, torch.device):
        # BUGFIX: propagate the return value so `to` stays chainable like
        # `cuda()`/`cpu()`; previously this branch implicitly returned None.
        # (Also: `torch.device` directly instead of the redundant `torch.torch.device`.)
        return self.to(device.type)
    else:
        raise NotImplementedError("Can not send trainer to device", device)
def cuda(self, devices=None, base_device=None):
    """
    Train on the GPU.

    Parameters
    ----------
    devices : list
        Specify the ordinals of the devices to use for dataparallel training.
    base_device : {'cpu', 'cuda'}
        When using data-parallel training, specify where the result tensors
        are collected. If 'cuda', the results are collected in `devices[0]`.

    Returns
    -------
    Trainer
        self
    """
    # Validate base_device
    assert_(base_device in [None, 'cpu', 'cuda'],
            "`base_device` must either be 'cpu' or 'cuda', got {} instead."
            .format(base_device),
            DeviceError)
    if isinstance(devices, int) or (isinstance(devices, (list, tuple)) and len(devices) == 1):
        # No data-parallelism, make sure base_device is not CPU
        assert_(base_device != 'cpu',
                "Without dataparallelism, `base_device` cannot be 'cpu'.",
                DeviceError)
    # Encode the base device as an ordinal: -1 means CPU, None means the default
    # CUDA output device (devices[0] under data-parallelism).
    self._base_device_ordinal = {None: None, 'cpu': -1, 'cuda': None}.get(base_device)
    # Move model to CUDA
    if self.model_is_defined:
        self.model.cuda()
    # Move criterion to cuda if base device ordinal is not -1 (i.e. CPU)
    # (the criterion is evaluated on the base device)
    if self.criterion_is_defined and self._base_device_ordinal != -1:
        self.criterion.cuda()
    elif self.criterion_is_defined and self._base_device_ordinal == -1:
        # Criterion is evaluated on the CPU, make sure that's where it lives
        self.criterion.cpu()
    self._use_cuda = True
    self._devices = devices
    return self
def cpu(self):
    """
    Train on the CPU.

    Returns
    -------
    Trainer
        self
    """
    # Move whatever components exist back to host memory
    if self.model_is_defined:
        self.model.cpu()
    if self.criterion_is_defined:
        self.criterion.cpu()
    # Drop any CUDA/device configuration
    self._use_cuda = False
    self._devices = None
    return self
def is_cuda(self):
    """Returns whether using GPU for training."""
    return self._use_cuda

def to_device(self, objects):
    """Recursively move `objects` (object or nested list/tuple) to the active device."""
    if isinstance(objects, (list, tuple)):
        # Preserve the container type while recursing
        return type(objects)([self.to_device(element) for element in objects])
    return objects.cuda() if self._use_cuda else objects
def apply_model(self, *inputs):
    """Run the model on `inputs`, via data-parallelism when devices are configured."""
    # Old checkpoints may predate the `_base_device_ordinal` attribute
    base_device_ordinal = getattr(self, '_base_device_ordinal', None)
    if self._devices is None:
        return self.model(*inputs)
    return data_parallel(self.model, inputs, list(self._devices),
                         output_device=base_device_ordinal)
def cast(self, objects):
    """Cast float tensors to the trainer precision, recursing through containers."""
    if isinstance(objects, (list, tuple)):
        return type(objects)([self.cast(element) for element in objects])
    # Only the float tensor types are cast; ints (and everything else) pass through
    if objects.__class__.__name__ in ['HalfTensor', 'FloatTensor', 'DoubleTensor']:
        cast_fn = getattr(objects, self._dtype, None)
        if cast_fn is not None:
            return cast_fn()
    return objects
def set_precision(self, dtype):
    """
    Set training precision.

    Parameters
    ----------
    dtype : {'double', 'float', 'half'}
        Training precision.

    Returns
    -------
    Trainer
        self
    """
    assert dtype in ['double', 'float', 'half']
    self._dtype = dtype
    # Convert the model weights to the requested precision
    self._model = getattr(self._model, dtype)()
    return self

@property
def dtype(self):
    """Training precision as a string ('double', 'float' or 'half')."""
    return self._dtype

@dtype.setter
def dtype(self, value):
    self.set_precision(value)
def bind_loader(self, name, loader, num_inputs=None, num_targets=1):
    """
    Bind a data loader to the trainer.

    Parameters
    ----------
    name : {'train', 'validate', 'test'}
        Name of the loader, i.e. what it should be used for.
    loader : torch.utils.data.DataLoader
        DataLoader object.
    num_inputs : int
        Number of input tensors from the `loader`.
    num_targets : int
        Number of target tensors from the `loader`.

    Returns
    -------
    Trainer
        self

    Raises
    ------
    KeyError
        if name is invalid.
    TypeError
        if loader is not a DataLoader instance.
    """
    assert_(name in ['train', 'validate', 'test'],
            "`name` must be one of ['train', 'validate', 'test']. "
            "Got {} instead.".format(name),
            KeyError)
    assert_(isinstance(loader, DataLoader),
            "`loader` must be a DataLoader object. "
            "Got {} instead.".format(type(loader).__name__),
            TypeError)
    # Check to see if the loader is actually new. This should usually be True.
    is_new_loader = loader is not self._loaders.get(name)
    self._loaders.update({name: loader})
    # We also need to account for the case when a loader is being replaced. When this happens,
    # the old DataLoaderIter might still have processes running, which we need to kill.
    if is_new_loader and name in self._loader_iters:
        # This is when the previous loader already has a DataLoaderIter running.
        # The DataLoaderIter implements a __del__ method, which shuts down workers.
        del self._loader_iters[name]
    # Trainers loaded from pickle files might not have '_loader_specs', therefore:
    if not hasattr(self, '_loader_specs'):
        setattr(self, '_loader_specs', {})
    # Record how many of the batch's tensors are inputs vs. targets
    # (used by `verify_batch` / `split_batch`)
    self._loader_specs.update({name: {'num_inputs': num_inputs,
                                      'num_targets': num_targets}})
    return self
def get_loader_specs(self, name):
    """Return the {'num_inputs', 'num_targets'} spec recorded for loader `name`."""
    known_names = self._loader_specs.keys()
    assert name in known_names, \
        "Could not find specs about loader '{}'. Valid loader names are: {}" \
        .format(name, set(known_names))
    return self._loader_specs.get(name)
def fetch_next_batch(self, from_loader='train', restart_exhausted_generators=True,
                     update_batch_count=True, update_epoch_count_if_generator_exhausted=True):
    """Pull the next batch from the named loader.

    When the loader's iterator is exhausted and `restart_exhausted_generators`
    is True, the iterator is rebuilt (optionally advancing the epoch counter)
    and the fetch is retried exactly once; otherwise the StopIteration is
    re-raised to the caller.
    """
    # Check if the iterator is built
    if from_loader not in self._loader_iters:
        self._loader_iters.update({from_loader: self._loaders[from_loader].__iter__()})
    # Try to fetch from iterator
    try:
        # Fetch
        next_batch = next(self._loader_iters[from_loader])
        # Verify
        self.verify_batch(next_batch, from_loader)
        if update_batch_count:
            self._batch_count += 1
        return next_batch
    except StopIteration:
        # This if clause prevents infinite recursion if the loader is empty
        if restart_exhausted_generators:
            self._loader_iters.update({from_loader: self._loaders[from_loader].__iter__()})
            # Update epoch count
            if update_epoch_count_if_generator_exhausted:
                self.next_epoch()
            # Recurse once with restarting disabled, so an empty loader raises
            return self.fetch_next_batch(from_loader, restart_exhausted_generators=False,
                                         update_batch_count=update_batch_count)
        else:
            raise
def verify_batch(self, batch, from_loader):
    """Sanity-check the length of `batch` against the specs recorded for `from_loader`."""
    specs = self.get_loader_specs(from_loader)
    num_inputs = specs.get('num_inputs')
    num_targets = specs.get('num_targets')
    if num_inputs is not None and num_targets is not None:
        assert len(batch) == num_inputs + num_targets, \
            "Was expecting a batch with {} (= num_inputs) + {} (= num_targets) tensors, " \
            "got one with {} tensors.".format(num_inputs, num_targets, len(batch))
    if num_inputs is not None:
        assert len(batch) > num_inputs, \
            "Expecting {} inputs, but the batch contains only {} tensors." \
            .format(num_inputs, len(batch))
    if num_targets is not None:
        assert len(batch) > num_targets, \
            "Expecting {} outputs, but the batch contains only {} tensors." \
            .format(num_targets, len(batch))
    return batch
def split_batch(self, batch, from_loader):
    """Split `batch` into (inputs, targets) according to the loader's specs."""
    specs = self.get_loader_specs(from_loader)
    num_inputs = specs.get('num_inputs')
    num_targets = specs.get('num_targets')
    assert not (num_targets is None and num_inputs is None), \
        "Can not split batch if both the number of inputs and targets is not known."
    if num_inputs is None:
        # Unknown number of inputs; infer it (this also allows num_targets == 0)
        num_inputs = len(batch) - num_targets
        inputs, targets = batch[:num_inputs], batch[num_inputs:]
    elif num_targets is None:
        # Unknown number of targets: everything past the inputs
        inputs, targets = batch[:num_inputs], batch[num_inputs:]
    else:
        # Both counts known: targets are taken from the tail.
        # NOTE(review): with num_targets == 0 the slice `[-0:]` yields the whole
        # batch — preexisting behaviour, preserved here; verify with callers.
        inputs, targets = batch[:num_inputs], batch[-num_targets:]
    return inputs, pyu.from_iterable(targets)
def restart_generators(self, of_loader=None):
    """Rebuild the DataLoader iterator(s) for `of_loader` (all loaders when None)."""
    if of_loader is None:
        loader_names = self._loaders.keys()
    else:
        assert of_loader in self._loaders.keys(), \
            "Key {} not in loaders ({})".format(of_loader, list(self._loaders))
        loader_names = pyu.to_iterable(of_loader)
    self._loader_iters.update({name: self._loaders[name].__iter__()
                               for name in loader_names})
    return self
def wrap_batch(self, batch, from_loader=None, requires_grad=False):
    """Move `batch` to the configured device(s), cast it to the training dtype,
    and optionally enable gradients on its tensors.

    When the data-parallel base device is the CPU (ordinal -1), only the input
    tensors are moved to the device while the targets stay on the CPU.
    """
    base_device_ordinal = \
        self._base_device_ordinal if hasattr(self, '_base_device_ordinal') else None
    # First, send to the right device
    if base_device_ordinal is None:
        # Both inputs and labels are sent to the device
        batch = self.to_device(batch)
    elif base_device_ordinal == -1:
        # Input batches go to device, while labels remain on the CPU.
        # To start, we need the number of input batches, i.e. from_loader must not be None
        assert_(from_loader is not None,
                "`from_loader` needs to be specified if base_device_ordinal is -1 "
                "(i.e. base device for data-parallel training is CPU).",
                ValueError)
        loader_spec = self._loader_specs.get(from_loader)
        assert_(loader_spec is not None,
                "No `loader_spec` found for loader key '{}'.".format(from_loader),
                RuntimeError)
        num_inputs = loader_spec['num_inputs']
        if num_inputs is None:
            # Infer the input count from the known target count
            num_inputs = len(batch) - loader_spec['num_targets']
        # Fetch input batches and send'em to device (leave the targets alone)
        inputs = batch[:num_inputs]
        inputs = self.to_device(inputs)
        # Finally, build the batch
        batch = inputs + batch[num_inputs:]
    else:
        raise ValueError("Internal Error: Invalid base_device_ordinal: {}."
                         .format(base_device_ordinal))
    # Cast to the right dtype and return
    batch = self.cast(batch)
    # Set gradients if required
    variable_batch = []
    for batch_num, _batch in enumerate(batch):
        if thu.is_tensor(_batch):
            variable_batch.append(_batch.requires_grad_() if requires_grad else _batch)
        elif pyu.is_listlike(_batch):
            variable_batch.append([__batch.requires_grad_() if requires_grad else __batch
                                   for __batch in _batch])
        else:
            raise RuntimeError(f"Was Expecting batch at index {batch_num} to be either a "
                               f"tensor or a list of tensors. Got {type(_batch)} instead.")
    # Preserve the original container type (list or tuple)
    batch = type(batch)(variable_batch)
    return batch
def next_iteration(self):
    """Advance the global iteration counter."""
    self._iteration_count += 1

def next_epoch(self):
    """Close the current epoch and open the next one, firing the epoch callbacks."""
    counters = dict(epoch_count=self._epoch_count,
                    batch_count=self._batch_count,
                    iteration_count=self._iteration_count)
    # Callback before the end of epoch
    self.callbacks.call(self.callbacks.END_OF_EPOCH, **counters)
    self._epoch_count += 1
    self._batch_count = 0
    counters = dict(epoch_count=self._epoch_count,
                    batch_count=self._batch_count,
                    iteration_count=self._iteration_count)
    # Callback after the start of epoch
    self.callbacks.call(self.callbacks.BEGIN_OF_EPOCH, **counters)
def stop_fitting(self, max_num_iterations=None, max_num_epochs=None):
    """Decide whether training should halt; iteration limits take priority over epochs."""
    if max_num_iterations is None and max_num_epochs is not None:
        # Epoch-based stopping. 'auto' defers to the configured class attribute.
        if isinstance(max_num_epochs, str) and max_num_epochs.lower() == 'auto':
            max_num_epochs = self._max_num_epochs
        return self._epoch_count >= max_num_epochs
    # Iteration-based stopping: fall back to the configured limit if necessary
    if max_num_iterations is None:
        max_num_iterations = self._max_num_iterations
    assert_(max_num_iterations is not None,
            "Neither max_num_iterations nor max_num_epochs was set.",
            RuntimeError)
    return self._iteration_count >= max_num_iterations
# String spellings accepted as "no limit".
INF_STRINGS = {'inf', 'infinity', 'infty'}

def set_max_num_iterations(self, max_num_iterations):
    """
    Set the maximum number of training iterations.

    Parameters
    ----------
    max_num_iterations : int or float or str
        Maximum number of training iterations. If float, it should equal numpy.inf.
        If str, it should be one of {'inf', 'infinity', 'infty'}.

    Returns
    -------
    Trainer
        self
    """
    # Normalize the string spellings of infinity first
    if max_num_iterations in self.INF_STRINGS:
        max_num_iterations = inf
    assert_(isinstance(max_num_iterations, int) or max_num_iterations == inf,
            "max_num_iterations must be an integer or numpy.inf, got {} instead."
            .format(type(max_num_iterations).__name__),
            TypeError)
    self._max_num_iterations = max_num_iterations
    return self
def set_max_num_epochs(self, max_num_epochs):
    """
    Set the maximum number of training epochs.

    Parameters
    ----------
    max_num_epochs : int or float or str
        Maximum number of training epochs. If float, it should equal numpy.inf.
        If str, it should be one of {'inf', 'infinity', 'infty'}.

    Returns
    -------
    Trainer
        self
    """
    # Normalize the string spellings of infinity first
    if max_num_epochs in self.INF_STRINGS:
        max_num_epochs = inf
    assert_(isinstance(max_num_epochs, int) or max_num_epochs == inf,
            "max_num_epochs must be an integer or numpy.inf, got {} instead."
            .format(type(max_num_epochs).__name__),
            TypeError)
    self._max_num_epochs = max_num_epochs
    return self
def fit(self, max_num_iterations=None, max_num_epochs=None):
    """
    Fit model.

    Parameters
    ----------
    max_num_iterations : int or float or str
        (Optional) Maximum number of training iterations. Overrides the value set by
        `Trainer.set_max_num_iterations`. If float, it should equal numpy.inf.
        If str, it should be one of {'inf', 'infinity', 'infty'}.
    max_num_epochs : int or float or str
        (Optional) Maximum number of training epochs. Overrides the value set by
        `Trainer.set_max_num_epochs`. If float, it should equal numpy.inf.
        If str, it should be one of {'inf', 'infinity', 'infty'}.

    Returns
    -------
    Trainer
        self
    """
    # Takes care of:
    # - dispatching train
    # - validation
    # - learning rate scheduling
    # - saving
    # Normalize string spellings of infinity, then fall back to configured limits
    max_num_iterations = inf if max_num_iterations in self.INF_STRINGS else max_num_iterations
    max_num_iterations = self._max_num_iterations if max_num_iterations is None \
        else max_num_iterations
    max_num_epochs = inf if max_num_epochs in self.INF_STRINGS else max_num_epochs
    max_num_epochs = self._max_num_epochs if max_num_epochs is None else max_num_epochs
    self.callbacks.call(self.callbacks.BEGIN_OF_FIT,
                        max_num_iterations=max_num_iterations,
                        max_num_epochs=max_num_epochs)
    # Local clock
    run_num = 0
    while True:
        if self.stop_fitting(max_num_iterations, max_num_epochs):
            self.console.info("Exceeded max number of iterations / epochs, breaking.")
            break
        # Train (the break callback stops the inner loop when the limits are hit)
        self.train_for(break_callback=lambda *args: self.stop_fitting(max_num_iterations,
                                                                      max_num_epochs))
        # Check if it's time to validate
        if self.validate_now:
            self.console.info("Validating.")
            self.validate_for()
        # Check if it's time to save
        if self.save_now:
            self.console.info("Saving.")
            self.save()
        run_num += 1
    # Call callback
    self.callbacks.call(self.callbacks.END_OF_FIT,
                        max_num_iterations=max_num_iterations,
                        max_num_epochs=max_num_epochs,
                        num_runs=run_num)
    return self
def apply_model_and_loss(self, inputs, target, backward=True, mode=None):
    """Forward the model on `inputs`, evaluate the train or validation criterion
    against `target`, and optionally backprop the loss.

    Returns the (prediction, loss) pair. When `mode` is None, the trainer's
    current mode ('train' or 'eval') decides which criterion is used.
    """
    if mode is None:
        mode = self._current_mode
    assert_(mode in ['train', 'eval'],
            f"`mode` must be one of ['train', 'eval'], got {mode} instead.", ValueError)
    # Compute prediction
    prediction = self.apply_model(*inputs)
    # Compute loss
    kwargs = {}
    # Criteria may opt in to receiving the trainer via a `trainer` forward parameter
    if (isinstance(self.criterion, torch.nn.Module) and
            'trainer' in signature(self.criterion.forward).parameters):
        kwargs['trainer'] = self
    # An empty target means the criterion is called without one (unsupervised)
    if mode == 'train':
        loss = self.criterion(prediction, target, **kwargs) \
            if len(target) != 0 else self.criterion(prediction, **kwargs)
    elif mode == 'eval':
        loss = self.validation_criterion(prediction, target, **kwargs) \
            if len(target) != 0 else self.validation_criterion(prediction, **kwargs)
    else:
        raise ValueError
    if backward:
        # Backprop if required
        # retain_graph option is needed for some custom
        # loss functions like malis, False per default
        if self.mixed_precision:
            # Scale the loss for mixed-precision training (apex amp)
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward(retain_graph=self.retain_graph)
        else:
            loss.backward(retain_graph=self.retain_graph)
    return prediction, loss
def train_for(self, num_iterations=None, break_callback=None):
    """Run the training loop for `num_iterations` (or until a break condition).

    Breaks early when `break_callback(iteration_num)` returns True, or when it
    is time to validate or save. Returns self.
    """
    # Switch model to train mode
    self.train_mode()
    # Call callback
    self.callbacks.call(self.callbacks.BEGIN_OF_TRAINING_RUN,
                        num_iterations=num_iterations)
    # iteration_num is a local clock. There's the global self._iteration_count that keeps
    # actual track of the number of iterations - this is updated by the call to
    # self.next_iteration().
    iteration_num = 0
    while True:
        if num_iterations is not None and iteration_num >= num_iterations:
            self.console.info("Finished {} iterations. Breaking...".format(num_iterations))
            break
        # Break if break callback asks us to
        if break_callback is not None and break_callback(iteration_num):
            self.console.info("Breaking on request from callback.")
            break
        self.console.progress("Training iteration {} (batch {} of epoch {})."
                              .format(iteration_num, self._batch_count, self._epoch_count))
        # Call callback
        self.callbacks.call(self.callbacks.BEGIN_OF_TRAINING_ITERATION,
                            iteration_num=iteration_num)
        # No interrupts while computing - a SIGINT could shoot down the driver if
        # done at the wrong time. Not sure if this has something to do with pinned memory
        with pyu.delayed_keyboard_interrupt():
            # Get batch
            batch = self.fetch_next_batch('train')
            # Send to device and wrap as variable
            batch = self.wrap_batch(batch, from_loader='train')
            # Separate inputs from targets
            inputs, target = self.split_batch(batch, from_loader='train')
            # Apply model, compute loss and backprop
            prediction, loss = self.apply_model_and_loss(inputs, target, backward=True,
                                                         mode='train')
        self.callbacks.call(self.callbacks.AFTER_MODEL_AND_LOSS_IS_APPLIED,
                            prediction=prediction, loss=loss, iteration_num=iteration_num)
        # Compute metric (subject to the configured evaluation frequency)
        if self.metric_is_defined and self.evaluate_metric_now:
            self._last_metric_evaluated_at_epoch = self._epoch_count
            # TODO Make unwrap a method for folks to overload
            error = self.metric(thu.unwrap(prediction, to_cpu=False),
                                thu.unwrap(target, to_cpu=False))
            self.update_state('training_error', thu.unwrap(error))
        else:
            error = None
        # Update state from computation
        self.update_state('training_inputs', thu.unwrap(inputs))
        self.update_state('training_target', thu.unwrap(target))
        self.update_state('training_prediction', thu.unwrap(prediction))
        self.update_state('training_loss', thu.unwrap(loss))
        # Update state from model's state hooks
        self.update_state_from_model_state_hooks()
        # Gradients accumulate across `backprop_every` iterations before stepping
        if iteration_num % self.backprop_every == 0:
            # Update parameters
            self.optimizer.step()
            # Zero out the grads
            self.optimizer.zero_grad()
        # Call callback
        self.callbacks.call(self.callbacks.END_OF_TRAINING_ITERATION,
                            iteration_num=iteration_num)
        # Prepare for next iteration
        self.next_iteration()
        # Break if validating or saving. It's important that the next_iteration() method is
        # called before checking validate_now and save_now - because otherwise, the iteration
        # counter is never updated after the first save and validate, resulting in an infinite
        # save + validate loop.
        if self.validate_now:
            self.console.info("Breaking to validate.")
            break
        if self.save_now:
            self.console.info("Breaking to save.")
            break
        iteration_num += 1
    self.callbacks.call(self.callbacks.END_OF_TRAINING_RUN, num_iterations=num_iterations)
    return self
    def validate_for(self, num_iterations=None, loader_name='validate'):
        """
        Validate for a given number of validation (if `num_iterations is not None`)
        or over the entire (validation) data set.

        Switches the model to eval mode, runs forward passes under `torch.no_grad`,
        averages loss (and metric, if defined) over the run, and records the
        averages via `record_validation_results`.

        Parameters
        ----------
        num_iterations : int
            Number of iterations to validate for. To validate on the entire dataset,
            leave this as `None`.
        loader_name : str
            Name of the data loader to use for validation. 'validate' is the obvious default.
        Returns
        -------
        Trainer
            self.
        """
        assert_(loader_name in ['validate', 'test', 'train'],
                "Invalid `loader_name`: {}".format(loader_name),
                ValueError)
        # Average over errors
        validation_error_meter = tu.AverageMeter()
        validation_loss_meter = tu.AverageMeter()
        iteration_num = 0
        # NOTE: may still be None afterwards (if `self._num_validation_iterations`
        # is None); the loop below then runs until the loader raises StopIteration.
        num_iterations = \
            self._num_validation_iterations if num_iterations is None else num_iterations
        # Switch to eval mode (e.g. for batchnorm, etc.)
        self.eval_mode()
        # Lazily create the loader iterator on first use
        if loader_name not in self._loader_iters:
            self._loader_iters.update({loader_name: self._loaders[loader_name].__iter__()})
        # If we don't know num_iterations, we're validating the entire dataset - so we might as
        # well restart the loader now
        if num_iterations is None:
            self.restart_generators(loader_name)
        # Record the epoch we're validating in
        self._last_validated_at_epoch = self._epoch_count
        self._last_validated_at_iteration = self._iteration_count
        self.callbacks.call(self.callbacks.BEGIN_OF_VALIDATION_RUN,
                            num_iterations=num_iterations,
                            num_iterations_in_generator=len(self._loader_iters[loader_name]),
                            last_validated_at_epoch=self._last_validated_at_epoch)
        while True:
            if num_iterations is not None and iteration_num >= num_iterations:
                break
            self.callbacks.call(self.callbacks.BEGIN_OF_VALIDATION_ITERATION,
                                iteration_num=iteration_num)
            try:
                # Generators are only restarted when a fixed number of iterations was
                # requested; otherwise exhaustion terminates the run (StopIteration below).
                batch = self.fetch_next_batch(loader_name,
                                              restart_exhausted_generators=num_iterations is not None,
                                              update_batch_count=False,
                                              update_epoch_count_if_generator_exhausted=False)
            except StopIteration:
                self.console.info("{} generator exhausted, breaking.".format(loader_name))
                break
            self.console.progress("Validating iteration {}.".format(iteration_num))
            # Delay SIGINTs till after computation; no_grad since no backward pass follows
            with pyu.delayed_keyboard_interrupt(), torch.no_grad():
                # Wrap
                batch = self.wrap_batch(batch, from_loader=loader_name)
                # Separate
                inputs, target = self.split_batch(batch, from_loader=loader_name)
                # Apply model, compute loss
                output, loss = self.apply_model_and_loss(inputs, target, backward=False,
                                                         mode='eval')
            # Batch size is read off the target along the configured batch dimension,
            # so the meters compute a correctly weighted average
            if isinstance(target, (list, tuple)):
                batch_size = target[0].size(self._target_batch_dim)
            else:
                batch_size = target.size(self._target_batch_dim)
            validation_loss_meter.update(thu.unwrap(loss, extract_item=True), n=batch_size)
            # Compute validation_error
            if self.metric_is_defined:
                validation_error = self.metric(thu.unwrap(output, to_cpu=False),
                                               thu.unwrap(target, to_cpu=False))
                if torch.is_tensor(validation_error):
                    # Convert to float
                    validation_error = thu.unwrap(validation_error, extract_item=True)
                self.update_state('validation_error', thu.unwrap(validation_error))
                validation_error_meter.update(validation_error, n=batch_size)
            self.update_state('validation_inputs', thu.unwrap(inputs))
            self.update_state('validation_target', thu.unwrap(target))
            self.update_state('validation_prediction', thu.unwrap(output))
            self.update_state('validation_loss', thu.unwrap(loss))
            # This is here for legacy reasons and will eventually be deprecated.
            self.update_state('validation_input', self.get_state('validation_inputs'))
            # Update from model's state hooks
            self.update_state_from_model_state_hooks()
            self.callbacks.call(self.callbacks.END_OF_VALIDATION_ITERATION,
                                iteration_num=iteration_num)
            iteration_num += 1
        self.console.info("Done validating. Logging results...")
        # Report averaged results (also updates the best-score bookkeeping)
        validation_results = {
            'validation_loss': validation_loss_meter.avg,
            'validation_error': (validation_error_meter.avg if self.metric_is_defined else None)
        }
        self.record_validation_results(**validation_results)
        self.console.info("Validation loss: {validation_loss}; validation error: "
                          "{validation_error}".format(**validation_results))
        self.callbacks.call(self.callbacks.END_OF_VALIDATION_RUN,
                            validation_loss_meter=validation_loss_meter,
                            validation_error_meter=validation_error_meter if
                            self.metric_is_defined else None)
        return self
def record_validation_results(self, validation_loss, validation_error):
# Update state
self.update_state('validation_loss_averaged', thu.unwrap(validation_loss))
if validation_error is not None:
self.update_state('validation_error_averaged', thu.unwrap(validation_error))
# Prefer the error metric (if provided). This should be handled with care -
# validation error should either always not be None, or otherwise.
validation_score = validation_loss if validation_error is None else validation_error
# Check if validation error is less than the best so far
if self._best_validation_score is None or validation_score < self._best_validation_score:
# Best score so far. The following flag will trigger a save
self._is_iteration_with_best_validation_score = True
self._best_validation_score = validation_score
def get_config(self, exclude_loader=True):
# Returns a config dictionary, like __getstate__. Except optionally without the
# data loaders (which might be yuuuuuge if it contains the data)
config_dict = dict(self.__dict__)
# Loader iterators can't be pickled
if '_loader_iters' in config_dict:
config_dict.update({'_loader_iters': {}})
if exclude_loader:
if '_loaders' in config_dict:
config_dict.update({'_loaders': {}})
return config_dict
    def set_config(self, config_dict):
        """Restore trainer state from a config dictionary (inverse of `get_config`),
        then re-wire the callback engine to this trainer instance. The callbacks
        were pickled bound to the *old* trainer object, so both the engine and
        every callback must be re-pointed at `self`."""
        # TODO some sanity checks on config_dict (e.g. whether the model is actually a model, etc)
        self.__dict__.update(config_dict)
        # Rebind trainer to callback engine
        self.callbacks.bind_trainer(self)
        # Have callback engine rebind all callbacks to trainer
        self.callbacks.rebind_trainer_to_all_callbacks()
        return self
    def save(self, exclude_loader=True, stash_best_checkpoint=True):
        """Write a checkpoint of the trainer to `self._save_to_directory`.

        Parameters
        ----------
        exclude_loader : bool
            Whether to omit the data loaders from the checkpoint (passed through
            to `get_config`).
        stash_best_checkpoint : bool
            If this iteration achieved the best validation score, also copy the
            checkpoint to the 'best checkpoint' filename.

        Returns
        -------
        Trainer
            self.
        """
        # Log the epoch for save_now
        self._last_saved_at_epoch = self._epoch_count
        self.callbacks.call(self.callbacks.BEGIN_OF_SAVE,
                            save_to_directory=self._save_to_directory,
                            epoch_count=self._epoch_count,
                            batch_count=self._batch_count,
                            iteration_count=self._iteration_count,
                            is_iteration_with_best_validation_score=self._is_iteration_with_best_validation_score)
        checkpoint_path = os.path.join(self._save_to_directory,
                                       self._checkpoint_filename)
        best_checkpoint_path = os.path.join(self._save_to_directory,
                                            self._best_checkpoint_filename)
        # Save the state dictionary
        torch.save(self.get_config(exclude_loader=exclude_loader),
                   checkpoint_path,
                   pickle_module=self.pickle_module)
        self.callbacks.call(self.callbacks.END_OF_SAVE,
                            save_to_directory=self._save_to_directory,
                            checkpoint_path=checkpoint_path,
                            best_checkpoint_path=best_checkpoint_path,
                            epoch_count=self._epoch_count,
                            batch_count=self._batch_count,
                            iteration_count=self._iteration_count,
                            is_iteration_with_best_validation_score=self._is_iteration_with_best_validation_score)
        if self._is_iteration_with_best_validation_score and stash_best_checkpoint:
            # Do the stashin'
            shutil.copyfile(checkpoint_path, best_checkpoint_path)
        # This is required to prevent an infinite save loop?
        # (The flag would otherwise remain True and keep triggering saves.)
        self._is_iteration_with_best_validation_score = False
        self.console.info("Saved to {}.".format(self._save_to_directory))
        return self
def save_model(self, to_directory=None):
to_directory = self._save_to_directory if to_directory is None else to_directory
# Save the state dictionary
torch.save(self.model,
os.path.join(to_directory, 'model.pytorch'),
pickle_module=self.pickle_module)
return self
def load(self, from_directory=None, best=False, filename=None, map_location=None):
"""
Load the trainer from checkpoint.
Parameters
----------
from_directory : str
Path to the directory where the checkpoint is located. The filename should be
'checkpoint.pytorch' if best=False, or 'best_checkpoint.pytorch' if best=True.
best : bool
Whether to load the best checkpoint. The filename in `from_directory` should be
'best_checkpoint.pytorch'.
filename : str
Overrides the default filename.
map_location : function, torch.device, string or a dict
Specify how to remap storage locations.
Returns
-------
Trainer
self
"""
from_directory = self._save_to_directory if from_directory is None else from_directory
assert from_directory is not None, "Nowhere to load from."
# Get file name
if filename is None:
filename = self._best_checkpoint_filename if best else self._checkpoint_filename
# Load the dictionary
config_dict = torch.load(os.path.join(from_directory, filename),
pickle_module=self.pickle_module, map_location=map_location)
# This is required to prevent an infinite save loop?
self._is_iteration_with_best_validation_score = False
# Set config
self.set_config(config_dict)
return self
def load_model(self, from_directory=None, filename=None):
from_directory = self._save_to_directory if from_directory is None else from_directory
filename = 'model.pytorch' if filename is None else filename
# Load the model
model = torch.load(os.path.join(from_directory, filename),
pickle_module=self.pickle_module)
# Set model
self.model = model
return self
    def load_(self, *args, **kwargs):
        """Deprecated alias for `load`, kept for backward compatibility."""
        # Here for legacy reasons - use load instead.
        return self.load(*args, **kwargs)
    @pyu.deprecated("please use self.console.{info,progress,warning,debug} instead")
    def print(self, message):
        """Deprecated: print `message` to stdout with a timestamp prefix."""
        print("[+][{}] {}".format(str(datetime.now()), message))
@classmethod
def build(cls, model=None, **trainer_config):
"""Factory function to build the trainer."""
# Check if trainer is to be loaded from file
if trainer_config.get('load_from_checkpoint'):
# Load checkpoint config
trainer = cls(model).save_every(**trainer_config.get('checkpoint_config'))
trainer.load_()
else:
trainer = cls(model)
if 'logger_config' in trainer_config:
trainer.build_logger(**trainer_config.get('logger_config'))
if 'criterion_config' in trainer_config:
trainer.build_criterion(**trainer_config.get('criterion_config'))
if 'optimizer_config' in trainer_config:
trainer.build_optimizer(**trainer_config.get('optimizer_config'))
if 'metric_config' in trainer_config:
trainer.build_metric(**trainer_config.get('metric_config'))
if 'checkpoint_config' in trainer_config:
trainer.save_every(**trainer_config.get('checkpoint_config'))
if 'validation_config' in trainer_config:
trainer.validate_every(**trainer_config.get('validation_config'))
if 'max_num_iterations' in trainer_config:
trainer.set_max_num_iterations(trainer_config.get('max_num_iterations'))
if 'max_num_epochs' in trainer_config:
trainer.set_max_num_epochs(trainer_config.get('max_num_epochs'))
if trainer_config.get('use_cuda'):
devices = trainer_config.get('use_cuda').get('devices') \
if isinstance(trainer_config.get('use_cuda'), dict) else None
trainer.cuda(devices=devices)
if 'training_precision' in trainer_config:
trainer.set_precision(trainer_config.get('training_precision'))
return trainer
| 72,802 | 38.848385 | 114 | py |
inferno | inferno-master/inferno/trainers/callbacks/essentials.py | import numpy as np
import os
import h5py as h5
from ...utils import torch_utils as tu
from ...utils.train_utils import Frequency
from ...utils.exceptions import assert_, FrequencyValueError, NotUnwrappableError
from ...utils import python_utils as pyu
from .base import Callback
import gc
class NaNDetector(Callback):
    """Callback that aborts training as soon as the training loss stops being finite."""
    def end_of_training_iteration(self, **_):
        loss = self.trainer.get_state('training_loss')
        # Reduce a (scalar) loss tensor to a plain python number first
        if tu.is_tensor(loss):
            loss = tu.unwrap(loss, extract_item=True)
        if np.isfinite(loss):
            return
        raise RuntimeError("Loss is not finite (loss={})!".format(loss))
class PersistentSave(Callback):
    """Keep every checkpoint on disk (one file per save), while maintaining the
    trainer's default checkpoint name as a symlink to the most recent file."""
    def __init__(self, template='checkpoint.pytorch.epoch{epoch_count}.iteration{iteration_count}'):
        super(PersistentSave, self).__init__()
        # Per-save filename pattern; formatted with the BEGIN_OF_SAVE kwargs
        self.template = template
    def begin_of_save(self, **kwargs):
        # Temporarily swap in a per-save filename so the trainer writes a new file
        self._orig_checkpoint_filename = self.trainer._checkpoint_filename
        self.trainer._checkpoint_filename = self.template.format(**kwargs)
    def end_of_save(self, save_to_directory, **_):
        # Point the canonical checkpoint name at the file that was just written
        link_path = os.path.join(save_to_directory, self._orig_checkpoint_filename)
        if os.path.lexists(link_path):
            os.remove(link_path)
        os.symlink(self.trainer._checkpoint_filename, link_path)
        # Restore the trainer's default checkpoint filename
        self.trainer._checkpoint_filename = self._orig_checkpoint_filename
class DumpHDF5Every(Callback):
    """Dumps intermediate training states to a HDF5 file.

    At a configurable frequency (and optionally after every validation run),
    a configurable set of trainer states (inputs, targets, predictions, ...)
    is unwrapped to numpy and written to a fresh HDF5 file in `to_directory`.
    """
    def __init__(self, frequency, to_directory,
                 filename_template='dump.{mode}.epoch{epoch_count}.iteration{iteration_count}.h5',
                 force_dump=False, dump_after_every_validation_run=False):
        """
        Parameters
        ----------
        frequency : str or tuple or inferno.utils.train_utils.Frequency
            How often to dump (e.g. '100 iterations' or '1 epochs').
        to_directory : str
            Directory the HDF5 files are written to (created if missing).
        filename_template : str
            Filename pattern; formatted with `mode`, `epoch_count` and
            `iteration_count`.
        force_dump : bool
            If True, raise when a cached state cannot be unwrapped to numpy;
            otherwise such states are silently skipped.
        dump_after_every_validation_run : bool
            Whether to also dump the validation states after each validation run.
        """
        super(DumpHDF5Every, self).__init__()
        # Privates
        self._dump_every = None
        # Default trainer states written during training / validation dumps;
        # extend via `dump_state` / `dump_states`.
        self._trainer_states_to_be_dumped_while_training = {'training_inputs',
                                                            'training_target',
                                                            'training_prediction'}
        self._trainer_states_to_be_dumped_while_validating = {'validation_inputs',
                                                              'validation_target',
                                                              'validation_prediction'}
        self._dump_cache = {}
        # Publics
        self.dump_every = frequency
        self.dump_directory = to_directory
        self.dump_filename_template = filename_template
        self.force_dump = force_dump  # whether to raise on un-unwrappable states
        self.dump_after_every_validation_run = dump_after_every_validation_run
    @property
    def dump_every(self):
        # Frequency object controlling how often `dump_now` fires
        return self._dump_every
    @dump_every.setter
    def dump_every(self, value):
        # Accepts a str / tuple / Frequency spec
        self._dump_every = Frequency.build_from(value)
        assert_(self._dump_every.is_consistent,
                "Dump frequency is not consistent.",
                FrequencyValueError)
    @property
    def dump_now(self):
        # Stateful: `match(persistent=True)` records the match, so this should be
        # read at most once per iteration.
        return self.dump_every.match(iteration_count=self.trainer.iteration_count,
                                     epoch_count=self.trainer.epoch_count,
                                     persistent=True, match_zero=True)
    def add_to_dump_cache(self, key, value):
        # List-like values are flattened into individually keyed entries
        if pyu.is_listlike(value):
            for value_num, _value in enumerate(value):
                self.add_to_dump_cache("{}_{}".format(key, value_num), _value)
        else:
            self._dump_cache.update({key: value})
    def clear_dump_cache(self):
        self._dump_cache.clear()
    def dump_state(self, key, dump_while='training'):
        """Register an extra trainer state `key` to be included in dumps made
        while training or while validating."""
        # Validate arguments
        keyword_mapping = {'train': 'training',
                           'training': 'training',
                           'validation': 'validating',
                           'validating': 'validating'}
        dump_while = keyword_mapping.get(dump_while)
        assert_(dump_while is not None,
                "The keyword dump_while must be one of: {}."
                .format(set(keyword_mapping.keys())),
                ValueError)
        assert_(isinstance(key, str),
                "State key must be a string, got {} instead.".format(type(key).__name__),
                TypeError)
        # Add to set of observed states
        if dump_while == 'training':
            self._trainer_states_to_be_dumped_while_training.add(key)
        elif dump_while == 'validating':
            self._trainer_states_to_be_dumped_while_validating.add(key)
        else:
            raise NotImplementedError
        return self
    def dump_states(self, keys, dump_while='training'):
        # Batch version of `dump_state`
        for key in keys:
            self.dump_state(key, dump_while=dump_while)
        return self
    def get_file_path(self, mode):
        """Build the output file path for this dump, creating the dump directory
        if it does not exist yet."""
        # Make sure the dump directory exists
        if not os.path.exists(self.dump_directory):
            os.mkdir(self.dump_directory)
        else:
            assert_(os.path.isdir(self.dump_directory),
                    "Dump directory {} is a file.".format(self.dump_directory),
                    FileExistsError)
        filename = self.dump_filename_template.format(epoch_count=self.trainer.epoch_count,
                                                      iteration_count=self.trainer.iteration_count,
                                                      mode=mode)
        return os.path.join(self.dump_directory, filename)
    def dump(self, mode):
        """Write every cached state to a fresh HDF5 file (one dataset per key)."""
        with h5.File(name=self.get_file_path(mode), mode='w') as h5_file:
            for key, to_dump in self._dump_cache.items():
                if to_dump is None:
                    continue
                try:
                    to_dump = tu.unwrap(to_dump, as_numpy=True)
                except NotUnwrappableError:
                    # Can't unwrap to_dump, but let's not throw a tantrum if we're not required to
                    if not self.force_dump:
                        continue
                    else:
                        raise
                # Do the dumpin'
                h5_file.create_dataset(name=key, data=to_dump)
    def end_of_training_iteration(self, **_):
        dump_now = self.dump_now
        if dump_now:
            # To be double sure
            self.clear_dump_cache()
            # Get object to dump
            for state_name in self._trainer_states_to_be_dumped_while_training:
                self.add_to_dump_cache(state_name, self.trainer.get_state(state_name))
            # Dump
            self.dump(mode='training')
            # Clear cache
            self.clear_dump_cache()
    def end_of_validation_run(self, **_):
        if self.dump_after_every_validation_run:
            # To be double sure
            self.clear_dump_cache()
            # Get object to dump
            for state_name in self._trainer_states_to_be_dumped_while_validating:
                self.add_to_dump_cache(state_name, self.trainer.get_state(state_name))
            # Dump
            self.dump(mode='validation')
            # Clear cache
            self.clear_dump_cache()
class SaveAtBestValidationScore(Callback):
    """
    Triggers a save at the best EMA (exponential moving average) validation score.
    The basic `Trainer` has built in support for saving at the best validation score, but this
    callback might eventually replace that functionality.
    """
    def __init__(self, smoothness=0, verbose=False):
        """
        Parameters
        ----------
        smoothness : float
            EMA momentum; 0 reduces to the trainer's plain best-score behaviour.
        verbose : bool
            Whether to log the comparison after every validation run.
        """
        super(SaveAtBestValidationScore, self).__init__()
        # Privates
        self._ema_validation_score = None
        self._best_ema_validation_score = None
        # Publics
        self.smoothness = smoothness
        self.verbose = verbose
    def end_of_validation_run(self, **_):
        # Prefer the averaged error; fall back to the averaged loss when no metric exists
        score = self.trainer.get_state('validation_error_averaged')
        if score is None:
            score = self.trainer.get_state('validation_loss_averaged')
        if self._ema_validation_score is None:
            # First run: the EMA is the score itself, which is trivially the best,
            # so trigger a save
            self._ema_validation_score = score
            self._best_ema_validation_score = score
            self.trainer._is_iteration_with_best_validation_score = True
        else:
            momentum = self.smoothness
            self._ema_validation_score = \
                momentum * self._ema_validation_score + (1 - momentum) * score
            # This overrides the default behaviour, but reduces to it if smoothness = 0
            self.trainer._is_iteration_with_best_validation_score = \
                self._ema_validation_score < self._best_ema_validation_score
        # Trigger a save
        if self.trainer._is_iteration_with_best_validation_score:
            if self.verbose:
                self.trainer.console.info("Current smoothed validation score {} is better "
                                          "than the best smoothed validation score {}."
                                          .format(self._ema_validation_score,
                                                  self._best_ema_validation_score))
            self._best_ema_validation_score = self._ema_validation_score
            self.trainer.save_now = True
        elif self.verbose:
            self.trainer.console.info("Current smoothed validation score {} is not better "
                                      "than the best smoothed validation score {}."
                                      .format(self._ema_validation_score,
                                              self._best_ema_validation_score))
        # Done
class ParameterEMA(Callback):
    """Maintain a moving average of network parameters."""
    def __init__(self, momentum):
        """
        Parameters
        ----------
        momentum : float
            Momentum for the moving average. The following holds:
            `new_moving_average = momentum * old_moving_average + (1 - momentum) * value`
        """
        super(ParameterEMA, self).__init__()
        # Privates
        self._parameters = None
        # Publics
        self.momentum = momentum
    def maintain(self):
        """Update the averaged parameters with the model's current parameters."""
        if self._parameters is None:
            # Initialize the averages at zero, matching each parameter's shape,
            # device and dtype. (The previous `p.data.new().zero_()` created an
            # *empty* 0-element tensor, which breaks the shaped `add_` below.)
            self._parameters = [p.data.new_zeros(p.data.shape)
                                for p in self.trainer.model.parameters()]
        for p_model, p_ema in zip(self.trainer.model.parameters(), self._parameters):
            # ema <- momentum * ema + (1 - momentum) * parameter
            p_ema.mul_(self.momentum).add_(p_model.data.mul(1. - self.momentum))
    def apply(self):
        """Overwrite the model's parameters in-place with their moving averages."""
        assert_(self._parameters is not None,
                "Can't apply parameter EMA's: not available.",
                ValueError)
        for p_model, p_ema in zip(self.trainer.model.parameters(), self._parameters):
            p_model.data.copy_(p_ema)
    def end_of_training_iteration(self, **_):
        # Refresh the averages once per training iteration
        self.maintain()
class GradientClip(Callback):
    """Clip model gradients right after the backward pass, either by value or by norm
    (exactly one of the two must be configured)."""
    def __init__(self, clip_value=None, clip_norm=None):
        super(GradientClip, self).__init__()
        # Exactly one of the two clipping specs must be provided
        assert_(not (clip_value is None and clip_norm is None),
                "Must provide either clip_value or clip_norm.",
                ValueError)
        assert_(clip_value is None or clip_norm is None,
                f"Must provide only one, but not both: "
                f"clip_value ({clip_value}) or clip_norm ({clip_norm}).",
                RuntimeError)
        self._clip_value = clip_value
        self._clip_norm = clip_norm
    @property
    def mode(self):
        # 'value' clipping if configured, otherwise 'norm'
        if self._clip_value is not None:
            return 'value'
        return 'norm'
    @property
    def norm_or_value(self):
        # Whichever of the two thresholds was configured
        if self._clip_value is not None:
            return self._clip_value
        return self._clip_norm
    def after_model_and_loss_is_applied(self, **_):
        # Clip in-place on the model's parameters before the optimizer step
        tu.clip_gradients_(self.trainer.model.parameters(), self.mode, self.norm_or_value)
class GarbageCollection(Callback):
    """
    Callback that triggers garbage collection at the end of every
    training iteration in order to reduce the memory footprint of training
    """
    def end_of_training_iteration(self, **_):
        # Force a full cyclic-GC pass to free reference cycles left over by the iteration
        gc.collect()
| 12,316 | 41.619377 | 100 | py |
inferno | inferno-master/inferno/trainers/callbacks/logging/tensorboard.py | import warnings
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from .base import Logger
from ....utils import torch_utils as tu
from ....utils import python_utils as pyu
from ....utils import train_utils as tru
from ....utils.exceptions import assert_
class TaggedImage(object):
    """Lightweight pairing of an image array with the tensorboard tag it should
    be logged under."""
    def __init__(self, array, tag):
        # Tag first, then the image data (plain attribute storage)
        self.tag = tag
        self.array = array
class TensorboardLogger(Logger):
"""Class to enable logging of training progress to Tensorboard.
Currently supports logging scalars and images.
"""
# This is hard coded because tensorboardX doesn't have a __version__
_TENSORBOARDX_IMAGE_FORMAT = 'CHW'
_DEBUG = False
    def __init__(self, log_directory=None,
                 log_scalars_every=None, log_images_every=None, log_histograms_every=None,
                 send_image_at_batch_indices='all', send_image_at_channel_indices='all',
                 send_volume_at_z_indices='mid'):
        """
        Parameters
        ----------
        log_directory : str
            Path to the directory where the log files will be placed.
        log_scalars_every : str or tuple or inferno.utils.train_utils.Frequency
            How often scalars should be logged to Tensorboard. By default, once every iteration.
        log_images_every : str or tuple or inferno.utils.train_utils.Frequency
            How often images should be logged to Tensorboard. By default, once every epoch.
        log_histograms_every : str or tuple or inferno.utils.train_utils.Frequency
            How often histograms should be logged to Tensorboard. By default, never.
        send_image_at_batch_indices : list or str
            The indices of the batches to be logged. An `image_batch` usually has the shape
            (num_samples, num_channels, num_rows, num_cols). By setting this argument to say
            [0, 2], only images corresponding to `image_batch[0]` and `image_batch[2]` are
            logged. When a str, it should be 'all', in which case, all samples are logged.
        send_image_at_channel_indices : list or str
            Similar to `send_image_at_batch_indices`, but applying to channels.
        send_volume_at_z_indices : list or str
            For 3D batches of shape (num_samples, num_channels, num_z_slices, num_rows, num_cols),
            select the indices of the z slices to be logged. When a str, it could be 'all' or
            'mid' (to log the central z slice).
        Warnings
        --------
        Leaving log_images_every to the default (i.e. once every iteration) might generate a
        large logfile and/or slow down the training.
        """
        super(TensorboardLogger, self).__init__(log_directory=log_directory)
        # Frequencies are lazily defaulted by their respective property getters
        self._log_scalars_every = None
        self._log_images_every = None
        self._log_histograms_every = None
        # The SummaryWriter is created lazily by the `writer` property
        self._writer = None
        self._config = {'image_batch_indices': send_image_at_batch_indices,
                        'image_channel_indices': send_image_at_channel_indices,
                        'volume_z_indices': send_volume_at_z_indices}
        # We ought to know the trainer states we're observing (and plotting to tensorboard).
        # These are the defaults.
        self._trainer_states_being_observed_while_training = {'training_loss',
                                                              'training_error',
                                                              'training_prediction',
                                                              'training_inputs',
                                                              'training_target',
                                                              'learning_rate'}
        self._trainer_states_being_observed_while_validating = {'validation_error_averaged',
                                                                'validation_loss_averaged'}
        if log_scalars_every is not None:
            self.log_scalars_every = log_scalars_every
        if log_images_every is not None:
            self.log_images_every = log_images_every
        if log_histograms_every is not None:
            self.log_histograms_every = log_histograms_every
    @property
    def writer(self):
        # Lazily construct the SummaryWriter so nothing touches disk until first use
        if self._writer is None:
            self._writer = SummaryWriter(self.log_directory)
        return self._writer
    @property
    def log_scalars_every(self):
        # Default: log scalars once every iteration
        if self._log_scalars_every is None:
            self._log_scalars_every = tru.Frequency(1, 'iterations')
        return self._log_scalars_every
    @log_scalars_every.setter
    def log_scalars_every(self, value):
        # Accepts a str (e.g. '10 iterations'), tuple, or Frequency instance
        self._log_scalars_every = tru.Frequency.build_from(value)
    @property
    def log_scalars_now(self):
        # Whether scalars are due to be logged at the current iteration/epoch.
        # Using persistent=True in a property getter is probably not a very good idea...
        # We need to make sure that this getter is called only once per callback-call.
        return self.log_scalars_every.match(iteration_count=self.trainer.iteration_count,
                                            epoch_count=self.trainer.epoch_count,
                                            persistent=True)
    @property
    def log_images_every(self):
        # Default: log images once every epoch
        if self._log_images_every is None:
            self._log_images_every = tru.Frequency(1, 'epochs')
        return self._log_images_every
    @log_images_every.setter
    def log_images_every(self, value):
        # Accepts a str (e.g. '1 epochs'), tuple, or Frequency instance
        self._log_images_every = tru.Frequency.build_from(value)
    @property
    def log_images_now(self):
        # Whether images are due to be logged at the current iteration/epoch.
        # Using persistent=True in a property getter is probably not a very good idea...
        # We need to make sure that this getter is called only once per callback-call.
        return self.log_images_every.match(iteration_count=self.trainer.iteration_count,
                                           epoch_count=self.trainer.epoch_count,
                                           persistent=True)
    @property
    def log_histograms_every(self):
        # Default: never log histograms
        if self._log_histograms_every is None:
            self._log_histograms_every = tru.Frequency('never')
        return self._log_histograms_every
    @log_histograms_every.setter
    def log_histograms_every(self, value):
        # Accepts a str, tuple, or Frequency instance
        self._log_histograms_every = tru.Frequency.build_from(value)
    @property
    def log_histograms_now(self):
        # Whether histograms are due to be logged at the current iteration/epoch.
        # Using persistent=True in a property getter is probably not a very good idea...
        # We need to make sure that this getter is called only once per callback-call.
        return self.log_histograms_every.match(iteration_count=self.trainer.iteration_count,
                                               epoch_count=self.trainer.epoch_count,
                                               persistent=True)
def observe_state(self, key, observe_while='training'):
# Validate arguments
keyword_mapping = {'train': 'training',
'training': 'training',
'validation': 'validating',
'validating': 'validating'}
observe_while = keyword_mapping.get(observe_while)
assert_(observe_while is not None,
"The keyword observe_while must be one of: {}."
.format(set(keyword_mapping.keys())),
ValueError)
assert_(isinstance(key, str),
"State key must be a string, got {} instead.".format(type(key).__name__),
TypeError)
# Add to set of observed states
if observe_while == 'training':
self._trainer_states_being_observed_while_training.add(key)
elif observe_while == 'validating':
self._trainer_states_being_observed_while_validating.add(key)
else:
raise NotImplementedError
return self
def unobserve_state(self, key, observe_while='training'):
if observe_while == 'training':
self._trainer_states_being_observed_while_training.remove(key)
elif observe_while == 'validating':
self._trainer_states_being_observed_while_validating.remove(key)
else:
raise NotImplementedError
return self
def unobserve_states(self, keys, observe_while='training'):
for key in keys:
self.unobserve_state(key, observe_while=observe_while)
return self
def observe_training_and_validation_state(self, key):
for mode in ['training', 'validation']:
self.observe_state('{}_{}'.format(mode, key), observe_while=mode)
def observe_states(self, keys, observe_while='training'):
for key in keys:
self.observe_state(key, observe_while=observe_while)
return self
def observe_training_and_validation_states(self, keys):
for key in keys:
self.observe_training_and_validation_state(key)
return self
    def log_object(self, tag, object_,
                   allow_scalar_logging=True,
                   allow_image_logging=True,
                   allow_histogram_logging=True):
        """Dispatch `object_` to the appropriate logging method based on its type
        and shape: scalars -> `log_scalar`, (label) image/volume tensors ->
        `log_image_or_volume_batch`, vectors -> `log_histogram`. Lists/tuples are
        logged recursively with an index-suffixed tag. The `allow_*` flags gate
        each category (used to honor the per-category logging frequencies)."""
        assert isinstance(tag, str)
        if isinstance(object_, (list, tuple)):
            # Recurse into containers, one tag per element
            for object_num, _object in enumerate(object_):
                self.log_object("{}_{}".format(tag, object_num),
                                _object,
                                allow_scalar_logging,
                                allow_image_logging,
                                allow_histogram_logging)
            return
        # Check whether object is a scalar
        if tu.is_scalar_tensor(object_) and allow_scalar_logging:
            # Log scalar
            value = tu.unwrap(object_.float(), extract_item=True)
            self.log_scalar(tag, value, step=self.trainer.iteration_count)
        elif isinstance(object_, (float, int)) and allow_scalar_logging:
            value = float(object_)
            self.log_scalar(tag, value, step=self.trainer.iteration_count)
        elif tu.is_label_image_or_volume_tensor(object_) and allow_image_logging:
            # Add a channel axis and log as images
            self.log_image_or_volume_batch(tag, object_[:, None, ...],
                                           self.trainer.iteration_count)
        elif tu.is_image_or_volume_tensor(object_):
            if allow_image_logging:
                # Log images
                self.log_image_or_volume_batch(tag, object_, self.trainer.iteration_count)
        elif tu.is_vector_tensor(object_) and allow_histogram_logging:
            # Log histograms
            values = tu.unwrap(object_, as_numpy=True)
            self.log_histogram(tag, values, self.trainer.iteration_count)
        else:
            # Object is neither a scalar nor an image nor a vector, there's nothing we can do
            if tu.is_tensor(object_) and self._DEBUG:
                # Throw a warning when in debug mode.
                warnings.warn("Unsupported attempt to log tensor `{}` of shape `{}`".format(tag, object_.size()))
def end_of_training_iteration(self, **_):
log_scalars_now = self.log_scalars_now
log_images_now = self.log_images_now
log_histograms_now = self.log_histograms_now
if not log_scalars_now and not log_images_now:
# Nothing to log, so we won't bother
return
# Read states
for state_key in self._trainer_states_being_observed_while_training:
state = self.trainer.get_state(state_key, default=None)
if state is None:
# State not found in trainer but don't throw a hissy fit
continue
self.log_object(state_key, state,
allow_scalar_logging=log_scalars_now,
allow_image_logging=log_images_now,
allow_histogram_logging=log_histograms_now)
def end_of_validation_run(self, **_):
# Log everything
# Read states
for state_key in self._trainer_states_being_observed_while_validating:
state = self.trainer.get_state(state_key, default=None)
if state is None:
# State not found in trainer but don't throw a hissy fit
continue
self.log_object(state_key, state,
allow_scalar_logging=True,
allow_image_logging=True,
allow_histogram_logging=False)
def _tag_image(self, image, base_tag, prefix=None, instance_num=None, channel_num=None,
slice_num=None):
tag = base_tag
if prefix is not None:
tag = '{}/{}'.format(base_tag, prefix)
if instance_num is not None:
tag = '{}/instance_{}'.format(tag, instance_num)
if channel_num is not None:
tag = '{}/channel_{}'.format(tag, channel_num)
if slice_num is not None:
tag = '{}/slice_{}'.format(tag, slice_num)
return TaggedImage(image, tag)
def extract_images_from_batch(self, batch, base_tag=None, prefix=None):
if base_tag is None:
assert_(prefix is None,
"`base_tag` is not provided - `prefix` must be None in this case.",
ValueError)
# Special case when batch is a list or tuple of batches
if isinstance(batch, (list, tuple)):
image_list = []
for batch_num, _batch in batch:
image_list.extend(
self.extract_images_from_batch(_batch, base_tag=base_tag,
prefix='batch_{}'.format(batch_num)))
return image_list
# `batch` really is a tensor from now on.
batch_is_image_tensor = tu.is_image_tensor(batch)
batch_is_volume_tensor = tu.is_volume_tensor(batch)
assert batch_is_volume_tensor != batch_is_image_tensor, \
"Batch must either be a image or a volume tensor."
# Convert to numpy
batch = batch.float().numpy()
# Get the indices of the batches we want to send to tensorboard
batch_indices = self._config.get('image_batch_indices', 'all')
if batch_indices == 'all':
batch_indices = list(range(batch.shape[0]))
elif isinstance(batch_indices, (list, tuple)):
pass
elif isinstance(batch_indices, int):
batch_indices = [batch_indices]
else:
raise NotImplementedError
# Get the indices of the channels we want to send to tensorboard
channel_indices = self._config.get('image_channel_indices', 'all')
if channel_indices == 'all':
channel_indices = list(range(batch.shape[1]))
elif isinstance(channel_indices, (list, tuple)):
pass
elif isinstance(channel_indices, int):
channel_indices = [channel_indices]
else:
raise NotImplementedError
# Extract images from batch
if batch_is_image_tensor:
image_list = [(self._tag_image(image,
base_tag=base_tag, prefix=prefix,
instance_num=instance_num,
channel_num=channel_num)
if base_tag is not None else image)
for instance_num, instance in enumerate(batch)
for channel_num, image in enumerate(instance)
if instance_num in batch_indices and channel_num in channel_indices]
else:
assert batch_is_volume_tensor
# Trim away along the z axis
z_indices = self._config.get('volume_z_indices', 'mid')
if z_indices == 'all':
z_indices = list(range(batch.shape[2]))
elif z_indices == 'mid':
z_indices = [batch.shape[2] // 2]
elif isinstance(z_indices, (list, tuple)):
pass
elif isinstance(z_indices, int):
z_indices = [z_indices]
else:
raise NotImplementedError
# I'm going to hell for this.
image_list = [(self._tag_image(image,
base_tag=base_tag, prefix=prefix,
instance_num=instance_num,
channel_num=channel_num,
slice_num=slice_num)
if base_tag is not None else image)
for instance_num, instance in enumerate(batch)
for channel_num, volume in enumerate(instance)
for slice_num, image in enumerate(volume)
if instance_num in batch_indices and
channel_num in channel_indices and
slice_num in z_indices]
# Done.
return image_list
def log_image_or_volume_batch(self, tag, batch, step=None):
assert pyu.is_maybe_list_of(tu.is_image_or_volume_tensor)(batch)
step = step or self.trainer.iteration_count
image_list = self.extract_images_from_batch(batch, base_tag=tag)
self.log_images(tag, image_list, step)
    def log_scalar(self, tag, value, step):
        """
        Log a scalar value to the tensorboard writer.

        Parameters
        ----------
        tag : basestring
            Name of the scalar
        value : scalar (e.g. float or int)
            Value of the scalar to be logged
        step : int
            training iteration
        """
        self.writer.add_scalar(tag=tag, scalar_value=value, global_step=step)
def log_images(self, tag, images, step, image_format='CHW'):
"""Logs a list of images."""
assert_(image_format.upper() in ['CHW', 'HWC'],
"Image format must be either 'CHW' or 'HWC'. Got {} instead.".format(image_format),
ValueError)
for image_num, image in enumerate(images):
if isinstance(image, TaggedImage):
tag = image.tag
image = image.array
else:
tag = "{}/{}".format(tag, image_num)
# This will fail for the wrong tensorboard version.
image = self._order_image_axes(image, image_format, self._TENSORBOARDX_IMAGE_FORMAT)
# unfortunately tensorboardX does not have a __version__ attribute
# so I don't see how to check for the version and provide backwards
# compatability here
# tensorboardX borks if the number of image channels is not 3
# if image.shape[-1] == 1:
# image = image[..., [0, 0, 0]]
image = self._normalize_image(image)
# print(image.dtype, image.shape)
self.writer.add_image(tag, img_tensor=image, global_step=step)
@staticmethod
def _order_image_axes(image, image_format='CHW', target_format='CHW'):
# image axis gymnastics
_not_implemented_message = "target_format must be 'CHW' or 'HCW'."
if image.ndim == 2:
if target_format == 'CHW':
# image is 2D - tensorboardX 1.4+ needs a channel axis in the front
image = image[None, ...]
elif target_format == 'HWC':
# image is 2D - tensorboardX 1.3- needs a channel axis in the end
image = image[..., None]
else:
raise NotImplementedError(_not_implemented_message)
elif image.ndim == 3 and image_format.upper() == 'CHW':
if target_format == 'CHW':
# Nothing to do here
pass
elif target_format == 'HCW':
# We have a CHW image, but need HWC.
image = np.moveaxis(image, 0, 2)
else:
raise NotImplementedError(_not_implemented_message)
elif image.ndim == 3 and image_format.upper() == 'HWC':
if target_format == 'CHW':
# We have a HWC image, but need CHW
image = np.moveaxis(image, 2, 0)
elif target_format == 'HWC':
# Nothing to do here
pass
else:
raise NotImplementedError(_not_implemented_message)
else:
raise RuntimeError
return image
@staticmethod
def _normalize_image(image):
normalized_image = image - image.min()
maxval = normalized_image.max()
if maxval > 0:
normalized_image = normalized_image / maxval
return normalized_image
    def log_histogram(self, tag, values, step, bins=1000):
        """Logs the histogram of a list/vector of values.

        Not implemented yet; calling this always raises `NotImplementedError`.
        """
        # TODO
        raise NotImplementedError
    def get_config(self):
        """Return the logger's picklable configuration, with the writer stripped."""
        # Apparently, some SwigPyObject objects cannot be pickled - so we need to build the
        # writer on the fly.
        # Setting '_writer' to None here means the writer is lazily rebuilt
        # after unpickling instead of being serialized.
        config = super(TensorboardLogger, self).get_config()
        config.update({'_writer': None})
        return config
| 20,981 | 45.215859 | 113 | py |
inferno | inferno-master/tests/test_inferno.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `inferno` package."""
import unittest
import numpy as np
import torch
import os
import shutil
from os.path import dirname, join
from torch.utils.data.dataset import TensorDataset
from torch.utils.data.dataloader import DataLoader
from inferno.extensions.layers import Conv2D, BNReLUConv2D
from inferno.extensions.layers import AsMatrix
from inferno.extensions.containers import Graph
from inferno.trainers.basic import Trainer
from inferno.trainers.callbacks.essentials import NaNDetector
from inferno.trainers.callbacks.base import Callback
from torch import nn
class TestInferno(unittest.TestCase):
"""Tests for `inferno` package."""
NUM_SAMPLES = 100
NUM_TRAINING_SAMPLES = 70
NUM_CLASSES = 10
WORKING_DIRECTORY = dirname(__file__)
def read_environment_variables(self):
self.NUM_SAMPLES = int(os.getenv('INFERNO_TEST_NUM_SAMPLES', str(self.NUM_SAMPLES)))
self.NUM_TRAINING_SAMPLES = int(os.getenv('INFERNO_TEST_NUM_SAMPLES',
str(self.NUM_TRAINING_SAMPLES)))
self.NUM_CLASSES = int(os.getenv('INFERNO_TEST_NUM_CLASSES', str(self.NUM_CLASSES)))
self.WORKING_DIRECTORY = os.getenv('INFERNO_TEST_WORKING_DIRECTORY',
self.WORKING_DIRECTORY)
def setUp(self):
"""Set up test fixtures, if any."""
self.setUpDatasets()
def setUpDatasets(self):
# Build training dataset
inputs, targets = self.generate_random_data(self.NUM_SAMPLES, (3, 32, 32),
num_classes=self.NUM_CLASSES,
dtype='float32')
# Split to train and split
train_inputs, train_targets = inputs[:self.NUM_TRAINING_SAMPLES], \
targets[:self.NUM_TRAINING_SAMPLES]
validate_inputs, validate_targets = inputs[self.NUM_TRAINING_SAMPLES:], \
targets[self.NUM_TRAINING_SAMPLES:]
# Convert to tensor and build dataset
train_dataset = TensorDataset(torch.from_numpy(train_inputs),
torch.from_numpy(train_targets))
validate_dataset = TensorDataset(torch.from_numpy(validate_inputs),
torch.from_numpy(validate_targets))
# Build dataloaders from dataset
self.train_loader = DataLoader(train_dataset, batch_size=16,
shuffle=True, num_workers=0, pin_memory=False)
self.validate_loader = DataLoader(validate_dataset, batch_size=16,
shuffle=True, num_workers=0, pin_memory=False)
def setUpCallbacks(self):
class RecordSaveInfo(Callback):
def __init__(self):
super(RecordSaveInfo, self).__init__()
self.best_saves_at_iteration_epoch = []
self.saves_at_iteration_epoch = []
def begin_of_save(self, epoch_count, iteration_count,
is_iteration_with_best_validation_score, **_):
if is_iteration_with_best_validation_score:
self.best_saves_at_iteration_epoch.append((iteration_count, epoch_count))
else:
self.saves_at_iteration_epoch.append((iteration_count, epoch_count))
self.RecordSaveInfo = RecordSaveInfo
def generate_random_data(self, num_samples, shape, num_classes,
hardness=0.3, dtype=None):
dataset_input = np.zeros((num_samples,) + shape, dtype=dtype)
dataset_target = np.random.randint(num_classes, size=num_samples)
for sample_num in range(num_samples):
dataset_input[sample_num] = np.random.normal(loc=dataset_target[sample_num],
scale=(1 - hardness),
size=shape)
return dataset_input, dataset_target
def tearDown(self):
"""Tear down test fixtures, if any."""
if os.path.exists(join(self.WORKING_DIRECTORY, 'Weights')):
shutil.rmtree(join(self.WORKING_DIRECTORY, 'Weights'))
def build_graph_model(self):
model = Graph()
model\
.add_input_node('input')\
.add_node('conv1', Conv2D(3, 8, 3), 'input')\
.add_node('conv2', BNReLUConv2D(8, 8, 3), 'conv1')\
.add_node('pool1', nn.MaxPool2d(kernel_size=2, stride=2), 'conv2')\
.add_node('conv3', BNReLUConv2D(8, 8, 3), 'pool1')\
.add_node('pool2', nn.MaxPool2d(kernel_size=2, stride=2), 'conv3')\
.add_node('conv4', BNReLUConv2D(8, 8, 3), 'pool2')\
.add_node('pool3', nn.AdaptiveAvgPool2d(output_size=(1, 1)), 'conv4')\
.add_node('matrix', AsMatrix(), 'pool3')\
.add_node('linear', nn.Linear(8, self.NUM_CLASSES), 'matrix')\
.add_output_node('output', 'linear')
return model
def test_training_cpu(self):
"""Test Trainer."""
# Build model
model = self.build_graph_model()
# Build callbacks
# save_info_recorder = RecordSaveInfo()
# Build trainer
trainer = Trainer(model)\
.save_every((2, 'epochs'), to_directory=join(self.WORKING_DIRECTORY, 'Weights'))\
.validate_every((100, 'iterations'), for_num_iterations=10)\
.set_max_num_epochs(4)\
.save_at_best_validation_score()\
.build_optimizer('RMSprop')\
.build_criterion('CrossEntropyLoss')\
.build_metric('CategoricalError')\
.register_callback(NaNDetector)
# Bind datasets
trainer\
.bind_loader('train', self.train_loader)\
.bind_loader('validate', self.validate_loader)
# Go
trainer.pickle_module = 'dill'
trainer.fit()
if __name__ == '__main__':
unittest.main()
| 6,088 | 41.880282 | 93 | py |
inferno | inferno-master/tests/test_io/test_box/test_camvid.py | import os
from os.path import join, dirname, exists, isdir
import unittest
import numpy as np
_CAMVID_ROOT = None


def _camvid_available():
    """Return True if a CamVid root is configured, either via the module-level
    `_CAMVID_ROOT` or the CAMVID_ROOT environment variable."""
    return _CAMVID_ROOT is not None or os.environ.get('CAMVID_ROOT') is not None


class TestCamvid(unittest.TestCase):
    """Tests for the CamVid dataset wrappers in `inferno.io.box.camvid`.
    All dataset-touching tests are skipped unless a CamVid root is available."""
    CAMVID_ROOT = _CAMVID_ROOT
    PLOT_DIRECTORY = join(dirname(__file__), 'plots')

    def get_camvid_root(self):
        """Return the CamVid root, preferring the class attribute over the
        CAMVID_ROOT environment variable."""
        if self.CAMVID_ROOT is None:
            root = os.environ.get('CAMVID_ROOT')
            assert root is not None, "Camvid Root not found."
            # Fixed: this branch used to fall through without a return
            # statement, so a root configured via the environment variable
            # was silently dropped (the method returned None).
            return root
        else:
            return self.CAMVID_ROOT

    @unittest.skipUnless(_camvid_available(), "No root available.")
    def test_camvid_dataset_without_transforms(self):
        from inferno.io.box.camvid import CamVid
        camvid = CamVid(self.get_camvid_root())
        image, label = camvid[0]
        image = np.asarray(image)
        label = np.asarray(label)
        self.assertSequenceEqual(image.shape, (360, 480, 3))
        self.assertSequenceEqual(label.shape, (360, 480))
        self.assertLessEqual(label.max(), 11)

    @unittest.skipUnless(_camvid_available(), "No root available.")
    def _test_camvid_dataset_with_transforms(self):
        # NOTE(review): the leading underscore keeps unittest from collecting
        # this test; presumably disabled on purpose - TODO confirm.
        from inferno.io.box.camvid import CamVid
        from inferno.io.transform.base import Compose
        from inferno.io.transform.image import PILImage2NumPyArray, RandomSizedCrop, Scale
        from inferno.utils.io_utils import print_tensor
        camvid = CamVid(self.get_camvid_root(),
                        image_transform=Compose(),
                        label_transform=Compose(),
                        joint_transform=Compose())
        camvid.image_transform.add(PILImage2NumPyArray())
        camvid.label_transform.add(PILImage2NumPyArray())
        image, label = camvid[0]
        self.assertSequenceEqual(image.shape, (3, 360, 480))
        self.assertSequenceEqual(label.shape, (360, 480))
        # Add crop trafo
        camvid.joint_transform.add(RandomSizedCrop(ratio_between=(0.7, 1.0),
                                                   preserve_aspect_ratio=True))
        # We need 2 scale transforms, one with order 3 (image) and the other with order 0 (label)
        camvid.joint_transform.add(Scale(output_image_shape=(360, 480),
                                         interpolation_order=3, apply_to=[0]))
        camvid.joint_transform.add(Scale(output_image_shape=(360, 480),
                                         interpolation_order=0, apply_to=[1]))
        image, label = camvid[0]
        self.assertSequenceEqual(image.shape, (3, 360, 480))
        self.assertSequenceEqual(label.shape, (360, 480))
        self.assertLessEqual(len(np.unique(label)), 12)
        # Print tensors to make sure they look legit
        if not exists(self.PLOT_DIRECTORY):
            os.mkdir(self.PLOT_DIRECTORY)
        else:
            assert isdir(self.PLOT_DIRECTORY)
        print_tensor(image[None, ...], prefix='IMG--', directory=self.PLOT_DIRECTORY)
        print_tensor(label[None, None, ...], prefix='LAB--', directory=self.PLOT_DIRECTORY)
        print("[+] Inspect images at {}".format(self.PLOT_DIRECTORY))

    @unittest.skipUnless(_camvid_available(), "No root available.")
    def test_camvid_dataset_with_transforms(self):
        from inferno.io.box.camvid import get_camvid_loaders
        from inferno.utils.io_utils import print_tensor
        train_loader, validate_loader, test_loader = get_camvid_loaders(self.get_camvid_root())
        train_dataset = train_loader.dataset
        image, label = train_dataset[0]
        # Make sure the shapes checkout
        self.assertSequenceEqual(image.size(), (3, 360, 480))
        self.assertSequenceEqual(label.size(), (360, 480))
        self.assertEqual(image.type(), 'torch.FloatTensor')
        self.assertEqual(label.type(), 'torch.LongTensor')
        # Print tensors to make sure they look legit
        if not exists(self.PLOT_DIRECTORY):
            os.mkdir(self.PLOT_DIRECTORY)
        else:
            assert isdir(self.PLOT_DIRECTORY)
        print_tensor(image.numpy()[None, ...], prefix='IMG--', directory=self.PLOT_DIRECTORY)
        print_tensor(label.numpy()[None, None, ...], prefix='LAB--', directory=self.PLOT_DIRECTORY)
        print("[+] Inspect images at {}".format(self.PLOT_DIRECTORY))

    @unittest.skipUnless(_camvid_available(), "No root available.")
    def test_camvid_dataset_with_transforms_onehot(self):
        from inferno.io.box.camvid import get_camvid_loaders
        from inferno.utils.io_utils import print_tensor
        train_loader, validate_loader, test_loader = get_camvid_loaders(self.get_camvid_root(),
                                                                        labels_as_onehot=True)
        train_dataset = train_loader.dataset
        image, label = train_dataset[0]
        # Make sure the shapes checkout
        self.assertSequenceEqual(image.size(), (3, 360, 480))
        self.assertSequenceEqual(label.size(), (12, 360, 480))
        self.assertEqual(image.type(), 'torch.FloatTensor')
        self.assertEqual(label.type(), 'torch.FloatTensor')
        # Print tensors to make sure they look legit
        if not exists(self.PLOT_DIRECTORY):
            os.mkdir(self.PLOT_DIRECTORY)
        else:
            assert isdir(self.PLOT_DIRECTORY)
        print_tensor(image.numpy()[None, ...], prefix='IMG--', directory=self.PLOT_DIRECTORY)
        print_tensor(label.numpy()[None, ...], prefix='LAB--', directory=self.PLOT_DIRECTORY)
        print("[+] Inspect images at {}".format(self.PLOT_DIRECTORY))
if __name__ == '__main__':
unittest.main()
| 5,616 | 45.421488 | 99 | py |
inferno | inferno-master/tests/test_io/test_box/test_cityscapes.py | import os
from os.path import join, dirname, exists, isdir
import unittest
import numpy as np
import time
_CITYSCAPES_ROOT = None


def _cityscapes_available():
    """Return True if a Cityscapes root is configured, either via the
    module-level `_CITYSCAPES_ROOT` or the CITYSCAPES_ROOT environment
    variable."""
    return _CITYSCAPES_ROOT is not None or os.environ.get('CITYSCAPES_ROOT') is not None


class TestCityscapes(unittest.TestCase):
    """Tests for the Cityscapes dataset wrappers in `inferno.io.box.cityscapes`.
    All dataset-touching tests are skipped unless a Cityscapes root is
    available."""
    CITYSCAPES_ROOT = _CITYSCAPES_ROOT
    PLOT_DIRECTORY = join(dirname(__file__), 'plots')
    INCLUDE_COARSE = False

    def get_cityscapes_root(self):
        """Return the Cityscapes root, preferring the class attribute over the
        CITYSCAPES_ROOT environment variable."""
        if self.CITYSCAPES_ROOT is None:
            root = os.environ.get('CITYSCAPES_ROOT')
            assert root is not None, "Cityscapes Root not found."
            # Fixed: this branch used to fall through without a return
            # statement, so a root configured via the environment variable
            # was silently dropped (the method returned None).
            return root
        else:
            return self.CITYSCAPES_ROOT

    @unittest.skipUnless(_cityscapes_available(), "No cityscapes available.")
    def test_cityscapes_dataset_without_transforms(self):
        from inferno.io.box.cityscapes import Cityscapes
        cityscapes = Cityscapes(self.get_cityscapes_root())
        image, label = cityscapes[0]
        image = np.asarray(image)
        label = np.asarray(label)
        self.assertSequenceEqual(image.shape, (1024, 2048, 3))
        self.assertSequenceEqual(label.shape, (1024, 2048))
        self.assertLessEqual(label.max(), 33)

    @unittest.skipUnless(_cityscapes_available(), "No cityscapes available.")
    def test_cityscapes_dataset_without_transforms_unzipped(self):
        from inferno.io.box.cityscapes import Cityscapes
        cityscapes = Cityscapes(join(self.get_cityscapes_root(), 'extracted'),
                                read_from_zip_archive=False)
        image, label = cityscapes[0]
        image = np.asarray(image)
        label = np.asarray(label)
        self.assertSequenceEqual(image.shape, (1024, 2048, 3))
        self.assertSequenceEqual(label.shape, (1024, 2048))
        self.assertLessEqual(label.max(), 33)

    @unittest.skipUnless(_cityscapes_available(), "No cityscapes available.")
    def test_cityscapes_dataset_with_transforms(self):
        from inferno.io.box.cityscapes import get_cityscapes_loaders
        from inferno.utils.io_utils import print_tensor
        train_loader, validate_loader = get_cityscapes_loaders(self.get_cityscapes_root(),
                                                               include_coarse_dataset=self.INCLUDE_COARSE)
        train_dataset = train_loader.dataset
        tic = time.time()
        image, label = train_dataset[0]
        toc = time.time()
        print("[+] Loaded sample in {} seconds.".format(toc - tic))
        # Make sure the shapes checkout
        self.assertSequenceEqual(image.size(), (3, 1024, 2048))
        self.assertSequenceEqual(label.size(), (1024, 2048))
        self.assertEqual(image.type(), 'torch.FloatTensor')
        self.assertEqual(label.type(), 'torch.LongTensor')
        # Print tensors to make sure they look legit
        if not exists(self.PLOT_DIRECTORY):
            os.mkdir(self.PLOT_DIRECTORY)
        else:
            assert isdir(self.PLOT_DIRECTORY)
        print_tensor(image.numpy()[None, ...], prefix='IMG--', directory=self.PLOT_DIRECTORY)
        for class_id in np.unique(label.numpy()):
            print_tensor((label.numpy()[None, None, ...] == class_id).astype('float32'),
                         prefix='LAB-{}--'.format(class_id),
                         directory=self.PLOT_DIRECTORY)
        print_tensor(label.numpy()[None, None, ...],
                     prefix='LAB--',
                     directory=self.PLOT_DIRECTORY)
        print("[+] Inspect images at {}".format(self.PLOT_DIRECTORY))

    @unittest.skipUnless(_cityscapes_available(), "No cityscapes available.")
    def test_cityscapes_dataset_with_transforms_unzipped(self):
        from inferno.io.box.cityscapes import get_cityscapes_loaders
        from inferno.utils.io_utils import print_tensor
        train_loader, validate_loader = get_cityscapes_loaders(join(self.get_cityscapes_root(),
                                                                    'extracted'),
                                                               include_coarse_dataset=self.INCLUDE_COARSE,
                                                               read_from_zip_archive=False)
        train_dataset = train_loader.dataset
        tic = time.time()
        image, label = train_dataset[0]
        toc = time.time()
        print("[+] Loaded sample in {} seconds.".format(toc - tic))
        # Make sure the shapes checkout
        self.assertSequenceEqual(image.size(), (3, 1024, 2048))
        self.assertSequenceEqual(label.size(), (1024, 2048))
        self.assertEqual(image.type(), 'torch.FloatTensor')
        self.assertEqual(label.type(), 'torch.LongTensor')
        # Print tensors to make sure they look legit
        if not exists(self.PLOT_DIRECTORY):
            os.mkdir(self.PLOT_DIRECTORY)
        else:
            assert isdir(self.PLOT_DIRECTORY)
        print_tensor(image.numpy()[None, ...], prefix='IMG--', directory=self.PLOT_DIRECTORY)
        for class_id in np.unique(label.numpy()):
            print_tensor((label.numpy()[None, None, ...] == class_id).astype('float32'),
                         prefix='LAB-{}--'.format(class_id),
                         directory=self.PLOT_DIRECTORY)
        print_tensor(label.numpy()[None, None, ...],
                     prefix='LAB--',
                     directory=self.PLOT_DIRECTORY)
        print("[+] Inspect images at {}".format(self.PLOT_DIRECTORY))
if __name__ == '__main__':
unittest.main()
| 5,462 | 45.29661 | 106 | py |
inferno | inferno-master/tests/test_io/test_core/test_zip.py | import unittest
class ZipTest(unittest.TestCase):
    """Exercise `Zip` and `ZipReject` from `inferno.io.core`."""

    def test_zip_minimal(self):
        """Minimal test with python lists as iterators."""
        from inferno.io.core import Zip
        from torch.utils.data.dataset import Dataset

        # Plain python lists are rejected - inputs must be torch datasets.
        with self.assertRaises(TypeError):
            zipped = Zip([1, 2, 3], [4, 5, 6, 7])

        # Shim needed because Zip checks that its inputs are torch datasets.
        class ListDataset(list, Dataset):
            pass

        left = ListDataset([1, 2, 3, 4])
        right = ListDataset([5, 6, 7, 8, 9])
        zipped = Zip(left, right)
        # Length is governed by the shortest constituent dataset.
        self.assertEqual(len(zipped), 4)
        self.assertEqual(zipped[1], [2, 6])
        # Indexing past the shortest dataset raises.
        with self.assertRaises(IndexError):
            fetched = zipped[4]

    def test_zip_sync(self):
        """Test synchronization mechanics."""
        # TODO

    def test_zip_reject(self):
        from inferno.io.core import ZipReject
        from torch.utils.data.dataset import Dataset

        # Shim needed because Zip checks that its inputs are torch datasets.
        class ListDataset(list, Dataset):
            pass

        def rejection_criterion(sample_1, sample_2):
            # Reject an index whenever the first sample is strictly smaller.
            return sample_1 < sample_2

        primary = ListDataset([1, 2, 3, 4])
        secondary = ListDataset([2, 1, 3, 4])
        tertiary = ListDataset([0, 1, 2, 3])
        zipped = ZipReject(primary, secondary, tertiary,
                           rejection_criterion=rejection_criterion,
                           random_jump_after_reject=False,
                           rejection_dataset_indices=[0, 1])
        self.assertSequenceEqual(zipped[0], [2, 1, 1])
        zipped = ZipReject(primary, secondary, tertiary,
                           rejection_criterion=rejection_criterion,
                           rejection_dataset_indices=[1, 0])
        self.assertSequenceEqual(zipped[0], [1, 2, 0])
if __name__ == '__main__':
unittest.main()
| 2,058 | 31.68254 | 87 | py |
inferno | inferno-master/tests/test_io/test_core/test_concatenate.py | import unittest
class ConcatenateTest(unittest.TestCase):
    """Exercise `Concatenate`, which chains several datasets into one."""

    def test_concatenate(self):
        from inferno.io.core import Concatenate
        from torch.utils.data.dataset import Dataset

        # Plain python lists are rejected - inputs must be torch datasets.
        with self.assertRaises(AssertionError):
            cated = Concatenate([1, 2, 3], [4, 5, 6, 7])

        # Shim needed because Concatenate checks for torch datasets.
        class ListDataset(list, Dataset):
            pass

        cated = Concatenate(ListDataset([1, 2, 3, 4]),
                            ListDataset([5, 6, 7]),
                            ListDataset([8, 9, 10, 11, 12]))
        self.assertEqual(len(cated), 12)
        # Samples come from the constituent datasets in order.
        for index, expected in [(2, 3), (4, 5), (6, 7), (10, 11), (11, 12)]:
            self.assertEqual(cated[index], expected)
        # Out-of-range access raises.
        with self.assertRaises(AssertionError):
            _ = cated[12]
if __name__ == '__main__':
unittest.main()
| 945 | 26.823529 | 60 | py |
inferno | inferno-master/tests/test_utils/test_partial_cls.py | import unittest
import inferno.utils.model_utils as mu
from inferno.utils.partial_cls import register_partial_cls
import torch
import torch.nn as nn
class TestCls(object):
    """Simple four-attribute record used as the target of the partial-class
    registration tests below (two required and two defaulted parameters)."""
    def __init__(self, a, b, c=1, d=2):
        self.a, self.b, self.c, self.d = a, b, c, d
class PartialClsTester(unittest.TestCase):
    """Tests for `register_partial_cls`.

    NOTE: `register_partial_cls(..., module=__name__)` injects the generated
    class ('TestA') into THIS module's globals, which is why the tests can
    reference `TestA` without it being defined anywhere in the file.
    """
    def test_partial_cls(self):
        # Fix parameter `a`, change the default of `b`.
        register_partial_cls(TestCls, 'TestA',
            fix=dict(a='a'),
            default=dict(b='b'),
            module=__name__
        )
        assert 'TestA' in globals()
        inst = TestA()
        assert inst.a == 'a'
        assert inst.b == 'b'
        assert inst.c == 1
        assert inst.d == 2
        # Positional args now fill b, c, d (a is fixed away).
        inst = TestA('fu','bar','fubar')
        assert inst.a == 'a'
        assert inst.b == 'fu'
        assert inst.c == 'bar'
        assert inst.d == 'fubar'
        # A fixed parameter cannot be passed explicitly.
        with self.assertRaises(TypeError):
            inst = TestA(a=2)
    def test_update_existing_default_cls(self):
        # Fix `a`, override the existing default of `d`; `b` stays required.
        register_partial_cls(TestCls, 'TestA',
            fix=dict(a='a'),
            default=dict(d=3),
            module=__name__
        )
        assert 'TestA' in globals()
        inst = TestA(42)
        assert inst.a == 'a'
        assert inst.b == 42
        assert inst.c == 1
        assert inst.d == 3
        with self.assertRaises(TypeError):
            inst = TestA()
    def test_fix_nothing(self):
        # Neither fixing nor defaulting anything yields the original signature.
        register_partial_cls(TestCls, 'TestA',
            module=__name__
        )
        assert 'TestA' in globals()
        inst = TestA(1,2,3,4)
        assert inst.a == 1
        assert inst.b == 2
        assert inst.c == 3
        assert inst.d == 4
        with self.assertRaises(TypeError):
            inst = TestA()
    def test_fix_all(self):
        # Fixing every parameter leaves a zero-argument constructor.
        register_partial_cls(TestCls, 'TestA',
            module=__name__,
            fix=dict(a=4, b=3, c=2, d=1)
        )
        assert 'TestA' in globals()
        inst = TestA()
        assert inst.a == 4
        assert inst.b == 3
        assert inst.c == 2
        assert inst.d == 1
        # No positional or keyword arguments may be passed anymore.
        with self.assertRaises(TypeError):
            inst = TestA('a')
        with self.assertRaises(TypeError):
            inst = TestA(a=1)
        with self.assertRaises(TypeError):
            inst = TestA(b=1)
        with self.assertRaises(TypeError):
            inst = TestA(c=1)
        with self.assertRaises(TypeError):
            inst = TestA(d=1)
    def test_default_all(self):
        # Defaulting every parameter: all remain overridable.
        register_partial_cls(TestCls, 'TestA',
            module=__name__,
            default=dict(a=4, b=3, c=2, d=1)
        )
        assert 'TestA' in globals()
        inst = TestA()
        assert inst.a == 4
        assert inst.b == 3
        assert inst.c == 2
        assert inst.d == 1
        inst = TestA(2)
        assert inst.a == 2
        assert inst.b == 3
        assert inst.c == 2
        assert inst.d == 1
        inst = TestA(2,3,4,5)
        assert inst.a == 2
        assert inst.b == 3
        assert inst.c == 4
        assert inst.d == 5
        # `a` is already supplied positionally, so passing it again fails.
        with self.assertRaises(TypeError):
            inst = TestA(3,4,5,a=2)
        inst = TestA(3,4,5,d=2)
        assert inst.a == 3
        assert inst.b == 4
        assert inst.c == 5
        assert inst.d == 2
unittest.main()
| 3,316 | 23.036232 | 58 | py |
inferno | inferno-master/tests/test_utils/test_model_utils.py | import unittest
import inferno.utils.model_utils as mu
from inferno.utils.exceptions import ShapeError
import torch
import torch.nn as nn
class ModelUtilTester(unittest.TestCase):
    """Tests for `inferno.utils.model_utils.ModelTester`."""

    def test_model_tester(self):
        # A conv layer mapping 10 -> 20 channels matches the expected output
        # shape, so the tester passes the model through.
        tester = mu.ModelTester((1, 10, 32, 32), (1, 20, 32, 32))
        model = tester(nn.Conv2d(10, 20, 3, padding=1))
        # A mismatching expected output shape must raise a ShapeError.
        with self.assertRaises(ShapeError):
            mu.ModelTester((1, 10, 32, 32), (1, 30, 32, 32))(model)

    @unittest.skipUnless(torch.cuda.is_available(), "need cuda")
    def test_model_tester_cuda(self):
        # Same check, but with the tester and model both living on the GPU.
        tester = mu.ModelTester((1, 10, 32, 32), (1, 20, 32, 32)).cuda()
        model = tester(nn.Conv2d(10, 20, 3, padding=1).cuda())
        with self.assertRaises(ShapeError):
            mu.ModelTester((1, 10, 32, 32), (1, 30, 32, 32)).cuda()(model)
if __name__ == '__main__':
unittest.main()
| 832 | 35.217391 | 97 | py |
inferno | inferno-master/tests/test_training/test_basic.py | from unittest import TestCase, skipUnless
import torch
from unittest import main
import time
from os.path import join, dirname
class TestTrainer(TestCase):
    """End-to-end smoke tests for `inferno.trainers.basic.Trainer`."""
    # Parameters
    ROOT_DIR = dirname(__file__)
    CUDA = False
    HALF_PRECISION = False
    DOWNLOAD_CIFAR = True
    @staticmethod
    def _make_test_model():
        # Small conv net: (conv + ELU [+ maxpool]) x3 -> global average pool
        # -> flatten -> linear classifier over 10 classes.
        import torch.nn as nn
        from inferno.extensions.layers.reshape import AsMatrix
        toy_net = nn.Sequential(nn.Conv2d(3, 8, 3, 1, 1),
                                nn.ELU(),
                                nn.MaxPool2d(2),
                                nn.Conv2d(8, 8, 3, 1, 1),
                                nn.ELU(),
                                nn.MaxPool2d(2),
                                nn.Conv2d(8, 16, 3, 1, 1),
                                nn.ELU(),
                                nn.AdaptiveAvgPool2d((1, 1)),
                                AsMatrix(),
                                nn.Linear(16, 10))
        return toy_net
    def test_cifar(self):
        """Train the toy model on CIFAR-10 for two epochs; checks that the
        full train / validate / save cycle runs through."""
        from inferno.trainers.basic import Trainer
        from inferno.io.box.cifar import get_cifar10_loaders
        # Build cifar10 loaders
        trainloader, testloader = get_cifar10_loaders(root_directory=join(self.ROOT_DIR, 'data'),
                                                      download=self.DOWNLOAD_CIFAR)
        # Make model
        net = self._make_test_model()
        tic = time.time()
        # Make trainer
        trainer = Trainer(model=net)\
            .build_optimizer('Adam')\
            .build_criterion('CrossEntropyLoss')\
            .build_metric('CategoricalError')\
            .validate_every((1, 'epochs'))\
            .save_every((1, 'epochs'), to_directory=join(self.ROOT_DIR, 'saves'))\
            .save_at_best_validation_score()\
            .set_max_num_epochs(2)
        # Bind trainer to datasets
        trainer.bind_loader('train', trainloader).bind_loader('validate', testloader)
        # Check device and fit
        if self.CUDA:
            if self.HALF_PRECISION:
                trainer.cuda().set_precision('half').fit()
            else:
                trainer.cuda().fit()
        else:
            trainer.fit()
        toc = time.time()
        print("[*] Elapsed time: {} seconds.".format(toc - tic))
    def test_multi_io(self):
        """Trainer must handle models with multiple inputs and criteria with
        multiple targets (here: 2 inputs and 3 targets per sample)."""
        from torch.utils.data.dataset import Dataset
        from torch.utils.data.dataloader import DataLoader
        from inferno.trainers.basic import Trainer
        class DummyDataset(Dataset):
            def __len__(self):
                return 42
            def __getitem__(self, item):
                # 2 inputs and 3 targets (say)
                return torch.rand(3, 32, 32), \
                       torch.rand(3, 32, 32), \
                       torch.rand(1).uniform_(), \
                       torch.rand(1).uniform_(), \
                       torch.rand(1).uniform_()
        class DummyNetwork(torch.nn.Module):
            def __init__(self):
                super(DummyNetwork, self).__init__()
                self.conv = torch.nn.Conv2d(3, 1, 3, padding=1)
            def forward(self, *inputs):
                # Expect exactly the 2 inputs declared on the loader below.
                assert len(inputs) == 2
                out = self.conv(inputs[0])
                # Emit 3 predictions to match the 3 targets.
                return out.view(inputs[0].size(0), -1).mean(1), \
                       out.view(inputs[0].size(0), -1).mean(1), \
                       out.view(inputs[0].size(0), -1).mean(1)
        class DummyCriterion(torch.nn.Module):
            def forward(self, predictions, targets):
                assert len(predictions) == len(targets) == 3
                return predictions[0].mean()
        loader = DataLoader(DummyDataset())
        net = DummyNetwork()
        trainer = Trainer(net)\
            .build_criterion(DummyCriterion)\
            .build_optimizer('Adam')\
            .set_max_num_iterations(50)\
            .bind_loader('train', loader, num_inputs=2, num_targets=3)
        trainer.fit()
    def test_serialization(self):
        """A fully configured trainer must survive a save / load round trip."""
        from inferno.trainers.basic import Trainer
        import os
        # Make model
        net = self._make_test_model()
        # Make trainer
        trainer = Trainer(model=net) \
            .build_optimizer('Adam') \
            .build_criterion('CrossEntropyLoss') \
            .build_metric('CategoricalError') \
            .validate_every((1, 'epochs')) \
            .save_every((1, 'epochs'), to_directory=os.path.join(self.ROOT_DIR, 'saves')) \
            .save_at_best_validation_score() \
            .set_max_num_epochs(2)
        # Try to serialize
        trainer.save()
        # Try to unserialize
        trainer = Trainer(net).save_to_directory(os.path.join(self.ROOT_DIR, 'saves')).load()
    @skipUnless(torch.cuda.device_count() >= 4, "Not enough cuda devices for test_multi_gpu.")
    def test_multi_gpu(self):
        """Train with the model replicated over 4 GPUs (base device CPU)."""
        if not torch.cuda.is_available():
            return
        from inferno.trainers.basic import Trainer
        from inferno.io.box.cifar import get_cifar10_loaders
        import os
        # Make model
        net = self._make_test_model()
        # Make trainer
        trainer = Trainer(model=net) \
            .build_optimizer('Adam') \
            .build_criterion('CrossEntropyLoss') \
            .build_metric('CategoricalError') \
            .validate_every((1, 'epochs')) \
            .save_every((1, 'epochs'), to_directory=os.path.join(self.ROOT_DIR, 'saves')) \
            .save_at_best_validation_score() \
            .set_max_num_epochs(2)\
            .cuda(devices=[0, 1, 2, 3], base_device='cpu')
        train_loader, validate_loader = get_cifar10_loaders(root_directory=self.ROOT_DIR,
                                                            download=True)
        trainer.bind_loader('train', train_loader)
        trainer.bind_loader('validate', validate_loader)
        trainer.fit()
    def test_save(self):
        """An unconfigured trainer can also be saved and re-loaded."""
        from inferno.trainers.basic import Trainer
        trainer = Trainer().save_to_directory(to_directory=self.ROOT_DIR,
                                              checkpoint_filename='dummy.pytorch')
        trainer.save()
        # Instantiate new trainer and load
        trainer = Trainer().load(from_directory=self.ROOT_DIR, filename='dummy.pytorch')
    @skipUnless(torch.cuda.device_count() >= 2, "Not enough cuda devices for test_multi_gpu_setup.")
    def test_multi_gpu_setup(self):
        """`cuda(...)` must place the criterion's buffers on `base_device`."""
        from torch.nn import CrossEntropyLoss
        from inferno.trainers.basic import Trainer
        # Test base_device = 'cpu'
        # Build model
        net = self._make_test_model()
        # Make dummy criterion
        criterion = CrossEntropyLoss(weight=torch.rand(10))
        # Make trainer
        trainer = Trainer(net).build_criterion(criterion).cuda([0, 1], base_device='cpu')
        self.assertIsInstance(trainer.criterion.weight, torch.FloatTensor)
        # Test base_device = 'cuda'
        # Build model
        net = self._make_test_model()
        criterion = CrossEntropyLoss(weight=torch.rand(10))
        # Make trainer
        trainer = Trainer(net).build_criterion(criterion).cuda([0, 1], base_device='cuda')
        self.assertIsInstance(trainer.criterion.weight, torch.cuda.FloatTensor)
if __name__ == '__main__':
main()
| 7,264 | 37.036649 | 100 | py |
inferno | inferno-master/tests/test_training/test_callbacks/test_essentials.py | import unittest
import shutil
import h5py as h5
from os.path import dirname, join
from os import listdir
from inferno.trainers.basic import Trainer
from inferno.trainers.callbacks.essentials import DumpHDF5Every
from inferno.utils.test_utils import generate_random_dataloader
from inferno.extensions.layers import Conv2D, AsMatrix
from torch.nn import Sequential, MaxPool2d, AdaptiveAvgPool2d, Linear, Softmax
class TestEssentials(unittest.TestCase):
    """Tests for the 'essentials' trainer callbacks (here: DumpHDF5Every)."""
    WORKING_DIRECTORY = dirname(__file__)
    def setUp(self):
        # Build a simple model
        model = Sequential(Conv2D(3, 8, 3, activation='ReLU'),
                           MaxPool2d(2, 2),
                           Conv2D(8, 8, 3, activation='ReLU'),
                           MaxPool2d(2, 2),
                           Conv2D(8, 8, 3, activation='ReLU'),
                           MaxPool2d(2, 2),
                           Conv2D(8, 8, 3, activation='ReLU'),
                           AdaptiveAvgPool2d((1, 1)),
                           AsMatrix(),
                           Linear(8, 10))
        # 512 training samples / batch 16 -> 32 iterations per epoch, a figure
        # the filename assertions below rely on.
        train_dataloader = generate_random_dataloader(512, (3, 32, 32), 10, batch_size=16,
                                                      dtype='float32')
        validate_dataloader = generate_random_dataloader(32, (3, 32, 32), 10, batch_size=16,
                                                         dtype='float32')
        # Build trainer
        trainer = Trainer(model)\
            .bind_loader('train', train_dataloader)\
            .bind_loader('validate', validate_dataloader)\
            .save_to_directory(to_directory=join(self.WORKING_DIRECTORY, 'Weights'))\
            .build_criterion('CrossEntropyLoss').build_optimizer('RMSprop')
        self.trainer = trainer
    def test_dump_hdf5_every(self):
        # Configure callback
        dumper = DumpHDF5Every((1, 'epoch'),
                               to_directory=join(self.WORKING_DIRECTORY, 'Weights'),
                               dump_after_every_validation_run=True)
        self.trainer\
            .set_max_num_epochs(4)\
            .register_callback(dumper)\
            .validate_every((16, 'iterations'))
        self.trainer.fit()
        all_files = listdir(join(self.WORKING_DIRECTORY, 'Weights'))
        # One training dump per epoch (at 32 iterations per epoch) and two
        # validation dumps per epoch (validation runs every 16 iterations).
        for epoch in range(5):
            self.assertIn('dump.training.epoch{}.iteration{}.h5'.format(epoch, epoch * 32),
                          all_files)
            # We don't validate at last epoch
            if epoch != 4:
                self.assertIn('dump.validation.epoch{}.iteration{}.h5'
                              .format(epoch, (epoch * 32) + 16),
                              all_files)
                self.assertIn('dump.validation.epoch{}.iteration{}.h5'
                              .format(epoch, (epoch * 32) + 32),
                              all_files)
        # Check if the keys are right in a training dump
        sample_file_path = join(self.WORKING_DIRECTORY, 'Weights',
                                'dump.training.epoch0.iteration0.h5')
        with h5.File(sample_file_path, 'r') as sample_file:
            all_dataset_names = list(sample_file.keys())
            self.assertSequenceEqual(all_dataset_names,
                                     ['training_inputs_0', 'training_prediction', 'training_target'])
        # Check if the keys are right in a validation dump
        sample_file_path = join(self.WORKING_DIRECTORY, 'Weights',
                                'dump.validation.epoch0.iteration16.h5')
        with h5.File(sample_file_path, 'r') as sample_file:
            all_dataset_names = list(sample_file.keys())
            self.assertSequenceEqual(all_dataset_names,
                                     ['validation_inputs_0', 'validation_prediction',
                                      'validation_target'])
    def tearDown(self):
        # Clean up the checkpoint/dump directory created during the test.
        shutil.rmtree(join(self.WORKING_DIRECTORY, 'Weights'))
if __name__ == '__main__':
unittest.main()
| 3,964 | 44.574713 | 97 | py |
inferno | inferno-master/tests/test_training/test_callbacks/test_scheduling.py | import unittest
from inferno.trainers.callbacks.scheduling import ManualLR
from torch import nn
from torch.optim import Adam
class TestSchedulers(unittest.TestCase):
    """Tests for the learning-rate scheduling callbacks."""

    def test_manual_lr(self):
        """ManualLR multiplies the LR by the given factor once the trigger
        iteration is reached; multiple triggers at the same iteration
        compound multiplicatively."""
        class DummyTrainer(object):
            # Minimal stand-in exposing the attributes ManualLR reads.
            def __init__(self):
                self.iteration_count = 0
                self.epoch_count = 0
                self.optimizer = Adam(nn.Linear(10, 10).parameters(), lr=1.)
        manual_lr = ManualLR([((100, 'iterations'), 0.5),
                              ((200, 'iterations'), 0.5),
                              ((200, 'iterations'), 0.1)])
        trainer = DummyTrainer()
        manual_lr._trainer = trainer
        # Before any trigger iteration, the LR is untouched.
        manual_lr.end_of_training_iteration()
        self.assertEqual(trainer.optimizer.param_groups[0]['lr'], 1.)
        trainer.iteration_count = 100
        manual_lr.end_of_training_iteration()
        self.assertEqual(trainer.optimizer.param_groups[0]['lr'], 0.5)
        trainer.iteration_count = 200
        manual_lr.end_of_training_iteration()
        # Both triggers at 200 iterations fire: 0.5 * 0.5 * 0.1 = 0.025.
        self.assertEqual(trainer.optimizer.param_groups[0]['lr'], 0.025)
        trainer.iteration_count = 300
        # Fixed: the callback was never invoked for this iteration, which
        # made the final assertion vacuous. Invoke it and verify that no
        # further decay is applied past the last trigger.
        manual_lr.end_of_training_iteration()
        self.assertEqual(trainer.optimizer.param_groups[0]['lr'], 0.025)
if __name__ == '__main__':
unittest.main()
| 1,256 | 34.914286 | 76 | py |
inferno | inferno-master/tests/test_training/test_callbacks/test_base.py | import unittest
import torch
from inferno.trainers.callbacks.base import Callback, CallbackEngine
from inferno.trainers.basic import Trainer
from os.path import join, dirname, exists
from os import makedirs
from shutil import rmtree
class DummyCallback(Callback):
    """Well-formed callback: its method name matches a known trigger event."""
    def end_of_training_iteration(self, **_):
        # The engine must have bound a trainer before dispatching events.
        assert self.trainer is not None
class WrongDummyCallback(Callback):
    """Malformed callback: 'end_of_iteration' is not a recognized trigger name,
    so registering it is expected to fail."""
    def end_of_iteration(self):
        pass
class CallbackMechTest(unittest.TestCase):
    """Exercises CallbackEngine registration, serialization and the per-class
    callback instance registry."""
    ROOT_DIR = join(dirname(__file__), 'root')

    def setUp(self):
        # Working directory for serialization artifacts.
        makedirs(self.ROOT_DIR, exist_ok=True)

    def tearDown(self):
        if exists(self.ROOT_DIR):
            rmtree(self.ROOT_DIR)

    @staticmethod
    def _registered_iteration_callback(engine):
        """Return the first callback registered for 'end_of_training_iteration'."""
        registry = engine._callback_registry.get('end_of_training_iteration')
        return next(iter(registry))

    def test_serialization(self):
        # Build engine and trainer, then register a dummy callback.
        engine = CallbackEngine().bind_trainer(Trainer())
        engine.register_callback(DummyCallback())
        # Round-trip through torch serialization.
        save_path = join(self.ROOT_DIR, 'callback_engine.pkl')
        torch.save(engine, save_path)
        engine = torch.load(save_path)
        # The trainer must be detached after unpickling.
        self.assertIsNone(engine._trainer)
        # The registered callback must survive the round trip.
        self.assertIsInstance(self._registered_iteration_callback(engine),
                              DummyCallback)

    def test_auto_registry(self):
        engine = CallbackEngine().bind_trainer(Trainer())
        engine.register_callback(DummyCallback())
        self.assertIsInstance(self._registered_iteration_callback(engine),
                              DummyCallback)
        # Callbacks with unknown trigger method names must be rejected.
        with self.assertRaises(AssertionError):
            engine.register_callback(WrongDummyCallback())

    def test_instance_registry(self):
        class Foo(Callback):
            pass

        class Bar(Callback):
            pass

        foo = Foo()
        bar = Bar()
        # A single instance is returned as-is, not wrapped in a list.
        self.assertIs(foo.get_instances(), foo)
        self.assertIs(bar.get_instances(), bar)
        foo2 = Foo()
        # Multiple instances of one class are tracked in creation order.
        self.assertSequenceEqual(foo2.get_instances(), [foo, foo2])
        self.assertIs(bar.get_instances(), bar)
if __name__ == '__main__':
unittest.main()
| 2,391 | 32.222222 | 80 | py |
inferno | inferno-master/tests/test_training/test_callbacks/test_logging/test_tensorboard.py | import unittest
import os
from shutil import rmtree
import numpy as np
import torch
import torch.nn as nn
from inferno.trainers.basic import Trainer
from torch.utils.data.dataset import TensorDataset
from torch.utils.data.dataloader import DataLoader
from inferno.trainers.callbacks.logging.tensorboard import TensorboardLogger
from inferno.extensions.layers.reshape import AsMatrix
class TestTensorboard(unittest.TestCase):
    """End-to-end smoke tests for the TensorboardLogger callback: train a tiny
    net on random data and make sure logging/serialization do not break."""
    ROOT_DIR = os.path.dirname(__file__)
    # Trainer precision setting ('float' == float32).
    PRECISION = 'float'
    SAVE_DIRECTORY = os.path.join(ROOT_DIR, 'saves')
    LOG_DIRECTORY = os.path.join(ROOT_DIR, 'logs')

    @staticmethod
    def _make_test_model(input_channels):
        """Build a small conv net mapping (N, input_channels, H, W) to 10 logits."""
        toy_net = nn.Sequential(nn.Conv2d(input_channels, 8, 3, 1, 1),
                                nn.ELU(),
                                nn.MaxPool2d(2),
                                nn.Conv2d(8, 8, 3, 1, 1),
                                nn.ELU(),
                                nn.MaxPool2d(2),
                                nn.Conv2d(8, 16, 3, 1, 1),
                                nn.ELU(),
                                # Global max-pool to (N, 16, 1, 1), then flatten.
                                nn.AdaptiveMaxPool2d((1, 1)),
                                AsMatrix(),
                                nn.Linear(16, 10))
        return toy_net

    def tearDown(self):
        # Best-effort cleanup of save/log directories (may not exist).
        for d in [self.SAVE_DIRECTORY, self.LOG_DIRECTORY]:
            try:
                rmtree(d)
            except OSError:
                pass

    def get_random_dataloaders(self, input_channels=3):
        """Build single-sample train/validation loaders over random data."""
        # Convert build random tensor dataset
        data_shape = (1, input_channels, 64, 64)
        target_shape = (1)
        random_array = torch.from_numpy(np.random.rand(*data_shape)).float()
        # One random class label in [0, 8] (randint's upper bound is exclusive).
        target_array = torch.from_numpy(np.random.randint(0, 9, size=target_shape))
        train_dataset = TensorDataset(random_array, target_array)
        test_dataset = TensorDataset(random_array, target_array)
        # Build dataloaders from dataset
        train_loader = DataLoader(train_dataset, batch_size=1,
                                  shuffle=True, num_workers=0, pin_memory=False)
        test_loader = DataLoader(test_dataset, batch_size=1,
                                 shuffle=True, num_workers=0, pin_memory=False)
        return train_loader, test_loader

    def get_trainer(self, input_channels):
        """Assemble a fully-configured Trainer with a TensorboardLogger bound."""
        # Build model
        net = self._make_test_model(input_channels)
        # Build trainer
        trainer = Trainer(net)\
            .build_logger(TensorboardLogger(send_image_at_batch_indices=0,
                                            send_image_at_channel_indices='all',
                                            log_images_every=(20, 'iterations')),
                          log_directory=self.LOG_DIRECTORY)\
            .build_criterion('CrossEntropyLoss')\
            .build_metric('CategoricalError')\
            .build_optimizer('Adam')\
            .validate_every((1, 'epochs'))\
            .save_every((2, 'epochs'), to_directory=self.SAVE_DIRECTORY)\
            .save_at_best_validation_score()\
            .set_max_num_epochs(2)\
            .set_precision(self.PRECISION)
        # Bind loaders
        train_loader, test_loader = self.get_random_dataloaders(input_channels=input_channels)
        trainer.bind_loader('train', train_loader).bind_loader('validate', test_loader)
        return trainer

    def test_tensorboard(self):
        # RGB-like 3-channel input.
        trainer = self.get_trainer(3)
        trainer.fit()

    def test_tensorboard_grayscale(self):
        # Single-channel input exercises the grayscale image-logging path.
        trainer = self.get_trainer(1)
        trainer.fit()

    def test_serialization(self):
        """A trainer must survive a save/load round trip and keep training."""
        trainer = self.get_trainer(3)
        # Serialize
        trainer.save()
        # Unserialize
        trainer = Trainer().load(os.path.join(self.ROOT_DIR, 'saves'))
        train_loader, test_loader = self.get_random_dataloaders(input_channels=3)
        trainer.bind_loader('train', train_loader).bind_loader('validate', test_loader)
        trainer.fit()
if __name__ == '__main__':
unittest.main()
| 3,992 | 38.147059 | 94 | py |
inferno | inferno-master/tests/test_extensions/test_layers/test_device.py | import unittest
from inferno.extensions.layers.device import DeviceTransfer, OnDevice
import torch
class TransferTest(unittest.TestCase):
    """Tests for the device-transfer layers (require CUDA)."""

    @unittest.skipIf(not torch.cuda.is_available(), "GPU not available.")
    def test_device_transfer(self):
        """DeviceTransfer('cpu') must run the forward pass on the CPU while
        routing gradients back to the (CUDA) input."""
        if not torch.cuda.is_available():
            return
        # Build transfer model
        transfer = DeviceTransfer('cpu')
        # Fix: the input must be a leaf that requires grad *after* the .cuda()
        # transfer — otherwise autograd never populates x.grad and the
        # assertIsNotNone below cannot hold. (Calling .cuda() on a
        # requires-grad tensor would make the CUDA tensor a non-leaf, whose
        # .grad also stays None.)
        x = torch.rand(10, 10).cuda().requires_grad_()
        y = transfer(x)
        loss = y.mean()
        loss.backward()
        # Forward result lives on the CPU...
        self.assertFalse(y.data.is_cuda)
        # ...while the gradient arrives on the input's CUDA device.
        self.assertIsNotNone(x.grad)
        self.assertTrue(x.grad.data.is_cuda)

    @unittest.skipIf(not torch.cuda.is_available(), "GPU not available.")
    def test_on_device(self):
        """A Sequential of OnDevice-wrapped modules on different devices must
        yield a tensor on the last module's device (CPU here)."""
        if not torch.cuda.is_available():
            return
        # Input starts on the CPU; OnDevice handles the transfers internally.
        x = torch.rand(1, 10)
        # Build model over multiple devices
        multi_device_model = torch.nn.Sequential(OnDevice(torch.nn.Linear(10, 10), 'cuda'),
                                                 OnDevice(torch.nn.Linear(10, 10), 'cpu'))
        y = multi_device_model(x)
        self.assertIsInstance(y.data, torch.FloatTensor)
if __name__ == '__main__':
unittest.main()
| 1,215 | 32.777778 | 91 | py |
inferno | inferno-master/tests/test_extensions/test_layers/test_reshape.py | import unittest
import torch
class TestReshape(unittest.TestCase):
    """Shape contracts of the layers in inferno.extensions.layers.reshape."""

    @staticmethod
    def _random_input(*shape):
        # Uniform random tensor of the requested shape.
        return torch.rand(*shape)

    def test_as_matrix(self):
        from inferno.extensions.layers.reshape import AsMatrix
        # AsMatrix drops the trailing singleton spatial axes to give (N, C).
        out = AsMatrix()(self._random_input(10, 20, 1, 1))
        self.assertEqual(list(out.size()), [10, 20])

    def test_flatten(self):
        from inferno.extensions.layers.reshape import Flatten
        # Flatten collapses everything but the batch axis.
        out = Flatten()(self._random_input(10, 20, 2, 2))
        self.assertEqual(list(out.size()), [10, 80])

    def test_as_2d(self):
        from inferno.extensions.layers.reshape import As2D
        as_2d = As2D()
        # 5D volume: z is folded into the channel axis (20 * 3 == 60).
        self.assertEqual(list(as_2d(self._random_input(10, 20, 3, 30, 30)).size()),
                         [10, 60, 30, 30])
        # Already 2D: left untouched.
        self.assertEqual(list(as_2d(self._random_input(10, 20, 30, 30)).size()),
                         [10, 20, 30, 30])
        # Matrix input: singleton spatial axes are appended.
        self.assertEqual(list(as_2d(self._random_input(10, 20)).size()),
                         [10, 20, 1, 1])

    def test_as_3d(self):
        from inferno.extensions.layers.reshape import As3D
        from inferno.utils.exceptions import ShapeError
        as_3d = As3D()
        # 5D input passes through unchanged.
        self.assertEqual(list(as_3d(self._random_input(10, 20, 3, 30, 30)).size()),
                         [10, 20, 3, 30, 30])
        # 4D input gets a singleton z axis.
        self.assertEqual(list(as_3d(self._random_input(10, 20, 30, 30)).size()),
                         [10, 20, 1, 30, 30])
        # Matrix input gets singleton z and spatial axes.
        self.assertEqual(list(as_3d(self._random_input(10, 20)).size()),
                         [10, 20, 1, 1, 1])
        # With channel_as_z, the channel axis becomes the z axis.
        as_3d.channel_as_z = True
        self.assertEqual(list(as_3d(self._random_input(10, 20, 30, 30)).size()),
                         [10, 1, 20, 30, 30])
        # With a fixed slice count, channels are split into (z, channel).
        as_3d.num_channels_or_num_z_slices = 2
        self.assertEqual(list(as_3d(self._random_input(10, 40, 30, 30)).size()),
                         [10, 2, 20, 30, 30])
        # A channel count not divisible by the slice count must raise.
        with self.assertRaises(ShapeError):
            shape_after = as_3d(self._random_input(10, 41, 30, 30)).size()
            self.assertEqual(list(shape_after), [10, 2, 20, 30, 30])
if __name__ == '__main__':
unittest.main()
| 2,420 | 34.086957 | 81 | py |
inferno | inferno-master/tests/test_extensions/test_layers/test_activations.py | import unittest
import torch
import inferno.extensions.layers.activations as activations
class ActivationTest(unittest.TestCase):
    """Tests for the custom activation layers."""

    def test_selu(self):
        # SELU is elementwise, so the output shape must equal the input shape.
        inp = torch.rand(100)
        out = activations.SELU()(inp)
        self.assertEqual(list(inp.size()), list(out.size()))
if __name__ == '__main__':
unittest.main()
| 325 | 20.733333 | 59 | py |
inferno | inferno-master/tests/test_extensions/test_layers/test_convolutional.py | import unittest
import torch
from inferno.utils.model_utils import ModelTester
class TestConvolutional(unittest.TestCase):
    @unittest.skipIf(not torch.cuda.is_available(), "GPU not available.")
    def test_bn_relu_depthwise_conv2d_pyinn(self):
        """BNReLUDepthwiseConv2D with 'auto' out_channels should build a
        depthwise convolution (groups == in_channels) that preserves the
        input shape."""
        from inferno.extensions.layers.convolutional import BNReLUDepthwiseConv2D
        model = BNReLUDepthwiseConv2D(10, 'auto', 3)
        # ModelTester feeds a random (1, 10, 100, 100) input and checks that
        # the output shape matches the second argument.
        ModelTester((1, 10, 100, 100),
                    (1, 10, 100, 100)).cuda()(model)
        self.assertTrue(model.depthwise)
        self.assertEqual(model.conv.groups, 10)
if __name__ == '__main__':
unittest.main()
| 615 | 31.421053 | 81 | py |
inferno | inferno-master/tests/test_extensions/test_layers/deprecated/building_blocks.py | import unittest
import torch
import inferno.extensions.layers.building_blocks as bb
class ResBlockTest(unittest.TestCase):
    """Shape checks for bb.ResBlock in 2D and 3D under various options."""

    def _check_output_shape(self, input_shape, expected_shape, **resblock_kwargs):
        """Run a random input through a ResBlock and verify the output shape."""
        tensor = torch.rand(*input_shape)
        block = bb.ResBlock(**resblock_kwargs)
        self.assertEqual(list(block(tensor).size()), list(expected_shape))

    def test_2D_simple_(self):
        self._check_output_shape((1, 3, 64, 15), (1, 3, 64, 15),
                                 in_channels=3, out_channels=3, dim=2)

    def test_3D_simple_(self):
        self._check_output_shape((1, 3, 20, 64, 15), (1, 3, 20, 64, 15),
                                 in_channels=3, out_channels=3, dim=3)

    def test_2D_simple_2(self):
        # Channel expansion: 3 -> 6.
        self._check_output_shape((1, 3, 64, 64), (1, 6, 64, 64),
                                 in_channels=3, out_channels=6, dim=2)

    def test_2D_simple_3(self):
        # Deeper block (size=4).
        self._check_output_shape((1, 3, 64, 64), (1, 6, 64, 64),
                                 in_channels=3, out_channels=6, dim=2, size=4)

    def test_2D_simple_4(self):
        # Forced skip op even though in_channels == out_channels.
        self._check_output_shape((1, 6, 64, 64), (1, 6, 64, 64),
                                 in_channels=6, out_channels=6, dim=2, size=4,
                                 force_skip_op=True)

    def test_2D_simple_5(self):
        # Without batchnorm.
        self._check_output_shape((1, 6, 64, 64), (1, 6, 64, 64),
                                 in_channels=6, batchnorm=False, out_channels=6,
                                 dim=2, size=4, force_skip_op=True)

    def test_2D_simple_6(self):
        # Without batchnorm and without the final activation.
        self._check_output_shape((1, 6, 64, 64), (1, 6, 64, 64),
                                 in_channels=6, batchnorm=False, out_channels=6,
                                 dim=2, size=4, force_skip_op=True, activated=False)

    def test_3D_simple_6(self):
        # Same configuration in 3D.
        self._check_output_shape((1, 6, 64, 64, 20), (1, 6, 64, 64, 20),
                                 in_channels=6, batchnorm=False, out_channels=6,
                                 dim=3, size=4, force_skip_op=True, activated=False)
if __name__ == '__main__':
unittest.main()
| 2,372 | 29.037975 | 90 | py |
inferno | inferno-master/tests/test_extensions/test_containers/test_graph.py | import unittest
from functools import reduce
import torch
class TestGraph(unittest.TestCase):
    """Tests for the Graph container (inferno.extensions.containers.graph)."""

    def setUp(self):
        import torch.nn as nn
        from inferno.utils.python_utils import from_iterable

        class DummyNamedModule(nn.Module):
            """Module that appends its name to a shared history list when
            called, so tests can verify the graph's execution order."""
            def __init__(self, name, history, num_inputs=1):
                super(DummyNamedModule, self).__init__()
                self.name = name
                self.history = history
                self.num_inputs = num_inputs

            def forward(self, *inputs):
                assert len(inputs) == self.num_inputs
                self.history.append(self.name)
                if self.num_inputs > 1:
                    # Reduce multiple inputs to a single output by summation.
                    output = reduce(lambda x, y: x + y, inputs)
                else:
                    output = from_iterable(inputs)
                return output

        self.DummyNamedModule = DummyNamedModule

    # @unittest.skip
    def test_graph_dummy_basic(self):
        """Two input branches merging into one chain: both branch nodes must
        run (in either order) before the merge node."""
        import torch
        from inferno.extensions.containers.graph import Graph
        if not hasattr(self, 'DummyNamedModule'):
            self.setUp()
        DummyNamedModule = self.DummyNamedModule
        history = []
        # Build graph
        model = Graph()
        model.add_input_node('input_0')
        model.add_input_node('input_1')
        model.add_node('conv0_0', DummyNamedModule('conv0_0', history))
        model.add_node('conv0_1', DummyNamedModule('conv0_1', history))
        model.add_node('conv1', DummyNamedModule('conv1', history, 2))
        model.add_node('conv2', DummyNamedModule('conv2', history))
        model.add_output_node('output_0')
        model.add_edge('input_0', 'conv0_0')\
            .add_edge('input_1', 'conv0_1')\
            .add_edge('conv0_0', 'conv1')\
            .add_edge('conv0_1', 'conv1')\
            .add_edge('conv1', 'conv2')\
            .add_edge('conv2', 'output_0')
        input_0 = torch.rand(10, 10)
        input_1 = torch.rand(10, 10)
        model(input_0, input_1)
        self.assertTrue(history == ['conv0_0', 'conv0_1', 'conv1', 'conv2'] or
                        history == ['conv0_1', 'conv0_0', 'conv1', 'conv2'])

    # @unittest.skip
    def test_graph_dummy_inception(self):
        """Inception-style fan-out/fan-in: conv0 feeds two parallel branches
        which are merged by conv2."""
        import torch
        from inferno.extensions.containers.graph import Graph
        if not hasattr(self, 'DummyNamedModule'):
            self.setUp()
        DummyNamedModule = self.DummyNamedModule
        history = []
        # Build graph
        model = Graph()
        model.add_input_node('input_0')
        model.add_node('conv0', DummyNamedModule('conv0', history), 'input_0')
        model.add_node('conv1_0', DummyNamedModule('conv1_0', history), 'conv0')
        model.add_node('conv1_1', DummyNamedModule('conv1_1', history), 'conv0')
        model.add_node('conv2', DummyNamedModule('conv2', history, 2),
                       ['conv1_0', 'conv1_1'])
        model.add_output_node('output_0', 'conv2')
        input_0 = torch.rand(10, 10)
        model(input_0)
        # Fix: the second alternative previously checked for 'conv1_2', a node
        # that does not exist in this graph. The parallel branches are
        # 'conv1_0' and 'conv1_1' and may execute in either order.
        self.assertTrue(history == ['conv0', 'conv1_0', 'conv1_1', 'conv2'] or
                        history == ['conv0', 'conv1_1', 'conv1_0', 'conv2'])

    # @unittest.skip
    def test_graph_basic(self):
        """A real two-conv graph must map (1, 1, 100, 100) -> (1, 1, 100, 100)."""
        from inferno.extensions.containers.graph import Graph
        from inferno.extensions.layers.convolutional import ConvELU2D
        from inferno.utils.model_utils import ModelTester
        # Build graph
        model = Graph()
        model.add_input_node('input_0')
        model.add_node('conv0', ConvELU2D(1, 10, 3), previous='input_0')
        model.add_node('conv1', ConvELU2D(10, 1, 3), previous='conv0')
        model.add_output_node('output_0', previous='conv1')
        ModelTester((1, 1, 100, 100), (1, 1, 100, 100))(model)

    @unittest.skipUnless(torch.cuda.is_available(), "No cuda.")
    def test_graph_device_transfers(self):
        """Per-node device placement: since the final node runs on CUDA, the
        graph output must be a CUDA tensor."""
        from inferno.extensions.containers.graph import Graph
        from inferno.extensions.layers.convolutional import ConvELU2D
        import torch
        # Build graph
        model = Graph()
        model.add_input_node('input_0')
        model.add_node('conv0', ConvELU2D(1, 10, 3), previous='input_0')
        model.add_node('conv1', ConvELU2D(10, 1, 3), previous='conv0')
        model.add_output_node('output_0', previous='conv1')
        # Transfer: first node on CPU, second on GPU 0.
        model.to_device('conv0', 'cpu').to_device('conv1', 'cuda', 0)
        x = torch.rand(1, 1, 100, 100)
        y = model(x)
        self.assertIsInstance(y.data, torch.cuda.FloatTensor)

    @unittest.skip("Needs machine with 4 GPUs")
    def test_multi_gpu(self):
        """Smoke test: a Graph model must be usable with data_parallel."""
        import torch
        import torch.nn as nn
        from torch.nn.parallel.data_parallel import data_parallel
        from inferno.extensions.containers.graph import Graph
        input_shape = [8, 1, 3, 128, 128]
        model = Graph() \
            .add_input_node('input') \
            .add_node('conv0', nn.Conv3d(1, 10, 3, padding=1), previous='input') \
            .add_node('conv1', nn.Conv3d(10, 1, 3, padding=1), previous='conv0') \
            .add_output_node('output', previous='conv1')
        model.cuda()
        input = torch.rand(*input_shape).cuda()
        data_parallel(model, input, device_ids=[0, 1, 2, 3])
if __name__ == '__main__':
unittest.main()
| 5,288 | 37.05036 | 82 | py |
inferno | inferno-master/tests/test_extensions/test_models/test_unet.py | import unittest
import torch.cuda as cuda
from inferno.utils.model_utils import ModelTester, MultiscaleModelTester
from inferno.extensions.models import UNet
class _MultiscaleUNet(UNet):
    """UNet variant used for testing multiscale outputs: every scale's feature
    map is returned alongside the final output (presumably the second return
    value of conv_op_factory flags the op as a side output — confirm against
    UNet's implementation)."""
    def conv_op_factory(self, in_channels, out_channels, part, index):
        # Reuse the parent's conv op, but mark it (True) as a side output.
        return super(_MultiscaleUNet, self).conv_op_factory(in_channels, out_channels, part, index)[0], True

    def forward(self, input):
        x = self._initial_conv(input)
        # Deliberately skip UNet.forward via super(UNet, self): this calls
        # UNet's *parent* forward to collect all side outputs.
        x = list(super(UNet, self).forward(x))
        # Only the last (full-resolution) output goes through the output head.
        x[-1] = self._output(x[-1])
        return tuple(x)
class UNetTest(unittest.TestCase):
    """Shape tests for UNet in 2D and 3D, including multiscale side outputs."""
    def test_unet_2d(self):
        # ModelTester feeds a random input of the first shape and checks that
        # the output matches the second shape.
        tester = ModelTester((1, 1, 256, 256), (1, 1, 256, 256))
        if cuda.is_available():
            tester.cuda()
        tester(UNet(1, 1, dim=2, initial_features=32))

    def test_unet_3d(self):
        tester = ModelTester((1, 1, 16, 64, 64), (1, 1, 16, 64, 64))
        if cuda.is_available():
            tester.cuda()
        # test default unet 3d
        tester(UNet(1, 1, dim=3, initial_features=8))

    def test_monochannel_unet_3d(self):
        """All scales carry a fixed channel count; spatial dims halve per level
        on the way down and double on the way up."""
        nc = 2
        class _UNetMonochannel(_MultiscaleUNet):
            def _get_num_channels(self, depth):
                return nc

        # Expected side-output shapes: 4 encoder scales, the bottleneck, then
        # 3 decoder scales, and finally the single-channel output.
        shapes = [(1, nc, 16, 64, 64), (1, nc, 8, 32, 32), (1, nc, 4, 16, 16), (1, nc, 2, 8, 8), (1, nc, 1, 4, 4),
                  (1, nc, 2, 8, 8), (1, nc, 4, 16, 16), (1, nc, 8, 32, 32), (1, 1, 16, 64, 64)]
        tester = MultiscaleModelTester((1, 1, 16, 64, 64), shapes)
        if cuda.is_available():
            tester.cuda()
        tester(_UNetMonochannel(1, 1, dim=3, initial_features=8))

    def test_inverse_pyramid_unet_2d(self):
        """Channel counts shrink with depth (13, 12, 11) instead of growing."""
        class _UNetInversePyramid(_MultiscaleUNet):
            def _get_num_channels(self, depth):
                return [13, 12, 11][depth - 1]

        shapes = [(1, 13, 16, 64), (1, 12, 8, 32), (1, 11, 4, 16), (1, 11, 2, 8),
                  (1, 12, 4, 16), (1, 13, 8, 32), (1, 1, 16, 64)]
        tester = MultiscaleModelTester((1, 1, 16, 64), shapes)
        if cuda.is_available():
            tester.cuda()
        tester(_UNetInversePyramid(1, 1, dim=2, depth=3, initial_features=8))
if __name__ == '__main__':
unittest.main()
| 2,199 | 36.288136 | 114 | py |
inferno | inferno-master/tests/test_extensions/test_models/test_res_unet.py | import unittest
import torch
import torch.cuda as cuda
from inferno.utils.model_utils import ModelTester
class ResUNetTest(unittest.TestCase):
    """Shape tests for ResBlockUNet, including side-output configurations."""
    def test_res_unet_2d(self):
        from inferno.extensions.models import ResBlockUNet
        # ModelTester feeds a random input of the first shape and checks that
        # the output matches the second shape.
        tester = ModelTester((1, 1, 256, 256), (1, 1, 256, 256))
        if cuda.is_available():
            tester.cuda()
        tester(ResBlockUNet(in_channels=1, out_channels=1, dim=2))

    def test_res_unet_3d(self):
        from inferno.extensions.models import ResBlockUNet
        tester = ModelTester((1, 1, 16, 64, 64), (1, 1, 16, 64, 64))
        if cuda.is_available():
            tester.cuda()
        # test default unet 3d
        tester(ResBlockUNet(in_channels=1, out_channels=1, dim=3))

    def test_2d_side_out_bot_up(self):
        """Side outputs from the bottom and the decoder ('up') path: expect
        one output per decoder level plus the bottleneck, coarsest first."""
        from inferno.extensions.models import ResBlockUNet
        depth = 3
        in_channels = 3
        x = torch.rand(1, in_channels, 64, 32)
        model = ResBlockUNet(in_channels=in_channels,
                             out_channels=8, dim=2,
                             side_out_parts=['bottom','up'],
                             unet_kwargs=dict(depth=depth))

        out_list = model(x)
        self.assertEqual(len(out_list), depth + 1)
        # Coarse-to-fine: bottleneck first, full resolution last.
        self.assertEqual(list(out_list[0].size()), [1, 24, 8, 4])
        self.assertEqual(list(out_list[1].size()), [1, 12, 16, 8])
        self.assertEqual(list(out_list[2].size()), [1, 6, 32, 16])
        self.assertEqual(list(out_list[3].size()), [1, 8, 64, 32])

    def test_2d_side_out_up(self):
        """Side outputs from the decoder path only: one output per level."""
        from inferno.extensions.models import ResBlockUNet
        depth = 3
        in_channels = 3
        x = torch.rand(1, in_channels, 64, 32)
        model = ResBlockUNet(in_channels=in_channels,
                             out_channels=8, dim=2,
                             side_out_parts=['up'],
                             unet_kwargs=dict(depth=depth))

        out_list = model(x)
        self.assertEqual(len(out_list), depth)
        self.assertEqual(list(out_list[0].size()), [1,12, 16, 8])
        self.assertEqual(list(out_list[1].size()), [1, 6, 32, 16])
        self.assertEqual(list(out_list[2].size()), [1, 8, 64, 32])

    def test_2d_side_out_down(self):
        """Side outputs from the encoder ('down') path plus the final output."""
        from inferno.extensions.models import ResBlockUNet
        depth = 3
        in_channels = 3
        x = torch.rand(1, in_channels, 64, 32)
        model = ResBlockUNet(in_channels=in_channels,
                             out_channels=8, dim=2,
                             side_out_parts=['down'],
                             unet_kwargs=dict(depth=depth))

        out_list = model(x)
        self.assertEqual(len(out_list), depth + 1)
        # Fine-to-coarse along the encoder path.
        self.assertEqual(list(out_list[0].size()), [1, 6, 64, 32])
        self.assertEqual(list(out_list[1].size()), [1, 12, 32, 16])
        self.assertEqual(list(out_list[2].size()), [1, 24, 16, 8])

        # the actual output
        self.assertEqual(list(out_list[3].size()), [1, 8, 64, 32])
if __name__ == '__main__':
unittest.main()
| 3,023 | 35 | 68 | py |
inferno | inferno-master/tests/test_extensions/test_criteria/test_core.py | import unittest
import torch
import torch.nn as nn
class TestCore(unittest.TestCase):
def test_as_2d_criterion(self):
from inferno.extensions.criteria.core import As2DCriterion
prediction = torch.FloatTensor(2, 10, 100, 100).uniform_()
prediction = nn.Softmax2d()(prediction)
target = torch.LongTensor(2, 100, 100).fill_(0)
criterion = As2DCriterion(nn.CrossEntropyLoss())
criterion(prediction, target)
if __name__ == '__main__':
unittest.main()
| 507 | 25.736842 | 66 | py |
inferno | inferno-master/tests/test_extensions/test_criteria/test_elementwise_measures.py | import unittest
import inferno.extensions.criteria.elementwise_measures as em
import torch
class TestElementwiseMeasures(unittest.TestCase):
def test_weighted_mse_loss(self):
input = torch.zeros(10, 10)
target = torch.ones(10, 10)
loss = em.WeightedMSELoss(positive_class_weight=2.)(input, target)
self.assertAlmostEqual(loss.item(), 2., delta=1e-5)
target = torch.zeros(10, 10)
input = torch.ones(10, 10)
loss = em.WeightedMSELoss(positive_class_weight=2.)(input, target)
self.assertAlmostEqual(loss.item(), 1., delta=1e-5)
if __name__ == '__main__':
unittest.main()
| 644 | 31.25 | 74 | py |
inferno | inferno-master/tests/test_extensions/test_criteria/test_set_similarity_measures.py | import unittest
import torch
class SetSimilarityTest(unittest.TestCase):
def get_dummy_variables(self):
x = torch.zeros(3, 2, 100, 100).uniform_()
y = torch.zeros(3, 2, 100, 100).uniform_()
return x, y
def get_dummy_variables_with_channels_and_classes(self):
# (batch_size, channels, classes, ...)
x = torch.zeros(3, 2, 5, 100, 100).uniform_()
y = torch.zeros(3, 2, 5, 100, 100).uniform_()
return x, y
class TestSorensenDice(SetSimilarityTest):
# noinspection PyCallingNonCallable
def test_channelwise(self):
from inferno.extensions.criteria.set_similarity_measures import SorensenDiceLoss
x, y = self.get_dummy_variables()
channelwise = SorensenDiceLoss(channelwise=True)
not_channelwise = SorensenDiceLoss(channelwise=False)
# Compute expected channelwise loss
expected_channelwise_loss = \
not_channelwise(x[:, 0, ...], y[:, 0, ...]) + \
not_channelwise(x[:, 1, ...], y[:, 1, ...])
# Compute channelwise
channelwise_loss = channelwise(x, y)
# Compare
self.assertAlmostEqual(expected_channelwise_loss.item(), channelwise_loss.item())
class TestGeneralizedSorensenDice(SetSimilarityTest):
def test_channelwise(self):
from inferno.extensions.criteria.set_similarity_measures import GeneralizedDiceLoss
x, y = self.get_dummy_variables_with_channels_and_classes()
channelwise = GeneralizedDiceLoss(channelwise=True)
not_channelwise = GeneralizedDiceLoss(channelwise=False)
# Compute channelwise loss and expected one:
channelwise_loss = channelwise(x, y)
expected_channelwise_loss = \
not_channelwise(x[:, 0, ...], y[:, 0, ...]) + \
not_channelwise(x[:, 1, ...], y[:, 1, ...])
# Compare
self.assertAlmostEqual(expected_channelwise_loss.item(), channelwise_loss.item())
if __name__ == '__main__':
unittest.main()
| 1,999 | 37.461538 | 91 | py |
inferno | inferno-master/tests/test_extensions/test_metrics/categorical.py | import unittest
import torch
from inferno.extensions.metrics import IOU
class TestCategorical(unittest.TestCase):
    """Tests for the IOU (intersection-over-union) metric on one-hot maps."""
    def test_iou_basic(self):
        # from one hot
        # Both channels predict a 4x4 block; the target is the contained 3x3
        # block, so IOU = |intersection| / |union| = 9 / 16.
        predicted_image = torch.zeros(*(2, 10, 10))
        predicted_image[:, 0:4, 0:4] = 1
        target_image = torch.zeros(*(2, 10, 10))
        target_image[:, 0:3, 0:3] = 1
        expected_iou = (3 * 3)/(4 * 4)
        # [None, ...] adds the batch axis expected by the metric.
        iou = IOU()(predicted_image[None, ...], target_image[None, ...])
        self.assertAlmostEqual(iou, expected_iou, places=4)

    def test_iou_with_ignore_class(self):
        # Channel 1 disagrees with the target but is excluded via ignore_class,
        # so only channel 0 (IOU = 9/16) contributes.
        predicted_image = torch.zeros(*(2, 10, 10))
        predicted_image[0, 0:4, 0:4] = 1
        target_image = torch.zeros(*(2, 10, 10))
        target_image[:, 0:3, 0:3] = 1
        expected_iou = (3 * 3) / (4 * 4)
        iou = IOU(ignore_class=1)(predicted_image[None, ...], target_image[None, ...])
        self.assertAlmostEqual(iou, expected_iou, places=4)

    def test_multiclass_iou(self):
        # Class 0 has IOU 9/16; class 1 predicts nothing (IOU 0); the metric
        # reports the mean over classes.
        predicted_image = torch.zeros(*(2, 10, 10))
        predicted_image[0, 0:4, 0:4] = 1
        target_image = torch.zeros(*(2, 10, 10))
        target_image[:, 0:3, 0:3] = 1
        iou_class_0 = (3 * 3) / (4 * 4)
        iou_class_1 = 0
        expected_mean_iou = 0.5 * (iou_class_0 + iou_class_1)
        iou = IOU()(predicted_image[None, ...], target_image[None, ...])
        self.assertAlmostEqual(iou, expected_mean_iou, places=4)

    def test_multiclass_iou_with_ignore_class(self):
        # A deliberately wrong third class is excluded via ignore_class=-1
        # (the last class), so the mean is over the first two classes only.
        predicted_image = torch.zeros(*(3, 10, 10))
        predicted_image[0, 0:4, 0:4] = 1
        # Have the third plane be crap
        predicted_image[2, :, :] = 1
        target_image = torch.zeros(*(3, 10, 10))
        target_image[:, 0:3, 0:3] = 1
        iou_class_0 = (3 * 3) / (4 * 4)
        iou_class_1 = 0
        expected_mean_iou = 0.5 * (iou_class_0 + iou_class_1)
        iou = IOU(ignore_class=-1)(predicted_image[None, ...], target_image[None, ...])
        self.assertAlmostEqual(iou, expected_mean_iou, places=4)
if __name__ == '__main__':
unittest.main() | 2,048 | 39.176471 | 87 | py |
inferno | inferno-master/docs/conf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# inferno documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import matplotlib
matplotlib.use('Agg')
import sphinx_gallery
import sys
from unittest.mock import MagicMock
class Mock(MagicMock):
    """Mock whose attribute access always yields a fresh MagicMock — intended
    to stub out heavyweight imports during Sphinx doc builds (see the
    commented-out MOCK_MODULES block below)."""
    @classmethod
    def __getattr__(cls, name):
        return MagicMock()
# MOCK_MODULES = ['pygtk',
# 'hdf5',
# 'skimage',
# 'argparse',
# 'pandas',
# 'torch',
# 'torch.nn', 'torch.nn.init', 'torch.nn.functional',
# 'torch.nn.parallel', 'torch.nn.parallel.data_parallel',
# 'torch.multiprocessing', 'torch.autograd',
# 'torch.utils', 'torch.utils.data',
# 'torch.optim', 'torch.sparse', 'torch.cuda']
# sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import inferno
import inferno.extensions
import inferno.extensions.layers
from inferno.extensions.layers import *
from inferno.extensions.layers.reshape import *
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinx.ext.mathjax',
'sphinx.ext.graphviz',
'sphinx_gallery.gen_gallery',
'sphinxcontrib.bibtex',
'sphinx.ext.napoleon',
'sphinxcontrib.inlinesyntaxhighlight'
]
sphinx_gallery_conf = {
# path to your examples scripts
'examples_dirs' :
'../examples',
# path where to save gallery generated examples
'gallery_dirs' :
'auto_examples',
'backreferences_dir' :
'gen_modules/backreferences',
'scan_used_functions':
True,
'doc_module' :
('inferno','inferno.extensions','inferno.extensions.layers','inferno.extensions.layers.convolutional'),
'docs_resolv': True,
'parallel_read_safe': True,
'reference_url': {
# The module you locally document uses a None
'inferno': None,
# External python modules use their documentation websites
#'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.13.0'}
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'inferno'
copyright = u"2018, f"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = inferno.__version__
# The full version, including alpha/beta/rc tags.
release = inferno.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']  # directory must exist next to conf.py
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'infernodoc'  # produces infernodoc.hhp etc.
# -- Options for LaTeX output ------------------------------------------
# All defaults are kept; uncomment entries below to override them.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'inferno.tex',
     u'inferno Documentation',
     u'Inferno Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'inferno',
     u'inferno Documentation',
     [u'Inferno Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'inferno',
     u'inferno Documentation',
     u'Inferno Team',
     'inferno',
     'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 10,920 | 28.357527 | 111 | py |
import setuptools

# The README doubles as the PyPI long description.
with open("README.md", "r") as file:
    long_description = file.read()

#with open("requirements.txt") as file:
#    required = file.read().splitlines()

setuptools.setup(
    name="building-inspection-toolkit",
    version="0.3.0",
    author="Philipp J. Roesch, Johannes Flotzinger",
    author_email="philipp.roesch@unibw.de, johannes.flotzinger@unibw.de",
    description="Building Inspection Toolkit",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/phiyodr/bridge-inspection-toolkit",
    classifiers=[
        "Programming Language :: Python :: 3",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Scientific/Engineering :: Image Recognition"
    ],
    packages=setuptools.find_packages(),
    python_requires='>=3.6',
    package_data={
        # If any package contains *.txt or *.rst files, include them:
        "": ["*.txt", "*.csv", "*.json"]},
    # Bug fix: these entries previously had no separating commas, so
    # Python's implicit string concatenation collapsed them into a single
    # bogus requirement ("numpy > 1.20requeststorch...") and none of the
    # real dependencies were declared.
    install_requires=[
        "numpy > 1.20",
        "requests",
        "torch",
        "torchvision",
        "pandas",
        "Pillow",
        "patool",
        "pathlib",  # NOTE(review): stdlib since Python 3.4 - pin likely unnecessary
        "tqdm",
        "matplotlib",
        "opencv-python-headless",  # opencv-python
        "efficientnet_pytorch",
        "torchmetrics",
    ]
)
| 1,319 | 29 | 73 | py |
building-inspection-toolkit | building-inspection-toolkit-master/tests/test_codebrim.py | #!/usr/local/bin/python3
# Test Modules
import sys
import pytest
from torchvision import transforms
from os import path, makedirs
import torch
import numpy as np
from PIL import Image
from pathlib import Path
import os
# Import module under test
# sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from bikit.datasets import BikitDataset
# On Travis CI the real dataset is not downloaded, so create one dummy
# image at the expected cache location; the basic test only reads index 0.
home_path = Path(path.expanduser('~'))
travis_homes = [Path("/home/travis"), Path("C:/Users/travis"), Path("/Users/travis")]
if home_path in travis_homes:
    image_path = home_path / ".cache/bikit/codebrim-classif-balanced/classification_dataset_balanced/train/background/"
    Path(image_path).mkdir(parents=True, exist_ok=True)
    image_file = home_path / ".cache/bikit/codebrim-classif-balanced/classification_dataset_balanced/train/background/image_0000001_crop_0000001.png"
    # Uniform grey 379x513 RGB image matching the dataset's native resolution.
    img_np = np.ones((379, 513, 3), dtype=np.int8) * 100
    img_pil = Image.fromarray(np.uint8(img_np)).convert('RGB')
    img_pil.save(image_file)
def test_codebrim_basic():
    """Check sample tensors and split sizes of codebrim-classif-balanced."""
    name = "codebrim-classif-balanced"
    full = BikitDataset(name, split="")
    train = BikitDataset(name, split="train")
    val = BikitDataset(name, split="val")
    test = BikitDataset(name, split="test")
    devel = BikitDataset(name, split="test", devel_mode=True)
    resize = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
    with_transform = BikitDataset(name, split="", transform=resize)

    image, label = full[0]
    assert image.dtype == torch.float32
    assert label.dtype == torch.float32
    assert list(image.shape) == [3, 379, 513]
    assert list(label.shape) == [6]

    # Split sizes must match the published dataset statistics.
    for dataset, expected in [(full, 7261), (train, 6013), (val, 616),
                              (test, 632), (devel, 100), (with_transform, 7261)]:
        assert len(dataset) == expected
@pytest.mark.skipif(home_path in travis_homes,
                    reason="Long-running test with real datasets for local use only, not on Travis.")
def test_codebrim_local():
    """In-memory loading (devel mode) and explicit cache_dir resolution."""
    name = "codebrim-classif-balanced"
    # Loading the complete split into memory would need >= 10GB of free RAM,
    # so only devel mode (first 100 rows) is exercised here.
    in_mem_devel = BikitDataset(name, split="", load_all_in_mem=True, devel_mode=True)
    default_cache = Path(os.path.join(os.path.expanduser("~"), ".cache/bikit"))
    from_cache = BikitDataset(name, split="", cache_dir=default_cache)
    image, label = from_cache[0]
    assert list(label.shape) == [6]
    assert len(in_mem_devel) == 100
# Allow running this module directly (outside pytest) for quick local checks.
if __name__ == '__main__':
    test_codebrim_local()
    test_codebrim_basic()
| 2,869 | 37.266667 | 149 | py |
building-inspection-toolkit | building-inspection-toolkit-master/tests/test_mcds.py | #!/usr/local/bin/python3
# Test Modules
import sys
import pytest
from os import path, makedirs
import torch
import numpy as np
from PIL import Image
from pathlib import Path
from torchvision import transforms
import os
# Import module under test
# sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from bikit.datasets import BikitDataset
# On Travis CI the real dataset is not downloaded, so create one dummy
# image at the expected cache location; the basic tests only read index 0.
home_path = Path(path.expanduser('~'))
travis_homes = [Path("/home/travis"), Path("C:/Users/travis"), Path("/Users/travis")]
if home_path in travis_homes:
    image_path = home_path / ".cache/bikit/mcds/Corrosion/no rust staining"
    makedirs(image_path)
    image_file = home_path / ".cache/bikit/mcds/Corrosion/no rust staining/001_0fwtaowy.t1o.jpg"
    # Uniform grey 92x400 RGB image matching the dataset's native resolution.
    img_np = np.ones((92, 400, 3), dtype=np.int8) * 100
    img_pil = Image.fromarray(np.uint8(img_np)).convert('RGB')
    img_pil.save(image_file)
def test_mcds_bukhsh_basic():
    """Check sample tensors and split sizes of the mcds_Bukhsh variant."""
    name = "mcds_Bukhsh"
    full = BikitDataset(name, split="")
    trainval = BikitDataset(name, split="trainval")
    test = BikitDataset(name, split="test")
    devel = BikitDataset(name, split="test", devel_mode=True)
    resize = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
    # Note: the transformed dataset is also built in devel mode (100 rows).
    devel_with_transform = BikitDataset(name, split="", devel_mode=True, transform=resize)

    image, label = full[0]
    assert image.dtype == torch.float32
    assert label.dtype == torch.float32
    assert list(image.shape) == [3, 92, 400]
    assert list(label.shape) == [10]

    for dataset, expected in [(full, 2612), (trainval, 2114), (test, 498),
                              (devel, 100), (devel_with_transform, 100)]:
        assert len(dataset) == expected
@pytest.mark.skipif(home_path in travis_homes,
                    reason="Long-running test with real datasets for local use only, not on Travis.")
def test_mcds_bukhsh_local():
    """In-memory loading and explicit cache_dir for mcds_Bukhsh (local only)."""
    name = "mcds_Bukhsh"
    in_mem = BikitDataset(name, split="", load_all_in_mem=True)
    in_mem_devel = BikitDataset(name, split="", load_all_in_mem=True, devel_mode=True)
    assert len(in_mem) == 2612
    assert len(in_mem_devel) == 100
    default_cache = Path(os.path.join(os.path.expanduser("~"), ".cache/bikit"))
    from_cache = BikitDataset(name, split="", cache_dir=default_cache)
    image, label = from_cache[0]
    assert list(label.shape) == [10]
def test_mcds_bikit_basic():
    """Check sample tensors and split sizes of the bikit re-split of MCDS."""
    name = "mcds_bikit"
    full = BikitDataset(name, split="")
    train = BikitDataset(name, split="train")
    valid = BikitDataset(name, split="valid")
    test = BikitDataset(name, split="test")
    devel = BikitDataset(name, split="test", devel_mode=True)
    resize = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
    with_transform = BikitDataset(name, split="", transform=resize)

    image, label = full[0]
    assert image.dtype == torch.float32
    assert label.dtype == torch.float32
    assert list(image.shape) == [3, 92, 400]
    assert list(label.shape) == [8]

    for dataset, expected in [(full, 2597), (train, 2057), (valid, 270),
                              (test, 270), (devel, 100), (with_transform, 2597)]:
        assert len(dataset) == expected
@pytest.mark.skipif(home_path in travis_homes,
                    reason="Long-running test with real datasets for local use only, not on Travis.")
def test_mcds_bikit_local():
    """In-memory loading and explicit cache_dir for mcds_bikit (local only)."""
    name = "mcds_bikit"
    in_mem = BikitDataset(name, split="", load_all_in_mem=True)
    in_mem_devel = BikitDataset(name, split="", load_all_in_mem=True, devel_mode=True)
    assert len(in_mem_devel) == 100
    assert len(in_mem) == 2597
    default_cache = Path(os.path.join(os.path.expanduser("~"), ".cache/bikit"))
    from_cache = BikitDataset(name, split="", cache_dir=default_cache)
    image, label = from_cache[0]
    assert list(label.shape) == [8]
def test_mcds_catch():
    """Invalid split names and unknown dataset names must raise."""
    with pytest.raises(Exception):
        # The first constructor already raises; the second statement is
        # unreachable but documents the other invalid case.
        ds = BikitDataset(name="mcds_bikit", split="ERROR")
        ds = BikitDataset(name="WRONG_NAME")
# Allow running this module directly (outside pytest) for quick local checks.
if __name__ == '__main__':
    test_mcds_bikit_local()
    test_mcds_bikit_basic()
    test_mcds_bukhsh_local()
    test_mcds_bukhsh_basic()
    test_mcds_catch()
| 4,476 | 34.816 | 116 | py |
building-inspection-toolkit | building-inspection-toolkit-master/tests/test_sdnet.py | #!/usr/local/bin/python3
# Test Modules
import sys
import pytest
from torchvision import transforms
from os import path, makedirs
import torch
import numpy as np
from PIL import Image
from pathlib import Path
import os
# Import module under test
# sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from bikit.datasets import BikitDataset
# On Travis CI the real dataset is not downloaded, so create one dummy
# image at the expected cache location; the basic test only reads index 0.
home_path = Path(path.expanduser('~'))
travis_homes = [Path("/home/travis"), Path("C:/Users/travis"), Path("/Users/travis")]
if home_path in travis_homes:
    image_path = home_path / ".cache/bikit/sdnet/D/CD/"
    makedirs(image_path)
    image_file = home_path / ".cache/bikit/sdnet/D/CD/7001-115.jpg"
    # Uniform grey 256x256 RGB image matching the dataset's native resolution.
    img_np = np.ones((256, 256, 3), dtype=np.int8) * 100
    img_pil = Image.fromarray(np.uint8(img_np)).convert('RGB')
    img_pil.save(image_file)
@pytest.mark.parametrize("name", ["sdnet", "sdnet_binary"])
def test_sdnet_basic(name):
    """Check sample tensors and split sizes for both SDNET label variants."""
    full = BikitDataset(name, split="")
    resize = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
    with_transform = BikitDataset(name, split="", transform=resize)
    train = BikitDataset(name, split="train")
    val = BikitDataset(name, split="val")
    test = BikitDataset(name, split="test")
    devel = BikitDataset(name, split="test", devel_mode=True)

    image, label = full[0]
    assert image.dtype == torch.float32
    assert label.dtype == torch.float32
    assert list(image.shape) == [3, 256, 256]
    # Six classes in the multi-label variant, two in the binary variant.
    if name == "sdnet":
        assert list(label.shape) == [6]
    elif name == "sdnet_binary":
        assert list(label.shape) == [2]

    for dataset, expected in [(full, 56092), (train, 50488), (val, 2808),
                              (test, 2796), (devel, 100), (with_transform, 56092)]:
        assert len(dataset) == expected
@pytest.mark.skipif(home_path in travis_homes,
                    reason="Long-running test with real datasets for local use only, not on Travis.")
@pytest.mark.parametrize("name", ["sdnet", "sdnet_binary"])
def test_sdnet_local(name):
    """In-memory loading (devel mode) and explicit cache_dir (local only)."""
    # Loading the full dataset into memory would need >= 15GB of free RAM,
    # so only devel mode (first 100 rows) is exercised here.
    in_mem_devel = BikitDataset(name, split="", load_all_in_mem=True, devel_mode=True)
    assert len(in_mem_devel) == 100
    default_cache = Path(os.path.join(os.path.expanduser("~"), ".cache/bikit"))
    from_cache = BikitDataset(name, split="", cache_dir=default_cache)
    image, label = from_cache[0]
    if name == "sdnet":
        assert list(label.shape) == [6]
    elif name == "sdnet_binary":
        assert list(label.shape) == [2]
# Allow running this module directly (outside pytest) for quick local checks.
if __name__ == '__main__':
    # Bug fix: both tests are parametrized and require `name`; calling them
    # without arguments raised a TypeError. Run each dataset variant in turn.
    # (A dataset-dump artifact fused onto the last line was also removed.)
    for dataset_name in ("sdnet", "sdnet_binary"):
        test_sdnet_local(dataset_name)
        test_sdnet_basic(dataset_name)
building-inspection-toolkit | building-inspection-toolkit-master/tests/test_bcd.py | #!/usr/local/bin/python3
# Test Modules
import sys
import pytest
from torchvision import transforms
from os import path, makedirs
import torch
import numpy as np
from PIL import Image
from pathlib import Path
import os
# Import module under test
# sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from bikit.datasets import BikitDataset
# On Travis CI the real dataset is not downloaded, so create one dummy
# image at the expected cache location; the basic test only reads index 0.
home_path = Path(path.expanduser('~'))
travis_homes = [Path("/home/travis"), Path("C:/Users/travis"), Path("/Users/travis")]
if home_path in travis_homes:
    image_path = home_path / ".cache/bikit/bcd/"
    makedirs(image_path)
    image_file = home_path / ".cache/bikit/bcd/1.jpg"
    # Uniform grey 224x224 RGB image matching the dataset's native resolution.
    img_np = np.ones((224, 224, 3), dtype=np.int8) * 100
    img_pil = Image.fromarray(np.uint8(img_np)).convert('RGB')
    img_pil.save(image_file)
def test_bcd_basic():
    """Check sample tensors and split sizes of the bridge crack dataset (bcd)."""
    full = BikitDataset(name="bcd", split="")
    resize = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
    with_transform = BikitDataset(name="bcd", split="", transform=resize)
    train = BikitDataset(name="bcd", split="train")
    val = BikitDataset(name="bcd", split="val")
    test = BikitDataset(name="bcd", split="test")
    devel = BikitDataset(name="bcd", split="test", devel_mode=True)

    image, label = full[0]
    assert image.dtype == torch.float32
    assert label.dtype == torch.float32
    assert list(image.shape) == [3, 224, 224]
    assert list(label.shape) == [2]

    for dataset, expected in [(full, 6069), (train, 4869), (val, 600),
                              (test, 600), (devel, 100), (with_transform, 6069)]:
        assert len(dataset) == expected
@pytest.mark.skipif(home_path in travis_homes,
                    reason="Long-running test with real datasets for local use only, not on Travis.")
def test_bcd_local():
    """In-memory loading and explicit cache_dir for bcd (local only)."""
    in_mem = BikitDataset(name="bcd", split="", load_all_in_mem=True)
    in_mem_devel = BikitDataset(name="bcd", split="", load_all_in_mem=True, devel_mode=True)
    assert len(in_mem) == 6069
    assert len(in_mem_devel) == 100
    default_cache = Path(os.path.join(os.path.expanduser("~"), ".cache/bikit"))
    from_cache = BikitDataset(name="bcd", split="", cache_dir=default_cache)
    image, label = from_cache[0]
    assert list(label.shape) == [2]
# Allow running this module directly (outside pytest) for quick local checks.
if __name__ == '__main__':
    test_bcd_local()
    test_bcd_basic()
| 2,493 | 35.144928 | 122 | py |
building-inspection-toolkit | building-inspection-toolkit-master/tests/test_cds.py | #!/usr/local/bin/python3
# Test Modules
import sys
import pytest
from torchvision import transforms
from os import path, makedirs
import torch
import numpy as np
from PIL import Image
from pathlib import Path
import os
# Import module under test
# sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from bikit.datasets import BikitDataset
# On Travis CI the real dataset is not downloaded, so create one dummy
# image at the expected cache location; the basic test only reads index 0.
home_path = Path(path.expanduser('~'))
travis_homes = [Path("/home/travis"), Path("C:/Users/travis"), Path("/Users/travis")]
if home_path in travis_homes:
    image_path = home_path / ".cache/bikit/cds/Healthy/"
    makedirs(image_path)
    image_file = home_path / ".cache/bikit/cds/Healthy/01wbfrvx.qqq.jpg"
    # Uniform grey 299x299 RGB image matching the dataset's native resolution.
    img_np = np.ones((299, 299, 3), dtype=np.int8) * 100
    img_pil = Image.fromarray(np.uint8(img_np)).convert('RGB')
    img_pil.save(image_file)
def test_cds_basic():
    """Check sample tensors and split sizes of the CDS dataset."""
    name = "cds"
    full = BikitDataset(name, split="")
    resize = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
    with_transform = BikitDataset(name, split="", transform=resize)
    train = BikitDataset(name, split="train")
    val = BikitDataset(name, split="val")
    test = BikitDataset(name, split="test")
    devel = BikitDataset(name, split="test", devel_mode=True)

    image, label = full[0]
    assert image.dtype == torch.float32
    assert label.dtype == torch.float32
    assert list(image.shape) == [3, 299, 299]
    assert list(label.shape) == [2]

    for dataset, expected in [(full, 1028), (train, 824), (val, 104),
                              (test, 100), (devel, 100), (with_transform, 1028)]:
        assert len(dataset) == expected
@pytest.mark.skipif(home_path in travis_homes,
                    reason="Long-running test with real datasets for local use only, not on Travis.")
def test_cds_local():
    """In-memory loading and explicit cache_dir for CDS (local only)."""
    name = "cds"
    in_mem = BikitDataset(name, split="", load_all_in_mem=True)
    in_mem_devel = BikitDataset(name, split="", load_all_in_mem=True, devel_mode=True)
    assert len(in_mem) == 1028
    assert len(in_mem_devel) == 100
    default_cache = Path(os.path.join(os.path.expanduser("~"), ".cache/bikit"))
    from_cache = BikitDataset(name, split="", cache_dir=default_cache)
    image, label = from_cache[0]
    assert list(label.shape) == [2]
# Allow running this module directly (outside pytest) for quick local checks.
if __name__ == '__main__':
    test_cds_local()
    # Bug fix: a dataset-dump artifact ("| 2,498 | ...") was fused onto this
    # line, making the file unparsable; the junk has been removed.
    test_cds_basic()
building-inspection-toolkit | building-inspection-toolkit-master/bikit/utils.py | import gdown
import os
import hashlib
import zipfile
import json
import pprint
from os.path import dirname
from PIL import Image
from urllib.request import urlretrieve
from time import sleep
import requests
import cv2
import requests
from io import BytesIO
import torch
# Shared pretty-printer for the list_* helpers below.
pp = pprint.PrettyPrinter(indent=4)
bikit_path = dirname(__file__)
# Registry of all bundled datasets, shipped with the package as JSON.
with open(os.path.join(bikit_path, "data/datasets.json")) as f:
    DATASETS = json.load(f)
# Tiny demo entry used for exercising the download machinery without
# fetching a real (large) dataset.
DEMO_DATASETS = {
    "test_zip": {
        "description": "",
        "download_name": "test_zip",
        "license": "",
        "urls": ["https://github.com/phiyodr/building-inspection-toolkit/raw/master/bikit/data/test_zip.zip"],
        "original_names": ["test_zip.zip"],
        "checksums": ["63b3722e69dcf7e14c879411c1907dae"],
    },
}
def pil_loader(path):
    """Read the image file at *path* and return it as an RGB PIL Image."""
    # Open via an explicit handle to avoid a ResourceWarning
    # (https://github.com/python-pillow/Pillow/issues/835).
    with open(path, 'rb') as handle:
        image = Image.open(handle)
        return image.convert('RGB')
def cv2_loader(path):
    """Read the image file at *path* and return it as an RGB numpy.ndarray."""
    # cv2.imread only accepts str (not pathlib paths) and loads BGR order.
    bgr = cv2.imread(str(path))
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
def load_img_from_url(img_url):
    """Download the image behind *img_url* and return it as a PIL Image."""
    response = requests.get(img_url)
    return Image.open(BytesIO(response.content))
########## Model ##########
def list_models(verbose=True, cache_dir='~/.cache/bikit-models', force_redownload=False):
    """Return the bikit-models metadata dict, optionally pretty-printing it.

    :param verbose: Pretty-print the metadata to stdout.
    :param cache_dir: Cache directory holding metadata.json.
    :param force_redownload: Re-download metadata.json even if cached.
    :return: Dict keyed by model name.
    """
    metadata = get_metadata(cache_dir, force_redownload)
    if verbose:
        pp.pprint(metadata)
    return metadata
def download_model(name, cache_dir='~/.cache/bikit-models', force_redownload=False):
    """Download the .pth checkpoint for model *name* into *cache_dir*.

    :param name: Model name; must be a key of the models metadata.
    :param cache_dir: Directory where checkpoints and metadata are cached.
    :param force_redownload: Re-download even if the file already exists.
    :return: Path of the (possibly pre-existing) checkpoint file.
    """
    models_metadata = get_metadata(cache_dir, force_redownload)
    all_model_names = list(models_metadata.keys())
    assert name in all_model_names, f"Please specify a valid model <name> out of {all_model_names}. You used {name}."
    base_url = "https://github.com/phiyodr/bikit-models/raw/master/models/"
    model_url = os.path.join(base_url, models_metadata[name]["pth_name"])
    filename = os.path.join(os.path.expanduser(cache_dir), models_metadata[name]["pth_name"])
    if not os.path.isfile(filename) or force_redownload:
        print(f"Start to download {name}.")
        urlretrieve(model_url, filename)
        # Bug fix: the f-strings previously printed a literal "(unknown)"
        # instead of interpolating the target path.
        print(f"Successfully downloaded model to {filename}.")
    else:
        print(f"Model {filename} already exists.")
    return filename
########## Metadata ##########
def download_metadata(cache_dir='~/.cache/bikit-models', force_redownload=False):
    """Download metadata.json from the bikit-models repository into *cache_dir*.

    :param cache_dir: Directory the file is stored in (created if missing).
    :param force_redownload: Re-download even if the file already exists.
    """
    cache_dir = os.path.expanduser(cache_dir)
    metadata_url = "https://github.com/phiyodr/bikit-models/raw/master/metadata.json"
    filename = os.path.join(cache_dir, "metadata.json")
    if not os.path.isfile(filename) or force_redownload:
        # exist_ok avoids a race if another process creates the directory
        # between check and creation.
        os.makedirs(cache_dir, exist_ok=True)
        urlretrieve(metadata_url, filename)
        # Bug fix: the f-strings previously printed a literal "(unknown)"
        # instead of interpolating the target path.
        print(f"Successfully downloaded metadata.json to {filename}.")
    else:
        print(f"metadata.json already exists at {filename}.")
def read_metadata(cache_dir='~/.cache/bikit-models'):
    """Load metadata.json from *cache_dir* and return it as a dict."""
    metadata_path = os.path.join(os.path.expanduser(cache_dir), "metadata.json")
    with open(metadata_path) as handle:
        return json.load(handle)
def get_metadata(cache_dir='~/.cache/bikit-models', force_redownload=False):
    """Return metadata.json as a dict, downloading it first if required."""
    metadata_path = os.path.join(os.path.expanduser(cache_dir), "metadata.json")
    if force_redownload or not os.path.isfile(metadata_path):
        download_metadata(cache_dir, force_redownload)
    return read_metadata(cache_dir)
def load_model(name, add_metadata=True, cache_dir="~/.cache/bikit-models", force_redownload=False):
    """Build a DaclNet from a cached checkpoint and return it in eval mode.

    :param name: Model name; must be a key of the models metadata.
    :param add_metadata: Also return the model's metadata entry.
    :param cache_dir: Cache directory for checkpoints and metadata.
    :param force_redownload: Re-download checkpoint/metadata even if cached.
    :return: ``model`` or ``(model, metadata)`` depending on *add_metadata*.
    """
    from .models import DaclNet
    models_metadata = get_metadata(cache_dir, force_redownload)
    all_model_names = list(models_metadata.keys())
    assert name in all_model_names, f"Please specify a valid model <name> out of {all_model_names}. You used {name}."
    model_path = os.path.join(os.path.expanduser(cache_dir), models_metadata[name]["pth_name"])
    if not os.path.isfile(model_path) or force_redownload:
        download_model(name, cache_dir='~/.cache/bikit-models')
    # Checkpoints store both the architecture hyper-parameters and weights.
    checkpoint = torch.load(model_path, map_location=torch.device('cpu'))
    model = DaclNet(base_name=checkpoint['base'],
                    resolution=checkpoint['resolution'],
                    hidden_layers=checkpoint['hidden_layers'],
                    drop_prob=checkpoint['drop_prob'],
                    num_class=checkpoint['num_class'])
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    if not add_metadata:
        return model
    return model, get_metadata(cache_dir, force_redownload)[name]
########## Datasets ##########
def list_datasets(verbose=True):
    """Return the bundled dataset registry, optionally pretty-printing it.

    :param verbose: Pretty-print the registry to stdout.
    :return: Dict keyed by dataset name (name, url, original file names, ...).
    """
    if verbose:
        pp.pprint(DATASETS)
    return DATASETS
def download_dataset(name, cache_dir='~/.cache/bikit', rm_zip_or_rar=True, force_redownload=False):
    """Download and unpack dataset *name* from Google Drive into *cache_dir*.

    :param name: Key into the DATASETS registry.
    :param cache_dir: Target cache directory (created if missing).
    :param rm_zip_or_rar: Delete the downloaded archive after extraction.
    :param force_redownload: Download again even if the unpacked folder exists.
    """
    # Get details from DATASETS
    dct = DATASETS[name]
    uid = dct["url"]  # Google Drive file id, not a full URL
    print(f"The {name} dataset was published at {dct['publications']}\n",
          f"With downloading you accept the licence: {dct['license']}\n",
          f"More details at {dct['webpage']}")
    cache_full_dir = os.path.expanduser(cache_dir)
    zip_file = f"{dct['download_name']}.zip"
    cache_zip_file = os.path.join(cache_full_dir, zip_file)
    # The extraction folder is the archive path without its .zip extension.
    cache_zip_folder = os.path.splitext(cache_zip_file)[0]
    # Create cache directory
    if not os.path.exists(cache_full_dir):
        print(f"Create folder {cache_full_dir}")
        os.makedirs(cache_full_dir)
    # Download if not already present or not forced
    if not os.path.exists(cache_zip_folder) or force_redownload:
        # Download
        url = f'https://drive.google.com/uc?id={uid}'
        gdown.download(url, cache_zip_file, quiet=False)
        # Unzip
        print("\nStart to unzip file", end=" ")
        with zipfile.ZipFile(cache_zip_file, 'r') as zip_ref:
            zip_ref.extractall(cache_full_dir)
        print("- unzip done!")
        if rm_zip_or_rar:
            print(f"Removing {cache_zip_file}")
            os.remove(cache_zip_file)
    else:
        print(f"Folder {cache_zip_folder} already exists.\n",
              f"Use argument set 'force_redownload=True' to force redownload.")
if __name__ == "__main__":
    # Manual smoke test: download a dataset, build a DataLoader and iterate once.
    name = "dacl1k"
    download_dataset(name)

    from bikit.datasets import BikitDataset  # Deprecated: from bikit.datasets.data import BikitDataset
    from torch.utils.data import DataLoader
    from torchvision import transforms

    my_transform = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
    train_dataset = BikitDataset(name, split="train", transform=my_transform, return_type="pt")
    Train_loader = DataLoader(dataset=train_dataset, batch_size=64, shuffle=False, num_workers=0)

    # Use it in your training loop
    for i, (imgs, labels) in enumerate(train_dataset):
        print(i, imgs.shape, labels.shape)
        break

    test_data, test_meta = False, False
    if test_data:
        name = "codebrim-classif"
        #download_dataset(name, rm_zip_or_rar=True, force_redownload=False)
        print("===Download done===")
        from bikit.datasets import BikitDataset
        from torch.utils.data import DataLoader
        from torchvision import transforms
        my_transform = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
        trainval_dataset = BikitDataset(name, split="test", transform=my_transform)
        trainval_loader = DataLoader(dataset=trainval_dataset, batch_size=64, shuffle=False, num_workers=0)
        for i, (imgs, labels) in enumerate(trainval_loader):
            print(i, imgs.shape, labels.shape, labels)
            if i > 1:
                break
        print("===Done===")
    elif test_meta:
        # Bug fix: a dataset-dump artifact ("| 8,465 | ...") was fused onto
        # this line, making the file unparsable; the junk has been removed.
        download_metadata()
building-inspection-toolkit | building-inspection-toolkit-master/bikit/datasets.py | import torch
from torch.utils.data import Dataset
import pandas as pd
import numpy as np
import json
import os
import PIL
from PIL import Image
from torchvision import transforms
from os.path import dirname
from bikit.utils import pil_loader, cv2_loader, DATASETS
from pathlib import Path
from tqdm import tqdm
class BikitDataset(Dataset):
    """PyTorch Dataset for all bikit datasets.

    Reads the per-dataset CSV shipped with bikit (image path, split
    assignment and one-hot labels per row) and serves (image, label) pairs.
    Image files themselves are expected under the bikit cache directory.
    """
    # Registry of all known datasets, loaded once from the packaged JSON.
    bikit_path = Path(os.path.join(os.path.dirname(os.path.dirname(__file__)), "bikit"))
    with open(Path(os.path.join(bikit_path, "data/datasets.json"))) as f:
        DATASETS = json.load(f)

    def __init__(self, name, split=None, cache_dir=None, transform=None, img_type="pil", return_type="pt",
                 load_all_in_mem=False, devel_mode=False):
        """
        :param name: Dataset name; must be a key of the bundled datasets.json.
        :param split: Split name (e.g. 'train', 'val', 'test'; dataset
            dependent). '' or None selects all samples.
        :param cache_dir: Directory holding the downloaded image data
            (defaults to ~/.cache/bikit).
        :param transform: Torch transformation for image data (this depends on your CNN).
        :param img_type: Load image as 'pil' or 'cv2' (case-insensitive).
        :param return_type: Returns Torch tensor ('pt') or numpy ('np').
        :param load_all_in_mem: Whether or not to load all image data into memory
            (this depends on the dataset size and your memory). Loading all in
            memory can speed up your training.
        :param devel_mode: Restrict to the first 100 rows for quick dev runs.
        """
        # Bug fix: the assert previously checked img_type.lower() while the
        # branches compared the raw string, so e.g. "PIL" passed validation
        # but no image loader was ever set. Normalize once up front.
        img_type = img_type.lower()
        assert img_type in ["pil", "cv2"], "Not a valid image type. Use something from ['pil','cv2']."
        if img_type == "pil":
            self.img_loader = pil_loader
        elif img_type == "cv2":
            self.img_loader = cv2_loader
        assert name in list(self.DATASETS.keys()), f"This name does not exist. Use something from {list(self.DATASETS.keys())}."
        bikit_path = Path(os.path.join(os.path.dirname(os.path.dirname(__file__)), "bikit"))
        self.csv_filename = Path(os.path.join(bikit_path, "data", name) + ".csv")
        # Use the class-level registry consistently (the module-level DATASETS
        # from bikit.utils holds the same JSON content).
        self.split_column = self.DATASETS[name]["split_column"]
        self.available_splits = self.DATASETS[name]["splits"]
        # Bug fix: the documented default split=None failed the assert below;
        # treat None like "" (all samples).
        if split is None:
            split = ""
        assert split in self.available_splits + [""], f"{split} is not a valid split. Use something from {self.available_splits}."
        assert return_type in ["pt", "np"], f"{return_type} is not a valid return_type. Use something from {['pt', 'np']}."
        self.return_type = return_type
        # Misc
        self.split = split
        if cache_dir:
            self.cache_full_dir = Path(os.path.join(cache_dir))
        else:
            self.cache_full_dir = Path(os.path.join(os.path.expanduser("~"), ".cache/bikit"))
        self.devel_mode = devel_mode
        self.class_names = self.DATASETS[name]["class_names"]
        self.num_classes = self.DATASETS[name]["num_classes"]
        self.load_all_in_mem = load_all_in_mem
        # Data prep: filter the CSV down to the requested split.
        self.transform = transform
        self.df = pd.read_csv(self.csv_filename)
        if split:
            self.df = self.df[self.df[self.split_column] == split]
        if devel_mode:
            self.df = self.df[:100]
        self.n_samples = self.df.shape[0]
        if load_all_in_mem:
            # Pre-load every image keyed by its name; trades memory for speed.
            self.img_dict = {}
            for index, row in tqdm(self.df.iterrows(), total=self.df.shape[0], desc="Load images in CPU memory"):
                img_filename = Path(os.path.join(self.cache_full_dir, row['img_path']))
                img_name = row['img_name']
                img = self.img_loader(img_filename)
                self.img_dict[img_name] = img

    def __getitem__(self, index):
        """Return (image, label); the label tensor/array has shape
        (num_classes,) with 1.0 marking each present class."""
        data = self.df.iloc[index]
        # Get image (from memory cache or from disk)
        if self.load_all_in_mem:
            img = self.img_dict[data['img_name']]
        else:
            img_filename = Path(os.path.join(self.cache_full_dir, data['img_path']))
            img = self.img_loader(img_filename)
        if self.transform:
            img = self.transform(img)
        elif self.return_type == "pt":
            # Without a user transform, "pt" still needs a tensor conversion.
            img = transforms.ToTensor()(img)
        if (self.return_type == "np") and isinstance(img, PIL.Image.Image):
            img = np.array(img)
        # Get label with shape (num_classes,)
        if self.return_type == "np":
            label = data[self.class_names].to_numpy().astype("float32")
        else:
            label = torch.FloatTensor(data[self.class_names].to_numpy().astype("float32"))
        return img, label

    def __len__(self):
        """Number of samples in the selected split."""
        return self.n_samples
if __name__ == "__main__":
    # Manual smoke test: build one dataset per loader backend and print shapes.
    print(__file__)
    dataset1 = BikitDataset(name="cds", split="train")
    dataset2 = BikitDataset(name="sdnet", split="train", img_type="cv2")
    dataset3 = BikitDataset(name="bcd", split="train", img_type="cv2")
    dataset4 = BikitDataset(name="mcds_bikit", split="train", img_type="cv2")
    #train_dataset = BikitDataset(split="", load_all_in_mem=True)
    img, targets = dataset1[0]
    print(img.shape, targets.shape)
    print(len(dataset1))
    print("======")
    # Exercise every dataset/split/loader/return-type combination.
    for key in DATASETS:
        if key not in ["codebrim-classif"]:
            for split in DATASETS[key]["splits"]:
                for img_type in ["pil", "cv2"]:
                    for return_type in ["pt", "np"]:
                        dataset = BikitDataset(name=key, split=split, img_type=img_type, return_type=return_type)
                        img, targets = dataset[0]
                        # Bug fix: a dataset-dump artifact ("| 5,624 | ...")
                        # fused onto this line made the file unparsable.
                        print(key, split, img_type, return_type, img.shape, type(img), targets.shape, type(targets))
building-inspection-toolkit | building-inspection-toolkit-master/bikit/models.py | import numpy as np
import torch
from torch import nn
from torchvision import models
from efficientnet_pytorch import EfficientNet
from efficientnet_pytorch.utils import MemoryEfficientSwish
import time
from PIL import Image
import matplotlib.pyplot as plt
import json
from pathlib import Path
import sys
# Maps each EfficientNet variant ('b0'..'b7') to the input-image resolution it
# expects; DaclNet scans this dict to pick the variant for a given resolution.
efnet_dict = {'b0': 224, 'b1': 240, 'b2': 260, 'b3': 300,
              'b4': 380, 'b5': 456, 'b6': 528, 'b7': 600
              }
class DaclNet(nn.Module):
    def __init__(self, base_name, resolution, hidden_layers, num_class, drop_prob=0.2, freeze_base=True):
        '''
        Builds a network separated into a base model (feature extractor) and a
        classifier with arbitrary hidden layers.

        Attributes
        ---------
        base_name: string, base model for the NN: 'mobilenet', 'resnet',
                   'efficientnet' or 'mobilenetv2'
        resolution: int, resolution of the input-images, e.g. 224, 240, ...
                    (see efnet_dict); only used to select the EfficientNet variant
        hidden_layers: list of integers, the sizes of the hidden layers
        num_class: integer, size of the output layer according to the number of classes
        drop_prob: float, dropout probability
        freeze_base: boolean, freeze the parameters of the base model

        Raises
        ---------
        ValueError: for an EfficientNet resolution not listed in efnet_dict
        NotImplementedError: for an unknown base_name

        Example
        ---------
        model = DaclNet(base_name='efficientnet', resolution=224, hidden_layers=[32,16], num_class=6)
        '''
        super(DaclNet, self).__init__()
        self.base_name = base_name
        self.resolution = resolution
        self.hidden_layers = hidden_layers
        self.freeze_base = freeze_base
        if self.base_name == 'mobilenet':
            base = models.mobilenet_v3_large(pretrained=True)
            # Drop the original classifier, keep features + pooling.
            modules = list(base.children())[:-1]
            self.base = nn.Sequential(*modules)
            if hidden_layers:
                self.classifier = nn.ModuleList([nn.Linear(base.classifier[0].in_features, self.hidden_layers[0])])
            else:
                self.classifier = nn.Linear(base.classifier[0].in_features, num_class)
            self.activation = nn.Hardswish()
        elif self.base_name == 'resnet':
            base = models.resnet50(pretrained=True)
            modules = list(base.children())[:-1]
            self.base = nn.Sequential(*modules)
            if self.hidden_layers:
                self.classifier = nn.ModuleList([nn.Linear(base.fc.in_features, self.hidden_layers[0])])
            else:
                self.classifier = nn.Linear(base.fc.in_features, num_class)
            # ELU avoids the dying-ReLU problem, see:
            # https://tungmphung.com/elu-activation-a-comprehensive-analysis/
            self.activation = nn.ELU()
        elif self.base_name == 'efficientnet':
            # EffNet cannot be truncated like the others: removing the last
            # module would also drop the final batchnorm and avg-pooling.
            full_name = None
            for ver, res in efnet_dict.items():
                if res == self.resolution:
                    self.version = ver
                    full_name = self.base_name + '-' + ver
                    break
            if full_name is None:
                # Fail early with a clear message instead of the NameError the
                # original code produced for an unsupported resolution.
                raise ValueError(
                    f"Unsupported EfficientNet resolution: {self.resolution}. "
                    f"Expected one of {sorted(efnet_dict.values())}.")
            self.base = EfficientNet.from_pretrained(model_name=full_name)
            if self.hidden_layers:
                self.classifier = nn.ModuleList([nn.Linear(self.base._fc.in_features, self.hidden_layers[0])])
            else:
                self.classifier = nn.Linear(self.base._fc.in_features, num_class)
            self.activation = MemoryEfficientSwish()
        elif self.base_name == 'mobilenetv2':
            base = models.mobilenet.mobilenet_v2(pretrained=True)
            modules = list(base.children())[:-1]
            self.base = nn.Sequential(*modules)
            if hidden_layers:
                # Input features = depth of the last BatchNorm layer = input
                # features of the first layer of the original classifier.
                self.classifier = nn.ModuleList([nn.Linear(base.classifier[1].in_features, self.hidden_layers[0])])
            else:
                self.classifier = nn.Linear(base.classifier[1].in_features, num_class)
            self.activation = nn.ReLU()
        else:
            raise NotImplementedError
        # Optionally freeze the feature extractor.
        if self.freeze_base:
            for param in self.base.parameters():
                param.requires_grad_(False)
        self.dropout = nn.Dropout(p=drop_prob, inplace=True)
        # Add the remaining hidden layers and the output layer to the classifier.
        if self.hidden_layers:
            # The default MobileNetV3-Large model has one hidden layer with 1280 nodes.
            layer_sizes = zip(self.hidden_layers[:-1], self.hidden_layers[1:])
            self.classifier.extend([nn.Linear(h1, h2) for h1, h2 in layer_sizes])
            self.classifier.append(nn.Linear(self.hidden_layers[-1], num_class))

    def forward(self, input_batch):
        '''
        Performs the feed-forward pass and returns the logits.

        Arguments
        ---------
        input_batch: torch.Tensor of shape [N, 3, H, W], dtype torch.float32

        Note
        ---------
        Every base ends with 2d average pooling (output_size=1) before
        flattening. ResNet50 and MobileNetV3 keep their pooling layer inside
        the sequential base; EfficientNet ("extract_features") and MobileNetV2
        lack it, so pooling is applied manually here for those two.

        Returns
        ---------
        logits: torch.Tensor of shape [N, num_class]
        '''
        if self.base_name in ['efficientnet', 'mobilenetv2']:
            if self.base_name == 'efficientnet':
                x = self.base.extract_features(input_batch)
            else:
                # For MobileNetV2:
                x = self.base(input_batch)
            pool = nn.AdaptiveAvgPool2d(1)
            x = pool(x)
        else:
            # Other bases already pool inside self.base.
            x = self.base(input_batch)
        # Originally only EfficientNet applies dropout after the last
        # bottleneck; here it is applied for every base.
        x = self.dropout(x)
        x = x.view(x.size(0), -1)  # flatten, keeping the batch dimension
        if self.hidden_layers:
            for i, layer in enumerate(self.classifier):
                if i < len(self.classifier) - 1:
                    x = self.activation(layer(x))
                    x = self.dropout(x)
                else:
                    logits = layer(x)
                    break
        else:
            logits = self.classifier(x)
        return logits
def preprocess_img(img, size=224):
    """Convert an image (PIL.Image or path string) into a normalized float32
    batch tensor of shape (1, 3, size, size).

    Applies the standard ImageNet mean/std normalization and transposes
    HxWxC -> CxHxW.

    Parameters
    ----------
    img: PIL.Image.Image or str path to an image file
    size: int, side length the image is resized to (default 224, matching the
          previous hard-coded behavior)

    Returns
    -------
    torch.FloatTensor of shape (1, 3, size, size)
    """
    if isinstance(img, str):
        img = Image.open(img)
    img = img.resize((size, size))
    img_np = np.array(img)
    # ImageNet channel-wise mean/std normalization.
    img_np = (img_np / 255 - [0.485, 0.456, 0.406]) / [0.229, 0.224, 0.225]
    img_np = img_np.transpose(2, 0, 1)  # HWC -> CHW
    img = torch.from_numpy(img_np)
    img = img.unsqueeze_(0)  # add batch dimension
    img = img.type(torch.FloatTensor)
    return img
def _print_prediction_bar(prediction_probability, label):
assert (prediction_probability>=0.0) and (prediction_probability<=1.0)
bar_size = 40
bar = '█' * int(bar_size * prediction_probability)
bar = bar + '' * int(bar_size * (1-prediction_probability))
sys.stdout.write(f"{label.ljust(20)} [{bar:{bar_size}s}] {prediction_probability*100:>6.2f}% \n")
sys.stdout.flush()
def make_prediction(model, img, metadata, print_predictions=True, preprocess_image=True):
    """Run a single forward pass and return (probabilities, binary predictions).

    ``img`` may be a file path, a PIL image, or an already-preprocessed batch
    tensor (pass ``preprocess_image=False`` for the latter). Probabilities are
    sigmoid-activated logits; a class is predicted when its probability
    exceeds 0.5. When ``print_predictions`` is set, one bar per class from
    ``metadata["id2labels"]`` plus the inference time is printed.
    """
    if isinstance(img, str):
        img = Image.open(img)
    if preprocess_image:
        img = preprocess_img(img)
    model.eval()
    start = time.perf_counter()
    with torch.no_grad():
        scores = torch.sigmoid(model(img)).numpy()[0]
        flags = scores > 0.5
    elapsed = time.perf_counter() - start
    if print_predictions:
        id2labels = metadata["id2labels"]
        for idx in range(len(id2labels)):
            _print_prediction_bar(scores[idx], id2labels[str(idx)])
        print(f"Inference time (CPU): {elapsed*1000:0.2f} ms")
    return scores, flags
if __name__ == "__main__":
    # Smoke test: load a pretrained checkpoint and classify one local image.
    # NOTE(review): get_metadata is imported but unused here.
    from bikit.utils import load_model, get_metadata
    # NOTE(review): hard-coded developer path — only works on the author's machine.
    img_path = "/home/philipp/Documents/MyLocalProjects/dacl_project/bridge-inspection-toolkit/bikit/data/11_001990.jpg"
    model_name = "MCDS_MobileNetV3Large"
    model, metadata = load_model(model_name)
    make_prediction(model, img_path, metadata)
building-inspection-toolkit | building-inspection-toolkit-master/bikit/metrics.py | import torch
from torchmetrics import Metric
from torchmetrics import Recall
class EMR_mt(Metric):
    """Exact Match Ratio for multi-label classification.

    A sample counts as correct only when *every* one of its labels is
    predicted correctly. With ``use_logits=True`` raw logits are thresholded
    at 0; otherwise probabilities are thresholded at 0.5.
    """
    def __init__(self, use_logits=True, dist_sync_on_step=False):
        super().__init__(dist_sync_on_step=dist_sync_on_step)
        self.use_logits = use_logits
        # Running counts, summed across processes in distributed settings.
        self.add_state("correct", default=torch.tensor(0), dist_reduce_fx="sum")
        self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
    def update(self, preds: torch.Tensor, targets: torch.Tensor):
        # Binarize predictions: logits at 0, probabilities at 0.5.
        threshold = 0.0 if self.use_logits else 0.5
        binary = preds > threshold
        # A row is an exact match when all of its labels agree with targets.
        exact = torch.all(binary == targets, dim=1)
        self.correct += exact.sum()
        self.total += targets.shape[0]
    def compute(self):
        # Fraction of samples where every label was predicted correctly.
        return self.correct.float() / self.total
class Recalls_mt(Recall):
    def __init__(self, average='none', num_classes=6):
        """Per-class recall; thin wrapper over torchmetrics' ``Recall`` that
        only changes the defaults (no averaging, 6 classes)."""
        super().__init__(average=average, num_classes=num_classes)
if __name__ == '__main__':
    # Quick manual check of both metrics on two toy batches.
    myemr = EMR_mt(use_logits=False)
    myrecalls = Recalls_mt()
    # data: probabilities for 3 samples x 6 classes, plus multi-hot targets
    preds0 = torch.tensor([[.9, 0.1, 0.9, 0.1, 0.9, 0.1],
                           [.8, 0.2, 0.9, 0.2, 0.9, 0.2],
                           [.7, 0.9, 0.2 , 0.2, 0.2 , 0.2]])
    preds1 = torch.tensor([[.0, 0.1, 0.9, 0.1, 0.9, 0.1],
                           [.8, 0.2, 0.9, 0.2, 0.9, 0.2],
                           [.7, 0.9, 0.2 , 0.9, 0.2 , 0.9]])
    target = torch.tensor([[1, 0, 1, 0, 0, 1],
                           [1, 1, 0, 0, 1, 0],
                           [1, 1, 0, 1, 0, 1]])
    # batch 0: update both metrics, then report the running values
    myemr(preds0, target), myrecalls(preds0, target)
    print(myemr.compute(), myrecalls.compute())
    # batch 1: values now accumulate over both batches
    myemr(preds1, target), myrecalls(preds1, target)
    print(myemr.compute(), myrecalls.compute())
    # Reset at end of epoch
    myemr.reset(), myrecalls.reset()
    print(myemr, myrecalls)
Hierarchical-Localization | Hierarchical-Localization-master/hloc/extract_features.py | import argparse
import torch
from pathlib import Path
from typing import Dict, List, Union, Optional
import h5py
from types import SimpleNamespace
import cv2
import numpy as np
from tqdm import tqdm
import pprint
import collections.abc as collections
import PIL.Image
import glob
from . import extractors, logger
from .utils.base_model import dynamic_load
from .utils.parsers import parse_image_lists
from .utils.io import read_image, list_h5_names
'''
A set of standard configurations that can be directly selected from the command
line using their name. Each is a dictionary with the following entries:
- output: the name of the feature file that will be generated.
- model: the model configuration, as passed to a feature extractor.
- preprocessing: how to preprocess the images read from disk.
'''
confs = {
    # --- Local feature extractors (keypoints + descriptors) ---
    'superpoint_aachen': {
        'output': 'feats-superpoint-n4096-r1024',
        'model': {
            'name': 'superpoint',
            'nms_radius': 3,
            'max_keypoints': 4096,
        },
        'preprocessing': {
            'grayscale': True,
            'resize_max': 1024,
        },
    },
    # Resize images to 1600px even if they are originally smaller.
    # Improves the keypoint localization if the images are of good quality.
    'superpoint_max': {
        'output': 'feats-superpoint-n4096-rmax1600',
        'model': {
            'name': 'superpoint',
            'nms_radius': 3,
            'max_keypoints': 4096,
        },
        'preprocessing': {
            'grayscale': True,
            'resize_max': 1600,
            'resize_force': True,
        },
    },
    'superpoint_inloc': {
        'output': 'feats-superpoint-n4096-r1600',
        'model': {
            'name': 'superpoint',
            'nms_radius': 4,
            'max_keypoints': 4096,
        },
        'preprocessing': {
            'grayscale': True,
            'resize_max': 1600,
        },
    },
    'r2d2': {
        'output': 'feats-r2d2-n5000-r1024',
        'model': {
            'name': 'r2d2',
            'max_keypoints': 5000,
        },
        'preprocessing': {
            'grayscale': False,
            'resize_max': 1024,
        },
    },
    'd2net-ss': {
        'output': 'feats-d2net-ss',
        'model': {
            'name': 'd2net',
            'multiscale': False,
        },
        'preprocessing': {
            'grayscale': False,
            'resize_max': 1600,
        },
    },
    'sift': {
        'output': 'feats-sift',
        'model': {
            'name': 'dog'
        },
        'preprocessing': {
            'grayscale': True,
            'resize_max': 1600,
        },
    },
    'sosnet': {
        'output': 'feats-sosnet',
        'model': {
            'name': 'dog',
            'descriptor': 'sosnet'
        },
        'preprocessing': {
            'grayscale': True,
            'resize_max': 1600,
        },
    },
    'disk': {
        'output': 'feats-disk',
        'model': {
            'name': 'disk',
            'max_keypoints': 5000,
        },
        'preprocessing': {
            'grayscale': False,
            'resize_max': 1600,
        },
    },
    # --- Global descriptors (one vector per image, for retrieval) ---
    'dir': {
        'output': 'global-feats-dir',
        'model': {'name': 'dir'},
        'preprocessing': {'resize_max': 1024},
    },
    'netvlad': {
        'output': 'global-feats-netvlad',
        'model': {'name': 'netvlad'},
        'preprocessing': {'resize_max': 1024},
    },
    'openibl': {
        'output': 'global-feats-openibl',
        'model': {'name': 'openibl'},
        'preprocessing': {'resize_max': 1024},
    },
    'cosplace': {
        'output': 'global-feats-cosplace',
        'model': {'name': 'cosplace'},
        'preprocessing': {'resize_max': 1024},
    }
}
def resize_image(image, size, interp):
    """Resize ``image`` to ``size`` (width, height) with either an OpenCV or
    a PIL interpolation mode, selected by the ``'cv2_*'`` / ``'pil_*'`` prefix
    of ``interp``; raises ValueError for any other prefix."""
    if interp.startswith('cv2_'):
        mode = getattr(cv2, 'INTER_' + interp[len('cv2_'):].upper())
        h, w = image.shape[:2]
        # INTER_AREA is meant for shrinking; fall back to linear when upsampling.
        if mode == cv2.INTER_AREA and (w < size[0] or h < size[1]):
            mode = cv2.INTER_LINEAR
        return cv2.resize(image, size, interpolation=mode)
    if interp.startswith('pil_'):
        mode = getattr(PIL.Image, interp[len('pil_'):].upper())
        pil_img = PIL.Image.fromarray(image.astype(np.uint8))
        pil_img = pil_img.resize(size, resample=mode)
        return np.asarray(pil_img, dtype=image.dtype)
    raise ValueError(
        f'Unknown interpolation {interp}.')
class ImageDataset(torch.utils.data.Dataset):
    """Yields images from a root directory (discovered by glob) or from an
    explicit list of relative paths, resized and normalized for extraction."""
    default_conf = {
        'globs': ['*.jpg', '*.png', '*.jpeg', '*.JPG', '*.PNG'],
        'grayscale': False,
        'resize_max': None,
        'resize_force': False,
        'interpolation': 'cv2_area',  # pil_linear is more accurate but slower
    }
    def __init__(self, root, conf, paths=None):
        # Merge the user conf over the defaults; keep it as a namespace.
        self.conf = conf = SimpleNamespace(**{**self.default_conf, **conf})
        self.root = root
        if paths is None:
            # Discover images recursively with the configured glob patterns.
            paths = []
            for g in conf.globs:
                paths += glob.glob(
                    (Path(root) / '**' / g).as_posix(), recursive=True)
            if len(paths) == 0:
                raise ValueError(f'Could not find any image in root: {root}.')
            # Deduplicate and sort for a deterministic order.
            paths = sorted(set(paths))
            self.names = [Path(p).relative_to(root).as_posix() for p in paths]
            logger.info(f'Found {len(self.names)} images in root {root}.')
        else:
            # ``paths`` may be a list file (str/Path) or an iterable of names.
            if isinstance(paths, (Path, str)):
                self.names = parse_image_lists(paths)
            elif isinstance(paths, collections.Iterable):
                self.names = [p.as_posix() if isinstance(p, Path) else p
                              for p in paths]
            else:
                raise ValueError(f'Unknown format for path argument {paths}.')
            # Validate that every listed image actually exists under root.
            for name in self.names:
                if not (root / name).exists():
                    raise ValueError(
                        f'Image {name} does not exists in root: {root}.')
    def __getitem__(self, idx):
        # Read, optionally downscale, and convert to CHW float in [0, 1].
        name = self.names[idx]
        image = read_image(self.root / name, self.conf.grayscale)
        image = image.astype(np.float32)
        size = image.shape[:2][::-1]  # (width, height)
        if self.conf.resize_max and (self.conf.resize_force
                                     or max(size) > self.conf.resize_max):
            scale = self.conf.resize_max / max(size)
            size_new = tuple(int(round(x*scale)) for x in size)
            image = resize_image(image, size_new, self.conf.interpolation)
        if self.conf.grayscale:
            image = image[None]
        else:
            image = image.transpose((2, 0, 1))  # HxWxC to CxHxW
        image = image / 255.
        data = {
            'image': image,
            'original_size': np.array(size),
        }
        return data
    def __len__(self):
        return len(self.names)
@torch.no_grad()
def main(conf: Dict,
         image_dir: Path,
         export_dir: Optional[Path] = None,
         as_half: bool = True,
         image_list: Optional[Union[Path, List[str]]] = None,
         feature_path: Optional[Path] = None,
         overwrite: bool = False) -> Path:
    """Extract features for all images in ``image_dir`` and store them in an
    HDF5 file; returns the path of that file.

    Images already present in the feature file are skipped unless
    ``overwrite`` is set. With ``as_half``, float32 arrays are stored as
    float16 to halve disk usage.
    """
    logger.info('Extracting local features with configuration:'
                f'\n{pprint.pformat(conf)}')
    dataset = ImageDataset(image_dir, conf['preprocessing'], image_list)
    if feature_path is None:
        feature_path = Path(export_dir, conf['output']+'.h5')
    feature_path.parent.mkdir(exist_ok=True, parents=True)
    # Skip images whose features already exist (unless overwriting).
    skip_names = set(list_h5_names(feature_path)
                     if feature_path.exists() and not overwrite else ())
    dataset.names = [n for n in dataset.names if n not in skip_names]
    if len(dataset.names) == 0:
        logger.info('Skipping the extraction.')
        return feature_path
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    Model = dynamic_load(extractors, conf['model']['name'])
    model = Model(conf['model']).eval().to(device)
    loader = torch.utils.data.DataLoader(
        dataset, num_workers=1, shuffle=False, pin_memory=True)
    for idx, data in enumerate(tqdm(loader)):
        name = dataset.names[idx]
        pred = model({'image': data['image'].to(device, non_blocking=True)})
        pred = {k: v[0].cpu().numpy() for k, v in pred.items()}
        pred['image_size'] = original_size = data['original_size'][0].numpy()
        if 'keypoints' in pred:
            # Map keypoints back to the original (pre-resize) resolution.
            size = np.array(data['image'].shape[-2:][::-1])
            scales = (original_size / size).astype(np.float32)
            pred['keypoints'] = (pred['keypoints'] + .5) * scales[None] - .5
            if 'scales' in pred:
                pred['scales'] *= scales.mean()
            # add keypoint uncertainties scaled to the original resolution
            uncertainty = getattr(model, 'detection_noise', 1) * scales.mean()
        if as_half:
            # Store float32 arrays as float16 to save disk space.
            for k in pred:
                dt = pred[k].dtype
                if (dt == np.float32) and (dt != np.float16):
                    pred[k] = pred[k].astype(np.float16)
        with h5py.File(str(feature_path), 'a', libver='latest') as fd:
            try:
                if name in fd:
                    del fd[name]
                grp = fd.create_group(name)
                for k, v in pred.items():
                    grp.create_dataset(k, data=v)
                if 'keypoints' in pred:
                    grp['keypoints'].attrs['uncertainty'] = uncertainty
            except OSError as error:
                # Clean up the partial group before re-raising on disk-full.
                if 'No space left on device' in error.args[0]:
                    logger.error(
                        'Out of disk space: storing features on disk can take '
                        'significant space, did you enable the as_half flag?')
                    del grp, fd[name]
                raise error
        del pred
    logger.info('Finished exporting features.')
    return feature_path
if __name__ == '__main__':
    # CLI wrapper around main().
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_dir', type=Path, required=True)
    parser.add_argument('--export_dir', type=Path, required=True)
    parser.add_argument('--conf', type=str, default='superpoint_aachen',
                        choices=list(confs.keys()))
    parser.add_argument('--as_half', action='store_true')
    parser.add_argument('--image_list', type=Path)
    parser.add_argument('--feature_path', type=Path)
    args = parser.parse_args()
    # Bug fix: --image_list and --feature_path were parsed but never
    # forwarded to main(), so they were silently ignored.
    main(confs[args.conf], args.image_dir, args.export_dir, args.as_half,
         image_list=args.image_list, feature_path=args.feature_path)
| 10,549 | 32.814103 | 79 | py |
Hierarchical-Localization | Hierarchical-Localization-master/hloc/pairs_from_retrieval.py | import argparse
from pathlib import Path
from typing import Optional
import h5py
import numpy as np
import torch
import collections.abc as collections
from . import logger
from .utils.parsers import parse_image_lists
from .utils.read_write_model import read_images_binary
from .utils.io import list_h5_names
def parse_names(prefix, names, names_all):
    """Resolve the image-name selection: by prefix, by an explicit list or
    list file, or (when neither is given) all available names.

    Raises ValueError when a prefix matches nothing or when ``names`` has an
    unsupported type.
    """
    if prefix is not None:
        # A non-string prefix is treated as several alternatives.
        if not isinstance(prefix, str):
            prefix = tuple(prefix)
        selected = [n for n in names_all if n.startswith(prefix)]
        if len(selected) == 0:
            raise ValueError(
                f'Could not find any image with the prefix `{prefix}`.')
        return selected
    if names is None:
        return names_all
    if isinstance(names, (str, Path)):
        # A path points to a text file listing one image name per line.
        return parse_image_lists(names)
    if isinstance(names, collections.Iterable):
        return list(names)
    raise ValueError(f'Unknown type of image list: {names}.'
                     'Provide either a list or a path to a list file.')
def get_descriptors(names, path, name2idx=None, key='global_descriptor'):
    """Load the descriptors of ``names`` from HDF5 feature file(s) and stack
    them into a float tensor of shape (len(names), dim).

    When ``name2idx`` is given, ``path`` is a sequence of files and each name
    is read from the file it is mapped to; otherwise ``path`` is one file.
    """
    if name2idx is None:
        with h5py.File(str(path), 'r', libver='latest') as fd:
            arrays = [fd[name][key].__array__() for name in names]
    else:
        arrays = []
        for name in names:
            source = path[name2idx[name]]
            with h5py.File(str(source), 'r', libver='latest') as fd:
                arrays.append(fd[name][key].__array__())
    return torch.from_numpy(np.stack(arrays, 0)).float()
def pairs_from_score_matrix(scores: torch.Tensor,
                            invalid: np.array,
                            num_select: int,
                            min_score: Optional[float] = None):
    """For every row, select the ``num_select`` highest-scoring columns that
    are neither masked by ``invalid`` nor (optionally) below ``min_score``.

    Returns a list of (row, column) index pairs. Note that ``scores`` is
    modified in place: masked entries are set to -inf.
    """
    assert scores.shape == invalid.shape
    if isinstance(scores, np.ndarray):
        scores = torch.from_numpy(scores)
    mask = torch.from_numpy(invalid).to(scores.device)
    if min_score is not None:
        mask |= scores < min_score
    scores.masked_fill_(mask, float('-inf'))
    topk = torch.topk(scores, num_select, dim=1)
    indices = topk.indices.cpu().numpy()
    # Entries that were masked come out as -inf; drop them.
    keep = topk.values.isfinite().cpu().numpy()
    return [(row, indices[row, col]) for row, col in zip(*np.where(keep))]
def main(descriptors, output, num_matched,
         query_prefix=None, query_list=None,
         db_prefix=None, db_list=None, db_model=None, db_descriptors=None):
    """Write the ``num_matched`` most similar database images for every query
    image to ``output``, one ``query db`` pair per line.

    Similarity is the dot product of the global descriptors stored in the
    given HDF5 file(s); exact self-matches (same name) are excluded.
    """
    logger.info('Extracting image pairs from a retrieval database.')
    # We handle multiple reference feature files.
    # We only assume that names are unique among them and map names to files.
    if db_descriptors is None:
        db_descriptors = descriptors
    if isinstance(db_descriptors, (Path, str)):
        db_descriptors = [db_descriptors]
    name2db = {n: i for i, p in enumerate(db_descriptors)
               for n in list_h5_names(p)}
    db_names_h5 = list(name2db.keys())
    query_names_h5 = list_h5_names(descriptors)
    if db_model:
        # Restrict the database to images registered in the COLMAP model.
        images = read_images_binary(db_model / 'images.bin')
        db_names = [i.name for i in images.values()]
    else:
        db_names = parse_names(db_prefix, db_list, db_names_h5)
    if len(db_names) == 0:
        raise ValueError('Could not find any database image.')
    query_names = parse_names(query_prefix, query_list, query_names_h5)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    db_desc = get_descriptors(db_names, db_descriptors, name2db)
    query_desc = get_descriptors(query_names, descriptors)
    # Pairwise similarity: (num_queries, num_db) dot products.
    sim = torch.einsum('id,jd->ij', query_desc.to(device), db_desc.to(device))
    # Avoid self-matching
    self = np.array(query_names)[:, None] == np.array(db_names)[None]
    pairs = pairs_from_score_matrix(sim, self, num_matched, min_score=0)
    pairs = [(query_names[i], db_names[j]) for i, j in pairs]
    logger.info(f'Found {len(pairs)} pairs.')
    with open(output, 'w') as f:
        f.write('\n'.join(' '.join([i, j]) for i, j in pairs))
if __name__ == "__main__":
    # CLI wrapper; each flag maps 1:1 onto a keyword argument of main().
    parser = argparse.ArgumentParser()
    parser.add_argument('--descriptors', type=Path, required=True)
    parser.add_argument('--output', type=Path, required=True)
    parser.add_argument('--num_matched', type=int, required=True)
    parser.add_argument('--query_prefix', type=str, nargs='+')
    parser.add_argument('--query_list', type=Path)
    parser.add_argument('--db_prefix', type=str, nargs='+')
    parser.add_argument('--db_list', type=Path)
    parser.add_argument('--db_model', type=Path)
    parser.add_argument('--db_descriptors', type=Path)
    args = parser.parse_args()
    main(**args.__dict__)
| 4,649 | 36.804878 | 79 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.