repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
NBFNet | NBFNet-master/nbfnet/model.py | from collections.abc import Sequence
import torch
from torch import nn
from torch import autograd
from torch_scatter import scatter_add
from torchdrug import core, layers, utils
from torchdrug.layers import functional
from torchdrug.core import Registry as R
from . import layer
@R.register("model.NBFNet")
class NeuralBellmanFordNetwork(nn.Module, core.Configurable):
    """
    Neural Bellman-Ford Network (NBFNet) for link prediction.

    Node representations are computed by a generalized Bellman-Ford iteration
    (a stack of relational graph convolutions) started from the query head
    entity and conditioned on the query relation; the score of a triplet is an
    MLP over the tail node's representation.
    """

    def __init__(self, input_dim, hidden_dims, num_relation=None, symmetric=False,
                 message_func="distmult", aggregate_func="pna", short_cut=False, layer_norm=False, activation="relu",
                 concat_hidden=False, num_mlp_layer=2, dependent=True, remove_one_hop=False,
                 num_beam=10, path_topk=10):
        super(NeuralBellmanFordNetwork, self).__init__()

        if not isinstance(hidden_dims, Sequence):
            hidden_dims = [hidden_dims]
        if num_relation is None:
            # homogeneous graph: a single dummy relation type is used
            double_relation = 1
        else:
            num_relation = int(num_relation)
            # every relation is paired with an inverse relation
            double_relation = num_relation * 2
        self.dims = [input_dim] + list(hidden_dims)
        self.num_relation = num_relation
        self.symmetric = symmetric
        self.short_cut = short_cut  # residual connections between layers
        self.concat_hidden = concat_hidden
        self.remove_one_hop = remove_one_hop  # drop all 1-hop edges of a training pair
        self.num_beam = num_beam
        self.path_topk = path_topk

        self.layers = nn.ModuleList()
        for i in range(len(self.dims) - 1):
            self.layers.append(layer.GeneralizedRelationalConv(self.dims[i], self.dims[i + 1], double_relation,
                                                               self.dims[0], message_func, aggregate_func, layer_norm,
                                                               activation, dependent))

        # final feature = (all or last) hidden states concatenated with the query embedding
        feature_dim = hidden_dims[-1] * (len(hidden_dims) if concat_hidden else 1) + input_dim
        self.query = nn.Embedding(double_relation, input_dim)
        self.mlp = layers.MLP(feature_dim, [feature_dim] * (num_mlp_layer - 1) + [1])

    def remove_easy_edges(self, graph, h_index, t_index, r_index=None):
        """Mask out edges of the current training batch so the model cannot copy
        the answer directly from the graph."""
        if self.remove_one_hop:
            # remove all edges between head and tail, in both directions,
            # regardless of relation type
            h_index_ext = torch.cat([h_index, t_index], dim=-1)
            t_index_ext = torch.cat([t_index, h_index], dim=-1)
            if r_index is not None:
                # NOTE(review): ``any`` shadows the builtin; -1 acts as a wildcard for graph.match
                any = -torch.ones_like(h_index_ext)
                pattern = torch.stack([h_index_ext, t_index_ext, any], dim=-1)
            else:
                pattern = torch.stack([h_index_ext, t_index_ext], dim=-1)
        else:
            # only remove the exact (h, t[, r]) edges of the batch
            if r_index is not None:
                pattern = torch.stack([h_index, t_index, r_index], dim=-1)
            else:
                pattern = torch.stack([h_index, t_index], dim=-1)
        pattern = pattern.flatten(0, -2)
        edge_index = graph.match(pattern)[0]
        edge_mask = ~functional.as_mask(edge_index, graph.num_edge)
        return graph.edge_mask(edge_mask)

    def negative_sample_to_tail(self, h_index, t_index, r_index):
        # convert p(h | t, r) to p(t' | h', r')
        # h' = t, r' = r^{-1}, t' = h
        # a row is "tail-negative" when all its heads are identical
        is_t_neg = (h_index == h_index[:, [0]]).all(dim=-1, keepdim=True)
        new_h_index = torch.where(is_t_neg, h_index, t_index)
        new_t_index = torch.where(is_t_neg, t_index, h_index)
        # inverse relations are stored at offset num_relation
        new_r_index = torch.where(is_t_neg, r_index, r_index + self.num_relation)
        return new_h_index, new_t_index, new_r_index

    def as_relational_graph(self, graph, self_loop=True):
        # add self loop
        # convert homogeneous graphs to knowledge graphs with 1 relation
        edge_list = graph.edge_list
        edge_weight = graph.edge_weight
        if self_loop:
            node_in = node_out = torch.arange(graph.num_node, device=self.device)
            loop = torch.stack([node_in, node_out], dim=-1)
            edge_list = torch.cat([edge_list, loop])
            edge_weight = torch.cat([edge_weight, torch.ones(graph.num_node, device=self.device)])
        # append relation type 0 to every edge
        relation = torch.zeros(len(edge_list), 1, dtype=torch.long, device=self.device)
        edge_list = torch.cat([edge_list, relation], dim=-1)
        graph = type(graph)(edge_list, edge_weight=edge_weight, num_node=graph.num_node,
                            num_relation=1, meta_dict=graph.meta_dict, **graph.data_dict)
        return graph

    @utils.cached
    def bellmanford(self, graph, h_index, r_index, separate_grad=False):
        """Run the generalized Bellman-Ford iteration from ``h_index`` conditioned
        on ``r_index``.

        Returns a dict with the node features and the per-layer graphs; when
        ``separate_grad`` is True each layer gets a cloned graph with
        differentiable edge weights (used for path visualization).
        """
        query = self.query(r_index)
        index = h_index.unsqueeze(-1).expand_as(query)
        # boundary condition: source nodes start with the query embedding,
        # all other nodes start with zeros
        boundary = torch.zeros(graph.num_node, *query.shape, device=self.device)
        boundary.scatter_add_(0, index.unsqueeze(0), query.unsqueeze(0))
        with graph.graph():
            graph.query = query
        with graph.node():
            graph.boundary = boundary

        hiddens = []
        step_graphs = []
        layer_input = boundary

        # NOTE(review): the loop variable shadows the imported ``layer`` module
        for layer in self.layers:
            if separate_grad:
                # each layer gets its own copy so edge gradients stay separable
                step_graph = graph.clone().requires_grad_()
            else:
                step_graph = graph
            hidden = layer(step_graph, layer_input)
            if self.short_cut and hidden.shape == layer_input.shape:
                hidden = hidden + layer_input
            hiddens.append(hidden)
            step_graphs.append(step_graph)
            layer_input = hidden

        node_query = query.expand(graph.num_node, -1, -1)
        if self.concat_hidden:
            output = torch.cat(hiddens + [node_query], dim=-1)
        else:
            output = torch.cat([hiddens[-1], node_query], dim=-1)

        return {
            "node_feature": output,
            "step_graphs": step_graphs,
        }

    def forward(self, graph, h_index, t_index, r_index=None, all_loss=None, metric=None):
        """Score the given (h, t, r) triplets; ``all_loss`` being non-None marks
        training mode, where batch edges are removed first."""
        if all_loss is not None:
            # remove the training samples' own edges to avoid label leakage
            graph = self.remove_easy_edges(graph, h_index, t_index, r_index)

        shape = h_index.shape
        if graph.num_relation:
            graph = graph.undirected(add_inverse=True)
            h_index, t_index, r_index = self.negative_sample_to_tail(h_index, t_index, r_index)
        else:
            # homogeneous graph: wrap it as a 1-relation knowledge graph
            graph = self.as_relational_graph(graph)
            h_index = h_index.view(-1, 1)
            t_index = t_index.view(-1, 1)
            r_index = torch.zeros_like(h_index)

        # each row must share one head and one relation, so a single
        # Bellman-Ford pass per row scores all its tail candidates
        assert (h_index[:, [0]] == h_index).all()
        assert (r_index[:, [0]] == r_index).all()
        output = self.bellmanford(graph, h_index[:, 0], r_index[:, 0])
        feature = output["node_feature"].transpose(0, 1)
        index = t_index.unsqueeze(-1).expand(-1, -1, feature.shape[-1])
        # gather the representations of the tail candidates
        feature = feature.gather(1, index)

        if self.symmetric:
            # average with the score computed from the inverse direction
            assert (t_index[:, [0]] == t_index).all()
            output = self.bellmanford(graph, t_index[:, 0], r_index[:, 0])
            inv_feature = output["node_feature"].transpose(0, 1)
            index = h_index.unsqueeze(-1).expand(-1, -1, inv_feature.shape[-1])
            inv_feature = inv_feature.gather(1, index)
            feature = (feature + inv_feature) / 2

        score = self.mlp(feature).squeeze(-1)
        return score.view(shape)

    def visualize(self, graph, h_index, t_index, r_index):
        """Return the top-k interpretable paths (and weights) for a single triplet,
        obtained by beam search over edge gradients."""
        assert h_index.numel() == 1 and h_index.ndim == 1
        graph = graph.undirected(add_inverse=True)

        output = self.bellmanford(graph, h_index, r_index, separate_grad=True)
        feature = output["node_feature"]
        step_graphs = output["step_graphs"]
        index = t_index.unsqueeze(0).unsqueeze(-1).expand(-1, -1, feature.shape[-1])
        feature = feature.gather(0, index).squeeze(0)
        score = self.mlp(feature).squeeze(-1)

        # gradient of the score w.r.t. each layer's edge weights measures edge importance
        edge_weights = [graph.edge_weight for graph in step_graphs]
        edge_grads = autograd.grad(score, edge_weights)
        for graph, edge_grad in zip(step_graphs, edge_grads):
            with graph.edge():
                graph.edge_grad = edge_grad
        distances, back_edges = self.beam_search_distance(step_graphs, h_index, t_index, self.num_beam)
        paths, weights = self.topk_average_length(distances, back_edges, t_index, self.path_topk)

        return paths, weights

    @torch.no_grad()
    def beam_search_distance(self, graphs, h_index, t_index, num_beam=10):
        """One beam-search step per layer graph: for every node keep the
        ``num_beam`` largest accumulated edge-gradient paths from ``h_index``."""
        num_node = graphs[0].num_node
        # NOTE(review): ``input`` shadows the builtin
        input = torch.full((num_node, num_beam), float("-inf"), device=self.device)
        input[h_index, 0] = 0  # the source holds a single 0-distance beam
        distances = []
        back_edges = []
        for graph in graphs:
            # edges leaving the target cannot be part of a path that ends there
            graph = graph.edge_mask(graph.edge_list[:, 0] != t_index)
            node_in, node_out = graph.edge_list.t()[:2]

            # message: beam distances at the edge's source plus this edge's gradient
            message = input[node_in] + graph.edge_grad.unsqueeze(-1)
            # (num_edge, num_beam, 3): the (h, t, r) edge that produced each message
            msg_source = graph.edge_list.unsqueeze(1).expand(-1, num_beam, -1)

            # messages with equal value and equal source edge are duplicates
            is_duplicate = torch.isclose(message.unsqueeze(-1), message.unsqueeze(-2)) & \
                           (msg_source.unsqueeze(-2) == msg_source.unsqueeze(-3)).all(dim=-1)
            # bias by beam rank so argmax selects the earliest matching beam
            is_duplicate = is_duplicate.float() - \
                           torch.arange(num_beam, dtype=torch.float, device=self.device) / (num_beam + 1)
            # pick the first occurrence as the previous state
            prev_rank = is_duplicate.argmax(dim=-1, keepdim=True)
            msg_source = torch.cat([msg_source, prev_rank], dim=-1)  # (num_edge, num_beam, 4)

            node_out, order = node_out.sort()
            node_out_set = torch.unique(node_out)
            # sort message w.r.t. node_out
            message = message[order].flatten()
            msg_source = msg_source[order].flatten(0, -2)
            size = scatter_add(torch.ones_like(node_out), node_out, dim_size=num_node)
            msg2out = functional._size_to_index(size[node_out_set] * num_beam)
            # deduplicate adjacent messages that share the same (h, t, r, prev_rank)
            is_duplicate = (msg_source[1:] == msg_source[:-1]).all(dim=-1)
            is_duplicate = torch.cat([torch.zeros(1, dtype=torch.bool, device=self.device), is_duplicate])
            message = message[~is_duplicate]
            msg_source = msg_source[~is_duplicate]
            msg2out = msg2out[~is_duplicate]
            size = scatter_add(torch.ones_like(msg2out), msg2out, dim_size=len(node_out_set))

            if not torch.isinf(message).all():
                # per-node top-k messages become the node's new beams
                distance, rel_index = functional.variadic_topk(message, size, k=num_beam)
                abs_index = rel_index + (size.cumsum(0) - size).unsqueeze(-1)
                back_edge = msg_source[abs_index]
                distance = distance.view(len(node_out_set), num_beam)
                back_edge = back_edge.view(len(node_out_set), num_beam, 4)
                # scatter results back to the full node range
                distance = scatter_add(distance, node_out_set, dim=0, dim_size=num_node)
                back_edge = scatter_add(back_edge, node_out_set, dim=0, dim_size=num_node)
            else:
                # no reachable message this step
                distance = torch.full((num_node, num_beam), float("-inf"), device=self.device)
                back_edge = torch.zeros(num_node, num_beam, 4, dtype=torch.long, device=self.device)

            distances.append(distance)
            back_edges.append(back_edge)
            input = distance

        return distances, back_edges

    def topk_average_length(self, distances, back_edges, t_index, k=10):
        """Decode the beam-search results into the top-k paths reaching ``t_index``,
        ranked by average gradient per edge."""
        paths = []
        average_lengths = []

        for i in range(len(distances)):
            # candidate paths of length i + 1, best distances first
            distance, order = distances[i][t_index].flatten(0, -1).sort(descending=True)
            back_edge = back_edges[i][t_index].flatten(0, -2)[order]
            for d, (h, t, r, prev_rank) in zip(distance[:k].tolist(), back_edge[:k].tolist()):
                if d == float("-inf"):
                    break
                # follow back pointers from the final edge to the source
                path = [(h, t, r)]
                for j in range(i - 1, -1, -1):
                    h, t, r, prev_rank = back_edges[j][h, prev_rank].tolist()
                    path.append((h, t, r))
                paths.append(path[::-1])
                average_lengths.append(d / len(path))

        if paths:
            average_lengths, paths = zip(*sorted(zip(average_lengths, paths), reverse=True)[:k])

        return paths, average_lengths
NBFNet | NBFNet-master/nbfnet/dataset.py | import os
import csv
import glob
from tqdm import tqdm
from ogb import linkproppred
import torch
from torch.utils import data as torch_data
from torchdrug import data, datasets, utils
from torchdrug.core import Registry as R
class InductiveKnowledgeGraphDataset(data.KnowledgeGraphDataset):
    """Knowledge graph dataset for the inductive setting: train and test use
    disjoint entity vocabularies while sharing the relation vocabulary."""

    def load_inductive_tsvs(self, train_files, test_files, verbose=0):
        """Load triplets from tab-separated files.

        Parameters:
            train_files (list of str): [train, valid] files over the train entities
            test_files (list of str): [test-graph, test] files over the test entities
            verbose (int): whether to show a progress bar
        """
        assert len(train_files) == len(test_files) == 2
        inv_train_entity_vocab = {}
        inv_test_entity_vocab = {}
        inv_relation_vocab = {}
        triplets = []
        num_samples = []

        for txt_file in train_files:
            with open(txt_file, "r") as fin:
                reader = csv.reader(fin, delimiter="\t")
                if verbose:
                    reader = tqdm(reader, "Loading %s" % txt_file, utils.get_line_count(txt_file))

                num_sample = 0
                for tokens in reader:
                    h_token, r_token, t_token = tokens
                    # entities and relations are indexed on first appearance
                    if h_token not in inv_train_entity_vocab:
                        inv_train_entity_vocab[h_token] = len(inv_train_entity_vocab)
                    h = inv_train_entity_vocab[h_token]
                    if r_token not in inv_relation_vocab:
                        inv_relation_vocab[r_token] = len(inv_relation_vocab)
                    r = inv_relation_vocab[r_token]
                    if t_token not in inv_train_entity_vocab:
                        inv_train_entity_vocab[t_token] = len(inv_train_entity_vocab)
                    t = inv_train_entity_vocab[t_token]
                    triplets.append((h, t, r))
                    num_sample += 1
            num_samples.append(num_sample)

        for txt_file in test_files:
            with open(txt_file, "r") as fin:
                reader = csv.reader(fin, delimiter="\t")
                if verbose:
                    reader = tqdm(reader, "Loading %s" % txt_file, utils.get_line_count(txt_file))

                num_sample = 0
                for tokens in reader:
                    h_token, r_token, t_token = tokens
                    if h_token not in inv_test_entity_vocab:
                        inv_test_entity_vocab[h_token] = len(inv_test_entity_vocab)
                    h = inv_test_entity_vocab[h_token]
                    # every test relation must already exist in the train vocabulary
                    assert r_token in inv_relation_vocab
                    r = inv_relation_vocab[r_token]
                    if t_token not in inv_test_entity_vocab:
                        inv_test_entity_vocab[t_token] = len(inv_test_entity_vocab)
                    t = inv_test_entity_vocab[t_token]
                    triplets.append((h, t, r))
                    num_sample += 1
            num_samples.append(num_sample)

        train_entity_vocab, inv_train_entity_vocab = self._standarize_vocab(None, inv_train_entity_vocab)
        test_entity_vocab, inv_test_entity_vocab = self._standarize_vocab(None, inv_test_entity_vocab)
        relation_vocab, inv_relation_vocab = self._standarize_vocab(None, inv_relation_vocab)

        # fact graphs: the train graph doubles as the validation graph
        self.train_graph = data.Graph(triplets[:num_samples[0]],
                                      num_node=len(train_entity_vocab), num_relation=len(relation_vocab))
        self.valid_graph = self.train_graph
        self.test_graph = data.Graph(triplets[sum(num_samples[:2]): sum(num_samples[:3])],
                                     num_node=len(test_entity_vocab), num_relation=len(relation_vocab))
        self.graph = self.train_graph
        # supervision samples: train + valid + test (the test fact graph is excluded)
        self.triplets = torch.tensor(triplets[:sum(num_samples[:2])] + triplets[sum(num_samples[:3]):])
        self.num_samples = num_samples[:2] + num_samples[3:]
        self.train_entity_vocab = train_entity_vocab
        self.test_entity_vocab = test_entity_vocab
        self.relation_vocab = relation_vocab
        self.inv_train_entity_vocab = inv_train_entity_vocab
        self.inv_test_entity_vocab = inv_test_entity_vocab
        self.inv_relation_vocab = inv_relation_vocab

    def __getitem__(self, index):
        # a sample is a (h, t, r) triplet
        return self.triplets[index]

    def split(self):
        """Split into train / valid / test subsets according to ``num_samples``."""
        offset = 0
        splits = []
        for num_sample in self.num_samples:
            split = torch_data.Subset(self, range(offset, offset + num_sample))
            splits.append(split)
            offset += num_sample
        return splits
@R.register("datasets.CoraLinkPrediction")
class CoraLinkPrediction(datasets.Cora):
    """Cora citation graph repurposed for link prediction: samples are edges."""

    def __init__(self, **kwargs):
        super(CoraLinkPrediction, self).__init__(**kwargs)
        # node-level transforms are irrelevant when samples are edges
        self.transform = None

    def __getitem__(self, index):
        return self.graph.edge_list[index]

    def __len__(self):
        return self.graph.num_edge

    def split(self, ratios=(85, 5, 10)):
        """Randomly split the edges into train / valid / test with a fixed seed."""
        num_edge = self.graph.num_edge
        total = sum(ratios)
        sizes = [int(ratio / total * num_edge) for ratio in ratios]
        # give any rounding remainder to the last split
        sizes[-1] = num_edge - sum(sizes[:-1])
        generator = torch.Generator()
        generator.manual_seed(0)  # deterministic split
        return torch_data.random_split(self, sizes, generator=generator)
@R.register("datasets.CiteSeerLinkPrediction")
class CiteSeerLinkPrediction(datasets.CiteSeer):
    """CiteSeer citation graph repurposed for link prediction: samples are edges."""

    def __init__(self, **kwargs):
        super(CiteSeerLinkPrediction, self).__init__(**kwargs)
        # node-level transforms are irrelevant when samples are edges
        self.transform = None

    def __getitem__(self, index):
        return self.graph.edge_list[index]

    def __len__(self):
        return self.graph.num_edge

    def split(self, ratios=(85, 5, 10)):
        """Randomly split the edges into train / valid / test with a fixed seed."""
        num_edge = self.graph.num_edge
        total = sum(ratios)
        sizes = [int(ratio / total * num_edge) for ratio in ratios]
        # give any rounding remainder to the last split
        sizes[-1] = num_edge - sum(sizes[:-1])
        generator = torch.Generator()
        generator.manual_seed(0)  # deterministic split
        return torch_data.random_split(self, sizes, generator=generator)
@R.register("datasets.PubMedLinkPrediction")
class PubMedLinkPrediction(datasets.PubMed):
    """PubMed citation graph repurposed for link prediction: samples are edges."""

    def __init__(self, **kwargs):
        super(PubMedLinkPrediction, self).__init__(**kwargs)
        # node-level transforms are irrelevant when samples are edges
        self.transform = None

    def __getitem__(self, index):
        return self.graph.edge_list[index]

    def __len__(self):
        return self.graph.num_edge

    def split(self, ratios=(85, 5, 10)):
        """Randomly split the edges into train / valid / test with a fixed seed."""
        num_edge = self.graph.num_edge
        total = sum(ratios)
        sizes = [int(ratio / total * num_edge) for ratio in ratios]
        # give any rounding remainder to the last split
        sizes[-1] = num_edge - sum(sizes[:-1])
        generator = torch.Generator()
        generator.manual_seed(0)  # deterministic split
        return torch_data.random_split(self, sizes, generator=generator)
@R.register("datasets.FB15k237Inductive")
class FB15k237Inductive(InductiveKnowledgeGraphDataset):
    """Inductive FB15k-237 splits (GraIL), downloaded per ``version``."""

    train_urls = [
        "https://raw.githubusercontent.com/kkteru/grail/master/data/fb237_%s/train.txt",
        "https://raw.githubusercontent.com/kkteru/grail/master/data/fb237_%s/valid.txt",
    ]
    test_urls = [
        "https://raw.githubusercontent.com/kkteru/grail/master/data/fb237_%s_ind/train.txt",
        "https://raw.githubusercontent.com/kkteru/grail/master/data/fb237_%s_ind/test.txt",
    ]

    def __init__(self, path, version="v1", verbose=1):
        path = os.path.expanduser(path)
        if not os.path.exists(path):
            os.makedirs(path)
        self.path = path

        train_files = self._fetch(self.train_urls, "fb15k237_%s" % version, version)
        test_files = self._fetch(self.test_urls, "fb15k237_%s_ind" % version, version)
        self.load_inductive_tsvs(train_files, test_files, verbose=verbose)

    def _fetch(self, urls, prefix, version):
        """Download each file (unless cached) and return the local paths."""
        local_files = []
        for url in urls:
            url = url % version
            save_file = "%s_%s" % (prefix, os.path.basename(url))
            txt_file = os.path.join(self.path, save_file)
            if not os.path.exists(txt_file):
                txt_file = utils.download(url, self.path, save_file=save_file)
            local_files.append(txt_file)
        return local_files
@R.register("datasets.WN18RRInductive")
class WN18RRInductive(InductiveKnowledgeGraphDataset):
    """Inductive WN18RR splits (GraIL), downloaded per ``version``."""

    train_urls = [
        "https://raw.githubusercontent.com/kkteru/grail/master/data/WN18RR_%s/train.txt",
        "https://raw.githubusercontent.com/kkteru/grail/master/data/WN18RR_%s/valid.txt",
    ]
    test_urls = [
        "https://raw.githubusercontent.com/kkteru/grail/master/data/WN18RR_%s_ind/train.txt",
        "https://raw.githubusercontent.com/kkteru/grail/master/data/WN18RR_%s_ind/test.txt",
    ]

    def __init__(self, path, version="v1", verbose=1):
        path = os.path.expanduser(path)
        if not os.path.exists(path):
            os.makedirs(path)
        self.path = path

        train_files = self._fetch(self.train_urls, "wn18rr_%s" % version, version)
        test_files = self._fetch(self.test_urls, "wn18rr_%s_ind" % version, version)
        self.load_inductive_tsvs(train_files, test_files, verbose=verbose)

    def _fetch(self, urls, prefix, version):
        """Download each file (unless cached) and return the local paths."""
        local_files = []
        for url in urls:
            url = url % version
            save_file = "%s_%s" % (prefix, os.path.basename(url))
            txt_file = os.path.join(self.path, save_file)
            if not os.path.exists(txt_file):
                txt_file = utils.download(url, self.path, save_file=save_file)
            local_files.append(txt_file)
        return local_files
@R.register("datasets.OGBLBioKG")
class OGBLBioKG(data.KnowledgeGraphDataset):
    """ogbl-biokg knowledge graph, loaded through the OGB package.

    Entities of all types are packed into a single contiguous index space;
    the official negative candidates are kept for evaluation.
    """

    def __init__(self, path, verbose=1):
        path = os.path.expanduser(path)
        self.path = path
        dataset = linkproppred.LinkPropPredDataset("ogbl-biokg", path)
        self.load_ogb(dataset, verbose=verbose)

    def load_ogb(self, dataset, verbose=1):
        """Build vocabularies, triplets and negatives from the OGB mapping files
        and the official edge split."""
        entity_vocab = []
        relation_vocab = []
        entity_type_vocab = []
        inv_entity_type_offset = {}  # entity type -> offset of its block in the packed index space
        entity_type2num = []
        zip_files = glob.glob(os.path.join(dataset.root, "mapping/*.gz"))
        for zip_file in zip_files:
            csv_file = utils.extract(zip_file)
            # NOTE(review): ``type`` shadows the builtin; the prefix of the file name is the entity type
            type = os.path.basename(csv_file).split("_")[0]
            with open(csv_file, "r") as fin:
                reader = csv.reader(fin)
                if verbose:
                    reader = iter(tqdm(reader, "Loading %s" % csv_file, utils.get_line_count(csv_file)))
                fields = next(reader)  # skip the header row
                if "relidx" in csv_file:
                    # relation index -> relation name
                    for index, token in reader:
                        relation_vocab.append(token)
                else:
                    # entities of this type occupy one contiguous index range
                    entity_type_vocab.append(type)
                    inv_entity_type_offset[type] = len(entity_vocab)
                    num_entity = 0
                    for index, token in reader:
                        entity_vocab.append("%s (%s)" % (type, token))
                        num_entity += 1
                    entity_type2num.append(num_entity)

        edge_split = dataset.get_edge_split()
        triplets = []
        num_samples = []
        num_samples_with_neg = []
        negative_heads = []
        negative_tails = []
        for key in ["train", "valid", "test"]:
            split_dict = edge_split[key]
            h = torch.as_tensor(split_dict["head"])
            t = torch.as_tensor(split_dict["tail"])
            r = torch.as_tensor(split_dict["relation"])
            # shift per-type entity ids into the packed index space
            h_type = torch.tensor([inv_entity_type_offset[h] for h in split_dict["head_type"]])
            t_type = torch.tensor([inv_entity_type_offset[t] for t in split_dict["tail_type"]])
            h = h + h_type
            t = t + t_type
            triplet = torch.stack([h, t, r], dim=-1)
            triplets.append(triplet)
            num_samples.append(len(h))
            if "head_neg" in split_dict:
                # official negative candidates, shifted the same way
                neg_h = torch.as_tensor(split_dict["head_neg"])
                neg_t = torch.as_tensor(split_dict["tail_neg"])
                neg_h = neg_h + h_type.unsqueeze(-1)
                neg_t = neg_t + t_type.unsqueeze(-1)
                negative_heads.append(neg_h)
                negative_tails.append(neg_t)
                num_samples_with_neg.append(len(h))
            else:
                num_samples_with_neg.append(0)
        triplets = torch.cat(triplets)

        self.load_triplet(triplets, entity_vocab=entity_vocab, relation_vocab=relation_vocab)
        entity_type_vocab, inv_entity_type_vocab = self._standarize_vocab(entity_type_vocab, None)
        self.entity_type_vocab = entity_type_vocab
        self.inv_entity_type_vocab = inv_entity_type_vocab
        self.num_samples = num_samples
        self.num_samples_with_neg = num_samples_with_neg
        self.negative_heads = torch.cat(negative_heads)
        self.negative_tails = torch.cat(negative_tails)

        # per-node entity type id, following the packed index order
        node_type = []
        for i, num_entity in enumerate(entity_type2num):
            node_type += [i] * num_entity
        with self.graph.node():
            self.graph.node_type = torch.tensor(node_type)

    def split(self, test_negative=True):
        """Split into train / valid / test.

        When ``test_negative`` is True, splits that come with official negatives
        are returned as dense (h, t, r) tensors where each positive is followed
        by its candidates: first half of the rows corrupt the tail, second half
        corrupt the head.
        """
        offset = 0
        neg_offset = 0
        splits = []
        for num_sample, num_sample_with_neg in zip(self.num_samples, self.num_samples_with_neg):
            if test_negative and num_sample_with_neg:
                pos_h, pos_t, pos_r = self[offset: offset + num_sample].t()
                neg_h = self.negative_heads[neg_offset: neg_offset + num_sample_with_neg]
                neg_t = self.negative_tails[neg_offset: neg_offset + num_sample_with_neg]
                num_negative = neg_h.shape[-1]
                # duplicate the batch: tail-corruption rows then head-corruption rows;
                # column 0 is always the positive sample
                h = pos_h.unsqueeze(-1).repeat(2, num_negative + 1)
                t = pos_t.unsqueeze(-1).repeat(2, num_negative + 1)
                r = pos_r.unsqueeze(-1).repeat(2, num_negative + 1)
                t[:num_sample_with_neg, 1:] = neg_t
                h[num_sample_with_neg:, 1:] = neg_h
                split = torch.stack([h, t, r], dim=-1)
            else:
                split = torch_data.Subset(self, range(offset, offset + num_sample))
            splits.append(split)
            offset += num_sample
            neg_offset += num_sample_with_neg
        return splits
| 14,201 | 39.005634 | 105 | py |
NBFNet | NBFNet-master/nbfnet/util.py | import os
import time
import logging
import argparse
import yaml
import jinja2
from jinja2 import meta
import easydict
import torch
from torch.utils import data as torch_data
from torch import distributed as dist
from torchdrug import core, utils
from torchdrug.utils import comm
logger = logging.getLogger(__file__)
def get_root_logger(file=True):
    """Return the root logger at INFO level, optionally logging to ``log.txt``
    in the current working directory.

    Fixes over the original: ``format`` no longer shadows the builtin, and
    repeated calls no longer attach a duplicate ``FileHandler`` each time.

    Parameters:
        file (bool): whether to attach a file handler writing to ``log.txt``

    Returns:
        logging.Logger: the configured root logger
    """
    logger = logging.getLogger("")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter("%(asctime)-10s %(message)s", "%H:%M:%S")

    if file:
        # only add a handler if no file handler is attached yet,
        # otherwise every call would duplicate each log line
        if not any(isinstance(h, logging.FileHandler) for h in logger.handlers):
            handler = logging.FileHandler("log.txt")
            handler.setFormatter(formatter)
            logger.addHandler(handler)
    return logger
def create_working_directory(cfg):
    """Create a timestamped working directory shared by all distributed workers,
    chdir into it and return its path.

    Rank 0 creates the directory and broadcasts its name to the other ranks
    through a temporary file, with barriers around the hand-off.
    """
    file_name = "working_dir.tmp"
    world_size = comm.get_world_size()
    if world_size > 1 and not dist.is_initialized():
        comm.init_process_group("nccl", init_method="env://")

    # layout: output_dir / task / dataset / model / timestamp
    working_dir = os.path.join(os.path.expanduser(cfg.output_dir),
                               cfg.task["class"], cfg.dataset["class"], cfg.task.model["class"],
                               time.strftime("%Y-%m-%d-%H-%M-%S"))

    # synchronize working directory
    if comm.get_rank() == 0:
        with open(file_name, "w") as fout:
            fout.write(working_dir)
        os.makedirs(working_dir)
    comm.synchronize()
    if comm.get_rank() != 0:
        # non-zero ranks read the name chosen by rank 0
        with open(file_name, "r") as fin:
            working_dir = fin.read()
    comm.synchronize()
    if comm.get_rank() == 0:
        os.remove(file_name)

    os.chdir(working_dir)  # NOTE: changes the process-wide current directory
    return working_dir
def detect_variables(cfg_file):
    """Return the undeclared Jinja2 variables referenced in the given config file."""
    with open(cfg_file, "r") as fin:
        raw_text = fin.read()
    template_ast = jinja2.Environment().parse(raw_text)
    return meta.find_undeclared_variables(template_ast)
def load_config(cfg_file, context=None):
    """Load a YAML config file, rendering its Jinja2 placeholders from *context*,
    and return it as an EasyDict."""
    with open(cfg_file, "r") as fin:
        rendered = jinja2.Template(fin.read()).render(context)
    return easydict.EasyDict(yaml.safe_load(rendered))
def parse_args():
    """Parse the static command-line arguments, then the dynamic arguments
    declared as Jinja2 variables in the config file.

    Returns:
        (argparse.Namespace, dict): static args and the rendered context values
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", help="yaml configuration file", required=True)
    parser.add_argument("-s", "--seed", help="random seed for PyTorch", type=int, default=1024)
    args, unparsed = parser.parse_known_args()

    # every undeclared Jinja2 variable in the config becomes a required CLI flag
    dynamic_parser = argparse.ArgumentParser()
    for name in detect_variables(args.config):
        dynamic_parser.add_argument("--%s" % name, required=True)
    namespace = dynamic_parser.parse_known_args(unparsed)[0]
    context = {key: utils.literal_eval(value) for key, value in namespace._get_kwargs()}
    return args, context
def build_solver(cfg, dataset):
    """Build a torchdrug Engine from the config and dataset.

    Splits the dataset, optionally subsamples valid / test sets for quick
    evaluation (``cfg.fast_test``), instantiates task and optimizer, and loads
    a checkpoint when ``cfg.checkpoint`` is given.
    """
    train_set, valid_set, test_set = dataset.split()
    if comm.get_rank() == 0:
        # only rank 0 logs dataset statistics
        logger.warning(dataset)
        logger.warning("#train: %d, #valid: %d, #test: %d" % (len(train_set), len(valid_set), len(test_set)))

    if "fast_test" in cfg:
        if comm.get_rank() == 0:
            logger.warning("Quick test mode on. Only evaluate on %d samples for valid / test." % cfg.fast_test)
        g = torch.Generator()
        g.manual_seed(1024)  # fixed seed so every run evaluates the same subset
        valid_set = torch_data.random_split(valid_set, [cfg.fast_test, len(valid_set) - cfg.fast_test], generator=g)[0]
        test_set = torch_data.random_split(test_set, [cfg.fast_test, len(test_set) - cfg.fast_test], generator=g)[0]
    if hasattr(dataset, "num_relation"):
        # propagate the relation count into the model config
        cfg.task.model.num_relation = dataset.num_relation

    task = core.Configurable.load_config_dict(cfg.task)
    cfg.optimizer.params = task.parameters()
    optimizer = core.Configurable.load_config_dict(cfg.optimizer)
    solver = core.Engine(task, train_set, valid_set, test_set, optimizer, **cfg.engine)
    if "checkpoint" in cfg:
        solver.load(cfg.checkpoint)
    return solver
gca-rom | gca-rom-main/main.py | import sys
sys.path.append('../')
import torch
from gca_rom import network, pde, loader, plotting, preprocessing, training, initialization, testing, error
import numpy as np
if __name__ == "__main__":
    # problem selection and network hyperparameters come from the command line
    problem_name, variable, mu1_range, mu2_range = pde.problem(int(sys.argv[1]))
    print("PROBLEM: ", problem_name, "for variable ", variable, "\n")

    AE_Params = network.AE_Params
    device = initialization.set_device()
    initialization.set_reproducibility(AE_Params)
    initialization.set_path(AE_Params)

    # load the snapshot dataset and build the graph data loaders
    dataset_dir = '../dataset/'+problem_name+'_unstructured.mat'
    dataset = loader.LoadDataset(dataset_dir, variable)

    dataset_graph, graph_loader, train_loader, test_loader, \
        val_loader, scaler_all, scaler_test, xx, yy, var, VAR_all, VAR_test, \
        train_trajectories, test_trajectories = preprocessing.graphs_dataset(dataset, AE_Params)

    # full tensor-product parameter space (mu1 x mu2), one row per snapshot
    mu1, mu2 = np.meshgrid(mu1_range, mu2_range)
    params = torch.tensor(np.vstack((mu1.T, mu2.T)).reshape(2, -1).T)
    params = params.to(device)
    print('Shape of parameter space:', params.shape, '\n')

    model = network.Net()
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=AE_Params.learning_rate, weight_decay=AE_Params.weight_decay)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=AE_Params.miles, gamma=AE_Params.gamma)
    history = dict(train=[], l1=[], l2=[])
    history_test = dict(test=[], l1=[], l2=[])
    min_test_loss = np.Inf

    try:
        # reuse a previously trained network when a checkpoint exists
        model.load_state_dict(torch.load(AE_Params.net_dir+AE_Params.net_name+AE_Params.net_run+'.pt', map_location=torch.device('cpu')))
        print('Loading saved network')
    except FileNotFoundError:
        print('Training net')
        # with torch.autograd.profiler.profile() as prof:
        for epoch in range(AE_Params.max_epochs):
            train_rmse = training.train(model, optimizer, device, scheduler, params, train_loader, train_trajectories, AE_Params, history)
            if AE_Params.cross_validation:
                test_rmse = training.val(model, device, params, test_loader, test_trajectories, AE_Params, history_test)
                print("Epoch[{}/{}, train_mse loss:{}, test_mse loss:{}".format(epoch + 1, AE_Params.max_epochs, history['train'][-1], history_test['test'][-1]))
            else:
                # no validation: track the training error for checkpointing instead
                test_rmse = train_rmse
                print("Epoch[{}/{}, train_mse loss:{}".format(epoch + 1, AE_Params.max_epochs, history['train'][-1]))
            if test_rmse < min_test_loss:
                # checkpoint the best model seen so far
                min_test_loss = test_rmse
                best_epoch = epoch
                torch.save(model.state_dict(), AE_Params.net_dir+AE_Params.net_name+AE_Params.net_run+'.pt')
            if AE_Params.tolerance >= train_rmse:
                print('Early stopping!')
                break
        np.save(AE_Params.net_dir+'history'+AE_Params.net_run+'.npy', history)
        np.save(AE_Params.net_dir+'history_test'+AE_Params.net_run+'.npy', history_test)
        # print(prof.key_averages().table(sort_by="self_cpu_time_total"))
        print("\nLoading best network for epoch: ", best_epoch)
        model.load_state_dict(torch.load(AE_Params.net_dir+AE_Params.net_name+AE_Params.net_run+'.pt', map_location=torch.device('cpu')))

    # evaluation runs on CPU
    model.to("cpu")
    params = params.to("cpu")
    vars = "GCA-ROM"

    # reconstruct every snapshot and plot diagnostics
    results, latents_map, latents_gca = testing.evaluate(VAR_all, model, graph_loader, params, AE_Params, range(params.shape[0]))
    plotting.plot_loss(AE_Params)
    plotting.plot_latent(AE_Params, latents_map, latents_gca)
    plotting.plot_error(results, VAR_all, scaler_all, AE_Params, mu1_range, mu2_range, params, train_trajectories, vars)

    # plot N randomly chosen snapshots and their error fields
    N = 5
    snapshots = np.arange(params.shape[0]).tolist()
    np.random.shuffle(snapshots)
    for SNAP in snapshots[0:N]:
        plotting.plot_fields(SNAP, results, scaler_all, AE_Params, dataset, xx, yy, params)
        plotting.plot_error_fields(SNAP, results, VAR_all, scaler_all, AE_Params, dataset, xx, yy, params)

    # quantitative error on the held-out test trajectories
    results_test, _, _ = testing.evaluate(VAR_test, model, val_loader, params, AE_Params, test_trajectories)
    error_abs, norm = error.compute_error(results_test, VAR_test, scaler_test, AE_Params)
    error.print_error(error_abs, norm, vars)
    error.save_error(error_abs, norm, AE_Params, vars)
| 4,087 | 45.454545 | 157 | py |
gca-rom | gca-rom-main/gca_rom/preprocessing.py | import numpy as np
import torch
from torch_geometric.data import Data
from torch_geometric.loader import DataLoader
from gca_rom import scaling
def graphs_dataset(dataset, AE_Params):
    """
    graphs_dataset: function to process and scale the input dataset for graph autoencoder model.

    Inputs:
    dataset: an object containing the dataset to be processed.
    AE_Params: an object containing the hyperparameters of the graph autoencoder model.

    Outputs:
    dataset_graph: an object containing the processed and scaled dataset.
    loader: a DataLoader object of the processed and scaled dataset.
    train_loader: a DataLoader object of the training set.
    test_loader: a DataLoader object of the test set.
    val_loader: a DataLoader object of the validation set.
    scaler_all: a scaler object to scale the entire dataset.
    scaler_test: a scaler object to scale the test set.
    xx: an array of the x-coordinate of the nodes.
    yy: an array of the y-coordinate of the nodes.
    var: an array of the node features.
    VAR_all: an array of the scaled node features of the entire dataset.
    VAR_test: an array of the scaled node features of the test set.
    train_snapshots: a list of indices of the training set.
    test_snapshots: a list of indices of the test set.

    Side effect: AE_Params.num_nodes is set to the number of nodes per graph.
    """

    xx = dataset.xx
    yy = dataset.yy
    var = dataset.U

    # PROCESSING DATASET
    num_nodes = var.shape[0]
    num_graphs = var.shape[1]
    print("Number of nodes processed: ", num_nodes)
    print("Number of graphs processed: ", num_graphs)
    total_sims = int(num_graphs)
    rate = AE_Params.rate/100  # training fraction, given as a percentage
    train_sims = int(rate * total_sims)
    test_sims = total_sims - train_sims
    # random train / test split of the snapshot indices
    main_loop = np.arange(total_sims).tolist()
    np.random.shuffle(main_loop)

    train_snapshots = main_loop[0:train_sims]
    train_snapshots.sort()
    test_snapshots = main_loop[train_sims:total_sims]
    test_snapshots.sort()

    ## FEATURE SCALING
    var_test = dataset.U[:, test_snapshots]

    scaling_type = AE_Params.scaling_type
    scaler_all, VAR_all = scaling.tensor_scaling(var, scaling_type)
    scaler_test, VAR_test = scaling.tensor_scaling(var_test, scaling_type)
    print("Shape of scaled dataset: ", VAR_all.shape)
    print("Shape of scaled test set: ", VAR_test.shape)

    graphs = []
    edge_index = torch.t(dataset.E) - 1  # 1-based mesh connectivity -> 0-based edge index
    for graph in range(num_graphs):
        # node positions for this snapshot
        pos = torch.cat((xx[:, graph].unsqueeze(1), yy[:, graph].unsqueeze(1)), 1)
        ei = torch.index_select(pos, 0, edge_index[0, :])
        ej = torch.index_select(pos, 0, edge_index[1, :])
        # edge feature: Euclidean length of each edge
        edge_attr = ej - ei
        edge_attr = torch.sqrt(torch.pow(edge_attr[:, 0], 2) + torch.pow(edge_attr[:, 1], 2))
        node_features = VAR_all[graph, :]
        dataset_graph = Data(x=node_features, edge_index=edge_index, edge_attr=edge_attr, pos=pos)
        graphs.append(dataset_graph)

    AE_Params.num_nodes = dataset_graph.num_nodes

    train_dataset = [graphs[i] for i in train_snapshots]
    test_dataset = [graphs[i] for i in test_snapshots]

    # each training / test loader yields its whole split in one batch;
    # val_loader yields the test graphs one by one
    loader = DataLoader(graphs, batch_size=1)
    train_loader = DataLoader(train_dataset, batch_size=train_sims, shuffle=False)
    test_loader = DataLoader(test_dataset, batch_size=test_sims, shuffle=False)
    val_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)

    return dataset_graph, loader, train_loader, test_loader, \
        val_loader, scaler_all, scaler_test, xx, yy, var, VAR_all, VAR_test, train_snapshots, test_snapshots
| 3,515 | 38.954545 | 112 | py |
gca-rom | gca-rom-main/gca_rom/network.py | import sys
import torch
from torch import nn
from gca_rom import gca, scaling, pde
# select the PDE problem (name, variable of interest, parameter ranges) from the CLI
problem_name, variable, mu1_range, mu2_range = pde.problem(int(sys.argv[1]))
class AE_Params():
    """Hyperparameter container for the GCA-ROM autoencoder run.

    Most values are parsed *positionally* from ``sys.argv`` when the module
    is imported:

        argv[2]  scaling_type   (1-4, see scaling.tensor_scaling)
        argv[3]  scaler code    (1 minmax, 2 robust, 3 standard)
        argv[4]  skip           (skip-connection flag)
        argv[5]  rate           (percentage of snapshots used for training)
        argv[6]  ffn            (width of the fully-connected layer)
        argv[7]  nodes          (width of the parameter-map MLP layers)
        argv[8]  bottleneck_dim (latent dimension)
        argv[9]  lambda_map     (weight of the latent-map loss)
        argv[10] in_channels    (number of graph conv layers)

    The remaining attributes are fixed defaults (seed, learning rate,
    scheduler settings, ...). ``net_dir`` encodes the whole configuration
    into the output directory name so runs never overwrite each other.
    """
    def __init__(self):
        # --- CLI-driven configuration (positional sys.argv parsing) ---
        self.scaling_type = int(sys.argv[2])
        _, self.scaler_name = scaling.scaler_functions(int(sys.argv[3]))
        self.skip = int(sys.argv[4])
        self.rate = int(sys.argv[5])
        self.sparse_method = 'L1_mean'
        self.ffn = int(sys.argv[6])
        self.nodes = int(sys.argv[7])
        self.bottleneck_dim = int(sys.argv[8])
        self.lambda_map = float(sys.argv[9])
        self.in_channels = int(sys.argv[10])
        # --- fixed training defaults ---
        self.seed = 10
        self.tolerance = 1e-6
        self.learning_rate = 0.001
        # one channel per conv layer (all widths equal to 1)
        self.hidden_channels = [1]*self.in_channels
        self.act = torch.tanh
        # Shape of the parameter-to-latent MLP: 2 inputs -> bottleneck output.
        self.layer_vec=[2, self.nodes, self.nodes, self.nodes, self.nodes, self.bottleneck_dim]
        self.net_name = problem_name
        self.net_run = '_' + self.scaler_name
        self.weight_decay = 0.00001
        self.max_epochs = 5000
        self.miles = []
        self.gamma = 0.0001
        self.num_nodes = 0  # placeholder; overwritten once the mesh is loaded
        # Output directory name encodes every hyperparameter for traceability.
        self.net_dir = './' + problem_name + '/' + self.net_run+ '/' + variable + '_' + self.net_name + '_lmap' + str(self.lambda_map) + '_btt' + str(self.bottleneck_dim) \
                        + '_seed' + str(self.seed) + '_lv' + str(len(self.layer_vec)-2) + '_hc' + str(len(self.hidden_channels)) + '_nd' + str(self.nodes) \
                        + '_ffn' + str(self.ffn) + '_skip' + str(self.skip) + '_lr' + str(self.learning_rate) + '_sc' + str(self.scaling_type) + '_rate' + str(self.rate) + '/'
        self.cross_validation = True
# NOTE: the class name is rebound to a singleton instance here, so importers
# of this module see a configured object rather than the class itself.
AE_Params = AE_Params()
class Net(torch.nn.Module):
    """Graph convolutional autoencoder with a parameter-to-latent map.

    Combines a graph encoder/decoder pair (``gca.Encoder`` / ``gca.Decoder``)
    with a small MLP (``maptovec``) that maps PDE parameters directly to the
    latent space, so that at inference time the encoder can be bypassed and
    a solution reconstructed from the parameters alone.

    Attributes:
        encoder: graph encoder (graph -> latent vector).
        decoder: graph decoder (latent vector + graph -> nodal values).
        act_map: activation used between the map-MLP layers (AE_Params.act).
        layer_vec: layer widths of the parameter-map MLP (AE_Params.layer_vec).
        steps: number of linear layers in the map MLP.
        maptovec: the map MLP as a ModuleList of nn.Linear layers.
    """
    def __init__(self):
        super().__init__()
        self.encoder = gca.Encoder(AE_Params.hidden_channels, AE_Params.bottleneck_dim, AE_Params.num_nodes, ffn=AE_Params.ffn, skip=AE_Params.skip)
        self.decoder = gca.Decoder(AE_Params.hidden_channels, AE_Params.bottleneck_dim, AE_Params.num_nodes, ffn=AE_Params.ffn, skip=AE_Params.skip)
        self.act_map = AE_Params.act
        self.layer_vec = AE_Params.layer_vec
        self.steps = len(self.layer_vec) - 1
        self.maptovec = nn.ModuleList()
        for k in range(self.steps):
            self.maptovec.append(nn.Linear(self.layer_vec[k], self.layer_vec[k+1]))
    def solo_encoder(self, data):
        """Encode a graph batch into the latent space."""
        return self.encoder(data)
    def solo_decoder(self, x, data):
        """Decode latent vector `x` back to nodal values on `data`'s mesh."""
        return self.decoder(x, data)
    def mapping(self, x):
        """Map PDE parameters `x` to an estimated latent vector.

        Bug fix: the original guard was ``idx == self.steps``, which is never
        true (the loop index only reaches ``steps - 1``), so the activation
        was also applied to the final layer. The final layer is now linear,
        matching the unbounded encoder latent it is trained to reproduce.
        """
        for idx, layer in enumerate(self.maptovec):
            if idx == self.steps - 1:
                x = layer(x)  # last layer: no activation
            else:
                x = self.act_map(layer(x))
        return x
    def forward(self, data, parameters):
        """Return (reconstruction, encoder latent, mapped latent)."""
        z = self.solo_encoder(data)
        z_estimation = self.mapping(parameters)
        x = self.solo_decoder(z, data)
        # Alternative (inference-style) path: decode the mapped latent instead:
        # x = self.solo_decoder(z_estimation, data)
        return x, z, z_estimation
| 5,946 | 39.732877 | 179 | py |
gca-rom | gca-rom-main/gca_rom/training.py | import torch
import torch.nn.functional as F
def train(model, optimizer, device, scheduler, params, train_loader, train_trajectories, AE_Params, history):
    """Run one training epoch of the graph autoencoder.

    For every mini-batch the loss is the reconstruction MSE plus
    ``AE_Params.lambda_map`` times the latent-map MSE (between the latent
    vector estimated from the parameters and the encoder output). Gradients
    are applied with `optimizer`; the learning-rate scheduler is advanced
    once per epoch. Per-epoch averages are appended to `history` under the
    keys 'train' (total), 'l1' (reconstruction) and 'l2' (map).

    Args:
        model: the autoencoder network.
        optimizer: optimizer updating the model parameters.
        device: device string/object the batches are moved to.
        scheduler: learning-rate scheduler, stepped once per epoch.
        params: tensor of PDE parameters, one row per snapshot.
        train_loader: iterable of training graph batches.
        train_trajectories: indices of the training snapshots in `params`.
        AE_Params: hyperparameter object (only `lambda_map` is read here).
        history: dict collecting the per-epoch loss curves.

    Returns:
        float: average combined loss over the processed batches.
    """
    model.train()
    n_batches = 0
    loss_sum = mse_sum = map_sum = 0.0
    for batch in train_loader:
        optimizer.zero_grad()
        batch = batch.to(device)
        out, z, z_hat = model(batch, params[train_trajectories, :])
        mse_term = F.mse_loss(out, batch.x, reduction='mean')
        map_term = F.mse_loss(z_hat, z, reduction='mean')
        combined = mse_term + AE_Params.lambda_map * map_term
        combined.backward()
        optimizer.step()
        loss_sum += combined.item()
        mse_sum += mse_term.item()
        map_sum += map_term.item()
        n_batches += 1
    # One scheduler step per epoch, after all mini-batches.
    scheduler.step()
    avg_total = loss_sum / n_batches
    history['train'].append(avg_total)
    history['l1'].append(mse_sum / n_batches)
    history['l2'].append(map_sum / n_batches)
    return avg_total
def val(model, device, params, test_loader, test_trajectories, AE_Params, history_test):
    """Evaluate the autoencoder on the test set (no gradients).

    Computes, per batch, the reconstruction MSE and the latent-map MSE
    (between the parameter-mapped latent and the encoder latent), combined
    as ``mse + AE_Params.lambda_map * map``. The per-epoch averages are
    appended to `history_test` under 'test' (total), 'l1' and 'l2'.

    Args:
        model: the autoencoder network (switched to eval mode).
        device: device the batches are moved to.
        params: tensor of PDE parameters, one row per snapshot.
        test_loader: iterable of test graph batches.
        test_trajectories: indices of the test snapshots in `params`.
        AE_Params: hyperparameter object (only `lambda_map` is read here).
        history_test: dict collecting the evaluation loss curves.

    Returns:
        float: average combined loss over the test batches.
    """
    with torch.no_grad():
        model.eval()
        n_batches = 0
        tot_sum = mse_sum = map_sum = 0.0
        for batch in test_loader:
            batch = batch.to(device)
            out, z, z_hat = model(batch, params[test_trajectories, :])
            mse_term = F.mse_loss(out, batch.x, reduction='mean')
            map_term = F.mse_loss(z_hat, z, reduction='mean')
            tot_sum += (mse_term + AE_Params.lambda_map * map_term).item()
            mse_sum += mse_term.item()
            map_sum += map_term.item()
            n_batches += 1
        avg_total = tot_sum / n_batches
        history_test['test'].append(avg_total)
        history_test['l1'].append(mse_sum / n_batches)
        history_test['l2'].append(map_sum / n_batches)
    return avg_total
| 5,063 | 48.647059 | 478 | py |
gca-rom | gca-rom-main/gca_rom/testing.py | import torch
from tqdm import tqdm
import numpy as np
def evaluate(VAR, model, loader, params, AE_Params, test):
    """Evaluate a trained autoencoder on the test snapshots.

    For each graph in `loader`, the snapshot is encoded both with the
    model's encoder and with the parameter-to-latent mapping; the mapped
    latent is decoded to obtain the predicted solution. The mapped latents
    are also written to ``<net_dir>/latents<net_run>.csv``.

    Args:
        VAR: ground-truth snapshot tensor; only its shape is used to size
            the output buffers.
        model: trained autoencoder exposing solo_encoder/mapping/solo_decoder.
        loader: data loader yielding one graph per test snapshot, in the
            same order as `test`.
        params: tensor of PDE parameters, one row per snapshot.
        AE_Params: hyperparameter object (bottleneck_dim, net_dir, net_run).
        test: indices of the test snapshots in `params`.

    Returns:
        tuple: (results, latents_map, latents_gca) where `results` holds the
        decoded predictions and the latents come from the map / encoder.
    """
    results = torch.zeros(VAR.shape[0], VAR.shape[1], 1)
    latents_map = torch.zeros(VAR.shape[0], AE_Params.bottleneck_dim)
    latents_gca = torch.zeros(VAR.shape[0], AE_Params.bottleneck_dim)
    latents_error = []
    with torch.no_grad():
        # enumerate replaces the original manual `index = 0 / index += 1` counter
        for index, data in enumerate(tqdm(loader)):
            z_net = model.solo_encoder(data)
            z_map = model.mapping(params[test[index], :])
            latents_map[index, :] = z_map
            latents_gca[index, :] = z_net
            # Relative discrepancy between encoder latent and mapped latent.
            latents_error.append(np.linalg.norm(z_net - z_map) / np.linalg.norm(z_net))
            results[index, :, :] = model.solo_decoder(z_map, data)
    np.savetxt(AE_Params.net_dir+'latents'+AE_Params.net_run+'.csv', latents_map.detach(), delimiter=',')
    latents_error = np.array(latents_error)
    return results, latents_map, latents_gca
gca-rom | gca-rom-main/gca_rom/initialization.py | import os
import torch
import numpy as np
import random
import warnings
def set_device():
    """Select the compute device and configure global torch defaults.

    Side effects: sets the default tensor dtype to float64 and silences
    Python warnings for the rest of the run.

    Returns:
        str: 'cuda' when a GPU is visible to torch, otherwise 'cpu'.
    """
    chosen = 'cuda' if torch.cuda.is_available() else 'cpu'
    print("Device used = ", chosen)
    torch.set_default_dtype(torch.float64)
    warnings.filterwarnings("ignore")
    return chosen
def set_reproducibility(AE_Params):
    """Seed every RNG used during training so runs are reproducible.

    Args:
        AE_Params: hyperparameter object; only its ``seed`` attribute is read.
    """
    seed_value = AE_Params.seed
    random.seed(seed_value)
    np.random.seed(seed_value)
    torch.manual_seed(seed_value)
    torch.cuda.manual_seed(seed_value)
    torch.cuda.manual_seed_all(seed_value)
    # Disable cuDNN autotuning-capable kernels for deterministic results.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def set_path(AE_Params):
    """Ensure the directory for the network results exists.

    Args:
        AE_Params: hyperparameter object; only its ``net_dir`` attribute
            is read.
    """
    # exist_ok=True replaces the original check-then-create sequence: it is
    # race-free and idempotent (intermediate directories are created too).
    os.makedirs(AE_Params.net_dir, exist_ok=True)
| 1,224 | 21.685185 | 89 | py |
gca-rom | gca-rom-main/gca_rom/loader.py | import sys
from torch_geometric.data import Dataset
import torch
import scipy
class LoadDataset(Dataset):
    """Graph dataset backed by a MATLAB ``.mat`` file.

    Loads the snapshot matrix for the requested variable together with the
    mesh description:

    * ``U``  -- snapshot matrix of the requested ``variable``
    * ``xx`` / ``yy`` -- x / y coordinates of the mesh nodes
    * ``T``  -- integer table (adjacency/element data per the original docs)
    * ``E``  -- integer table (edge/connection data per the original docs)

    The raw ``scipy.io.loadmat`` dictionary is kept in ``data_mat``.
    """

    def __init__(self, root_dir, variable):
        raw = scipy.io.loadmat(root_dir)
        self.data_mat = raw
        self.U = torch.tensor(raw[variable])
        self.xx = torch.tensor(raw['xx'])
        self.yy = torch.tensor(raw['yy'])
        # Connectivity tables are stored as floats in the .mat file;
        # cast to int before building tensors.
        self.T = torch.tensor(raw['T'].astype(int))
        self.E = torch.tensor(raw['E'].astype(int))
| 1,488 | 41.542857 | 172 | py |
gca-rom | gca-rom-main/gca_rom/gca.py | import torch
from torch import nn
import torch.nn.functional as F
from torch_geometric.nn import GMMConv
class Encoder(torch.nn.Module):
    """Graph convolutional encoder.

    Stacks GMMConv (Gaussian mixture model convolution) layers over the mesh
    graph — optionally with residual connections back to the raw node
    features — then flattens all node features per graph and compresses them
    through a two-layer MLP down to a ``bottleneck``-dimensional latent.

    Args:
        hidden_channels (list[int]): channel width of each conv stage; one
            GMMConv is built per consecutive pair.
        bottleneck (int): latent dimension.
        input_size (int): number of mesh nodes per graph.
        ffn (int): width of the intermediate fully-connected layer.
        skip: if truthy, add the raw node features after every conv layer.
        act (callable): activation function (default ``F.elu``).
    """
    def __init__(self, hidden_channels, bottleneck, input_size, ffn, skip, act=F.elu):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.depth = len(self.hidden_channels)
        self.act = act
        self.ffn = ffn
        self.skip = skip
        self.bottleneck = bottleneck
        self.input_size = input_size
        self.down_convs = torch.nn.ModuleList()
        # One GMMConv per consecutive channel pair; edges carry a single
        # pseudo-coordinate (dim=1) with a 5-component Gaussian kernel.
        for i in range(self.depth-1):
            self.down_convs.append(GMMConv(self.hidden_channels[i], self.hidden_channels[i+1], dim=1, kernel_size=5))
        self.fc_in1 = nn.Linear(self.input_size*self.hidden_channels[-1], self.ffn)
        self.fc_in2 = nn.Linear(self.ffn, self.bottleneck)
        self.reset_parameters()
    def encoder(self, data):
        """Encode a batch of graphs into latent vectors (one row per graph)."""
        # edge_attr holds scalar edge weights (edge lengths in this project).
        edge_weight = data.edge_attr
        edge_index = data.edge_index
        x = data.x
        idx = 0
        for layer in self.down_convs:
            x = self.act(layer(x, edge_index, edge_weight.unsqueeze(1)))
            if self.skip:
                # Residual connection back to the *original* input features.
                x = x + data.x
            idx += 1
        # Flatten all node features of each graph into a single row.
        x = x.reshape(data.num_graphs, self.input_size * self.hidden_channels[-1])
        x = self.act(self.fc_in1(x))
        # No activation on the last layer: the latent is unbounded.
        x = self.fc_in2(x)
        return x
    def reset_parameters(self):
        """Re-initialize conv weights (Kaiming uniform) and zero the biases."""
        for conv in self.down_convs:
            conv.reset_parameters()
            for name, param in conv.named_parameters():
                if 'bias' in name:
                    nn.init.constant_(param, 0)
                else:
                    nn.init.kaiming_uniform_(param)
    def forward(self,data):
        x = self.encoder(data)
        return x
class Decoder(torch.nn.Module):
    """Graph convolutional decoder (mirror of ``Encoder``).

    Expands a ``bottleneck``-dimensional latent through a two-layer MLP back
    to per-node features, then applies GMMConv layers with the channel
    widths of ``hidden_channels`` traversed in reverse order.

    Args:
        hidden_channels (list[int]): channel widths (same list as the
            encoder; traversed back-to-front here).
        bottleneck (int): latent dimension.
        input_size (int): number of mesh nodes per graph.
        ffn (int): width of the intermediate fully-connected layer.
        skip: if truthy, add the MLP output ``h`` back after each conv layer.
        act (callable): activation function (default ``F.elu``).
    """
    def __init__(self, hidden_channels, bottleneck, input_size, ffn, skip, act=F.elu):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.depth = len(self.hidden_channels)
        self.act = act
        self.ffn = ffn
        self.skip = skip
        self.bottleneck = bottleneck
        self.input_size = input_size
        self.fc_out1 = nn.Linear(self.bottleneck, self.ffn)
        self.fc_out2 = nn.Linear(self.ffn, self.input_size * self.hidden_channels[-1])
        self.up_convs = torch.nn.ModuleList()
        # Conv layers walk the channel widths in reverse (decoder mirror).
        for i in range(self.depth-1):
            self.up_convs.append(GMMConv(self.hidden_channels[self.depth-1-i], self.hidden_channels[self.depth-i-2], dim=1, kernel_size=5))
        self.reset_parameters()
    def decoder(self, x, data):
        """Decode latent `x` into per-node values on `data`'s mesh."""
        edge_weight = data.edge_attr
        edge_index = data.edge_index
        x = self.act(self.fc_out1(x))
        x = self.act(self.fc_out2(x))
        # h: per-node features, reused as the residual for skip connections.
        h = x.reshape(data.num_graphs*self.input_size, self.hidden_channels[-1])
        x = h
        idx = 0
        for layer in self.up_convs:
            if (idx == self.depth - 2):
                # Last conv layer: no activation (raw nodal output).
                x = layer(x, edge_index, edge_weight.unsqueeze(1))
            else:
                x = self.act(layer(x, edge_index, edge_weight.unsqueeze(1)))
            # NOTE: the skip addition applies after *both* branches above,
            # i.e. also on the final (un-activated) layer output.
            if self.skip:
                x = x + h
            idx += 1
        return x
    def reset_parameters(self):
        """Re-initialize conv weights (Kaiming uniform) and zero the biases."""
        for conv in self.up_convs:
            conv.reset_parameters()
            for name, param in conv.named_parameters():
                if 'bias' in name:
                    nn.init.constant_(param, 0)
                else:
                    nn.init.kaiming_uniform_(param)
    def forward(self, x, data):
        x = self.decoder(x, data)
        return x
| 5,918 | 36.226415 | 139 | py |
gca-rom | gca-rom-main/gca_rom/scaling.py | from sklearn import preprocessing
import torch
import sys
def scaler_functions(k):
match k:
case 1:
sc_name = "minmax"
sc_fun = preprocessing.MinMaxScaler()
case 2:
sc_name = "robust"
sc_fun = preprocessing.RobustScaler()
case 3:
sc_name = "standard"
sc_fun = preprocessing.StandardScaler()
return sc_fun, sc_name
def tensor_scaling(tensor, scaling_type):
scaling_fun_1, _ = scaler_functions(int(sys.argv[3]))
scaling_fun_2, _ = scaler_functions(int(sys.argv[3]))
match scaling_type:
case 1:
# print("SAMPLE SCALING")
scale = scaling_fun_1.fit(tensor)
scaled_data = torch.unsqueeze(torch.tensor(scale.transform(tensor)),0).permute(2, 1, 0)
case 2:
# print("FEATURE SCALING")
scale = scaling_fun_1.fit(torch.t(tensor))
scaled_data = torch.unsqueeze(torch.tensor(scale.transform(torch.t(tensor))), 0).permute(1, 2, 0)
case 3:
# print("FEATURE-SAMPLE SCALING")
scaler_f = scaling_fun_1.fit(torch.t(tensor))
temp = torch.tensor(scaler_f.transform(torch.t(tensor)))
scaler_s = scaling_fun_2.fit(temp)
scaled_data = torch.unsqueeze(torch.tensor(scaler_s.transform(temp)), 0).permute(1, 2, 0)
scale = [scaler_f, scaler_s]
case 4:
# print("SAMPLE-FEATURE SCALING")
scaler_s = scaling_fun_1.fit(tensor)
temp = torch.t(torch.tensor(scaler_s.transform(tensor)))
scaler_f = scaling_fun_2.fit(temp)
scaled_data = torch.unsqueeze(torch.t(torch.tensor(scaler_f.transform(temp))), 0).permute(2, 1, 0)
scale = [scaler_s, scaler_f]
return scale, scaled_data
def inverse_scaling(tensor, scale, scaling_type):
match scaling_type:
case 1:
# print("SAMPLE SCALING")
rescaled_data = torch.tensor(scale.inverse_transform(torch.t(torch.tensor(tensor[:, :, 0].detach().numpy().squeeze()))))
case 2:
# print("FEATURE SCALING")
rescaled_data = torch.tensor(torch.t(torch.tensor(scale.inverse_transform(tensor[:, :, 0].detach().numpy().squeeze()))))
case 3:
# print("FEATURE-SAMPLE SCALING")
scaler_f = scale[0]
scaler_s = scale[1]
rescaled_data = torch.t(torch.tensor(scaler_f.inverse_transform(torch.tensor(scaler_s.inverse_transform(tensor[:, :, 0].detach().numpy().squeeze())))))
case 4:
# print("SAMPLE-FEATURE SCALING")
scaler_s = scale[0]
scaler_f = scale[1]
rescaled_data = torch.tensor(scaler_s.inverse_transform(torch.t(torch.tensor(scaler_f.inverse_transform(tensor[:, :, 0].detach().numpy().squeeze())))))
return rescaled_data
| 2,869 | 41.205882 | 163 | py |
CRST | CRST-master/evaluate.py | import argparse
import scipy
from scipy import ndimage
import numpy as np
import sys
from packaging import version
import time
import util
import torch
import torchvision.models as models
import torch.nn.functional as F
from torch.utils import data, model_zoo
from deeplab.model import Res_Deeplab
from deeplab.datasets import GTA5TestDataSet
from collections import OrderedDict
import os
from PIL import Image
import matplotlib.pyplot as plt
import torch.nn as nn
# Legacy un-normalized mean (kept for reference):
# IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
# ImageNet normalization statistics, stored in BGR channel order.
IMG_MEAN = np.array((0.406, 0.456, 0.485), dtype=np.float32) # BGR
IMG_STD = np.array((0.225, 0.224, 0.229), dtype=np.float32) # BGR
# Default dataset locations and evaluation settings.
DATA_DIRECTORY = './dataset/cityscapes'
DATA_LIST_PATH = './dataset/list/cityscapes/val.lst'
SAVE_PATH = './cityscapes/eval'
TEST_IMAGE_SIZE = '1024,2048'
TEST_SCALE = 1.0
IGNORE_LABEL = 255
NUM_CLASSES = 19
NUM_STEPS = 500 # Number of images in the validation set.
RESTORE_FROM = './src_model/gta5/src_model.pth'
DATA_SRC = 'cityscapes'
SET = 'val'
LOG_FILE = 'log'
MODEL = 'DeeplabRes'
def get_arguments():
    """Parse the command-line options for the evaluation script.

    Returns:
        argparse.Namespace: all parsed arguments with module-level defaults.
    """
    cli = argparse.ArgumentParser(description="DeepLab-ResNet Network")
    cli.add_argument("--model", type=str, default=MODEL,
                     help="Model Choice (DeeplabMulti/DeeplabVGG).")
    cli.add_argument("--data-src", type=str, default=DATA_SRC,
                     help="Data name.")
    cli.add_argument("--data-dir", type=str, default=DATA_DIRECTORY,
                     help="Path to the directory containing the Cityscapes dataset.")
    cli.add_argument("--data-list", type=str, default=DATA_LIST_PATH,
                     help="Path to the file listing the images in the dataset.")
    cli.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
                     help="The index of the label to ignore during the training.")
    cli.add_argument("--num-classes", type=int, default=NUM_CLASSES,
                     help="Number of classes to predict (including background).")
    cli.add_argument("--restore-from", type=str, default=RESTORE_FROM,
                     help="Where restore model parameters from.")
    cli.add_argument("--gpu", type=int, default=0,
                     help="choose gpu device.")
    cli.add_argument('--test-flipping', dest='test_flipping',
                     help='If average predictions of original and flipped images.',
                     default=False, action='store_true')
    cli.add_argument("--set", type=str, default=SET,
                     help="choose evaluation set.")
    cli.add_argument("--save", type=str, default=SAVE_PATH,
                     help="Path to save result.")
    cli.add_argument("--log-file", type=str, default=LOG_FILE,
                     help="The name of log file.")
    cli.add_argument('--debug', help='True means logging debug info.',
                     default=False, action='store_true')
    cli.add_argument('--test-scale', type=str, default=TEST_SCALE,
                     help='The test scales. Multi-scale supported')
    cli.add_argument('--test-image-size', default=TEST_IMAGE_SIZE,
                     help='The test image size',
                     type=str)
    return cli.parse_args()
args = get_arguments()
# Colour palette: a flat list of RGB triplets, one per trainId class, used
# to colourise predicted label masks. The choice depends on the source
# domain the model was trained on.
if args.data_src == 'gta' or args.data_src == 'cityscapes':
    # 19-class GTA5 / Cityscapes palette
    palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
               220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
               0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
elif args.data_src == 'synthia':
    # 16-class SYNTHIA palette
    palette = [128,64,128,244,35,232,70,70,70,102,102,156,64,64,128,153,153,153,250,170,30,220,220,0,
               107,142,35,70,130,180,220,20,60,255,0,0,0,0,142,0,60,100,0,0,230,119,11,32]
# PIL 'P'-mode palettes must cover 256 colours: pad the remainder with zeros.
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
    palette.append(0)
def colorize_mask(mask):
    """Turn a label mask (H x W numpy array of trainIds) into a palettised PIL image."""
    colored = Image.fromarray(mask.astype(np.uint8)).convert('P')
    colored.putpalette(palette)
    return colored
def main():
    """Create the model and start the evaluation process."""
    device = torch.device("cuda:" + str(args.gpu))
    if not os.path.exists(args.save):
        os.makedirs(args.save)
    logger = util.set_logger(args.save, args.log_file, args.debug)
    logger.info('start with arguments %s', args)
    # Count the evaluation images by counting lines in the list file.
    x_num = 0
    with open(args.data_list) as f:
        for _ in f.readlines():
            x_num = x_num + 1
    # Label-id conversion helpers live next to the dataset.
    sys.path.insert(0, 'dataset/helpers')
    if args.data_src == 'gta' or args.data_src == 'cityscapes':
        from labels import id2label, trainId2label
    elif args.data_src == 'synthia':
        from labels_cityscapes_synthia import id2label, trainId2label
    # Build raw-id -> trainId lookup (255 marks ignored labels).
    label_2_id = 255 * np.ones((256,))
    for l in id2label:
        if l in (-1, 255):
            continue
        label_2_id[l] = id2label[l].trainId
    id_2_label = np.array([trainId2label[_].id for _ in trainId2label if _ not in (-1, 255)])
    valid_labels = sorted(set(id_2_label.ravel()))
    scorer = ScoreUpdater(valid_labels, args.num_classes, x_num, logger)
    scorer.reset()
    if args.model == 'DeeplabRes':
        model = Res_Deeplab(num_classes=args.num_classes)
    # elif args.model == 'DeeplabVGG':
    #     model = DeeplabVGG(num_classes=args.num_classes)
    #     if args.restore_from == RESTORE_FROM:
    #         args.restore_from = RESTORE_FROM_VGG
    # Restore weights either from a URL (skipping the fc head) or a local file.
    if args.restore_from[:4] == 'http' :
        saved_state_dict = model_zoo.load_url(args.restore_from)
        new_params = model.state_dict().copy()
        for i in saved_state_dict:
            # Scale.layer5.conv2d_list.3.weight
            i_parts = str(i).split('.')
            # print i_parts
            if not i_parts[0] == 'fc':
                new_params['.'.join(i_parts[0:])] = saved_state_dict[i]
    else:
        loc = "cuda:" + str(args.gpu)
        saved_state_dict = torch.load(args.restore_from,map_location=loc)
        new_params = saved_state_dict.copy()
    model.load_state_dict(new_params)
    #model.train()
    model.eval()
    model.to(device)
    testloader = data.DataLoader(GTA5TestDataSet(args.data_dir, args.data_list, test_scale = 1.0, test_size=(1024, 512), mean=IMG_MEAN, std=IMG_STD, scale=False, mirror=False),
                                    batch_size=1, shuffle=False, pin_memory=True)
    test_scales = [float(_) for _ in str(args.test_scale).split(',')]
    h, w = map(int, args.test_image_size.split(','))
    # Upsampler back to full test-image resolution (API differs across torch versions).
    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=(h, w), mode='bilinear', align_corners=True)
    else:
        interp = nn.Upsample(size=(h, w), mode='bilinear')
    test_image_size = (h, w)
    mean_rgb = IMG_MEAN[::-1].copy()
    std_rgb = IMG_STD[::-1].copy()
    with torch.no_grad():
        for index, batch in enumerate(testloader):
            image, label, _, name = batch
            img = image.clone()
            num_scales = len(test_scales)
            # output_dict = {k: [] for k in range(num_scales)}
            # Multi-scale inference: average logits over all test scales.
            # NOTE(review): in the F.interpolate branch below, `image` is
            # rescaled from the *previous* iteration's result rather than
            # from the original `img`, so scales after the first compound —
            # confirm this is intended.
            for scale_idx in range(num_scales):
                if version.parse(torch.__version__) > version.parse('0.4.0'):
                    image = F.interpolate(image, scale_factor=test_scales[scale_idx], mode='bilinear', align_corners=True)
                else:
                    test_size = ( int(h*test_scales[scale_idx]), int(w*test_scales[scale_idx]) )
                    interp_tmp = nn.Upsample(size=test_size, mode='bilinear', align_corners=True)
                    image = interp_tmp(img)
                if args.model == 'DeeplabRes':
                    output2 = model(image.to(device))
                    coutput = interp(output2).cpu().data[0].numpy()
                if args.test_flipping:
                    # Average with the horizontally flipped prediction.
                    output2 = model(torch.from_numpy(image.numpy()[:,:,:,::-1].copy()).to(device))
                    coutput = 0.5 * ( coutput + interp(output2).cpu().data[0].numpy()[:,:,::-1] )
                if scale_idx == 0:
                    output = coutput.copy()
                else:
                    output += coutput
            output = output/num_scales
            # Argmax over classes -> per-pixel trainId map (H x W, uint8).
            output = output.transpose(1,2,0)
            output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
            pred_label = output.copy()
            label = label_2_id[np.asarray(label.numpy(), dtype=np.uint8)]
            scorer.update(pred_label.flatten(), label.flatten(), index)
            # Save the raw label mask and a colourised copy.
            output_col = colorize_mask(output)
            output = Image.fromarray(output)
            name = name[0].split('/')[-1]
            output.save('%s/%s' % (args.save, name))
            output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0]))
class ScoreUpdater(object):
    """Accumulates a confusion matrix over predictions and reports per-class IoU.

    Only IoU is tracked; overall/per-class accuracies are deliberately
    not computed.
    """
    def __init__(self, valid_labels, c_num, x_num, logger=None, label=None, info=None):
        self._valid_labels = valid_labels
        self._confs = np.zeros((c_num, c_num))
        self._per_cls_iou = np.zeros(c_num)
        self._logger = logger
        self._label = label
        self._info = info
        self._num_class = c_num
        self._num_sample = x_num

    @property
    def info(self):
        return self._info

    def reset(self):
        """Clear all accumulated statistics and restart the speed timer."""
        self._start = time.time()
        self._computed = np.zeros(self._num_sample)  # per-sample done flags
        self._confs[:] = 0

    def fast_hist(self, label, pred_label, n):
        """Confusion matrix (n x n) for one flattened label/prediction pair."""
        valid = (label >= 0) & (label < n)
        flat = n * label[valid].astype(int) + pred_label[valid]
        return np.bincount(flat, minlength=n ** 2).reshape(n, n)

    def per_class_iu(self, hist):
        """Per-class intersection-over-union from a confusion matrix."""
        inter = np.diag(hist)
        union = hist.sum(1) + hist.sum(0) - inter
        return inter / union

    def do_updates(self, conf, i, computed=True):
        if computed:
            self._computed[i] = 1
        self._per_cls_iou = self.per_class_iu(conf)

    def update(self, pred_label, label, i, computed=True):
        """Fold sample `i`'s flattened prediction into the running scores."""
        self._confs += self.fast_hist(label, pred_label, self._num_class)
        self.do_updates(self._confs, i, computed)
        self.scores(i)

    def scores(self, i=None, logger=None):
        """Return per-class IoUs (NaNs zeroed); optionally log progress."""
        x_num = self._num_sample
        ious = np.nan_to_num(self._per_cls_iou)
        logger = self._logger if logger is None else logger
        if logger is not None:
            if i is not None:
                speed = 1. * self._computed.sum() / (time.time() - self._start)
                logger.info('Done {}/{} with speed: {:.2f}/s'.format(i + 1, x_num, speed))
            name = '' if self._label is None else '{}, '.format(self._label)
            logger.info('{}mean iou: {:.2f}%'. \
                        format(name, np.mean(ious) * 100))
            with util.np_print_options(formatter={'float': '{:5.2f}'.format}):
                logger.info('\n{}'.format(ious * 100))
        return ious
# Script entry point: run the full evaluation when executed directly.
if __name__ == '__main__':
    main()
| 11,168 | 39.762774 | 176 | py |
CRST | CRST-master/crst_seg.py | import argparse
import sys
from packaging import version
import time
import util
import os
import os.path as osp
import timeit
from collections import OrderedDict
import scipy.io
import torch
import torchvision.models as models
import torch.nn.functional as F
from torch.utils import data, model_zoo
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from operator import itemgetter
import scipy
from scipy import ndimage
import math
from PIL import Image
import numpy as np
import shutil
import random
from deeplab.model import Res_Deeplab
from deeplab.datasets import GTA5TestDataSet
from deeplab.datasets import SrcSTDataSet, GTA5StMineDataSet, SoftSrcSTDataSet, SoftGTA5StMineDataSet
### shared ###
# Channel-wise normalization statistics, BGR order.
IMG_MEAN = np.array((0.406, 0.456, 0.485), dtype=np.float32) # BGR
IMG_STD = np.array((0.225, 0.224, 0.229), dtype=np.float32) # BGR
# data
### source
## gta
DATA_SRC_DIRECTORY = './dataset/gta5'
DATA_SRC_LIST_PATH = './dataset/list/gta5/train.lst'
DATA_SRC = 'gta'
RESTORE_FROM = './src_model/gta5/src_model.pth'
NUM_CLASSES = 19
INIT_SRC_PORT = 0.03 # initial portion of the source set sampled per round. GTA: 0.03
### target
DATA_TGT_DIRECTORY = './dataset/cityscapes'
DATA_TGT_TRAIN_LIST_PATH = './dataset/list/cityscapes/train_ClsConfSet.lst'
DATA_TGT_TEST_LIST_PATH = './dataset/list/cityscapes/val.lst'
IGNORE_LABEL = 255
# train scales for src and tgt (random-scale augmentation range)
TRAIN_SCALE_SRC = '0.5,1.5'
TRAIN_SCALE_TGT = '0.5,1.5'
# model
MODEL = 'DeeplabRes'
# gpu
GPU = 0
PIN_MEMORY = False
# log files
LOG_FILE = 'self_training_log'
### train ###
BATCH_SIZE = 2
INPUT_SIZE = '512,1024'# training crop size 'h,w'; 512,1024 for GTA;
RANDSEED = 3
# params for optimizor
LEARNING_RATE =5e-5
POWER = 0.0
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0005
NUM_ROUNDS = 4
EPR = 2
SRC_SAMPLING_POLICY = 'r'
KC_POLICY = 'cb'
KC_VALUE = 'conf'
INIT_TGT_PORT = 0.2
MAX_TGT_PORT = 0.5
TGT_PORT_STEP = 0.05
# varies but dataset
MAX_SRC_PORT = 0.06 #0.06;
SRC_PORT_STEP = 0.0025 #0.0025:
MRKLD = 0.0    # weight of KLD model regularization
LRENT = 0.0    # weight of negative-entropy label regularization (soft pseudo-labels when > 0)
MRSRC = 0.0    # weight of regularization in the source domain
MINE_PORT = 1e-3
RARE_CLS_NUM = 3
MINE_CHANCE = 0.8
### val ###
SAVE_PATH = 'debug'
TEST_IMAGE_SIZE = '1024,2048'
EVAL_SCALE = 0.9
TEST_SCALE = '0.9,1.0,1.2'
DS_RATE = 4
def seed_torch(seed=0):
    """Make every source of randomness deterministic.

    Seeds Python's `random`, numpy, and torch (CPU and all CUDA devices),
    fixes the hash seed, and disables cudnn so convolution algorithms are
    stable across runs.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # only matters on multi-GPU setups
    torch.backends.cudnn.enabled = False
def get_arguments():
    """Parse all the arguments provided from the CLI.

    Returns:
      The argparse.Namespace of parsed arguments.
    """
    parser = argparse.ArgumentParser(description="DeepLab-ResNet Network")
    ### shared by train & val
    # data
    parser.add_argument("--data-src", type=str, default=DATA_SRC,
                        help="Name of source dataset.")
    parser.add_argument("--data-src-dir", type=str, default=DATA_SRC_DIRECTORY,
                        help="Path to the directory containing the source dataset.")
    parser.add_argument("--data-src-list", type=str, default=DATA_SRC_LIST_PATH,
                        help="Path to the file listing the images&labels in the source dataset.")
    parser.add_argument("--data-tgt-dir", type=str, default=DATA_TGT_DIRECTORY,
                        help="Path to the directory containing the target dataset.")
    parser.add_argument("--data-tgt-train-list", type=str, default=DATA_TGT_TRAIN_LIST_PATH,
                        help="Path to the file listing the images*GT labels in the target train dataset.")
    parser.add_argument("--data-tgt-test-list", type=str, default=DATA_TGT_TEST_LIST_PATH,
                        help="Path to the file listing the images*GT labels in the target test dataset.")
    parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
                        help="Number of classes to predict (including background).")
    parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
                        help="The index of the label to ignore during the training.")
    # model
    parser.add_argument("--model", type=str, default=MODEL,
                        help="Model Choice (DeeplabMulti/DeeplabVGG).")
    parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
                        help="Where restore model parameters from.")
    # gpu
    parser.add_argument("--gpu", type=int, default=GPU,
                        help="choose gpu device.")
    parser.add_argument("--pin-memory", type=bool, default=PIN_MEMORY,
                        help="Whether to pin memory in train & eval.")
    # log files
    parser.add_argument("--log-file", type=str, default=LOG_FILE,
                        help="The name of log file.")
    parser.add_argument('--debug',help='True means logging debug info.',
                        default=False, action='store_true')
    ### train ###
    parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
                        help="Number of images sent to the network in one step.")
    parser.add_argument("--input-size", type=str, default=INPUT_SIZE,
                        help="Comma-separated string with height and width of images.")
    parser.add_argument("--is-training", action="store_true",
                        help="Whether to updates the running means and variances during the training.")
    parser.add_argument("--eval-training", action="store_true",
                        help="Use the saved means and variances, or running means and variances during the evaluation.")
    parser.add_argument("--random-mirror", action="store_true",
                        help="Whether to randomly mirror the inputs during the training.")
    parser.add_argument("--random-scale", action="store_true",
                        help="Whether to randomly scale the inputs during the training.")
    parser.add_argument("--train-scale-src", type=str, default=TRAIN_SCALE_SRC,
                        help="The scale for multi-scale training in source domain.")
    parser.add_argument("--train-scale-tgt", type=str, default=TRAIN_SCALE_TGT,
                        help="The scale for multi-scale training in target domain.")
    # params for optimizor
    parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
                        help="Base learning rate for training with polynomial decay.")
    parser.add_argument("--power", type=float, default=POWER,
                        help="Decay parameter to compute the learning rate.")
    parser.add_argument("--momentum", type=float, default=MOMENTUM,
                        help="Momentum component of the optimiser.")
    parser.add_argument("--weight-decay", type=float, default=WEIGHT_DECAY,
                        help="Regularisation parameter for L2-loss.")
    ### val
    parser.add_argument('--test-flipping', dest='test_flipping',
                        help='If average predictions of original and flipped images.',
                        default=False, action='store_true')
    parser.add_argument("--test-image-size", type=str, default=TEST_IMAGE_SIZE,
                        help="The test image size.")
    parser.add_argument("--eval-scale", type=float, default=EVAL_SCALE,
                        help="The test image scale.")
    parser.add_argument("--test-scale", type=str, default=TEST_SCALE,
                        help="The test image scale.")
    ### self-training params
    parser.add_argument("--save", type=str, default=SAVE_PATH,
                        help="Path to save result for self-training.")
    parser.add_argument("--num-rounds", type=int, default=NUM_ROUNDS,
                        help="Number of rounds for self-training.")
    parser.add_argument("--epr", type=int, default=EPR,
                        help="Number of epochs per round for self-training.")
    parser.add_argument('--kc-policy', default=KC_POLICY, type=str, dest='kc_policy',
                        help='The policy to determine kc. "cb" for weighted class-balanced threshold')
    parser.add_argument('--kc-value', default=KC_VALUE, type=str,
                        help='The way to determine kc values, either "conf", or "prob".')
    parser.add_argument('--ds-rate', default=DS_RATE, type=int,
                        help='The downsampling rate in kc calculation.')
    parser.add_argument('--init-tgt-port', default=INIT_TGT_PORT, type=float, dest='init_tgt_port',
                        help='The initial portion of target to determine kc')
    parser.add_argument('--max-tgt-port', default=MAX_TGT_PORT, type=float, dest='max_tgt_port',
                        help='The max portion of target to determine kc')
    parser.add_argument('--tgt-port-step', default=TGT_PORT_STEP, type=float, dest='tgt_port_step',
                        help='The portion step in target domain in every round of self-paced self-trained neural network')
    parser.add_argument('--init-src-port', default=INIT_SRC_PORT, type=float, dest='init_src_port',
                        help='The initial portion of source portion for self-trained neural network')
    parser.add_argument('--max-src-port', default=MAX_SRC_PORT, type=float, dest='max_src_port',
                        help='The max portion of source portion for self-trained neural network')
    parser.add_argument('--src-port-step', default=SRC_PORT_STEP, type=float, dest='src_port_step',
                        help='The portion step in source domain in every round of self-paced self-trained neural network')
    parser.add_argument('--randseed', default=RANDSEED, type=int,
                        help='The random seed to sample the source dataset.')
    parser.add_argument("--src-sampling-policy", type=str, default=SRC_SAMPLING_POLICY,
                        help="The sampling policy on source dataset: 'c' for 'cumulative' and 'r' for replace ")
    parser.add_argument('--mine-port', default=MINE_PORT, type=float,
                        help='If a class has a predication portion lower than the mine_port, then mine the patches including the class in self-training.')
    parser.add_argument('--rare-cls-num', default=RARE_CLS_NUM, type=int,
                        help='The number of classes to be mined.')
    parser.add_argument('--mine-chance', default=MINE_CHANCE, type=float,
                        help='The chance of patch mining.')
    parser.add_argument('--rm-prob',
                        help='If remove the probability maps generated in every round.',
                        default=False, action='store_true')
    parser.add_argument('--mr-weight-kld', default=MRKLD, type=float, dest='mr_weight_kld',
                        help='weight of kld model regularization')
    parser.add_argument('--lr-weight-ent', default=LRENT, type=float, dest='lr_weight_ent',
                        help='weight of negative entropy label regularization')
    parser.add_argument('--mr-weight-src', default=MRSRC, type=float, dest='mr_weight_src',
                        help='weight of regularization in source domain')
    return parser.parse_args()
# Parsed once at import time; used as a module-level global throughout.
args = get_arguments()
# palette
# NOTE(review): `palette` is only defined when --data-src is 'gta'; with
# another source (e.g. synthia) colorize_mask would hit a NameError — confirm
# whether a synthia palette branch exists elsewhere.
if args.data_src == 'gta':
    # gta: Cityscapes 19-class train-ID color palette (RGB triplets).
    palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
               220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
               0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
    palette.append(0)
def colorize_mask(mask):
    """Convert a trainID mask (2-D numpy array) into a palette-mapped PIL image."""
    color_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
    color_mask.putpalette(palette)
    return color_mask
def main():
    """Run CRST self-training: alternate pseudo-label generation and retraining.

    Each round (except the last): evaluate on the target train set, compute
    class-balanced thresholds, generate pseudo-labels, then retrain on a mix
    of sampled source data and pseudo-labeled target data. The last round
    only evaluates the final model on the target train and test sets.
    """
    randseed = args.randseed
    seed_torch(randseed)
    device = torch.device("cuda:" + str(args.gpu))
    save_path = args.save
    save_pseudo_label_path = osp.join(save_path, 'pseudo_label')  # in 'save_path'. Save labelIDs, not trainIDs.
    save_stats_path = osp.join(save_path, 'stats')  # in 'save_path'
    save_lst_path = osp.join(save_path, 'list')
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if not os.path.exists(save_pseudo_label_path):
        os.makedirs(save_pseudo_label_path)
    if not os.path.exists(save_stats_path):
        os.makedirs(save_stats_path)
    if not os.path.exists(save_lst_path):
        os.makedirs(save_lst_path)
    logger = util.set_logger(args.save, args.log_file, args.debug)
    logger.info('start with arguments %s', args)
    if args.model == 'DeeplabRes':
        model = Res_Deeplab(num_classes=args.num_classes)
    # Restore weights either from a URL (drop the 'fc' head) or a local file.
    if args.restore_from[:4] == 'http' :
        saved_state_dict = model_zoo.load_url(args.restore_from)
        new_params = model.state_dict().copy()
        for i in saved_state_dict:
            # Scale.layer5.conv2d_list.3.weight
            i_parts = str(i).split('.')
            # print i_parts
            if not i_parts[0] == 'fc':
                new_params['.'.join(i_parts[0:])] = saved_state_dict[i]
    else:
        loc = "cuda:" + str(args.gpu)
        saved_state_dict = torch.load(args.restore_from,map_location=loc)
        new_params = saved_state_dict.copy()
    model.load_state_dict(new_params)
    image_src_list, _, label_src_list, src_num = parse_split_list(args.data_src_list)
    image_tgt_list, image_name_tgt_list, _, tgt_num = parse_split_list(args.data_tgt_train_list)
    _, _, _, test_num = parse_split_list(args.data_tgt_test_list)
    ## label mapping
    sys.path.insert(0, 'dataset/helpers')
    if args.data_src == 'synthia':
        from labels_cityscapes_synthia import id2label, trainId2label
    elif args.data_src == 'gta':
        from labels import id2label, trainId2label
    # label_2_id: raw label id -> train id (255 for ignored); id_2_label: inverse.
    label_2_id = 255 * np.ones((256,))
    for l in id2label:
        if l in (-1, 255):
            continue
        label_2_id[l] = id2label[l].trainId
    id_2_label = np.array([trainId2label[_].id for _ in trainId2label if _ not in (-1, 255)])
    valid_labels = sorted(set(id_2_label.ravel()))
    # portions
    tgt_portion = args.init_tgt_port
    src_portion = args.init_src_port
    # training crop size
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)
    lscale_src, hscale_src = map(float, args.train_scale_src.split(','))
    train_scale_src = (lscale_src, hscale_src)
    lscale_tgt, hscale_tgt = map(float, args.train_scale_tgt.split(','))
    train_scale_tgt = (lscale_tgt, hscale_tgt)
    for round_idx in range(args.num_rounds):
        save_round_eval_path = osp.join(args.save,str(round_idx))
        save_pseudo_label_color_path = osp.join(save_round_eval_path, 'pseudo_label_color')  # in every 'save_round_eval_path'
        if not os.path.exists(save_round_eval_path):
            os.makedirs(save_round_eval_path)
        if not os.path.exists(save_pseudo_label_color_path):
            os.makedirs(save_pseudo_label_color_path)
        ########## pseudo-label generation
        if round_idx != args.num_rounds - 1:
            # evaluation & save confidence vectors
            conf_dict, pred_cls_num, save_prob_path, save_pred_path = val(model, device, save_round_eval_path, round_idx, tgt_num,
                                                                          label_2_id, valid_labels, args, logger)
            # class-balanced thresholds
            cls_thresh = kc_parameters(conf_dict, pred_cls_num, tgt_portion, round_idx, save_stats_path, args, logger)
            tgt_portion = min(tgt_portion + args.tgt_port_step, args.max_tgt_port)
            # pseudo-label maps generation
            label_selection(cls_thresh, tgt_num, image_name_tgt_list, id_2_label, round_idx, save_prob_path, save_pred_path, save_pseudo_label_path, save_pseudo_label_color_path, save_round_eval_path, args, logger)
            # save training list; 'c' (cumulative) re-uses the same seed, 'r' re-draws
            if args.src_sampling_policy == 'c':
                randseed = args.randseed
            elif args.src_sampling_policy == 'r':
                randseed += 1
            src_train_lst, tgt_train_lst, src_num_sel = savelst_SrcTgt(src_portion, image_tgt_list, image_name_tgt_list, image_src_list, label_src_list, save_lst_path, save_pseudo_label_path, src_num, tgt_num, randseed, args)
            src_portion = min(src_portion + args.src_port_step, args.max_src_port)
            ########### model retraining
            # dataset
            epoch_per_round = args.epr
            # reg weights
            if args.mr_weight_kld == 0:
                reg_weight_tgt = 0.0
            else:  # currently only one kind of model regularizer is supported
                reg_weight_tgt = args.mr_weight_kld
            reg_weight_src = args.mr_weight_src
            ### patch mining params
            # no patch mining in src
            # patch mining in target
            rare_id = np.load(save_stats_path + '/rare_id_round' + str(round_idx) + '.npy')
            mine_id = np.load(save_stats_path + '/mine_id_round' + str(round_idx) + '.npy')
            mine_chance = args.mine_chance
            # dataloader: hard pseudo-labels when lr_weight_ent == 0, soft otherwise
            if args.lr_weight_ent == 0.0:
                srctrainset = SrcSTDataSet(args.data_src_dir, src_train_lst, max_iters=src_num_sel,reg_weight=reg_weight_src,data_src=args.data_src,
                                           crop_size=input_size,scale=args.random_scale, mirror=args.random_mirror, train_scale=train_scale_src, mean=IMG_MEAN, std=IMG_STD)
                tgttrainset = GTA5StMineDataSet(args.data_tgt_dir, tgt_train_lst, pseudo_root=save_pseudo_label_path, max_iters=tgt_num,reg_weight=reg_weight_tgt,rare_id = rare_id,
                                               mine_id=mine_id, mine_chance = mine_chance, crop_size=input_size,scale=args.random_scale,data_src=args.data_src,
                                               mirror=args.random_mirror, train_scale=train_scale_tgt, mean=IMG_MEAN, std=IMG_STD)
            elif args.lr_weight_ent > 0.0:
                srctrainset = SoftSrcSTDataSet(args.data_src_dir, src_train_lst, max_iters=src_num_sel,reg_weight=reg_weight_src,data_src=args.data_src,num_classes=args.num_classes,
                                               crop_size=input_size,scale=args.random_scale, mirror=args.random_mirror, train_scale=train_scale_src, mean=IMG_MEAN, std=IMG_STD)
                tgttrainset = SoftGTA5StMineDataSet(args.data_tgt_dir, tgt_train_lst, pseudo_root=save_pseudo_label_path, max_iters=tgt_num,reg_weight=reg_weight_tgt,rare_id = rare_id,
                                                   mine_id=mine_id, mine_chance = mine_chance, crop_size=input_size,scale=args.random_scale,data_src=args.data_src,num_classes=args.num_classes,
                                                   mirror=args.random_mirror, train_scale=train_scale_tgt, mean=IMG_MEAN, std=IMG_STD)
            mixtrainset = torch.utils.data.ConcatDataset([srctrainset, tgttrainset])
            mix_trainloader = torch.utils.data.DataLoader(mixtrainset, batch_size=args.batch_size, shuffle=True,
                                                          num_workers=0, pin_memory=args.pin_memory)
            # optimizer: 1x LR on the backbone, 10x on the classifier head
            tot_iter = np.ceil(float(src_num_sel + tgt_num) / args.batch_size)
            optimizer = optim.SGD([{'params': get_1x_lr_params_NOscale(model), 'lr': args.learning_rate},
                                   {'params': get_10x_lr_params(model), 'lr': 10 * args.learning_rate}],
                                  lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
            interp = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)
            logger.info('###### Start model retraining dataset in round {}! ######'.format(round_idx))
            # model
            if args.is_training:
                model.train()
            else:
                model.eval()
            start = timeit.default_timer()
            # cudnn
            cudnn.enabled = True  # enable cudnn
            cudnn.benchmark = True  # enable the inbuilt cudnn auto-tuner to find the best algorithm to use for your hardware.
            # start training
            for epoch in range(epoch_per_round):
                train(mix_trainloader, model, device, interp, optimizer, tot_iter, round_idx, epoch, args, logger)
            end = timeit.default_timer()
            logger.info('###### Finish model retraining dataset in round {}! Time cost: {:.2f} seconds. ######'.format(round_idx, end - start))
            # test self-trained model in target domain test set
            tgt_set = 'test'
            test(model, device, save_round_eval_path, round_idx, tgt_set, test_num, args.data_tgt_test_list, label_2_id,
                 valid_labels, args, logger)
        elif round_idx == args.num_rounds - 1:
            # Final round: no more pseudo-labels, just evaluate on both splits.
            shutil.rmtree(save_pseudo_label_path)
            tgt_set = 'train'
            test(model, device, save_round_eval_path, round_idx, tgt_set, tgt_num, args.data_tgt_train_list, label_2_id,
                 valid_labels, args, logger)
            tgt_set = 'test'
            test(model, device, save_round_eval_path, round_idx, tgt_set, test_num, args.data_tgt_test_list, label_2_id,
                 valid_labels, args, logger)
def val(model, device, save_round_eval_path, round_idx, tgt_num, label_2_id, valid_labels, args, logger):
    """Evaluate on the target train set and collect per-class confidence stats.

    Saves, per image: the full softmax probability map (.npy), the argmax
    prediction (trainIDs, .png) and a colorized visualization. Also gathers a
    downsampled vector of confidences (or probabilities) per class, used
    later by kc_parameters.

    Returns (conf_dict, pred_cls_num, save_prob_path, save_pred_path).
    """
    ## scorer
    scorer = ScoreUpdater(valid_labels, args.num_classes, tgt_num, logger)
    scorer.reset()
    h, w = map(int, args.test_image_size.split(','))
    test_image_size = (h, w)
    test_size = ( int(h*args.eval_scale), int(w*args.eval_scale) )
    ## test data loader
    testloader = data.DataLoader(GTA5TestDataSet(args.data_tgt_dir, args.data_tgt_train_list, test_size=test_size, test_scale=args.eval_scale, mean=IMG_MEAN, std=IMG_STD, scale=False, mirror=False),
                                 batch_size=1, shuffle=False, pin_memory=args.pin_memory)
    ## model for evaluation
    if args.eval_training:
        model.train()
    else:
        model.eval()
    #
    model.to(device)
    ## upsampling layer (align_corners only exists from torch 0.4.0 on)
    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=test_image_size, mode='bilinear', align_corners=True)
    else:
        interp = nn.Upsample(size=test_image_size, mode='bilinear')
    ## output of deeplab is logits, not probability
    softmax2d = nn.Softmax2d()
    ## output folder
    save_pred_vis_path = osp.join(save_round_eval_path, 'pred_vis')
    save_prob_path = osp.join(save_round_eval_path, 'prob')
    save_pred_path = osp.join(save_round_eval_path, 'pred')
    if not os.path.exists(save_pred_vis_path):
        os.makedirs(save_pred_vis_path)
    if not os.path.exists(save_prob_path):
        os.makedirs(save_prob_path)
    if not os.path.exists(save_pred_path):
        os.makedirs(save_pred_path)
    # saving output data
    conf_dict = {k: [] for k in range(args.num_classes)}
    pred_cls_num = np.zeros(args.num_classes)
    ## evaluation process
    logger.info('###### Start evaluating target domain train set in round {}! ######'.format(round_idx))
    start_eval = time.time()
    with torch.no_grad():
        for index, batch in enumerate(testloader):
            image, label, _, name = batch
            if args.model == 'DeeplabRes':
                output2 = model(image.to(device))
                output = softmax2d(interp(output2)).cpu().data[0].numpy()
            if args.test_flipping:
                # average with the horizontally-flipped prediction (flipped back)
                output2 = model(torch.from_numpy(image.numpy()[:,:,:,::-1].copy()).to(device))
                output = 0.5 * ( output + softmax2d(interp(output2)).cpu().data[0].numpy()[:,:,::-1] )
            output = output.transpose(1,2,0)
            amax_output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
            conf = np.amax(output,axis=2)
            # score
            pred_label = amax_output.copy()
            label = label_2_id[np.asarray(label.numpy(), dtype=np.uint8)]
            scorer.update(pred_label.flatten(), label.flatten(), index)
            # save visualized seg maps & predication prob map
            amax_output_col = colorize_mask(amax_output)
            name = name[0].split('/')[-1]
            image_name = name.split('.')[0]
            # prob
            np.save('%s/%s.npy' % (save_prob_path, image_name), output)
            # trainIDs/vis seg maps
            amax_output = Image.fromarray(amax_output)
            amax_output.save('%s/%s.png' % (save_pred_path, image_name))
            amax_output_col.save('%s/%s_color.png' % (save_pred_vis_path, image_name))
            # save class-wise confidence maps (downsampled by ds_rate to bound memory)
            if args.kc_value == 'conf':
                for idx_cls in range(args.num_classes):
                    idx_temp = pred_label == idx_cls
                    pred_cls_num[idx_cls] = pred_cls_num[idx_cls] + np.sum(idx_temp)
                    if idx_temp.any():
                        conf_cls_temp = conf[idx_temp].astype(np.float32)
                        len_cls_temp = conf_cls_temp.size
                        # downsampling by ds_rate
                        conf_cls = conf_cls_temp[0:len_cls_temp:args.ds_rate]
                        conf_dict[idx_cls].extend(conf_cls)
            elif args.kc_value == 'prob':
                for idx_cls in range(args.num_classes):
                    idx_temp = pred_label == idx_cls
                    pred_cls_num[idx_cls] = pred_cls_num[idx_cls] + np.sum(idx_temp)
                    # prob slice
                    prob_cls_temp = output[:,:,idx_cls].astype(np.float32).ravel()
                    len_cls_temp = prob_cls_temp.size
                    # downsampling by ds_rate
                    prob_cls = prob_cls_temp[0:len_cls_temp:args.ds_rate]
                    conf_dict[idx_cls].extend(prob_cls)  # it should be prob_dict; but for unification, use conf_dict
    logger.info('###### Finish evaluating target domain train set in round {}! Time cost: {:.2f} seconds. ######'.format(round_idx, time.time()-start_eval))
    return conf_dict, pred_cls_num, save_prob_path, save_pred_path  # return the dictionary containing all the class-wise confidence vectors
def train(mix_trainloader, model, device, interp, optimizer, tot_iter, round_idx, epoch_idx, args, logger):
    """Train the model for one epoch on the mixed source+target loader.

    Each batch yields (images, labels, _, _, reg_weights). The loss is the
    regularized self-training loss: hard pseudo-labels when
    args.lr_weight_ent == 0, soft pseudo-labels when it is positive.
    A model snapshot is written at the end of the epoch.
    """
    for i_iter, batch in enumerate(mix_trainloader):
        images, labels, _, _, reg_weights = batch
        images = images.to(device)
        labels = labels.to(device)
        reg_weights = reg_weights.to(device)
        optimizer.zero_grad()
        adjust_learning_rate(optimizer, i_iter, tot_iter)
        pred = interp(model(images))
        # Bug fix: the original used two independent `if`s, so a negative
        # lr_weight_ent left `loss` unbound and crashed with NameError on
        # loss.backward(). Dispatch explicitly and fail fast on bad values.
        if args.lr_weight_ent == 0.0:
            loss = reg_loss_calc(pred, labels, reg_weights, args)
        elif args.lr_weight_ent > 0.0:
            loss = reg_loss_calc_expand(pred, labels, reg_weights, args)
        else:
            raise ValueError('args.lr_weight_ent must be >= 0, got {}'.format(args.lr_weight_ent))
        loss.backward()
        optimizer.step()
        logger.info('iter = {} of {} completed, loss = {:.4f}'.format(i_iter+1, tot_iter, loss.data.cpu().numpy()))
    print('taking snapshot ...')
    torch.save(model.state_dict(), osp.join(args.save, args.data_src + '2city_round' + str(round_idx) + '_epoch' + str(epoch_idx+1) + '.pth'))
def test(model, device, save_round_eval_path, round_idx, tgt_set, test_num, test_list, label_2_id, valid_labels, args, logger):
    """Multi-scale evaluation on the target `tgt_set` ('train' or 'test') split.

    Logits are averaged over args.test_scale scales (and optionally the
    horizontal flip) before the argmax; colorized predictions are saved.
    """
    ## scorer
    scorer = ScoreUpdater(valid_labels, args.num_classes, test_num, logger)
    scorer.reset()
    h, w = map(int, args.test_image_size.split(','))
    test_image_size = (h, w)
    test_size = ( h, w )
    test_scales = [float(_) for _ in str(args.test_scale).split(',')]
    num_scales = len(test_scales)
    ## test data loader
    testloader = data.DataLoader(GTA5TestDataSet(args.data_tgt_dir, test_list, test_size=test_size, test_scale=1.0, mean=IMG_MEAN, std=IMG_STD, scale=False, mirror=False),
                                 batch_size=1, shuffle=False, pin_memory=args.pin_memory)
    ## model for evaluation
    if args.eval_training:
        model.train()
    else:
        model.eval()
    #
    model.to(device)
    ## upsampling layer (align_corners only exists from torch 0.4.0 on)
    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=test_image_size, mode='bilinear', align_corners=True)
    else:
        interp = nn.Upsample(size=test_image_size, mode='bilinear')
    ## output of deeplab is logits, not probability
    softmax2d = nn.Softmax2d()
    ## output folder
    if tgt_set == 'train':
        save_test_vis_path = osp.join(save_round_eval_path, 'trainSet_vis')
    elif tgt_set == 'test':
        save_test_vis_path = osp.join(save_round_eval_path, 'testSet_vis')
    if not os.path.exists(save_test_vis_path):
        os.makedirs(save_test_vis_path)
    ## evaluation process
    logger.info('###### Start evaluating in target domain {} set in round {}! ######'.format(tgt_set, round_idx))
    start_eval = time.time()
    with torch.no_grad():
        for index, batch in enumerate(testloader):
            image, label, _, name = batch
            img = image.clone()
            for scale_idx in range(num_scales):
                if version.parse(torch.__version__) > version.parse('0.4.0'):
                    image = F.interpolate(img, scale_factor=test_scales[scale_idx], mode='bilinear', align_corners=True)
                else:
                    test_size = (int(h * test_scales[scale_idx]), int(w * test_scales[scale_idx]))
                    interp_tmp = nn.Upsample(size=test_size, mode='bilinear', align_corners=True)
                    image = interp_tmp(img)
                if args.model == 'DeeplabRes':
                    output2 = model(image.to(device))
                    coutput = interp(output2).cpu().data[0].numpy()
                if args.test_flipping:
                    # average with the horizontally-flipped prediction (flipped back)
                    output2 = model(torch.from_numpy(image.numpy()[:,:,:,::-1].copy()).to(device))
                    coutput = 0.5 * ( coutput + interp(output2).cpu().data[0].numpy()[:,:,::-1] )
                if scale_idx == 0:
                    output = coutput.copy()
                else:
                    output = output+coutput
            output = output/num_scales
            output = output.transpose(1,2,0)
            amax_output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
            # score
            pred_label = amax_output.copy()
            label = label_2_id[np.asarray(label.numpy(), dtype=np.uint8)]
            scorer.update(pred_label.flatten(), label.flatten(), index)
            # save visualized seg maps & predication prob map
            amax_output_col = colorize_mask(amax_output)
            name = name[0].split('/')[-1]
            image_name = name.split('.')[0]
            # vis seg maps
            amax_output_col.save('%s/%s_color.png' % (save_test_vis_path, image_name))
    logger.info('###### Finish evaluating in target domain {} set in round {}! Time cost: {:.2f} seconds. ######'.format(tgt_set, round_idx, time.time()-start_eval))
    return
def kc_parameters(conf_dict, pred_cls_num, tgt_portion, round_idx, save_stats_path, args, logger):
    """Determine the class-balanced confidence threshold (kc) per class.

    For each class, the confidences in `conf_dict` are sorted in descending
    order and the threshold is placed so that the top `tgt_portion` fraction
    of that class's pixels pass it (default 1.0 when no pixels are selected).
    Classes whose overall predicted portion falls below args.mine_port are
    saved as mining ids; the args.rare_cls_num rarest classes as rare ids.
    All results are also persisted as .npy files under `save_stats_path`.

    Returns the per-class threshold vector (float32, length num_classes).
    """
    logger.info('###### Start kc generation in round {} ! ######'.format(round_idx))
    start_kc = time.time()
    # threshold for each class
    cls_thresh = np.ones(args.num_classes,dtype = np.float32)
    cls_sel_size = np.zeros(args.num_classes, dtype=np.float32)
    cls_size = np.zeros(args.num_classes, dtype=np.float32)
    if args.kc_policy == 'cb' and args.kc_value == 'conf':
        for idx_cls in np.arange(0, args.num_classes):
            cls_size[idx_cls] = pred_cls_num[idx_cls]
            # was `!= None` — identity comparison is the correct idiom
            if conf_dict[idx_cls] is not None:
                conf_dict[idx_cls].sort(reverse=True) # sort in descending order
                len_cls = len(conf_dict[idx_cls])
                cls_sel_size[idx_cls] = int(math.floor(len_cls * tgt_portion))
                len_cls_thresh = int(cls_sel_size[idx_cls])
                if len_cls_thresh != 0:
                    cls_thresh[idx_cls] = conf_dict[idx_cls][len_cls_thresh-1]
                conf_dict[idx_cls] = None  # free the (potentially huge) confidence list
    # threshold for mine_id with priority
    num_mine_id = len(np.nonzero(cls_size / np.sum(cls_size) < args.mine_port)[0])
    # chose the smallest mine_id
    id_all = np.argsort(cls_size / np.sum(cls_size))
    mine_id = id_all[:num_mine_id] # sort mine_id in ascending order w.r.t predication portions
    rare_id = id_all[:args.rare_cls_num]
    # save mine ids
    np.save(save_stats_path + '/rare_id_round' + str(round_idx) + '.npy', rare_id)
    np.save(save_stats_path + '/mine_id_round' + str(round_idx) + '.npy', mine_id)
    logger.info('Mining ids : {}! {} rarest ids: {}!'.format(mine_id,args.rare_cls_num,rare_id))
    # save thresholds
    np.save(save_stats_path + '/cls_thresh_round' + str(round_idx) + '.npy', cls_thresh)
    np.save(save_stats_path + '/cls_sel_size_round' + str(round_idx) + '.npy', cls_sel_size)
    logger.info('###### Finish kc generation in round {}! Time cost: {:.2f} seconds. ######'.format(round_idx,time.time() - start_kc))
    return cls_thresh
def label_selection(cls_thresh, tgt_num, image_name_tgt_list, id_2_label, round_idx, save_prob_path, save_pred_path, save_pseudo_label_path, save_pseudo_label_color_path, save_round_eval_path, args, logger):
    """Generate pseudo-label maps by thresholding saved probability maps.

    For every target image: divide the softmax probabilities by the per-class
    thresholds `cls_thresh`, re-take the argmax, and mark pixels whose best
    weighted score is below 1 as unlabeled (0 for labelIDs, 255 for trainIDs).
    With soft-label regularization (args.lr_weight_ent > 0) a soft pseudo-label
    .npy is additionally saved and selection uses the regularized score.
    """
    logger.info('###### Start pseudo-label generation in round {} ! ######'.format(round_idx))
    start_pl = time.time()
    for idx in range(tgt_num):
        sample_name = image_name_tgt_list[idx].split('.')[0]
        probmap_path = osp.join(save_prob_path, '{}.npy'.format(sample_name))
        pred_path = osp.join(save_pred_path, '{}.png'.format(sample_name))
        pred_prob = np.load(probmap_path)
        pred_label_trainIDs = np.asarray(Image.open(pred_path))
        pred_label_labelIDs = id_2_label[pred_label_trainIDs]
        pred_label_trainIDs = pred_label_trainIDs.copy()
        if args.kc_policy == 'cb' and args.lr_weight_ent == 0.0:
            save_wpred_vis_path = osp.join(save_round_eval_path, 'weighted_pred_vis')
            if not os.path.exists(save_wpred_vis_path):
                os.makedirs(save_wpred_vis_path)
            # class-balanced re-weighting: probabilities scaled by 1/threshold
            weighted_prob = pred_prob/cls_thresh
            weighted_pred_trainIDs = np.asarray(np.argmax(weighted_prob, axis=2), dtype=np.uint8)
            # save weighted predication
            wpred_label_col = weighted_pred_trainIDs.copy()
            wpred_label_col = colorize_mask(wpred_label_col)
            wpred_label_col.save('%s/%s_color.png' % (save_wpred_vis_path, sample_name))
            weighted_conf = np.amax(weighted_prob, axis=2)
            pred_label_trainIDs = weighted_pred_trainIDs.copy()
            pred_label_labelIDs = id_2_label[pred_label_trainIDs]
            pred_label_labelIDs[weighted_conf < 1] = 0  # '0' in cityscapes indicates 'unlabaled' for labelIDs
            pred_label_trainIDs[weighted_conf < 1] = 255  # '255' in cityscapes indicates 'unlabaled' for trainIDs
        elif args.kc_policy == 'cb' and args.lr_weight_ent > 0.0:  # check if cb can be combined with kc_value == conf or prob; also check if \alpha can be larger than 1
            save_wpred_vis_path = osp.join(save_round_eval_path, 'weighted_pred_vis')
            if not os.path.exists(save_wpred_vis_path):
                os.makedirs(save_wpred_vis_path)
            # soft pseudo-label
            soft_pseudo_label = np.power(pred_prob/cls_thresh,1.0/args.lr_weight_ent)  # weighted softmax with temperature
            soft_pseudo_label_sum = soft_pseudo_label.sum(2)
            soft_pseudo_label = soft_pseudo_label.transpose(2,0,1)/soft_pseudo_label_sum
            soft_pseudo_label = soft_pseudo_label.transpose(1,2,0).astype(np.float32)
            np.save('%s/%s.npy' % (save_pseudo_label_path, sample_name), soft_pseudo_label)
            # hard pseudo-label
            weighted_pred_trainIDs = np.asarray(np.argmax(soft_pseudo_label, axis=2), dtype=np.uint8)
            # selection: keep pixels where the regularized score beats the selection score
            reg_score = np.sum( -soft_pseudo_label*np.log(pred_prob+1e-32) + args.lr_weight_ent*soft_pseudo_label*np.log(soft_pseudo_label+1e-32), axis=2)
            sel_score = np.sum( -soft_pseudo_label*np.log(cls_thresh+1e-32), axis=2)
            # save weighted predication
            wpred_label_col = weighted_pred_trainIDs.copy()
            wpred_label_col = colorize_mask(wpred_label_col)
            wpred_label_col.save('%s/%s_color.png' % (save_wpred_vis_path, sample_name))
            pred_label_trainIDs = weighted_pred_trainIDs.copy()
            pred_label_labelIDs = id_2_label[pred_label_trainIDs]
            pred_label_labelIDs[reg_score >= sel_score] = 0  # '0' in cityscapes indicates 'unlabaled' for labelIDs
            pred_label_trainIDs[reg_score >= sel_score] = 255  # '255' in cityscapes indicates 'unlabaled' for trainIDs
        # pseudo-labels with labelID
        pseudo_label_labelIDs = pred_label_labelIDs.copy()
        pseudo_label_trainIDs = pred_label_trainIDs.copy()
        # save colored pseudo-label map
        pseudo_label_col = colorize_mask(pseudo_label_trainIDs)
        pseudo_label_col.save('%s/%s_color.png' % (save_pseudo_label_color_path, sample_name))
        # save pseudo-label map with label IDs
        pseudo_label_save = Image.fromarray(pseudo_label_labelIDs.astype(np.uint8))
        pseudo_label_save.save('%s/%s.png' % (save_pseudo_label_path, sample_name))
    # remove probability maps
    if args.rm_prob:
        shutil.rmtree(save_prob_path)
    logger.info('###### Finish pseudo-label generation in round {}! Time cost: {:.2f} seconds. ######'.format(round_idx,time.time() - start_pl))
def parse_split_list(list_name):
    """Parse a tab-separated split file of "<image path>\\t<label path>" lines.

    Returns (image paths, image basenames, label paths, number of lines).
    """
    image_list = []
    image_name_list = []
    label_list = []
    with open(list_name) as split_file:
        for line in split_file:
            fields = line.strip().split('\t')
            image_list.append(fields[0])
            image_name_list.append(fields[0].split('/')[-1])
            label_list.append(fields[1])
    return image_list, image_name_list, label_list, len(image_list)
def savelst_SrcTgt(src_portion, image_tgt_list, image_name_tgt_list, image_src_list, label_src_list, save_lst_path, save_pseudo_label_path, src_num, tgt_num, randseed, args):
    """Write the source/target training list files for one self-training round.

    A random subset (fraction `src_portion`) of the source data is selected;
    every target image is paired with its pseudo-label in
    `save_pseudo_label_path` (plus a third column with the soft-label .npy
    path when args.lr_weight_ent > 0, i.e. soft pseudo-labels are enabled).

    Returns:
        (src_train_lst path, tgt_train_lst path, number of selected source images)
    """
    src_num_sel = int(np.floor(src_num * src_portion))
    np.random.seed(randseed)
    sel_idx = np.random.choice(src_num, src_num_sel, replace=False)
    # Bug fix: the previous `list(itemgetter(*sel_idx)(...))` raised a
    # TypeError when 0 indices were selected and, for exactly 1 index,
    # exploded the single path string into a list of characters.
    # Plain indexing handles every selection size.
    sel_src_img_list = [image_src_list[i] for i in sel_idx]
    sel_src_label_list = [label_src_list[i] for i in sel_idx]
    src_train_lst = osp.join(save_lst_path, 'src_train.lst')
    tgt_train_lst = osp.join(save_lst_path, 'tgt_train.lst')
    # generate src train list: "<image>\t<label>"
    with open(src_train_lst, 'w') as f:
        for idx in range(src_num_sel):
            f.write("%s\t%s\n" % (sel_src_img_list[idx], sel_src_label_list[idx]))
    # generate tgt train list (soft labels add the .npy path as a third column)
    if args.lr_weight_ent > 0:
        with open(tgt_train_lst, 'w') as f:
            for idx in range(tgt_num):
                softlabel_name = image_name_tgt_list[idx].split('.')[0] + '.npy'
                soft_label_tgt_path = osp.join(save_pseudo_label_path, softlabel_name)
                image_tgt_path = osp.join(save_pseudo_label_path, image_name_tgt_list[idx])
                f.write("%s\t%s\t%s\n" % (image_tgt_list[idx], image_tgt_path, soft_label_tgt_path))
    elif args.lr_weight_ent == 0:
        # NOTE: a negative lr_weight_ent leaves tgt_train_lst unwritten,
        # matching the original behavior.
        with open(tgt_train_lst, 'w') as f:
            for idx in range(tgt_num):
                image_tgt_path = osp.join(save_pseudo_label_path, image_name_tgt_list[idx])
                f.write("%s\t%s\n" % (image_tgt_list[idx], image_tgt_path))
    return src_train_lst, tgt_train_lst, src_num_sel
class ScoreUpdater(object):
    """Accumulates confusion matrices over samples and reports IoU.

    Only IoU is computed; accuracy-style metrics are intentionally ignored.
    `reset()` must be called before the first `update()`.
    """
    def __init__(self, valid_labels, c_num, x_num, logger=None, label=None, info=None):
        self._valid_labels = valid_labels
        self._confs = np.zeros((c_num, c_num))
        self._per_cls_iou = np.zeros(c_num)
        self._logger = logger
        self._label = label
        self._info = info
        self._num_class = c_num
        self._num_sample = x_num
    @property
    def info(self):
        return self._info
    def reset(self):
        """Clear accumulated state and restart the speed timer."""
        self._start = time.time()
        self._computed = np.zeros(self._num_sample)  # 1 where sample i has been scored
        self._confs[:] = 0
    def fast_hist(self, label, pred_label, n):
        """n x n confusion matrix between ground truth and prediction."""
        valid = (label >= 0) & (label < n)
        return np.bincount(n * label[valid].astype(int) + pred_label[valid],
                           minlength=n ** 2).reshape(n, n)
    def per_class_iu(self, hist):
        """Per-class intersection-over-union from a confusion matrix."""
        inter = np.diag(hist)
        union = hist.sum(1) + hist.sum(0) - inter
        return inter / union
    def do_updates(self, conf, i, computed=True):
        if computed:
            self._computed[i] = 1
        self._per_cls_iou = self.per_class_iu(conf)
    def update(self, pred_label, label, i, computed=True):
        """Fold sample i into the running confusion matrix and log scores."""
        self._confs += self.fast_hist(label, pred_label, self._num_class)
        self.do_updates(self._confs, i, computed)
        self.scores(i)
    def scores(self, i=None, logger=None):
        """Return per-class IoUs (NaN -> 0) and optionally log a summary."""
        x_num = self._num_sample
        ious = np.nan_to_num(self._per_cls_iou)
        logger = self._logger if logger is None else logger
        if logger is not None:
            if i is not None:
                speed = 1. * self._computed.sum() / (time.time() - self._start)
                logger.info('Done {}/{} with speed: {:.2f}/s'.format(i + 1, x_num, speed))
            name = '' if self._label is None else '{}, '.format(self._label)
            logger.info('{}mean iou: {:.2f}%'.format(name, np.mean(ious) * 100))
            with util.np_print_options(formatter={'float': '{:5.2f}'.format}):
                logger.info('\n{}'.format(ious * 100))
        return ious
def loss_calc(pred, label):
    """Plain 2-D cross-entropy on GPU, skipping IGNORE_LABEL pixels.

    pred: (batch, channels, h, w) logits; label: (batch, h, w) class indices.
    """
    criterion = torch.nn.CrossEntropyLoss(ignore_index=IGNORE_LABEL)
    return criterion.cuda()(pred, label)
def reg_loss_calc(pred, label, reg_weights, args):
    """
    Regularized cross-entropy loss for self-training with hard pseudo-labels.

    The hard labels are converted to one-hot vectors so the KLD-to-uniform
    regularizer can be computed from shared intermediates.

    Args:
        pred: (batch, num_classes, h, w) logits.
        label: (batch, h, w) integer labels; IGNORE_LABEL marks unlabeled pixels.
        reg_weights: per-sample weights; samples with weight 0 contribute no
            regularization.
        args: provides mr_weight_kld (KLD weight) and num_classes.

    Returns:
        Scalar: mean CE over valid pixels, plus the weighted KLD term when any
        regularized valid pixels exist.
    """
    mr_weight_kld = args.mr_weight_kld
    num_class = float(args.num_classes)
    # counts of labeled pixels overall / within regularized samples
    valid_num = torch.sum(label != IGNORE_LABEL).float()
    label_reg = label[reg_weights != 0,:,:]
    valid_reg_num = torch.sum(label_reg != IGNORE_LABEL).float()
    softmax = F.softmax(pred, dim=1) # softmax probabilities
    logsoftmax = F.log_softmax(pred,dim=1) # log-probabilities
    label_expand = torch.unsqueeze(label, 1).repeat(1,int(num_class),1,1)
    labels = label_expand.clone()
    # mark valid pixels with 1 in every channel, ignored pixels with 0
    labels[labels != IGNORE_LABEL] = 1.0
    labels[labels == IGNORE_LABEL] = 0.0
    labels_valid = labels.clone()
    # One-hot trick: cumsum along the class axis counts channels 1..C at valid
    # pixels; the count equals (label + 1) exactly at the true class channel.
    # Everything else is zeroed, remaining entries set to 1, so ignored pixels
    # become all-zero vectors and drop out of the CE sum.
    labels = torch.cumsum(labels, dim=1)
    labels[labels != label_expand + 1] = 0.0
    del label_expand
    labels[labels != 0 ] = 1.0
    reg_weights = reg_weights.float().view(len(reg_weights),1,1,1)
    ce = torch.sum( -logsoftmax*labels ) # cross-entropy loss with vector-form softmax
    softmax_val = softmax*labels_valid
    logsoftmax_val = logsoftmax*labels_valid
    # KLD to the uniform distribution (up to a constant): -sum(log p) / C
    kld = torch.sum( -logsoftmax_val/num_class*reg_weights )
    if valid_reg_num > 0:
        reg_ce = ce/valid_num + (mr_weight_kld*kld)/valid_reg_num
    else:
        reg_ce = ce/valid_num
    return reg_ce
def reg_loss_calc_expand(pred, label, reg_weights, args):
    """
    Regularized cross-entropy loss for self-training with SOFT pseudo-labels.

    Args:
        pred: (batch, num_classes, h, w) logits.
        label: (batch, num_classes, h, w) soft labels; ignored pixels carry an
            all-zero soft-label vector (set by the data loader).
        reg_weights: per-sample weights; samples with weight 0 contribute no
            regularization.
        args: provides mr_weight_kld (KLD weight) and num_classes.

    Returns:
        Scalar: mean soft CE over valid pixels, plus the weighted KLD term
        when any regularized valid pixels exist.
    """
    mr_weight_kld = args.mr_weight_kld
    num_class = float(args.num_classes)
    # a pixel is valid iff its soft-label vector has non-zero sum
    label_sum = torch.sum(label, 1)
    valid_num = torch.sum(label_sum != 0.0).float()
    label_reg = label_sum[reg_weights != 0, :, :]
    valid_reg_num = torch.sum(label_reg != 0.0).float()
    logsoftmax = F.log_softmax(pred, dim=1)  # log-probabilities
    # Bug fix: Tensor.repeat() requires integer repeat counts, but num_class
    # is a float here (the sibling reg_loss_calc already casts with int()).
    label_expand = torch.unsqueeze(label_sum, 1).repeat(1, int(num_class), 1, 1)
    # 1 in every channel at valid pixels, 0 at ignored pixels
    label_valid = label_expand.clone()
    label_valid[label_valid != 0] = 1.0
    reg_weights = reg_weights.float().view(len(reg_weights), 1, 1, 1)
    ce = torch.sum(-logsoftmax * label)  # soft (vector-form) cross-entropy
    logsoftmax_val = logsoftmax * label_valid
    # KLD to the uniform distribution (up to a constant): -sum(log p) / C
    kld = torch.sum(-logsoftmax_val / num_class * reg_weights)
    if valid_reg_num > 0:
        reg_ce = ce / valid_num + (mr_weight_kld * kld) / valid_reg_num
    else:
        reg_ce = ce / valid_num
    return reg_ce
def lr_poly(base_lr, iter, max_iter, power):
    """Polynomial decay: base_lr * (1 - iter/max_iter) ** power."""
    frac = 1 - float(iter) / max_iter
    return base_lr * frac ** power
def get_1x_lr_params_NOscale(model):
    """
    Yield the backbone parameters (conv1/bn1 and layer1-4) trained with the
    base learning rate. Batchnorm parameters with requires_grad set to False
    (frozen in deeplab_resnet.py) are skipped.
    """
    backbone_parts = [model.conv1, model.bn1, model.layer1,
                      model.layer2, model.layer3, model.layer4]
    for part in backbone_parts:
        # NOTE(review): parameters() is recursive, so parameters of nested
        # modules are yielded once per enclosing module — this matches the
        # original iteration behavior exactly.
        for module in part.modules():
            for param in module.parameters():
                if param.requires_grad:
                    yield param
def get_10x_lr_params(model):
    """
    Yield the classifier-head (model.layer5) parameters, which are trained
    with 10x the base learning rate.
    """
    for param in model.layer5.parameters():
        yield param
def adjust_learning_rate(optimizer, i_iter, tot_iter):
    """Poly-decay schedule: group 0 (backbone) gets the base LR, group 1
    (classifier head) gets 10x."""
    new_lr = lr_poly(args.learning_rate, i_iter, tot_iter, args.power)
    optimizer.param_groups[0]['lr'] = new_lr
    optimizer.param_groups[1]['lr'] = new_lr * 10
if __name__ == '__main__':  # script entry point
    main()
| 47,013 | 48.229319 | 225 | py |
CRST | CRST-master/train.py | import argparse
import torch
import torch.nn as nn
from torch.utils import data
import numpy as np
import pickle
import cv2
import torch.optim as optim
import scipy.misc
import torch.backends.cudnn as cudnn
import sys
import os
import os.path as osp
from deeplab.model import Res_Deeplab
from deeplab.loss import CrossEntropy2d
from deeplab.datasets import GTA5DataSet
import matplotlib.pyplot as plt
import random
import timeit
import torchvision.transforms as transforms
import util
start = timeit.default_timer()  # wall-clock start, reported at the end of main()
# Defaults below are exposed as CLI flags in get_arguments().
#IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
IMG_MEAN = np.array((0.406, 0.456, 0.485), dtype=np.float32) # BGR
IMG_STD = np.array((0.225, 0.224, 0.229), dtype=np.float32) # BGR
# IMG_MEAN = [0.485, 0.456, 0.406]
# IMG_STD = [0.229, 0.224, 0.225]
BATCH_SIZE = 4
DATA_DIRECTORY = './datasets/gta5'
DATA_LIST_PATH = './dataset/list/gta5/train.lst'
NUM_CLASSES = 19
IGNORE_LABEL = 255  # Cityscapes trainID convention for unlabeled pixels
INPUT_SIZE = '500,500'
TRAIN_SCALE = '0.5,1.5'
LEARNING_RATE = 2.5e-4
MOMENTUM = 0.9
NUM_STEPS = 100000
POWER = 0.9  # exponent of the poly learning-rate schedule
RANDOM_SEED = 1234
RESTORE_FROM = ''
SAVE_NUM_IMAGES = 2
SAVE_PRED_EVERY = 5000
SNAPSHOT_DIR = './gta_src_train/'
WEIGHT_DECAY = 0.0005
MODEL = 'DeeplabRes101'
LOG_FILE = 'log'
PIN_MEMORY = True
GPU = '0'
def get_arguments():
    """Parse all the arguments provided from the CLI.

    Returns:
        argparse.Namespace with every training option (stored in the
        module-level `args` right after this definition).
    """
    parser = argparse.ArgumentParser(description="DeepLab-ResNet Network")
    # data options
    parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
                        help="Number of images sent to the network in one step.")
    parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY,
                        help="Path to the directory containing the PASCAL VOC dataset.")
    parser.add_argument("--data-list", type=str, default=DATA_LIST_PATH,
                        help="Path to the file listing the images in the dataset.")
    parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
                        help="The index of the label to ignore during the training.")
    parser.add_argument("--input-size", type=str, default=INPUT_SIZE,
                        help="Comma-separated string with height and width of images.")
    # optimization options
    parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
                        help="Base learning rate for training with polynomial decay.")
    parser.add_argument("--momentum", type=float, default=MOMENTUM,
                        help="Momentum component of the optimiser.")
    parser.add_argument("--model", type=str, default=MODEL,
                        help="The base network.")
    parser.add_argument("--not-restore-last", action="store_true",
                        help="Whether to not restore last (FC) layers.")
    parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
                        help="Number of classes to predict (including background).")
    parser.add_argument("--num-steps", type=int, default=NUM_STEPS,
                        help="Number of training steps.")
    parser.add_argument("--power", type=float, default=POWER,
                        help="Decay parameter to compute the learning rate.")
    # augmentation options
    parser.add_argument("--random-mirror", action="store_true",
                        help="Whether to randomly mirror the inputs during the training.")
    parser.add_argument("--random-scale", action="store_true",
                        help="Whether to randomly scale the inputs during the training.")
    parser.add_argument("--train-scale", type=str, default=TRAIN_SCALE,
                        help="The scale for multi-scale training.")
    parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
                        help="Random seed to have reproducible results.")
    # checkpoint / logging options
    parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
                        help="Where restore model parameters from.")
    parser.add_argument("--save-num-images", type=int, default=SAVE_NUM_IMAGES,
                        help="How many images to save.")
    parser.add_argument("--save-pred-every", type=int, default=SAVE_PRED_EVERY,
                        help="Save summaries and checkpoint every often.")
    parser.add_argument("--snapshot-dir", type=str, default=SNAPSHOT_DIR,
                        help="Where to save snapshots of the model.")
    parser.add_argument("--weight-decay", type=float, default=WEIGHT_DECAY,
                        help="Regularisation parameter for L2-loss.")
    parser.add_argument("--gpu", type=str, default=GPU,
                        help="choose gpu device.")
    parser.add_argument("--pin-memory", type=bool, default=PIN_MEMORY,
                        help="Whether to pin memory in train & eval.")
    parser.add_argument("--log-file", type=str, default=LOG_FILE,
                        help="The name of log file.")
    parser.add_argument('--debug',help='True means logging debug info.',
                        default=False, action='store_true')
    return parser.parse_args()
args = get_arguments()  # module-level; read by loss_calc, adjust_learning_rate and main
def loss_calc(pred, label):
    """Standard 2-D cross-entropy on GPU; IGNORE_LABEL pixels are skipped.

    pred: (batch, channels, h, w) logits; label: (batch, h, w) class indices.
    """
    ce = torch.nn.CrossEntropyLoss(ignore_index=IGNORE_LABEL)
    return ce.cuda()(pred, label)
def lr_poly(base_lr, iter, max_iter, power):
    """Polynomial learning-rate decay: base_lr * (1 - iter/max_iter)^power."""
    remaining = 1 - float(iter) / max_iter
    return base_lr * remaining ** power
def get_1x_lr_params_NOscale(model):
    """
    Yield the backbone parameters (conv1/bn1 and layer1-4) trained with the
    base learning rate. Batchnorm parameters with requires_grad set to False
    (frozen in deeplab_resnet.py) are skipped.
    """
    parts = [model.conv1, model.bn1, model.layer1,
             model.layer2, model.layer3, model.layer4]
    for part in parts:
        # NOTE(review): parameters() is recursive, so nested modules'
        # parameters are yielded once per enclosing module — this mirrors
        # the original iteration behavior.
        for sub in part.modules():
            for p in sub.parameters():
                if p.requires_grad:
                    yield p
def get_10x_lr_params(model):
    """
    Yield the classifier-head (model.layer5) parameters, trained with 10x the
    base learning rate.
    """
    for p in model.layer5.parameters():
        yield p
def adjust_learning_rate(optimizer, i_iter):
    """Poly-decay the LR over args.num_steps; the classifier-head group
    (index 1) always gets 10x the backbone LR."""
    new_lr = lr_poly(args.learning_rate, i_iter, args.num_steps, args.power)
    optimizer.param_groups[0]['lr'] = new_lr
    optimizer.param_groups[1]['lr'] = new_lr * 10
def main():
    """Create the model, load ImageNet-pretrained weights, and train on GTA5."""
    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)
    logger = util.set_logger(args.snapshot_dir, args.log_file, args.debug)
    logger.info('start with arguments %s', args)
    # parse "h,w" crop size and "lo,hi" multi-scale training range
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)
    lscale, hscale = map(float, args.train_scale.split(','))
    train_scale = (lscale, hscale)
    cudnn.enabled = True
    # Create network.
    model = Res_Deeplab(num_classes=args.num_classes)
    #saved_state_dict = torch.load(args.restore_from)
    #new_params = model.state_dict().copy()
    #for i in saved_state_dict:
    #    #Scale.layer5.conv2d_list.3.weight
    #    i_parts = i.split('.')
    #    # print i_parts
    #    if not args.num_classes == 21 or not i_parts[1]=='layer5':
    #        new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
    model_urls = {
        'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
        'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
        'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
        'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
        'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    }
    # initialize the backbone from ImageNet-pretrained ResNet-101 ('fc.*' keys skipped)
    saved_state_dict = torch.utils.model_zoo.load_url(model_urls['resnet101'])
    # coco pretrained parameters:
    # saved_state_dict = torch.load(args.restore_from)
    new_params = model.state_dict().copy()
    for i in saved_state_dict:
        #Scale.layer5.conv2d_list.3.weight
        i_parts = str(i).split('.')
        # print i_parts
        if not i_parts[0]=='fc':
            new_params['.'.join(i_parts[0:])] = saved_state_dict[i]
    model.load_state_dict(new_params)
    #model.float()
    model.eval() # use_global_stats = True: keep frozen BN running statistics during training
    #model.train()
    device = torch.device("cuda:" + str(args.gpu))
    model.to(device)
    cudnn.benchmark = True
    trainloader = data.DataLoader(GTA5DataSet(args.data_dir, args.data_list, max_iters=args.num_steps*args.batch_size, crop_size=input_size,train_scale=train_scale,
                    scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN, std = IMG_STD),
                    batch_size=args.batch_size, shuffle=True, num_workers=5, pin_memory=args.pin_memory)
    # backbone at base LR (group 0), classifier head at 10x LR (group 1)
    optimizer = optim.SGD([{'params': get_1x_lr_params_NOscale(model), 'lr': args.learning_rate },
                {'params': get_10x_lr_params(model), 'lr': 10*args.learning_rate}],
                lr=args.learning_rate, momentum=args.momentum,weight_decay=args.weight_decay)
    optimizer.zero_grad()
    # upsample logits back to the crop size before computing the loss
    interp = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)
    for i_iter, batch in enumerate(trainloader):
        images, labels, _, _ = batch
        images = images.to(device)
        labels = labels.long().to(device)
        optimizer.zero_grad()
        adjust_learning_rate(optimizer, i_iter)
        pred = interp(model(images))
        loss = loss_calc(pred, labels)
        loss.backward()
        optimizer.step()
        # print('iter = ', i_iter, 'of', args.num_steps,'completed, loss = ', loss.data.cpu().numpy())
        logger.info('iter = {} of {} completed, loss = {:.4f}'.format(i_iter,args.num_steps,loss.data.cpu().numpy()))
        if i_iter >= args.num_steps-1:
            print('save model ...')
            torch.save(model.state_dict(),osp.join(args.snapshot_dir, 'VOC12_scenes_'+str(args.num_steps)+'.pth'))
            break
        if i_iter % args.save_pred_every == 0 and i_iter!=0:
            print('taking snapshot ...')
            torch.save(model.state_dict(),osp.join(args.snapshot_dir, 'VOC12_scenes_'+str(i_iter)+'.pth'))
    end = timeit.default_timer()
    print(end-start,'seconds')
if __name__ == '__main__':  # script entry point
    main()
| 10,806 | 39.324627 | 164 | py |
CRST | CRST-master/deeplab/loss.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Variable
class CrossEntropy2d(nn.Module):
    """2-D cross-entropy loss for segmentation with an ignore label.

    Pixels whose target equals `ignore_label` (or is negative) are excluded
    from the loss.
    """

    def __init__(self, size_average=True, ignore_label=255):
        super(CrossEntropy2d, self).__init__()
        self.size_average = size_average
        self.ignore_label = ignore_label

    def forward(self, predict, target, weight=None):
        """
        Args:
            predict: (n, c, h, w) logits
            target: (n, h, w) integer class labels
            weight (Tensor, optional): a manual rescaling weight given to each class.
                If given, has to be a Tensor of size "nclasses"
        """
        assert not target.requires_grad
        assert predict.dim() == 4
        assert target.dim() == 3
        assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0))
        assert predict.size(2) == target.size(1), "{0} vs {1} ".format(predict.size(2), target.size(1))
        # Bug fix: the message previously read target.size(3), which raises
        # IndexError on a 3-D target whenever the assertion fails.
        assert predict.size(3) == target.size(2), "{0} vs {1} ".format(predict.size(3), target.size(2))
        n, c, h, w = predict.size()
        target_mask = (target >= 0) * (target != self.ignore_label)
        target = target[target_mask]
        if not target.data.dim():
            return torch.zeros(1)
        # gather the (pixels, classes) logits of the valid pixels only
        predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
        predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
        # `size_average` was removed from F.cross_entropy; map it to `reduction`
        loss = F.cross_entropy(predict, target, weight=weight,
                               reduction='mean' if self.size_average else 'sum')
        return loss
CRST | CRST-master/deeplab/model.py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch
import numpy as np
affine_par = True  # construct BatchNorm layers with learnable affine weight/bias
def outS(i):
    """Output spatial size of the network for an input of spatial size i
    (three stride-2 stages with their +1 padding conventions).

    NOTE: true division keeps the float result the original produces
    under Python 3.
    """
    size = int(i)
    size = (size + 1) / 2
    size = int(np.ceil((size + 1) / 2.0))
    size = (size + 1) / 2
    return size
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias."""
    return nn.Conv2d(in_planes, out_planes, 3,
                     stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
    """Standard two-conv residual block (ResNet-18/34 style)."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, affine=affine_par)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, affine=affine_par)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # project the identity path when shapes change
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + identity)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 (dilated) -> 1x1 residual bottleneck with frozen BN params.

    NOTE: the stride sits on the 1x1 conv (DeepLab convention, unlike
    torchvision which strides the 3x3).
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)  # change
        self.bn1 = nn.BatchNorm2d(planes, affine=affine_par)
        for p in self.bn1.parameters():
            p.requires_grad = False
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=dilation, bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes, affine=affine_par)
        for p in self.bn2.parameters():
            p.requires_grad = False
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4, affine=affine_par)
        for p in self.bn3.parameters():
            p.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        return self.relu(out + identity)
class Classifier_Module(nn.Module):
    """ASPP-style classification head: parallel dilated 3x3 convolutions over
    a 2048-channel feature map whose outputs are summed."""

    def __init__(self, dilation_series, padding_series, num_classes):
        super(Classifier_Module, self).__init__()
        self.conv2d_list = nn.ModuleList()
        for dilation, padding in zip(dilation_series, padding_series):
            self.conv2d_list.append(
                nn.Conv2d(2048, num_classes, kernel_size=3, stride=1,
                          padding=padding, dilation=dilation, bias=True))
        for branch in self.conv2d_list:
            branch.weight.data.normal_(0, 0.01)

    def forward(self, x):
        out = self.conv2d_list[0](x)
        for branch in self.conv2d_list[1:]:
            out = out + branch(x)
        return out
class Residual_Covolution(nn.Module):
    """Residual refinement unit: predicts a segmentation map from dilated
    features and feeds a correction back onto the incoming feature map.
    Returns (refined features, segmentation logits)."""

    def __init__(self, icol, ocol, num_classes):
        super(Residual_Covolution, self).__init__()
        self.conv1 = nn.Conv2d(icol, ocol, kernel_size=3, stride=1, padding=12, dilation=12, bias=True)
        self.conv2 = nn.Conv2d(ocol, num_classes, kernel_size=3, stride=1, padding=12, dilation=12, bias=True)
        self.conv3 = nn.Conv2d(num_classes, ocol, kernel_size=1, stride=1, padding=0, dilation=1, bias=True)
        self.conv4 = nn.Conv2d(ocol, icol, kernel_size=1, stride=1, padding=0, dilation=1, bias=True)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        down = self.relu(self.conv1(x))
        seg = self.conv2(down)
        feedback = down + self.relu(self.conv3(seg))
        out = x + self.relu(self.conv4(feedback))
        return out, seg
class Residual_Refinement_Module(nn.Module):
    """Two stacked Residual_Covolution units; returns a list of
    [coarse segmentation, coarse + residual segmentation]."""

    def __init__(self, num_classes):
        super(Residual_Refinement_Module, self).__init__()
        self.RC1 = Residual_Covolution(2048, 512, num_classes)
        self.RC2 = Residual_Covolution(2048, 512, num_classes)

    def forward(self, x):
        refined, seg1 = self.RC1(x)
        _, seg2 = self.RC2(refined)
        return [seg1, seg1 + seg2]
class ResNet_Refine(nn.Module):
    """DeepLab ResNet backbone (dilated layer3/layer4) topped with the
    residual refinement head instead of the plain ASPP classifier.
    Attribute names define the state_dict keys used by checkpoint loading."""
    def __init__(self, block, layers, num_classes):
        self.inplanes = 64
        super(ResNet_Refine, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, affine = affine_par)
        # stem BN parameters are frozen
        for i in self.bn1.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        # stride 1 + dilation keeps output stride at 8
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        self.layer5 = Residual_Refinement_Module(num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        # for i in m.parameters():
        #     i.requires_grad = False

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Build one residual stage; a projection shortcut (with frozen BN)
        is added whenever shape or dilation changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion,affine = affine_par))
            # freeze the downsample-path BN parameters
            for i in downsample._modules['1'].parameters():
                i.requires_grad = False
        layers = []
        layers.append(block(self.inplanes, planes, stride,dilation=dilation, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        # layer5 returns [seg1, seg1+seg2]
        return x
class ResNet(nn.Module):
    """DeepLab ResNet backbone with dilated layer3/layer4 (output stride 8)
    and an ASPP-style Classifier_Module head (layer5).
    Attribute names define the state_dict keys used by checkpoint loading."""
    def __init__(self, block, layers, num_classes):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, affine = affine_par)
        # stem BN parameters are frozen
        for i in self.bn1.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        # stride 1 + dilation keeps output stride at 8
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        self.layer5 = self._make_pred_layer(Classifier_Module, [6,12,18,24],[6,12,18,24],num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        # for i in m.parameters():
        #     i.requires_grad = False

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Build one residual stage; a projection shortcut (with frozen BN)
        is added whenever shape or dilation changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion,affine = affine_par))
            # freeze the downsample-path BN parameters
            for i in downsample._modules['1'].parameters():
                i.requires_grad = False
        layers = []
        layers.append(block(self.inplanes, planes, stride,dilation=dilation, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)

    def _make_pred_layer(self,block, dilation_series, padding_series,num_classes):
        # classifier-head factory (block is Classifier_Module)
        return block(dilation_series,padding_series,num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        return x
class MS_Deeplab(nn.Module):
    """Multi-scale inference wrapper: runs a shared ResNet at 1.0x/0.75x/0.5x
    and additionally returns the element-wise max fusion of the three."""
    def __init__(self,block,num_classes):
        super(MS_Deeplab,self).__init__()
        self.Scale = ResNet(block,[3, 4, 23, 3],num_classes)   #changed to fix #4
    def forward(self,x):
        output = self.Scale(x) # for original scale
        # NOTE(review): only dim 2 is used for both sizes, so this assumes
        # square inputs (H == W) — confirm with callers.
        output_size = output.size()[2]
        input_size = x.size()[2]
        # NOTE(review): building Upsample modules inside forward() re-creates
        # them every call; harmless (no parameters) but wasteful.
        self.interp1 = nn.Upsample(size=(int(input_size*0.75)+1, int(input_size*0.75)+1), mode='bilinear')
        self.interp2 = nn.Upsample(size=(int(input_size*0.5)+1, int(input_size*0.5)+1), mode='bilinear')
        self.interp3 = nn.Upsample(size=(output_size, output_size), mode='bilinear')
        x75 = self.interp1(x)
        output75 = self.interp3(self.Scale(x75)) # for 0.75x scale
        x5 = self.interp2(x)
        output5 = self.interp3(self.Scale(x5)) # for 0.5x scale
        out_max = torch.max(torch.max(output, output75), output5)
        return [output, output75, output5, out_max]
def Res_Ms_Deeplab(num_classes=21):
    """Factory for the multi-scale DeepLab-ResNet101 model."""
    return MS_Deeplab(Bottleneck, num_classes)
def Res_Deeplab(num_classes=21, is_refine=False):
    """Factory for DeepLab-ResNet101; `is_refine` selects the residual
    refinement head instead of the ASPP classifier."""
    backbone_cls = ResNet_Refine if is_refine else ResNet
    return backbone_cls(Bottleneck, [3, 4, 23, 3], num_classes)
| 11,127 | 36.217391 | 139 | py |
CRST | CRST-master/deeplab/datasets.py | import os
import os.path as osp
import numpy as np
import random
import matplotlib.pyplot as plt
import collections
import torch
import torchvision.transforms as transforms
import torchvision
import cv2
from torch.utils import data
import sys
from PIL import Image
# Cityscapes 19-class trainID color palette, flattened (R, G, B) triplets.
palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
           220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
           0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
# pad to the 256 * 3 entries PIL expects for a mode-'P' palette
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
    palette.append(0)
def colorize_mask(mask):
    """Turn a 2-D trainID mask (numpy array) into a palettized PIL image."""
    colored = Image.fromarray(mask.astype(np.uint8)).convert('P')
    colored.putpalette(palette)
    return colored
class VOCDataSet(data.Dataset):
    """Pascal-VOC-style segmentation dataset with random scale / pad / crop /
    horizontal-flip augmentation. Images are loaded with OpenCV (BGR) and
    mean-subtracted."""
    def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321), mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.is_mirror = mirror
        # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434])
        self.img_ids = [i_id.strip() for i_id in open(list_path)]
        if not max_iters==None:
            # replicate the id list so one pass covers max_iters samples
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
        self.files = []
        # for split in ["train", "trainval", "val"]:
        for name in self.img_ids:
            img_file = osp.join(self.root, "img/%s.jpg" % name)
            label_file = osp.join(self.root, "gt/%s.png" % name)
            self.files.append({
                "img": img_file,
                "label": label_file,
                "name": name
            })
    def __len__(self):
        return len(self.files)
    def generate_scale_label(self, image, label):
        """Random rescale by a factor in {0.5, 0.6, ..., 1.6}; bilinear for the
        image, nearest-neighbor for the label."""
        f_scale = 0.5 + random.randint(0, 11) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_NEAREST)
        return image, label
    def __getitem__(self, index):
        """Return (image CHW float32, label HW float32, original shape, name)."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        size = image.shape
        name = datafiles["name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image -= self.mean
        img_h, img_w = label.shape
        # pad up to the crop size; labels are padded with ignore_label
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape
        # random crop of (crop_h, crop_w)
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        # roi = cv2.Rect(w_off, h_off, self.crop_w, self.crop_h);
        image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        #image = image[:, :, ::-1] # change to BGR
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        if self.is_mirror:
            flip = np.random.choice(2) * 2 - 1  # -1 (flip) or 1 (no flip)
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        return image.copy(), label.copy(), np.array(size), name
class GTA5DataSet(data.Dataset):
    def __init__(self, root, list_path, pseudo_root = None, max_iters=None, crop_size=(500, 500), train_scale = (0.5, 1.5), mean=(128, 128, 128), std = (1,1,1), scale=True, mirror=True, ignore_label=255):
        """GTA5 (or pseudo-labeled target) segmentation dataset.

        `list_path` is a tab-separated file of "<image>  <label>" pairs.
        When `pseudo_root` is given, label paths are taken as-is instead of
        being joined onto `root`.
        """
        self.root = root
        self.pseudo_root = pseudo_root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.crop_h, self.crop_w = crop_size  # NOTE(review): duplicate of the line above; harmless
        self.lscale, self.hscale = train_scale
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434])
        self.img_ids = []
        self.label_ids = []
        with open(list_path) as f:
            for item in f.readlines():
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        if not max_iters==None:
            # replicate the lists so one pass covers max_iters samples
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids)))
        self.files = []
        # for split in ["train", "trainval", "val"]:
        for idx in range(len(self.img_ids)):
            img_name = self.img_ids[idx]
            label_name = self.label_ids[idx]
            img_file = osp.join(self.root, img_name)
            if self.pseudo_root == None:
                label_file = osp.join(self.root, label_name)
            else:
                # pseudo-labels live outside `root`; use the listed path directly
                label_file = label_name
            self.files.append({
                "img": img_file,
                "label": label_file,
                "img_name": img_name,
                "label_name": label_name
            })
def __len__(self):
return len(self.files)
def generate_scale_label(self, image, label):
# f_scale = 0.5 + random.randint(0, 11) / 10.0
f_scale = self.lscale + random.randint(0, int((self.hscale-self.lscale)*10)) / 10.0
image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_LINEAR)
label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_NEAREST)
return image, label
def __getitem__(self, index):
datafiles = self.files[index]
image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR) # OpenCV read image as BGR, not RGB
label = np.array(Image.open(datafiles["label"]))
#
sys.path.insert(0, 'dataset/helpers')
from labels import id2label, trainId2label
#
label_2_id = 255 * np.ones((256,))
for l in id2label:
if l in (-1, 255):
continue
label_2_id[l] = id2label[l].trainId
# id_2_label = np.array([trainId2label[_].id for _ in trainId2label if _ not in (-1, 255)])
# valid_labels = sorted(set(id_2_label.ravel()))
label = label_2_id[label]
#
size = image.shape
img_name = datafiles["img_name"]
if self.scale:
image, label = self.generate_scale_label(image, label)
image = np.asarray(image, np.float32)
image = image/255.0 # scale to [0,1]
image -= self.mean # BGR
image = image/self.std#np.reshape(self.std,(1,1,3))
img_h, img_w = label.shape
pad_h = max(self.crop_h - img_h, 0)
pad_w = max(self.crop_w - img_w, 0)
if pad_h > 0 or pad_w > 0:
img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(0.0, 0.0, 0.0))
label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(self.ignore_label,))
else:
img_pad, label_pad = image, label
img_h, img_w = label_pad.shape
h_off = random.randint(0, img_h - self.crop_h)
w_off = random.randint(0, img_w - self.crop_w)
# roi = cv2.Rect(w_off, h_off, self.crop_w, self.crop_h);
image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
image = image[:, :, ::-1] # change to RGB
image = image.transpose((2, 0, 1))
if self.is_mirror:
flip = np.random.choice(2) * 2 - 1
image = image[:, :, ::flip]
label = label[:, ::flip]
return image.copy(), label.copy(), np.array(size), img_name
class SYNTHIADataSet(data.Dataset):
    """SYNTHIA segmentation dataset.

    Same pipeline as GTA5DataSet but remaps ids with the SYNTHIA label
    helper (``labels_synthia``). Items are ``(image, label, size, img_name)``
    with a float32 CHW RGB image and a float32 HW train-id label map.
    """

    def __init__(self, root, list_path, pseudo_root = None, max_iters=None, crop_size=(500, 500), train_scale = (0.5, 1.5), mean=(128, 128, 128), std = (1,1,1), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.pseudo_root = pseudo_root
        self.list_path = list_path
        # fixed: crop_size was unpacked twice in the original
        self.crop_h, self.crop_w = crop_size
        self.lscale, self.hscale = train_scale
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        with open(list_path) as f:
            for item in f.readlines():
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        if max_iters is not None:
            # repeat the lists so one pass yields at least max_iters samples
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids)))
        self.files = []
        for idx in range(len(self.img_ids)):
            img_name = self.img_ids[idx]
            label_name = self.label_ids[idx]
            img_file = osp.join(self.root, img_name)
            if self.pseudo_root is None:
                label_file = osp.join(self.root, label_name)
            else:
                # pseudo-label lists already carry usable paths
                label_file = label_name
            self.files.append({
                "img": img_file,
                "label": label_file,
                "img_name": img_name,
                "label_name": label_name
            })

    def __len__(self):
        """Number of samples (after any max_iters repetition)."""
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image (bilinear) and label (nearest-neighbor)
        by a factor drawn in 0.1 steps from [lscale, hscale]."""
        f_scale = self.lscale + random.randint(0, int((self.hscale-self.lscale)*10)) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_NEAREST)
        return image, label

    def __getitem__(self, index):
        """Load, remap, normalize, augment and crop one (image, label) pair."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # OpenCV reads BGR
        label = np.array(Image.open(datafiles["label"]))
        # map raw SYNTHIA ids -> train ids via a 256-entry lookup table
        sys.path.insert(0, 'dataset/helpers')
        from labels_synthia import id2label, trainId2label
        label_2_id = 255 * np.ones((256,))
        for l in id2label:
            if l in (-1, 255):
                continue
            label_2_id[l] = id2label[l].trainId
        label = label_2_id[label]
        size = image.shape
        img_name = datafiles["img_name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image = image/255.0  # scale to [0,1]
        image -= self.mean   # mean is applied in BGR order
        image = image/self.std
        img_h, img_w = label.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            # pad bottom/right so the crop window always fits
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        image = image[:, :, ::-1]  # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        if self.is_mirror:
            flip = np.random.choice(2) * 2 - 1  # -1 or 1: maybe flip horizontally
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        return image.copy(), label.copy(), np.array(size), img_name
class SYNTHIASTDataSet(data.Dataset):
    """SYNTHIA self-training dataset.

    Identical pipeline to SYNTHIADataSet but additionally carries a fixed
    ``reg_weight`` (self-training regularization weight) that is returned
    with every item: ``(image, label, size, img_name, reg_weight)``.
    """

    def __init__(self, root, list_path, reg_weight = 0.0, pseudo_root = None, max_iters=None, crop_size=(500, 500), train_scale = (0.5, 1.5), mean=(128, 128, 128), std = (1,1,1), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.pseudo_root = pseudo_root
        self.list_path = list_path
        # fixed: crop_size was unpacked twice in the original
        self.crop_h, self.crop_w = crop_size
        self.lscale, self.hscale = train_scale
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        self.reg_weight = reg_weight
        with open(list_path) as f:
            for item in f.readlines():
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        if max_iters is not None:
            # repeat the lists so one pass yields at least max_iters samples
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids)))
        self.files = []
        for idx in range(len(self.img_ids)):
            img_name = self.img_ids[idx]
            label_name = self.label_ids[idx]
            img_file = osp.join(self.root, img_name)
            if self.pseudo_root is None:
                label_file = osp.join(self.root, label_name)
            else:
                # pseudo-label lists already carry usable paths
                label_file = label_name
            self.files.append({
                "img": img_file,
                "label": label_file,
                "img_name": img_name,
                "label_name": label_name
            })

    def __len__(self):
        """Number of samples (after any max_iters repetition)."""
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image (bilinear) and label (nearest-neighbor)
        by a factor drawn in 0.1 steps from [lscale, hscale]."""
        f_scale = self.lscale + random.randint(0, int((self.hscale-self.lscale)*10)) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_NEAREST)
        return image, label

    def __getitem__(self, index):
        """Load, remap, normalize, augment and crop one (image, label) pair."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # OpenCV reads BGR
        label = np.array(Image.open(datafiles["label"]))
        # map raw SYNTHIA ids -> train ids via a 256-entry lookup table
        sys.path.insert(0, 'dataset/helpers')
        from labels_synthia import id2label, trainId2label
        label_2_id = 255 * np.ones((256,))
        for l in id2label:
            if l in (-1, 255):
                continue
            label_2_id[l] = id2label[l].trainId
        label = label_2_id[label]
        size = image.shape
        img_name = datafiles["img_name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image = image/255.0  # scale to [0,1]
        image -= self.mean   # mean is applied in BGR order
        image = image/self.std
        img_h, img_w = label.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            # pad bottom/right so the crop window always fits
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        image = image[:, :, ::-1]  # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        if self.is_mirror:
            flip = np.random.choice(2) * 2 - 1  # -1 or 1: maybe flip horizontally
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        return image.copy(), label.copy(), np.array(size), img_name, self.reg_weight
class GTA5STDataSet(data.Dataset):
    """GTA5 self-training dataset.

    Identical pipeline to GTA5DataSet but additionally carries a fixed
    ``reg_weight`` (self-training regularization weight) that is returned
    with every item: ``(image, label, size, img_name, reg_weight)``.
    """

    def __init__(self, root, list_path, reg_weight = 0.0, pseudo_root = None, max_iters=None, crop_size=(500, 500), train_scale = (0.5, 1.5), mean=(128, 128, 128), std = (1,1,1), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.pseudo_root = pseudo_root
        self.list_path = list_path
        # fixed: crop_size was unpacked twice in the original
        self.crop_h, self.crop_w = crop_size
        self.lscale, self.hscale = train_scale
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        self.reg_weight = reg_weight
        with open(list_path) as f:
            for item in f.readlines():
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        if max_iters is not None:
            # repeat the lists so one pass yields at least max_iters samples
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids)))
        self.files = []
        for idx in range(len(self.img_ids)):
            img_name = self.img_ids[idx]
            label_name = self.label_ids[idx]
            img_file = osp.join(self.root, img_name)
            if self.pseudo_root is None:
                label_file = osp.join(self.root, label_name)
            else:
                # pseudo-label lists already carry usable paths
                label_file = label_name
            self.files.append({
                "img": img_file,
                "label": label_file,
                "img_name": img_name,
                "label_name": label_name
            })

    def __len__(self):
        """Number of samples (after any max_iters repetition)."""
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image (bilinear) and label (nearest-neighbor)
        by a factor drawn in 0.1 steps from [lscale, hscale]."""
        f_scale = self.lscale + random.randint(0, int((self.hscale-self.lscale)*10)) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_NEAREST)
        return image, label

    def __getitem__(self, index):
        """Load, remap, normalize, augment and crop one (image, label) pair."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # OpenCV reads BGR
        label = np.array(Image.open(datafiles["label"]))
        # map raw GTA5 ids -> train ids via a 256-entry lookup table
        sys.path.insert(0, 'dataset/helpers')
        from labels import id2label, trainId2label
        label_2_id = 255 * np.ones((256,))
        for l in id2label:
            if l in (-1, 255):
                continue
            label_2_id[l] = id2label[l].trainId
        label = label_2_id[label]
        size = image.shape
        img_name = datafiles["img_name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image = image/255.0  # scale to [0,1]
        image -= self.mean   # mean is applied in BGR order
        image = image/self.std
        img_h, img_w = label.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            # pad bottom/right so the crop window always fits
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        image = image[:, :, ::-1]  # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        if self.is_mirror:
            flip = np.random.choice(2) * 2 - 1  # -1 or 1: maybe flip horizontally
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        return image.copy(), label.copy(), np.array(size), img_name, self.reg_weight
class SrcSTDataSet(data.Dataset):
    """Source-domain self-training dataset.

    Same pipeline as GTA5STDataSet, but ``data_src`` ('gta' or 'synthia')
    selects which label-id helper is used for the id -> train-id remap.
    Items are ``(image, label, size, img_name, reg_weight)``.
    """

    def __init__(self, root, list_path, data_src=None, reg_weight = 0.0, pseudo_root = None, max_iters=None, crop_size=(500, 500), train_scale = (0.5, 1.5), mean=(128, 128, 128), std = (1,1,1), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.pseudo_root = pseudo_root
        self.list_path = list_path
        # fixed: crop_size was unpacked twice in the original
        self.crop_h, self.crop_w = crop_size
        self.lscale, self.hscale = train_scale
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        self.reg_weight = reg_weight
        self.data_src = data_src
        with open(list_path) as f:
            for item in f.readlines():
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        if max_iters is not None:
            # repeat the lists so one pass yields at least max_iters samples
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids)))
        self.files = []
        for idx in range(len(self.img_ids)):
            img_name = self.img_ids[idx]
            label_name = self.label_ids[idx]
            img_file = osp.join(self.root, img_name)
            if self.pseudo_root is None:
                label_file = osp.join(self.root, label_name)
            else:
                # pseudo-label lists already carry usable paths
                label_file = label_name
            self.files.append({
                "img": img_file,
                "label": label_file,
                "img_name": img_name,
                "label_name": label_name
            })

    def __len__(self):
        """Number of samples (after any max_iters repetition)."""
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image (bilinear) and label (nearest-neighbor)
        by a factor drawn in 0.1 steps from [lscale, hscale]."""
        f_scale = self.lscale + random.randint(0, int((self.hscale-self.lscale)*10)) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_NEAREST)
        return image, label

    def __getitem__(self, index):
        """Load, remap, normalize, augment and crop one (image, label) pair."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # OpenCV reads BGR
        label = np.array(Image.open(datafiles["label"]))
        # pick the id -> train-id helper for the selected source domain
        sys.path.insert(0, 'dataset/helpers')
        if self.data_src == 'gta':
            from labels import id2label
        elif self.data_src == 'synthia':
            from labels_synthia import id2label
        else:
            # fixed: the original fell through to an obscure NameError here
            raise ValueError("data_src must be 'gta' or 'synthia'")
        label_2_id = 255 * np.ones((256,))
        for l in id2label:
            if l in (-1, 255):
                continue
            label_2_id[l] = id2label[l].trainId
        label = label_2_id[label]
        size = image.shape
        img_name = datafiles["img_name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image = image/255.0  # scale to [0,1]
        image -= self.mean   # mean is applied in BGR order
        image = image/self.std
        img_h, img_w = label.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            # pad bottom/right so the crop window always fits
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        image = image[:, :, ::-1]  # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        if self.is_mirror:
            flip = np.random.choice(2) * 2 - 1  # -1 or 1: maybe flip horizontally
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        return image.copy(), label.copy(), np.array(size), img_name, self.reg_weight
class SoftSrcSTDataSet(data.Dataset):
    """Source-domain self-training dataset with one-hot label output.

    Same loading/augmentation pipeline as SrcSTDataSet, but the returned
    label is a float32 ``(num_classes, H, W)`` one-hot volume in which
    ignored pixels (``ignore_label``) are all-zero vectors. Items are
    ``(image, labels, size, img_name, reg_weight)``.
    """

    def __init__(self, root, list_path, data_src = None, num_classes = None, reg_weight = 0.0, pseudo_root = None, max_iters=None, crop_size=(500, 500), train_scale = (0.5, 1.5), mean=(128, 128, 128), std = (1,1,1), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.pseudo_root = pseudo_root
        self.list_path = list_path
        # fixed: crop_size was unpacked twice in the original
        self.crop_h, self.crop_w = crop_size
        self.lscale, self.hscale = train_scale
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        self.reg_weight = reg_weight
        self.data_src = data_src
        self.num_classes = num_classes
        with open(list_path) as f:
            for item in f.readlines():
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        if max_iters is not None:
            # repeat the lists so one pass yields at least max_iters samples
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids)))
        self.files = []
        for idx in range(len(self.img_ids)):
            img_name = self.img_ids[idx]
            label_name = self.label_ids[idx]
            img_file = osp.join(self.root, img_name)
            if self.pseudo_root is None:
                label_file = osp.join(self.root, label_name)
            else:
                # pseudo-label lists already carry usable paths
                label_file = label_name
            self.files.append({
                "img": img_file,
                "label": label_file,
                "img_name": img_name,
                "label_name": label_name
            })

    def __len__(self):
        """Number of samples (after any max_iters repetition)."""
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image (bilinear) and label (nearest-neighbor)
        by a factor drawn in 0.1 steps from [lscale, hscale]."""
        f_scale = self.lscale + random.randint(0, int((self.hscale-self.lscale)*10)) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_NEAREST)
        return image, label

    def __getitem__(self, index):
        """Load, remap, normalize, augment and crop one sample, then expand
        the label map into a (num_classes, H, W) one-hot volume."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # OpenCV reads BGR
        label = np.array(Image.open(datafiles["label"]))
        # pick the id -> train-id helper for the selected source domain
        sys.path.insert(0, 'dataset/helpers')
        if self.data_src == 'gta':
            from labels import id2label
        elif self.data_src == 'synthia':
            from labels_synthia import id2label
        else:
            # fixed: the original fell through to an obscure NameError here
            raise ValueError("data_src must be 'gta' or 'synthia'")
        label_2_id = 255 * np.ones((256,))
        for l in id2label:
            if l in (-1, 255):
                continue
            label_2_id[l] = id2label[l].trainId
        label = label_2_id[label]
        size = image.shape
        img_name = datafiles["img_name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image = image/255.0  # scale to [0,1]
        image -= self.mean   # mean is applied in BGR order
        image = image/self.std
        img_h, img_w = label.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            # pad bottom/right so the crop window always fits
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        img_h, img_w = label_pad.shape
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        image = image[:, :, ::-1]  # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        if self.is_mirror:
            flip = np.random.choice(2) * 2 - 1  # -1 or 1: maybe flip horizontally
            image = image[:, :, ::flip]
            label = label[:, ::flip]
        # one-hot encode via cumsum: valid pixels become 1s, ignored pixels 0s;
        # cumsum along the class axis counts 1..C, and only the position where
        # the running count equals (class id + 1) survives -> one-hot vector.
        # Ignored pixels never match (ignore_label + 1) and stay all-zero.
        label_expand = np.tile( np.expand_dims(label, axis=2), (1, 1, self.num_classes) )
        labels = label_expand.copy()
        labels[labels != self.ignore_label] = 1.0
        labels[labels == self.ignore_label] = 0.0
        labels = np.cumsum(labels, axis=2)
        labels[labels != label_expand + 1] = 0.0
        del label_expand
        labels[labels != 0.0] = 1.0
        labels = labels.transpose((2,0,1))  # HWC -> CHW
        return image.copy(), labels.copy(), np.array(size), img_name, self.reg_weight
class SoftGTA5StMineDataSet(data.Dataset):
    """Self-training dataset with soft pseudo-labels and class mining.

    Each list line is ``image\\tlabel\\tsoftlabel`` where the soft label is a
    ``.npy`` per-pixel class-probability map. With probability
    ``mine_chance``, the crop is centered on a pixel of a randomly chosen
    class from ``mine_id`` (rare-class mining); otherwise the crop is
    uniform. Items are ``(image, labels, size, img_name, reg_weight)`` with
    ``labels`` a float32 ``(num_classes, H, W)`` soft-label volume in which
    ignored pixels are all-zero.
    """

    def __init__(self, root, list_path, data_src=None, num_classes = None, reg_weight = 0.0, rare_id = None, mine_id = None, mine_chance = None, pseudo_root = None, max_iters=None, crop_size=(500, 500), train_scale = (0.5, 1.5), mean=(128, 128, 128), std = (1,1,1), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.pseudo_root = pseudo_root
        self.list_path = list_path
        # fixed: crop_size was unpacked twice in the original
        self.crop_h, self.crop_w = crop_size
        self.lscale, self.hscale = train_scale
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        self.softlabel_ids = []
        self.reg_weight = reg_weight
        self.rare_id = rare_id
        self.mine_id = mine_id
        self.mine_chance = mine_chance
        self.data_src = data_src
        self.num_classes = num_classes
        with open(list_path) as f:
            for item in f.readlines():
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
                self.softlabel_ids.append(fields[2])
        if max_iters is not None:
            # repeat the lists so one pass yields at least max_iters samples
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids)))
            self.softlabel_ids = self.softlabel_ids * int(np.ceil(float(max_iters) / len(self.softlabel_ids)))
        self.files = []
        for idx in range(len(self.img_ids)):
            img_name = self.img_ids[idx]
            label_name = self.label_ids[idx]
            softlabel_name = self.softlabel_ids[idx]
            img_file = osp.join(self.root, img_name)
            if self.pseudo_root is None:
                label_file = osp.join(self.root, label_name)
                softlabel_file = osp.join(self.root, softlabel_name)
            else:
                # pseudo-label lists already carry usable paths
                label_file = label_name
                softlabel_file = softlabel_name
            self.files.append({
                "img": img_file,
                "label": label_file,
                "softlabel": softlabel_file,
                "img_name": img_name,
                "label_name": label_name,
                "softlabel_name": softlabel_name
            })

    def __len__(self):
        """Number of samples (after any max_iters repetition)."""
        return len(self.files)

    def generate_scale_label(self, image, label, input_softlabel):
        """Randomly rescale image/label and the soft label by the same factor.

        cv2.resize only handles up to 4 channels, so the num_classes-channel
        soft label is resized in groups of 3 channels, then re-normalized so
        each pixel's class probabilities sum to 1.
        """
        f_scale = self.lscale + random.randint(0, int((self.hscale-self.lscale)*10)) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_NEAREST)
        h, w = label.shape
        num_group = int(np.ceil(self.num_classes/3.0))
        softlabel = np.zeros((h, w, self.num_classes), dtype=np.float32)
        start_idx = 0
        for idx in range(num_group):
            clabel = input_softlabel[:, :, start_idx:start_idx+3]
            clabel = cv2.resize(clabel, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
            softlabel[:, :, start_idx:start_idx+3] = clabel.reshape(h, w, -1)
            start_idx = start_idx + 3
        # re-normalize per pixel (bilinear interpolation breaks the simplex)
        softlabel = softlabel.transpose(2, 0, 1)/np.sum(softlabel, 2)
        softlabel = softlabel.transpose(1, 2, 0)
        return image, label, softlabel

    def __getitem__(self, index):
        """Load one sample, optionally center the crop on a mined rare class,
        and return the soft-label volume masked to valid pixels."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # OpenCV reads BGR
        label = np.array(Image.open(datafiles["label"]))
        softlabel = np.load(datafiles["softlabel"])
        # pick the id -> train-id helper for the selected source domain
        sys.path.insert(0, 'dataset/helpers')
        if self.data_src == 'gta':
            from labels import id2label
        elif self.data_src == 'synthia':
            from labels_cityscapes_synthia import id2label
        else:
            # fixed: the original fell through to an obscure NameError here
            raise ValueError("data_src must be 'gta' or 'synthia'")
        label_2_id = 255 * np.ones((256,))
        for l in id2label:
            if l in (-1, 255):
                continue
            label_2_id[l] = id2label[l].trainId
        label = label_2_id[label]
        size = image.shape
        img_name = datafiles["img_name"]
        if self.scale:
            image, label, softlabel = self.generate_scale_label(image, label, softlabel)
        image = np.asarray(image, np.float32)
        image = image/255.0  # scale to [0,1]
        image -= self.mean   # mean is applied in BGR order
        image = image/self.std
        img_h, img_w = label.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            # pad bottom/right so the crop window always fits
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(self.ignore_label,))
            # pad the soft label in 3-channel groups (cv2 4-channel limit)
            h_pad, w_pad = label_pad.shape
            num_group = int(np.ceil(self.num_classes / 3.0))
            softlabel_pad = np.zeros((h_pad, w_pad, self.num_classes), dtype=np.float32)
            start_idx = 0
            for idx in range(num_group):
                clabel_pad = softlabel[:, :, start_idx:start_idx + 3]
                clabel_pad = cv2.copyMakeBorder(clabel_pad, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
                softlabel_pad[:, :, start_idx:start_idx + 3] = clabel_pad.reshape(h_pad, w_pad, -1)
                start_idx = start_idx + 3
        else:
            img_pad, label_pad, softlabel_pad = image, label, softlabel
        img_h, img_w = label_pad.shape
        # with probability mine_chance, center the crop on a mined class pixel
        mine_flag = random.uniform(0, 1) < self.mine_chance
        if mine_flag and len(self.mine_id) > 0:
            label_unique = np.unique(label_pad)
            mine_id_temp = np.array([a for a in self.mine_id if a in label_unique])  # mined classes present in this image
            if mine_id_temp.size != 0:
                # pick one mined class, then one seed pixel of that class
                mine_id_img = mine_id_temp
                sel_idx = random.randint(0, mine_id_temp.size-1)
                sel_mine_id = mine_id_img[sel_idx]
                mine_id_loc = np.where(label_pad == sel_mine_id)  # tuple of (rows, cols)
                mine_id_len = len(mine_id_loc[0])
                seed_loc = random.randint(0, mine_id_len-1)
                hseed = mine_id_loc[0][seed_loc]
                wseed = mine_id_loc[1][seed_loc]
                # fixed: use floor division -- plain '/' yields floats under
                # Python 3, which crashes numpy slicing with the crop offsets
                half_crop_h = self.crop_h // 2
                half_crop_w = self.crop_w // 2
                # center the crop window at the seed
                left_idx = wseed - half_crop_w
                right_idx = wseed + half_crop_w - 1
                up_idx = hseed - half_crop_h
                bottom_idx = hseed + half_crop_h - 1
                # shift the window back inside the padded image if it overflows
                if left_idx < 0:
                    left_idx = 0
                elif right_idx > img_w - 1:
                    left_idx = left_idx - ( ( half_crop_w - 1 ) - (img_w - 1 - wseed) )
                if up_idx < 0:
                    up_idx = 0
                elif bottom_idx > img_h - 1:
                    up_idx = up_idx - ( ( half_crop_h - 1 ) - (img_h - 1 - hseed) )
                h_off = up_idx
                w_off = left_idx
            else:
                h_off = random.randint(0, img_h - self.crop_h)
                w_off = random.randint(0, img_w - self.crop_w)
        else:
            h_off = random.randint(0, img_h - self.crop_h)
            w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        softlabel = np.asarray(softlabel_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        image = image[:, :, ::-1]  # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        # zero out the soft-label vectors of ignored pixels
        label_expand = np.tile( np.expand_dims(label, axis=2), (1, 1, self.num_classes) )
        labels_ = label_expand.copy()
        labels_[labels_ != self.ignore_label] = 1.0
        labels_[labels_ == self.ignore_label] = 0.0
        labels = labels_*softlabel
        labels = labels.transpose((2,0,1))  # HWC -> CHW
        if self.is_mirror:
            flip = np.random.choice(2) * 2 - 1  # -1 or 1: maybe flip horizontally
            image = image[:, :, ::flip]
            labels = labels[:, :, ::flip]
        return image.copy(), labels.copy(), np.array(size), img_name, self.reg_weight
class GTA5StMineDataSet(data.Dataset):
def __init__(self, root, list_path, data_src=None, reg_weight = 0.0, rare_id = None, mine_id = None, mine_chance = None, pseudo_root = None, max_iters=None, crop_size=(500, 500), train_scale = (0.5, 1.5), mean=(128, 128, 128), std = (1,1,1), scale=True, mirror=True, ignore_label=255):
self.root = root
self.pseudo_root = pseudo_root
self.list_path = list_path
self.crop_h, self.crop_w = crop_size
self.crop_h, self.crop_w = crop_size
self.lscale, self.hscale = train_scale
self.scale = scale
self.ignore_label = ignore_label
self.mean = mean
self.std = std
self.is_mirror = mirror
# self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434])
self.img_ids = []
self.label_ids = []
self.reg_weight = reg_weight
self.rare_id = rare_id
self.mine_id = mine_id
self.mine_chance = mine_chance
self.data_src = data_src
with open(list_path) as f:
for item in f.readlines():
fields = item.strip().split('\t')
self.img_ids.append(fields[0])
self.label_ids.append(fields[1])
if not max_iters==None:
self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids)))
self.files = []
# for split in ["train", "trainval", "val"]:
for idx in range(len(self.img_ids)):
img_name = self.img_ids[idx]
label_name = self.label_ids[idx]
img_file = osp.join(self.root, img_name)
if self.pseudo_root == None:
label_file = osp.join(self.root, label_name)
else:
label_file = label_name
self.files.append({
"img": img_file,
"label": label_file,
"img_name": img_name,
"label_name": label_name
})
def __len__(self):
return len(self.files)
def generate_scale_label(self, image, label):
# f_scale = 0.5 + random.randint(0, 11) / 10.0
f_scale = self.lscale + random.randint(0, int((self.hscale-self.lscale)*10)) / 10.0
image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_LINEAR)
label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_NEAREST)
return image, label
def __getitem__(self, index):
datafiles = self.files[index]
image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR) # OpenCV read image as BGR, not RGB
label = np.array(Image.open(datafiles["label"]))
#
sys.path.insert(0, 'dataset/helpers')
if self.data_src == 'gta':
from labels import id2label
elif self.data_src == 'synthia':
from labels_cityscapes_synthia import id2label
label_2_id = 255 * np.ones((256,))
for l in id2label:
if l in (-1, 255):
continue
label_2_id[l] = id2label[l].trainId
label = label_2_id[label]
#
size = image.shape
img_name = datafiles["img_name"]
if self.scale:
image, label = self.generate_scale_label(image, label)
image = np.asarray(image, np.float32)
image = image/255.0 # scale to [0,1]
image -= self.mean # BGR
image = image/self.std # np.reshape(self.std,(1,1,3))
img_h, img_w = label.shape
pad_h = max(self.crop_h - img_h, 0)
pad_w = max(self.crop_w - img_w, 0)
if pad_h > 0 or pad_w > 0:
img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(0.0, 0.0, 0.0))
label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(self.ignore_label,))
else:
img_pad, label_pad = image, label
img_h, img_w = label_pad.shape
# mining or not
mine_flag = random.uniform(0, 1) < self.mine_chance
if mine_flag and len(self.mine_id) > 0:
label_unique = np.unique(label_pad)
mine_id_temp = np.array([a for a in self.mine_id if a in label_unique]) # if this image has the mine id
if mine_id_temp.size != 0:
# decide the single id to be mined
mine_id_img = mine_id_temp
sel_idx = random.randint(0, mine_id_temp.size-1)
sel_mine_id = mine_id_img[sel_idx]
# seed for the mined id
mine_id_loc = np.where(label_pad == sel_mine_id) # tuple
mine_id_len = len(mine_id_loc[0])
seed_loc = random.randint(0, mine_id_len-1)
hseed = mine_id_loc[0][seed_loc]
wseed = mine_id_loc[1][seed_loc]
# patch crop
half_crop_h = self.crop_h/2
half_crop_w = self.crop_w/2
# center crop at the seed
left_idx = wseed - half_crop_w
right_idx = wseed + half_crop_w -1
up_idx = hseed - half_crop_h
bottom_idx = hseed + half_crop_h - 1
# shift the left_idx or right_idx if they go beyond the pad margins
if left_idx < 0:
left_idx = 0
elif right_idx > img_w - 1:
left_idx = left_idx - ( ( half_crop_w - 1 ) - (img_w - 1 - wseed) ) # left_idx shifts to the left by the right beyond length
if up_idx < 0:
up_idx = 0
elif bottom_idx > img_h - 1:
up_idx = up_idx - ( ( half_crop_h - 1 ) - (img_h - 1 - hseed) ) # up_idx shifts to the up by the bottom beyond length
h_off = up_idx
w_off = left_idx
else:
h_off = random.randint(0, img_h - self.crop_h)
w_off = random.randint(0, img_w - self.crop_w)
else:
h_off = random.randint(0, img_h - self.crop_h)
w_off = random.randint(0, img_w - self.crop_w)
image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
image = image[:, :, ::-1] # change to RGB
image = image.transpose((2, 0, 1))
if self.is_mirror:
flip = np.random.choice(2) * 2 - 1
image = image[:, :, ::flip]
label = label[:, ::flip]
return image.copy(), label.copy(), np.array(size), img_name, self.reg_weight
class GTA5TestDataSet(data.Dataset):
    """Test-time dataset: yields normalised, optionally rescaled CHW RGB
    images together with their (resolution-unchanged) grayscale labels."""

    def __init__(self, root, list_path, max_iters=None, test_size=(1024, 512), test_scale=1.0, mean=(128, 128, 128), std=(1, 1, 1), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.list_path = list_path
        self.test_h, self.test_w = test_size
        self.scale = scale
        self.test_scale = test_scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        # Each line of the list file: "<image path>\t<label path>".
        with open(list_path) as f:
            for item in f:
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        if max_iters is not None:
            # Tile the list so len(dataset) >= max_iters.
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids)))
        self.files = []
        for img_name, label_name in zip(self.img_ids, self.label_ids):
            self.files.append({
                "img": osp.join(self.root, img_name),
                "label": osp.join(self.root, label_name),
                "img_name": img_name,
                "label_name": label_name
            })

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        """Return (image CHW float32 RGB, label HW float32, shape, name)."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # BGR
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        img_name = datafiles["img_name"]
        image = cv2.resize(image, None, fx=self.test_scale, fy=self.test_scale, interpolation=cv2.INTER_LINEAR)
        # The label resolution is deliberately left unchanged.
        image = np.asarray(image, np.float32)
        label = np.asarray(label, np.float32)
        image = image / 255.0  # scale to [0, 1]
        image -= self.mean  # BGR
        image = image / self.std
        size = image.shape
        image = image[:, :, ::-1]  # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        return image.copy(), label.copy(), np.array(size), img_name
class GTA5MSTDataSet(data.Dataset):
    """Multi-scale test dataset: like GTA5TestDataSet, but returns the
    rescaled image with its raw pixel values (no /255, mean or std
    normalisation), converted to CHW RGB."""

    def __init__(self, root, list_path, max_iters=None, test_size=(1024, 512), test_scale=1.0, mean=(128, 128, 128), std=(1, 1, 1), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.list_path = list_path
        self.test_h, self.test_w = test_size
        self.scale = scale
        self.test_scale = test_scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        pairs = []
        with open(list_path) as handle:
            for line in handle:
                pairs.append(line.strip().split('\t'))
        self.img_ids = [p[0] for p in pairs]
        self.label_ids = [p[1] for p in pairs]
        if max_iters is not None:
            # Tile both lists so one pass covers at least max_iters samples.
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids)))
        self.files = [
            {
                "img": osp.join(self.root, img_name),
                "label": osp.join(self.root, label_name),
                "img_name": img_name,
                "label_name": label_name,
            }
            for img_name, label_name in zip(self.img_ids, self.label_ids)
        ]

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        record = self.files[index]
        image = cv2.imread(record["img"], cv2.IMREAD_COLOR)  # BGR
        label = cv2.imread(record["label"], cv2.IMREAD_GRAYSCALE)
        img_name = record["img_name"]
        image = cv2.resize(image, None, fx=self.test_scale, fy=self.test_scale,
                           interpolation=cv2.INTER_LINEAR)
        image = np.asarray(image, np.float32)
        label = np.asarray(label, np.float32)
        # NOTE: intentionally no /255, mean subtraction or std division here.
        size = image.shape
        image = image[:, :, ::-1]  # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        return image.copy(), label.copy(), np.array(size), img_name
class GTA5TestCRFDataSet(data.Dataset):
    """Test dataset for CRF post-processing: like GTA5TestDataSet, but also
    returns the raw (unnormalised) RGB image at the original resolution,
    as required by DenseCRF."""

    def __init__(self, root, list_path, max_iters=None, test_size=(1024, 512), test_scale=1.0, mean=(128, 128, 128), std=(1, 1, 1), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.list_path = list_path
        self.test_h, self.test_w = test_size
        self.scale = scale
        self.test_scale = test_scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.std = std
        self.is_mirror = mirror
        self.img_ids = []
        self.label_ids = []
        # Each line of the list file: "<image path>\t<label path>".
        with open(list_path) as f:
            for item in f:
                fields = item.strip().split('\t')
                self.img_ids.append(fields[0])
                self.label_ids.append(fields[1])
        if max_iters is not None:
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids)))
        self.files = []
        for img_name, label_name in zip(self.img_ids, self.label_ids):
            self.files.append({
                "img": osp.join(self.root, img_name),
                "label": osp.join(self.root, label_name),
                "img_name": img_name,
                "label_name": label_name
            })

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        """Return (normalised CHW image, label HW, raw RGB HWC, shape, name)."""
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)  # BGR
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        img_name = datafiles["img_name"]
        # Keep an unnormalised RGB copy at the original resolution for the CRF.
        image_crf = np.asarray(image, np.float32)
        image_crf = image_crf[:, :, ::-1]  # BGR -> RGB
        image = cv2.resize(image, None, fx=self.test_scale, fy=self.test_scale, interpolation=cv2.INTER_LINEAR)
        image = np.asarray(image, np.float32)
        label = np.asarray(label, np.float32)
        image = image / 255.0  # scale to [0, 1]
        image -= self.mean  # BGR
        image = image / self.std
        size = image.shape
        image = image[:, :, ::-1]  # BGR -> RGB
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        return image.copy(), label.copy(), image_crf.copy(), np.array(size), img_name
class VOCDataTestSet(data.Dataset):
    """PASCAL-VOC style test set: mean-subtracted BGR images, zero-padded
    up to crop_size, returned as CHW float32 with name and original shape."""

    def __init__(self, root, list_path, crop_size=(505, 505), mean=(128, 128, 128)):
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.mean = mean
        # BUGFIX: the list file used to be opened without ever being closed.
        with open(list_path) as f:
            self.img_ids = [i_id.strip() for i_id in f]
        self.files = [{"img": osp.join(self.root, "img/%s.jpg" % name)}
                      for name in self.img_ids]

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        size = image.shape
        name = osp.splitext(osp.basename(datafiles["img"]))[0]
        image = np.asarray(image, np.float32)
        image -= self.mean  # mean subtraction only; no /255 scaling here
        img_h, img_w, _ = image.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            # Zero-pad bottom/right so every image is at least crop_size.
            image = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                       cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        return image, name, size
if __name__ == '__main__':
    # Smoke test: visualise the first training batch.
    dst = VOCDataSet("./data", is_transform=True)
    trainloader = data.DataLoader(dst, batch_size=4)
    # BUGFIX: the loop variable used to be named `data`, shadowing the
    # `torch.utils.data` module imported at file level.
    for i, batch in enumerate(trainloader):
        imgs, labels = batch
        if i == 0:
            img = torchvision.utils.make_grid(imgs).numpy()
            img = np.transpose(img, (1, 2, 0))  # CHW -> HWC
            img = img[:, :, ::-1]  # channel reversal before display (BGR/RGB swap)
            plt.imshow(img)
            plt.show()
| 62,075 | 43.276748 | 309 | py |
imaging_MLPs | imaging_MLPs-master/ImageNet/networks/linear_mixer.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.init as init
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbedding(nn.Module):
    """Split an image into non-overlapping patches and linearly embed each
    one, returning a channels-last (B, H', W', C) feature map."""

    def __init__(
        self,
        patch_size: int,
        embed_dim: int,
        channels: int
    ):
        super().__init__()
        patchify = nn.Conv2d(
            in_channels=channels,
            out_channels=embed_dim,
            kernel_size=patch_size,
            stride=patch_size
        )
        to_channels_last = Rearrange("b c h w -> b h w c")
        self.proj = nn.Sequential(patchify, to_channels_last)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj(x)
class PatchExpansion(nn.Module):
    # Inverse of patch embedding: upsample a channels-last (B, H, W, C)
    # feature map by `dim_scale` along each spatial axis and project the
    # per-pixel features back to `img_channels` image channels.
    def __init__(self, dim_scale, channel_dim, img_channels, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim_scale = dim_scale
        # Expand each token to dim_scale^2 tokens' worth of features.
        self.expand = nn.Linear(channel_dim, dim_scale**2* channel_dim, bias=False)
        self.output_dim = channel_dim
        self.norm = norm_layer(channel_dim)
        # 1x1 conv maps embedding channels -> output image channels.
        self.output = nn.Conv2d(in_channels=channel_dim,out_channels=img_channels ,kernel_size=1,bias=False)
    def forward(self, x):
        """
        x: B, H, W, C
        Returns: B, img_channels, H*dim_scale, W*dim_scale
        """
        x = self.expand(x)
        B, H, W, C = x.shape
        # Scatter the expanded channels into dim_scale x dim_scale spatial
        # blocks around each position (pixel-shuffle in channels-last layout).
        x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale, c=C//(self.dim_scale**2))
        x = x.view(B,-1,self.output_dim)
        x= self.norm(x)
        x = x.view(B,H*self.dim_scale, W*self.dim_scale,-1)
        x = x.permute(0,3,1,2)  # channels-last -> NCHW for the 1x1 conv
        x = self.output(x)
        return x
class MLPBlock(nn.Module):
    """Two-layer feed-forward block: Linear -> GELU -> Linear, mapping
    input_dim -> hidden_dim -> input_dim."""

    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        expand = nn.Linear(input_dim, hidden_dim)
        contract = nn.Linear(hidden_dim, input_dim)
        self.model = nn.Sequential(expand, nn.GELU(), contract)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.model(x)
class Mixer(nn.Module):
    # Mixer block whose token mixing is purely linear: one Linear layer over
    # the height axis and one over the width axis, with no nonlinearity in
    # between; channel mixing is a standard MLPBlock. Residual connections
    # wrap both sub-blocks.
    def __init__(
        self,
        num_patches: int,
        num_channels: int,
        f_hidden: int,
    ):
        super().__init__()
        # NOTE: f_hidden is only used by the channel-mixing MLP below.
        self.token_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            Rearrange("b h w c -> b c w h"),  # put height last
            nn.Linear(num_patches, num_patches),  # mix along height
            Rearrange("b c w h -> b c h w"),  # put width last
            nn.Linear(num_patches, num_patches),  # mix along width
            Rearrange("b c h w -> b h w c"),  # back to channels-last
        )
        self.channel_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            MLPBlock(num_channels, num_channels*f_hidden)
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (B, H, W, C), where H == W == num_patches.
        x = x + self.token_mixing(x)
        x = x + self.channel_mixing(x)
        return x
class Linear_Mixer(nn.Module):
    """Image-to-image MLP-Mixer variant whose token mixing is purely linear.

    Pipeline: patch-embed -> `num_layers` Mixer blocks -> patch-expand back
    to an image with `img_channels` channels.
    """

    def __init__(
        self,
        img_size: int = 256,
        img_channels: int = 3,
        patch_size: int = 4,
        embed_dim: int = 140,
        num_layers: int = 19,
        f_hidden: int = 4,
    ):
        super().__init__()
        self.patch_embed = PatchEmbedding(patch_size, embed_dim, img_channels)
        grid = img_size // patch_size  # patches per side
        blocks = []
        for _ in range(num_layers):
            blocks.append(Mixer(grid, embed_dim, f_hidden))
        self.mixer_layers = nn.Sequential(*blocks)
        self.patch_expand = PatchExpansion(patch_size, embed_dim, img_channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        embedded = self.patch_embed(x)
        mixed = self.mixer_layers(embedded)
        return self.patch_expand(mixed)
| 3,644 | 26.613636 | 127 | py |
imaging_MLPs | imaging_MLPs-master/ImageNet/networks/original_mixer.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.init as init
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbeddings(nn.Module):
    """Patch-embed an image and flatten the patch grid into a token
    sequence of shape (B, num_patches, hidden_dim)."""

    def __init__(
        self,
        patch_size: int,
        hidden_dim: int,
        channels: int
    ):
        super().__init__()
        patchify = nn.Conv2d(
            in_channels=channels,
            out_channels=hidden_dim,
            kernel_size=patch_size,
            stride=patch_size
        )
        flatten = Rearrange("b c h w -> b (h w) c")
        self.proj = nn.Sequential(patchify, flatten)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj(x)
class PatchEmbeddings_transpose(nn.Module):
    # Inverse of PatchEmbeddings: unflatten the token sequence back into a
    # grid of `d` patches per side, then rebuild the image with a
    # transposed convolution.
    def __init__(
        self,
        patch_size: int,
        hidden_dim: int,
        channels: int,
        d: int
    ):
        super().__init__()
        self.proj_transpose = nn.Sequential(
            Rearrange("b (h w) c -> b c h w", h=d),  # d = patches per side
            nn.ConvTranspose2d(
                in_channels=hidden_dim,
                out_channels=channels,
                kernel_size=patch_size,
                stride=patch_size
            )
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj_transpose(x)
class MLPBlock(nn.Module):
    """Feed-forward block (Linear -> GELU -> Linear); output dimension
    equals the input dimension."""

    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, input_dim),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.model(x)
class MixerBlock(nn.Module):
    # Standard MLP-Mixer block on a flat token sequence: transpose to mix
    # across tokens, transpose back, then mix across channels; residual
    # connections wrap both sub-blocks.
    def __init__(
        self,
        num_patches: int,
        num_channels: int,
        tokens_hidden_dim: int,
        channels_hidden_dim: int
    ):
        super().__init__()
        self.token_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            Rearrange("b p c -> b c p"),  # tokens last so the MLP mixes them
            MLPBlock(num_patches, tokens_hidden_dim),
            Rearrange("b c p -> b p c")
        )
        self.channel_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            MLPBlock(num_channels, channels_hidden_dim)
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (B, num_patches, num_channels)
        x = x + self.token_mixing(x)
        x = x + self.channel_mixing(x)
        return x
class Original_Mixer(nn.Module):
    """Standard MLP-Mixer adapted to image-to-image mapping: tokens are
    flattened patches, and a transposed patch embedding rebuilds the image."""

    def __init__(
        self,
        image_size: int = 256,
        channels: int = 3,
        patch_size: int = 4,
        num_layers: int = 8,
        hidden_dim: int = 128,
        tokens_hidden_dim: int = 96,
        channels_hidden_dim: int = 256
    ):
        super().__init__()
        num_patches = (image_size // patch_size) ** 2
        d = (image_size - patch_size) // patch_size + 1  # patches per side
        self.embed = PatchEmbeddings(patch_size, hidden_dim, channels)
        blocks = []
        for _ in range(num_layers):
            blocks.append(
                MixerBlock(
                    num_patches=num_patches,
                    num_channels=hidden_dim,
                    tokens_hidden_dim=tokens_hidden_dim,
                    channels_hidden_dim=channels_hidden_dim
                )
            )
        self.layers = nn.Sequential(*blocks)
        self.norm = nn.LayerNorm(hidden_dim)
        self.embed_transpose = PatchEmbeddings_transpose(patch_size, hidden_dim, channels, d)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        b, c, h, w = x.shape  # implicit 4-D shape check; values unused
        tokens = self.embed(x)
        tokens = self.layers(tokens)
        tokens = self.norm(tokens)
        return self.embed_transpose(tokens)
imaging_MLPs | imaging_MLPs-master/ImageNet/networks/img2img_mixer.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.init as init
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbedding(nn.Module):
    """Embed non-overlapping image patches with a strided convolution and
    return a channels-last (B, H', W', C) grid of patch features."""

    def __init__(
        self,
        patch_size: int,
        embed_dim: int,
        channels: int
    ):
        super().__init__()
        conv = nn.Conv2d(
            in_channels=channels,
            out_channels=embed_dim,
            kernel_size=patch_size,
            stride=patch_size
        )
        self.proj = nn.Sequential(conv, Rearrange("b c h w -> b h w c"))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj(x)
class PatchExpansion(nn.Module):
    # Inverse of patch embedding: upsample a channels-last (B, H, W, C)
    # feature map by `dim_scale` per spatial axis (pixel-shuffle style) and
    # project features back to `img_channels` channels with a 1x1 conv.
    def __init__(self, dim_scale, channel_dim, img_channels, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim_scale = dim_scale
        # Expand each token to dim_scale^2 tokens' worth of features.
        self.expand = nn.Linear(channel_dim, dim_scale**2* channel_dim, bias=False)
        self.output_dim = channel_dim
        self.norm = norm_layer(channel_dim)
        self.output = nn.Conv2d(in_channels=channel_dim,out_channels=img_channels ,kernel_size=1,bias=False)
    def forward(self, x):
        # x: (B, H, W, C) -> (B, img_channels, H*dim_scale, W*dim_scale)
        x = self.expand(x)
        B, H, W, C = x.shape
        # Scatter expanded channels into dim_scale x dim_scale spatial blocks.
        x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale, c=C//(self.dim_scale**2))
        x = x.view(B,-1,self.output_dim)
        x= self.norm(x)
        x = x.view(B,H*self.dim_scale, W*self.dim_scale,-1)
        x = x.permute(0,3,1,2)  # channels-last -> NCHW
        x = self.output(x)
        return x
class MLPBlock(nn.Module):
    """Position-wise MLP: expand to hidden_dim, apply GELU, contract back
    to input_dim."""

    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        fc_in = nn.Linear(input_dim, hidden_dim)
        fc_out = nn.Linear(hidden_dim, input_dim)
        self.model = nn.Sequential(fc_in, nn.GELU(), fc_out)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.model(x)
class Mixer(nn.Module):
    # MLP-Mixer block on a 2-D patch grid: token mixing applies an MLP
    # along the height axis and then along the width axis; channel mixing
    # is a standard per-position MLP. Residuals wrap both sub-blocks.
    def __init__(
        self,
        num_patches: int,
        num_channels: int,
        f_hidden: int
    ):
        super().__init__()
        self.token_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            Rearrange("b h w c -> b c w h"),  # height last
            MLPBlock(num_patches, num_patches*f_hidden),  # mix along height
            Rearrange("b c w h -> b c h w"),  # width last
            MLPBlock(num_patches, num_patches*f_hidden),  # mix along width
            Rearrange("b c h w -> b h w c"),  # back to channels-last
        )
        self.channel_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            MLPBlock(num_channels, num_channels*f_hidden)
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (B, H, W, C), where H == W == num_patches.
        x = x + self.token_mixing(x)
        x = x + self.channel_mixing(x)
        return x
class Img2Img_Mixer(nn.Module):
    """Image-to-image MLP-Mixer: patch-embed the input, run `num_layers`
    Mixer blocks, then patch-expand back to an `img_channels` image."""

    def __init__(
        self,
        img_size: int = 256,
        img_channels: int = 3,
        patch_size: int = 4,
        embed_dim: int = 128,
        num_layers: int = 16,
        f_hidden: int = 4,
    ):
        super().__init__()
        self.patch_embed = PatchEmbedding(patch_size, embed_dim, img_channels)
        grid = img_size // patch_size  # patches per side
        blocks = [Mixer(grid, embed_dim, f_hidden) for _ in range(num_layers)]
        self.mixer_layers = nn.Sequential(*blocks)
        self.patch_expand = PatchExpansion(patch_size, embed_dim, img_channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        features = self.patch_embed(x)
        features = self.mixer_layers(features)
        return self.patch_expand(features)
imaging_MLPs | imaging_MLPs-master/ImageNet/networks/vit.py | '''
This code is modified from https://github.com/facebookresearch/convit. To adapt the vit/convit to image reconstruction, variable input sizes, and patch sizes for both spatial dimensions.
'''
import torch
import torch.nn as nn
from functools import partial
import torch.nn.functional as F
from timm.models.helpers import load_pretrained
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
    """Transformer feed-forward block: fc1 -> activation -> dropout ->
    fc2 -> dropout."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for linear weights, zero biases,
        # identity affine for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class GPSA(nn.Module):
    """Gated Positional Self-Attention (ConViT): each head blends
    content-based attention with a learned positional attention map via a
    per-head sigmoid gate, and can be initialised to act like a local
    convolution."""
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
                 locality_strength=1., use_local_init=True, grid_size=None):
        super().__init__()
        self.num_heads = num_heads
        self.dim = dim
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.k = nn.Linear(dim, dim, bias=qkv_bias)
        self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        # Projects the 3 relative-position features (dx, dy, d^2) to one
        # positional attention logit per head.
        self.pos_proj = nn.Linear(3, num_heads)
        self.proj_drop = nn.Dropout(proj_drop)
        self.locality_strength = locality_strength
        # Per-head gate between content-based and positional attention.
        self.gating_param = nn.Parameter(1*torch.ones(self.num_heads))
        self.apply(self._init_weights)
        if use_local_init:
            self.local_init(locality_strength=locality_strength)
        self.current_grid_size = grid_size
    def _init_weights(self, m):
        # Truncated-normal linear weights, zero biases, identity LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
        if isinstance(m, nn.Linear) and m.bias is not None:
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def get_attention(self, x):
        # Returns gated attention weights of shape (B, num_heads, N, N).
        B, N, C = x.shape
        k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        pos_score = self.pos_proj(self.rel_indices).expand(B, -1, -1,-1).permute(0,3,1,2)
        patch_score = (q @ k.transpose(-2, -1)) * self.scale
        patch_score = patch_score.softmax(dim=-1)
        pos_score = pos_score.softmax(dim=-1)
        # Sigmoid gate mixes the two normalised score maps per head.
        gating = self.gating_param.view(1,-1,1,1)
        attn = (1.-torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
        attn = attn / attn.sum(dim=-1).unsqueeze(-1)  # renormalise rows to 1
        attn = self.attn_drop(attn)
        return attn
    def get_attention_map(self, x, return_map = False):
        # Mean attention distance per head (optionally the full map too).
        attn_map = self.get_attention(x).mean(0) # average over batch
        distances = self.rel_indices.squeeze()[:,:,-1]**.5
        dist = torch.einsum('nm,hnm->h', (distances, attn_map))
        dist /= distances.size(0)
        if return_map:
            return dist, attn_map
        else:
            return dist
    def local_init(self, locality_strength=1.):
        # Initialise heads to attend locally: v starts as the identity and
        # each head's positional projection points at one offset of a
        # sqrt(num_heads) x sqrt(num_heads) neighbourhood around the query.
        self.v.weight.data.copy_(torch.eye(self.dim))
        locality_distance = 1 #max(1,1/locality_strength**.5)
        kernel_size = int(self.num_heads**.5)
        center = (kernel_size-1)/2 if kernel_size%2==0 else kernel_size//2
        for h1 in range(kernel_size):
            for h2 in range(kernel_size):
                position = h1+kernel_size*h2
                self.pos_proj.weight.data[position,2] = -1
                self.pos_proj.weight.data[position,1] = 2*(h1-center)*locality_distance
                self.pos_proj.weight.data[position,0] = 2*(h2-center)*locality_distance
        self.pos_proj.weight.data *= locality_strength
    def get_rel_indices(self, ):
        # Precompute (dx, dy, d^2) between every pair of grid positions,
        # cached as self.rel_indices with shape (1, N, N, 3).
        H, W = self.current_grid_size
        N = H*W
        rel_indices = torch.zeros(1, N, N, 3)
        indx = torch.arange(W).view(1,-1) - torch.arange(W).view(-1, 1)
        indx = indx.repeat(H, H)
        indy = torch.arange(H).view(1,-1) - torch.arange(H).view(-1, 1)
        indy = indy.repeat_interleave(W, dim=0).repeat_interleave(W, dim=1)
        indd = indx**2 + indy**2
        rel_indices[:,:,:,2] = indd.unsqueeze(0)
        rel_indices[:,:,:,1] = indy.unsqueeze(0)
        rel_indices[:,:,:,0] = indx.unsqueeze(0)
        device = self.v.weight.device
        self.rel_indices = rel_indices.to(device)
    def forward(self, x):
        B, N, C = x.shape
        # Rebuild the cached relative indices if the token count changed.
        if not hasattr(self, 'rel_indices') or self.rel_indices.size(1)!=N:
            self.get_rel_indices()
        attn = self.get_attention(x)
        v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class MHSA(nn.Module):
    """Vanilla multi-head self-attention with a fused qkv projection."""
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., grid_size=None):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.apply(self._init_weights)
        self.current_grid_size = grid_size
    def _init_weights(self, m):
        # Truncated-normal linear weights, zero biases, identity LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
        if isinstance(m, nn.Linear) and m.bias is not None:
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def get_attention_map(self, x, return_map = False):
        # Mean attention distance per head, mirroring GPSA.get_attention_map.
        self.get_rel_indices()
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn_map = (q @ k.transpose(-2, -1)) * self.scale
        attn_map = attn_map.softmax(dim=-1).mean(0) # average over batch
        distances = self.rel_indices.squeeze()[:,:,-1]**.5
        dist = torch.einsum('nm,hnm->h', (distances, attn_map))
        dist /= distances.size(0)
        if return_map:
            return dist, attn_map
        else:
            return dist
    def get_rel_indices(self, ):
        # Same pairwise (dx, dy, d^2) table as GPSA; only needed here for
        # the attention-distance diagnostic above.
        H, W = self.current_grid_size
        N = H*W
        rel_indices = torch.zeros(1, N, N, 3)
        indx = torch.arange(W).view(1,-1) - torch.arange(W).view(-1, 1)
        indx = indx.repeat(H, H)
        indy = torch.arange(H).view(1,-1) - torch.arange(H).view(-1, 1)
        indy = indy.repeat_interleave(W, dim=0).repeat_interleave(W, dim=1)
        indd = indx**2 + indy**2
        rel_indices[:,:,:,2] = indd.unsqueeze(0)
        rel_indices[:,:,:,1] = indy.unsqueeze(0)
        rel_indices[:,:,:,0] = indx.unsqueeze(0)
        device = self.qkv.weight.device
        self.rel_indices = rel_indices.to(device)
    def forward(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class Block(nn.Module):
    # Pre-norm transformer block; the attention module is GPSA or MHSA
    # depending on `use_gpsa`, with stochastic depth (DropPath) on both
    # residual branches.
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_gpsa=True, **kwargs):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.use_gpsa = use_gpsa
        if self.use_gpsa:
            self.attn = GPSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, **kwargs)
        else:
            self.attn = MHSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, **kwargs)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
    def forward(self, x, grid_size):
        # The attention module needs the current token-grid shape to build
        # its relative-position indices.
        self.attn.current_grid_size = grid_size
        x = x + self.drop_path(self.attn(self.norm1(x)))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class PatchEmbed(nn.Module):
    """Image-to-patch embedding via a strided convolution (timm-style)."""

    def __init__(self, patch_size, in_chans, embed_dim):
        super().__init__()
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.apply(self._init_weights)

    def forward(self, x):
        return self.proj(x)

    def _init_weights(self, m):
        # Same init scheme as the other modules in this file; the Conv2d
        # here matches neither branch, so it keeps its default init.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
class VisionTransformer(nn.Module):
""" Vision Transformer
"""
def __init__(self, avrg_img_size=320, patch_size=16, in_chans=1, embed_dim=64, depth=8,
num_heads=9, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, global_pool=None,
gpsa_interval=[-1, -1], locality_strength=1., use_pos_embed=True):
super().__init__()
self.depth = depth
embed_dim *= num_heads
self.num_features = embed_dim # num_features for consistency with other models
self.locality_strength = locality_strength
self.use_pos_embed = use_pos_embed
if isinstance(avrg_img_size, int):
img_size = to_2tuple(avrg_img_size)
if isinstance(patch_size, int):
self.patch_size = to_2tuple(patch_size)
else:
self.patch_size = patch_size
self.in_chans = in_chans
self.patch_embed = PatchEmbed(
patch_size=self.patch_size, in_chans=in_chans, embed_dim=embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
if self.use_pos_embed:
self.pos_embed = nn.Parameter(
torch.zeros(1, embed_dim,
img_size[0] // self.patch_size[0],
img_size[1] // self.patch_size[1])
)
trunc_normal_(self.pos_embed, std=.02)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
use_gpsa=True,
locality_strength=locality_strength)
if i>=gpsa_interval[0]-1 and i<gpsa_interval[1] else
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
use_gpsa=False,)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# head
self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')]
self.head = nn.Linear(self.num_features, in_chans*self.patch_size[0]*self.patch_size[1])
def seq2img(self, x, img_size):
"""
Transforms sequence back into image space, input dims: [batch_size, num_patches, channels]
output dims: [batch_size, channels, H, W]
"""
x = x.view(x.shape[0], x.shape[1], self.in_chans, self.patch_size[0], self.patch_size[1])
x = x.chunk(x.shape[1], dim=1)
x = torch.cat(x, dim=4).permute(0,1,2,4,3)
x = x.chunk(img_size[0]//self.patch_size[0], dim=3)
x = torch.cat(x, dim=4).permute(0,1,2,4,3).squeeze(1)
return x
self.head.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self,):
return {'pos_embed'}
def get_head(self,):
return self.head
def reset_head(self,):
self.head = nn.Linear(self.num_features, in_chans*self.patch_size[0]*self.patch_size[1])
def forward_features(self, x, k=None):
x = self.patch_embed(x)
_, _, H, W = x.shape
if self.use_pos_embed:
pos_embed = F.interpolate(self.pos_embed, size=[H, W], mode='bilinear', align_corners = False)
x = x + pos_embed
x = x.flatten(2).transpose(1, 2)
x = self.pos_drop(x)
for u, blk in enumerate(self.blocks):
x = blk(x, (H, W))
if k is not None and u == k:
self.attention_map = blk.attn.get_attention_map(x, return_map = True)
x = self.norm(x)
return x
    def forward(self, x, k=None):
        # Remember the input spatial size so the token sequence can be folded
        # back into an image of the same H x W.
        _, _, H, W = x.shape
        x = self.forward_features(x, k)
        # Linear head maps each token to a flattened in_chans*ph*pw patch.
        x = self.head(x)
        x = self.seq2img(x, (H, W))
        # NOTE(review): the trailing "| 15,082 | ..." tokens are dataset-export
        # residue fused onto this line, not part of the original source file.
        return x | 15,082 | 39.007958 | 186 | py |
imaging_MLPs | imaging_MLPs-master/ImageNet/networks/recon_net.py | import torch.nn as nn
import torch.nn.functional as F
from math import ceil, floor
class ReconNet(nn.Module):
    def __init__(self, net):
        # Wrap a patch-based network; ``net`` must expose ``patch_size``
        # (used by pad/unpad below) and accept the ``net(x, k)`` call form.
        super().__init__()
        self.net = net
def pad(self, x):
_, _, h, w = x.shape
hp, wp = self.net.patch_size
f1 = ( (wp - w % wp) % wp ) / 2
f2 = ( (hp - h % hp) % hp ) / 2
wpad = [floor(f1), ceil(f1)]
hpad = [floor(f2), ceil(f2)]
x = F.pad(x, wpad+hpad)
return x, wpad, hpad
def unpad(self, x, wpad, hpad):
return x[..., hpad[0] : x.shape[-2]-hpad[1], wpad[0] : x.shape[-1]-wpad[1]]
    def forward(self, x, k=None):
        # Pad to a patch-size multiple, run the wrapped network, then crop
        # back to the original spatial size.
        x, wpad, hpad = self.pad(x)
        x = self.net(x, k)
        x = self.unpad(x, wpad, hpad)
        # NOTE(review): the trailing "| 810 | ..." tokens are dataset-export
        # residue fused onto this line, not part of the original source file.
        return x | 810 | 26.033333 | 90 | py |
imaging_MLPs | imaging_MLPs-master/ImageNet/networks/unet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class Unet(nn.Module):
    """
    PyTorch implementation of a U-Net model.
    O. Ronneberger, P. Fischer, and Thomas Brox. U-net: Convolutional networks
    for biomedical image segmentation. In International Conference on Medical
    image computing and computer-assisted intervention, pages 234–241.
    Springer, 2015.
    """
    def __init__(
        self,
        in_chans: int = 3,
        out_chans: int = 3,
        chans: int = 21,
        num_pool_layers: int = 4,
        drop_prob: float = 0.0,
    ):
        """
        Args:
            in_chans: Number of channels in the input to the U-Net model.
            out_chans: Number of channels in the output to the U-Net model.
            chans: Number of output channels of the first convolution layer.
            num_pool_layers: Number of down-sampling and up-sampling layers.
            drop_prob: Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.chans = chans
        self.num_pool_layers = num_pool_layers
        self.drop_prob = drop_prob
        # Encoder: the channel count doubles at each deeper level.
        self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
        ch = chans
        for _ in range(num_pool_layers - 1):
            self.down_sample_layers.append(ConvBlock(ch, ch * 2, drop_prob))
            ch *= 2
        # Bottleneck block at the coarsest resolution.
        self.conv = ConvBlock(ch, ch * 2, drop_prob)
        # Decoder: transpose convs upsample; ConvBlocks fuse the skip features.
        self.up_conv = nn.ModuleList()
        self.up_transpose_conv = nn.ModuleList()
        for _ in range(num_pool_layers - 1):
            self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
            self.up_conv.append(ConvBlock(ch * 2, ch, drop_prob))
            ch //= 2
        self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
        # Final level also maps down to out_chans with a 1x1 conv.
        self.up_conv.append(
            nn.Sequential(
                ConvBlock(ch * 2, ch, drop_prob),
                nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1),
            )
        )
    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.
        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        stack = []
        output = image
        # apply down-sampling layers, stashing each pre-pool feature map as a
        # skip connection for the decoder
        for layer in self.down_sample_layers:
            output = layer(output)
            stack.append(output)
            output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)
        output = self.conv(output)
        # apply up-sampling layers
        for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
            downsample_layer = stack.pop()
            output = transpose_conv(output)
            # reflect pad on the right/botton if needed to handle odd input dimensions
            padding = [0, 0, 0, 0]
            if output.shape[-1] != downsample_layer.shape[-1]:
                padding[1] = 1 # padding right
            if output.shape[-2] != downsample_layer.shape[-2]:
                padding[3] = 1 # padding bottom
            if torch.sum(torch.tensor(padding)) != 0:
                output = F.pad(output, padding, "reflect")
            # Concatenate the encoder skip connection along channels.
            output = torch.cat([output, downsample_layer], dim=1)
            output = conv(output)
        return output
class ConvBlock(nn.Module):
    """
    A Convolutional Block that consists of two convolution layers each followed by
    instance normalization, LeakyReLU activation and dropout.
    """

    def __init__(self, in_chans: int, out_chans: int, drop_prob: float):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
            drop_prob: Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.drop_prob = drop_prob
        # Two identical conv stages; only the first changes the channel count.
        stages = []
        channels = in_chans
        for _ in range(2):
            stages.extend([
                nn.Conv2d(channels, out_chans, kernel_size=3, padding=1, bias=False),
                nn.InstanceNorm2d(out_chans),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Dropout2d(drop_prob),
            ])
            channels = out_chans
        self.layers = nn.Sequential(*stages)

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """Apply both conv stages to a `(N, in_chans, H, W)` tensor,
        returning `(N, out_chans, H, W)`."""
        return self.layers(image)
class TransposeConvBlock(nn.Module):
    """
    A Transpose Convolutional Block that consists of one convolution transpose
    layers followed by instance normalization and LeakyReLU activation.
    """
    def __init__(self, in_chans: int, out_chans: int):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        # kernel 2 / stride 2 transpose conv doubles both spatial dimensions.
        self.layers = nn.Sequential(
            nn.ConvTranspose2d(
                in_chans, out_chans, kernel_size=2, stride=2, bias=False
            ),
            nn.InstanceNorm2d(out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )
    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.
        Returns:
            Output tensor of shape `(N, out_chans, H*2, W*2)`.
        """
        # NOTE(review): the trailing "| 5,981 | ..." tokens are dataset-export
        # residue fused onto this line, not part of the original source file.
        return self.layers(image) | 5,981 | 32.79661 | 88 | py |
imaging_MLPs | imaging_MLPs-master/ImageNet/networks/u_mixer.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.init as init
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbeddings(nn.Module):
    """Split an image into non-overlapping patches and embed each one.

    A convolution whose stride equals its kernel size is exactly a per-patch
    linear projection; the Rearrange converts to channels-last tokens, giving
    an output of shape [B, H/p, W/p, embed_dim].
    """

    def __init__(
        self,
        patch_size: int,
        embed_dim: int,
        channels: int
    ):
        super().__init__()
        self.proj = nn.Sequential(
            nn.Conv2d(
                in_channels=channels,
                out_channels=embed_dim,
                kernel_size=patch_size,
                stride=patch_size,
            ),
            Rearrange("b c h w -> b h w c"),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Embed ``x`` ([B, C, H, W]) into patch tokens ([B, H/p, W/p, D])."""
        return self.proj(x)
class PatchMerge(nn.Module):
    r""" Patch Merging Layer.

    Halves the spatial grid by gathering every 2x2 neighbourhood into the
    channel axis (4C), normalizing, and projecting down to 2C.

    Args:
        channel_dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """
    def __init__(self, channel_dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = channel_dim
        self.reduction = nn.Linear(4 * channel_dim, 2 * channel_dim, bias=False)
        self.norm = norm_layer(4 * channel_dim)

    def forward(self, x):
        """x: [B, H, W, C] -> [B, H/2, W/2, 2C]; H and W must be even."""
        batch, height, width, channels = x.shape
        # The four samples of every 2x2 window, stacked along channels.
        corners = [
            x[:, 0::2, 0::2, :],
            x[:, 1::2, 0::2, :],
            x[:, 0::2, 1::2, :],
            x[:, 1::2, 1::2, :],
        ]
        merged = torch.cat(corners, -1).view(batch, height // 2, width // 2, 4 * channels)
        return self.reduction(self.norm(merged))
class PatchExpand(nn.Module):
    """Double the spatial grid: expand channels C -> 2C, then redistribute
    groups of 4 channels into a 2x2 spatial block, leaving C/2 per token."""
    def __init__(self, channel_dim, dim_scale=2, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = channel_dim
        self.expand = nn.Linear(channel_dim, 2* channel_dim, bias=False) if dim_scale==2 else nn.Identity()
        # After the rearrange each token keeps channel_dim // dim_scale channels.
        self.norm = norm_layer(channel_dim // dim_scale)
    def forward(self, x):
        """
        x: B, H, W, C
        """
        x = self.expand(x)
        B, H, W, C = x.shape
        # Fold each group of 4 channels into a 2x2 spatial neighbourhood.
        x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=2, p2=2, c=C//4)
        x = x.view(B, H*2, W*2 ,C//4)
        x= self.norm(x)
        return x
class FinalPatchExpand(nn.Module):
    """Upsample tokens by ``dim_scale`` per side and project to image channels.
    Output is channels-first: [B, img_channels, H*scale, W*scale]."""
    def __init__(self, channel_dim, img_channels, dim_scale=4, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim_scale = dim_scale
        # NOTE(review): the expansion factor is hard-coded to 16 (= 4**2), so
        # this layer is only consistent with dim_scale == 4 — confirm before
        # using other scales.
        self.expand = nn.Linear(channel_dim, 16* channel_dim, bias=False)
        self.output_dim = channel_dim
        self.norm = norm_layer(channel_dim)
        # 1x1 conv maps the embedding channels to the output image channels.
        self.output = nn.Conv2d(in_channels=channel_dim,out_channels=img_channels ,kernel_size=1,bias=False)
    def forward(self, x):
        """
        x: B, H, W, C
        """
        x = self.expand(x)
        B, H, W, C = x.shape
        # Distribute the expanded channels over a dim_scale x dim_scale block.
        x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale, c=C//(self.dim_scale**2))
        x = x.view(B,-1,self.output_dim)
        x= self.norm(x)
        x = x.view(B,H*self.dim_scale, W*self.dim_scale,-1)
        # Back to channels-first for the final convolution.
        x = x.permute(0,3,1,2)
        x = self.output(x)
        return x
class MLPBlock(nn.Module):
    """Two-layer perceptron applied over the last axis, with a GELU between."""

    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, input_dim),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Expand -> GELU -> contract; the input shape is preserved."""
        return self.model(x)
class Mixer(nn.Module):
    """MLP-Mixer block: mix tokens along H, then along W, then mix channels,
    with a residual connection around each of the two mixing paths."""

    def __init__(
        self,
        num_patches: int,
        num_channels: int
    ):
        super().__init__()
        # Transposes route each per-axis MLP over H first, then over W.
        self.token_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            Rearrange("b h w c -> b c w h"),
            MLPBlock(num_patches, num_patches),
            Rearrange("b c w h -> b c h w"),
            MLPBlock(num_patches, num_patches),
            Rearrange("b c h w -> b h w c"),
        )
        self.channel_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            MLPBlock(num_channels, num_channels),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = x + self.token_mixing(x)
        return x + self.channel_mixing(x)
class U_Mixer(nn.Module):
    """U-Net-shaped MLP-Mixer: mixer stages connected by patch merge/expand
    layers, with additive skip connections between matching resolutions."""
    def __init__(
        self,
        img_size: int = 256,
        img_channels: int = 3,
        embed_dim: int = 96,
    ):
        super().__init__()
        #mixer blocks
        # NOTE(review): each mixerN instance is applied twice in forward, so
        # both applications share one set of weights — presumably deliberate
        # parameter sharing; confirm with the authors.
        self.mixer1= Mixer( img_size//4, embed_dim)
        self.mixer2= Mixer( img_size//8, embed_dim*2)
        self.mixer3= Mixer( img_size//16, embed_dim*4)
        self.mixer4= Mixer( img_size//32, embed_dim*8)
        self.mixer11= Mixer( img_size//4, embed_dim)
        self.mixer22= Mixer( img_size//8, embed_dim*2)
        self.mixer33= Mixer( img_size//16, embed_dim*4)
        self.mixer44= Mixer( img_size//32, embed_dim*8)
        #encode
        self.patch_embed = PatchEmbeddings(4, embed_dim, img_channels)
        self.patch_merge1 = PatchMerge(embed_dim)
        self.patch_merge2= PatchMerge(embed_dim*2)
        self.patch_merge3= PatchMerge(embed_dim*4)
        #decode
        self.patch_expand1 = PatchExpand(embed_dim*8)
        self.patch_expand2 = PatchExpand(embed_dim*4)
        self.patch_expand3 = PatchExpand(embed_dim*2)
        self.final_expand = FinalPatchExpand(embed_dim, img_channels)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Encoder: stash each pre-merge feature map in y for the skips.
        y=[]
        x = self.patch_embed(x)
        x = self.mixer1(x)
        x = self.mixer1(x)
        y.append(x)
        x = self.patch_merge1(x)
        x = self.mixer2(x)
        x = self.mixer2(x)
        y.append(x)
        x = self.patch_merge2(x)
        x = self.mixer3(x)
        x = self.mixer3(x)
        y.append(x)
        x = self.patch_merge3(x)
        x = self.mixer4(x)
        x = self.mixer4(x)
        # Decoder: skips are added to the output of each stage's second mixer.
        x = self.mixer44(x)
        x = self.mixer44(x)
        x = self.patch_expand1(x)
        x = self.mixer33(x)
        x = self.mixer33(x) + y[2]
        x = self.patch_expand2(x)
        x = self.mixer22(x)
        x = self.mixer22(x) + y[1]
        x = self.patch_expand3(x)
        x = self.mixer11(x)
        x = self.mixer11(x) + y[0]
        x = self.final_expand(x)
        # NOTE(review): the trailing "| 6,516 | ..." tokens are dataset-export
        # residue fused onto this line, not part of the original source file.
        return x | 6,516 | 28.488688 | 127 | py |
imaging_MLPs | imaging_MLPs-master/SIDD/networks/img2img_mixer.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.init as init
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbedding(nn.Module):
    """Split an image into non-overlapping patches and embed each one.

    A convolution whose stride equals its kernel size is exactly a per-patch
    linear projection; the Rearrange converts to channels-last tokens, giving
    an output of shape [B, H/p, W/p, embed_dim].
    """

    def __init__(
        self,
        patch_size: int,
        embed_dim: int,
        channels: int
    ):
        super().__init__()
        self.proj = nn.Sequential(
            nn.Conv2d(
                in_channels=channels,
                out_channels=embed_dim,
                kernel_size=patch_size,
                stride=patch_size,
            ),
            Rearrange("b c h w -> b h w c"),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Embed ``x`` ([B, C, H, W]) into patch tokens ([B, H/p, W/p, D])."""
        return self.proj(x)
class PatchExpansion(nn.Module):
    """Inverse of PatchEmbedding: expand each token dim_scale**2-fold,
    redistribute the channels into a dim_scale x dim_scale spatial block,
    then 1x1-conv to image channels. Output is channels-first:
    [B, img_channels, H*dim_scale, W*dim_scale]."""
    def __init__(self, dim_scale, channel_dim, img_channels, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim_scale = dim_scale
        self.expand = nn.Linear(channel_dim, dim_scale**2* channel_dim, bias=False)
        self.output_dim = channel_dim
        self.norm = norm_layer(channel_dim)
        # 1x1 conv maps the embedding channels to the output image channels.
        self.output = nn.Conv2d(in_channels=channel_dim,out_channels=img_channels ,kernel_size=1,bias=False)
    def forward(self, x):
        # x: [B, H, W, C] channels-last tokens.
        x = self.expand(x)
        B, H, W, C = x.shape
        # Distribute the expanded channels over a dim_scale x dim_scale block.
        x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale, c=C//(self.dim_scale**2))
        x = x.view(B,-1,self.output_dim)
        x= self.norm(x)
        x = x.view(B,H*self.dim_scale, W*self.dim_scale,-1)
        # Back to channels-first for the final convolution.
        x = x.permute(0,3,1,2)
        x = self.output(x)
        return x
class MLPBlock(nn.Module):
    """Two-layer perceptron applied over the last axis, with a GELU between."""

    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, input_dim),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Expand -> GELU -> contract; the input shape is preserved."""
        return self.model(x)
class Mixer(nn.Module):
    """MLP-Mixer block with a width multiplier ``f_hidden`` for the hidden
    layers: mix tokens along H, then along W, then mix channels, with a
    residual connection around each mixing path."""

    def __init__(
        self,
        num_patches: int,
        num_channels: int,
        f_hidden: int
    ):
        super().__init__()
        # Transposes route each per-axis MLP over H first, then over W.
        self.token_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            Rearrange("b h w c -> b c w h"),
            MLPBlock(num_patches, num_patches * f_hidden),
            Rearrange("b c w h -> b c h w"),
            MLPBlock(num_patches, num_patches * f_hidden),
            Rearrange("b c h w -> b h w c"),
        )
        self.channel_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            MLPBlock(num_channels, num_channels * f_hidden),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = x + self.token_mixing(x)
        return x + self.channel_mixing(x)
class Img2Img_Mixer(nn.Module):
    """Plain (non-U-shaped) image-to-image MLP-Mixer: patch-embed, a stack of
    Mixer blocks at a fixed token resolution, then patch expansion back to
    pixel space."""
    def __init__(
        self,
        img_size: int = 256,
        img_channels: int = 3,
        patch_size: int = 4,
        embed_dim: int = 128,
        num_layers: int = 16,
        f_hidden: int = 4,
    ):
        super().__init__()
        self.patch_embed = PatchEmbedding(patch_size, embed_dim, img_channels)
        # num_layers independent Mixer blocks on the img_size/patch_size grid.
        layers = [ Mixer(img_size//patch_size, embed_dim, f_hidden)
                  for _ in range(num_layers)]
        self.mixer_layers = nn.Sequential(*layers)
        self.patch_expand = PatchExpansion(patch_size, embed_dim, img_channels)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.patch_embed(x)
        x = self.mixer_layers(x)
        x = self.patch_expand(x)
        # NOTE(review): the trailing "| 3,618 | ..." tokens are dataset-export
        # residue fused onto this line, not part of the original source file.
        return x | 3,618 | 27.054264 | 127 | py |
imaging_MLPs | imaging_MLPs-master/SIDD/networks/vit.py | '''
This code is modified from https://github.com/facebookresearch/convit. To adapt the vit/convit to image reconstruction, variable input sizes, and patch sizes for both spatial dimensions.
'''
import torch
import torch.nn as nn
from functools import partial
import torch.nn.functional as F
from timm.models.helpers import load_pretrained
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
    """Transformer feed-forward block: fc1 -> activation -> dropout -> fc2 ->
    dropout, with truncated-normal weight initialization."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Hidden and output widths default to the input width.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        """Apply the MLP; dropout follows each linear layer."""
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class GPSA(nn.Module):
    """Gated Positional Self-Attention (ConViT-style).

    Blends a content attention map (softmax of q.k) with a learned positional
    attention map via a per-head sigmoid gate; ``local_init`` biases the
    positional maps toward a convolution-like local pattern.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
                 locality_strength=1., use_local_init=True, grid_size=None):
        super().__init__()
        self.num_heads = num_heads
        self.dim = dim
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.k = nn.Linear(dim, dim, bias=qkv_bias)
        self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        # Maps the 3 relative-position features (dx, dy, squared distance)
        # to one positional score per head.
        self.pos_proj = nn.Linear(3, num_heads)
        self.proj_drop = nn.Dropout(proj_drop)
        self.locality_strength = locality_strength
        # One gate per head balancing content vs. positional attention.
        self.gating_param = nn.Parameter(1*torch.ones(self.num_heads))
        self.apply(self._init_weights)
        if use_local_init:
            self.local_init(locality_strength=locality_strength)
        self.current_grid_size = grid_size
    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def get_attention(self, x):
        # Returns the gated [B, heads, N, N] attention map for tokens x.
        B, N, C = x.shape
        k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        pos_score = self.pos_proj(self.rel_indices).expand(B, -1, -1,-1).permute(0,3,1,2)
        patch_score = (q @ k.transpose(-2, -1)) * self.scale
        patch_score = patch_score.softmax(dim=-1)
        pos_score = pos_score.softmax(dim=-1)
        # Per-head convex combination of content and positional attention.
        gating = self.gating_param.view(1,-1,1,1)
        attn = (1.-torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
        # Renormalize so each row sums to one after mixing.
        attn = attn / attn.sum(dim=-1).unsqueeze(-1)
        attn = self.attn_drop(attn)
        return attn
    def get_attention_map(self, x, return_map = False):
        attn_map = self.get_attention(x).mean(0) # average over batch
        # Mean attention distance per head, weighted by attention mass.
        distances = self.rel_indices.squeeze()[:,:,-1]**.5
        dist = torch.einsum('nm,hnm->h', (distances, attn_map))
        dist /= distances.size(0)
        if return_map:
            return dist, attn_map
        else:
            return dist
    def local_init(self, locality_strength=1.):
        # Initialize v as identity and pos_proj so each head favours one
        # nearby offset, mimicking a sqrt(heads) x sqrt(heads) conv kernel.
        self.v.weight.data.copy_(torch.eye(self.dim))
        locality_distance = 1 #max(1,1/locality_strength**.5)
        kernel_size = int(self.num_heads**.5)
        center = (kernel_size-1)/2 if kernel_size%2==0 else kernel_size//2
        for h1 in range(kernel_size):
            for h2 in range(kernel_size):
                position = h1+kernel_size*h2
                # Weight on the squared-distance feature is negative so
                # attention decays away from the head's preferred offset.
                self.pos_proj.weight.data[position,2] = -1
                self.pos_proj.weight.data[position,1] = 2*(h1-center)*locality_distance
                self.pos_proj.weight.data[position,0] = 2*(h2-center)*locality_distance
        self.pos_proj.weight.data *= locality_strength
    def get_rel_indices(self, ):
        # Build a [1, N, N, 3] tensor of (dx, dy, squared distance) between
        # all token pairs on the current H x W grid; cached on self.rel_indices.
        H, W = self.current_grid_size
        N = H*W
        rel_indices = torch.zeros(1, N, N, 3)
        indx = torch.arange(W).view(1,-1) - torch.arange(W).view(-1, 1)
        indx = indx.repeat(H, H)
        indy = torch.arange(H).view(1,-1) - torch.arange(H).view(-1, 1)
        indy = indy.repeat_interleave(W, dim=0).repeat_interleave(W, dim=1)
        indd = indx**2 + indy**2
        rel_indices[:,:,:,2] = indd.unsqueeze(0)
        rel_indices[:,:,:,1] = indy.unsqueeze(0)
        rel_indices[:,:,:,0] = indx.unsqueeze(0)
        device = self.v.weight.device
        self.rel_indices = rel_indices.to(device)
    def forward(self, x):
        B, N, C = x.shape
        # Rebuild the relative indices whenever the token count changes
        # (variable input resolution).
        if not hasattr(self, 'rel_indices') or self.rel_indices.size(1)!=N:
            self.get_rel_indices()
        attn = self.get_attention(x)
        v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class MHSA(nn.Module):
    """Standard multi-head self-attention with a fused qkv projection."""
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., grid_size=None):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.apply(self._init_weights)
        self.current_grid_size = grid_size
    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def get_attention_map(self, x, return_map = False):
        # Mean attention distance per head (optionally also the batch-averaged
        # attention map), used for diagnostics.
        self.get_rel_indices()
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn_map = (q @ k.transpose(-2, -1)) * self.scale
        attn_map = attn_map.softmax(dim=-1).mean(0) # average over batch
        distances = self.rel_indices.squeeze()[:,:,-1]**.5
        dist = torch.einsum('nm,hnm->h', (distances, attn_map))
        dist /= distances.size(0)
        if return_map:
            return dist, attn_map
        else:
            return dist
    def get_rel_indices(self, ):
        # Same (dx, dy, squared distance) tensor as GPSA builds; here it is
        # only needed for get_attention_map diagnostics.
        H, W = self.current_grid_size
        N = H*W
        rel_indices = torch.zeros(1, N, N, 3)
        indx = torch.arange(W).view(1,-1) - torch.arange(W).view(-1, 1)
        indx = indx.repeat(H, H)
        indy = torch.arange(H).view(1,-1) - torch.arange(H).view(-1, 1)
        indy = indy.repeat_interleave(W, dim=0).repeat_interleave(W, dim=1)
        indd = indx**2 + indy**2
        rel_indices[:,:,:,2] = indd.unsqueeze(0)
        rel_indices[:,:,:,1] = indy.unsqueeze(0)
        rel_indices[:,:,:,0] = indx.unsqueeze(0)
        device = self.qkv.weight.device
        self.rel_indices = rel_indices.to(device)
    def forward(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class Block(nn.Module):
    """Transformer block: pre-norm attention (GPSA or MHSA) and pre-norm MLP,
    each wrapped in a residual connection with stochastic depth."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_gpsa=True, **kwargs):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.use_gpsa = use_gpsa
        # Gated positional self-attention (ConViT) or vanilla multi-head
        # self-attention, selected per block.
        attn_cls = GPSA if self.use_gpsa else MHSA
        self.attn = attn_cls(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
                             attn_drop=attn_drop, proj_drop=drop, **kwargs)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer, drop=drop)

    def forward(self, x, grid_size):
        # The attention module needs the current token grid to build its
        # relative-position tensors.
        self.attn.current_grid_size = grid_size
        x = x + self.drop_path(self.attn(self.norm1(x)))
        return x + self.drop_path(self.mlp(self.norm2(x)))
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding, from timm
    """
    def __init__(self, patch_size, in_chans, embed_dim):
        super().__init__()
        # Strided conv with kernel == stride == patch_size: one linear
        # projection per non-overlapping patch.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.apply(self._init_weights)

    def forward(self, x):
        """Project [B, C, H, W] into [B, embed_dim, H/p, W/p] patch features."""
        return self.proj(x)

    def _init_weights(self, m):
        # Only Linear/LayerNorm children would be re-initialized here; the
        # Conv2d projection keeps its default initialization.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
class VisionTransformer(nn.Module):
""" Vision Transformer
"""
    def __init__(self, avrg_img_size=320, patch_size=16, in_chans=1, embed_dim=64, depth=8,
                 num_heads=9, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, global_pool=None,
                 gpsa_interval=[-1, -1], locality_strength=1., use_pos_embed=True):
        super().__init__()
        self.depth = depth
        # Total model width scales with the head count.
        embed_dim *= num_heads
        self.num_features = embed_dim # num_features for consistency with other models
        self.locality_strength = locality_strength
        self.use_pos_embed = use_pos_embed
        if isinstance(avrg_img_size, int):
            img_size = to_2tuple(avrg_img_size)
        if isinstance(patch_size, int):
            self.patch_size = to_2tuple(patch_size)
        else:
            self.patch_size = patch_size
        self.in_chans = in_chans
        self.patch_embed = PatchEmbed(
            patch_size=self.patch_size, in_chans=in_chans, embed_dim=embed_dim)
        self.pos_drop = nn.Dropout(p=drop_rate)
        if self.use_pos_embed:
            # Learned positional grid at the "average" training resolution;
            # forward_features resamples it per input size.
            self.pos_embed = nn.Parameter(
                torch.zeros(1, embed_dim,
                            img_size[0] // self.patch_size[0],
                            img_size[1] // self.patch_size[1])
            )
            trunc_normal_(self.pos_embed, std=.02)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        # Blocks with 1-based index inside gpsa_interval use gated positional
        # self-attention; all other blocks use plain MHSA.
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                use_gpsa=True,
                locality_strength=locality_strength)
            if i>=gpsa_interval[0]-1 and i<gpsa_interval[1] else
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                use_gpsa=False,)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # head
        self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')]
        # Per-token regression head: embed_dim -> in_chans * ph * pw pixels.
        self.head = nn.Linear(self.num_features, in_chans*self.patch_size[0]*self.patch_size[1])
def seq2img(self, x, img_size):
"""
Transforms sequence back into image space, input dims: [batch_size, num_patches, channels]
output dims: [batch_size, channels, H, W]
"""
x = x.view(x.shape[0], x.shape[1], self.in_chans, self.patch_size[0], self.patch_size[1])
x = x.chunk(x.shape[1], dim=1)
x = torch.cat(x, dim=4).permute(0,1,2,4,3)
x = x.chunk(img_size[0]//self.patch_size[0], dim=3)
x = torch.cat(x, dim=4).permute(0,1,2,4,3).squeeze(1)
return x
self.head.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self,):
return {'pos_embed'}
def get_head(self,):
return self.head
def reset_head(self,):
self.head = nn.Linear(self.num_features, in_chans*self.patch_size[0]*self.patch_size[1])
def forward_features(self, x, k=None):
x = self.patch_embed(x)
_, _, H, W = x.shape
if self.use_pos_embed:
pos_embed = F.interpolate(self.pos_embed, size=[H, W], mode='bilinear', align_corners = False)
x = x + pos_embed
x = x.flatten(2).transpose(1, 2)
x = self.pos_drop(x)
for u, blk in enumerate(self.blocks):
x = blk(x, (H, W))
if k is not None and u == k:
self.attention_map = blk.attn.get_attention_map(x, return_map = True)
x = self.norm(x)
return x
    def forward(self, x, k=None):
        # Remember the input spatial size so the token sequence can be folded
        # back into an image of the same H x W.
        _, _, H, W = x.shape
        x = self.forward_features(x, k)
        # Linear head maps each token to a flattened in_chans*ph*pw patch.
        x = self.head(x)
        x = self.seq2img(x, (H, W))
        # NOTE(review): the trailing "| 15,082 | ..." tokens are dataset-export
        # residue fused onto this line, not part of the original source file.
        return x | 15,082 | 39.007958 | 186 | py |
imaging_MLPs | imaging_MLPs-master/SIDD/networks/recon_net.py | import torch.nn as nn
import torch.nn.functional as F
from math import ceil, floor
class ReconNet(nn.Module):
    def __init__(self, net):
        # Wrap a patch-based network; ``net`` must expose ``patch_size``
        # (used by pad/unpad below) and accept the ``net(x, k)`` call form.
        super().__init__()
        self.net = net
def pad(self, x):
_, _, h, w = x.shape
hp, wp = self.net.patch_size
f1 = ( (wp - w % wp) % wp ) / 2
f2 = ( (hp - h % hp) % hp ) / 2
wpad = [floor(f1), ceil(f1)]
hpad = [floor(f2), ceil(f2)]
x = F.pad(x, wpad+hpad)
return x, wpad, hpad
def unpad(self, x, wpad, hpad):
return x[..., hpad[0] : x.shape[-2]-hpad[1], wpad[0] : x.shape[-1]-wpad[1]]
    def forward(self, x, k=None):
        # Pad to a patch-size multiple, run the wrapped network, then crop
        # back to the original spatial size.
        x, wpad, hpad = self.pad(x)
        x = self.net(x, k)
        x = self.unpad(x, wpad, hpad)
        # NOTE(review): the trailing "| 810 | ..." tokens are dataset-export
        # residue fused onto this line, not part of the original source file.
        return x | 810 | 26.033333 | 90 | py |
imaging_MLPs | imaging_MLPs-master/SIDD/networks/unet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class Unet(nn.Module):
    """
    PyTorch implementation of a U-Net model.
    O. Ronneberger, P. Fischer, and Thomas Brox. U-net: Convolutional networks
    for biomedical image segmentation. In International Conference on Medical
    image computing and computer-assisted intervention, pages 234–241.
    Springer, 2015.
    """
    def __init__(
        self,
        in_chans: int = 3,
        out_chans: int = 3,
        chans: int = 21,
        num_pool_layers: int = 4,
        drop_prob: float = 0.0,
    ):
        """
        Args:
            in_chans: Number of channels in the input to the U-Net model.
            out_chans: Number of channels in the output to the U-Net model.
            chans: Number of output channels of the first convolution layer.
            num_pool_layers: Number of down-sampling and up-sampling layers.
            drop_prob: Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.chans = chans
        self.num_pool_layers = num_pool_layers
        self.drop_prob = drop_prob
        # Encoder: the channel count doubles at each deeper level.
        self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
        ch = chans
        for _ in range(num_pool_layers - 1):
            self.down_sample_layers.append(ConvBlock(ch, ch * 2, drop_prob))
            ch *= 2
        # Bottleneck block at the coarsest resolution.
        self.conv = ConvBlock(ch, ch * 2, drop_prob)
        # Decoder: transpose convs upsample; ConvBlocks fuse the skip features.
        self.up_conv = nn.ModuleList()
        self.up_transpose_conv = nn.ModuleList()
        for _ in range(num_pool_layers - 1):
            self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
            self.up_conv.append(ConvBlock(ch * 2, ch, drop_prob))
            ch //= 2
        self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
        # Final level also maps down to out_chans with a 1x1 conv.
        self.up_conv.append(
            nn.Sequential(
                ConvBlock(ch * 2, ch, drop_prob),
                nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1),
            )
        )
    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.
        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        stack = []
        output = image
        # apply down-sampling layers, stashing each pre-pool feature map as a
        # skip connection for the decoder
        for layer in self.down_sample_layers:
            output = layer(output)
            stack.append(output)
            output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)
        output = self.conv(output)
        # apply up-sampling layers
        for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
            downsample_layer = stack.pop()
            output = transpose_conv(output)
            # reflect pad on the right/botton if needed to handle odd input dimensions
            padding = [0, 0, 0, 0]
            if output.shape[-1] != downsample_layer.shape[-1]:
                padding[1] = 1 # padding right
            if output.shape[-2] != downsample_layer.shape[-2]:
                padding[3] = 1 # padding bottom
            if torch.sum(torch.tensor(padding)) != 0:
                output = F.pad(output, padding, "reflect")
            # Concatenate the encoder skip connection along channels.
            output = torch.cat([output, downsample_layer], dim=1)
            output = conv(output)
        return output
class ConvBlock(nn.Module):
    """
    A Convolutional Block that consists of two convolution layers each followed by
    instance normalization, LeakyReLU activation and dropout.
    """

    def __init__(self, in_chans: int, out_chans: int, drop_prob: float):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
            drop_prob: Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.drop_prob = drop_prob
        # Two identical conv stages; only the first changes the channel count.
        stages = []
        channels = in_chans
        for _ in range(2):
            stages.extend([
                nn.Conv2d(channels, out_chans, kernel_size=3, padding=1, bias=False),
                nn.InstanceNorm2d(out_chans),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Dropout2d(drop_prob),
            ])
            channels = out_chans
        self.layers = nn.Sequential(*stages)

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """Apply both conv stages to a `(N, in_chans, H, W)` tensor,
        returning `(N, out_chans, H, W)`."""
        return self.layers(image)
class TransposeConvBlock(nn.Module):
    """
    A Transpose Convolutional Block that consists of one convolution transpose
    layers followed by instance normalization and LeakyReLU activation.
    """
    def __init__(self, in_chans: int, out_chans: int):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        # kernel 2 / stride 2 transpose conv doubles both spatial dimensions.
        self.layers = nn.Sequential(
            nn.ConvTranspose2d(
                in_chans, out_chans, kernel_size=2, stride=2, bias=False
            ),
            nn.InstanceNorm2d(out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )
    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.
        Returns:
            Output tensor of shape `(N, out_chans, H*2, W*2)`.
        """
        # NOTE(review): the trailing "| 5,981 | ..." tokens are dataset-export
        # residue fused onto this line, not part of the original source file.
        return self.layers(image) | 5,981 | 32.79661 | 88 | py |
imaging_MLPs | imaging_MLPs-master/SIDD/networks/u_mixer.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.init as init
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbeddings(nn.Module):
    """Non-overlapping patch embedding: a conv with kernel == stride ==
    patch_size, then a move to channels-last layout (B, H, W, C)."""

    def __init__(
        self,
        patch_size: int,
        embed_dim: int,
        channels: int
    ):
        super().__init__()
        patchify = nn.Conv2d(
            in_channels=channels,
            out_channels=embed_dim,
            kernel_size=patch_size,
            stride=patch_size
        )
        to_channels_last = Rearrange("b c h w -> b h w c")
        self.proj = nn.Sequential(patchify, to_channels_last)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map (B, channels, H, W) to (B, H/patch, W/patch, embed_dim)."""
        return self.proj(x)
class PatchMerge(nn.Module):
    r"""Patch merging (downsampling) layer.

    Gathers each 2x2 spatial neighborhood of a channels-last feature map into
    the channel dimension (4*C), normalizes, and linearly reduces to 2*C —
    halving the spatial resolution.

    Args:
        channel_dim (int): Number of input channels C.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """
    def __init__(self, channel_dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = channel_dim
        self.reduction = nn.Linear(4 * channel_dim, 2 * channel_dim, bias=False)
        self.norm = norm_layer(4 * channel_dim)
    def forward(self, x):
        """
        Args:
            x: Tensor of shape (B, H, W, C); H and W must be even.
        Returns:
            Tensor of shape (B, H/2, W/2, 2*C).
        Raises:
            ValueError: If H or W is odd (the strided slices below would then
                have mismatched sizes and fail obscurely inside torch.cat).
        """
        B, H, W, C = x.shape
        if H % 2 or W % 2:
            raise ValueError(f"PatchMerge requires even spatial dims, got {H}x{W}")
        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C (top-left of each 2x2 block)
        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C (bottom-left)
        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C (top-right)
        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C (bottom-right)
        # cat already yields (B, H/2, W/2, 4*C); the original's redundant
        # view() to the same shape has been dropped.
        x = torch.cat([x0, x1, x2, x3], -1)
        x = self.norm(x)
        x = self.reduction(x)
        return x
class PatchExpand(nn.Module):
    """Patch expanding (upsampling) layer: doubles H and W while halving the
    channel dimension (for the default dim_scale=2)."""

    def __init__(self, channel_dim, dim_scale=2, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = channel_dim
        if dim_scale == 2:
            self.expand = nn.Linear(channel_dim, 2 * channel_dim, bias=False)
        else:
            self.expand = nn.Identity()
        self.norm = norm_layer(channel_dim // dim_scale)

    def forward(self, x):
        """Map (B, H, W, C) to (B, 2H, 2W, C/2) for the default dim_scale."""
        x = self.expand(x)
        B, H, W, C = x.shape
        x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=2, p2=2, c=C//4)
        # This view matches the shape rearrange already produced; kept for
        # exact parity with the original implementation.
        x = x.view(B, H * 2, W * 2, C // 4)
        return self.norm(x)
class FinalPatchExpand(nn.Module):
    """Final spatial upsampling by `dim_scale` plus a 1x1 projection to image
    channels.

    Expands channels by dim_scale**2 with a linear layer, redistributes them
    into space (pixel-shuffle style, channels-last), applies LayerNorm, and
    projects to `img_channels` with a 1x1 convolution.
    """
    def __init__(self, channel_dim, img_channels, dim_scale=4, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim_scale = dim_scale
        # Generalized from the original hard-coded 16 (== 4**2): with any
        # other dim_scale the expanded channels no longer matched the
        # rearrange/norm below. Identical to the original for the default.
        self.expand = nn.Linear(channel_dim, dim_scale ** 2 * channel_dim, bias=False)
        self.output_dim = channel_dim
        self.norm = norm_layer(channel_dim)
        self.output = nn.Conv2d(in_channels=channel_dim, out_channels=img_channels, kernel_size=1, bias=False)
    def forward(self, x):
        """
        Args:
            x: Channels-last features of shape (B, H, W, channel_dim).
        Returns:
            Image tensor of shape (B, img_channels, H*dim_scale, W*dim_scale).
        """
        x = self.expand(x)
        B, H, W, C = x.shape
        s = self.dim_scale
        # Equivalent to einops rearrange 'b h w (p1 p2 c) -> b (h p1) (w p2) c'
        # with p1 = p2 = dim_scale, done with plain torch ops.
        x = x.view(B, H, W, s, s, C // (s * s))
        x = x.permute(0, 1, 3, 2, 4, 5).reshape(B, H * s, W * s, C // (s * s))
        x = x.view(B, -1, self.output_dim)
        x = self.norm(x)
        x = x.view(B, H * s, W * s, -1)
        x = x.permute(0, 3, 1, 2)  # to NCHW for the 1x1 conv
        return self.output(x)
class MLPBlock(nn.Module):
    """Two-layer perceptron with GELU: input_dim -> hidden_dim -> input_dim,
    applied along the last dimension."""

    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, input_dim),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return features with the same last-dim size as the input."""
        return self.model(x)
class Mixer(nn.Module):
    """One MLP-Mixer block for channels-last (B, H, W, C) features.

    Token mixing runs an MLP along H and then along W separately; this
    assumes square inputs with H == W == num_patches. Channel mixing runs a
    per-position MLP over C. Both sub-blocks are residual.
    """

    def __init__(
        self,
        num_patches: int,
        num_channels: int
    ):
        super().__init__()
        # Norm, MLP over the H axis, MLP over the W axis, back to b h w c.
        self.token_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            Rearrange("b h w c -> b c w h"),
            MLPBlock(num_patches, num_patches),
            Rearrange("b c w h -> b c h w"),
            MLPBlock(num_patches, num_patches),
            Rearrange("b c h w -> b h w c"),
        )
        self.channel_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            MLPBlock(num_channels, num_channels),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = x + self.token_mixing(x)
        return x + self.channel_mixing(x)
class U_Mixer(nn.Module):
    """U-shaped MLP-Mixer: mixer stages with patch-merge downsampling,
    patch-expand upsampling and additive skip connections.

    NOTE(review): each mixer module is applied twice per stage (weight
    sharing) — presumably intentional; confirm against the paper/config.
    """

    def __init__(
        self,
        img_size: int = 256,
        img_channels: int = 3,
        embed_dim: int = 96,
    ):
        super().__init__()
        # Mixer blocks: mixerN on the encoder path, mixerNN on the decoder.
        self.mixer1 = Mixer(img_size // 4, embed_dim)
        self.mixer2 = Mixer(img_size // 8, embed_dim * 2)
        self.mixer3 = Mixer(img_size // 16, embed_dim * 4)
        self.mixer4 = Mixer(img_size // 32, embed_dim * 8)
        self.mixer11 = Mixer(img_size // 4, embed_dim)
        self.mixer22 = Mixer(img_size // 8, embed_dim * 2)
        self.mixer33 = Mixer(img_size // 16, embed_dim * 4)
        self.mixer44 = Mixer(img_size // 32, embed_dim * 8)
        # Encoder: embed then three downsampling merges.
        self.patch_embed = PatchEmbeddings(4, embed_dim, img_channels)
        self.patch_merge1 = PatchMerge(embed_dim)
        self.patch_merge2 = PatchMerge(embed_dim * 2)
        self.patch_merge3 = PatchMerge(embed_dim * 4)
        # Decoder: three upsampling expands plus the final projection.
        self.patch_expand1 = PatchExpand(embed_dim * 8)
        self.patch_expand2 = PatchExpand(embed_dim * 4)
        self.patch_expand3 = PatchExpand(embed_dim * 2)
        self.final_expand = FinalPatchExpand(embed_dim, img_channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        skips = []
        x = self.patch_embed(x)
        # Encoder: mix twice (shared weights), save skip, then downsample.
        x = self.mixer1(self.mixer1(x))
        skips.append(x)
        x = self.patch_merge1(x)
        x = self.mixer2(self.mixer2(x))
        skips.append(x)
        x = self.patch_merge2(x)
        x = self.mixer3(self.mixer3(x))
        skips.append(x)
        x = self.patch_merge3(x)
        # Bottleneck: four mixer applications over two shared-weight blocks.
        x = self.mixer4(self.mixer4(x))
        x = self.mixer44(self.mixer44(x))
        # Decoder: upsample, mix twice, add the matching encoder skip.
        x = self.patch_expand1(x)
        x = self.mixer33(self.mixer33(x)) + skips[2]
        x = self.patch_expand2(x)
        x = self.mixer22(self.mixer22(x)) + skips[1]
        x = self.patch_expand3(x)
        x = self.mixer11(self.mixer11(x)) + skips[0]
        return self.final_expand(x)
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/losses.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class SSIMLoss(nn.Module):
    """SSIM loss module: returns ``1 - mean(SSIM(X, Y))``."""

    def __init__(self, win_size: int = 7, k1: float = 0.01, k2: float = 0.03):
        """
        Args:
            win_size: Window size for SSIM calculation.
            k1: k1 parameter for SSIM calculation.
            k2: k2 parameter for SSIM calculation.
        """
        super().__init__()
        self.win_size = win_size
        self.k1, self.k2 = k1, k2
        # Uniform averaging window, registered so it moves with the module.
        self.register_buffer("w", torch.ones(1, 1, win_size, win_size) / win_size ** 2)
        NP = win_size ** 2
        # Bessel correction factor for the windowed (co)variances.
        self.cov_norm = NP / (NP - 1)

    def forward(self, X: torch.Tensor, Y: torch.Tensor, data_range: torch.Tensor):
        assert isinstance(self.w, torch.Tensor)
        data_range = data_range[:, None, None, None]
        C1 = (self.k1 * data_range) ** 2
        C2 = (self.k2 * data_range) ** 2
        # Windowed means.
        mean_x = F.conv2d(X, self.w)
        mean_y = F.conv2d(Y, self.w)
        # Windowed second moments.
        mean_xx = F.conv2d(X * X, self.w)
        mean_yy = F.conv2d(Y * Y, self.w)
        mean_xy = F.conv2d(X * Y, self.w)
        # (Co)variances with Bessel correction.
        var_x = self.cov_norm * (mean_xx - mean_x * mean_x)
        var_y = self.cov_norm * (mean_yy - mean_y * mean_y)
        cov_xy = self.cov_norm * (mean_xy - mean_x * mean_y)
        # Standard SSIM: (A1 * A2) / (B1 * B2).
        luminance_num = 2 * mean_x * mean_y + C1
        contrast_num = 2 * cov_xy + C2
        luminance_den = mean_x ** 2 + mean_y ** 2 + C1
        contrast_den = var_x + var_y + C2
        ssim_map = (luminance_num * contrast_num) / (luminance_den * contrast_den)
        return 1 - ssim_map.mean()
| 1,671 | 28.857143 | 87 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/coil_combine.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import fastmri
def rss(data: torch.Tensor, dim: int = 0) -> torch.Tensor:
    """
    Compute the Root Sum of Squares (RSS).

    RSS is computed assuming that ``dim`` is the coil dimension.

    Args:
        data: The input tensor.
        dim: The dimension along which to apply the RSS transform.

    Returns:
        The RSS value.
    """
    return data.pow(2).sum(dim).sqrt()
def rss_complex(data: torch.Tensor, dim: int = 0) -> torch.Tensor:
    """
    Compute the Root Sum of Squares (RSS) for complex inputs.

    RSS is computed assuming that ``dim`` is the coil dimension; the trailing
    dimension of ``data`` holds the real/imaginary parts.

    Args:
        data: The input tensor.
        dim: The dimension along which to apply the RSS transform.

    Returns:
        The RSS value.
    """
    squared_magnitude = fastmri.complex_abs_sq(data)
    return squared_magnitude.sum(dim).sqrt()
| 996 | 22.186047 | 66 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/math.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
def complex_mul(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """
    Complex multiplication.

    Both inputs are real tensors whose last dimension (size 2) holds the
    real/imaginary components.

    Args:
        x: A PyTorch tensor with the last dimension of size 2.
        y: A PyTorch tensor with the last dimension of size 2.

    Returns:
        A PyTorch tensor with the last dimension of size 2.

    Raises:
        ValueError: If either input's last dimension is not 2.
    """
    if not x.shape[-1] == y.shape[-1] == 2:
        raise ValueError("Tensors do not have separate complex dim.")
    xr, xi = x[..., 0], x[..., 1]
    yr, yi = y[..., 0], y[..., 1]
    # (a + bi)(c + di) = (ac - bd) + (ad + bc)i
    return torch.stack((xr * yr - xi * yi, xr * yi + xi * yr), dim=-1)
def complex_conj(x: torch.Tensor) -> torch.Tensor:
    """
    Complex conjugate.

    This applies the complex conjugate assuming that the input array has the
    last dimension as the complex dimension.

    Args:
        x: A PyTorch tensor with the last dimension of size 2.

    Returns:
        A PyTorch tensor with the last dimension of size 2.

    Raises:
        ValueError: If the last dimension of ``x`` is not 2.
    """
    # (docstring fix: the original documented a nonexistent `y` argument)
    if not x.shape[-1] == 2:
        raise ValueError("Tensor does not have separate complex dim.")
    return torch.stack((x[..., 0], -x[..., 1]), dim=-1)
def complex_abs(data: torch.Tensor) -> torch.Tensor:
    """
    Compute the absolute value of a complex valued input tensor.

    Args:
        data: A complex valued tensor, where the size of the final dimension
            should be 2.

    Returns:
        Absolute value of data.

    Raises:
        ValueError: If the last dimension of ``data`` is not 2.
    """
    if not data.shape[-1] == 2:
        raise ValueError("Tensor does not have separate complex dim.")
    return data.pow(2).sum(dim=-1).sqrt()
def complex_abs_sq(data: torch.Tensor) -> torch.Tensor:
    """
    Compute the squared absolute value of a complex tensor.

    Args:
        data: A complex valued tensor, where the size of the final dimension
            should be 2.

    Returns:
        Squared absolute value of data.

    Raises:
        ValueError: If the last dimension of ``data`` is not 2.
    """
    if not data.shape[-1] == 2:
        raise ValueError("Tensor does not have separate complex dim.")
    return data.pow(2).sum(dim=-1)
def tensor_to_complex_np(data: torch.Tensor) -> np.ndarray:
    """
    Converts a complex torch tensor to a numpy array.

    The trailing dimension of ``data`` (size 2) holds the real and imaginary
    parts; the returned array has that dimension folded into a complex dtype.

    Args:
        data: Input data to be converted to numpy.

    Returns:
        Complex numpy version of data.
    """
    arr = data.numpy()
    return arr[..., 0] + 1j * arr[..., 1]
| 2,728 | 25.754902 | 77 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/__init__.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from packaging import version
from .coil_combine import rss, rss_complex
from .fftc import fftshift, ifftshift, roll
from .losses import SSIMLoss
from .math import (
complex_abs,
complex_abs_sq,
complex_conj,
complex_mul,
tensor_to_complex_np,
)
from .utils import convert_fnames_to_v2, save_reconstructions
if version.parse(torch.__version__) >= version.parse("1.7.0"):
from .fftc import fft2c_new as fft2c
from .fftc import ifft2c_new as ifft2c
else:
from .fftc import fft2c_old as fft2c
from .fftc import ifft2c_old as ifft2c
| 758 | 26.107143 | 63 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/fftc.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import List, Optional
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse("1.7.0"):
import torch.fft # type: ignore
def fft2c_old(data: torch.Tensor) -> torch.Tensor:
    """
    Apply centered 2 dimensional Fast Fourier Transform.

    Legacy path for torch < 1.7, where ``torch.fft`` was a function taking a
    real tensor with a trailing complex dimension of size 2 (selected by the
    version check at the top of this module).

    Args:
        data: Complex valued input data containing at least 3 dimensions:
            dimensions -3 & -2 are spatial dimensions and dimension -1 has size
            2. All other dimensions are assumed to be batch dimensions.
    Returns:
        The FFT of the input.
    """
    if not data.shape[-1] == 2:
        raise ValueError("Tensor does not have separate complex dim.")
    # Shift so the spatial center maps to index 0 before the transform ...
    data = ifftshift(data, dim=[-3, -2])
    data = torch.fft(data, 2, normalized=True)  # 2-D FFT, orthonormal scaling
    # ... and re-center the spectrum afterwards.
    data = fftshift(data, dim=[-3, -2])
    return data
def ifft2c_old(data: torch.Tensor) -> torch.Tensor:
    """
    Apply centered 2-dimensional Inverse Fast Fourier Transform.

    Legacy path for torch < 1.7, where ``torch.ifft`` was a function taking a
    real tensor with a trailing complex dimension of size 2 (selected by the
    version check at the top of this module).

    Args:
        data: Complex valued input data containing at least 3 dimensions:
            dimensions -3 & -2 are spatial dimensions and dimension -1 has size
            2. All other dimensions are assumed to be batch dimensions.
    Returns:
        The IFFT of the input.
    """
    if not data.shape[-1] == 2:
        raise ValueError("Tensor does not have separate complex dim.")
    # Shift so the spectrum center maps to index 0 before the transform ...
    data = ifftshift(data, dim=[-3, -2])
    data = torch.ifft(data, 2, normalized=True)  # 2-D IFFT, orthonormal scaling
    # ... and re-center the image afterwards.
    data = fftshift(data, dim=[-3, -2])
    return data
def fft2c_new(data: torch.Tensor) -> torch.Tensor:
    """
    Apply centered 2 dimensional Fast Fourier Transform (torch >= 1.7 path).

    Args:
        data: Complex valued input data containing at least 3 dimensions:
            dimensions -3 & -2 are spatial dimensions and dimension -1 has size
            2. All other dimensions are assumed to be batch dimensions.

    Returns:
        The FFT of the input.
    """
    if data.shape[-1] != 2:
        raise ValueError("Tensor does not have separate complex dim.")
    # Center -> corner, transform with orthonormal scaling, corner -> center.
    shifted = ifftshift(data, dim=[-3, -2])
    transformed = torch.fft.fftn(  # type: ignore
        torch.view_as_complex(shifted), dim=(-2, -1), norm="ortho"
    )
    return fftshift(torch.view_as_real(transformed), dim=[-3, -2])
def ifft2c_new(data: torch.Tensor) -> torch.Tensor:
    """
    Apply centered 2-dimensional Inverse Fast Fourier Transform (torch >= 1.7
    path).

    Args:
        data: Complex valued input data containing at least 3 dimensions:
            dimensions -3 & -2 are spatial dimensions and dimension -1 has size
            2. All other dimensions are assumed to be batch dimensions.

    Returns:
        The IFFT of the input.
    """
    if data.shape[-1] != 2:
        raise ValueError("Tensor does not have separate complex dim.")
    # Center -> corner, transform with orthonormal scaling, corner -> center.
    shifted = ifftshift(data, dim=[-3, -2])
    transformed = torch.fft.ifftn(  # type: ignore
        torch.view_as_complex(shifted), dim=(-2, -1), norm="ortho"
    )
    return fftshift(torch.view_as_real(transformed), dim=[-3, -2])
# Helper functions
def roll_one_dim(x: torch.Tensor, shift: int, dim: int) -> torch.Tensor:
    """
    Similar to roll but for only one dim.

    Args:
        x: A PyTorch tensor.
        shift: Amount to roll.
        dim: Which dimension to roll.

    Returns:
        Rolled version of x.
    """
    size = x.size(dim)
    shift = shift % size
    if shift == 0:
        return x
    # Split at the wrap point and swap the two pieces.
    head = x.narrow(dim, 0, size - shift)
    tail = x.narrow(dim, size - shift, shift)
    return torch.cat((tail, head), dim=dim)
def roll(
    x: torch.Tensor,
    shift: List[int],
    dim: List[int],
) -> torch.Tensor:
    """
    Similar to np.roll but applies to PyTorch Tensors.

    Args:
        x: A PyTorch tensor.
        shift: Amount to roll per dimension.
        dim: Which dimensions to roll.

    Returns:
        Rolled version of x.

    Raises:
        ValueError: If ``shift`` and ``dim`` have different lengths.
    """
    if len(shift) != len(dim):
        raise ValueError("len(shift) must match len(dim)")
    # Apply the single-dim roll once per (shift, dim) pair.
    for amount, axis in zip(shift, dim):
        x = roll_one_dim(x, amount, axis)
    return x
def fftshift(x: torch.Tensor, dim: Optional[List[int]] = None) -> torch.Tensor:
    """
    Similar to np.fft.fftshift but applies to PyTorch Tensors.

    Args:
        x: A PyTorch tensor.
        dim: Which dimensions to fftshift (all of them when None).

    Returns:
        fftshifted version of x.
    """
    # Explicit loops (not comprehensions) keep this torch.jit.script-friendly.
    axes: List[int] = []
    if dim is None:
        for i in range(x.dim()):
            axes.append(i)
    else:
        axes = dim
    shift: List[int] = []
    for axis in axes:
        shift.append(x.shape[axis] // 2)
    return roll(x, shift, axes)
def ifftshift(x: torch.Tensor, dim: Optional[List[int]] = None) -> torch.Tensor:
    """
    Similar to np.fft.ifftshift but applies to PyTorch Tensors.

    Args:
        x: A PyTorch tensor.
        dim: Which dimensions to ifftshift (all of them when None).

    Returns:
        ifftshifted version of x.
    """
    # Explicit loops (not comprehensions) keep this torch.jit.script-friendly.
    axes: List[int] = []
    if dim is None:
        for i in range(x.dim()):
            axes.append(i)
    else:
        axes = dim
    shift: List[int] = []
    for axis in axes:
        # Ceil division: inverse of the fftshift offset for odd sizes.
        shift.append((x.shape[axis] + 1) // 2)
    return roll(x, shift, axes)
| 5,535 | 25.236967 | 80 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/data/volume_sampler.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import List, Optional, Union
import torch
import torch.distributed as dist
from fastmri.data.mri_data import CombinedSliceDataset, SliceDataset
from torch.utils.data import Sampler
class VolumeSampler(Sampler):
    """
    Sampler for volumetric MRI data.
    Based on pytorch DistributedSampler, the difference is that all instances
    from the same MRI volume need to go to the same node for distributed
    training. Dataset example is a list of tuples (fname, instance), where
    fname is essentially the volume name (actually a filename).
    """
    def __init__(
        self,
        dataset: Union[CombinedSliceDataset, SliceDataset],
        num_replicas: Optional[int] = None,
        rank: Optional[int] = None,
        shuffle: bool = True,
        seed: int = 0,
    ):
        """
        Args:
            dataset: An MRI dataset (e.g., SliceData).
            num_replicas: Number of processes participating in distributed
                training. By default, :attr:`rank` is retrieved from the
                current distributed group.
            rank: Rank of the current process within :attr:`num_replicas`. By
                default, :attr:`rank` is retrieved from the current distributed
                group.
            shuffle: If ``True`` (default), sampler will shuffle the indices.
            seed: random seed used to shuffle the sampler if
                :attr:`shuffle=True`. This number should be identical across
                all processes in the distributed group.
        """
        # Fall back to the active process group for world size / rank.
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.shuffle = shuffle
        self.seed = seed
        # get all file names and split them based on number of processes
        # (sorted so every rank computes the identical partition)
        self.all_volume_names = sorted(
            set(str(example[0]) for example in self.dataset.examples)
        )
        self.all_volumes_split: List[List[str]] = []
        for rank_num in range(self.num_replicas):
            # round-robin assignment of whole volumes to ranks
            self.all_volumes_split.append(
                [
                    self.all_volume_names[i]
                    for i in range(
                        rank_num, len(self.all_volume_names), self.num_replicas
                    )
                ]
            )
        # get slice indices for each file name
        rank_indices: List[List[int]] = [[] for _ in range(self.num_replicas)]
        for i, example in enumerate(self.dataset.examples):
            vname = str(example[0])
            for rank_num in range(self.num_replicas):
                if vname in self.all_volumes_split[rank_num]:
                    rank_indices[rank_num].append(i)
                    break
        # need to send equal number of samples to each process - take the max
        self.num_samples = max(len(indices) for indices in rank_indices)
        self.total_size = self.num_samples * self.num_replicas
        self.indices = rank_indices[self.rank]

    def __iter__(self):
        """Yield this rank's slice indices, padded to ``num_samples``."""
        if self.shuffle:
            # deterministically shuffle based on epoch and seed
            g = torch.Generator()
            g.manual_seed(self.seed + self.epoch)
            ordering = torch.randperm(len(self.indices), generator=g).tolist()
            indices = [self.indices[i] for i in ordering]
        else:
            indices = self.indices
        # add extra samples to match num_samples (repeat, then top up)
        repeat_times = self.num_samples // len(indices)
        indices = indices * repeat_times
        indices = indices + indices[: self.num_samples - len(indices)]
        assert len(indices) == self.num_samples
        return iter(indices)

    def __len__(self):
        # Per-rank length (identical on every rank by construction).
        return self.num_samples

    def set_epoch(self, epoch):
        # Called by the training loop so each epoch gets a new shuffle order.
        self.epoch = epoch
| 4,332 | 36.678261 | 82 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/data/mri_data.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
import pickle
import random
import xml.etree.ElementTree as etree
from pathlib import Path
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
from warnings import warn
import h5py
import numpy as np
import torch
import yaml
def et_query(
    root: etree.Element,
    qlist: Sequence[str],
    namespace: str = "http://www.ismrm.org/ISMRMRD",
) -> str:
    """
    ElementTree query function.

    Builds a nested ``//``-separated, namespaced XPath from ``qlist`` and
    returns the text of the first matching element.

    Args:
        root: Root of the xml to search through.
        qlist: A list of strings for nested searches, e.g. ["Encoding",
            "matrixSize"]
        namespace: Optional; xml namespace to prepend query.

    Returns:
        The retrieved data as a string.

    Raises:
        RuntimeError: If no element matches the query.
    """
    prefix = "ismrmrd_namespace"
    ns = {prefix: namespace}
    path = "." + "".join(f"//{prefix}:{el}" for el in qlist)
    value = root.find(path, ns)
    if value is None:
        raise RuntimeError("Element not found")
    return str(value.text)
def fetch_dir(
    key: str, data_config_file: Union[str, Path, os.PathLike] = "fastmri_dirs.yaml"
) -> Path:
    """
    Data directory fetcher.

    Looks up ``key`` in a small YAML config file. If the file does not exist
    yet, a template with placeholder paths is written and a warning is issued
    so the user can fill in real directories.

    Args:
        key: key to retrieve path from data_config_file. Expected to be in
            ("knee_path", "brain_path", "log_path").
        data_config_file: Optional; Default path config file to fetch path
            from.

    Returns:
        The path to the specified directory.
    """
    data_config_file = Path(data_config_file)
    if data_config_file.is_file():
        with open(data_config_file, "r") as f:
            data_dir = yaml.safe_load(f)[key]
    else:
        default_config = {
            "knee_path": "/path/to/knee",
            "brain_path": "/path/to/brain",
            "log_path": ".",
        }
        # Write a template so the user only has to edit the values.
        with open(data_config_file, "w") as f:
            yaml.dump(default_config, f)
        data_dir = default_config[key]
        warn(
            f"Path config at {data_config_file.resolve()} does not exist. "
            "A template has been created for you. "
            "Please enter the directory paths for your system to have defaults."
        )
    return Path(data_dir)
class CombinedSliceDataset(torch.utils.data.Dataset):
    """
    A container for combining slice datasets.

    Builds one SliceDataset per (root, challenge) pair and exposes them as a
    single flat dataset; ``examples`` is the concatenation of all children's
    example lists.
    """
    def __init__(
        self,
        roots: Sequence[Path],
        challenges: Sequence[str],
        transforms: Optional[Sequence[Optional[Callable]]] = None,
        sample_rates: Optional[Sequence[Optional[float]]] = None,
        volume_sample_rates: Optional[Sequence[Optional[float]]] = None,
        use_dataset_cache: bool = False,
        dataset_cache_file: Union[str, Path, os.PathLike] = "dataset_cache.pkl",
        num_cols: Optional[Tuple[int]] = None,
    ):
        """
        Args:
            roots: Paths to the datasets.
            challenges: "singlecoil" or "multicoil" depending on which
                challenge to use.
            transforms: Optional; A sequence of callable objects that
                preprocesses the raw data into appropriate form. The transform
                function should take 'kspace', 'target', 'attributes',
                'filename', and 'slice' as inputs. 'target' may be null for
                test data.
            sample_rates: Optional; A sequence of floats between 0 and 1.
                This controls what fraction of the slices should be loaded.
                When creating subsampled datasets either set sample_rates
                (sample by slices) or volume_sample_rates (sample by volumes)
                but not both.
            volume_sample_rates: Optional; A sequence of floats between 0 and 1.
                This controls what fraction of the volumes should be loaded.
                When creating subsampled datasets either set sample_rates
                (sample by slices) or volume_sample_rates (sample by volumes)
                but not both.
            use_dataset_cache: Whether to cache dataset metadata. This is very
                useful for large datasets like the brain data.
            dataset_cache_file: Optional; A file in which to cache dataset
                information for faster load times.
            num_cols: Optional; If provided, only slices with the desired
                number of columns will be considered.
        """
        # The two subsampling modes are mutually exclusive.
        if sample_rates is not None and volume_sample_rates is not None:
            raise ValueError(
                "either set sample_rates (sample by slices) or volume_sample_rates (sample by volumes) but not both"
            )
        # Default each per-dataset option to "unset" for every root.
        if transforms is None:
            transforms = [None] * len(roots)
        if sample_rates is None:
            sample_rates = [None] * len(roots)
        if volume_sample_rates is None:
            volume_sample_rates = [None] * len(roots)
        if not (
            len(roots)
            == len(transforms)
            == len(challenges)
            == len(sample_rates)
            == len(volume_sample_rates)
        ):
            raise ValueError(
                "Lengths of roots, transforms, challenges, sample_rates do not match"
            )
        self.datasets = []
        # Flat list of (filename, slice index, metadata) across all children.
        self.examples: List[Tuple[Path, int, Dict[str, object]]] = []
        for i in range(len(roots)):
            self.datasets.append(
                SliceDataset(
                    root=roots[i],
                    transform=transforms[i],
                    challenge=challenges[i],
                    sample_rate=sample_rates[i],
                    volume_sample_rate=volume_sample_rates[i],
                    use_dataset_cache=use_dataset_cache,
                    dataset_cache_file=dataset_cache_file,
                    num_cols=num_cols,
                )
            )
            self.examples = self.examples + self.datasets[-1].examples
    def __len__(self):
        return sum(len(dataset) for dataset in self.datasets)
    def __getitem__(self, i):
        # Walk the child datasets, offsetting the index into each in turn.
        for dataset in self.datasets:
            if i < len(dataset):
                return dataset[i]
            else:
                i = i - len(dataset)
class SliceDataset(torch.utils.data.Dataset):
    """
    A PyTorch Dataset that provides access to MR image slices.

    Each example is one 2D slice of an HDF5 volume under ``root``; metadata is
    parsed from the embedded ISMRMRD XML header (and optionally cached on
    disk to avoid re-reading every file on startup).
    """
    def __init__(
        self,
        root: Union[str, Path, os.PathLike],
        challenge: str,
        transform: Optional[Callable] = None,
        use_dataset_cache: bool = False,
        sample_rate: Optional[float] = None,
        volume_sample_rate: Optional[float] = None,
        dataset_cache_file: Union[str, Path, os.PathLike] = "dataset_cache.pkl",
        num_cols: Optional[Tuple[int]] = None,
    ):
        """
        Args:
            root: Path to the dataset.
            challenge: "singlecoil" or "multicoil" depending on which challenge
                to use.
            transform: Optional; A callable object that pre-processes the raw
                data into appropriate form. The transform function should take
                'kspace', 'target', 'attributes', 'filename', and 'slice' as
                inputs. 'target' may be null for test data.
            use_dataset_cache: Whether to cache dataset metadata. This is very
                useful for large datasets like the brain data.
            sample_rate: Optional; A float between 0 and 1. This controls what fraction
                of the slices should be loaded. Defaults to 1 if no value is given.
                When creating a sampled dataset either set sample_rate (sample by slices)
                or volume_sample_rate (sample by volumes) but not both.
            volume_sample_rate: Optional; A float between 0 and 1. This controls what fraction
                of the volumes should be loaded. Defaults to 1 if no value is given.
                When creating a sampled dataset either set sample_rate (sample by slices)
                or volume_sample_rate (sample by volumes) but not both.
            dataset_cache_file: Optional; A file in which to cache dataset
                information for faster load times.
            num_cols: Optional; If provided, only slices with the desired
                number of columns will be considered.
        """
        if challenge not in ("singlecoil", "multicoil"):
            raise ValueError('challenge should be either "singlecoil" or "multicoil"')
        if sample_rate is not None and volume_sample_rate is not None:
            raise ValueError(
                "either set sample_rate (sample by slices) or volume_sample_rate (sample by volumes) but not both"
            )
        self.dataset_cache_file = Path(dataset_cache_file)
        self.transform = transform
        # HDF5 key of the ground-truth reconstruction for this challenge.
        self.recons_key = (
            "reconstruction_esc" if challenge == "singlecoil" else "reconstruction_rss"
        )
        self.examples = []
        # set default sampling mode if none given
        if sample_rate is None:
            sample_rate = 1.0
        if volume_sample_rate is None:
            volume_sample_rate = 1.0
        # load dataset cache if we have and user wants to use it
        if self.dataset_cache_file.exists() and use_dataset_cache:
            with open(self.dataset_cache_file, "rb") as f:
                dataset_cache = pickle.load(f)
        else:
            dataset_cache = {}
        # check if our dataset is in the cache
        # if there, use that metadata, if not, then regenerate the metadata
        if dataset_cache.get(root) is None or not use_dataset_cache:
            files = list(Path(root).iterdir())
            for fname in sorted(files):
                # One example per slice of each volume.
                metadata, num_slices = self._retrieve_metadata(fname)
                self.examples += [
                    (fname, slice_ind, metadata) for slice_ind in range(num_slices)
                ]
            if dataset_cache.get(root) is None and use_dataset_cache:
                dataset_cache[root] = self.examples
                logging.info(f"Saving dataset cache to {self.dataset_cache_file}.")
                with open(self.dataset_cache_file, "wb") as f:
                    pickle.dump(dataset_cache, f)
        else:
            logging.info(f"Using dataset cache from {self.dataset_cache_file}.")
            self.examples = dataset_cache[root]
        # subsample if desired
        # NOTE(review): uses the global `random` module, so results depend on
        # the caller seeding `random` beforehand.
        if sample_rate < 1.0:  # sample by slice
            random.shuffle(self.examples)
            num_examples = round(len(self.examples) * sample_rate)
            self.examples = self.examples[:num_examples]
        elif volume_sample_rate < 1.0:  # sample by volume
            vol_names = sorted(list(set([f[0].stem for f in self.examples])))
            random.shuffle(vol_names)
            num_volumes = round(len(vol_names) * volume_sample_rate)
            sampled_vols = vol_names[:num_volumes]
            self.examples = [
                example for example in self.examples if example[0].stem in sampled_vols
            ]
        # Optionally keep only slices whose k-space has one of the requested
        # column counts (encoding_size[1] is the phase-encode dimension).
        if num_cols:
            self.examples = [
                ex
                for ex in self.examples
                if ex[2]["encoding_size"][1] in num_cols  # type: ignore
            ]
    def _retrieve_metadata(self, fname):
        """Parse the ISMRMRD XML header of one HDF5 file.

        Returns a (metadata dict, number of slices) pair; the metadata holds
        encoded/recon matrix sizes and the k-space padding boundaries.
        """
        with h5py.File(fname, "r") as hf:
            et_root = etree.fromstring(hf["ismrmrd_header"][()])
            enc = ["encoding", "encodedSpace", "matrixSize"]
            enc_size = (
                int(et_query(et_root, enc + ["x"])),
                int(et_query(et_root, enc + ["y"])),
                int(et_query(et_root, enc + ["z"])),
            )
            rec = ["encoding", "reconSpace", "matrixSize"]
            recon_size = (
                int(et_query(et_root, rec + ["x"])),
                int(et_query(et_root, rec + ["y"])),
                int(et_query(et_root, rec + ["z"])),
            )
            lims = ["encoding", "encodingLimits", "kspace_encoding_step_1"]
            enc_limits_center = int(et_query(et_root, lims + ["center"]))
            enc_limits_max = int(et_query(et_root, lims + ["maximum"])) + 1
            # Columns outside [padding_left, padding_right) were zero-padded
            # rather than acquired.
            padding_left = enc_size[1] // 2 - enc_limits_center
            padding_right = padding_left + enc_limits_max
            num_slices = hf["kspace"].shape[0]
        metadata = {
            "padding_left": padding_left,
            "padding_right": padding_right,
            "encoding_size": enc_size,
            "recon_size": recon_size,
        }
        return metadata, num_slices
    def __len__(self):
        return len(self.examples)
    def __getitem__(self, i: int):
        """Load one slice (k-space, optional mask/target, attrs) from disk."""
        fname, dataslice, metadata = self.examples[i]
        with h5py.File(fname, "r") as hf:
            kspace = hf["kspace"][dataslice]
            mask = np.asarray(hf["mask"]) if "mask" in hf else None
            target = hf[self.recons_key][dataslice] if self.recons_key in hf else None
            attrs = dict(hf.attrs)
            attrs.update(metadata)
        if self.transform is None:
            sample = (kspace, mask, target, attrs, fname.name, dataslice)
        else:
            sample = self.transform(kspace, mask, target, attrs, fname.name, dataslice)
        return sample
| 13,630 | 36.759003 | 116 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/data/subsample.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import contextlib
from typing import Optional, Sequence, Tuple, Union
import numpy as np
import torch
@contextlib.contextmanager
def temp_seed(rng: np.random, seed: Optional[Union[int, Tuple[int, ...]]]):
    """Temporarily seed ``rng`` for the duration of the ``with`` block.

    The previous RNG state is restored on exit (even if the body raises).
    When ``seed`` is None this is a no-op passthrough.
    """
    if seed is None:
        yield
        return
    saved_state = rng.get_state()
    rng.seed(seed)
    try:
        yield
    finally:
        rng.set_state(saved_state)
class MaskFunc:
    """
    Base class for GRAPPA-style sampling masks.

    Concrete subclasses create a sampling mask that densely samples the
    k-space center while subsampling outer regions according to the chosen
    acceleration factor. Subclasses must implement ``__call__``.
    """

    def __init__(self, center_fractions: Sequence[float], accelerations: Sequence[int]):
        """
        Args:
            center_fractions: Fraction of low-frequency columns to be retained.
                If multiple values are provided, then one of these numbers is
                chosen uniformly each time.
            accelerations: Amount of under-sampling. This should have the same
                length as center_fractions. If multiple values are provided,
                then one of these is chosen uniformly each time.
        """
        if len(center_fractions) != len(accelerations):
            raise ValueError(
                "Number of center fractions should match number of accelerations"
            )
        self.center_fractions = center_fractions
        self.accelerations = accelerations
        self.rng = np.random.RandomState()  # pylint: disable=no-member

    def __call__(
        self, shape: Sequence[int], seed: Optional[Union[int, Tuple[int, ...]]] = None
    ) -> torch.Tensor:
        raise NotImplementedError

    def choose_acceleration(self):
        """Choose acceleration based on class parameters."""
        # Pick one paired (center_fraction, acceleration) entry uniformly.
        idx = self.rng.randint(0, len(self.accelerations))
        return self.center_fractions[idx], self.accelerations[idx]
class RandomMaskFunc(MaskFunc):
    """
    Creates a random column sub-sampling mask of a given shape.

    If the k-space data has N columns, the mask keeps a fully-sampled
    centered band of ``round(N * center_fraction)`` low-frequency columns and
    keeps each remaining column independently with probability
    ``(N / acceleration - N_center) / (N - N_center)``, so the expected
    number of sampled columns equals ``N / acceleration``.

    When several (center_fraction, acceleration) pairs are configured, one
    pair is drawn uniformly at random per call. For example, with
    accelerations=[4, 8] and center_fractions=[0.08, 0.04], 4x acceleration
    with an 8% center and 8x acceleration with a 4% center are each chosen
    with 50% probability.
    """
    def __call__(
        self, shape: Sequence[int], seed: Optional[Union[int, Tuple[int, ...]]] = None
    ) -> torch.Tensor:
        """
        Create the mask.

        Args:
            shape: Shape of the mask to create; must have at least 3
                dimensions. Sampling happens along the second-to-last one.
            seed: Optional seed; the same seed reproduces the same mask for
                the same shape. The RNG state is restored afterwards.

        Returns:
            A float mask of the requested shape (singleton in every
            dimension except the column dimension).
        """
        if len(shape) < 3:
            raise ValueError("Shape should have 3 or more dimensions")
        with temp_seed(self.rng, seed):
            num_cols = shape[-2]
            center_fraction, acceleration = self.choose_acceleration()
            # Bernoulli-sample the outer columns at the adjusted probability...
            n_center = int(round(num_cols * center_fraction))
            keep_prob = (num_cols / acceleration - n_center) / (
                num_cols - n_center
            )
            accept = self.rng.uniform(size=num_cols) < keep_prob
            # ...then force the centered low-frequency band on.
            offset = (num_cols - n_center + 1) // 2
            accept[offset : offset + n_center] = True
            # Broadcastable shape: 1 everywhere except the column axis.
            out_shape = [1] * len(shape)
            out_shape[-2] = num_cols
            return torch.from_numpy(accept.reshape(*out_shape).astype(np.float32))
class EquispacedMaskFunc(MaskFunc):
    """
    Creates an equispaced column sub-sampling mask of a given shape.

    A centered band of ``round(N * center_fraction)`` low-frequency columns is
    always fully sampled; the remaining columns are taken at (approximately)
    equal spacing, with the spacing adjusted for the already-sampled center so
    the total number of sampled columns is about ``N / acceleration``.
    When several (center_fraction, acceleration) pairs are configured, one
    pair is drawn uniformly at random per call.

    Note that this function may not give exactly equispaced samples
    (documented in https://github.com/facebookresearch/fastMRI/issues/54),
    which would require modifications to standard GRAPPA approaches. This
    behavior is preserved to match the public multicoil data.
    """
    def __call__(
        self, shape: Sequence[int], seed: Optional[Union[int, Tuple[int, ...]]] = None
    ) -> torch.Tensor:
        """
        Args:
            shape: Shape of the mask to create; must have at least 3
                dimensions. Sampling happens along the second-to-last one.
            seed: Optional seed; the same seed reproduces the same mask for
                the same shape. The RNG state is restored afterwards.

        Returns:
            A float mask of the requested shape.
        """
        if len(shape) < 3:
            raise ValueError("Shape should have 3 or more dimensions")
        with temp_seed(self.rng, seed):
            center_fraction, acceleration = self.choose_acceleration()
            num_cols = shape[-2]
            n_center = int(round(num_cols * center_fraction))
            # Fully sample the centered low-frequency band.
            mask = np.zeros(num_cols, dtype=np.float32)
            start = (num_cols - n_center + 1) // 2
            mask[start : start + n_center] = True
            # Spacing adjusted for the pre-sampled center so the overall rate
            # still matches the requested acceleration.
            adjusted_accel = (acceleration * (n_center - num_cols)) / (
                n_center * acceleration - num_cols
            )
            offset = self.rng.randint(0, round(adjusted_accel))
            sample_cols = np.arange(offset, num_cols - 1, adjusted_accel)
            mask[np.around(sample_cols).astype(np.uint)] = True
            # Broadcastable shape: 1 everywhere except the column axis.
            out_shape = [1] * len(shape)
            out_shape[-2] = num_cols
            return torch.from_numpy(mask.reshape(*out_shape).astype(np.float32))
def create_mask_for_mask_type(
    mask_type_str: str,
    center_fractions: Sequence[float],
    accelerations: Sequence[int],
) -> MaskFunc:
    """
    Creates a mask function of the specified type.

    Args:
        mask_type_str: Mask type; one of "random" or "equispaced".
        center_fractions: What fraction of the center of k-space to include.
        accelerations: What accelerations to apply.

    Returns:
        A ``MaskFunc`` of the requested type.

    Raises:
        ValueError: If ``mask_type_str`` is not a recognized mask type.
    """
    if mask_type_str == "random":
        return RandomMaskFunc(center_fractions, accelerations)
    elif mask_type_str == "equispaced":
        return EquispacedMaskFunc(center_fractions, accelerations)
    else:
        # ValueError is more specific than the bare Exception raised before;
        # callers catching Exception still work.
        raise ValueError(f"{mask_type_str} not supported")
| 8,448 | 36.887892 | 88 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/fastmri/data/transforms.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import Dict, Optional, Sequence, Tuple, Union
import fastmri
import numpy as np
import torch
from .subsample import MaskFunc
def to_tensor(data: np.ndarray) -> torch.Tensor:
    """
    Convert a numpy array to a PyTorch tensor.

    Complex arrays are converted to real tensors whose last dimension holds
    the real and imaginary parts.

    Args:
        data: Input numpy array.

    Returns:
        PyTorch version of data.
    """
    if not np.iscomplexobj(data):
        return torch.from_numpy(data)
    # Stack (real, imag) along a new trailing axis.
    return torch.from_numpy(np.stack((data.real, data.imag), axis=-1))
def tensor_to_complex_np(data: torch.Tensor) -> np.ndarray:
    """
    Convert a (…, 2)-shaped real torch tensor back to a complex numpy array.

    Args:
        data: Input data to be converted to numpy; last dimension holds the
            real and imaginary parts.

    Returns:
        Complex numpy version of data.
    """
    arr = data.numpy()
    return arr[..., 0] + 1j * arr[..., 1]
def apply_mask(
data: torch.Tensor,
mask_func: MaskFunc,
seed: Optional[Union[int, Tuple[int, ...]]] = None,
padding: Optional[Sequence[int]] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Subsample given k-space by multiplying with a mask.
Args:
data: The input k-space data. This should have at least 3 dimensions,
where dimensions -3 and -2 are the spatial dimensions, and the
final dimension has size 2 (for complex values).
mask_func: A function that takes a shape (tuple of ints) and a random
number seed and returns a mask.
seed: Seed for the random number generator.
padding: Padding value to apply for mask.
Returns:
tuple containing:
masked data: Subsampled k-space data
mask: The generated mask
"""
shape = np.array(data.shape)
shape[:-3] = 1
mask = mask_func(shape, seed)
if padding is not None:
mask[:, :, : padding[0]] = 0
mask[:, :, padding[1] :] = 0 # padding value inclusive on right of zeros
masked_data = data * mask + 0.0 # the + 0.0 removes the sign of the zeros
return masked_data, mask
def mask_center(x: torch.Tensor, mask_from: int, mask_to: int) -> torch.Tensor:
    """
    Initialize a mask with only the center columns filled in.

    Args:
        x: Input tensor (4-D; the fill happens along the last dimension).
        mask_from: Part of center to start filling.
        mask_to: Part of center to end filling (exclusive).

    Returns:
        A zero tensor like ``x`` with columns [mask_from, mask_to) copied
        from ``x``.
    """
    out = torch.zeros_like(x)
    out[:, :, :, mask_from:mask_to] = x[:, :, :, mask_from:mask_to]
    return out
def center_crop(data: torch.Tensor, shape: Tuple[int, int]) -> torch.Tensor:
    """
    Apply a center crop to a real image or batch of real images.

    Args:
        data: Tensor with at least 2 dimensions; the crop is applied along
            the last two dimensions.
        shape: Desired (height, width); each entry must be positive and no
            larger than the corresponding dimension of ``data``.

    Returns:
        The center-cropped image.
    """
    height, width = data.shape[-2], data.shape[-1]
    if not (0 < shape[0] <= height and 0 < shape[1] <= width):
        raise ValueError("Invalid shapes.")
    top = (height - shape[0]) // 2
    left = (width - shape[1]) // 2
    return data[..., top : top + shape[0], left : left + shape[1]]
def complex_center_crop(data: torch.Tensor, shape: Tuple[int, int]) -> torch.Tensor:
    """
    Apply a center crop to a complex image or batch of complex images.

    Args:
        data: Tensor with at least 3 dimensions; the crop is applied along
            dimensions -3 and -2, and the last dimension (size 2) holds the
            real/imaginary parts.
        shape: Desired (height, width); each entry must be positive and no
            larger than the corresponding dimension of ``data``.

    Returns:
        The center-cropped image.
    """
    height, width = data.shape[-3], data.shape[-2]
    if not (0 < shape[0] <= height and 0 < shape[1] <= width):
        raise ValueError("Invalid shapes.")
    top = (height - shape[0]) // 2
    left = (width - shape[1]) // 2
    return data[..., top : top + shape[0], left : left + shape[1], :]
def center_crop_to_smallest(
    x: torch.Tensor, y: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Center-crop the larger image to the size of the smaller one.

    The minimum is taken independently over dim=-1 and dim=-2, so the
    resulting size can mix dimensions from both inputs.

    Args:
        x: The first image.
        y: The second image.

    Returns:
        tuple of x and y, each cropped to the minimum size.
    """
    target = (min(x.shape[-2], y.shape[-2]), min(x.shape[-1], y.shape[-1]))
    return center_crop(x, target), center_crop(y, target)
def normalize(
    data: torch.Tensor,
    mean: Union[float, torch.Tensor],
    stddev: Union[float, torch.Tensor],
    eps: Union[float, torch.Tensor] = 0.0,
) -> torch.Tensor:
    """
    Normalize the given tensor.

    Applies the formula (data - mean) / (stddev + eps).

    Args:
        data: Input data to be normalized.
        mean: Mean value.
        stddev: Standard deviation.
        eps: Added to stddev to prevent dividing by zero.

    Returns:
        Normalized tensor.
    """
    return (data - mean) / (stddev + eps)
def normalize_instance(
    data: torch.Tensor, eps: Union[float, torch.Tensor] = 0.0
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Normalize the given tensor with instance norm.

    Applies the formula (data - mean) / (stddev + eps), where mean and stddev
    are computed from the data itself.

    Args:
        data: Input data to be normalized.
        eps: Added to stddev to prevent dividing by zero.

    Returns:
        Tuple of (normalized tensor, mean, std). The return annotation
        previously wrapped each tensor in a one-element ``Union``, which is
        meaningless; it is now a plain tensor triple.
    """
    mean = data.mean()
    std = data.std()
    return normalize(data, mean, std, eps), mean, std
class UnetDataTransform:
    """
    Data Transformer for training U-Net models.

    Converts raw k-space into a zero-filled, cropped, normalized magnitude
    image together with the matching normalized target.
    """
    def __init__(
        self,
        which_challenge: str,
        mask_func: Optional[MaskFunc] = None,
        use_seed: bool = True,
    ):
        """
        Args:
            which_challenge: Challenge from ("singlecoil", "multicoil").
            mask_func: Optional; A function that can create a mask of
                appropriate shape.
            use_seed: If true, this class computes a pseudo random number
                generator seed from the filename. This ensures that the same
                mask is used for all the slices of a given volume every time.
        """
        if which_challenge not in ("singlecoil", "multicoil"):
            raise ValueError("Challenge should either be 'singlecoil' or 'multicoil'")
        self.mask_func = mask_func
        self.which_challenge = which_challenge
        self.use_seed = use_seed
    def __call__(
        self,
        kspace: np.ndarray,
        mask: np.ndarray,
        target: np.ndarray,
        attrs: Dict,
        fname: str,
        slice_num: int,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, str, int, float]:
        """
        Args:
            kspace: Input k-space of shape (num_coils, rows, cols) for
                multi-coil data or (rows, cols) for single coil data.
            mask: Mask from the test dataset (unused when ``mask_func`` is set).
            target: Target image (may be None for test data).
            attrs: Acquisition related information stored in the HDF5 object.
            fname: File name.
            slice_num: Serial number of the slice.
        Returns:
            tuple containing:
                image: Zero-filled input image.
                target: Target image converted to a torch.Tensor.
                mean: Mean value used for normalization.
                std: Standard deviation value used for normalization.
                fname: File name.
                slice_num: Serial number of the slice.
                max_value: Maximum image value from ``attrs`` (0.0 if absent).
        """
        kspace = to_tensor(kspace)
        # check for max value (absent for some volumes, e.g. test data)
        max_value = attrs["max"] if "max" in attrs.keys() else 0.0
        # apply mask
        if self.mask_func:
            # Seeding with the filename makes all slices of one volume share a mask.
            seed = None if not self.use_seed else tuple(map(ord, fname))
            masked_kspace, mask = apply_mask(kspace, self.mask_func, seed)
        else:
            masked_kspace = kspace
        # inverse Fourier transform to get zero filled solution
        image = fastmri.ifft2c(masked_kspace)
        # crop input to correct size: prefer the target's size, else the
        # reconstruction size recorded in the acquisition attributes
        if target is not None:
            crop_size = (target.shape[-2], target.shape[-1])
        else:
            crop_size = (attrs["recon_size"][0], attrs["recon_size"][1])
        # check for FLAIR 203: some volumes are narrower than the requested
        # crop, so fall back to a square crop of the available width
        if image.shape[-2] < crop_size[1]:
            crop_size = (image.shape[-2], image.shape[-2])
        image = complex_center_crop(image, crop_size)
        # absolute value (complex magnitude)
        image = fastmri.complex_abs(image)
        # apply Root-Sum-of-Squares if multicoil data
        if self.which_challenge == "multicoil":
            image = fastmri.rss(image)
        # normalize input to zero mean / unit std, then clip outliers
        image, mean, std = normalize_instance(image, eps=1e-11)
        image = image.clamp(-6, 6)
        # normalize target with the *input's* statistics so both live in the
        # same value range
        if target is not None:
            target = to_tensor(target)
            target = center_crop(target, crop_size)
            target = normalize(target, mean, std, eps=1e-11)
            target = target.clamp(-6, 6)
        else:
            target = torch.Tensor([0])
        return image, target, mean, std, fname, slice_num, max_value
class VarNetDataTransform:
    """
    Data Transformer for training VarNet models.

    Produces (masked k-space, mask, target, metadata) tuples; the network
    performs the reconstruction itself, so no image-space processing happens
    here beyond bookkeeping.
    """
    def __init__(self, mask_func: Optional[MaskFunc] = None, use_seed: bool = True):
        """
        Args:
            mask_func: Optional; A function that can create a mask of
                appropriate shape. Defaults to None.
            use_seed: If True, this class computes a pseudo random number
                generator seed from the filename. This ensures that the same
                mask is used for all the slices of a given volume every time.
        """
        self.mask_func = mask_func
        self.use_seed = use_seed
    def __call__(
        self,
        kspace: np.ndarray,
        mask: np.ndarray,
        target: np.ndarray,
        attrs: Dict,
        fname: str,
        slice_num: int,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, str, int, float, torch.Tensor]:
        """
        Args:
            kspace: Input k-space of shape (num_coils, rows, cols) for
                multi-coil data.
            mask: Mask from the test dataset (used when ``mask_func`` is None).
            target: Target image.
            attrs: Acquisition related information stored in the HDF5 object.
            fname: File name.
            slice_num: Serial number of the slice.
        Returns:
            tuple containing:
                masked_kspace: k-space after applying sampling mask.
                mask: The applied sampling mask (as a byte tensor).
                target: The target image (if applicable).
                fname: File name.
                slice_num: The slice index.
                max_value: Maximum image value.
                crop_size: The size to crop the final image.
        """
        if target is not None:
            target = to_tensor(target)
            max_value = attrs["max"]
        else:
            # No ground truth available (e.g. test data).
            target = torch.tensor(0)
            max_value = 0.0
        kspace = to_tensor(kspace)
        # Seeding with the filename keeps the mask identical across all
        # slices of one volume.
        seed = None if not self.use_seed else tuple(map(ord, fname))
        # Columns outside [acq_start, acq_end) were never acquired.
        acq_start = attrs["padding_left"]
        acq_end = attrs["padding_right"]
        crop_size = torch.tensor([attrs["recon_size"][0], attrs["recon_size"][1]])
        if self.mask_func:
            masked_kspace, mask = apply_mask(
                kspace, self.mask_func, seed, (acq_start, acq_end)
            )
        else:
            # Use the dataset-provided mask: reshape it so it broadcasts over
            # k-space and zero out the unacquired padding columns.
            masked_kspace = kspace
            shape = np.array(kspace.shape)
            num_cols = shape[-2]
            shape[:-3] = 1
            mask_shape = [1] * len(shape)
            mask_shape[-2] = num_cols
            mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
            mask = mask.reshape(*mask_shape)
            mask[:, :, :acq_start] = 0
            mask[:, :, acq_end:] = 0
        return (
            masked_kspace,
            mask.byte(),
            target,
            fname,
            slice_num,
            max_value,
            crop_size,
        )
| 12,887 | 30.205811 | 88 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/networks/img2img_mixer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import torch.nn.init as init
import numpy as np
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbedding(nn.Module):
    """Split an image into non-overlapping patches and linearly embed them.

    A strided convolution is equivalent to patchify-then-project.

    Input:  (B, channels, H, W)
    Output: (B, H/patch_size, W/patch_size, embed_dim) — channels-last grid.
    """
    def __init__(
        self,
        patch_size: int,
        embed_dim: int,
        channels: int
    ):
        super().__init__()
        # Kept as a Sequential so state-dict keys ("proj.0.*") stay compatible
        # with checkpoints saved by the previous einops-based version.
        self.proj = nn.Sequential(
            nn.Conv2d(
                in_channels=channels,
                out_channels=embed_dim,
                kernel_size=patch_size,
                stride=patch_size
            ),
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # permute replaces the former einops Rearrange("b c h w -> b h w c"),
        # removing the einops dependency with identical behavior.
        return self.proj(x).permute(0, 2, 3, 1)
class PatchExpansion(nn.Module):
    """Upsample a channels-last patch grid back to image space.

    The channel axis is linearly expanded by ``dim_scale**2`` and the extra
    factors are folded into the spatial axes (pixel-shuffle style), followed
    by a LayerNorm and a 1x1 convolution down to ``img_channels``.
    """
    def __init__(self, dim_scale, channel_dim, img_channels, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim_scale = dim_scale
        self.expand = nn.Linear(channel_dim, dim_scale**2 * channel_dim, bias=False)
        self.output_dim = channel_dim
        self.norm = norm_layer(channel_dim)
        self.output = nn.Conv2d(in_channels=channel_dim, out_channels=img_channels, kernel_size=1, bias=False)
    def forward(self, x):
        """
        x: (B, H, W, C) -> (B, img_channels, H*dim_scale, W*dim_scale)
        """
        s = self.dim_scale
        x = self.expand(x)  # (B, H, W, s*s*C)
        b, h, w, c = x.shape
        # Equivalent to einops rearrange
        # 'b h w (p1 p2 c) -> b (h p1) (w p2) c' with p1 = p2 = s,
        # implemented with view/permute/reshape to drop the einops dependency.
        x = x.view(b, h, w, s, s, c // (s * s))
        x = x.permute(0, 1, 3, 2, 4, 5).reshape(b, h * s, w * s, c // (s * s))
        # LayerNorm over the channel axis (flattened spatially, as before).
        x = self.norm(x.reshape(b, -1, self.output_dim))
        x = x.view(b, h * s, w * s, -1)
        # Back to channels-first for the 1x1 output projection.
        return self.output(x.permute(0, 3, 1, 2))
class MLPBlock(nn.Module):
    """Two-layer perceptron (Linear -> GELU -> Linear) mapping back to the
    input width, used for both token and channel mixing."""
    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, input_dim),
        ]
        self.model = nn.Sequential(*layers)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.model(x)
class Mixer(nn.Module):
    """One MLP-Mixer block operating on a (B, H, W, C) patch grid.

    Token mixing applies an MLP along the H axis and then along the W axis
    (the Rearrange transposes select which spatial axis the MLP sees; this
    assumes a square grid of ``num_patches`` per side). Channel mixing applies
    an MLP along C. Both paths are residual.
    """
    def __init__(
        self,
        num_patches: int,
        num_channels: int,
        f_hidden: int
    ):
        super().__init__()
        # NOTE: the Sequential indices are fixed; reordering or removing
        # modules would break previously saved state dicts.
        self.token_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            Rearrange("b h w c -> b c w h"),
            MLPBlock(num_patches, num_patches*f_hidden),
            Rearrange("b c w h -> b c h w"),
            MLPBlock(num_patches, num_patches*f_hidden),
            Rearrange("b c h w -> b h w c"),
        )
        self.channel_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            MLPBlock(num_channels, num_channels*f_hidden)
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Residual token mixing followed by residual channel mixing.
        x = x + self.token_mixing(x)
        x = x + self.channel_mixing(x)
        return x
class Img2Img_Mixer(nn.Module):
    """Image-to-image MLP-Mixer: patch embedding, a stack of Mixer blocks,
    and a patch expansion back to the input resolution.

    Args:
        img_size: Input image side length (assumed square).
        img_channels: Number of image channels.
        patch_size: Side length of each square patch.
        embed_dim: Patch embedding width.
        num_layers: Number of Mixer blocks.
        f_hidden: Hidden-width multiplier inside each MLPBlock.
    """
    def __init__(
        self,
        img_size: int = 320,
        img_channels: int = 1,
        patch_size: int = 4,
        embed_dim: int = 128,
        num_layers: int = 16,
        f_hidden: int = 8,
    ):
        super().__init__()
        self.patch_embed = PatchEmbedding(patch_size, embed_dim, img_channels)
        self.mixer_layers = nn.Sequential(
            *(Mixer(img_size // patch_size, embed_dim, f_hidden)
              for _ in range(num_layers))
        )
        self.patch_expand = PatchExpansion(patch_size, embed_dim, img_channels)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.patch_expand(self.mixer_layers(self.patch_embed(x)))
imaging_MLPs | imaging_MLPs-master/compressed_sensing/networks/vision_transformer.py | '''
This code is modified from https://github.com/facebookresearch/convit. To adapt the vit/convit to image reconstruction, variable input sizes, and patch sizes for both spatial dimensions.
'''
import torch
import torch.nn as nn
from functools import partial
import torch.nn.functional as F
from timm.models.helpers import load_pretrained
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
    """Transformer feed-forward block: Linear -> activation -> Linear,
    with dropout after the activation and after the second projection."""
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Hidden/output widths default to the input width.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Truncated-normal init for Linear weights (zero bias);
        # LayerNorm gets unit weight / zero bias.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def forward(self, x):
        return self.drop(self.fc2(self.drop(self.act(self.fc1(x)))))
class GPSA(nn.Module):
    """Gated Positional Self-Attention (from the ConViT architecture this file
    is adapted from).

    Each head blends a content-based attention map (softmax of q·k) with a
    position-based attention map (softmax of a linear projection of relative
    patch positions) via a learned per-head gating parameter. ``local_init``
    initializes the positional projection so heads start out attending to a
    local neighborhood.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
                 locality_strength=1., use_local_init=True, grid_size=None):
        super().__init__()
        self.num_heads = num_heads
        self.dim = dim
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        # Separate q/k/v projections (v doubles as the output basis for the
        # identity-style local init below).
        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.k = nn.Linear(dim, dim, bias=qkv_bias)
        self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        # Maps each (dx, dy, d^2) relative-position triple to one logit per head.
        self.pos_proj = nn.Linear(3, num_heads)
        self.proj_drop = nn.Dropout(proj_drop)
        self.locality_strength = locality_strength
        # Per-head gate between content attention and positional attention.
        self.gating_param = nn.Parameter(1*torch.ones(self.num_heads))
        self.apply(self._init_weights)
        if use_local_init:
            self.local_init(locality_strength=locality_strength)
        self.current_grid_size = grid_size
    def _init_weights(self, m):
        # Truncated-normal for Linear weights, zero bias; LayerNorm gets
        # unit weight / zero bias.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
        if isinstance(m, nn.Linear) and m.bias is not None:
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def get_attention(self, x):
        """Return the gated (B, heads, N, N) attention map for tokens ``x``."""
        B, N, C = x.shape
        k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        # Positional logits from relative indices, shared across the batch.
        pos_score = self.pos_proj(self.rel_indices).expand(B, -1, -1,-1).permute(0,3,1,2)
        patch_score = (q @ k.transpose(-2, -1)) * self.scale
        patch_score = patch_score.softmax(dim=-1)
        pos_score = pos_score.softmax(dim=-1)
        # sigmoid(gate)=0 -> pure content attention; =1 -> pure positional.
        gating = self.gating_param.view(1,-1,1,1)
        attn = (1.-torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
        # Renormalize so each row sums to 1 after the convex blend.
        attn = attn / attn.sum(dim=-1).unsqueeze(-1)
        attn = self.attn_drop(attn)
        return attn
    def get_attention_map(self, x, return_map = False):
        """Average attention distance per head; optionally also the map itself."""
        attn_map = self.get_attention(x).mean(0) # average over batch
        # rel_indices[..., -1] holds squared distances; sqrt gives distances.
        distances = self.rel_indices.squeeze()[:,:,-1]**.5
        dist = torch.einsum('nm,hnm->h', (distances, attn_map))
        dist /= distances.size(0)
        if return_map:
            return dist, attn_map
        else:
            return dist
    def local_init(self, locality_strength=1.):
        """Initialize v as identity and pos_proj so each head favors a distinct
        nearby offset (heads arranged on a sqrt(num_heads) kernel grid)."""
        self.v.weight.data.copy_(torch.eye(self.dim))
        locality_distance = 1 #max(1,1/locality_strength**.5)
        kernel_size = int(self.num_heads**.5)
        center = (kernel_size-1)/2 if kernel_size%2==0 else kernel_size//2
        for h1 in range(kernel_size):
            for h2 in range(kernel_size):
                position = h1+kernel_size*h2
                # Logit = -d^2 + 2*dx*offset_x + 2*dy*offset_y, peaked at the
                # head's designated offset from the query patch.
                self.pos_proj.weight.data[position,2] = -1
                self.pos_proj.weight.data[position,1] = 2*(h1-center)*locality_distance
                self.pos_proj.weight.data[position,0] = 2*(h2-center)*locality_distance
        self.pos_proj.weight.data *= locality_strength
    def get_rel_indices(self, ):
        """Cache (1, N, N, 3) relative indices (dx, dy, d^2) for the current grid."""
        H, W = self.current_grid_size
        N = H*W
        rel_indices = torch.zeros(1, N, N, 3)
        indx = torch.arange(W).view(1,-1) - torch.arange(W).view(-1, 1)
        indx = indx.repeat(H, H)
        indy = torch.arange(H).view(1,-1) - torch.arange(H).view(-1, 1)
        indy = indy.repeat_interleave(W, dim=0).repeat_interleave(W, dim=1)
        indd = indx**2 + indy**2
        rel_indices[:,:,:,2] = indd.unsqueeze(0)
        rel_indices[:,:,:,1] = indy.unsqueeze(0)
        rel_indices[:,:,:,0] = indx.unsqueeze(0)
        device = self.v.weight.device
        self.rel_indices = rel_indices.to(device)
    def forward(self, x):
        B, N, C = x.shape
        # Rebuild relative indices when the token-grid size changes.
        if not hasattr(self, 'rel_indices') or self.rel_indices.size(1)!=N:
            self.get_rel_indices()
        attn = self.get_attention(x)
        v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class MHSA(nn.Module):
    """Standard multi-head self-attention with a fused qkv projection.

    ``get_rel_indices``/``get_attention_map`` exist only for attention-distance
    diagnostics; the forward pass is vanilla scaled dot-product attention.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., grid_size=None):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        # Single fused projection producing q, k and v.
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.apply(self._init_weights)
        self.current_grid_size = grid_size
    def _init_weights(self, m):
        # Truncated-normal for Linear weights, zero bias; LayerNorm gets
        # unit weight / zero bias.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
        if isinstance(m, nn.Linear) and m.bias is not None:
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def get_attention_map(self, x, return_map = False):
        """Average attention distance per head; optionally also the raw map."""
        self.get_rel_indices()
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn_map = (q @ k.transpose(-2, -1)) * self.scale
        attn_map = attn_map.softmax(dim=-1).mean(0) # average over batch
        # rel_indices[..., -1] holds squared distances; sqrt gives distances.
        distances = self.rel_indices.squeeze()[:,:,-1]**.5
        dist = torch.einsum('nm,hnm->h', (distances, attn_map))
        dist /= distances.size(0)
        if return_map:
            return dist, attn_map
        else:
            return dist
    def get_rel_indices(self, ):
        """Cache (1, N, N, 3) relative indices (dx, dy, d^2) for the current grid."""
        H, W = self.current_grid_size
        N = H*W
        rel_indices = torch.zeros(1, N, N, 3)
        indx = torch.arange(W).view(1,-1) - torch.arange(W).view(-1, 1)
        indx = indx.repeat(H, H)
        indy = torch.arange(H).view(1,-1) - torch.arange(H).view(-1, 1)
        indy = indy.repeat_interleave(W, dim=0).repeat_interleave(W, dim=1)
        indd = indx**2 + indy**2
        rel_indices[:,:,:,2] = indd.unsqueeze(0)
        rel_indices[:,:,:,1] = indy.unsqueeze(0)
        rel_indices[:,:,:,0] = indx.unsqueeze(0)
        device = self.qkv.weight.device
        self.rel_indices = rel_indices.to(device)
    def forward(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class Block(nn.Module):
    """Transformer block: (GPSA or MHSA) attention followed by an MLP,
    each with pre-norm, residual connection and optional stochastic depth."""
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_gpsa=True, **kwargs):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.use_gpsa = use_gpsa
        # Select the attention flavor; extra kwargs (e.g. locality_strength)
        # are forwarded to whichever class is chosen.
        attn_cls = GPSA if self.use_gpsa else MHSA
        self.attn = attn_cls(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
                             attn_drop=attn_drop, proj_drop=drop, **kwargs)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer, drop=drop)
    def forward(self, x, grid_size):
        # Tell the attention module the current (H, W) token grid before use.
        self.attn.current_grid_size = grid_size
        x = x + self.drop_path(self.attn(self.norm1(x)))
        return x + self.drop_path(self.mlp(self.norm2(x)))
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding via a strided convolution, from timm.
    """
    def __init__(self, patch_size, in_chans, embed_dim):
        super().__init__()
        # kernel == stride: non-overlapping patches, each linearly projected.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.apply(self._init_weights)
    def forward(self, x):
        return self.proj(x)
    def _init_weights(self, m):
        # Truncated-normal for Linear weights, zero bias; LayerNorm gets
        # unit weight / zero bias. (Conv2d keeps its default init.)
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
class VisionTransformer(nn.Module):
    """ Vision Transformer for image-to-image reconstruction (ConViT-style).

    Tokens produced by the backbone are projected back to flattened pixel
    patches by ``self.head`` and reassembled into an image by ``seq2img``, so
    the output spatial size equals the input spatial size.
    """
    def __init__(self, avrg_img_size=320, patch_size=16, in_chans=1, embed_dim=64, depth=8,
                 num_heads=9, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, global_pool=None,
                 gpsa_interval=(-1, -1), locality_strength=1., use_pos_embed=True):
        """
        Args:
            avrg_img_size: Typical input size (int or (H, W)); only used to
                size the learned positional-embedding grid.
            patch_size: Patch size (int or (ph, pw)).
            in_chans: Number of image channels.
            embed_dim: Per-head embedding width; total width is
                embed_dim * num_heads.
            depth: Number of transformer blocks.
            gpsa_interval: 1-based inclusive interval of block indices that
                use GPSA; all other blocks use plain MHSA. (Default changed
                from a mutable list to a tuple; it is never mutated.)
            locality_strength: GPSA locality-initialization strength.
            use_pos_embed: Whether to add an interpolated positional embedding.
        """
        super().__init__()
        self.depth = depth
        embed_dim *= num_heads
        self.num_features = embed_dim  # num_features for consistency with other models
        self.locality_strength = locality_strength
        self.use_pos_embed = use_pos_embed
        # to_2tuple handles both ints and (H, W) tuples; previously a tuple
        # input left ``img_size`` undefined and crashed when use_pos_embed.
        img_size = to_2tuple(avrg_img_size)
        if isinstance(patch_size, int):
            self.patch_size = to_2tuple(patch_size)
        else:
            self.patch_size = patch_size
        self.in_chans = in_chans
        self.patch_embed = PatchEmbed(
            patch_size=self.patch_size, in_chans=in_chans, embed_dim=embed_dim)
        self.pos_drop = nn.Dropout(p=drop_rate)
        if self.use_pos_embed:
            self.pos_embed = nn.Parameter(
                torch.zeros(1, embed_dim,
                            img_size[0] // self.patch_size[0],
                            img_size[1] // self.patch_size[1])
            )
            trunc_normal_(self.pos_embed, std=.02)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                use_gpsa=True,
                locality_strength=locality_strength)
            if i >= gpsa_interval[0]-1 and i < gpsa_interval[1] else
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                use_gpsa=False,)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # head: project each token back to a flattened in_chans x ph x pw patch
        self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')]
        self.head = nn.Linear(self.num_features, in_chans*self.patch_size[0]*self.patch_size[1])
        # Initialize the head here; this call was previously dead code placed
        # after the ``return`` inside ``seq2img``.
        self.head.apply(self._init_weights)
    def seq2img(self, x, img_size):
        """
        Transforms sequence back into image space, input dims: [batch_size, num_patches, channels]
        output dims: [batch_size, channels, H, W]
        """
        x = x.view(x.shape[0], x.shape[1], self.in_chans, self.patch_size[0], self.patch_size[1])
        # Stitch patches back together: first along one spatial axis, then the other.
        x = x.chunk(x.shape[1], dim=1)
        x = torch.cat(x, dim=4).permute(0, 1, 2, 4, 3)
        x = x.chunk(img_size[0]//self.patch_size[0], dim=3)
        x = torch.cat(x, dim=4).permute(0, 1, 2, 4, 3).squeeze(1)
        return x
    def _init_weights(self, m):
        # Truncated-normal for Linear weights, zero bias; LayerNorm gets
        # unit weight / zero bias.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed'}
    def get_head(self):
        return self.head
    def reset_head(self):
        # Fixed NameError: ``in_chans``/``patch_size`` are instance attributes.
        self.head = nn.Linear(self.num_features, self.in_chans*self.patch_size[0]*self.patch_size[1])
    def forward_features(self, x, k=None):
        x = self.patch_embed(x)
        _, _, H, W = x.shape
        if self.use_pos_embed:
            # Interpolate the learned grid to the actual token grid so that
            # arbitrary input sizes are supported.
            pos_embed = F.interpolate(self.pos_embed, size=[H, W], mode='bilinear', align_corners=False)
            x = x + pos_embed
        x = x.flatten(2).transpose(1, 2)
        x = self.pos_drop(x)
        for u, blk in enumerate(self.blocks):
            x = blk(x, (H, W))
            if k is not None and u == k:
                # Stash block k's attention map for later inspection.
                self.attention_map = blk.attn.get_attention_map(x, return_map=True)
        x = self.norm(x)
        return x
    def forward(self, x, k=None):
        _, _, H, W = x.shape
        x = self.forward_features(x, k)
        x = self.head(x)
        x = self.seq2img(x, (H, W))
        return x
imaging_MLPs | imaging_MLPs-master/compressed_sensing/networks/recon_net.py | import torch.nn as nn
import torch.nn.functional as F
from math import ceil, floor
from .unet import Unet
from .vision_transformer import VisionTransformer
class ReconNet(nn.Module):
    """Wrapper that normalizes (and, for ViTs, pads) inputs for a
    reconstruction backbone, then undoes both on the output.

    Dispatch in :meth:`forward` is by backbone type: ``Unet`` and mixer
    backbones get per-sample normalization only; ``VisionTransformer``
    backbones are additionally padded to a multiple of the patch size.
    """
    def __init__(self, net):
        super().__init__()
        self.net = net
    def pad(self, x):
        """Zero-pad H and W up to multiples of the backbone patch size.

        Returns the padded tensor plus the [left, right] and [top, bottom]
        pad amounts needed to undo the padding.
        """
        _, _, h, w = x.shape
        hp, wp = self.net.patch_size
        half_w = ((wp - w % wp) % wp) / 2
        half_h = ((hp - h % hp) % hp) / 2
        # Split any odd remainder so the extra pixel goes on the right/bottom.
        wpad = [floor(half_w), ceil(half_w)]
        hpad = [floor(half_h), ceil(half_h)]
        return F.pad(x, wpad + hpad), wpad, hpad
    def unpad(self, x, wpad, hpad):
        """Crop away the padding added by :meth:`pad`."""
        return x[..., hpad[0] : x.shape[-2] - hpad[1], wpad[0] : x.shape[-1] - wpad[1]]
    def norm(self, x):
        """Per-sample normalization to zero mean / unit (unbiased) std."""
        flat = x.view(x.shape[0], 1, 1, -1)
        mean = flat.mean(-1, keepdim=True)
        std = flat.std(-1, keepdim=True)
        return (x - mean) / std, mean, std
    def unnorm(self, x, mean, std):
        """Invert :meth:`norm`."""
        return x * std + mean
    def vit_forward(self, x, k=None):
        # Pad -> normalize -> backbone -> unnormalize -> unpad.
        x, wpad, hpad = self.pad(x)
        x, mean, std = self.norm(x)
        x = self.unnorm(self.net(x, k), mean, std)
        return self.unpad(x, wpad, hpad)
    def unet_forward(self, x):
        x, mean, std = self.norm(x)
        return self.unnorm(self.net(x), mean, std)
    def mixer_forward(self, x):
        x, mean, std = self.norm(x)
        return self.unnorm(self.net(x), mean, std)
    def forward(self, x, k=None):
        if isinstance(self.net, Unet):
            return self.unet_forward(x)
        if isinstance(self.net, VisionTransformer):
            return self.vit_forward(x, k)
        return self.mixer_forward(x)
| 1,932 | 25.847222 | 90 | py |
imaging_MLPs | imaging_MLPs-master/compressed_sensing/networks/unet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class Unet(nn.Module):
    """
    PyTorch implementation of a U-Net model.
    O. Ronneberger, P. Fischer, and Thomas Brox. U-net: Convolutional networks
    for biomedical image segmentation. In International Conference on Medical
    image computing and computer-assisted intervention, pages 234–241.
    Springer, 2015.
    """
    def __init__(
        self,
        in_chans: int,
        out_chans: int,
        chans: int = 32,
        num_pool_layers: int = 4,
        drop_prob: float = 0.0,
    ):
        """
        Args:
            in_chans: Number of channels in the input to the U-Net model.
            out_chans: Number of channels in the output to the U-Net model.
            chans: Number of output channels of the first convolution layer.
            num_pool_layers: Number of down-sampling and up-sampling layers.
            drop_prob: Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.chans = chans
        self.num_pool_layers = num_pool_layers
        self.drop_prob = drop_prob
        # Encoder: channel count doubles at each level.
        self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
        ch = chans
        for _ in range(num_pool_layers - 1):
            self.down_sample_layers.append(ConvBlock(ch, ch * 2, drop_prob))
            ch *= 2
        # Bottleneck at the coarsest resolution.
        self.conv = ConvBlock(ch, ch * 2, drop_prob)
        # Decoder: each level upsamples then fuses the concatenated skip
        # connection, hence the 2x input channels on the ConvBlocks.
        self.up_conv = nn.ModuleList()
        self.up_transpose_conv = nn.ModuleList()
        for _ in range(num_pool_layers - 1):
            self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
            self.up_conv.append(ConvBlock(ch * 2, ch, drop_prob))
            ch //= 2
        self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
        # Final level also maps to the requested output channel count (1x1 conv).
        self.up_conv.append(
            nn.Sequential(
                ConvBlock(ch * 2, ch, drop_prob),
                nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1),
            )
        )
    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.
        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        stack = []
        output = image
        # apply down-sampling layers; pre-pool activations are kept as skips
        for layer in self.down_sample_layers:
            output = layer(output)
            stack.append(output)
            output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)
        output = self.conv(output)
        # apply up-sampling layers; skips are consumed in reverse (LIFO) order
        for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
            downsample_layer = stack.pop()
            output = transpose_conv(output)
            # reflect pad on the right/botton if needed to handle odd input dimensions
            padding = [0, 0, 0, 0]
            if output.shape[-1] != downsample_layer.shape[-1]:
                padding[1] = 1 # padding right
            if output.shape[-2] != downsample_layer.shape[-2]:
                padding[3] = 1 # padding bottom
            if torch.sum(torch.tensor(padding)) != 0:
                output = F.pad(output, padding, "reflect")
            output = torch.cat([output, downsample_layer], dim=1)
            output = conv(output)
        return output
class ConvBlock(nn.Module):
    """
    Two repetitions of Conv2d(3x3) -> InstanceNorm2d -> LeakyReLU -> Dropout2d.
    Spatial dimensions are preserved (padding 1).
    """
    def __init__(self, in_chans: int, out_chans: int, drop_prob: float):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
            drop_prob: Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.drop_prob = drop_prob
        stages = []
        # First stage maps in_chans -> out_chans, second keeps out_chans.
        for c_in in (in_chans, out_chans):
            stages += [
                # Bias disabled because InstanceNorm follows.
                nn.Conv2d(c_in, out_chans, kernel_size=3, padding=1, bias=False),
                nn.InstanceNorm2d(out_chans),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Dropout2d(drop_prob),
            ]
        self.layers = nn.Sequential(*stages)
    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.
        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        return self.layers(image)
class TransposeConvBlock(nn.Module):
    """
    A Transpose Convolutional Block that consists of one convolution transpose
    layers followed by instance normalization and LeakyReLU activation.
    """
    def __init__(self, in_chans: int, out_chans: int):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        # Kernel 2 / stride 2 exactly doubles each spatial dimension; bias is
        # disabled because InstanceNorm follows.
        self.layers = nn.Sequential(
            nn.ConvTranspose2d(
                in_chans, out_chans, kernel_size=2, stride=2, bias=False
            ),
            nn.InstanceNorm2d(out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )
    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.
        Returns:
            Output tensor of shape `(N, out_chans, H*2, W*2)`.
        """
        return self.layers(image) | 5979 | 31.677596 | 88 | py
| 5,979 | 31.677596 | 88 | py |
imaging_MLPs | imaging_MLPs-master/untrained/networks/original_mixer.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.init as init
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbeddings(nn.Module):
    """Split an image into non-overlapping patches and embed each one.

    Implemented as a strided convolution (kernel == stride == patch_size)
    followed by flattening the spatial grid into a token axis:
    (B, C, H, W) -> (B, num_patches, hidden_dim).
    """
    def __init__(
        self,
        patch_size: int,
        hidden_dim: int,
        channels: int
    ):
        super().__init__()
        patchify = nn.Conv2d(
            in_channels=channels,
            out_channels=hidden_dim,
            kernel_size=patch_size,
            stride=patch_size
        )
        to_tokens = Rearrange("b c h w -> b (h w) c")
        self.proj = nn.Sequential(patchify, to_tokens)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj(x)
class PatchEmbeddings_transpose(nn.Module):
    """Invert PatchEmbeddings: fold the token sequence back onto a grid of
    side length `d` and de-patchify with a transposed convolution."""
    def __init__(
        self,
        patch_size: int,
        hidden_dim: int,
        channels: int,
        d: int
    ):
        super().__init__()
        to_grid = Rearrange("b (h w) c -> b c h w", h=d)
        unpatchify = nn.ConvTranspose2d(
            in_channels=hidden_dim,
            out_channels=channels,
            kernel_size=patch_size,
            stride=patch_size
        )
        self.proj_transpose = nn.Sequential(to_grid, unpatchify)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # (B, num_patches, hidden_dim) -> (B, channels, H, W)
        return self.proj_transpose(x)
class MLPBlock(nn.Module):
    """Two-layer feed-forward block (Linear -> GELU -> Linear) that maps
    back to the input dimension."""
    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, input_dim),
        ]
        self.model = nn.Sequential(*layers)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.model(x)
class MixerBlock(nn.Module):
    """One MLP-Mixer block: token mixing across the patch axis, then channel
    mixing per token, each wrapped in a residual connection."""
    def __init__(
        self,
        num_patches: int,
        num_channels: int,
        tokens_hidden_dim: int,
        channels_hidden_dim: int
    ):
        super().__init__()
        # Transpose so the MLP acts along the patch axis, then restore layout.
        self.token_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            Rearrange("b p c -> b c p"),
            MLPBlock(num_patches, tokens_hidden_dim),
            Rearrange("b c p -> b p c")
        )
        self.channel_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            MLPBlock(num_channels, channels_hidden_dim)
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = x + self.token_mixing(x)
        return x + self.channel_mixing(x)
class Original_Mixer(nn.Module):
    """Standard MLP-Mixer backbone adapted for image-to-image regression:
    patch embedding -> stack of MixerBlocks -> LayerNorm -> transposed
    patch embedding back to image space."""
    def __init__(
        self,
        image_size: int = 256,
        channels: int = 1,
        patch_size: int = 4,
        num_layers: int = 8,
        hidden_dim: int = 128,
        tokens_hidden_dim: int = 96,
        channels_hidden_dim: int = 256
    ):
        super().__init__()
        num_patches = (image_size // patch_size) ** 2
        # Side length of the token grid produced by the patchifying conv.
        d=(image_size-patch_size)//patch_size + 1
        self.embed = PatchEmbeddings(patch_size, hidden_dim, channels)
        layers = [
            MixerBlock(
                num_patches=num_patches,
                num_channels=hidden_dim,
                tokens_hidden_dim=tokens_hidden_dim,
                channels_hidden_dim=channels_hidden_dim
            )
            for _ in range(num_layers)
        ]
        self.layers = nn.Sequential(*layers)
        self.norm = nn.LayerNorm(hidden_dim)
        self.embed_transpose = PatchEmbeddings_transpose(patch_size, hidden_dim, channels, d)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Shape unpack kept for readability; the values are unused.
        b, c, h, w = x.shape
        x = self.embed(x)
        x = self.layers(x)
        x = self.norm(x)
        x = self.embed_transpose(x)
        return x | 3674 | 26.840909 | 93 | py
imaging_MLPs | imaging_MLPs-master/untrained/networks/img2img_mixer.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.init as init
import einops
from einops.layers.torch import Rearrange
from einops import rearrange
class PatchEmbedding(nn.Module):
    """Patchify an image with a strided conv, keeping the 2D grid layout and
    moving channels last: (B, C, H, W) -> (B, h, w, embed_dim)."""
    def __init__(
        self,
        patch_size: int,
        embed_dim: int,
        channels: int
    ):
        super().__init__()
        patchify = nn.Conv2d(
            in_channels=channels,
            out_channels=embed_dim,
            kernel_size=patch_size,
            stride=patch_size
        )
        self.proj = nn.Sequential(patchify, Rearrange("b c h w -> b h w c"))
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj(x)
class PatchExpansion(nn.Module):
    """Expand the token grid back to full image resolution.

    Each token's channels are linearly expanded by dim_scale**2, scattered
    into a (dim_scale x dim_scale) pixel block, layer-normalized, and a 1x1
    convolution maps the channel dim back to image channels.
    """
    def __init__(self, dim_scale, channel_dim, img_channels, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim_scale = dim_scale
        self.expand = nn.Linear(channel_dim, dim_scale**2* channel_dim, bias=False)
        self.output_dim = channel_dim
        self.norm = norm_layer(channel_dim)
        self.output = nn.Conv2d(in_channels=channel_dim,out_channels=img_channels ,kernel_size=1,bias=False)
    def forward(self, x):
        # (B, H, W, C) -> (B, H, W, s*s*C)
        x = self.expand(x)
        B, H, W, C = x.shape
        # Scatter the expanded channels into a (s x s) spatial block per token.
        x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale, c=C//(self.dim_scale**2))
        # Flatten for LayerNorm over the channel dim, then restore the grid.
        x = x.view(B,-1,self.output_dim)
        x= self.norm(x)
        x = x.view(B,H*self.dim_scale, W*self.dim_scale,-1)
        # Channels-first for the final 1x1 projection.
        x = x.permute(0,3,1,2)
        x = self.output(x)
        return x
class MLPBlock(nn.Module):
    """Position-wise MLP: expand to hidden_dim with a GELU in between,
    then project back to the input dimension."""
    def __init__(self, input_dim: int, hidden_dim: int):
        super().__init__()
        expand = nn.Linear(input_dim, hidden_dim)
        nonlin = nn.GELU()
        project = nn.Linear(hidden_dim, input_dim)
        self.model = nn.Sequential(expand, nonlin, project)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.model(x)
class Mixer(nn.Module):
    """Mixer block on a 2D token grid: tokens are mixed along both spatial
    axes (via transposes between the two MLPs), then channels are mixed per
    token. Residual connections wrap both stages."""
    def __init__(
        self,
        num_patches: int,
        num_channels: int,
        f_hidden: int
    ):
        super().__init__()
        token_hidden = num_patches*f_hidden
        self.token_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            Rearrange("b h w c -> b c w h"),
            MLPBlock(num_patches, token_hidden),
            Rearrange("b c w h -> b c h w"),
            MLPBlock(num_patches, token_hidden),
            Rearrange("b c h w -> b h w c"),
        )
        self.channel_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            MLPBlock(num_channels, num_channels*f_hidden)
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = x + self.token_mixing(x)
        return x + self.channel_mixing(x)
class Img2Img_Mixer(nn.Module):
    """Image-to-image MLP-Mixer: patch embedding -> stack of Mixer blocks
    on the 2D token grid -> patch expansion back to pixel space."""
    def __init__(
        self,
        img_size: int = 256,
        img_channels: int = 1,
        patch_size: int = 4,
        embed_dim: int = 128,
        num_layers: int = 16,
        f_hidden: int = 8,
    ):
        super().__init__()
        self.patch_embed = PatchEmbedding(patch_size, embed_dim, img_channels)
        # Token grid has img_size // patch_size entries per side.
        layers = [ Mixer(img_size//patch_size, embed_dim, f_hidden)
                    for _ in range(num_layers)]
        self.mixer_layers = nn.Sequential(*layers)
        self.patch_expand = PatchExpansion(patch_size, embed_dim, img_channels)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.patch_embed(x)
        x = self.mixer_layers(x)
        x = self.patch_expand(x)
        return x | 3618 | 27.054264 | 127 | py
imaging_MLPs | imaging_MLPs-master/untrained/networks/vit.py | '''
This code is modified from https://github.com/facebookresearch/convit. To adapt the vit/convit to image reconstruction, variable input sizes, and patch sizes for both spatial dimensions.
'''
import torch
import torch.nn as nn
from functools import partial
import torch.nn.functional as F
from timm.models.helpers import load_pretrained
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
    """Transformer feed-forward block: Linear -> activation -> Linear, with
    the same dropout applied after each projection."""
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Widths default to the input width when not given.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Truncated-normal init for linear weights; zeros/ones for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class GPSA(nn.Module):
    """Gated Positional Self-Attention (from ConViT).

    Blends a content-based attention score with a learned positional score
    via a per-head sigmoid gate. Relative-position indices are recomputed
    lazily whenever the token grid size changes.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
                 locality_strength=1., use_local_init=True, grid_size=None):
        super().__init__()
        self.num_heads = num_heads
        self.dim = dim
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.k = nn.Linear(dim, dim, bias=qkv_bias)
        self.v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        # Maps the 3 relative-position features (dx, dy, d^2) to one score
        # per head.
        self.pos_proj = nn.Linear(3, num_heads)
        self.proj_drop = nn.Dropout(proj_drop)
        self.locality_strength = locality_strength
        self.gating_param = nn.Parameter(1*torch.ones(self.num_heads))
        self.apply(self._init_weights)
        if use_local_init:
            self.local_init(locality_strength=locality_strength)
        self.current_grid_size = grid_size
    def _init_weights(self, m):
        # Truncated-normal init for linear weights; zeros/ones for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def get_attention(self, x):
        """Return the gated (B, heads, N, N) attention matrix for tokens x."""
        B, N, C = x.shape
        k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        pos_score = self.pos_proj(self.rel_indices).expand(B, -1, -1,-1).permute(0,3,1,2)
        patch_score = (q @ k.transpose(-2, -1)) * self.scale
        patch_score = patch_score.softmax(dim=-1)
        pos_score = pos_score.softmax(dim=-1)
        # Per-head sigmoid gate interpolates between content and position.
        gating = self.gating_param.view(1,-1,1,1)
        attn = (1.-torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
        # Renormalize rows so they still sum to one after the blend.
        attn = attn / attn.sum(dim=-1).unsqueeze(-1)
        attn = self.attn_drop(attn)
        return attn
    def get_attention_map(self, x, return_map = False):
        """Per-head mean attention distance; optionally also the map itself."""
        attn_map = self.get_attention(x).mean(0) # average over batch
        distances = self.rel_indices.squeeze()[:,:,-1]**.5
        dist = torch.einsum('nm,hnm->h', (distances, attn_map))
        dist /= distances.size(0)
        if return_map:
            return dist, attn_map
        else:
            return dist
    def local_init(self, locality_strength=1.):
        """Initialize heads to attend locally on a sqrt(heads) x sqrt(heads)
        neighborhood (identity value projection)."""
        self.v.weight.data.copy_(torch.eye(self.dim))
        locality_distance = 1 #max(1,1/locality_strength**.5)
        kernel_size = int(self.num_heads**.5)
        center = (kernel_size-1)/2 if kernel_size%2==0 else kernel_size//2
        for h1 in range(kernel_size):
            for h2 in range(kernel_size):
                position = h1+kernel_size*h2
                # Weights of pos_proj pick the offset each head prefers.
                self.pos_proj.weight.data[position,2] = -1
                self.pos_proj.weight.data[position,1] = 2*(h1-center)*locality_distance
                self.pos_proj.weight.data[position,0] = 2*(h2-center)*locality_distance
        self.pos_proj.weight.data *= locality_strength
    def get_rel_indices(self, ):
        """Cache (1, N, N, 3) relative indices (dx, dy, dx^2+dy^2) for the
        current grid size on self.rel_indices."""
        H, W = self.current_grid_size
        N = H*W
        rel_indices = torch.zeros(1, N, N, 3)
        indx = torch.arange(W).view(1,-1) - torch.arange(W).view(-1, 1)
        indx = indx.repeat(H, H)
        indy = torch.arange(H).view(1,-1) - torch.arange(H).view(-1, 1)
        indy = indy.repeat_interleave(W, dim=0).repeat_interleave(W, dim=1)
        indd = indx**2 + indy**2
        rel_indices[:,:,:,2] = indd.unsqueeze(0)
        rel_indices[:,:,:,1] = indy.unsqueeze(0)
        rel_indices[:,:,:,0] = indx.unsqueeze(0)
        device = self.v.weight.device
        self.rel_indices = rel_indices.to(device)
    def forward(self, x):
        B, N, C = x.shape
        # Rebuild the positional indices if the token count changed.
        if not hasattr(self, 'rel_indices') or self.rel_indices.size(1)!=N:
            self.get_rel_indices()
        attn = self.get_attention(x)
        v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class MHSA(nn.Module):
    """Vanilla multi-head self-attention with a fused qkv projection.

    `current_grid_size` is set externally (by Block.forward) and is only
    used for the attention-distance diagnostic.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., grid_size=None):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.apply(self._init_weights)
        self.current_grid_size = grid_size
    def _init_weights(self, m):
        # Truncated-normal init for linear weights; zeros/ones for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def get_attention_map(self, x, return_map = False):
        """Per-head mean attention distance; optionally also the map itself."""
        self.get_rel_indices()
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn_map = (q @ k.transpose(-2, -1)) * self.scale
        attn_map = attn_map.softmax(dim=-1).mean(0) # average over batch
        distances = self.rel_indices.squeeze()[:,:,-1]**.5
        dist = torch.einsum('nm,hnm->h', (distances, attn_map))
        dist /= distances.size(0)
        if return_map:
            return dist, attn_map
        else:
            return dist
    def get_rel_indices(self, ):
        """Cache (1, N, N, 3) relative indices (dx, dy, dx^2+dy^2) for the
        current grid size on self.rel_indices."""
        H, W = self.current_grid_size
        N = H*W
        rel_indices = torch.zeros(1, N, N, 3)
        indx = torch.arange(W).view(1,-1) - torch.arange(W).view(-1, 1)
        indx = indx.repeat(H, H)
        indy = torch.arange(H).view(1,-1) - torch.arange(H).view(-1, 1)
        indy = indy.repeat_interleave(W, dim=0).repeat_interleave(W, dim=1)
        indd = indx**2 + indy**2
        rel_indices[:,:,:,2] = indd.unsqueeze(0)
        rel_indices[:,:,:,1] = indy.unsqueeze(0)
        rel_indices[:,:,:,0] = indx.unsqueeze(0)
        device = self.qkv.weight.device
        self.rel_indices = rel_indices.to(device)
    def forward(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class Block(nn.Module):
    """Transformer encoder block: attention (GPSA or MHSA) and an MLP, each
    with pre-norm, a residual connection and stochastic depth."""
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_gpsa=True, **kwargs):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.use_gpsa = use_gpsa
        # Pick the attention flavor; extra kwargs are forwarded unchanged.
        attn_cls = GPSA if self.use_gpsa else MHSA
        self.attn = attn_cls(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
                             attn_drop=attn_drop, proj_drop=drop, **kwargs)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer, drop=drop)
    def forward(self, x, grid_size):
        # Tell the attention module the current token grid so its positional
        # logic can adapt to variable input sizes.
        self.attn.current_grid_size = grid_size
        x = x + self.drop_path(self.attn(self.norm1(x)))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding, from timm: a strided convolution that
    produces one embedded output pixel per patch.
    """
    def __init__(self, patch_size, in_chans, embed_dim):
        super().__init__()
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.apply(self._init_weights)
    def forward(self, x):
        return self.proj(x)
    def _init_weights(self, m):
        # Truncated-normal init for linear weights; zeros/ones for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
class VisionTransformer(nn.Module):
""" Vision Transformer
"""
    # NOTE(review): `gpsa_interval=[-1, -1]` is a mutable default argument;
    # it is never mutated here, but confirm no caller modifies it in place.
    # `global_pool` is accepted but unused.
    def __init__(self, avrg_img_size=320, patch_size=16, in_chans=1, embed_dim=64, depth=8,
                 num_heads=9, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, global_pool=None,
                 gpsa_interval=[-1, -1], locality_strength=1., use_pos_embed=True):
        super().__init__()
        self.depth = depth
        # Total embedding width scales with the head count.
        embed_dim *= num_heads
        self.num_features = embed_dim # num_features for consistency with other models
        self.locality_strength = locality_strength
        self.use_pos_embed = use_pos_embed
        if isinstance(avrg_img_size, int):
            img_size = to_2tuple(avrg_img_size)
        if isinstance(patch_size, int):
            self.patch_size = to_2tuple(patch_size)
        else:
            self.patch_size = patch_size
        self.in_chans = in_chans
        self.patch_embed = PatchEmbed(
            patch_size=self.patch_size, in_chans=in_chans, embed_dim=embed_dim)
        self.pos_drop = nn.Dropout(p=drop_rate)
        if self.use_pos_embed:
            # Learned positional embedding at the average grid size; it is
            # bilinearly resized in forward_features for other input sizes.
            self.pos_embed = nn.Parameter(
                torch.zeros(1, embed_dim,
                            img_size[0] // self.patch_size[0],
                            img_size[1] // self.patch_size[1])
            )
            trunc_normal_(self.pos_embed, std=.02)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
        # Blocks inside gpsa_interval (1-based, inclusive) use gated
        # positional attention; the rest use plain MHSA.
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                use_gpsa=True,
                locality_strength=locality_strength)
            if i>=gpsa_interval[0]-1 and i<gpsa_interval[1] else
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                use_gpsa=False,)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # head: one linear layer predicting all pixels of a patch per token
        self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')]
        self.head = nn.Linear(self.num_features, in_chans*self.patch_size[0]*self.patch_size[1])
def seq2img(self, x, img_size):
"""
Transforms sequence back into image space, input dims: [batch_size, num_patches, channels]
output dims: [batch_size, channels, H, W]
"""
x = x.view(x.shape[0], x.shape[1], self.in_chans, self.patch_size[0], self.patch_size[1])
x = x.chunk(x.shape[1], dim=1)
x = torch.cat(x, dim=4).permute(0,1,2,4,3)
x = x.chunk(img_size[0]//self.patch_size[0], dim=3)
x = torch.cat(x, dim=4).permute(0,1,2,4,3).squeeze(1)
return x
self.head.apply(self._init_weights)
    def _init_weights(self, m):
        # Truncated-normal init for linear weights (zero bias); LayerNorm is
        # reset to bias 0 / weight 1. Applied module-by-module via .apply().
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    @torch.jit.ignore
    def no_weight_decay(self,):
        # Parameter names the optimizer should exempt from weight decay.
        return {'pos_embed'}
    def get_head(self,):
        """Return the prediction head module."""
        return self.head
def reset_head(self,):
self.head = nn.Linear(self.num_features, in_chans*self.patch_size[0]*self.patch_size[1])
    def forward_features(self, x, k=None):
        """Patch-embed `x`, add (resized) positional embeddings, run all
        transformer blocks and return the normalized token sequence.

        If `k` is given, the attention map of block `k` is cached on
        `self.attention_map` as a side effect for later inspection.
        """
        x = self.patch_embed(x)
        _, _, H, W = x.shape
        if self.use_pos_embed:
            # Resize the stored positional embedding to the current grid so
            # variable input sizes are supported.
            pos_embed = F.interpolate(self.pos_embed, size=[H, W], mode='bilinear', align_corners = False)
            x = x + pos_embed
        # (B, C, H, W) -> (B, H*W, C) token sequence.
        x = x.flatten(2).transpose(1, 2)
        x = self.pos_drop(x)
        for u, blk in enumerate(self.blocks):
            x = blk(x, (H, W))
            if k is not None and u == k:
                # Side effect: stash the k-th block's attention map.
                self.attention_map = blk.attn.get_attention_map(x, return_map = True)
        x = self.norm(x)
        return x
    def forward(self, x, k=None):
        """Full reconstruction pass: features -> per-patch pixel head ->
        reassembly into an image with the input's spatial size."""
        _, _, H, W = x.shape
        x = self.forward_features(x, k)
        x = self.head(x)
        x = self.seq2img(x, (H, W))
        return x | 15082 | 39.007958 | 186 | py
imaging_MLPs | imaging_MLPs-master/untrained/networks/recon_net.py | import torch.nn as nn
import torch.nn.functional as F
from math import ceil, floor
class ReconNet(nn.Module):
    """Thin wrapper that pads inputs to a patch-size multiple before running
    the wrapped network and crops the padding off the output."""
    def __init__(self, net):
        super().__init__()
        self.net = net
    def pad(self, x):
        # Symmetric zero-padding of H and W up to the next patch multiple;
        # returns the [left, right] and [top, bottom] amounts for unpad.
        _, _, h, w = x.shape
        hp, wp = self.net.patch_size
        f1 = ( (wp - w % wp) % wp ) / 2
        f2 = ( (hp - h % hp) % hp ) / 2
        wpad = [floor(f1), ceil(f1)]
        hpad = [floor(f2), ceil(f2)]
        # F.pad pads the last dim first: (left, right, top, bottom).
        x = F.pad(x, wpad+hpad)
        return x, wpad, hpad
    def unpad(self, x, wpad, hpad):
        # Crop away exactly what pad() added.
        return x[..., hpad[0] : x.shape[-2]-hpad[1], wpad[0] : x.shape[-1]-wpad[1]]
    def forward(self, x, k=None):
        x, wpad, hpad = self.pad(x)
        x = self.net(x, k)
        x = self.unpad(x, wpad, hpad)
        return x | 810 | 26.033333 | 90 | py
imaging_MLPs | imaging_MLPs-master/untrained/networks/unet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class Unet(nn.Module):
    """
    PyTorch implementation of a U-Net model.
    O. Ronneberger, P. Fischer, and Thomas Brox. U-net: Convolutional networks
    for biomedical image segmentation. In International Conference on Medical
    image computing and computer-assisted intervention, pages 234–241.
    Springer, 2015.
    """
    def __init__(
        self,
        in_chans: int = 1,
        out_chans: int = 1,
        chans: int = 32,
        num_pool_layers: int = 4,
        drop_prob: float = 0.0,
    ):
        """
        Args:
            in_chans: Number of channels in the input to the U-Net model.
            out_chans: Number of channels in the output to the U-Net model.
            chans: Number of output channels of the first convolution layer.
            num_pool_layers: Number of down-sampling and up-sampling layers.
            drop_prob: Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.chans = chans
        self.num_pool_layers = num_pool_layers
        self.drop_prob = drop_prob
        # Encoder: the first block maps the input channels, each subsequent
        # block doubles the channel count.
        self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
        width = chans
        for _ in range(num_pool_layers - 1):
            self.down_sample_layers.append(ConvBlock(width, width * 2, drop_prob))
            width *= 2
        # Bottleneck at the coarsest resolution.
        self.conv = ConvBlock(width, width * 2, drop_prob)
        # Decoder: transpose convs upsample; ConvBlocks fuse the concatenated
        # skip connection (hence the 2x input channels).
        self.up_conv = nn.ModuleList()
        self.up_transpose_conv = nn.ModuleList()
        for _ in range(num_pool_layers - 1):
            self.up_transpose_conv.append(TransposeConvBlock(width * 2, width))
            self.up_conv.append(ConvBlock(width * 2, width, drop_prob))
            width //= 2
        self.up_transpose_conv.append(TransposeConvBlock(width * 2, width))
        # Final level also maps to the requested output channels via 1x1 conv.
        self.up_conv.append(
            nn.Sequential(
                ConvBlock(width * 2, width, drop_prob),
                nn.Conv2d(width, self.out_chans, kernel_size=1, stride=1),
            )
        )
    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.
        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        skips = []
        out = image
        # Encoder path: keep each pre-pool activation as a skip connection.
        for down in self.down_sample_layers:
            out = down(out)
            skips.append(out)
            out = F.avg_pool2d(out, kernel_size=2, stride=2, padding=0)
        out = self.conv(out)
        # Decoder path: upsample, align odd sizes via reflect padding, then
        # concatenate the matching skip (consumed in LIFO order).
        for up_transpose, up_block in zip(self.up_transpose_conv, self.up_conv):
            skip = skips.pop()
            out = up_transpose(out)
            pad = [0, 0, 0, 0]
            if out.shape[-1] != skip.shape[-1]:
                pad[1] = 1  # pad right
            if out.shape[-2] != skip.shape[-2]:
                pad[3] = 1  # pad bottom
            if any(pad):
                out = F.pad(out, pad, "reflect")
            out = torch.cat([out, skip], dim=1)
            out = up_block(out)
        return out
class ConvBlock(nn.Module):
    """
    Two conv stages, each Conv2d(3x3) -> InstanceNorm2d -> LeakyReLU ->
    Dropout2d; spatial dimensions are preserved.
    """
    def __init__(self, in_chans: int, out_chans: int, drop_prob: float):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
            drop_prob: Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.drop_prob = drop_prob
        def stage(c_in):
            # One conv stage; bias is disabled because InstanceNorm follows.
            return [
                nn.Conv2d(c_in, out_chans, kernel_size=3, padding=1, bias=False),
                nn.InstanceNorm2d(out_chans),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Dropout2d(drop_prob),
            ]
        self.layers = nn.Sequential(*stage(in_chans), *stage(out_chans))
    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.
        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        return self.layers(image)
class TransposeConvBlock(nn.Module):
    """
    A Transpose Convolutional Block that consists of one convolution transpose
    layers followed by instance normalization and LeakyReLU activation.
    """
    def __init__(self, in_chans: int, out_chans: int):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        # Kernel 2 / stride 2 exactly doubles each spatial dimension; bias is
        # disabled because InstanceNorm follows.
        self.layers = nn.Sequential(
            nn.ConvTranspose2d(
                in_chans, out_chans, kernel_size=2, stride=2, bias=False
            ),
            nn.InstanceNorm2d(out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )
    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.
        Returns:
            Output tensor of shape `(N, out_chans, H*2, W*2)`.
        """
        return self.layers(image) | 5981 | 32.79661 | 88 | py
DeepIR | DeepIR-main/demo.py | #!/usr/bin/env python
import os
import sys
from pprint import pprint
# Pytorch requires blocking launch for proper working
if sys.platform == 'win32':
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
import numpy as np
from scipy import io
import torch
import torch.nn
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark =True
sys.path.append('modules')
import utils
import motion
import dataset
import thermal
if __name__ == '__main__':
    # --- Experiment configuration -------------------------------------
    imname = 'test1' # Name of the test file name
    camera = 'sim' # 'sim', 'boson' or 'lepton'
    scale_sr = 8 # 1 for denoising/NUC, 2, 3, .. for SR
    nimg = 20 # Number of input images
    method = 'dip' # 'cvx' for Hardie et al., 'dip' for DeepIR
    # Load config file --
    config = dataset.load_config('configs/%s_%s.ini'%(method, camera))
    config['batch_size'] = nimg
    # Windows multiprocessing workers do not play well with this loader.
    config['num_workers'] = (0 if sys.platform=='win32' else 4)
    # Prior weight is scaled by the SR factor and number of frames.
    config['lambda_prior'] *= (scale_sr/nimg)
    # Load data
    if not config['real']:
        # This is simulated data
        im = utils.get_img(imname, 1)
        minval = 0
        maxval = 1
    else:
        # This is real data
        im, minval, maxval = utils.get_real_im(imname, camera)
    # Get data for SR -- this will also get an initial estimate for registration
    im, imstack, ecc_mats = motion.get_SR_data(im, scale_sr, nimg, config)
    # Translation components of the motion matrices live in pixel units of
    # the low-res frames; rescale them to the SR grid.
    ecc_mats[:, :, 2] *= scale_sr
    H, W = im.shape
    # Ground truth is stashed in the config so the solver can report metrics.
    config['gt'] = im
    # Now run denoising
    if method == 'cvx':
        im_dip, profile_dip = thermal.interp_convex(imstack.astype(np.float32),
                                                    ecc_mats.astype(np.float32),
                                                    (H, W), config)
    else:
        im_dip, profile_dip = thermal.interp_DIP(imstack.astype(np.float32),
                                                 ecc_mats.astype(np.float32),
                                                 (H, W), config)
    # Save reconstruction, calibration maps and metrics to a .mat file.
    mdict = {'gt': im,
             'rec': im_dip,
             'gain': profile_dip['gain'],
             'offset': profile_dip['offset'],
             'snr': profile_dip['metrics']['snrval'],
             'psnr': profile_dip['metrics']['psnrval'],
             'ssim': profile_dip['metrics']['ssimval'],
             'minval': minval,
             'maxval': maxval}
    io.savemat('%s_%s_%s_%dx_%d.mat'%(imname, camera, method,
                                      scale_sr, nimg), mdict)
    pprint(profile_dip['metrics'])
| 2,636 | 31.555556 | 80 | py |
DeepIR | DeepIR-main/modules/losses.py | #!/usr/bin/env python
import torch
class TVNorm():
    """Total-variation penalty over the last two (spatial) dimensions.

    `mode` selects the flavor: 'isotropic' -> mean sqrt(gx^2 + gy^2),
    'l1' -> mean|gx| + mean|gy|, anything else -> mean(gx^2 + gy^2).
    """
    def __init__(self, mode='l1'):
        self.mode = mode
    def __call__(self, img):
        # Forward differences over the cropped interior so both gradient
        # images share the same shape.
        dx = img[..., 1:, 1:] - img[..., 1:, :-1]
        dy = img[..., 1:, 1:] - img[..., :-1, 1:]
        if self.mode == 'isotropic':
            return torch.sqrt(dx**2 + dy**2).mean()
        if self.mode == 'l1':
            return dx.abs().mean() + dy.abs().mean()
        return (dx.pow(2) + dy.pow(2)).mean()
class HessianNorm():
    """Frobenius-norm penalty on the discrete Hessian of an image."""
    def __init__(self):
        pass
    def __call__(self, img):
        # Second-order finite differences; slices keep fxx/fyy aligned on the
        # interior pixels.
        dxx = img[..., 1:-1, :-2] + img[..., 1:-1, 2:] - 2*img[..., 1:-1, 1:-1]
        dyy = img[..., :-2, 1:-1] + img[..., 2:, 1:-1] - 2*img[..., 1:-1, 1:-1]
        dxy = (img[..., :-1, :-1] + img[..., 1:, 1:]
               - img[..., 1:, :-1] - img[..., :-1, 1:])
        # ||H||_F = sqrt(fxx^2 + 2 fxy^2 + fyy^2); dxy is cropped so the
        # three terms share a shape.
        return torch.sqrt(dxx.abs().pow(2)
                          + 2*dxy[..., :-1, :-1].abs().pow(2)
                          + dyy.abs().pow(2)).mean()
class L1Norm():
    """Mean absolute error between two tensors."""
    def __init__(self):
        pass
    def __call__(self, x1, x2):
        return (x1 - x2).abs().mean()
class PoissonNorm():
    """Poisson negative log-likelihood (up to constants):
    mean(x1 - x2 * log(x1))."""
    def __init__(self):
        pass
    def __call__(self, x1, x2):
        # Small epsilon keeps the log finite for zero-valued estimates.
        return (x1 - x2*torch.log(x1 + 1e-12)).mean()
class L2Norm():
    """Mean squared error between two tensors."""
    def __init__(self):
        pass
    def __call__(self, x1, x2):
        return (x1 - x2).pow(2).mean()
| 1,586 | 30.117647 | 80 | py |
DeepIR | DeepIR-main/modules/utils.py | #!/usr/bin/env python
'''
Miscellaneous utilities that are extremely helpful but cannot be clubbed
into other modules.
'''
import torch
# Scientific computing
import numpy as np
import scipy.linalg as lin
from scipy import io
# Plotting
import cv2
import matplotlib.pyplot as plt
def nextpow2(x):
    '''
    Return the smallest power of 2 that is greater than or equal to x.

    Note: if x is already a power of 2 it is returned unchanged (the
    old docstring claimed "larger than x", which was wrong for that
    case). The result is a float; also works elementwise on ndarrays.

    Inputs:
        x: Positive scalar (or ndarray)

    Outputs:
        Smallest power of 2 >= x, as a float.
    '''
    logx = np.ceil(np.log2(x))
    return pow(2, logx)
def normalize(x, fullnormalize=False):
    '''
    Rescale x to lie in [0, 1].

    Inputs:
        x: Input signal
        fullnormalize: If True, map min->0 and max->1; otherwise only
            scale so the maximum becomes 1.

    Outputs:
        xnormalized: Normalized copy of x. An all-zero-sum input is
            returned unchanged to avoid division by zero.
    '''
    if x.sum() == 0:
        return x
    xmin = x.min() if fullnormalize else 0
    return (x - xmin) / (x.max() - xmin)
def asnr(x, xhat, compute_psnr=False):
    '''
    Affine-invariant SNR: fits the best global scale and offset of xhat
    to x before computing SNR, so any gain/bias mismatch in xhat is not
    penalized.

    Inputs:
        x: Ground truth signal (ndarray)
        xhat: Approximation of x
        compute_psnr: If True, return PSNR of the fitted estimate
            instead of RSNR

    Outputs:
        SNR (or PSNR) in dB between x and a*xhat + b, where (a, b)
        minimize the MSE.
    '''
    # Closed-form least-squares fit of a*xhat + b to x.
    mean_cross = (x*xhat).mean()
    mean_sq = (xhat*xhat).mean()
    mean_hat = xhat.mean()
    mean_x = x.mean()
    a = (mean_cross - mean_hat*mean_x)/(mean_sq - mean_hat*mean_hat)
    b = mean_x - a*mean_hat
    fitted = a*xhat + b
    if compute_psnr:
        return psnr(x, fitted)
    return rsnr(x, fitted)
def rsnr(x, xhat):
    '''
    Reconstruction SNR in dB: 20*log10(||x|| / ||x - xhat||).

    Inputs:
        x: Ground truth signal (ndarray)
        xhat: Approximation of x

    Outputs:
        rsnr_val: SNR in dB (a tiny epsilon guards against a zero
            error norm for perfect reconstructions).
    '''
    signal_norm = lin.norm(x.reshape(-1))
    err_norm = lin.norm((x - xhat).reshape(-1)) + 1e-12
    return 20*np.log10(signal_norm/err_norm)
def psnr(x, xhat):
    ''' Compute Peak Signal to Noise Ratio in dB

    PSNR = 10*log10(max(x)^2 / MSE(x, xhat)).

    Inputs:
        x: Ground truth signal
        xhat: Reconstructed signal

    Outputs:
        snrval: PSNR in dB
    '''
    err = x - xhat
    # Mean-squared error; epsilon avoids log(0) for a perfect fit.
    denom = np.mean(pow(err, 2)) + 1e-12
    # The peak value must be squared in the standard PSNR definition;
    # the previous code used max(x)/MSE, which mis-reports PSNR
    # whenever max(x) != 1.
    snrval = 10*np.log10(np.max(x)**2/denom)
    return snrval
def embed(im, embedsize):
    '''
    Center a small image inside a larger zero-filled window.

    Inputs:
        im: Image to embed
        embedsize: 2-tuple (height, width) of the output window

    Outputs:
        imembed: Zero-padded image of shape embedsize with im centered
    '''
    h_small, w_small = im.shape
    h_big, w_big = embedsize
    top = (h_big - h_small)//2
    left = (w_big - w_small)//2
    imembed = np.zeros((h_big, w_big), dtype=im.dtype)
    imembed[top:top + h_small, left:left + w_small] = im
    return imembed
def measure(x, noise_snr=40, tau=100):
    ''' Simulate a realistic sensor measurement with readout and photon
    noise.

    Inputs:
        x: Clean signal
        noise_snr: Readout (Gaussian) noise level in dB
        tau: Integration time; Poisson noise is drawn for x*tau.
            Pass float('Inf') to disable photon noise. (Default 100)

    Outputs:
        x_meas: Noisy copy of x
    '''
    x_meas = np.copy(x)
    # Readout noise scaled to the requested SNR (drawn first so the
    # random stream matches the documented order).
    read_noise = pow(10, -noise_snr/20)*np.random.randn(x_meas.size).reshape(x_meas.shape)
    if tau != float('Inf'):
        # Photon (Poisson) noise on the scaled signal; negative entries
        # are mirrored so np.random.poisson only sees valid rates.
        x_meas = x_meas*tau
        x_meas[x > 0] = np.random.poisson(x_meas[x > 0])
        x_meas[x <= 0] = -np.random.poisson(-x_meas[x <= 0])
        x_meas = (x_meas + read_noise)/tau
    else:
        x_meas = x_meas + read_noise
    return x_meas
def grid_plot(imdata):
    '''
    Plot a 3D stack of images as a near-square 2D grid of subplots.

    Inputs:
        imdata: N x H x W image stack

    Outputs:
        None (draws into the current matplotlib figure)
    '''
    nframes, _, _ = imdata.shape
    nrows = int(np.sqrt(nframes))
    ncols = int(np.ceil(nframes/nrows))
    for k in range(nframes):
        plt.subplot(nrows, ncols, k + 1)
        plt.imshow(imdata[k, :, :], cmap='gray')
        # Hide axis ticks for a clean montage look.
        plt.xticks([], [])
        plt.yticks([], [])
def build_montage(images):
    '''
    Tile an (N, H, W) stack of images into one roughly square montage;
    unused cells stay zero.
    '''
    nimg, H, W = images.shape
    nrows = int(np.ceil(np.sqrt(nimg)))
    ncols = int(np.ceil(nimg/nrows))
    montage_im = np.zeros((H*nrows, W*ncols), dtype=np.float32)
    idx = 0
    for row in range(nrows):
        for col in range(ncols):
            if idx == nimg:
                break
            montage_im[row*H:(row + 1)*H, col*W:(col + 1)*W] = images[idx, ...]
            idx += 1
    return montage_im
def ims2rgb(im1, im2):
    '''
    Fuse two grayscale images into an RGB comparison image:
    im1 goes to the red channel, im2 to the blue channel.

    Inputs:
        im1, im2: Two images to compare
    '''
    H, W = im1.shape
    fused = np.zeros((H, W, 3))
    fused[..., 0] = im1
    fused[..., 2] = im2
    return fused
def textfunc(im, txt):
    '''Overlay txt on im at a fixed position (white, thickness 2,
    anti-aliased) and return the annotated image.'''
    origin = (30, 30)
    font = cv2.FONT_HERSHEY_SIMPLEX
    return cv2.putText(im, txt, origin, font, 0.5, (1, 1, 1), 2,
                       cv2.LINE_AA)
def get_img(imname, scaling):
    '''Load data/<imname>.png, rescale it by `scaling`, and return the
    green channel as a contiguous float32 array. Grayscale inputs are
    first replicated to three channels.'''
    raw = plt.imread('data/%s.png'%imname)
    im = cv2.resize(raw, None, fx=scaling, fy=scaling)
    if im.ndim == 2:
        # Promote grayscale to a 3-channel image by replication.
        im = im[:, :, np.newaxis]
        im = im[:, :, [0, 0, 0]]
    im = np.copy(im, order='C')
    H, W, _ = im.shape
    return np.copy(im[..., 1], order='C').astype(np.float32)
def get_real_im(imname, camera):
    '''
    Load a captured image stack from data/<camera>/<imname>.mat.

    Inputs:
        imname: File name without extension
        camera: Sub-folder / camera name; 'rgb' stacks are additionally
            downsampled 2x spatially

    Outputs:
        im: Normalized float32 stack in [0, 1]
        minval, maxval: Original (pre-normalization) intensity range
    '''
    im = io.loadmat('data/%s/%s.mat'%(camera, imname))['imstack']
    minval = im.min()
    maxval = im.max()
    if camera == 'rgb':
        # Downsample 2x; cast to float32 for consistency with the other
        # branch (previously this branch silently returned float64).
        im = normalize(im[:, ::2, ::2], True).astype(np.float32)
    else:
        im = normalize(im, True).astype(np.float32)
    return im, minval, maxval
def boxify(im, topleft, boxsize, color=(1, 1, 1), width=2):
    '''
    Draw a rectangular outline on an image (in place).

    Inputs:
        im: (H, W, 3) image to annotate (modified in place)
        topleft: (row, col) of the box's top-left corner
        boxsize: (height, width) = (dh, dw) of the box
        color: Edge color (default white; tuple default avoids the
            mutable-default-argument pitfall)
        width: Edge thickness in pixels

    Outputs:
        im: The same array, with the box drawn
    '''
    h, w = topleft
    dh, dw = boxsize
    # Left and right edges span the box height.
    im[h:h+dh+1, w:w+width, :] = color
    im[h:h+dh+1, w+dw:w+dw+width, :] = color
    # Top and bottom edges span the box *width*. The previous code used
    # the height dh here, drawing wrong-length edges for non-square
    # boxes.
    im[h:h+width, w:w+dw+width, :] = color
    im[h+dh:h+dh+width, w:w+dw+width, :] = color
    return im
def get_inp(tensize, const=10.0):
    '''
    Create a trainable parameter tensor of the given size on the GPU.

    Inputs:
        tensize: Shape of the parameter tensor
        const: The uniform noise is divided by this constant

    Outputs:
        inp: torch.nn.Parameter (requires_grad=True)
    '''
    # torch.autograd.Variable is deprecated; a Parameter built directly
    # from a tensor is trainable (requires_grad=True) by default, so
    # the old Variable + double-.cuda() wrapping is unnecessary.
    inp = torch.nn.Parameter(torch.rand(tensize).cuda()/const)
    return inp
| 6,530 | 21.996479 | 85 | py |
DeepIR | DeepIR-main/modules/dataset.py | #!/usr/bin/env python
import os
import sys
import tqdm
import pdb
import math
import configparser
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from PIL import Image
from torchvision.transforms import Resize, Compose, ToTensor, Normalize
import skimage
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import cv2
def get_mgrid(sidelen, dim=2):
    '''Generate a flattened grid of (x, y, ...) coordinates spanning
    [-1, 1] along every dimension.

    sidelen: samples per dimension (int)
    dim: number of dimensions (int)
    '''
    axes = [torch.linspace(-1, 1, steps=sidelen) for _ in range(dim)]
    grid = torch.stack(torch.meshgrid(*axes), dim=-1)
    return grid.reshape(-1, dim)
def xy_mgrid(H, W):
    '''
    Build a flattened (x, y) coordinate grid for possibly non-square
    images.

    Inputs:
        H, W: Grid dimensions

    Outputs:
        mgrid: (H*W, 2) tensor of (x, y) pairs, each axis spanning
            [-1, 1]
    '''
    ygrid, xgrid = torch.meshgrid(torch.linspace(-1, 1, H),
                                  torch.linspace(-1, 1, W))
    return torch.stack((xgrid, ygrid), dim=-1).reshape(-1, 2)
class ImageDataset(Dataset):
    """Single-item dataset wrapping one (H, W, nchan) image.

    The lone item is the full normalized coordinate grid together with
    all pixel values flattened to (H*W, nchan).
    """
    def __init__(self, img):
        super().__init__()
        H, W, nchan = img.shape
        pixvals = torch.tensor(img)[..., None]
        self.pixels = pixvals.view(-1, nchan)
        self.coords = xy_mgrid(H, W)
    def __len__(self):
        return 1
    def __getitem__(self, idx):
        if idx > 0: raise IndexError
        return self.coords, self.pixels
class Image3x3Dataset(Dataset):
    """Single-item dataset pairing each pixel with the coordinates of a
    small forward neighborhood: four shifted copies of the grid (offsets
    of 0 or 1 pixel along each axis) hstacked into (H*W, 8).
    """
    def __init__(self, img):
        super().__init__()
        H, W, nchan = img.shape
        pixvals = torch.tensor(img)[..., None]
        self.pixels = pixvals.view(-1, nchan)
        self.coords = xy_mgrid(H, W)
        # One shifted copy of the grid per (x, y) offset in {0, 1}
        # pixels (offsets expressed in normalized units).
        shifted = []
        for dx in [0, 1]:
            for dy in [0, 1]:
                offset = np.array([dx/W, dy/H]).reshape(1, 2)
                shifted.append(self.coords + offset)
        self.coords = np.hstack(shifted).astype(np.float32)
    def __len__(self):
        return 1
    def __getitem__(self, idx):
        if idx > 0: raise IndexError
        return self.coords, self.pixels
class ImageFlowDataset(Dataset):
    """Single-item dataset holding a pair of (H, W) frames for flow
    estimation, along with their shared coordinate grid.
    """
    def __init__(self, img1, img2):
        super().__init__()
        H, W = img1.shape
        self.pixels1 = torch.tensor(img1)[..., None].view(-1, 1)
        self.pixels2 = torch.tensor(img2)[..., None].view(-1, 1)
        self.coords = xy_mgrid(H, W)
    def __len__(self):
        return 1
    def __getitem__(self, idx):
        if idx > 0: raise IndexError
        return self.coords, self.pixels1, self.pixels2
class ImageRegDataset(Dataset):
    """Dataset over the frames of an (N, H, W) stack: each item is a
    (1, H, W) tensor plus its frame index (used for registration).
    """
    def __init__(self, imstack):
        super().__init__()
        self.imstack = imstack
        self.nimg, _, _ = imstack.shape
    def __len__(self):
        return self.nimg
    def __getitem__(self, idx):
        frame = torch.tensor(self.imstack[idx, ...])[None, ...]
        return frame, idx
class ImageStackDataset(Dataset):
    """Dataset over an (N, H, W) stack; each item yields the shared
    coordinate grid plus that frame's pixels flattened to (H*W, 1).
    """
    def __init__(self, imstack):
        super().__init__()
        self.imstack = imstack
        self.nimg, H, W = imstack.shape
        self.coords = xy_mgrid(H, W)
    def __len__(self):
        return self.nimg
    def __getitem__(self, idx):
        frame = torch.tensor(self.imstack[idx, ...])
        pixels = frame[None, ...].permute(1, 2, 0).view(-1, 1)
        return self.coords, pixels
class ImageSRDataset(Dataset):
    """Dataset over an (N, H, W) low-resolution stack for
    super-resolution.

    Each item returns (coords, pixels, mask[, idx]):
        coords: per-frame (H*W, 2) sampling coordinates built from
            Xstack/Ystack, or a dummy tensor when none were supplied
        pixels: flattened (H*W, 1) frame intensities
        mask: flattened per-pixel mask, or a dummy tensor when absent
        idx: frame index (only when get_indices=True)
    """
    def __init__(self, imstack, Xstack=None, Ystack=None, masks=None,
                 jitter=False, xjitter=None, yjitter=None, get_indices=False):
        super().__init__()
        self.imstack = imstack
        self.Xstack = Xstack
        self.Ystack = Ystack
        self.masks = masks
        self.jitter = jitter
        self.get_indices = get_indices
        self.nimg, self.H, self.W = imstack.shape
        # Default jitter amplitude is one pixel in normalized units.
        if xjitter is None:
            self.xjitter = 1/self.W
            self.yjitter = 1/self.H
        else:
            self.xjitter = xjitter
            self.yjitter = yjitter
    def __len__(self):
        return self.nimg
    def __getitem__(self, idx):
        frame = torch.tensor(self.imstack[idx, ...])
        pixels = frame[None, ...].permute(1, 2, 0).view(-1, 1)
        if self.masks is None:
            mask = torch.zeros(1)
        else:
            mask = torch.tensor(self.masks[idx, ...])
            mask = mask[None, ...].permute(1, 2, 0).view(-1, 1)
        if self.Xstack is None:
            coords = torch.zeros(1)
        else:
            coords = torch.stack((torch.tensor(self.Xstack[idx, ...]),
                                  torch.tensor(self.Ystack[idx, ...])),
                                 dim=-1).reshape(-1, 2)
        if self.get_indices:
            return coords, pixels, mask, idx
        return coords, pixels, mask
class ImageChunkDataset(Dataset):
    """Dataset that iterates over spatial patches of an (N, H, W) stack.

    Each item returns the (shifted) patch coordinate grid, repeated for
    every frame, together with the patch pixels from all frames.
    """
    def __init__(self, imstack, patchsize):
        super().__init__()
        # imstack: (nimg, H, W) stack; patchsize: (patch_h, patch_w)
        self.imstack = imstack
        self.nimg, self.H, self.W = imstack.shape
        self.patchsize = patchsize
        self.patch_coords = xy_mgrid(patchsize[0], patchsize[1])
        # Number of patches per axis (ceil => the last patch is clamped
        # and overlaps its neighbor instead of running off the image).
        self.nH = int(np.ceil(self.H/patchsize[0]))
        self.nW = int(np.ceil(self.W/patchsize[1]))
    def __len__(self):
        return (self.nH * self.nW)
    def __getitem__(self, idx):
        # NOTE(review): both the modulo and the division use nH, so the
        # linear index is decoded as idx = h_idx*nH + w_idx; this only
        # tiles correctly when nH == nW -- verify intent for
        # non-square tilings.
        w_idx = int(idx%self.nH)
        h_idx = int((idx - w_idx)//self.nH)
        h1 = h_idx*self.patchsize[0]
        h2 = h_idx*self.patchsize[0] + self.patchsize[0]
        w1 = w_idx*self.patchsize[1]
        w2 = w_idx*self.patchsize[1] + self.patchsize[1]
        # Clamp edge patches so they stay inside the image bounds.
        if h2 > self.H:
            h1 = self.H - self.patchsize[0]
            h2 = self.H
        if w2 > self.W:
            w1 = self.W - self.patchsize[1]
            w2 = self.W
        img = torch.tensor(self.imstack[:, h1:h2, w1:w2])
        pixels = img.reshape(-1, 1)
        coords = torch.clone(self.patch_coords)
        # NOTE(review): patch_coords lie in [-1, 1] but w1/h1 are pixel
        # offsets; mixing units here looks suspicious -- confirm the
        # consumer expects these shifted values.
        coords[:, 0] = coords[:, 0] + w1
        coords[:, 1] = coords[:, 1] + h1
        coords = torch.repeat_interleave(coords, self.nimg, 0)
        return coords, pixels
def load_config(configpath):
    '''
    Load a configuration file into a flat dictionary.

    Values are coerced: 'True'/'False' to bool, tokens containing '.'
    to float when possible (non-numeric dotted strings such as file
    names stay strings), everything else to int when possible,
    otherwise left as the raw string.

    Inputs:
        configpath: Path to an INI-style configuration file

    Outputs:
        params_dict: Dict of every key from every section (later
            sections overwrite duplicate keys)
    '''
    parser = configparser.ConfigParser()
    parser.read(configpath)
    params_dict = dict()
    for section in parser.keys():
        for key in parser[section].keys():
            token = parser[section][key]
            if token == 'False':
                params_dict[key] = False
            elif token == 'True':
                params_dict[key] = True
            elif '.' in token:
                # Previously float() here crashed on dotted non-numeric
                # strings (e.g. 'image.png'); keep those as strings.
                try:
                    params_dict[key] = float(token)
                except ValueError:
                    params_dict[key] = token
            else:
                # Narrowed from a bare except: int(str) only raises
                # ValueError.
                try:
                    params_dict[key] = int(token)
                except ValueError:
                    params_dict[key] = token
    return params_dict
DeepIR | DeepIR-main/modules/thermal.py | #!/usr/bin/env python
'''
Routines for dealing with thermal images
'''
import tqdm
import copy
import cv2
import numpy as np
from skimage.metrics import structural_similarity as ssim_func
import torch
import kornia
import torch.nn.functional as F
import utils
import losses
import motion
import deep_prior
def get_metrics(gt, estim, pad=True):
    '''
    Compute SNR, PSNR and SSIM between two images.

    Inputs:
        gt: Ground truth image
        estim: Estimated image
        pad: If True (and the image is large enough), crop a 20-pixel
            border before computing metrics

    Outputs:
        metrics: dict with keys 'snrval', 'psnrval', 'ssimval'
    '''
    # Cropping a 40-pixel border makes no sense on tiny images.
    if min(gt.shape) < 50:
        pad = False
    if pad:
        gt = gt[20:-20, 20:-20]
        estim = estim[20:-20, 20:-20]
    return {'snrval': utils.asnr(gt, estim),
            'psnrval': utils.asnr(gt, estim, compute_psnr=True),
            'ssimval': ssim_func(gt, estim)}
def create_fpn(imsize, vmin=0.9, vmax=1, method='col', rank=1):
    '''
    Generate multiplicative fixed-pattern noise for microbolometer-type
    sensors.

    Inputs:
        imsize: (H, W) tuple
        vmin, vmax: Minimum and maximum gain
        method:
            'col' -- per-column gains, constant down each column
            'both' -- average of `rank` random rank-1 patterns
            'corr_col' -- columns smoothed with a length-5 box filter
            'corr_both' -- smoothed rows and columns (rank-1)
        rank: Number of rank-1 terms when method is 'both'

    Outputs:
        fpn: (H, W) gain pattern
    '''
    H, W = imsize
    span = vmax - vmin
    if method == 'col':
        col_gain = vmin + span*np.random.rand(1, W)
        fpn = np.ones((H, 1)).dot(col_gain)
    elif method == 'both':
        fpn = 0
        for _ in range(rank):
            left = vmin + span*np.random.rand(H, 1)
            right = vmin + span*np.random.rand(1, W)
            fpn += left.dot(right)
        fpn /= rank
    elif method == 'corr_col':
        row = vmin + span*np.random.rand(W)
        row = np.convolve(row, np.ones(5)/5, mode='same')
        fpn = np.ones((H, 1)).dot(row.reshape(1, W))
    elif method == 'corr_both':
        row = vmin + span*np.random.rand(W)
        row = np.convolve(row, np.ones(5)/5, mode='same')
        col = vmin + span*np.random.rand(H)
        col = np.convolve(col, np.ones(5)/5, mode='same')
        fpn = col.reshape(H, 1).dot(row.reshape(1, W))
    return fpn
def reg_avg_denoise(imstack, ecc_mats=None):
    '''
    Denoise a stack by registering every frame to the reference and
    averaging the warped frames.

    Inputs:
        imstack: (nimg, H, W) image stack
        ecc_mats: Optional precomputed (nimg, 2, 3) affine matrices;
            registered from scratch when None

    Outputs:
        im_denoised: (H, W) average image, normalized by per-pixel
            coverage so border pixels seen by fewer frames are not
            dimmed
    '''
    _, H, W = imstack.shape
    if ecc_mats is None:
        ecc_mats = motion.register_stack(imstack, (H, W))[:, :2, :]
    # Warp every frame back into the reference coordinate frame.
    ecc_inv = motion.invert_regstack(ecc_mats)
    imten = torch.tensor(imstack.astype(np.float32))[:, None, ...]
    ecc_ten = torch.tensor(ecc_inv.astype(np.float32))
    warped = kornia.geometry.warp_affine(imten, ecc_ten, (H, W), flags='bilinear')
    im_denoised = warped.mean(0)[0, ...].numpy()
    # Fraction of frames contributing valid (non-zero) data per pixel.
    coverage = (warped > 0).type(torch.float32).mean(0)[0, ...].numpy()
    coverage[coverage == 0] = 1
    im_denoised /= coverage
    return im_denoised
def interp_DIP(imstack, reg_stack, hr_size, params_dict):
    '''
    Super resolve from a stack of images using deep image prior

    Inputs:
        imstack: (nimg, Hl, Wl) stack of low resolution images
        reg_stack: (nimg, 2, 3) stack of affine matrices
        hr_size: High resolution image size
        params_dict: Dictionary containing parameters for optimization
            kernel_type: Type of downsampling
            input_type: Type of input
            input_depth: Depth of input data (number of channels)
            skip_n33d: Parameter for the neural network
            skip_n33u: Parameter for the neural network
            skip_n11: Parameter for the neural network
            num_scales: Parameter for the neural network
            upsample_mode: Parameter for the neural network
            niters: Number of DIP iterations
            batch_size: Batch size of data
            num_workers: Workers for data loading
            learning_rate: Learning rate for optimization
            prior_type: tv, or hessian
            lambda_prior: Prior weight
            optimize_reg: If True, optimize registration parameters
            visualize: If True, visualize reconstructions at each iteration
            gt: If visualize is true, gt is the ground truth image
            reg_final: If True, register the final result to gt
            lpip_func: If gt is true, evaluate perceptual similarity with
                this function

    Returns:
        im_hr: High resolution image
        profile: Dictionary containing the following:
            loss_array: Array with loss at each iteration
            trained_model: State dictionary for best model
            metrics: if gt is provided, this is a dictionary with:
                snrval: SNR of reconstruction
                psnrval: Peak SNR
                ssimval: SSIM
                lpipval: VGG perceptual metrics
    '''
    nimg, Hl, Wl = imstack.shape
    H, W = hr_size
    # Average super-resolution factor over both axes.
    scale_sr = 0.5*(H/Hl + W/Wl)
    # Internal constant
    img_every = 2
    if params_dict['mul_gain']:
        lambda_offset = 10
    else:
        lambda_offset = 0
    # Create loss functions
    criterion_fidelity = losses.L1Norm()
    criterion_offset = losses.TVNorm(mode='l2')
    if params_dict['prior_type'] == 'tv':
        criterion_prior = losses.TVNorm()
    elif params_dict['prior_type'] == 'hessian':
        criterion_prior = losses.HessianNorm()
    else:
        raise ValueError('Prior not implemented')
    # Create input
    model_input = deep_prior.get_noise(params_dict['input_depth'],
                                       params_dict['input_type'],
                                       (H, W)).cuda().detach()
    # Create the network. In 'combined' mode the net predicts image,
    # gain, and offset as three channels; otherwise image only.
    if params_dict['predmode'] == 'combined':
        nchan = 3
    else:
        nchan = 1
    model = deep_prior.get_net(params_dict['input_depth'], 'skip',
                               'reflection', n_channels=nchan,
                               skip_n33d=params_dict['skip_n33d'],
                               skip_n33u=params_dict['skip_n33u'],
                               skip_n11=params_dict['skip_n11'],
                               num_scales=params_dict['num_scales'],
                               upsample_mode=params_dict['upsample_mode']
                               ).cuda()
    # Set it to training
    model.train()
    if params_dict['integrator'] == 'learnable':
        kernel_size = (int(scale_sr), int(scale_sr))
        integrator = torch.nn.Conv2d(1, 1, kernel_size=kernel_size,
                                     stride=int(scale_sr), bias=False).cuda()
        # Initialize to a box (area-averaging) filter.
        with torch.no_grad():
            integrator.weight.fill_(1.0/(scale_sr*scale_sr))
    # Create parameters from affine matrices
    # NOTE(review): torch.autograd.Variable is deprecated; Parameter on
    # a tensor would suffice here.
    affine_mat = torch.tensor(reg_stack).cuda()
    affine_var = torch.autograd.Variable(affine_mat, requires_grad=True).cuda()
    affine_param = torch.nn.Parameter(affine_var)
    # Create gain parameter
    vmin = params_dict['fpn_vmin']
    params = list(model.parameters())
    if params_dict['predmode'] != 'combined':
        gain = torch.ones(1, 1, Hl, Wl).cuda()
        gain_var = torch.autograd.Variable(gain, requires_grad=True).cuda()
        gain_param = torch.nn.Parameter(gain_var)
        offset = torch.ones(1, 1, Hl, Wl).cuda()*1e-1
        offset_var = torch.autograd.Variable(offset, requires_grad=True).cuda()
        offset_param = torch.nn.Parameter(offset_var)
        params += [gain_param] + [offset_param]
    if params_dict['integrator'] == 'learnable':
        params += integrator.parameters()
    # Create an ADAM optimizer
    optimizer = torch.optim.Adam(lr=params_dict['learning_rate'],
                                 params=params)
    # Affine transform requires a separate optimizer
    reg_optimizer = torch.optim.Adam(lr=params_dict['affine_learning_rate'],
                                     params=[affine_param])
    loss_array = np.zeros(params_dict['niters'])
    best_loss = float('inf')
    best_state_dict = None
    # We will just use all data
    gt = torch.tensor(imstack).cuda()[:, None, ...]
    for epoch in tqdm.tqdm(range(params_dict['niters'])):
        train_loss = 0
        img_and_gain = model(model_input)
        img_hr = img_and_gain[:, [0], ...]
        if params_dict['predmode'] == 'combined':
            gain_param = img_and_gain[:, [1], ...]
            offset_param = img_and_gain[:, [2], ...]
            # Predicted gain/offset live at HR; bring them to LR size.
            if scale_sr > 1:
                gain_param = F.interpolate(gain_param, (Hl, Wl))
                offset_param = F.interpolate(offset_param, (Hl, Wl))
        # Generate low resolution images
        img_hr_cat = torch.repeat_interleave(img_hr, nimg, 0)
        if params_dict['integrator'] == 'area':
            img_hr_affine = kornia.geometry.warp_affine(img_hr_cat, affine_param,
                                                        (H, W), align_corners=True)
            img_lr = F.interpolate(img_hr_affine, (Hl, Wl), mode='area')
        elif params_dict['integrator'] == 'learnable':
            img_hr_affine = kornia.geometry.warp_affine(img_hr_cat, affine_param,
                                                        (H, W), align_corners=True)
            img_lr = integrator(img_hr_affine)
        else:
            img_lr = kornia.geometry.warp_affine(img_hr_cat,
                                                 affine_param/scale_sr,
                                                 (Hl, Wl), align_corners=False)
        # Multiply with the gain term
        # mask excludes pixels that warp_affine filled with zeros.
        mask = img_lr > 0
        if params_dict['add_offset']:
            img_lr = img_lr + offset_param
        if params_dict['mul_gain']:
            img_lr = gain_param * img_lr
        mse_loss = criterion_fidelity(img_lr*mask, gt*mask)
        prior_loss = params_dict['lambda_prior']*criterion_prior(img_hr)
        loss = mse_loss + prior_loss
        if params_dict['add_offset']:
            offset_loss = lambda_offset*criterion_offset(offset_param)
            loss = loss + offset_loss
        optimizer.zero_grad()
        if params_dict['optimize_reg']:
            reg_optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if params_dict['optimize_reg']:
            reg_optimizer.step()
        train_loss = loss.item()
        # Find if we have the best mode
        if train_loss < best_loss:
            best_loss = train_loss
            best_state_dict = copy.deepcopy(model.state_dict())
        loss_array[epoch] = train_loss
        if params_dict['visualize']:
            if epoch%img_every == 0:
                with torch.no_grad():
                    img_hr_cpu = img_hr.cpu().detach().numpy().reshape(H, W)
                    v_idx = np.random.randint(nimg)
                    img_lr_cpu = img_lr[v_idx, ...].cpu().detach().reshape(Hl, Wl)
                    snrval = utils.asnr(params_dict['gt'], img_hr_cpu,
                                        compute_psnr=True)
                    ssimval = ssim_func(params_dict['gt'], img_hr_cpu)
                    txt = 'PSNR: %.1f | SSIM: %.2f'%(snrval, ssimval)
                    gain = gain_param.cpu().detach().numpy().reshape(Hl, Wl)
                    offset = offset_param.cpu().detach().numpy().reshape(Hl, Wl)
                    img_hr_ann = utils.textfunc(img_hr_cpu/img_hr_cpu.max(), txt)
                    imtop = np.hstack((imstack[v_idx, ...], img_lr_cpu.numpy()))
                    imbot = np.hstack((gain/gain.max(), offset/offset.max()))
                    imcat = np.vstack((imtop, imbot))
                    imcat_full = np.hstack((params_dict['gt'], img_hr_ann))
                    cv2.imshow('Recon LR', np.clip(imcat, 0, 1))
                    cv2.imshow('Recon HR', np.clip(imcat_full, 0, 1))
                    cv2.waitKey(1)
    # We are done, obtain the best model
    model.eval()
    with torch.no_grad():
        model.load_state_dict(best_state_dict)
        img_and_gain = model(model_input)
        img_hr = img_and_gain[[0], [0], ...].reshape(1, 1, H, W)
        img_hr = kornia.geometry.warp_affine(img_hr,
                                             affine_param[[0], ...], (H, W))
        img_hr = img_hr.cpu().detach().numpy().reshape(H, W)
        if params_dict['predmode'] == 'combined':
            gain_param = img_and_gain[0, 1, ...]
            offset_param = img_and_gain[0, 2, ...]
    # In case there's a shift in reconstruction
    if params_dict['reg_final'] and 'gt' in params_dict:
        try:
            img_hr = motion.ecc_flow(params_dict['gt'], img_hr)[1]
        except:
            pass
    # If ground truth is provided, return metrics
    # NOTE(review): when 'gt' is NOT in params_dict, `metrics` is never
    # assigned but is referenced in `profile` below -- NameError.
    # Confirm callers always supply 'gt'.
    if 'gt' in params_dict:
        metrics = get_metrics(params_dict['gt'], img_hr)
    gain = gain_param.detach().cpu().numpy().reshape(Hl, Wl)
    offset = offset_param.detach().cpu().numpy().reshape(Hl, Wl)
    profile = {'loss_array': loss_array,
               'trained_model': best_state_dict,
               'metrics': metrics,
               'ecc_mats': affine_param.detach().cpu().numpy(),
               'gain': gain,
               'offset': offset}
    return img_hr, profile
def interp_convex(imstack, reg_stack, hr_size, params_dict):
    '''
    Super resolve from a stack of images using convex optimization

    Inputs:
        imstack: (nimg, Hl, Wl) stack of low resolution images
        reg_stack: (nimg, 2, 3) stack of affine matrices
        hr_size: High resolution image size
        params_dict: Dictionary containing parameters for optimization
            niters: Number of SIREN iterations
            batch_size: Batch size of data
            num_workers: Workers for data loading
            learning_rate: Learning rate for optimization
            prior_type: tv, or hessian
            lambda_prior: Prior weight
            optimize_reg: If True, optimize registration parameters
            visualize: If True, visualize reconstructions at each iteration
            gt: If visualize is true, gt is the ground truth image
            reg_final: If True, register the final result to gt
            lpip_func: If gt is true, evaluate perceptual similarity with
                this function

    Returns:
        im_hr: High resolution image
        profile: Dictionary containing the following:
            loss_array: Array with loss at each iteration
            trained_model: State dictionary for best model
            metrics: if gt is provided, this is a dictionary with:
                snrval: SNR of reconstruction
                psnrval: Peak SNR
                ssimval: SSIM
                lpipval: VGG perceptual metrics
    '''
    nimg, Hl, Wl = imstack.shape
    H, W = hr_size
    # Average super-resolution factor over both axes.
    scale_sr = 0.5*(H/Hl + W/Wl)
    # Internal constant
    img_every = 10
    lambda_offset = 10
    # Create loss functions
    criterion_fidelity = losses.L2Norm()
    if params_dict['prior_type'] == 'tv':
        criterion_prior = losses.TVNorm()
    elif params_dict['prior_type'] == 'hessian':
        criterion_prior = losses.HessianNorm()
    elif params_dict['prior_type'] == 'l2':
        criterion_prior = losses.L2Norm()
    else:
        raise ValueError('Prior not implemented')
    # Initialize solution with linear interpolation
    #im_init = torch.tensor(interp_SR(imstack, reg_stack, hr_size))
    im_init = torch.rand(H, W)
    gain_init = torch.rand(Hl, Wl)
    offset_init = torch.ones(Hl, Wl)*1e-2
    # Create the variable
    # NOTE(review): torch.autograd.Variable is deprecated; Parameter on
    # a tensor would suffice here and below.
    img_hr_param = torch.autograd.Variable(im_init[None, None, ...],
                                           requires_grad=True).cuda()
    img_hr_param = torch.nn.Parameter(img_hr_param)
    # Create gain parameter
    gain_param = torch.autograd.Variable(gain_init[None, None, ...],
                                         requires_grad=True).cuda()
    gain_param = torch.nn.Parameter(gain_param)
    # Create offset parameter
    offset_param = torch.autograd.Variable(offset_init[None, None, ...],
                                           requires_grad=True).cuda()
    offset_param = torch.nn.Parameter(offset_param)
    # Create parameters from affine matrices
    affine_mat = torch.tensor(reg_stack).cuda()
    affine_var = torch.autograd.Variable(affine_mat, requires_grad=True).cuda()
    affine_param = torch.nn.Parameter(affine_var)
    params = [img_hr_param] + [gain_param] + [offset_param]
    if params_dict['optimize_reg']:
        params += [affine_param]
        #params += [angles_param] + [translations_param]
    # Create an ADAM optimizer
    optimizer = torch.optim.Adam(lr=params_dict['learning_rate'],
                                 params=params)
    loss_array = np.zeros(params_dict['niters'])
    gt = torch.tensor(imstack).cuda()[:, None, ...]
    for epoch in tqdm.tqdm(range(params_dict['niters'])):
        # Generate low resolution images
        img_hr_cat = torch.repeat_interleave(img_hr_param, gt.shape[0], 0)
        if params_dict['integrator'] == 'area':
            img_hr_affine = kornia.geometry.warp_affine(img_hr_cat, affine_param,
                                                        (H, W), align_corners=False)
            img_lr = F.interpolate(img_hr_affine, (Hl, Wl), mode='area')
        else:
            img_lr = kornia.geometry.warp_affine(img_hr_cat,
                                                 affine_param/scale_sr,
                                                 (Hl, Wl), align_corners=False)
        gain_cat = torch.repeat_interleave(gain_param, gt.shape[0], 0)
        #offset_cat = torch.repeat_interleave(offset_param, gt.shape[0], 0)
        # NOTE(review): `masks` is computed but never used in the loss.
        masks = img_lr > 0
        #if epoch > params_dict['niters']:
        #    img_lr = img_lr*(gain_cat + offset_cat)
        #else:
        img_lr = img_lr + gain_cat
        mse_loss = criterion_fidelity(img_lr, gt)
        prior_loss = criterion_prior(img_hr_param)
        #offset_loss = criterion_prior(offset_param)
        loss = mse_loss + params_dict['lambda_prior']*prior_loss
        # lambda_offset*offset_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss = loss.item()
        loss_array[epoch] = train_loss
        if params_dict['visualize']:
            if epoch%img_every == 0:
                img_hr_cpu = img_hr_param.cpu().detach().numpy().reshape(H, W)
                v_idx = np.random.randint(nimg)
                img_lr_cpu = img_lr[v_idx, ...]
                img_lr_cpu = img_lr_cpu.cpu().detach().numpy().reshape(Hl, Wl)
                gain = gain_param.cpu().detach().numpy().reshape(Hl, Wl)
                offset = offset_param.cpu().detach().numpy().reshape(Hl, Wl)
                snrval = utils.psnr(params_dict['gt'], img_hr_cpu)
                ssimval = ssim_func(params_dict['gt'], img_hr_cpu)
                txt = 'PSNR: %.1f | SSIM: %.2f'%(snrval, ssimval)
                img_hr_ann = utils.textfunc(img_hr_cpu, txt)
                imcat = np.hstack((imstack[v_idx, ...], img_lr_cpu,
                                   gain, offset/offset.max()))
                imcat_full = np.hstack((params_dict['gt'], img_hr_ann))
                cv2.imshow('Recon LR', np.clip(imcat, 0, 1))
                cv2.imshow('Recon HR', np.clip(imcat_full, 0, 1))
                cv2.waitKey(1)
    # We are done, obtain the best model
    with torch.no_grad():
        # NOTE(review): the warped result is immediately overwritten on
        # the next line, so this warp_affine call has no effect --
        # confirm which of the two was intended.
        img_hr = kornia.geometry.warp_affine(img_hr_param, affine_param[[0], ...],
                                             (H, W))
        img_hr = img_hr_param.cpu().detach().numpy().reshape(H, W)
        gain = gain_param.cpu().detach().numpy().reshape(Hl, Wl)
        offset = offset_param.cpu().detach().numpy().reshape(Hl, Wl)
    # In case there's a shift in reconstruction
    if params_dict['reg_final'] and 'gt' in params_dict:
        try:
            img_hr = motion.ecc_flow(params_dict['gt'], img_hr)[1]
        except:
            pass
    # If ground truth is provided, return metrics
    # NOTE(review): when 'gt' is NOT in params_dict, `metrics` is never
    # assigned but is referenced in `profile` below -- NameError.
    if 'gt' in params_dict:
        metrics = get_metrics(params_dict['gt'], img_hr)
    profile = {'loss_array': loss_array, 'metrics': metrics,
               'gain': gain, 'offset': offset}
    return img_hr, profile
| 21,820 | 37.485009 | 84 | py |
DeepIR | DeepIR-main/modules/motion.py | #!/usr/bin/env python
'''
Subroutines for estimating motion between images
'''
import os
import sys
import tqdm
import pdb
import math
import numpy as np
from scipy import linalg
from scipy import interpolate
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from PIL import Image
from torchvision.transforms import Resize, Compose, ToTensor, Normalize
import kornia
from pystackreg import StackReg
import skimage
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import cv2
import utils
import thermal
def xy_mgrid(H, W):
    '''
    Build a flattened (x, y) coordinate grid for possibly non-square
    images.

    Inputs:
        H, W: Grid dimensions

    Outputs:
        mgrid: (H*W, 2) tensor of (x, y) pairs, each axis spanning
            [-1, 1]
    '''
    ygrid, xgrid = torch.meshgrid(torch.linspace(-1, 1, H),
                                  torch.linspace(-1, 1, W))
    return torch.stack((xgrid, ygrid), dim=-1).reshape(-1, 2)
def getEuclidianMatrix(theta, shift):
    '''
    Build a 2x3 Euclidean (rigid) transform: rotation by theta radians
    followed by translation by shift = (tx, ty).
    '''
    c = np.cos(theta)
    s = np.sin(theta)
    return np.array([[c, -s, shift[0]],
                     [s,  c, shift[1]]])
def fb_flow(frame1, frame2):
    '''
    Dense Farneback optical flow between two frames.

    Returns frame2 warped onto frame1, the raw flow field, an HSV-coded
    flow visualization, and a red/blue overlay of the aligned frames.
    '''
    H, W = frame1.shape
    Y, X = np.mgrid[:H, :W]
    flow = cv2.calcOpticalFlowFarneback(frame1,
                                        frame2,
                                        None, 0.5, 3, 15, 3, 5, 1.2, 0)
    # HSV visualization: hue encodes direction, value encodes magnitude.
    hsv = np.zeros((H, W, 3), dtype=np.uint8)
    hsv[..., 1] = 255
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang*180/np.pi/2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    # Resample frame2 along the flow to align it with frame1.
    frame2_warped = cv2.remap(frame2.astype(np.float32),
                              (X + flow[..., 0]).astype(np.float32),
                              (Y + flow[..., 1]).astype(np.float32),
                              cv2.INTER_LINEAR)
    rgb_comp = np.zeros((H, W, 3))
    rgb_comp[..., 0] = frame1
    rgb_comp[..., 2] = frame2_warped
    return frame2_warped, flow, rgb, rgb_comp
def ecc_flow(im1, im2, warp_mode=cv2.MOTION_HOMOGRAPHY, niters=1000, eps=1e-8):
    '''
    Register images using OpenCV's intensity-based (ECC) image alignment.

    Inputs:
        im1, im2: Images to register. im2 will be registered to im1.
        warp_mode: One of cv2.MOTION_*** . Default is MOTION_HOMOGRAPHY
        niters: Number of ECC iterations
        eps: Stopping tolerance

    Outputs:
        warp_matrix: Warping matrix (3x3 for homography, else 2x3)
        im2_aligned: Second image warped to first image's coordinates
        flow: Flow coordinates to go from im2 to im1, normalized so a
            full-image displacement equals 2 (i.e. [-1, 1] units)

    https://learnopencv.com/image-alignment-ecc-in-opencv-c-python/
    '''
    # Find size of image1
    sz = im1.shape
    # Define 2x3 or 3x3 matrices and initialize the matrix to identity
    if warp_mode == cv2.MOTION_HOMOGRAPHY :
        warp_matrix = np.eye(3, 3, dtype=np.float32)
    else :
        warp_matrix = np.eye(2, 3, dtype=np.float32)
    # Define termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, niters, eps)
    # Run the ECC algorithm. The results are stored in warp_matrix.
    (cc, warp_matrix) = cv2.findTransformECC(im1, im2, warp_matrix, warp_mode,
                                             criteria=criteria, inputMask=None,
                                             gaussFiltSize=5)
    if warp_mode == cv2.MOTION_HOMOGRAPHY :
        # Use warpPerspective for Homography
        flags = cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP
        im2_aligned = cv2.warpPerspective(im2, warp_matrix, (sz[1],sz[0]),
                                          flags=flags)
    else :
        # Use warpAffine for Translation, Euclidean and Affine
        flags = cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP
        im2_aligned = cv2.warpAffine(im2, warp_matrix, (sz[1],sz[0]),
                                     flags=flags)
    # Create flow coordinates: apply the warp to homogeneous pixel
    # coordinates and take the displacement.
    Y, X = np.mgrid[:sz[0], :sz[1]]
    coords = np.ones((3, sz[0]*sz[1]))
    coords[0, :] = X.reshape(1, -1)
    coords[1, :] = Y.reshape(1, -1)
    coords_new = warp_matrix.dot(coords)
    if warp_mode == cv2.MOTION_HOMOGRAPHY:
        # Perspective divide for homography coordinates.
        coords_new = coords_new[:2, :]/coords_new[2, :]
    flow = np.zeros((sz[0], sz[1], 2), dtype=np.float32)
    # Scale displacements to normalized [-1, 1]-style units.
    flow[..., 0] = (coords_new[0, :] - coords[0, :]).reshape(sz)*(2/sz[1])
    flow[..., 1] = (coords_new[1, :] - coords[1, :]).reshape(sz)*(2/sz[0])
    return warp_matrix, im2_aligned, flow
def get_SR_data(im, scale, nimg=10, config=None):
    '''
    Wrapper function to get real or simulation data

    Inputs:
        im: Image or image stack
        scale: Scale for resolution
        nimg: Number of images
        config: Dictionary containing the following files:
            simulation: If True, im will be converted to an image stack,
                else the input will be treated as imstack
            get_gt: If True, and simulation is also True, return groundtruth
                registration matrices
            shift_max, theta_max: See get_imstack
            downsample: If True, the imstack will be a downsampled version of
                the data. Only applicable if simulation is false
            tau, noise_snr: For simulated data, these represente max. photon
                count, and readout noise
            add_fpn: If True, add fixed pattern noise to data
            fpn_vmin: Minimum value of fpn
            fpn_method: 'col' or 'both'
            fpn_rank: If 'both', how many patterns to add

    Outputs:
        im: Ground truth high resolution image. Only useful if simulation
            is true, or simulation is false, and downsample is true. Else
            it is just a nearest neighbor upsampling
        imstack: (nimg, Hl, Wl) stack of low resolution images
        ecc_mats: (nimg, 2, 3) affine matrices
    '''
    # Extract configuration values (defaults when no config is given).
    if config is None:
        simulation = True
        get_gt = False
        shift_max = 10
        theta_max = np.pi/12
        downsample = False
        add_noise = False
        tau = None
        noise_snr = None
        add_fpn = False
        fpn_vmin = 0.9
        fpn_method = 'col'
        fpn_rank = 1
    else:
        simulation = not config['real']
        get_gt = config['get_gt']
        # Shifts are specified at LR scale in the config.
        shift_max = config['shift_max']*scale
        theta_max = config['theta_max']*np.pi/180
        downsample = config['downsample']
        add_noise = config['add_noise']
        tau = config['tau']
        noise_snr = config['noise_snr']
        add_fpn = config['add_fpn']
        fpn_vmin = config['fpn_vmin']
        fpn_method = config['fpn_method']
        fpn_rank = config['fpn_rank']
    if simulation is True:
        H, W = im.shape
        imstack, _, _, mats = get_imstack(im, scale, shift_max,
                                          theta_max, nimg)
        # NOTE(review): in-place division mutates the caller's array.
        imstack /= im.max()
        im /= im.max()
        if add_noise:
            imstack = utils.measure(imstack, noise_snr, tau)
        if add_fpn:
            fpn = thermal.create_fpn(imstack.shape[1:], vmin=fpn_vmin,
                                     method=fpn_method, rank=fpn_rank)
            imstack = imstack*fpn[np.newaxis, ...]
        _, Hl, Wl = imstack.shape
        # NOTE(review): invert_regstack/register_stack are called
        # unqualified -- presumably defined elsewhere in this module.
        if get_gt:
            ecc_mats = invert_regstack(mats)
        else:
            ecc_mats = register_stack(imstack, (Hl, Wl))[:, :2, :]
    else:
        _, H, W = im.shape
        imstack = np.copy(im[:nimg, ...], order='C')
        interp = cv2.INTER_AREA
        if downsample:
            imstack_lr = np.zeros((nimg, H//scale, W//scale))
            Hl, Wl = H//scale, W//scale
            for idx in range(nimg):
                imstack_lr[idx, ...] = cv2.resize(imstack[idx, ...],
                                                  (W//scale, H//scale),
                                                  interpolation=interp)
            # Frame 0 of the original stack serves as the HR reference.
            im = imstack[0, ...]
            imstack = imstack_lr.astype(np.float32)
        else:
            im = cv2.resize(imstack[0, ...], (W*scale, H*scale))
            Hl, Wl = H, W
            H, W = Hl*scale, Wl*scale
        ecc_mats = register_stack(imstack, (Hl, Wl))[:, :2, :]
        # Normalize by the reference maximum (imstack first, since the
        # next line rescales im itself).
        imstack /= im.max()
        im /= im.max()
    return im, imstack, ecc_mats
def get_random_affine(nimg, shift_max=10, theta_max=np.pi/12, perspective=False):
    '''
    Draw random Euclidean (rotation + integer translation) matrices.

    Inputs:
        nimg: Number of matrices to generate
        shift_max: Translations sampled from integers in [-shift_max, shift_max)
        theta_max: Rotations sampled uniformly from [-theta_max, theta_max)
        perspective: If True return full (nimg, 3, 3) homogeneous matrices,
            otherwise only the top (nimg, 2, 3) rows

    Outputs:
        affine_mats: (nimg, 3, 3) or (nimg, 2, 3) matrices; the first entry
            is always the identity transform
    '''
    rand_shifts = np.random.randint(-shift_max, shift_max, size=[nimg, 2])
    rand_thetas = (2*np.random.rand(nimg) - 1)*theta_max
    # The reference (first) image is never transformed
    rand_shifts[0, ...] = 0
    rand_thetas[0] = 0
    affine_mats = np.zeros((nimg, 3, 3))
    affine_mats[:, 2, 2] = 1.0
    for k, (theta, shift) in enumerate(zip(rand_thetas, rand_shifts)):
        affine_mats[k, :2, :] = getEuclidianMatrix(theta, shift)
    # Force the first matrix to the exact identity
    affine_mats[0, :, :] = np.eye(3)
    if perspective is False:
        affine_mats = affine_mats[:, :2, :]
    return affine_mats
def get_imstack(im, scale, shift_max=10, theta_max=np.pi/12, nshifts=5):
    '''
    Obtain synthetically generated, low resolution images of im, with
    random rigid-body (shift + rotation) motion.
    Inputs:
        im: Input high resolution image of shape (H, W)
        scale: Downsampling factor (> 1)
        shift_max: Maximum absolute shift in pixels (continuous valued)
        theta_max: Maximum angle of rotation in radians
        nshifts: Number of shifted images to obtain
    Outputs:
        imstack: (nshifts, Hl, Wl) stack of low resolution images
        Xstack: (nshifts, Hl, Wl) X sampling coordinates, normalized to [-1, 1]
        Ystack: (nshifts, Hl, Wl) Y sampling coordinates, normalized to [-1, 1]
        mats: (nshifts, 2, 3) Euclidean matrices used to warp each image
    '''
    H, W = im.shape
    # Continuous shifts in [-shift_max, shift_max) (the integer variant
    # below was deliberately replaced)
    #shifts = np.random.randint(-shift_max, shift_max, size=[nshifts, 2])
    shifts = -shift_max + 2*shift_max*np.random.rand(nshifts, 2)
    thetas = (2*np.random.rand(nshifts)-1)*theta_max
    Y, X = np.mgrid[:H, :W]
    # Resize once just to discover the low resolution grid size
    tmp = cv2.resize(im, None, fx=1/scale, fy=1/scale)
    Hl, Wl = tmp.shape
    imstack = np.zeros((nshifts, Hl, Wl), dtype=np.float32)
    Xstack = np.zeros_like(imstack)
    Ystack = np.zeros_like(imstack)
    mats = np.zeros((nshifts, 2, 3))
    # Ensure first shift and theta are zero (reference frame)
    shifts[0, :] = 0
    thetas[0] = 0
    # Homogeneous pixel coordinates, one row per full-res pixel
    coords = np.hstack((X.reshape(-1, 1), Y.reshape(-1, 1), np.ones((H*W, 1))))
    for idx in range(nshifts):
        shift = shifts[idx, :]
        theta = thetas[idx]
        mat = getEuclidianMatrix(theta, shift)
        mats[idx, ...] = mat
        # Warp the full resolution sampling grid with the Euclidean matrix
        coords_new = mat.dot(coords.T).T
        Xnew = coords_new[:, 0].reshape(H, W)
        Ynew = coords_new[:, 1].reshape(H, W)
        # Shrink the warped grid to low resolution, then resample the image
        Xnew = cv2.resize(Xnew, (Wl, Hl), interpolation=cv2.INTER_LINEAR)
        Ynew = cv2.resize(Ynew, (Wl, Hl), interpolation=cv2.INTER_LINEAR)
        imstack[idx, ...] = cv2.remap(im, Xnew.astype(np.float32),
                                      Ynew.astype(np.float32),
                                      cv2.INTER_LINEAR)
        # Normalize coordinates to [-1, 1]
        Xstack[idx, ...] = 2*Xnew/W - 1
        Ystack[idx, ...] = 2*Ynew/H - 1
    return imstack, Xstack, Ystack, mats
def get_downsampled_shifted_images(im, scale, shift_max=10,
                                   theta_max=np.pi/12, nshifts=5,
                                   perturb_coords=False):
    '''
    Obtain synthetically generated, low resolution images of im, with
    random integer shifts and rotations about the image center.
    Inputs:
        im: Input high resolution image of shape (H, W)
        scale: Downsampling factor (> 1)
        shift_max: Maximum absolute integer shift in pixels
        theta_max: Maximum angle of rotation in radians
        nshifts: Number of shifted images to obtain
        perturb_coords: If True, perturb the returned coordinates (~1%
            multiplicative noise) to study the effect of erroneous
            registration; the images themselves stay unperturbed
    Outputs:
        imstack: (nshifts, Hl, Wl) stack of low resolution images
        Xstack: (nshifts, Hl, Wl) X coordinates, normalized to [-1, 1]
        Ystack: (nshifts, Hl, Wl) Y coordinates, normalized to [-1, 1]
        shifts: (nshifts, 2) applied shifts
        thetas: (nshifts,) applied rotation angles
    '''
    H, W = im.shape
    shifts = np.random.randint(-shift_max, shift_max, size=[nshifts, 2])
    thetas = (2*np.random.rand(nshifts)-1)*theta_max
    Y, X = np.mgrid[:H, :W]
    # Resize once just to discover the low resolution grid size
    tmp = cv2.resize(im, None, fx=1/scale, fy=1/scale)
    Hl, Wl = tmp.shape
    imstack = np.zeros((nshifts, Hl, Wl), dtype=np.float32)
    Xstack = np.zeros_like(imstack)
    Ystack = np.zeros_like(imstack)
    # Ensure first shift and theta are zero (reference frame)
    shifts[0, :] = 0
    thetas[0] = 0
    for idx in range(nshifts):
        shift = shifts[idx, :]
        theta = thetas[idx]
        # Shift
        Xshifted = X - shift[1]
        Yshifted = Y - shift[0]
        # Rotate about the image center
        Xrot = (Xshifted-W/2)*np.cos(theta) - (Yshifted-H/2)*np.sin(theta) + W/2
        Yrot = (Xshifted-W/2)*np.sin(theta) + (Yshifted-H/2)*np.cos(theta) + H/2
        Xnew = cv2.resize(Xrot, (Wl, Hl), interpolation=cv2.INTER_AREA)
        Ynew = cv2.resize(Yrot, (Wl, Hl), interpolation=cv2.INTER_AREA)
        imstack[idx, ...] = cv2.remap(im, Xnew.astype(np.float32),
                                      Ynew.astype(np.float32), cv2.INTER_AREA)
        if perturb_coords:
            # Recompute the same warp with ~1% multiplicative noise on the
            # shift and angle, so the returned coordinates deliberately
            # mismatch the rendered images
            Xshifted = X - (1 + np.random.randn(1)*1e-2)*shift[1]
            Yshifted = Y - (1 + np.random.randn(1)*1e-2)*shift[0]
            theta = (1 + np.random.randn(1)*1e-2)*theta
            Xrot = (Xshifted-W/2)*np.cos(theta) -\
                (Yshifted-H/2)*np.sin(theta) + W/2
            Yrot = (Xshifted-W/2)*np.sin(theta) +\
                (Yshifted-H/2)*np.cos(theta) + H/2
            Xnew = cv2.resize(Xrot, (Wl, Hl), interpolation=cv2.INTER_AREA)
            Ynew = cv2.resize(Yrot, (Wl, Hl), interpolation=cv2.INTER_AREA)
        # Normalize coordinates to [-1, 1]
        Xstack[idx, ...] = 2*Xnew/W - 1
        Ystack[idx, ...] = 2*Ynew/H - 1
    return imstack, Xstack, Ystack, shifts, thetas
def register_stack(imstack, full_res, method=StackReg.RIGID_BODY):
    '''
    Register a stack of images against its first frame using pystackreg.
    Inputs:
        imstack: nimg x H x W stack of images
        full_res: (Hr, Wr) resolution at which images will be super resolved;
            registration is performed after upsampling to this size
        method: Method to use for registration. Default is StackReg.RIGID_BODY
    Outputs:
        reg_mats: per-image registration matrices as returned by
            StackReg.register_stack. NOTE(review): callers in this file
            slice the result with [:, :2, :], which indicates these are
            full homogeneous (nimg, 3, 3) matrices — confirm against the
            pinned pystackreg version.
    '''
    nimg, H, W = imstack.shape
    Hr, Wr = full_res
    imstack_full = np.zeros((nimg, Hr, Wr))
    # Upsample the images so registration happens at the target resolution
    for idx in range(nimg):
        imstack_full[idx, ...] = cv2.resize(imstack[idx, ...], (Wr, Hr),
                                            interpolation=cv2.INTER_AREA)
    # Now register the stack
    reg = StackReg(method)
    reg_mats = reg.register_stack(imstack_full, reference='first', verbose=True)
    return reg_mats
def invert_regstack(regstack):
    '''
    Invert a stack of (2, 3) affine registration matrices.

    Inputs:
        regstack: (nimg, 2, 3) affine matrices
    Outputs:
        regstack_inv: (nimg, 2, 3) matrices of the inverse transforms
    '''
    inverted = np.zeros_like(regstack)
    bottom = np.array([[0.0, 0.0, 1.0]])
    for k in range(regstack.shape[0]):
        # Promote to a homogeneous 3x3 matrix, invert, drop the last row
        full = np.vstack((regstack[k, ...], bottom))
        inverted[k, ...] = np.linalg.inv(full)[:2, :]
    return inverted
def mat2coords(reg_stack, full_res, low_res):
    '''
    Compute normalized 2D sampling coordinates from affine matrices.
    Inputs:
        reg_stack: (nimg, 2, 3) registration stack
        full_res: (H, W) full (super-resolved) resolution
        low_res: (Hl, Wl) resolution of the output coordinate maps
    Outputs:
        Xstack: (nimg, Hl, Wl) X coordinates, normalized to [-1, 1]
        Ystack: (nimg, Hl, Wl) Y coordinates, normalized to [-1, 1]
    '''
    nimg, _, _ = reg_stack.shape
    H, W = full_res
    Y, X = np.mgrid[:H, :W]
    Hl, Wl = low_res
    # Homogeneous pixel coordinates, one row per full-res pixel
    coords = np.hstack((X.reshape(-1, 1), Y.reshape(-1, 1), np.ones((H*W, 1))))
    Xstack = np.zeros((nimg, Hl, Wl), dtype=np.float32)
    Ystack = np.zeros_like(Xstack)
    last_row = np.zeros((1, 3))
    last_row[0, 2] = 1
    for idx in range(nimg):
        # Invert the registration: map output pixels back to source pixels
        mat = linalg.inv(np.vstack((reg_stack[idx, ...], last_row)))
        coords_new = mat.dot(coords.T).T
        # Normalize to [-1, 1], then shrink the maps to the low resolution
        Xstack[idx, ...] = cv2.resize(2*coords_new[:, 0].reshape(H, W)/W - 1,
                                      (Wl, Hl), interpolation=cv2.INTER_AREA)
        Ystack[idx, ...] = cv2.resize(2*coords_new[:, 1].reshape(H, W)/H - 1,
                                      (Wl, Hl), interpolation=cv2.INTER_AREA)
    return Xstack, Ystack
def param2theta(params, w, h):
    '''
    Convert affine matrices (pixel coordinates) into the normalized
    `theta` parameterization expected by torch's affine_grid/grid_sample.
    Inputs:
        params: nimg x 2 x 3 affine matrices
        w, h: Width and height of the image
    Outputs:
        theta: nimg x 2 x 3 matrices to use with grid_sample (for example)
    Reference:
        https://discuss.pytorch.org/t/how-to-convert-an-affine-transform-matrix-into-theta-to-use-torch-nn-functional-affine-grid/24315/4
    '''
    last_row = np.zeros((1, 3), dtype=np.float32)
    last_row[0, 2] = 1
    theta = np.zeros_like(params)
    for idx in range(params.shape[0]):
        # Work with the inverse transform: grid_sample pulls output pixels
        # from input locations, the opposite direction of the warp itself
        param = np.vstack((params[idx, ...], last_row))
        param = np.linalg.inv(param)
        theta[idx,0,0] = param[0,0]
        theta[idx,0,1] = param[0,1]*h/w
        # Translation rescaled from pixels to the [-1, 1] grid; the
        # commented variant below uses the raw (unscaled) linear terms
        theta[idx,0,2] = param[0,2]*2/w + theta[idx,0,0] + theta[idx,0,1] - 1
        #theta[idx, 0, 2] = param[0, 2]*2/w + param[0, 0] + param[0, 1] - 1
        theta[idx,1,0] = param[1,0]*w/h
        theta[idx,1,1] = param[1,1]
        theta[idx,1,2] = param[1,2]*2/h + theta[idx,1,0] + theta[idx,1,1] - 1
        #theta[idx, 1, 2] = param[1, 2]*2/h + param[1, 0] + param[1, 1] - 1
    return theta
def affine2rigid(mats):
    '''
    Compute rigid body transformations from affine matrices.
    Inputs:
        mats: (nmats, 2, 3) affine matrices
    Outputs:
        angles: (nmats,) rotation angles, recovered as arccos of the
            (0, 0) entry. Note only the magnitude of the rotation is
            recovered; the sign (from the off-diagonal terms) is discarded.
        translations: (nmats, 2) translation array
    '''
    # Clip into arccos's domain: matrices estimated from data may have
    # cos(theta) entries that drift marginally outside [-1, 1], which
    # would otherwise produce NaN angles.
    angles = np.arccos(np.clip(mats[:, 0, 0], -1.0, 1.0))
    translations = mats[:, :, 2]
    return angles, translations
def get_transformed_coords(theta, imsize):
    '''
    Generate flattened sampling coordinates for a batch of affine matrices.

    Inputs:
        theta: (B, 2, 3) batch of affine matrices (grid_sample convention)
        imsize: (H, W) spatial size of the sampling grid
    Outputs:
        (B, H*W, 2) normalized sampling coordinates
    '''
    nbatch = theta.shape[0]
    height, width = imsize
    grid = F.affine_grid(theta, (nbatch, 1, height, width))
    return grid.reshape(-1, height*width, 2)
def interp_lr(imref, coords, renderer):
    '''
    Warp an (upsampled) reference image and re-render it at low resolution.

    Inputs:
        imref: (1, 1, H, W) low resolution image, upsampled
        coords: (B, H, W, 2) high resolution sampling coordinates
        renderer: Object whose integrator() downsamples the warped images
    Outputs:
        im_lr: (B, 1, Hl, Wl) low resolution transformed images
    '''
    nbatch = coords.shape[0]
    # Replicate the single reference over the batch, then sample it at the
    # requested coordinates
    imref_batch = torch.repeat_interleave(imref, nbatch, 0)
    im_hr = F.grid_sample(imref_batch, coords, mode='bilinear',
                          align_corners=False)
    return renderer.integrator(im_hr)
def register_stack_ecc(imstack, full_res, method=cv2.MOTION_EUCLIDEAN):
    '''
    Register a stack of images against its first frame using ECC and
    return sampling coordinates.
    Inputs:
        imstack: nimg x H x W stack of images
        full_res: (Hr, Wr) resolution at which images will be super resolved
        method: cv2 motion model to use for ECC registration
    Outputs:
        Xstack: (nimg, H, W) X coordinates, normalized to [-1, 1]
        Ystack: (nimg, H, W) Y coordinates, normalized to [-1, 1]
        mask: (nimg,) array; 1 for images that were successfully registered
        ecc_mats: (nimg, 2, 3) registration matrices ((nimg, 3, 3) when
            method is cv2.MOTION_HOMOGRAPHY)
        alignment_err: (nimg,) mean absolute alignment error per frame
    '''
    nimg, H, W = imstack.shape
    Hr, Wr = full_res
    mask = np.zeros(nimg)
    alignment_err = np.zeros(nimg)
    Xstack = np.zeros((nimg, H, W), dtype=np.float32)
    Ystack = np.zeros((nimg, H, W), dtype=np.float32)
    # Every frame is registered against the (upsampled) first frame
    imref = cv2.resize(imstack[0, ...], (Wr, Hr),
                       interpolation=cv2.INTER_LINEAR)
    mask[0] = 1
    # Identity coordinate grid, normalized to [-1, 1]
    Y, X = np.mgrid[:Hr, :Wr]
    X = 2*X/Wr - 1
    Y = 2*Y/Hr - 1
    Xstack[0, ...] = cv2.resize(X, (W, H), interpolation=cv2.INTER_LINEAR)
    Ystack[0, ...] = cv2.resize(Y, (W, H), interpolation=cv2.INTER_LINEAR)
    if method == cv2.MOTION_HOMOGRAPHY:
        ecc_mats = np.zeros((nimg, 3, 3))
        ecc_mats[0, 2, 2] = 1
    else:
        ecc_mats = np.zeros((nimg, 2, 3))
    # First image is registered ... to itself (identity matrix)
    ecc_mats[0, 0, 0] = 1
    ecc_mats[0, 1, 1] = 1
    for idx in tqdm.tqdm(range(1, nimg)):
        im2 = cv2.resize(imstack[idx, ...], (Wr, Hr),
                         interpolation=cv2.INTER_LINEAR)
        try:
            mat, im2_aligned, flow = ecc_flow(imref, im2, warp_mode=method)
            mask[idx] = 1
            ecc_mats[idx, :] = mat
            Xstack[idx, ...] = cv2.resize(X - flow[..., 0], (W, H),
                                          interpolation=cv2.INTER_LINEAR)
            Ystack[idx, ...] = cv2.resize(Y - flow[..., 1], (W, H),
                                          interpolation=cv2.INTER_LINEAR)
            # Compare only over pixels the warp actually covered
            spatial_mask = (im2_aligned != 0)
            alignment_err[idx] = abs((imref - im2_aligned)*spatial_mask).mean()
        except Exception:
            # ECC failed to converge for this frame: deliberately skip it
            # (best-effort registration). Narrowed from a bare `except:`,
            # which would also swallow KeyboardInterrupt/SystemExit.
            mask[idx] = 0
            continue
    # Now return the coordinates
    return Xstack, Ystack, mask, ecc_mats, alignment_err
def prune_stack(imstack, ecc_mats, full_res, thres=None):
    '''
    Prune a stack of images which are not well registered.
    Inputs:
        imstack: nimg x H x W stack of images
        ecc_mats: nimg x 2 x 3 stack of transformation matrices
        full_res: Full resolution size
        thres: Threshold on the mean normalized registration error above
            which an image is rejected. If None, a fixed threshold of 1
            is used.
    Outputs:
        imstack: nimg_good x H x W stack of good images
        ecc_mats: nimg_good x 2 x 3 stack of good transformation matrices
        mask: per-image keep/reject mask (error < thres)
        imdiff: per-image absolute difference maps for the retained images
    '''
    nimg, Hl, Wl = imstack.shape
    H, W = full_res
    if thres is None:
        thres = 1
    # Upsample the reference frame and warp it with each estimated affine;
    # a well-registered frame should then match the warped reference
    imref = cv2.resize(imstack[0, ...], (W, H), interpolation=cv2.INTER_AREA)
    imten = torch.tensor(imref).cuda()[None, None, ...]
    imstack_ten = torch.tensor(imstack).cuda()[:, None, ...]
    imten = torch.repeat_interleave(imten, int(nimg), 0)
    mat = torch.tensor(ecc_mats.astype(np.float32)).cuda()
    # NOTE(review): kornia.warp_affine is the old top-level API; newer
    # kornia exposes this under kornia.geometry.transform — confirm the
    # pinned kornia version.
    imtrans = kornia.warp_affine(imten, mat, (Hl, Wl))
    imdiff = abs(imtrans - imstack_ten).cpu()[:, 0, ...]
    # Normalize the difference by frame intensity (plus a small floor to
    # avoid division by ~zero) before thresholding
    diff_array = (imdiff/(imstack + 1e-2*imstack.max())).mean(-1).mean(-1)
    mask = diff_array < thres
    imstack = np.copy(imstack[mask == 1, ...], order='C')
    ecc_mats = np.copy(ecc_mats[mask == 1, ...], order='C')
    imdiff = imdiff[mask == 1, ...]
    return imstack, ecc_mats, mask, imdiff
def flow2rgb(flow):
    '''
    Visualize a dense optical flow field as an RGB image.

    Inputs:
        flow: (H, W, 2) flow field; channel 0 is the x component and
            channel 1 is the y component
    Outputs:
        (H, W, 3) uint8 RGB image; hue encodes flow direction and
        brightness encodes (min-max normalized) flow magnitude
    '''
    height, width, _ = flow.shape
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv_im = np.zeros((height, width, 3), dtype=np.uint8)
    # Hue from angle (OpenCV hue range is [0, 180)), full saturation,
    # value from normalized magnitude
    hsv_im[..., 0] = ang*180/np.pi/2
    hsv_im[..., 1] = 255
    hsv_im[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    return cv2.cvtColor(hsv_im, cv2.COLOR_HSV2RGB)
DeepIR | DeepIR-main/modules/deep_prior.py | #!/usr/bin/env
'''
One single file for all things Deep Image Prior
'''
import os
import sys
import tqdm
import pdb
import numpy as np
import torch
from torch import nn
import torchvision
import cv2
from dmodels.skip import skip
from dmodels.texture_nets import get_texture_nets
from dmodels.resnet import ResNet
from dmodels.unet import UNet
class Downsampler(nn.Module):
    '''
    Anti-aliased downsampling implemented as a fixed (non-learned)
    strided convolution with a resampling kernel.
    http://www.realitypixels.com/turk/computergraphics/ResamplingFilters.pdf

    Args:
        n_planes: number of channels; the kernel is applied per channel
        factor: integer downsampling factor (used as the conv stride)
        kernel_type: preset ('lanczos2', 'lanczos3', 'gauss12', 'gauss1sq2')
            or a bare 'lanczos'/'gauss'/'box' with explicit parameters
        phase: 0 or 0.5 kernel phase
        kernel_width, support, sigma: explicit kernel parameters; required
            only for the bare kernel types
        preserve_size: if True, replication-pad the input before the
            strided convolution
    '''
    def __init__(self, n_planes, factor, kernel_type, phase=0,
                 kernel_width=None, support=None, sigma=None,
                 preserve_size=False):
        super(Downsampler, self).__init__()
        assert phase in [0, 0.5], 'phase should be 0 or 0.5'
        # Presets expand to a concrete kernel family + width/support/sigma
        if kernel_type == 'lanczos2':
            support = 2
            kernel_width = 4 * factor + 1
            kernel_type_ = 'lanczos'
        elif kernel_type == 'lanczos3':
            support = 3
            kernel_width = 6 * factor + 1
            kernel_type_ = 'lanczos'
        elif kernel_type == 'gauss12':
            kernel_width = 7
            sigma = 1/2
            kernel_type_ = 'gauss'
        elif kernel_type == 'gauss1sq2':
            kernel_width = 9
            sigma = 1./np.sqrt(2)
            kernel_type_ = 'gauss'
        elif kernel_type in ['lanczos', 'gauss', 'box']:
            kernel_type_ = kernel_type
        else:
            assert False, 'wrong name kernel'
        # note that `kernel width` will be different to actual size for phase = 1/2
        self.kernel = get_kernel(factor, kernel_type_, phase, kernel_width,
                                 support=support, sigma=sigma)
        # Fixed depthwise-style convolution: the same 2D kernel is written
        # on the diagonal (i, i) filter slots only, so channels do not mix;
        # weights and biases are zeroed and never trained here
        downsampler = nn.Conv2d(n_planes, n_planes,
                                kernel_size=self.kernel.shape,
                                stride=factor, padding=0)
        downsampler.weight.data[:] = 0
        downsampler.bias.data[:] = 0
        kernel_torch = torch.from_numpy(self.kernel)
        for i in range(n_planes):
            downsampler.weight.data[i, i] = kernel_torch
        self.downsampler_ = downsampler
        if preserve_size:
            # Pad so the strided convolution does not shrink the output
            # beyond the plain input_size/factor reduction
            if self.kernel.shape[0] % 2 == 1:
                pad = int((self.kernel.shape[0] - 1) / 2.)
            else:
                pad = int((self.kernel.shape[0] - factor) / 2.)
            self.padding = nn.ReplicationPad2d(pad)
        self.preserve_size = preserve_size
    def forward(self, input):
        # Optionally pad, then apply the fixed strided convolution; the
        # (possibly padded) input is stashed on self.x for inspection
        if self.preserve_size:
            x = self.padding(input)
        else:
            x= input
        self.x = x
        return self.downsampler_(x)
def get_kernel(factor, kernel_type, phase, kernel_width,
               support=None, sigma=None):
    '''
    Build a square 2D resampling kernel for the Downsampler module.

    Inputs:
        factor: Downsampling factor; scales the lanczos window
        kernel_type: One of 'lanczos', 'gauss' or 'box'
        phase: 0 or 0.5; half-phase kernels are one tap smaller per side
        kernel_width: Nominal side length of the kernel
        support: Lanczos support (number of lobes); required for 'lanczos'
        sigma: Gaussian standard deviation; required for 'gauss'

    Outputs:
        kernel: 2D float kernel, normalized to sum to 1
    '''
    assert kernel_type in ['lanczos', 'gauss', 'box']
    if phase == 0.5 and kernel_type != 'box':
        kernel = np.zeros([kernel_width - 1, kernel_width - 1])
    else:
        kernel = np.zeros([kernel_width, kernel_width])
    if kernel_type == 'box':
        assert phase == 0.5, 'Box filter is always half-phased'
        kernel[:] = 1./(kernel_width * kernel_width)
    elif kernel_type == 'gauss':
        assert sigma, 'sigma is not specified'
        assert phase != 0.5, 'phase 1/2 for gauss not implemented'
        center = (kernel_width + 1.)/2.
        sigma_sq = sigma * sigma
        for i in range(1, kernel.shape[0] + 1):
            for j in range(1, kernel.shape[1] + 1):
                # Distances are halved (as in the upstream deep-image-prior
                # implementation), which effectively doubles sigma
                di = (i - center)/2.
                dj = (j - center)/2.
                kernel[i - 1][j - 1] = np.exp(-(di * di + dj * dj)/(2 * sigma_sq))
                kernel[i - 1][j - 1] = kernel[i - 1][j - 1]/(2. * np.pi * sigma_sq)
    elif kernel_type == 'lanczos':
        assert support, 'support is not specified'
        center = (kernel_width + 1) / 2.
        for i in range(1, kernel.shape[0] + 1):
            for j in range(1, kernel.shape[1] + 1):
                if phase == 0.5:
                    di = abs(i + 0.5 - center) / factor
                    dj = abs(j + 0.5 - center) / factor
                else:
                    di = abs(i - center) / factor
                    dj = abs(j - center) / factor
                # Separable windowed sinc: sinc(d) * sinc(d / support)
                val = 1
                if di != 0:
                    val = val * support * np.sin(np.pi * di) * np.sin(np.pi * di / support)
                    val = val / (np.pi * np.pi * di * di)
                if dj != 0:
                    val = val * support * np.sin(np.pi * dj) * np.sin(np.pi * dj / support)
                    val = val / (np.pi * np.pi * dj * dj)
                kernel[i - 1][j - 1] = val
    else:
        assert False, 'wrong method name'
    kernel /= kernel.sum()
    return kernel
def get_noise(input_depth, method, spatial_size, noise_type='u', var=1./10):
    """Build the (1, input_depth, H, W) input tensor for a deep image prior.

    Args:
        input_depth: number of channels in the tensor
        method: 'noise' fills the tensor with random values scaled by
            `var`; 'meshgrid' fills it with a normalized coordinate grid
            (requires input_depth == 2)
        spatial_size: int or (H, W) spatial size of the tensor
        noise_type: 'u' for uniform noise, 'n' for normal noise
        var: scale factor applied to the noise (standard deviation scaler)
    """
    if isinstance(spatial_size, int):
        spatial_size = (spatial_size, spatial_size)
    if method == 'noise':
        net_input = torch.zeros(1, input_depth,
                                spatial_size[0], spatial_size[1])
        fill_noise(net_input, noise_type)
        net_input *= var
    elif method == 'meshgrid':
        assert input_depth == 2
        # Coordinates span [0, 1] in both directions
        xs = np.arange(0, spatial_size[1])/float(spatial_size[1] - 1)
        ys = np.arange(0, spatial_size[0])/float(spatial_size[0] - 1)
        X, Y = np.meshgrid(xs, ys)
        net_input = np_to_torch(np.concatenate([X[None, :], Y[None, :]]))
    else:
        assert False
    return net_input
def np_to_torch(img_np):
    '''Convert a numpy image to a torch tensor with a leading singleton
    batch dimension: C x W x H [0..1] -> 1 x C x W x H [0..1].
    '''
    return torch.from_numpy(img_np).unsqueeze(0)
def torch_to_np(img_var):
    '''Convert a torch tensor back to a numpy array, dropping the leading
    batch dimension: 1 x C x W x H [0..1] -> C x W x H [0..1].
    '''
    detached = img_var.detach().cpu()
    return detached[0].numpy()
def fill_noise(x, noise_type):
    """Fill tensor `x` in place: 'u' -> uniform [0, 1), 'n' -> standard normal."""
    if noise_type == 'n':
        x.normal_()
    elif noise_type == 'u':
        x.uniform_()
    else:
        assert False
def get_image_grid(images_np, nrow=8):
    '''Concatenate a list of numpy images into a single grid image
    (numpy array), with at most `nrow` images per row.'''
    as_tensors = [torch.from_numpy(img) for img in images_np]
    grid = torchvision.utils.make_grid(as_tensors, nrow)
    return grid.numpy()
def get_net(input_depth, NET_TYPE, pad, upsample_mode, n_channels=3,
            act_fun='LeakyReLU', skip_n33d=128, skip_n33u=128, skip_n11=4,
            num_scales=5, downsample_mode='stride'):
    '''
    Factory for the architectures used with deep image prior.

    NET_TYPE selects among 'ResNet', 'skip', 'texture_nets', 'UNet' and
    'identity'; the remaining arguments configure the chosen network
    (the skip_* channel counts may be ints, broadcast to every scale,
    or per-scale lists).
    '''
    if NET_TYPE == 'skip':
        def per_scale(value):
            # Broadcast a scalar channel count to one entry per scale
            return [value]*num_scales if isinstance(value, int) else value
        net = skip(input_depth, n_channels,
                   num_channels_down=per_scale(skip_n33d),
                   num_channels_up=per_scale(skip_n33u),
                   num_channels_skip=per_scale(skip_n11),
                   upsample_mode=upsample_mode,
                   downsample_mode=downsample_mode,
                   need_sigmoid=True, need_bias=True, pad=pad,
                   act_fun=act_fun)
    elif NET_TYPE == 'ResNet':
        # NOTE(review): these positional arguments do not line up with
        # ResNet.__init__'s signature (nn.BatchNorm2d lands in act_fun) —
        # preserved as-is, confirm intent.
        net = ResNet(input_depth, 3, 10, 16, 1, nn.BatchNorm2d, False)
    elif NET_TYPE == 'texture_nets':
        net = get_texture_nets(inp=input_depth, ratios=[32, 16, 8, 4, 2, 1],
                               fill_noise=False, pad=pad)
    elif NET_TYPE == 'UNet':
        net = UNet(num_input_channels=input_depth, num_output_channels=3,
                   feature_scale=4, more_layers=0, concat_x=False,
                   upsample_mode=upsample_mode, pad=pad,
                   norm_layer=nn.BatchNorm2d, need_sigmoid=True,
                   need_bias=True)
    elif NET_TYPE == 'identity':
        assert input_depth == 3
        net = nn.Sequential()
    else:
        assert False
    return net
DeepIR | DeepIR-main/modules/dmodels/skip.py | import torch
import torch.nn as nn
from .common import *
def skip(
        num_input_channels=2, num_output_channels=3,
        num_channels_down=[16, 32, 64, 128, 128], num_channels_up=[16, 32, 64, 128, 128], num_channels_skip=[4, 4, 4, 4, 4],
        filter_size_down=3, filter_size_up=3, filter_skip_size=1,
        need_sigmoid=True, need_bias=True,
        pad='zero', upsample_mode='nearest', downsample_mode='stride', act_fun='LeakyReLU',
        need1x1_up=True):
    """Assembles encoder-decoder with skip connections.
    The network is built from the outside in: at each scale a `deeper`
    branch (downsample -> recurse -> upsample) runs in parallel with a
    1x1-ish `skip` branch, and their outputs are concatenated.
    NOTE(review): the num_channels_* defaults are mutable lists; they are
    only read here, never mutated, so this is safe but fragile.
    Arguments:
        act_fun: Either string 'LeakyReLU|Swish|ELU|none' or module (e.g. nn.ReLU)
        pad (string): zero|reflection (default: 'zero')
        upsample_mode (string): 'nearest|bilinear' (default: 'nearest')
        downsample_mode (string): 'stride|avg|max|lanczos2' (default: 'stride')
    """
    assert len(num_channels_down) == len(num_channels_up) == len(num_channels_skip)
    n_scales = len(num_channels_down)
    # Broadcast scalar per-scale settings to one entry per scale
    if not (isinstance(upsample_mode, list) or isinstance(upsample_mode, tuple)) :
        upsample_mode = [upsample_mode]*n_scales
    if not (isinstance(downsample_mode, list)or isinstance(downsample_mode, tuple)):
        downsample_mode = [downsample_mode]*n_scales
    if not (isinstance(filter_size_down, list) or isinstance(filter_size_down, tuple)) :
        filter_size_down = [filter_size_down]*n_scales
    if not (isinstance(filter_size_up, list) or isinstance(filter_size_up, tuple)) :
        filter_size_up = [filter_size_up]*n_scales
    last_scale = n_scales - 1
    cur_depth = None
    model = nn.Sequential()
    # model_tmp points at the container for the scale currently being
    # built; it descends into deeper_main at the end of each iteration
    model_tmp = model
    input_depth = num_input_channels
    for i in range(len(num_channels_down)):
        deeper = nn.Sequential()
        skip = nn.Sequential()
        # Merge the skip branch with the deeper branch (channel concat),
        # or pass the deeper branch through when no skip channels are used
        if num_channels_skip[i] != 0:
            model_tmp.add(Concat(1, skip, deeper))
        else:
            model_tmp.add(deeper)
        model_tmp.add(bn(num_channels_skip[i] + (num_channels_up[i + 1] if i < last_scale else num_channels_down[i])))
        if num_channels_skip[i] != 0:
            skip.add(conv(input_depth, num_channels_skip[i], filter_skip_size, bias=need_bias, pad=pad))
            skip.add(bn(num_channels_skip[i]))
            skip.add(act(act_fun))
        # skip.add(Concat(2, GenNoise(nums_noise[i]), skip_part))
        # Downsampling conv (stride 2) followed by a stride-1 conv
        deeper.add(conv(input_depth, num_channels_down[i], filter_size_down[i], 2, bias=need_bias, pad=pad, downsample_mode=downsample_mode[i]))
        deeper.add(bn(num_channels_down[i]))
        deeper.add(act(act_fun))
        deeper.add(conv(num_channels_down[i], num_channels_down[i], filter_size_down[i], bias=need_bias, pad=pad))
        deeper.add(bn(num_channels_down[i]))
        deeper.add(act(act_fun))
        deeper_main = nn.Sequential()
        if i == len(num_channels_down) - 1:
            # The deepest scale: nothing below, so the deeper branch
            # carries its own channel count upward
            k = num_channels_down[i]
        else:
            # Recurse: the next scale's layers will be added to deeper_main
            deeper.add(deeper_main)
            k = num_channels_up[i + 1]
        deeper.add(nn.Upsample(scale_factor=2, mode=upsample_mode[i]))
        # Decoder side of this scale, applied after the concat + bn above
        model_tmp.add(conv(num_channels_skip[i] + k, num_channels_up[i], filter_size_up[i], 1, bias=need_bias, pad=pad))
        model_tmp.add(bn(num_channels_up[i]))
        model_tmp.add(act(act_fun))
        if need1x1_up:
            model_tmp.add(conv(num_channels_up[i], num_channels_up[i], 1, bias=need_bias, pad=pad))
            model_tmp.add(bn(num_channels_up[i]))
            model_tmp.add(act(act_fun))
        input_depth = num_channels_down[i]
        model_tmp = deeper_main
    model.add(conv(num_channels_up[0], num_output_channels, 1, bias=need_bias, pad=pad))
    if need_sigmoid:
        model.add(nn.Sigmoid())
    return model
| 3,744 | 36.079208 | 144 | py |
DeepIR | DeepIR-main/modules/dmodels/resnet.py | import torch
import torch.nn as nn
from numpy.random import normal
from numpy.linalg import svd
from math import sqrt
import torch.nn.init
from .common import *
class ResidualSequential(nn.Sequential):
    '''
    nn.Sequential with a residual connection: forward() adds the input
    back onto the wrapped layers' output. If the wrapped layers shrink
    the spatial size, the input is center-cropped to match first.
    '''
    def __init__(self, *args):
        super(ResidualSequential, self).__init__(*args)

    def forward(self, x):
        out = super(ResidualSequential, self).forward(x)
        if out.size(2) != x.size(2) or out.size(3) != x.size(3):
            # Center-crop the input to the output's spatial size.
            # Integer division is required: the original float division
            # produced float slice indices, a TypeError under Python 3.
            diff2 = (x.size(2) - out.size(2)) // 2
            diff3 = (x.size(3) - out.size(3)) // 2
            x_ = x[:, :, diff2:out.size(2) + diff2, diff3:out.size(3) + diff3]
        else:
            x_ = x
        return out + x_

    def eval(self):
        # Delegate to nn.Module.eval, which recursively switches every
        # submodule to inference mode and returns self. (The previous
        # override printed debug output and called exit(), which killed
        # the interpreter on any eval() call.)
        return super(ResidualSequential, self).eval()
def get_block(num_channels, norm_layer, act_fun):
    '''
    Build the layer list for one ResNet block:
    conv -> norm -> activation -> conv -> norm (no trailing activation).
    '''
    first_conv = nn.Conv2d(num_channels, num_channels, 3, 1, 1, bias=False)
    second_conv = nn.Conv2d(num_channels, num_channels, 3, 1, 1, bias=False)
    return [
        first_conv,
        norm_layer(num_channels, affine=True),
        act(act_fun),
        second_conv,
        norm_layer(num_channels, affine=True),
    ]
class ResNet(nn.Module):
    def __init__(self, num_input_channels, num_output_channels, num_blocks, num_channels, need_residual=True, act_fun='LeakyReLU', need_sigmoid=True, norm_layer=nn.BatchNorm2d, pad='reflection'):
        '''
        Simple image-to-image ResNet: input conv + activation, then
        `num_blocks` (optionally residual) conv blocks, a final conv +
        norm, and a closing conv + Sigmoid.
        NOTE(review): `need_sigmoid` is accepted but never used — the
        Sigmoid is always appended; confirm intent.
        pad = 'start|zero|replication'
        '''
        super(ResNet, self).__init__()
        if need_residual:
            s = ResidualSequential
        else:
            s = nn.Sequential
        stride = 1
        # First layers
        layers = [
            # nn.ReplicationPad2d(num_blocks * 2 * stride + 3),
            conv(num_input_channels, num_channels, 3, stride=1, bias=True, pad=pad),
            act(act_fun)
        ]
        # Residual blocks
        # layers_residual = []
        for i in range(num_blocks):
            layers += [s(*get_block(num_channels, norm_layer, act_fun))]
        layers += [
            nn.Conv2d(num_channels, num_channels, 3, 1, 1),
            norm_layer(num_channels, affine=True)
        ]
        # if need_residual:
        #     layers += [ResidualSequential(*layers_residual)]
        # else:
        #     layers += [Sequential(*layers_residual)]
        # if factor >= 2:
        #     # Do upsampling if needed
        #     layers += [
        #         nn.Conv2d(num_channels, num_channels *
        #                   factor ** 2, 3, 1),
        #         nn.PixelShuffle(factor),
        #         act(act_fun)
        #     ]
        layers += [
            conv(num_channels, num_output_channels, 3, 1, bias=True, pad=pad),
            nn.Sigmoid()
        ]
        self.model = nn.Sequential(*layers)
    def forward(self, input):
        return self.model(input)
    def eval(self):
        # NOTE(review): this override switches only self.model to eval
        # mode and returns None (nn.Module.eval returns self), so chained
        # calls like `net = net.eval()` would set net to None — confirm.
        self.model.eval()
| 2,943 | 29.350515 | 195 | py |
DeepIR | DeepIR-main/modules/dmodels/downsampler.py | import numpy as np
import torch
import torch.nn as nn
class Downsampler(nn.Module):
    '''
    Anti-aliased downsampling implemented as a fixed (non-learned)
    strided convolution with a resampling kernel.
    http://www.realitypixels.com/turk/computergraphics/ResamplingFilters.pdf

    Args:
        n_planes: number of channels; the kernel is applied per channel
        factor: integer downsampling factor (used as the conv stride)
        kernel_type: preset ('lanczos2', 'lanczos3', 'gauss12', 'gauss1sq2')
            or a bare 'lanczos'/'gauss'/'box' with explicit parameters
        phase: 0 or 0.5 kernel phase
        kernel_width, support, sigma: explicit kernel parameters; required
            only for the bare kernel types
        preserve_size: if True, replication-pad the input before the
            strided convolution
    '''
    def __init__(self, n_planes, factor, kernel_type, phase=0, kernel_width=None, support=None, sigma=None, preserve_size=False):
        super(Downsampler, self).__init__()
        assert phase in [0, 0.5], 'phase should be 0 or 0.5'
        # Presets expand to a concrete kernel family + width/support/sigma
        if kernel_type == 'lanczos2':
            support = 2
            kernel_width = 4 * factor + 1
            kernel_type_ = 'lanczos'
        elif kernel_type == 'lanczos3':
            support = 3
            kernel_width = 6 * factor + 1
            kernel_type_ = 'lanczos'
        elif kernel_type == 'gauss12':
            kernel_width = 7
            sigma = 1/2
            kernel_type_ = 'gauss'
        elif kernel_type == 'gauss1sq2':
            kernel_width = 9
            sigma = 1./np.sqrt(2)
            kernel_type_ = 'gauss'
        elif kernel_type in ['lanczos', 'gauss', 'box']:
            kernel_type_ = kernel_type
        else:
            assert False, 'wrong name kernel'
        # note that `kernel width` will be different to actual size for phase = 1/2
        self.kernel = get_kernel(factor, kernel_type_, phase, kernel_width, support=support, sigma=sigma)
        # Fixed depthwise-style convolution: the same 2D kernel is written
        # on the diagonal (i, i) filter slots only, so channels do not mix;
        # weights and biases are zeroed and never trained here
        downsampler = nn.Conv2d(n_planes, n_planes, kernel_size=self.kernel.shape, stride=factor, padding=0)
        downsampler.weight.data[:] = 0
        downsampler.bias.data[:] = 0
        kernel_torch = torch.from_numpy(self.kernel)
        for i in range(n_planes):
            downsampler.weight.data[i, i] = kernel_torch
        self.downsampler_ = downsampler
        if preserve_size:
            # Pad so the strided convolution does not shrink the output
            # beyond the plain input_size/factor reduction
            if self.kernel.shape[0] % 2 == 1:
                pad = int((self.kernel.shape[0] - 1) / 2.)
            else:
                pad = int((self.kernel.shape[0] - factor) / 2.)
            self.padding = nn.ReplicationPad2d(pad)
        self.preserve_size = preserve_size
    def forward(self, input):
        # Optionally pad, then apply the fixed strided convolution; the
        # (possibly padded) input is stashed on self.x for inspection
        if self.preserve_size:
            x = self.padding(input)
        else:
            x= input
        self.x = x
        return self.downsampler_(x)
def get_kernel(factor, kernel_type, phase, kernel_width, support=None, sigma=None):
    '''
    Build a square 2D resampling kernel for the Downsampler module.

    Inputs:
        factor: Downsampling factor; scales the lanczos window
        kernel_type: One of 'lanczos', 'gauss' or 'box'
        phase: 0 or 0.5; half-phase kernels are one tap smaller per side
        kernel_width: Nominal side length of the kernel
        support: Lanczos support (number of lobes); required for 'lanczos'
        sigma: Gaussian standard deviation; required for 'gauss'

    Outputs:
        kernel: 2D float kernel, normalized to sum to 1
    '''
    assert kernel_type in ['lanczos', 'gauss', 'box']
    if phase == 0.5 and kernel_type != 'box':
        kernel = np.zeros([kernel_width - 1, kernel_width - 1])
    else:
        kernel = np.zeros([kernel_width, kernel_width])
    if kernel_type == 'box':
        assert phase == 0.5, 'Box filter is always half-phased'
        kernel[:] = 1./(kernel_width * kernel_width)
    elif kernel_type == 'gauss':
        assert sigma, 'sigma is not specified'
        assert phase != 0.5, 'phase 1/2 for gauss not implemented'
        center = (kernel_width + 1.)/2.
        sigma_sq = sigma * sigma
        for i in range(1, kernel.shape[0] + 1):
            for j in range(1, kernel.shape[1] + 1):
                # Distances are halved (as in the upstream deep-image-prior
                # implementation), which effectively doubles sigma
                di = (i - center)/2.
                dj = (j - center)/2.
                kernel[i - 1][j - 1] = np.exp(-(di * di + dj * dj)/(2 * sigma_sq))
                kernel[i - 1][j - 1] = kernel[i - 1][j - 1]/(2. * np.pi * sigma_sq)
    elif kernel_type == 'lanczos':
        assert support, 'support is not specified'
        center = (kernel_width + 1) / 2.
        for i in range(1, kernel.shape[0] + 1):
            for j in range(1, kernel.shape[1] + 1):
                if phase == 0.5:
                    di = abs(i + 0.5 - center) / factor
                    dj = abs(j + 0.5 - center) / factor
                else:
                    di = abs(i - center) / factor
                    dj = abs(j - center) / factor
                # Separable windowed sinc: sinc(d) * sinc(d / support)
                val = 1
                if di != 0:
                    val = val * support * np.sin(np.pi * di) * np.sin(np.pi * di / support)
                    val = val / (np.pi * np.pi * di * di)
                if dj != 0:
                    val = val * support * np.sin(np.pi * dj) * np.sin(np.pi * dj / support)
                    val = val / (np.pi * np.pi * dj * dj)
                kernel[i - 1][j - 1] = val
    else:
        assert False, 'wrong method name'
    kernel /= kernel.sum()
    return kernel
#a = Downsampler(n_planes=3, factor=2, kernel_type='lanczos2', phase='1', preserve_size=True)
#################
# Learnable downsampler
# KS = 32
# dow = nn.Sequential(nn.ReplicationPad2d(int((KS - factor) / 2.)), nn.Conv2d(1,1,KS,factor))
# class Apply(nn.Module):
# def __init__(self, what, dim, *args):
# super(Apply, self).__init__()
# self.dim = dim
# self.what = what
# def forward(self, input):
# inputs = []
# for i in range(input.size(self.dim)):
# inputs.append(self.what(input.narrow(self.dim, i, 1)))
# return torch.cat(inputs, dim=self.dim)
# def __len__(self):
# return len(self._modules)
# downs = Apply(dow, 1)
# downs.type(dtype)(net_input.type(dtype)).size()
| 5,379 | 30.83432 | 129 | py |
DeepIR | DeepIR-main/modules/dmodels/dcgan.py | import torch
import torch.nn as nn
def dcgan(inp=2,
          ndf=32,
          num_ups=4, need_sigmoid=True, need_bias=True, pad='zero', upsample_mode='nearest', need_convT = True):
    '''
    Build a DCGAN-style generator as an nn.Sequential.

    Inputs:
        inp: number of input channels
        ndf: feature channels in the hidden layers
        num_ups: controls depth; num_ups - 3 hidden upsampling stages plus
            one final upsampling stage to 3 output channels
        need_sigmoid: append a final Sigmoid when True
        need_convT: use transposed convolutions when True, otherwise
            Upsample + Conv2d
        upsample_mode: interpolation mode for the hidden non-convT stages
            (NOTE(review): the final non-convT stage hard-codes 'bilinear')
        need_bias, pad: accepted for interface compatibility but unused

    NOTE(review): nn.LeakyReLU(True) sets negative_slope=True (i.e. 1),
    not inplace — possibly intended nn.LeakyReLU(inplace=True); preserved
    as-is for behavioral compatibility.
    '''
    stages = [nn.ConvTranspose2d(inp, ndf, kernel_size=3, stride=1,
                                 padding=0, bias=False),
              nn.BatchNorm2d(ndf),
              nn.LeakyReLU(True)]
    for _ in range(num_ups - 3):
        if need_convT:
            stages.extend([
                nn.ConvTranspose2d(ndf, ndf, kernel_size=4, stride=2,
                                   padding=1, bias=False),
                nn.BatchNorm2d(ndf),
                nn.LeakyReLU(True),
            ])
        else:
            stages.extend([
                nn.Upsample(scale_factor=2, mode=upsample_mode),
                nn.Conv2d(ndf, ndf, kernel_size=3, stride=1, padding=1,
                          bias=False),
                nn.BatchNorm2d(ndf),
                nn.LeakyReLU(True),
            ])
    if need_convT:
        stages.append(nn.ConvTranspose2d(ndf, 3, 4, 2, 1, bias=False))
    else:
        stages.extend([
            nn.Upsample(scale_factor=2, mode='bilinear'),
            nn.Conv2d(ndf, 3, kernel_size=3, stride=1, padding=1,
                      bias=False),
        ])
    if need_sigmoid:
        stages.append(nn.Sigmoid())
    return nn.Sequential(*stages)
DeepIR | DeepIR-main/modules/dmodels/texture_nets.py | import torch
import torch.nn as nn
from .common import *
normalization = nn.BatchNorm2d
def conv(in_f, out_f, kernel_size, stride=1, bias=True, pad='zero'):
    '''
    Convolution with 'same' padding for odd kernel sizes.

    Inputs:
        in_f, out_f: input/output channel counts
        kernel_size: convolution kernel size (odd)
        stride: convolution stride
        bias: include a bias term
        pad: 'zero' for built-in zero padding, 'reflection' for an explicit
            ReflectionPad2d followed by an unpadded convolution

    Outputs:
        nn.Conv2d or nn.Sequential implementing the padded convolution
        (None for any other `pad` value, matching the original behavior)
    '''
    # Integer division is required: (kernel_size - 1) / 2 is a float under
    # Python 3 and is rejected by nn.Conv2d / nn.ReflectionPad2d.
    to_pad = (kernel_size - 1) // 2
    if pad == 'zero':
        return nn.Conv2d(in_f, out_f, kernel_size, stride, padding=to_pad,
                         bias=bias)
    elif pad == 'reflection':
        layers = [nn.ReflectionPad2d(to_pad),
                  nn.Conv2d(in_f, out_f, kernel_size, stride, padding=0,
                            bias=bias)]
        return nn.Sequential(*layers)
def get_texture_nets(inp=3, ratios = [32, 16, 8, 4, 2, 1], fill_noise=False, pad='zero', need_sigmoid=False, conv_num=8, upsample_mode='nearest'):
    # Multi-scale texture-networks generator. Each scale processes an
    # average-pooled copy of the input through three conv/norm/act stages;
    # scales are merged coarse-to-fine by upsampling the running network
    # and concatenating it with the next scale's branch. Uses the
    # monkeypatched nn.Module.add (see common.py) to build sequentials.
    # NOTE(review): `ratios` is a mutable default list; it is only read
    # here, never mutated.
    for i in range(len(ratios)):
        j = i + 1
        # Branch for this scale: pool the input down by ratios[i] first
        seq = nn.Sequential()
        tmp = nn.AvgPool2d(ratios[i], ratios[i])
        seq.add(tmp)
        if fill_noise:
            seq.add(GenNoise(inp))
        seq.add(conv(inp, conv_num, 3, pad=pad))
        seq.add(normalization(conv_num))
        seq.add(act())
        seq.add(conv(conv_num, conv_num, 3, pad=pad))
        seq.add(normalization(conv_num))
        seq.add(act())
        seq.add(conv(conv_num, conv_num, 1, pad=pad))
        seq.add(normalization(conv_num))
        seq.add(act())
        if i == 0:
            # Coarsest scale: just upsample and carry forward
            seq.add(nn.Upsample(scale_factor=2, mode=upsample_mode))
            cur = seq
        else:
            # Merge the running network with this scale's branch; the
            # channel count grows by conv_num per merged scale
            cur_temp = cur
            cur = nn.Sequential()
            # Batch norm before merging
            seq.add(normalization(conv_num))
            cur_temp.add(normalization(conv_num * (j - 1)))
            cur.add(Concat(1, cur_temp, seq))
            cur.add(conv(conv_num * j, conv_num * j, 3, pad=pad))
            cur.add(normalization(conv_num * j))
            cur.add(act())
            cur.add(conv(conv_num * j, conv_num * j, 3, pad=pad))
            cur.add(normalization(conv_num * j))
            cur.add(act())
            cur.add(conv(conv_num * j, conv_num * j, 1, pad=pad))
            cur.add(normalization(conv_num * j))
            cur.add(act())
            if i == len(ratios) - 1:
                # Finest scale: project to 3 output channels
                cur.add(conv(conv_num * j, 3, 1, pad=pad))
            else:
                cur.add(nn.Upsample(scale_factor=2, mode=upsample_mode))
    model = cur
    if need_sigmoid:
        model.add(nn.Sigmoid())
    return model
| 2,315 | 27.95 | 146 | py |
DeepIR | DeepIR-main/modules/dmodels/common.py | import torch
import torch.nn as nn
import numpy as np
from .downsampler import Downsampler
def add_module(self, module):
    # Append `module` under the next 1-based integer name, mirroring the
    # positional naming convention of nn.Sequential
    self.add_module(str(len(self) + 1), module)
# Monkey-patch: give every nn.Module an `add` method so networks in this
# package can be assembled imperatively (e.g. seq.add(layer))
torch.nn.Module.add = add_module
class Concat(nn.Module):
    '''
    Run several sub-modules on the same input and concatenate their
    outputs along `dim`. Outputs whose spatial sizes differ are
    center-cropped to the smallest common height/width first.
    '''
    def __init__(self, dim, *args):
        super(Concat, self).__init__()
        self.dim = dim
        # Register each sub-module under its positional index
        for idx, module in enumerate(args):
            self.add_module(str(idx), module)
    def forward(self, input):
        inputs = []
        for module in self._modules.values():
            inputs.append(module(input))
        inputs_shapes2 = [x.shape[2] for x in inputs]
        inputs_shapes3 = [x.shape[3] for x in inputs]
        if np.all(np.array(inputs_shapes2) == min(inputs_shapes2)) and np.all(np.array(inputs_shapes3) == min(inputs_shapes3)):
            # All outputs already share the same spatial size
            inputs_ = inputs
        else:
            # Spatial sizes disagree (e.g. after odd-sized downsampling):
            # center-crop every output to the smallest common size
            target_shape2 = min(inputs_shapes2)
            target_shape3 = min(inputs_shapes3)
            inputs_ = []
            for inp in inputs:
                diff2 = (inp.size(2) - target_shape2) // 2
                diff3 = (inp.size(3) - target_shape3) // 2
                inputs_.append(inp[:, :, diff2: diff2 + target_shape2, diff3:diff3 + target_shape3])
        return torch.cat(inputs_, dim=self.dim)
    def __len__(self):
        return len(self._modules)
class GenNoise(nn.Module):
    """Ignore the input's content and emit standard-normal noise.

    The output copies the input's batch size, spatial dimensions, dtype and
    device, but its channel dimension (dim 1) is replaced by ``dim2``.
    """

    def __init__(self, dim2):
        super(GenNoise, self).__init__()
        self.dim2 = dim2  # number of noise channels to generate

    def forward(self, input):
        shape = list(input.size())
        shape[1] = self.dim2
        # Fix: the original wrapped the result in the long-deprecated
        # torch.autograd.Variable and read ``input.data``; plain tensors carry
        # autograd information since PyTorch 0.4, so we return the tensor
        # directly (same values, same dtype/device as before).
        noise = torch.zeros(shape).type_as(input)
        noise.normal_()
        return noise
class Swish(nn.Module):
    """Swish activation: ``x * sigmoid(x)`` (https://arxiv.org/abs/1710.05941)."""

    def __init__(self):
        super(Swish, self).__init__()
        self.s = nn.Sigmoid()

    def forward(self, x):
        gate = self.s(x)
        return gate * x
def act(act_fun='LeakyReLU'):
    """Build an activation module.

    ``act_fun`` is either a string naming a supported activation
    ('LeakyReLU', 'Swish', 'ELU', 'none') or a module class such as
    ``nn.ReLU``, in which case it is instantiated with no arguments.
    """
    if not isinstance(act_fun, str):
        return act_fun()

    if act_fun == 'LeakyReLU':
        return nn.LeakyReLU(0.2, inplace=True)
    if act_fun == 'Swish':
        return Swish()
    if act_fun == 'ELU':
        return nn.ELU()
    if act_fun == 'none':
        # Placeholder that passes the input through unchanged.
        return nn.Sequential()
    assert False
def bn(num_features):
    """Shorthand factory for a 2-d batch-normalisation layer."""
    return nn.BatchNorm2d(num_features)
def conv(in_f, out_f, kernel_size, stride=1, bias=True, pad='zero', downsample_mode='stride'):
    """Build a conv block as [optional padder] -> Conv2d -> [optional downsampler].

    pad: 'zero' (padding inside Conv2d) or 'reflection' (separate ReflectionPad2d).
    downsample_mode: 'stride' keeps the stride inside Conv2d; 'avg'/'max' use a
    pooling layer; 'lanczos2'/'lanczos3' use the custom Downsampler. In the
    non-'stride' modes the Conv2d itself runs with stride 1 and the
    downsampling layer is appended after it.
    """
    downsampler = None
    if stride != 1 and downsample_mode != 'stride':
        if downsample_mode == 'avg':
            downsampler = nn.AvgPool2d(stride, stride)
        elif downsample_mode == 'max':
            downsampler = nn.MaxPool2d(stride, stride)
        elif downsample_mode in ['lanczos2', 'lanczos3']:
            downsampler = Downsampler(n_planes=out_f, factor=stride, kernel_type=downsample_mode, phase=0.5, preserve_size=True)
        else:
            assert False
        # The pooling/Lanczos layer performs the downsampling instead.
        stride = 1
    padder = None
    # 'same'-style padding for odd kernel sizes.
    to_pad = int((kernel_size - 1) / 2)
    if pad == 'reflection':
        padder = nn.ReflectionPad2d(to_pad)
        to_pad = 0
    convolver = nn.Conv2d(in_f, out_f, kernel_size, stride, padding=to_pad, bias=bias)
    # Drop the unused (None) stages before assembling the Sequential.
    layers = filter(lambda x: x is not None, [padder, convolver, downsampler])
    return nn.Sequential(*layers) | 3,531 | 27.483871 | 128 | py |
DeepIR | DeepIR-main/modules/dmodels/unet.py | import torch.nn as nn
import torch
import torch.nn as nn
import torch.nn.functional as F
from .common import *
class ListModule(nn.Module):
    """List-like container of sub-modules supporting len(), iteration and indexing."""

    def __init__(self, *args):
        super(ListModule, self).__init__()
        for idx, module in enumerate(args):
            self.add_module(str(idx), module)

    def __getitem__(self, idx):
        # Normalise a negative index first, then range-check it.
        if idx < 0:
            idx = len(self) + idx
        if not 0 <= idx < len(self._modules):
            # Bug fix: the original only rejected idx >= len, so an
            # out-of-range *negative* index (e.g. -5 with 2 modules) stayed
            # negative, made range(idx) empty, and silently returned the
            # first module instead of raising.
            raise IndexError('index {} is out of range'.format(idx))
        it = iter(self._modules.values())
        for _ in range(idx):
            next(it)
        return next(it)

    def __iter__(self):
        return iter(self._modules.values())

    def __len__(self):
        return len(self._modules)
class UNet(nn.Module):
    '''
    Classic U-Net encoder/decoder with optional extra depth.

    upsample_mode in ['deconv', 'nearest', 'bilinear']
    pad in ['zero', 'replication', 'none']

    When ``concat_x`` is True the (average-pooled) input is concatenated onto
    each encoder stage's output, so each stage reserves channels for it.
    ``more_layers`` adds that many extra down/up stages below the bottleneck.
    '''
    def __init__(self, num_input_channels=3, num_output_channels=3,
                 feature_scale=4, more_layers=0, concat_x=False,
                 upsample_mode='deconv', pad='zero', norm_layer=nn.InstanceNorm2d, need_sigmoid=True, need_bias=True):
        super(UNet, self).__init__()

        self.feature_scale = feature_scale
        self.more_layers = more_layers
        self.concat_x = concat_x

        filters = [64, 128, 256, 512, 1024]
        filters = [x // self.feature_scale for x in filters]

        self.start = unetConv2(num_input_channels, filters[0] if not concat_x else filters[0] - num_input_channels, norm_layer, need_bias, pad)

        self.down1 = unetDown(filters[0], filters[1] if not concat_x else filters[1] - num_input_channels, norm_layer, need_bias, pad)
        self.down2 = unetDown(filters[1], filters[2] if not concat_x else filters[2] - num_input_channels, norm_layer, need_bias, pad)
        self.down3 = unetDown(filters[2], filters[3] if not concat_x else filters[3] - num_input_channels, norm_layer, need_bias, pad)
        self.down4 = unetDown(filters[3], filters[4] if not concat_x else filters[4] - num_input_channels, norm_layer, need_bias, pad)

        # Optional extra downsampling/upsampling stages below the bottleneck.
        if self.more_layers > 0:
            self.more_downs = [
                unetDown(filters[4], filters[4] if not concat_x else filters[4] - num_input_channels , norm_layer, need_bias, pad) for i in range(self.more_layers)]
            self.more_ups = [unetUp(filters[4], upsample_mode, need_bias, pad, same_num_filt =True) for i in range(self.more_layers)]

            self.more_downs = ListModule(*self.more_downs)
            self.more_ups = ListModule(*self.more_ups)

        self.up4 = unetUp(filters[3], upsample_mode, need_bias, pad)
        self.up3 = unetUp(filters[2], upsample_mode, need_bias, pad)
        self.up2 = unetUp(filters[1], upsample_mode, need_bias, pad)
        self.up1 = unetUp(filters[0], upsample_mode, need_bias, pad)

        self.final = conv(filters[0], num_output_channels, 1, bias=need_bias, pad=pad)

        if need_sigmoid:
            self.final = nn.Sequential(self.final, nn.Sigmoid())

    def forward(self, inputs):
        # Precompute progressively downsampled copies of the input for concat_x.
        downs = [inputs]
        down = nn.AvgPool2d(2, 2)
        for i in range(4 + self.more_layers):
            downs.append(down(downs[-1]))

        in64 = self.start(inputs)
        if self.concat_x:
            in64 = torch.cat([in64, downs[0]], 1)

        down1 = self.down1(in64)
        if self.concat_x:
            down1 = torch.cat([down1, downs[1]], 1)

        down2 = self.down2(down1)
        if self.concat_x:
            down2 = torch.cat([down2, downs[2]], 1)

        down3 = self.down3(down2)
        if self.concat_x:
            down3 = torch.cat([down3, downs[3]], 1)

        down4 = self.down4(down3)
        if self.concat_x:
            down4 = torch.cat([down4, downs[4]], 1)

        if self.more_layers > 0:
            prevs = [down4]
            for kk, d in enumerate(self.more_downs):
                out = d(prevs[-1])
                if self.concat_x:
                    out = torch.cat([out, downs[kk + 5]], 1)
                prevs.append(out)

            up_ = self.more_ups[-1](prevs[-1], prevs[-2])
            for idx in range(self.more_layers - 1):
                # Bug fix: the original indexed with the nonexistent attribute
                # ``self.more`` (AttributeError whenever more_layers > 1);
                # the intended attribute is ``self.more_layers``.
                l = self.more_ups[self.more_layers - idx - 2]
                up_ = l(up_, prevs[self.more_layers - idx - 2])
        else:
            up_ = down4

        up4 = self.up4(up_, down3)
        up3 = self.up3(up4, down2)
        up2 = self.up2(up3, down1)
        up1 = self.up1(up2, in64)

        return self.final(up1)
class unetConv2(nn.Module):
    """Double 3x3 conv block: (conv -> [norm] -> ReLU) applied twice."""

    def __init__(self, in_size, out_size, norm_layer, need_bias, pad):
        super(unetConv2, self).__init__()

        print(pad)

        def block(n_in, n_out):
            # One conv stage; normalisation is inserted only when a layer
            # class was supplied.
            stages = [conv(n_in, n_out, 3, bias=need_bias, pad=pad)]
            if norm_layer is not None:
                stages.append(norm_layer(n_out))
            stages.append(nn.ReLU())
            return nn.Sequential(*stages)

        self.conv1 = block(in_size, out_size)
        self.conv2 = block(out_size, out_size)

    def forward(self, inputs):
        return self.conv2(self.conv1(inputs))
class unetDown(nn.Module):
    """Encoder stage: halve the spatial resolution (max-pool) then double-conv."""

    def __init__(self, in_size, out_size, norm_layer, need_bias, pad):
        super(unetDown, self).__init__()
        self.conv = unetConv2(in_size, out_size, norm_layer, need_bias, pad)
        self.down = nn.MaxPool2d(2, 2)

    def forward(self, inputs):
        pooled = self.down(inputs)
        return self.conv(pooled)
class unetUp(nn.Module):
    """Decoder stage: upsample, centre-crop the skip tensor to match, double-conv."""

    def __init__(self, out_size, upsample_mode, need_bias, pad, same_num_filt=False):
        super(unetUp, self).__init__()

        num_filt = out_size if same_num_filt else out_size * 2
        if upsample_mode == 'deconv':
            self.up = nn.ConvTranspose2d(num_filt, out_size, 4, stride=2, padding=1)
            self.conv = unetConv2(out_size * 2, out_size, None, need_bias, pad)
        elif upsample_mode == 'bilinear' or upsample_mode == 'nearest':
            self.up = nn.Sequential(nn.Upsample(scale_factor=2, mode=upsample_mode),
                                    conv(num_filt, out_size, 3, bias=need_bias, pad=pad))
            self.conv = unetConv2(out_size * 2, out_size, None, need_bias, pad)
        else:
            assert False

    def forward(self, inputs1, inputs2):
        upsampled = self.up(inputs1)

        if (inputs2.size(2) != upsampled.size(2)) or (inputs2.size(3) != upsampled.size(3)):
            # Centre-crop the skip connection to the upsampled size.
            off2 = (inputs2.size(2) - upsampled.size(2)) // 2
            off3 = (inputs2.size(3) - upsampled.size(3)) // 2
            skip = inputs2[:, :, off2: off2 + upsampled.size(2), off3: off3 + upsampled.size(3)]
        else:
            skip = inputs2

        return self.conv(torch.cat([upsampled, skip], 1))
| 7,324 | 36.953368 | 164 | py |
DeepIR | DeepIR-main/modules/dmodels/__init__.py | from .skip import skip
from .texture_nets import get_texture_nets
from .resnet import ResNet
from .unet import UNet
import torch.nn as nn
def get_net(input_depth, NET_TYPE, pad, upsample_mode, n_channels=3, act_fun='LeakyReLU', skip_n33d=128, skip_n33u=128, skip_n11=4, num_scales=5, downsample_mode='stride'):
    """Factory returning one of the architectures by name.

    NET_TYPE: 'ResNet' | 'skip' | 'texture_nets' | 'UNet' | 'identity'.
    NOTE(review): the ResNet and UNet branches hard-code 3 output channels and
    ignore ``n_channels``; only the 'skip' branch honours it. ``act_fun`` is
    likewise only forwarded to 'skip'.
    """
    if NET_TYPE == 'ResNet':
        # TODO
        net = ResNet(input_depth, 3, 10, 16, 1, nn.BatchNorm2d, False)
    elif NET_TYPE == 'skip':
        # skip_n33d/skip_n33u/skip_n11 may be ints (replicated per scale) or lists.
        net = skip(input_depth, n_channels, num_channels_down = [skip_n33d]*num_scales if isinstance(skip_n33d, int) else skip_n33d,
                                num_channels_up = [skip_n33u]*num_scales if isinstance(skip_n33u, int) else skip_n33u,
                                num_channels_skip = [skip_n11]*num_scales if isinstance(skip_n11, int) else skip_n11,
                                upsample_mode=upsample_mode, downsample_mode=downsample_mode,
                                need_sigmoid=True, need_bias=True, pad=pad, act_fun=act_fun)
    elif NET_TYPE == 'texture_nets':
        net = get_texture_nets(inp=input_depth, ratios = [32, 16, 8, 4, 2, 1], fill_noise=False,pad=pad)
    elif NET_TYPE =='UNet':
        net = UNet(num_input_channels=input_depth, num_output_channels=3,
                   feature_scale=4, more_layers=0, concat_x=False,
                   upsample_mode=upsample_mode, pad=pad, norm_layer=nn.BatchNorm2d, need_sigmoid=True, need_bias=True)
    elif NET_TYPE == 'identity':
        assert input_depth == 3
        net = nn.Sequential()
    else:
        assert False
    return net | 1,639 | 50.25 | 172 | py |
AdaptiveGCL | AdaptiveGCL-main/DataHandler.py | import pickle
import numpy as np
from scipy.sparse import csr_matrix, coo_matrix, dok_matrix
from Params import args
import scipy.sparse as sp
from Utils.TimeLogger import log
import torch as t
import torch.utils.data as data
import torch.utils.data as dataloader
class DataHandler:
	"""Loads train/test interaction matrices and builds the torch dataloaders."""

	def __init__(self):
		if args.data == 'yelp':
			predir = './Datasets/sparse_yelp/'
		elif args.data == 'lastfm':
			predir = './Datasets/lastFM/'
		elif args.data == 'beer':
			predir = './Datasets/beerAdvocate/'
		self.predir = predir
		self.trnfile = predir + 'trnMat.pkl'
		self.tstfile = predir + 'tstMat.pkl'

	def loadOneFile(self, filename):
		"""Unpickle a sparse matrix, binarise it, and return it as float32 COO."""
		with open(filename, 'rb') as fs:
			mat = (pickle.load(fs) != 0).astype(np.float32)
		if type(mat) != coo_matrix:
			mat = sp.coo_matrix(mat)
		return mat

	def normalizeAdj(self, mat):
		"""Symmetric normalisation: D^{-1/2} A^T D^{-1/2}, returned as COO."""
		degrees = np.array(mat.sum(axis=-1))
		inv_sqrt = np.reshape(np.power(degrees, -0.5), [-1])
		inv_sqrt[np.isinf(inv_sqrt)] = 0.0  # isolated nodes get zero weight
		scale = sp.diags(inv_sqrt)
		return mat.dot(scale).transpose().dot(scale).tocoo()

	def makeTorchAdj(self, mat):
		"""Assemble the bipartite (user+item) adjacency as a CUDA sparse tensor."""
		user_pad = sp.csr_matrix((args.user, args.user))
		item_pad = sp.csr_matrix((args.item, args.item))
		full = sp.vstack([sp.hstack([user_pad, mat]), sp.hstack([mat.transpose(), item_pad])])
		full = (full != 0) * 1.0
		full = (full + sp.eye(full.shape[0])) * 1.0  # add self-loops
		full = self.normalizeAdj(full)

		indices = t.from_numpy(np.vstack([full.row, full.col]).astype(np.int64))
		values = t.from_numpy(full.data.astype(np.float32))
		shape = t.Size(full.shape)
		return t.sparse.FloatTensor(indices, values, shape).cuda()

	def LoadData(self):
		"""Read both matrices, build the adjacency and the train/test loaders."""
		trnMat = self.loadOneFile(self.trnfile)
		tstMat = self.loadOneFile(self.tstfile)
		self.trnMat = trnMat
		args.user, args.item = trnMat.shape
		self.torchBiAdj = self.makeTorchAdj(trnMat)

		trnData = TrnData(trnMat)
		self.trnLoader = dataloader.DataLoader(trnData, batch_size=args.batch, shuffle=True, num_workers=0)
		tstData = TstData(tstMat, trnMat)
		self.tstLoader = dataloader.DataLoader(tstData, batch_size=args.tstBat, shuffle=False, num_workers=0)
class TrnData(data.Dataset):
	"""Training dataset yielding (user, positive item, sampled negative item)."""

	def __init__(self, coomat):
		self.rows = coomat.row
		self.cols = coomat.col
		self.dokmat = coomat.todok()  # DOK form for O(1) membership tests
		self.negs = np.zeros(len(self.rows)).astype(np.int32)

	def negSampling(self):
		"""Resample one unobserved item per interaction (rejection sampling)."""
		for i in range(len(self.rows)):
			user = self.rows[i]
			while True:
				candidate = np.random.randint(args.item)
				if (user, candidate) not in self.dokmat:
					break
			self.negs[i] = candidate

	def __len__(self):
		return len(self.rows)

	def __getitem__(self, idx):
		return self.rows[idx], self.cols[idx], self.negs[idx]
class TstData(data.Dataset):
	# Test dataset: yields each test user together with a dense 0/1 mask of
	# that user's *training* items, so evaluation can exclude seen items.
	def __init__(self, coomat, trnMat):
		# Binary CSR of training interactions (the evaluation mask).
		self.csrmat = (trnMat.tocsr() != 0) * 1.0
		# tstLocs[u] collects the ground-truth test items of user u.
		tstLocs = [None] * coomat.shape[0]
		tstUsrs = set()
		for i in range(len(coomat.data)):
			row = coomat.row[i]
			col = coomat.col[i]
			if tstLocs[row] is None:
				tstLocs[row] = list()
			tstLocs[row].append(col)
			tstUsrs.add(row)
		tstUsrs = np.array(list(tstUsrs))
		self.tstUsrs = tstUsrs
		self.tstLocs = tstLocs
	def __len__(self):
		return len(self.tstUsrs)
	def __getitem__(self, idx):
		# (user id, flattened dense mask of the user's training interactions)
		return self.tstUsrs[idx], np.reshape(self.csrmat[self.tstUsrs[idx]].toarray(), [-1]) | 3,205 | 29.245283 | 103 | py |
AdaptiveGCL | AdaptiveGCL-main/Main.py | import torch
import Utils.TimeLogger as logger
from Utils.TimeLogger import log
from Params import args
from Model import Model, vgae_encoder, vgae_decoder, vgae, DenoisingNet
from DataHandler import DataHandler
import numpy as np
from Utils.Utils import calcRegLoss, pairPredict
import os
from copy import deepcopy
import scipy.sparse as sp
import random
class Coach:
	# Orchestrates the whole experiment: owns the recommender model, the two
	# view generators (VGAE-based and denoising-based), their optimizers, the
	# train/test loops, and the Recall/NDCG bookkeeping.
	def __init__(self, handler):
		self.handler = handler

		print('USER', args.user, 'ITEM', args.item)
		print('NUM OF INTERACTIONS', self.handler.trnLoader.dataset.__len__())
		self.metrics = dict()
		mets = ['Loss', 'preLoss', 'Recall', 'NDCG']
		for met in mets:
			self.metrics['Train' + met] = list()
			self.metrics['Test' + met] = list()

	def makePrint(self, name, ep, reses, save):
		# Format one epoch's metric dict into a log line; when `save` is set,
		# also append each known metric to self.metrics for later inspection.
		ret = 'Epoch %d/%d, %s: ' % (ep, args.epoch, name)
		for metric in reses:
			val = reses[metric]
			ret += '%s = %.4f, ' % (metric, val)
			tem = name + metric
			if save and tem in self.metrics:
				self.metrics[tem].append(val)
		ret = ret[:-2] + '  '
		return ret

	def run(self):
		# Main loop: anneal the Gumbel/hard-concrete temperature each epoch,
		# train, periodically test, and track the best Recall/NDCG seen.
		self.prepareModel()
		log('Model Prepared')

		recallMax = 0
		ndcgMax = 0
		bestEpoch = 0

		stloc = 0
		log('Model Initialized')

		for ep in range(stloc, args.epoch):
			# Exponential decay with a floor of 0.05.
			temperature = max(0.05, args.init_temperature * pow(args.temperature_decay, ep))

			tstFlag = (ep % args.tstEpoch == 0)
			reses = self.trainEpoch(temperature)
			log(self.makePrint('Train', ep, reses, tstFlag))

			if tstFlag:
				reses = self.testEpoch()
				if (reses['Recall'] > recallMax):
					recallMax = reses['Recall']
					ndcgMax = reses['NDCG']
					bestEpoch = ep
				log(self.makePrint('Test', ep, reses, tstFlag))
			print()

		print('Best epoch : ', bestEpoch, ' , Recall : ', recallMax, ' , NDCG : ', ndcgMax)

	def prepareModel(self):
		# Build the recommender plus both graph-view generators; generator_2
		# shares the model's GCN layers and initial embeddings.
		self.model = Model().cuda()

		encoder = vgae_encoder().cuda()
		decoder = vgae_decoder().cuda()
		self.generator_1 = vgae(encoder, decoder).cuda()

		self.generator_2 = DenoisingNet(self.model.getGCN(), self.model.getEmbeds()).cuda()
		self.generator_2.set_fea_adj(args.user+args.item, deepcopy(self.handler.torchBiAdj).cuda())

		self.opt = torch.optim.Adam(self.model.parameters(), lr=args.lr, weight_decay=0)
		self.opt_gen_1 = torch.optim.Adam(self.generator_1.parameters(), lr=args.lr, weight_decay=0)
		self.opt_gen_2 = torch.optim.Adam(filter(lambda p: p.requires_grad, self.generator_2.parameters()), lr=args.lr, weight_decay=0, eps=args.eps)

	def trainEpoch(self, temperature):
		# One epoch: per batch, accumulate gradients from (1) the contrastive
		# loss between the two generated views, (2) an information-bottleneck
		# style loss, (3) BPR + L2 on the original graph, and (4) both
		# generators' own losses — then step all three optimizers once.
		trnLoader = self.handler.trnLoader
		trnLoader.dataset.negSampling()

		generate_loss_1, generate_loss_2, bpr_loss, im_loss, ib_loss, reg_loss = 0, 0, 0, 0, 0, 0
		steps = trnLoader.dataset.__len__() // args.batch

		for i, tem in enumerate(trnLoader):
			data = deepcopy(self.handler.torchBiAdj).cuda()
			# View 1: edge-pruned graph sampled from the VGAE generator.
			data1 = self.generator_generate(self.generator_1)

			self.opt.zero_grad()
			self.opt_gen_1.zero_grad()
			self.opt_gen_2.zero_grad()

			ancs, poss, negs = tem
			ancs = ancs.long().cuda()
			poss = poss.long().cuda()
			negs = negs.long().cuda()

			# Contrastive (InfoMax) loss between the two views.
			out1 = self.model.forward_graphcl(data1)
			out2 = self.model.forward_graphcl_(self.generator_2)
			loss = self.model.loss_graphcl(out1, out2, ancs, poss).mean() * args.ssl_reg
			im_loss += float(loss)
			loss.backward()

			# info bottleneck: contrast fresh forward passes against the
			# detached previous outputs of each view.
			_out1 = self.model.forward_graphcl(data1)
			_out2 = self.model.forward_graphcl_(self.generator_2)
			loss_ib = self.model.loss_graphcl(_out1, out1.detach(), ancs, poss) + self.model.loss_graphcl(_out2, out2.detach(), ancs, poss)
			loss= loss_ib.mean() * args.ib_reg
			ib_loss += float(loss)
			loss.backward()

			# BPR ranking loss + L2 regularisation on the untouched graph.
			usrEmbeds, itmEmbeds = self.model.forward_gcn(data)
			ancEmbeds = usrEmbeds[ancs]
			posEmbeds = itmEmbeds[poss]
			negEmbeds = itmEmbeds[negs]

			scoreDiff = pairPredict(ancEmbeds, posEmbeds, negEmbeds)
			bprLoss = - (scoreDiff).sigmoid().log().sum() / args.batch
			regLoss = calcRegLoss(self.model) * args.reg

			loss = bprLoss + regLoss
			bpr_loss += float(bprLoss)
			reg_loss += float(regLoss)
			loss.backward()

			# Generator-specific training objectives.
			loss_1 = self.generator_1(deepcopy(self.handler.torchBiAdj).cuda(), ancs, poss, negs)
			loss_2 = self.generator_2(ancs, poss, negs, temperature)

			loss = loss_1 + loss_2
			generate_loss_1 += float(loss_1)
			generate_loss_2 += float(loss_2)
			loss.backward()

			self.opt.step()
			self.opt_gen_1.step()
			self.opt_gen_2.step()

			# NOTE(review): these are running sums over the epoch, not
			# per-step values, so the printed numbers grow with i.
			log('Step %d/%d: gen 1 : %.3f ; gen 2 : %.3f ; bpr : %.3f ; im : %.3f ; ib : %.3f ; reg : %.3f  ' % (
				i, 
				steps,
				generate_loss_1,
				generate_loss_2,
				bpr_loss,
				im_loss,
				ib_loss,
				reg_loss,
				), save=False, oneline=True)

		ret = dict()
		ret['Gen_1 Loss'] = generate_loss_1 / steps
		ret['Gen_2 Loss'] = generate_loss_2 / steps
		ret['BPR Loss'] = bpr_loss / steps
		ret['IM Loss'] = im_loss / steps
		ret['IB Loss'] = ib_loss / steps
		ret['Reg Loss'] = reg_loss / steps
		return ret

	def testEpoch(self):
		# Full-ranking evaluation: score every item per test user, mask items
		# seen in training with a large negative value, and take top-k.
		tstLoader = self.handler.tstLoader
		epRecall, epNdcg = [0] * 2
		i = 0
		num = tstLoader.dataset.__len__()
		steps = num // args.tstBat
		for usr, trnMask in tstLoader:
			i += 1
			usr = usr.long().cuda()
			trnMask = trnMask.cuda()
			usrEmbeds, itmEmbeds = self.model.forward_gcn(self.handler.torchBiAdj)

			allPreds = torch.mm(usrEmbeds[usr], torch.transpose(itmEmbeds, 1, 0)) * (1 - trnMask) - trnMask * 1e8
			_, topLocs = torch.topk(allPreds, args.topk)
			recall, ndcg = self.calcRes(topLocs.cpu().numpy(), self.handler.tstLoader.dataset.tstLocs, usr)
			epRecall += recall
			epNdcg += ndcg
			log('Steps %d/%d: recall = %.2f, ndcg = %.2f          ' % (i, steps, recall, ndcg), save=False, oneline=True)
		ret = dict()
		ret['Recall'] = epRecall / num
		ret['NDCG'] = epNdcg / num
		return ret

	def calcRes(self, topLocs, tstLocs, batIds):
		# Batch-summed Recall@k and NDCG@k against the ground-truth test items.
		assert topLocs.shape[0] == len(batIds)
		allRecall = allNdcg = 0
		for i in range(len(batIds)):
			temTopLocs = list(topLocs[i])
			temTstLocs = tstLocs[batIds[i]]
			tstNum = len(temTstLocs)
			# Ideal DCG for normalisation.
			maxDcg = np.sum([np.reciprocal(np.log2(loc + 2)) for loc in range(min(tstNum, args.topk))])
			recall = dcg = 0
			for val in temTstLocs:
				if val in temTopLocs:
					recall += 1
					dcg += np.reciprocal(np.log2(temTopLocs.index(val) + 2))
			recall = recall / tstNum
			ndcg = dcg / maxDcg
			allRecall += recall
			allNdcg += ndcg
		return allRecall, allNdcg

	def generator_generate(self, generator):
		# Sample a pruned adjacency from the VGAE generator (no gradients).
		# NOTE(review): edge_index is built but unused here — looks vestigial.
		edge_index = []
		edge_index.append([])
		edge_index.append([])
		adj = deepcopy(self.handler.torchBiAdj)
		idxs = adj._indices()

		with torch.no_grad():
			view = generator.generate(self.handler.torchBiAdj, idxs, adj)

		return view
if __name__ == '__main__':
	# Entry point: pin the GPU, load the dataset, then run the full
	# train/evaluate loop under Coach.
	with torch.cuda.device(args.gpu):
		logger.saveDefault = True
		log('Start')
		handler = DataHandler()
		handler.LoadData()
		log('Load Data')

		coach = Coach(handler)
		coach.run()
AdaptiveGCL | AdaptiveGCL-main/Model.py | from torch import nn
import torch.nn.functional as F
import torch
from Params import args
from copy import deepcopy
import numpy as np
import math
import scipy.sparse as sp
from Utils.Utils import contrastLoss, calcRegLoss, pairPredict
import time
import torch_sparse
# Shorthand for Xavier/Glorot uniform initialisation, used for all embeddings.
init = nn.init.xavier_uniform_
class Model(nn.Module):
	# LightGCN-style recommender: user/item embedding tables propagated through
	# a stack of parameter-free GCN layers, with a graph-contrastive loss.
	def __init__(self):
		super(Model, self).__init__()

		self.uEmbeds = nn.Parameter(init(torch.empty(args.user, args.latdim)))
		self.iEmbeds = nn.Parameter(init(torch.empty(args.item, args.latdim)))
		self.gcnLayers = nn.Sequential(*[GCNLayer() for i in range(args.gnn_layer)])

	def forward_gcn(self, adj):
		# Propagate through all layers and sum the per-layer embeddings
		# (layer combination), then split back into user/item blocks.
		iniEmbeds = torch.concat([self.uEmbeds, self.iEmbeds], axis=0)

		embedsLst = [iniEmbeds]
		for gcn in self.gcnLayers:
			embeds = gcn(adj, embedsLst[-1])
			embedsLst.append(embeds)
		mainEmbeds = sum(embedsLst)

		return mainEmbeds[:args.user], mainEmbeds[args.user:]

	def forward_graphcl(self, adj):
		# Same propagation as forward_gcn but returns the joint (user+item)
		# embedding matrix for contrastive learning.
		iniEmbeds = torch.concat([self.uEmbeds, self.iEmbeds], axis=0)

		embedsLst = [iniEmbeds]
		for gcn in self.gcnLayers:
			embeds = gcn(adj, embedsLst[-1])
			embedsLst.append(embeds)
		mainEmbeds = sum(embedsLst)

		return mainEmbeds

	def forward_graphcl_(self, generator):
		# Propagation where each layer's adjacency is produced on the fly by
		# the (denoising) generator from the previous layer's embeddings; the
		# generation itself is excluded from the autograd graph.
		iniEmbeds = torch.concat([self.uEmbeds, self.iEmbeds], axis=0)

		embedsLst = [iniEmbeds]
		count = 0
		for gcn in self.gcnLayers:
			with torch.no_grad():
				adj = generator.generate(x=embedsLst[-1], layer=count)
			embeds = gcn(adj, embedsLst[-1])
			embedsLst.append(embeds)
			count += 1
		mainEmbeds = sum(embedsLst)

		return mainEmbeds

	def loss_graphcl(self, x1, x2, users, items):
		# InfoNCE between two views restricted to the batch's users and items;
		# returns the per-node loss vector (caller takes the mean).
		T = args.temp
		user_embeddings1, item_embeddings1 = torch.split(x1, [args.user, args.item], dim=0)
		user_embeddings2, item_embeddings2 = torch.split(x2, [args.user, args.item], dim=0)

		user_embeddings1 = F.normalize(user_embeddings1, dim=1)
		item_embeddings1 = F.normalize(item_embeddings1, dim=1)
		user_embeddings2 = F.normalize(user_embeddings2, dim=1)
		item_embeddings2 = F.normalize(item_embeddings2, dim=1)

		user_embs1 = F.embedding(users, user_embeddings1)
		item_embs1 = F.embedding(items, item_embeddings1)
		user_embs2 = F.embedding(users, user_embeddings2)
		item_embs2 = F.embedding(items, item_embeddings2)

		all_embs1 = torch.cat([user_embs1, item_embs1], dim=0)
		all_embs2 = torch.cat([user_embs2, item_embs2], dim=0)

		# Cosine similarity matrix (rows already unit-norm, so the extra
		# division by the norms is a no-op kept for safety).
		all_embs1_abs = all_embs1.norm(dim=1)
		all_embs2_abs = all_embs2.norm(dim=1)

		sim_matrix = torch.einsum('ik,jk->ij', all_embs1, all_embs2) / torch.einsum('i,j->ij', all_embs1_abs, all_embs2_abs)
		sim_matrix = torch.exp(sim_matrix / T)
		pos_sim = sim_matrix[np.arange(all_embs1.shape[0]), np.arange(all_embs1.shape[0])]
		loss = pos_sim / (sim_matrix.sum(dim=1) - pos_sim)
		loss = - torch.log(loss)

		return loss

	def getEmbeds(self):
		# Re-enable gradients on the GCN layers before handing embeddings out
		# (DenoisingNet freezes them during its own forward pass).
		self.unfreeze(self.gcnLayers)
		return torch.concat([self.uEmbeds, self.iEmbeds], axis=0)

	def unfreeze(self, layer):
		for child in layer.children():
			for param in child.parameters():
				param.requires_grad = True

	def getGCN(self):
		return self.gcnLayers
class GCNLayer(nn.Module):
	"""One parameter-free propagation step: sparse adjacency times embeddings."""

	def __init__(self):
		super(GCNLayer, self).__init__()

	def forward(self, adj, embeds, flag=True):
		if not flag:
			# Alternative path using the torch_sparse package's spmm.
			return torch_sparse.spmm(adj.indices(), adj.values(), adj.shape[0], adj.shape[1], embeds)
		return torch.spmm(adj, embeds)
class vgae_encoder(Model):
	"""VGAE encoder: GCN features -> Gaussian (mean, std) plus a reparameterised sample."""

	def __init__(self):
		super(vgae_encoder, self).__init__()
		hidden = args.latdim
		self.encoder_mean = nn.Sequential(nn.Linear(hidden, hidden), nn.ReLU(inplace=True), nn.Linear(hidden, hidden))
		self.encoder_std = nn.Sequential(nn.Linear(hidden, hidden), nn.ReLU(inplace=True), nn.Linear(hidden, hidden), nn.Softplus())

	def forward(self, adj):
		feats = self.forward_graphcl(adj)

		mean = self.encoder_mean(feats)
		std = self.encoder_std(feats)

		# Reparameterisation trick: sample = mean + eps * std.
		eps = torch.randn(mean.shape).cuda()
		sample = eps * std + mean
		return sample, mean, std
class vgae_decoder(nn.Module):
	"""VGAE decoder: scores (user, item) edges and assembles the generator loss."""

	def __init__(self, hidden=args.latdim):
		super(vgae_decoder, self).__init__()
		self.decoder = nn.Sequential(nn.ReLU(inplace=True), nn.Linear(hidden, hidden), nn.ReLU(inplace=True), nn.Linear(hidden, 1))
		self.sigmoid = nn.Sigmoid()
		self.bceloss = nn.BCELoss(reduction='none')

	def forward(self, x, x_mean, x_std, users, items, neg_items, encoder):
		x_user, x_item = torch.split(x, [args.user, args.item], dim=0)

		# Edge reconstruction: observed pairs should score 1, negatives 0.
		pos_pred = self.sigmoid(self.decoder(x_user[users] * x_item[items]))
		neg_pred = self.sigmoid(self.decoder(x_user[users] * x_item[neg_items]))
		loss_rec = self.bceloss(pos_pred, torch.ones(pos_pred.shape).cuda()) + self.bceloss(neg_pred, torch.zeros(neg_pred.shape).cuda())

		# KL divergence of the approximate posterior from N(0, I).
		kl_divergence = - 0.5 * (1 + 2 * torch.log(x_std) - x_mean**2 - x_std**2).sum(dim=1)

		# BPR ranking term plus L2 regularisation on the encoder.
		ancEmbeds = x_user[users]
		posEmbeds = x_item[items]
		negEmbeds = x_item[neg_items]
		scoreDiff = pairPredict(ancEmbeds, posEmbeds, negEmbeds)
		bprLoss = - (scoreDiff).sigmoid().log().sum() / args.batch
		regLoss = calcRegLoss(encoder) * args.reg

		beta = 0.1
		return (loss_rec + beta * kl_divergence.mean() + bprLoss + regLoss).mean()
class vgae(nn.Module):
	"""Wraps the VGAE encoder + decoder; ``generate`` emits a pruned graph view."""

	def __init__(self, encoder, decoder):
		super(vgae, self).__init__()
		self.encoder = encoder
		self.decoder = decoder

	def forward(self, data, users, items, neg_items):
		sample, mean, std = self.encoder(data)
		return self.decoder(sample, mean, std, users, items, neg_items, self.encoder)

	def generate(self, data, edge_index, adj):
		sample, _, _ = self.encoder(data)

		edge_pred = self.decoder.sigmoid(self.decoder.decoder(sample[edge_index[0]] * sample[edge_index[1]]))

		vals = adj._values()
		idxs = adj._indices()
		edgeNum = vals.size()
		edge_pred = edge_pred[:, 0]
		# Keep only edges whose predicted probability rounds to 1, then
		# rescale the surviving weights so total edge mass is preserved.
		mask = ((edge_pred + 0.5).floor()).type(torch.bool)

		newVals = vals[mask]
		newVals = newVals / (newVals.shape[0] / edgeNum[0])
		newIdxs = idxs[:, mask]

		return torch.sparse.FloatTensor(newIdxs, newVals, adj.shape)
class DenoisingNet(nn.Module):
	# Learnable graph-denoiser: per layer, an attention MLP scores each edge
	# and a hard-concrete (L0) relaxation samples a soft keep/drop mask; the
	# masked adjacency is symmetrically renormalised before propagation.
	def __init__(self, gcnLayers, features):
		super(DenoisingNet, self).__init__()

		self.features = features  # initial (user+item) embedding matrix

		self.gcnLayers = gcnLayers  # shared with the main Model (frozen in forward)

		self.edge_weights = []
		self.nblayers = []
		self.selflayers = []

		self.attentions = []
		self.attentions.append([])
		self.attentions.append([])

		hidden = args.latdim

		# Separate attention towers per GCN layer (index 0 and 1).
		self.nblayers_0 = nn.Sequential(nn.Linear(hidden, hidden), nn.ReLU(inplace=True))
		self.nblayers_1 = nn.Sequential(nn.Linear(hidden, hidden), nn.ReLU(inplace=True))

		self.selflayers_0 = nn.Sequential(nn.Linear(hidden, hidden), nn.ReLU(inplace=True))
		self.selflayers_1 = nn.Sequential(nn.Linear(hidden, hidden), nn.ReLU(inplace=True))

		self.attentions_0 = nn.Sequential(nn.Linear( 2 * hidden, 1))
		self.attentions_1 = nn.Sequential(nn.Linear( 2 * hidden, 1))

	def freeze(self, layer):
		# Disable gradients for every parameter under `layer`.
		for child in layer.children():
			for param in child.parameters():
				param.requires_grad = False

	def get_attention(self, input1, input2, layer=0):
		# Edge logit from the (source, target) embedding pair, using the
		# attention tower belonging to `layer`.
		if layer == 0:
			nb_layer = self.nblayers_0
			selflayer = self.selflayers_0
		if layer == 1:
			nb_layer = self.nblayers_1
			selflayer = self.selflayers_1

		input1 = nb_layer(input1)
		input2 = selflayer(input2)

		input10 = torch.concat([input1, input2], axis=1)

		if layer == 0:
			weight10 = self.attentions_0(input10)
		if layer == 1:
			weight10 = self.attentions_1(input10)

		return weight10

	def hard_concrete_sample(self, log_alpha, beta=1.0, training=True):
		# Hard-concrete relaxation of a Bernoulli gate (Louizos et al., L0
		# regularisation): stretched sigmoid sample clipped to [0, 1].
		gamma = args.gamma
		zeta = args.zeta

		if training:
			debug_var = 1e-7
			bias = 0.0
			np_random = np.random.uniform(low=debug_var, high=1.0-debug_var, size=np.shape(log_alpha.cpu().detach().numpy()))
			random_noise = bias + torch.tensor(np_random)
			# Logistic noise added to the logits, scaled by temperature beta.
			gate_inputs = torch.log(random_noise) - torch.log(1.0 - random_noise)
			gate_inputs = (gate_inputs.cuda() + log_alpha) / beta
			gate_inputs = torch.sigmoid(gate_inputs)
		else:
			gate_inputs = torch.sigmoid(log_alpha)

		# Stretch to (gamma, zeta) then clip, producing exact 0/1 mass.
		stretched_values = gate_inputs * (zeta-gamma) +gamma
		cliped = torch.clamp(stretched_values, 0.0, 1.0)
		return cliped.float()

	def generate(self, x, layer=0):
		# Inference-time view generation: deterministic gates (no noise),
		# followed by symmetric D^{-1/2} A D^{-1/2} renormalisation.
		f1_features = x[self.row, :]
		f2_features = x[self.col, :]

		weight = self.get_attention(f1_features, f2_features, layer)

		mask = self.hard_concrete_sample(weight, training=False)

		mask = torch.squeeze(mask)
		adj = torch.sparse.FloatTensor(self.adj_mat._indices(), mask, self.adj_mat.shape)

		ind = deepcopy(adj._indices())
		row = ind[0, :]
		col = ind[1, :]

		rowsum = torch.sparse.sum(adj, dim=-1).to_dense()
		d_inv_sqrt = torch.reshape(torch.pow(rowsum, -0.5), [-1])
		d_inv_sqrt = torch.clamp(d_inv_sqrt, 0.0, 10.0)
		row_inv_sqrt = d_inv_sqrt[row]
		col_inv_sqrt = d_inv_sqrt[col]
		values = torch.mul(adj._values(), row_inv_sqrt)
		values = torch.mul(values, col_inv_sqrt)

		support = torch.sparse.FloatTensor(adj._indices(), values, adj.shape)

		return support

	def l0_norm(self, log_alpha, beta):
		# Expected number of open gates (the differentiable L0 penalty).
		gamma = args.gamma
		zeta = args.zeta
		gamma = torch.tensor(gamma)
		zeta = torch.tensor(zeta)
		reg_per_weight = torch.sigmoid(log_alpha - beta * torch.log(-gamma/zeta))

		return torch.mean(reg_per_weight)

	def set_fea_adj(self, nodes, adj):
		# Cache the full adjacency and its COO row/col index vectors.
		self.node_size = nodes
		self.adj_mat = adj

		ind = deepcopy(adj._indices())

		self.row = ind[0, :]
		self.col = ind[1, :]

	def call(self, inputs, training=None):
		# Forward propagation through the (frozen) GCN stack with a freshly
		# sampled denoised adjacency per layer; returns the layer-sum.
		if training:
			temperature = inputs
		else:
			temperature = 1.0

		self.maskes = []

		x = self.features.detach()
		layer_index = 0
		embedsLst = [self.features.detach()]

		for layer in self.gcnLayers:
			xs = []
			f1_features = x[self.row, :]
			f2_features = x[self.col, :]

			weight = self.get_attention(f1_features, f2_features, layer=layer_index)

			mask = self.hard_concrete_sample(weight, temperature, training)

			self.edge_weights.append(weight)
			self.maskes.append(mask)
			mask = torch.squeeze(mask)

			adj = torch.sparse.FloatTensor(self.adj_mat._indices(), mask, self.adj_mat.shape).coalesce()

			ind = deepcopy(adj._indices())
			row = ind[0, :]
			col = ind[1, :]

			# Symmetric renormalisation; the 1e-6 guards against empty rows.
			rowsum = torch.sparse.sum(adj, dim=-1).to_dense() + 1e-6
			d_inv_sqrt = torch.reshape(torch.pow(rowsum, -0.5), [-1])
			d_inv_sqrt = torch.clamp(d_inv_sqrt, 0.0, 10.0)
			row_inv_sqrt = d_inv_sqrt[row]
			col_inv_sqrt = d_inv_sqrt[col]
			values = torch.mul(adj.values(), row_inv_sqrt)
			values = torch.mul(values, col_inv_sqrt)

			support = torch.sparse.FloatTensor(adj._indices(), values, adj.shape).coalesce()

			nextx = layer(support, x, False)
			xs.append(nextx)
			x = xs[0]
			embedsLst.append(x)
			layer_index += 1
		return sum(embedsLst)

	def lossl0(self, temperature):
		# Sum the L0 penalties accumulated over this forward pass.
		l0_loss = torch.zeros([]).cuda()
		for weight in self.edge_weights:
			l0_loss += self.l0_norm(weight, temperature)
		self.edge_weights = []
		return l0_loss

	def forward(self, users, items, neg_items, temperature):
		# Generator objective: BPR on denoised embeddings + L2 + weighted L0.
		self.freeze(self.gcnLayers)
		x = self.call(temperature, True)
		x_user, x_item = torch.split(x, [args.user, args.item], dim=0)
		ancEmbeds = x_user[users]
		posEmbeds = x_item[items]
		negEmbeds = x_item[neg_items]

		scoreDiff = pairPredict(ancEmbeds, posEmbeds, negEmbeds)
		bprLoss = - (scoreDiff).sigmoid().log().sum() / args.batch

		regLoss = calcRegLoss(self) * args.reg

		lossl0 = self.lossl0(temperature) * args.lambda0
		return bprLoss + regLoss + lossl0
| 11,377 | 29.180371 | 126 | py |
AdaptiveGCL | AdaptiveGCL-main/Utils/Utils.py | import torch as t
import torch.nn.functional as F
def innerProduct(usrEmbeds, itmEmbeds):
	"""Row-wise dot product between two equally-shaped embedding batches."""
	return (usrEmbeds * itmEmbeds).sum(dim=-1)
def pairPredict(ancEmbeds, posEmbeds, negEmbeds):
	"""BPR pairwise score: <anchor, positive> minus <anchor, negative>, row-wise."""
	return t.sum(ancEmbeds * posEmbeds, dim=-1) - t.sum(ancEmbeds * negEmbeds, dim=-1)
def calcRegLoss(model):
	"""Sum of squared L2 norms over all model parameters (weight-decay term)."""
	total = 0
	for param in model.parameters():
		total = total + param.norm(2).square()
	return total
def contrastLoss(embeds1, embeds2, nodes, temp):
	# InfoNCE contrast between two embedding views, restricted to `nodes`.
	embeds1 = F.normalize(embeds1, p=2)
	embeds2 = F.normalize(embeds2, p=2)
	pckEmbeds1 = embeds1[nodes]
	pckEmbeds2 = embeds2[nodes]
	# Numerator: agreement between the two views of the same node.
	nume = t.exp(t.sum(pckEmbeds1 * pckEmbeds2, dim=-1) / temp)
	# Denominator: similarity of each picked node to every node in view 2.
	deno = t.exp(pckEmbeds1 @ embeds2.T / temp).sum(-1)
	return -t.log(nume / deno) | 694 | 29.217391 | 79 | py |
RG | RG-master/Image Classification/main.py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import random
from resnet import *
from utils import progress_bar
# Command-line arguments: only the base learning rate is configurable.
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0  # best test accuracy seen so far
start_epoch = 0
# Data
print('==> Preparing data..')
# Standard CIFAR-10 augmentation (random crop + flip) and normalisation.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# download=False: the dataset is expected to already exist under ./data.
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=False, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=0)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=False, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=0)
def poly(base_lr, epoch, max_iter=100, power=0.9):
    """Polynomial LR decay: base_lr * (1 - (epoch + 1) / max_iter) ** power."""
    fraction_left = 1 - float(epoch + 1) / max_iter
    return base_lr * (fraction_left ** power)
# Model
print('==> Building model..')
net = ResNet34()
if device == 'cuda':
    net.cuda()
    cudnn.benchmark = True  # autotune conv kernels for fixed input sizes
criterion = nn.CrossEntropyLoss().cuda()
# NOTE(review): momentum 0.5 (not the usual 0.9) — presumably intentional
# for the random-gradient experiment; confirm before reusing.
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.5, weight_decay=0.0001)
M_loss = 0
# Training
def train(epoch):
    """Train one epoch on CIFAR-10 with Random Gradient loss scaling.

    Every step the loss is multiplied by U(0,1) before backprop ("Random
    Gradient"). Uses the module-level net / trainloader / optimizer / criterion.
    Fix: removed the dead `global M_loss` declaration -- this function never
    reads or writes M_loss.
    """
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs), Variable(targets)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss_ = loss * random.random() # Here represents the Random Gradient
        loss_.backward()
        optimizer.step()
        train_loss += loss_.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
        progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
            % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
def test(epoch):
    """Evaluate on the CIFAR-10 test set; log new best accuracies.

    Reads/updates the module-global `best_acc`. Fix: the accuracy log file is
    now opened via a context manager instead of a leaked
    `open(...).write(...)` handle.
    """
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            inputs, targets = Variable(inputs), Variable(targets)
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
    acc = 100.*correct/total
    if acc > best_acc:
        print('Saving..')
        # Append "<epoch>_<acc>," so the best-accuracy history survives restarts.
        with open('./random_gradient_accuracy.txt', 'a') as log_file:
            log_file.write(str(epoch) + '_' + str(acc) + ',')
        best_acc = acc
    print('best_acc:', best_acc)
def adjust_learning_rate(optimizer, epoch, net):
    """Apply the polynomially decayed LR for this epoch to every param group."""
    new_lr = poly(0.1, epoch)  # plain poly decay; swap in CLR here if desired
    print('current lr: ', new_lr)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
if __name__ == '__main__':
    # 100 epochs: train, evaluate, then decay the LR for the next epoch.
    for epoch in range(0, 100):
        train(epoch)
        test(epoch)
        adjust_learning_rate(optimizer,epoch,net)
| 4,105 | 30.343511 | 109 | py |
RG | RG-master/Image Classification/resnet.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class BasicBlock(nn.Module):
    """Two 3x3 conv+BN layers with an (optionally projected) residual add."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 projection when the spatial size or channel count changes,
        # otherwise an empty Sequential acts as the identity.
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(y + residual)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck with 4x channel expansion and residual add."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        out_planes = self.expansion * planes
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        # Identity shortcut when shape is preserved, 1x1 projection otherwise.
        if stride == 1 and in_planes == out_planes:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes),
            )

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + self.shortcut(x)
        return F.relu(y)
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem, four stages, 4x4 avg-pool, linear head."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first one may downsample."""
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*blocks)

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            y = stage(y)
        y = F.avg_pool2d(y, 4)
        y = y.view(y.size(0), -1)
        return self.linear(y)
def ResNet18():
    """18-layer ResNet built from BasicBlocks (depths 2-2-2-2)."""
    return ResNet(BasicBlock, [2, 2, 2, 2])


def ResNet34():
    """34-layer ResNet built from BasicBlocks (depths 3-4-6-3)."""
    return ResNet(BasicBlock, [3, 4, 6, 3])


def ResNet50():
    """50-layer ResNet built from Bottlenecks (depths 3-4-6-3)."""
    return ResNet(Bottleneck, [3, 4, 6, 3])


def ResNet101():
    """101-layer ResNet built from Bottlenecks (depths 3-4-23-3)."""
    return ResNet(Bottleneck, [3, 4, 23, 3])


def ResNet152():
    """152-layer ResNet built from Bottlenecks (depths 3-8-36-3)."""
    return ResNet(Bottleneck, [3, 8, 36, 3])
| 3,941 | 32.982759 | 102 | py |
RG | RG-master/pix2pix/pix2pix.py | import argparse
import os
import numpy as np
import math
import itertools
import time
import datetime
import sys
import random
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from models import *
from datasets import *
import torch.nn as nn
import torch.nn.functional as F
import torch
# Command-line configuration for pix2pix training.
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', type=int, default=0, help='epoch to start training from')
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--dataset_name', type=str, default="facades", help='name of the dataset')
parser.add_argument('--batch_size', type=int, default=1, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')
parser.add_argument('--decay_epoch', type=int, default=100, help='epoch from which to start lr decay')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--img_height', type=int, default=256, help='size of image height')
parser.add_argument('--img_width', type=int, default=256, help='size of image width')
parser.add_argument('--channels', type=int, default=3, help='number of image channels')
parser.add_argument('--sample_interval', type=int, default=500, help='interval between sampling of images from generators')
parser.add_argument('--checkpoint_interval', type=int, default=-1, help='interval between model checkpoints')
opt = parser.parse_args()
print(opt)
os.makedirs('images/%s' % opt.dataset_name, exist_ok=True)
os.makedirs('saved_models/%s' % opt.dataset_name, exist_ok=True)
cuda = True if torch.cuda.is_available() else False
# Loss functions
criterion_GAN = torch.nn.MSELoss()
criterion_pixelwise = torch.nn.L1Loss()
# Loss weight of L1 pixel-wise loss between translated image and real image
lambda_pixel = 100
# Calculate output of image discriminator (PatchGAN)
# Four stride-2 conv blocks divide each spatial dimension by 2**4.
patch = (1, opt.img_height//2**4, opt.img_width//2**4)
# Initialize generator and discriminator
generator = GeneratorUNet()
discriminator = Discriminator()
if cuda:
    generator = generator.cuda()
    discriminator = discriminator.cuda()
    criterion_GAN.cuda()
    criterion_pixelwise.cuda()
if opt.epoch != 0:
    # Load pretrained models
    generator.load_state_dict(torch.load('saved_models/%s/generator_%d.pth' % (opt.dataset_name, opt.epoch)))
    discriminator.load_state_dict(torch.load('saved_models/%s/discriminator_%d.pth' % (opt.dataset_name, opt.epoch)))
else:
    # Initialize weights
    generator.apply(weights_init_normal)
    discriminator.apply(weights_init_normal)
# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
# Configure dataloaders
# Images are resized, converted to tensors and normalized to [-1, 1].
transforms_ = [ transforms.Resize((opt.img_height, opt.img_width), Image.BICUBIC),
                transforms.ToTensor(),
                transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) ]
dataloader = DataLoader(ImageDataset("../../data/%s" % opt.dataset_name, transforms_=transforms_),
                        batch_size=opt.batch_size, shuffle=True, num_workers=opt.n_cpu)
val_dataloader = DataLoader(ImageDataset("../../data/%s" % opt.dataset_name, transforms_=transforms_, mode='val'),
                            batch_size=10, shuffle=True, num_workers=1)
# Tensor type
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
def sample_images(batches_done):
    """Saves a generated sample from the validation set"""
    imgs = next(iter(val_dataloader))
    # Note the direction: 'B' is the generator input, 'A' the target domain.
    real_A = Variable(imgs['B'].type(Tensor))
    real_B = Variable(imgs['A'].type(Tensor))
    fake_B = generator(real_A)
    # Stack input / prediction / ground truth vertically in one image grid.
    img_sample = torch.cat((real_A.data, fake_B.data, real_B.data), -2)
    save_image(img_sample, 'images/%s/%s.png' % (opt.dataset_name, batches_done), nrow=5, normalize=True)
# ----------
#  Training
# ----------
prev_time = time.time()
for epoch in range(opt.epoch, opt.n_epochs):
    for i, batch in enumerate(dataloader):
        # Model inputs ('B' half is the condition, 'A' half the target).
        real_A = Variable(batch['B'].type(Tensor))
        real_B = Variable(batch['A'].type(Tensor))
        # Adversarial ground truths (one value per PatchGAN output patch)
        valid = Variable(Tensor(np.ones((real_A.size(0), *patch))), requires_grad=False)
        fake = Variable(Tensor(np.zeros((real_A.size(0), *patch))), requires_grad=False)
        # ------------------
        #  Train Generators
        # ------------------
        optimizer_G.zero_grad()
        # GAN loss
        fake_B = generator(real_A)
        pred_fake = discriminator(fake_B, real_A)
        loss_GAN = criterion_GAN(pred_fake, valid)
        # Pixel-wise loss
        loss_pixel = criterion_pixelwise(fake_B, real_B)
        # Total loss
        loss_G = loss_GAN + lambda_pixel * loss_pixel
        # Random Gradient: scale the generator loss by U(0,1) each step.
        loss_G = loss_G * random.random()
        loss_G.backward()
        optimizer_G.step()
        # ---------------------
        #  Train Discriminator
        # ---------------------
        optimizer_D.zero_grad()
        # Real loss
        pred_real = discriminator(real_B, real_A)
        loss_real = criterion_GAN(pred_real, valid)
        # Fake loss (fake_B detached so only D receives gradients here)
        pred_fake = discriminator(fake_B.detach(), real_A)
        loss_fake = criterion_GAN(pred_fake, fake)
        # Total loss
        loss_D = 0.5 * (loss_real + loss_fake)
        loss_D.backward()
        optimizer_D.step()
        # --------------
        #  Log Progress
        # --------------
        # Determine approximate time left (per-batch time x remaining batches)
        batches_done = epoch * len(dataloader) + i
        batches_left = opt.n_epochs * len(dataloader) - batches_done
        time_left = datetime.timedelta(seconds=batches_left * (time.time() - prev_time))
        prev_time = time.time()
        # Print log
        sys.stdout.write("\r[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f, pixel: %f, adv: %f] ETA: %s" %
                         (epoch, opt.n_epochs,
                          i, len(dataloader),
                          loss_D.item(), loss_G.item(),
                          loss_pixel.item(), loss_GAN.item(),
                          time_left))
        # If at sample interval save image
        if batches_done % opt.sample_interval == 0:
            sample_images(batches_done)
    if opt.checkpoint_interval != -1 and epoch % opt.checkpoint_interval == 0:
        # Save model checkpoints
        torch.save(generator.state_dict(), 'saved_models/%s/generator_%d.pth' % (opt.dataset_name, epoch))
        torch.save(discriminator.state_dict(), 'saved_models/%s/discriminator_%d.pth' % (opt.dataset_name, epoch))
| 7,224 | 36.827225 | 123 | py |
RG | RG-master/pix2pix/datasets.py | import glob
import random
import os
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
import torchvision.transforms as transforms
class ImageDataset(Dataset):
    """Paired image dataset: each file holds the target (left half) and the
    input (right half) side by side; halves are split and jointly augmented."""

    def __init__(self, root, transforms_=None, mode='train'):
        self.transform = transforms.Compose(transforms_)
        self.files = sorted(glob.glob(os.path.join(root, mode) + '/*.*'))
        if mode == 'train':
            # Training additionally folds in the test split.
            self.files.extend(sorted(glob.glob(os.path.join(root, 'test') + '/*.*')))

    def __getitem__(self, index):
        path = self.files[index % len(self.files)]
        img = Image.open(path)
        w, h = img.size
        # Left half = domain A, right half = domain B.
        img_A = img.crop((0, 0, w/2, h))
        img_B = img.crop((w/2, 0, w, h))
        if np.random.random() < 0.5:
            # Joint horizontal flip of both halves.
            img_A = Image.fromarray(np.array(img_A)[:, ::-1, :], 'RGB')
            img_B = Image.fromarray(np.array(img_B)[:, ::-1, :], 'RGB')
        return {'A': self.transform(img_A), 'B': self.transform(img_B)}

    def __len__(self):
        return len(self.files)
| 1,056 | 28.361111 | 85 | py |
RG | RG-master/pix2pix/models.py | import torch.nn as nn
import torch.nn.functional as F
import torch
def weights_init_normal(m):
    """Init hook for Module.apply(): N(0, 0.02) conv weights; N(1, 0.02)
    BatchNorm2d gains with zeroed biases; other modules untouched."""
    name = m.__class__.__name__
    if 'Conv' in name:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm2d' in name:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
##############################
# U-NET
##############################
class UNetDown(nn.Module):
    """Encoder stage: stride-2 conv, optional InstanceNorm, LeakyReLU,
    optional dropout; halves the spatial resolution."""

    def __init__(self, in_size, out_size, normalize=True, dropout=0.0):
        super(UNetDown, self).__init__()
        stages = [nn.Conv2d(in_size, out_size, 4, 2, 1, bias=False)]
        if normalize:
            stages.append(nn.InstanceNorm2d(out_size))
        stages.append(nn.LeakyReLU(0.2))
        if dropout:
            stages.append(nn.Dropout(dropout))
        self.model = nn.Sequential(*stages)

    def forward(self, x):
        return self.model(x)
class UNetUp(nn.Module):
    """Decoder stage: stride-2 transposed conv + InstanceNorm + ReLU
    (+ optional dropout), then channel-concatenation with the skip tensor."""

    def __init__(self, in_size, out_size, dropout=0.0):
        super(UNetUp, self).__init__()
        stages = [
            nn.ConvTranspose2d(in_size, out_size, 4, 2, 1, bias=False),
            nn.InstanceNorm2d(out_size),
            nn.ReLU(inplace=True),
        ]
        if dropout:
            stages.append(nn.Dropout(dropout))
        self.model = nn.Sequential(*stages)

    def forward(self, x, skip_input):
        upsampled = self.model(x)
        return torch.cat((upsampled, skip_input), 1)
class GeneratorUNet(nn.Module):
    """U-Net generator: 8 downsampling stages to a 1x1 bottleneck, 7
    skip-connected upsampling stages, and an upsample+conv+tanh output head."""

    def __init__(self, in_channels=3, out_channels=3):
        super(GeneratorUNet, self).__init__()
        # Encoder (each stage halves the spatial resolution).
        self.down1 = UNetDown(in_channels, 64, normalize=False)
        self.down2 = UNetDown(64, 128)
        self.down3 = UNetDown(128, 256)
        self.down4 = UNetDown(256, 512, dropout=0.5)
        self.down5 = UNetDown(512, 512, dropout=0.5)
        self.down6 = UNetDown(512, 512, dropout=0.5)
        self.down7 = UNetDown(512, 512, dropout=0.5)
        self.down8 = UNetDown(512, 512, normalize=False, dropout=0.5)
        # Decoder (input channels double because of skip concatenation).
        self.up1 = UNetUp(512, 512, dropout=0.5)
        self.up2 = UNetUp(1024, 512, dropout=0.5)
        self.up3 = UNetUp(1024, 512, dropout=0.5)
        self.up4 = UNetUp(1024, 512, dropout=0.5)
        self.up5 = UNetUp(1024, 256)
        self.up6 = UNetUp(512, 128)
        self.up7 = UNetUp(256, 64)
        self.final = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.ZeroPad2d((1, 0, 1, 0)),
            nn.Conv2d(128, out_channels, 4, padding=1),
            nn.Tanh()
        )

    def forward(self, x):
        # Encode, remembering each stage's output for the skip connections.
        feats = [x]
        for down in (self.down1, self.down2, self.down3, self.down4,
                     self.down5, self.down6, self.down7, self.down8):
            feats.append(down(feats[-1]))
        # Decode: up-k consumes the previous decoder output plus skip d(8-k).
        out = feats[8]
        for k, up in enumerate((self.up1, self.up2, self.up3, self.up4,
                                self.up5, self.up6, self.up7)):
            out = up(out, feats[7 - k])
        return self.final(out)
##############################
# Discriminator
##############################
class Discriminator(nn.Module):
    """PatchGAN discriminator over a (condition, image) pair stacked on the
    channel dimension; outputs one logit per receptive-field patch."""

    def __init__(self, in_channels=3):
        super(Discriminator, self).__init__()

        def discriminator_block(in_filters, out_filters, normalization=True):
            """Returns downsampling layers of each discriminator block"""
            block = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]
            if normalization:
                block.append(nn.InstanceNorm2d(out_filters))
            block.append(nn.LeakyReLU(0.2, inplace=True))
            return block

        self.model = nn.Sequential(
            *discriminator_block(in_channels * 2, 64, normalization=False),
            *discriminator_block(64, 128),
            *discriminator_block(128, 256),
            *discriminator_block(256, 512),
            nn.ZeroPad2d((1, 0, 1, 0)),
            nn.Conv2d(512, 1, 4, padding=1, bias=False)
        )

    def forward(self, img_A, img_B):
        # Concatenate image and condition image by channels to produce input
        stacked = torch.cat((img_A, img_B), 1)
        return self.model(stacked)
| 4,289 | 32.515625 | 81 | py |
RG | RG-master/Semantic Segmentation/model.py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch
import numpy as np
affine_par = True
import torch.nn.functional as F
def outS(i):
    """Map an input spatial size through three halving stages (the middle one
    with ceil rounding) to the corresponding output-map size."""
    size = int(i)
    size = (size + 1) / 2
    size = int(np.ceil((size + 1) / 2.0))
    size = (size + 1) / 2
    return size
def conv3x3(in_planes, out_planes, stride=1):
    "3x3 convolution with padding"
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Standard two-conv residual block (3x3 + 3x3) with an optional
    downsample path on the identity branch; BN affinity follows affine_par."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, affine=affine_par)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, affine=affine_par)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += identity
        return self.relu(out)
class Bottleneck(nn.Module):
    # DeepLab bottleneck: 1x1 (carries the stride) -> dilated 3x3 -> 1x1 (x4
    # channels). All BatchNorm parameters are frozen (requires_grad = False),
    # i.e. pretrained BN scales/shifts are kept fixed during fine-tuning.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)  # change
        self.bn1 = nn.BatchNorm2d(planes,affine = affine_par)
        for i in self.bn1.parameters():
            i.requires_grad = False
        # padding equals dilation so the dilated 3x3 conv preserves spatial size
        padding = dilation
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change
                               padding=padding, bias=False, dilation = dilation)
        self.bn2 = nn.BatchNorm2d(planes,affine = affine_par)
        for i in self.bn2.parameters():
            i.requires_grad = False
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4, affine = affine_par)
        for i in self.bn3.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        # Residual (identity or projected) is added before the final ReLU.
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Classifier_Module(nn.Module):
    """ASPP-style head: parallel dilated 3x3 convs over a 2048-channel
    feature map, summed into one num_classes-channel logit map."""

    def __init__(self, dilation_series, padding_series, num_classes):
        super(Classifier_Module, self).__init__()
        self.conv2d_list = nn.ModuleList()
        for dilation, padding in zip(dilation_series, padding_series):
            branch = nn.Conv2d(2048, num_classes, kernel_size=3, stride=1,
                               padding=padding, dilation=dilation, bias=True)
            self.conv2d_list.append(branch)
        # N(0, 0.01) weight init for every branch.
        for m in self.conv2d_list:
            m.weight.data.normal_(0, 0.01)

    def forward(self, x):
        out = self.conv2d_list[0](x)
        for branch in self.conv2d_list[1:]:
            out = out + branch(x)
        return out
class Residual_Covolution(nn.Module):
    """Residual refinement unit: produces a segmentation map and a residually
    refined copy of the input feature map."""

    def __init__(self, icol, ocol, num_classes):
        super(Residual_Covolution, self).__init__()
        self.conv1 = nn.Conv2d(icol, ocol, kernel_size=3, stride=1, padding=12, dilation=12, bias=True)
        self.conv2 = nn.Conv2d(ocol, num_classes, kernel_size=3, stride=1, padding=12, dilation=12, bias=True)
        self.conv3 = nn.Conv2d(num_classes, ocol, kernel_size=1, stride=1, padding=0, dilation=1, bias=True)
        self.conv4 = nn.Conv2d(ocol, icol, kernel_size=1, stride=1, padding=0, dilation=1, bias=True)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        reduced = self.relu(self.conv1(x))          # icol -> ocol, dilated
        seg = self.conv2(reduced)                   # segmentation logits
        expanded = reduced + self.relu(self.conv3(seg))
        refined = x + self.relu(self.conv4(expanded))  # residual back to icol
        return refined, seg
class Residual_Refinement_Module(nn.Module):
    """Two chained Residual_Covolution stages; returns [coarse segmentation,
    coarse + second-stage refinement]."""

    def __init__(self, num_classes):
        super(Residual_Refinement_Module, self).__init__()
        self.RC1 = Residual_Covolution(2048, 512, num_classes)
        self.RC2 = Residual_Covolution(2048, 512, num_classes)

    def forward(self, x):
        refined, seg1 = self.RC1(x)
        _, seg2 = self.RC2(refined)
        return [seg1, seg1 + seg2]
class ResNet_Refine(nn.Module):
    # ResNet backbone with stride-1 dilated layers 3/4 (output stride 8) and a
    # Residual_Refinement_Module head. Stem/downsample BN params are frozen.
    def __init__(self, block, layers, num_classes):
        self.inplanes = 64
        super(ResNet_Refine, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, affine = affine_par)
        for i in self.bn1.parameters():
            i.requires_grad = False  # freeze stem BN
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        self.layer5 = Residual_Refinement_Module(num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels  # NOTE(review): n is unused
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        #        for i in m.parameters():
        #            i.requires_grad = False
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        # A projection shortcut is built when stride/channels change or the
        # stage is dilated (2 or 4); its BN parameters are frozen as well.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion,affine = affine_par))
            for i in downsample._modules['1'].parameters():
                i.requires_grad = False
        layers = []
        layers.append(block(self.inplanes, planes, stride,dilation=dilation, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)
    def forward(self, x):
        x_size = x.size()
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        # NOTE(review): layer5 (Residual_Refinement_Module) returns a *list* of
        # two seg maps, but F.upsample expects a tensor -- confirm this path is
        # ever exercised by callers.
        interp = F.upsample(x, x_size[2:], mode='bilinear')
        return interp
class ResNet(nn.Module):
    # DeepLab backbone: ResNet with stride-1 dilated layers 3/4 (output stride
    # 8) and an ASPP-style Classifier_Module head; logits are bilinearly
    # upsampled back to the input resolution.
    def __init__(self, block, layers, num_classes):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, affine = affine_par)
        for i in self.bn1.parameters():
            i.requires_grad = False  # freeze stem BN
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
        self.layer5 = self._make_pred_layer(Classifier_Module, [6,12,18,24],[6,12,18,24],num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels  # NOTE(review): n is unused
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        #        for i in m.parameters():
        #            i.requires_grad = False
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        # A projection shortcut is built when stride/channels change or the
        # stage is dilated (2 or 4); its BN parameters are frozen as well.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion,affine = affine_par))
            for i in downsample._modules['1'].parameters():
                i.requires_grad = False
        layers = []
        layers.append(block(self.inplanes, planes, stride,dilation=dilation, downsample=downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation))
        return nn.Sequential(*layers)
    def _make_pred_layer(self,block, dilation_series, padding_series,num_classes):
        # Instantiate the classification head (Classifier_Module).
        return block(dilation_series,padding_series,num_classes)
    def forward(self, x):
        x_size = x.size()
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        # Bilinearly upsample logits back to the input spatial size.
        interp = F.upsample(x, x_size[2:], mode='bilinear')
        return interp
class MS_Deeplab(nn.Module):
    # Multi-scale wrapper: runs the shared backbone at 1.0x, 0.75x and 0.5x
    # input scales and also returns the element-wise max fusion of all scales.
    def __init__(self,block,num_classes):
        super(MS_Deeplab,self).__init__()
        self.Scale = ResNet(block,[3, 4, 23, 3],num_classes) #changed to fix #4
    def forward(self,x):
        output = self.Scale(x) # for original scale
        output_size = output.size()[2]
        input_size = x.size()[2]
        # NOTE(review): these Upsample modules are rebuilt on every forward
        # call; they are parameter-free so this only costs allocations, but
        # they could be hoisted to __init__ if input sizes were fixed.
        self.interp1 = nn.Upsample(size=(int(input_size*0.75)+1, int(input_size*0.75)+1), mode='bilinear')
        self.interp2 = nn.Upsample(size=(int(input_size*0.5)+1, int(input_size*0.5)+1), mode='bilinear')
        self.interp3 = nn.Upsample(size=(output_size, output_size), mode='bilinear')
        x75 = self.interp1(x)
        output75 = self.interp3(self.Scale(x75)) # for 0.75x scale
        x5 = self.interp2(x)
        output5 = self.interp3(self.Scale(x5)) # for 0.5x scale
        out_max = torch.max(torch.max(output, output75), output5)
        return [output, output75, output5, out_max]
def Res_Ms_Deeplab(num_classes=21):
    """Multi-scale DeepLab (ResNet-101-style backbone, 1.0/0.75/0.5x scales)."""
    return MS_Deeplab(Bottleneck, num_classes)
def Res_Deeplab(num_classes=21, is_refine=False):
    """Single-scale DeepLab; is_refine=True selects the residual-refinement head."""
    backbone = ResNet_Refine if is_refine else ResNet
    return backbone(Bottleneck, [3, 4, 23, 3], num_classes)
| 11,339 | 36.549669 | 139 | py |
RG | RG-master/Semantic Segmentation/train.py | import datetime
import os
import random
import time
from math import sqrt
import torchvision.transforms as standard_transforms
import torchvision.utils as vutils
# from tensorboard import SummaryWriter
from torch import optim
from torch.autograd import Variable
from torch.backends import cudnn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
import utils.joint_transforms as joint_transforms
import utils.transforms as extended_transforms
from datasets import voc
from models import *
from utils import check_mkdir, evaluate, AverageMeter, CrossEntropyLoss2d
from tqdm import tqdm as tqdm
cudnn.benchmark = True
from torchvision.transforms import *
from torchvision.transforms import ToTensor, ToPILImage
# Checkpoint layout: <ckpt_path>/<exp_name>/<snapshot files>.
ckpt_path = './ckpt'
exp_name = 'RSPPNET'
# Hyper-parameters / bookkeeping shared by main(), train() and validate().
args = {
    'epoch_num': 200,
    'lr': 0.0001,
    'weight_decay': 0.0005,
    'momentum': 0.9,
    'lr_patience': 100,  # large patience denotes fixed lr
    'snapshot': '',  # empty string denotes learning from scratch
    'print_freq': 1,
    'val_save_to_img_file': False,
    'val_img_sample_rate': 0.1  # randomly sample some validation results to display
}
def lr_poly(base_lr, iter, max_iter=200, power=0.9):
    """Polynomial LR decay over max_iter iterations (iter is 0-based)."""
    # `iter` kept as a name for interface compatibility despite shadowing the builtin.
    frac = 1 - float(iter) / max_iter
    return base_lr * frac ** power
def adjust_learning_rate(optimizer, i_iter, net, train_args):
    """Apply the poly-decayed learning rate for this epoch to `optimizer` in place.

    Bug fix: the previous version constructed a brand-new optim.SGD bound to a
    local name and discarded it, so the optimizer actually used by the training
    loop never saw the decayed LR. The existing optimizer's param groups are
    now updated directly (matching the image-classification script).
    `net` and `train_args` are kept for interface compatibility.
    """
    lr = lr_poly(0.0001, i_iter)
    print('current lr: ', lr)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
max_label = 20  # highest class id considered (labels 0..max_label)

def get_iou(pred, gt):
    """Mean IoU over the classes actually present in gt."""
    if pred.shape != gt.shape:
        print('pred shape', pred.shape, 'gt shape', gt.shape)
    assert pred.shape == gt.shape
    gt = gt.astype(np.float32)
    pred = pred.astype(np.float32)
    count = np.zeros((max_label + 1,))
    for label in range(max_label + 1):
        rows, cols = np.where(pred == label)
        pred_cells = set(zip(rows.tolist(), cols.tolist()))
        rows, cols = np.where(gt == label)
        gt_cells = set(zip(rows.tolist(), cols.tolist()))
        # Classes absent from gt contribute 0 and are excluded by the divisor.
        if len(gt_cells) != 0:
            inter = set.intersection(pred_cells, gt_cells)
            union = set.union(pred_cells, gt_cells)
            count[label] = float(len(inter)) / float(len(union))
    return np.sum(count[:]) / float(len(np.unique(gt)))
def main(train_args):
    # Build PSPNet and either start fresh or resume from the snapshot whose
    # filename encodes the epoch and the best validation metrics.
    net = PSPNet(num_classes=voc.num_classes).cuda()
    if len(train_args['snapshot']) == 0:
        curr_epoch = 1
        train_args['best_record'] = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0, 'fwavacc': 0}
    else:
        print('training resumes from ' + train_args['snapshot'])
        net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, train_args['snapshot'])))
        # Snapshot name layout: epoch_E_loss_L_acc_A_acc-cls_C_mean-iu_M_fwavacc_F
        split_snapshot = train_args['snapshot'].split('_')
        curr_epoch = int(split_snapshot[1]) + 1
        train_args['best_record'] = {'epoch': int(split_snapshot[1]), 'val_loss': float(split_snapshot[3]),
                                     'acc': float(split_snapshot[5]), 'acc_cls': float(split_snapshot[7]),
                                     'mean_iu': float(split_snapshot[9]), 'fwavacc': float(split_snapshot[11])}
    # ImageNet channel statistics used for input normalization / restoration.
    mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    input_transform = standard_transforms.Compose([
        ToTensor(),
        Normalize([.485, .456, .406], [.229, .224, .225]),
    ])
    joint_transform = joint_transforms.Compose([
        joint_transforms.CenterCrop(224),
        # joint_transforms.Scale(2),
        joint_transforms.RandomHorizontallyFlip(),
    ])
    target_transform = standard_transforms.Compose([
        extended_transforms.MaskToTensor(),
    ])
    restore_transform = standard_transforms.Compose([
        extended_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage(),
    ])
    visualize = standard_transforms.Compose([
        standard_transforms.Scale(400),
        standard_transforms.CenterCrop(400),
        standard_transforms.ToTensor()
    ])
    val_input_transform = standard_transforms.Compose([
        CenterCrop(224),
        ToTensor(),
        Normalize([.485, .456, .406], [.229, .224, .225]),
    ])
    val_target_transform = standard_transforms.Compose([
        CenterCrop(224),
        extended_transforms.MaskToTensor(),
    ])
    train_set = voc.VOC('train', transform=input_transform, target_transform=target_transform, joint_transform=joint_transform)
    train_loader = DataLoader(train_set, batch_size=4, num_workers=4, shuffle=True)
    val_set = voc.VOC('val', transform=val_input_transform, target_transform=val_target_transform)
    val_loader = DataLoader(val_set, batch_size=4, num_workers=4, shuffle=False)
    # criterion = CrossEntropyLoss2d(size_average=True, ignore_index=voc.ignore_label).cuda()
    criterion = torch.nn.CrossEntropyLoss(ignore_index=voc.ignore_label).cuda()
    optimizer = optim.SGD(net.parameters(),lr=train_args['lr'], momentum=train_args['momentum'],weight_decay=train_args['weight_decay'])
    check_mkdir(ckpt_path)
    check_mkdir(os.path.join(ckpt_path, exp_name))
    # open(os.path.join(ckpt_path, exp_name, 'loss_001_aux_SGD_momentum_95_random_lr_001.txt'), 'w').write(str(train_args) + '\n\n')
    # Epoch loop: train, validate, then decay the LR for the next epoch.
    for epoch in range(curr_epoch, train_args['epoch_num'] + 1):
        # adjust_learning_rate(optimizer,epoch,net,train_args)
        train(train_loader, net, criterion, optimizer, epoch, train_args)
        validate(val_loader, net, criterion, optimizer, epoch, train_args, restore_transform, visualize)
        adjust_learning_rate(optimizer,epoch,net,train_args)
        # scheduler.step(val_loss)
def train(train_loader, net, criterion, optimizer, epoch, train_args):
    """One training epoch for PSPNet with auxiliary loss and Random Gradient.

    With probability 0.5 the combined loss (main + 0.4 * auxiliary) is scaled
    by an independent U(0,1) draw before backprop ("Random Gradient").
    Fix: the two coin-flip branches were exact duplicates differing only in
    that scaling; they are merged so the optimization step exists once.
    """
    net.train()
    train_loss = AverageMeter()
    curr_iter = (epoch - 1) * len(train_loader)
    for i, data in enumerate(train_loader):
        inputs, labels = data
        assert inputs.size()[2:] == labels.size()[1:]
        N = inputs.size(0)
        inputs = Variable(inputs).cuda()
        labels = Variable(labels).cuda()
        random_number = random.random()
        optimizer.zero_grad()
        outputs, aux_logits = net(inputs)
        assert outputs.size()[2:] == labels.size()[1:]
        assert outputs.size()[1] == voc.num_classes
        loss_1 = criterion(outputs, labels)
        loss_2 = criterion(aux_logits, labels)
        loss = loss_1 + 0.4 * loss_2
        if random_number > 0.5:
            # Random Gradient: rescale the whole loss by a fresh U(0,1) draw.
            loss = loss * random.random()
        loss.backward()
        optimizer.step()
        # NOTE(review): .data[0] is pre-0.4 PyTorch; use .item() on modern versions.
        train_loss.update(loss.data[0], N)
        curr_iter += 1
        # writer.add_scalar('train_loss', train_loss.avg, curr_iter)
        if i % train_args['print_freq'] == 0:
            print('[epoch %d], [iter %d / %d], [train loss %.5f],[N: %d]' % (
                epoch, i + 1, len(train_loader), train_loss.avg, N
            ))
def validate(val_loader, net, criterion, optimizer, epoch, train_args, restore, visualize):
    """Evaluate on the validation loader, update the best mean-IoU record,
    and print a summary for this epoch."""
    net.eval()
    global best_acc
    val_loss = AverageMeter()
    inputs_all, gts_all, predictions_all = [], [], []
    for vi, batch in tqdm(enumerate(val_loader)):
        images, gts = batch
        sample_count = images.size(0)
        images = Variable(images, volatile=True).cuda()
        gts = Variable(gts, volatile=True).cuda()
        outputs = net(images)
        # Per-pixel argmax over the class dimension for this batch.
        predictions = outputs.data.max(1)[1].squeeze_(1).squeeze_(0).cpu().numpy()
        val_loss.update(criterion(outputs, gts).data[0], sample_count)
        gts_all.append(gts.data.squeeze_(0).cpu().numpy())
        predictions_all.append(predictions)
    acc, acc_cls, mean_iu, fwavacc = evaluate(predictions_all, gts_all, voc.num_classes)
    if mean_iu > train_args['best_record']['mean_iu']:
        record = train_args['best_record']
        record['val_loss'] = val_loss.avg
        record['epoch'] = epoch
        record['acc'] = acc
        record['acc_cls'] = acc_cls
        record['mean_iu'] = mean_iu
        record['fwavacc'] = fwavacc
        snapshot_name = 'epoch_%d_loss_%.5f_acc_%.5f_acc-cls_%.5f_mean-iu_%.5f_fwavacc_%.5f' % (
            epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc
        )
        open(os.path.join(ckpt_path, exp_name, 'loss_0001_dilation_aux_SGD_momentum_090_PSPNet_L3.txt'), 'a').write(str(epoch) + '_' + str(mean_iu) + ',')
    print('--------------------------------------------------------------------')
    print('[epoch %d], [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f]' % (
        epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc))
    best = train_args['best_record']
    print('best record: [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f], [epoch %d]' % (
        best['val_loss'], best['acc'], best['acc_cls'],
        best['mean_iu'], best['fwavacc'], best['epoch']))
    print('--------------------------------------------------------------------')
# Script entry point: run training with the module-level CLI arguments.
if __name__ == '__main__':
    main(args)
| 9,955 | 38.19685 | 219 | py |
CD-Flow | CD-Flow-main/main.py | import torch
from trainnet import trainNet
import pandas as pd
import argparse
def parse_config():
    """Parse the command-line options controlling training, data layout, and
    code snapshotting.  Defaults are identical to the original flag list."""
    parser = argparse.ArgumentParser()
    # (flag, type, default) — registered in the original declaration order.
    specs = [
        ("--seed", int, 100),
        ("--resume_path", str, None),
        ("--learning_rate", float, 1e-5),
        ("--scheduler_step", int, 5),
        ("--scheduler_gamma", float, 0.5),
        ("--batch_size_train", int, 4),
        ("--batch_size_test", int, 4),
        ("--n_epochs", int, 50),
        ("--training_datadir", str, ''),
        ("--colorspace", str, 'rgb'),
        ("--trainpath1", str, 'trainnet.py'),
        ("--trainpath2", str, 'main.py'),
        ("--trainpath3", str, 'model.py'),
        ("--trainpath4", str, 'DataLoader.py'),
        ("--work_path", str, 'work_dir'),
        ("--datapath", str, 'data'),
        ("--trainset", str, 'train.csv'),
        ("--valset", str, 'val.csv'),
        ("--testset", str, 'test.csv'),
        ("--test_aligned_path", str, None),
        ("--test_notaligned_path", str, None),
    ]
    for flag, kind, default in specs:
        parser.add_argument(flag, type=kind, default=default)
    return parser.parse_args()
# Script entry point: run a single train/evaluate cycle (split index 0, which
# maps to directory "1") and persist the resulting correlation metrics.
if __name__ == '__main__':
    config = parse_config()
    # Base data directory; per-split paths are derived from it below.
    path = config.datapath
    modelprediction = pd.DataFrame(columns=['no'])
    modelprediction_aligned = pd.DataFrame(columns=['no'])
    modelprediction_notaligned = pd.DataFrame(columns=['no'])
    work_path = config.work_path
    trainpath = config.trainset
    valpath = config.valset
    testpath = config.testset
    performance = pd.DataFrame(columns=['stress', 'plcc', 'srcc', 'stress_aligned', 'plcc_aligned', 'srcc_aligned', 'stress_notaligned', 'plcc_notaligned', 'srcc_notaligned'])
    torch.cuda.empty_cache()
    # Only split i == 0 is processed; its files live under "<path>/1/".
    i = 0
    # Rewrite the parsed config in place so trainNet sees split-specific paths.
    config.datapath = path+'/{}.csv'.format(i+1)
    config.work_path = work_path+'/{}'.format(i+1)
    config.trainset = path+'/{}/'.format(i+1)+trainpath
    config.valset = path+'/{}/'.format(i+1)+valpath
    config.testset = path+'/{}/'.format(i+1)+testpath
    config.test_aligned_path = path+'/{}/test_aligned.csv'.format(i+1)
    config.test_notaligned_path = path+'/{}/test_notaligned.csv'.format(i+1)
    # trainNet returns predictions/targets plus (stress, PLCC, SRCC) for the
    # full test set, the pixel-aligned subset, and the non-aligned subset.
    dist1, y_true1, stress1, cc_v1, srocc_v1, dist2, y_true2, stress2, cc_v2, srocc_v2,\
    dist3, y_true3, stress3, cc_v3, srocc_v3 = trainNet(config, i)
    performance.loc['{}'.format(i), 'stress'] = stress1
    performance.loc['{}'.format(i), 'plcc'] = cc_v1
    performance.loc['{}'.format(i), 'srcc'] = srocc_v1
    performance.loc['{}'.format(i), 'stress_aligned'] = stress2
    performance.loc['{}'.format(i), 'plcc_aligned'] = cc_v2
    performance.loc['{}'.format(i), 'srcc_aligned'] = srocc_v2
    performance.loc['{}'.format(i), 'stress_notaligned'] = stress3
    performance.loc['{}'.format(i), 'plcc_notaligned'] = cc_v3
    performance.loc['{}'.format(i), 'srcc_notaligned'] = srocc_v3
    performance.to_csv(config.work_path + '/modelperformance.csv', index=None)
| 3,377 | 48.676471 | 175 | py |
CD-Flow | CD-Flow-main/test.py | import time
from EMA import EMA
import torch
from torch.utils.data import DataLoader
from model import CDFlow
from DataLoader import CD_128
from coeff_func import *
import os
from loss import createLossAndOptimizer
from torch.autograd import Variable
import torchvision
import torch.autograd as autograd
from function import setup_seed, copy_codes
import argparse
def parse_config():
    """Parse the evaluation-only command-line options (batch size, workspace,
    and dataset/CSV locations).  Defaults match the original flag list."""
    parser = argparse.ArgumentParser()
    # (flag, type, default) — registered in the original declaration order.
    for flag, kind, default in [
        ("--batch_size_test", int, 4),
        ("--work_path", str, 'work_dir'),
        ("--datapath", str, 'data'),
        ("--dataset", str, ''),
        ("--testset", str, 'test.csv'),
        ("--test_aligned_path", str, None),
        ("--test_notaligned_path", str, None),
    ]:
        parser.add_argument(flag, type=kind, default=default)
    return parser.parse_args()
def test(data_val_loader, net):
    """Score every (reference, test-image, ground-truth) triple in the loader
    and return (SRCC, PLCC, STRESS, predictions, targets)."""
    predictions, targets = [], []
    for _, batch in enumerate(data_val_loader, 0):
        with torch.no_grad():
            x, y, gts = batch
            gt_values = gts.numpy()
            x = Variable(x).cuda()
            y = Variable(y).cuda()
            gts = Variable(gts).cuda()
            # The network returns a 10-tuple; only the first element (the
            # full-concatenation score) is needed here.
            score = net(x, y)[0]
            pred = torch.squeeze(score).cpu().detach().numpy().tolist()
            if isinstance(pred, list):
                predictions.extend(pred)
                targets.extend(gt_values.tolist())
            else:
                # A batch of size 1 squeezes to a scalar.
                predictions.append(np.array(pred))
                targets.append(gt_values)
    pred_arr = np.array(predictions)
    target_arr = np.array(targets).squeeze()
    stress = compute_stress(pred_arr, target_arr)
    _, cc_v, srocc_v, krocc_v, rmse_v = coeff_fit(pred_arr, target_arr)
    return srocc_v, cc_v, stress, predictions, targets
# ---------------------------------------------------------------------------
# Standalone evaluation: load the best checkpoint for split "1" and report
# PLCC / SRCC / STRESS on the full test set and on its pixel-aligned and
# non-pixel-aligned subsets.
# ---------------------------------------------------------------------------
config = parse_config()
path = config.datapath
work_path = config.work_path
testpath = config.testset
# Split index is fixed to 1 here (matches the directory layout of main.py).
workspace = work_path + '/{}'.format(1)
testset = path + '/{}/'.format(1) + testpath
test_aligned_path = path + '/{}/test_aligned.csv'.format(1)
test_notaligned_path = path + '/{}/test_notaligned.csv'.format(1)
datadir = config.dataset
batch_size_test = config.batch_size_test
# Each CSV row: reference filename, test filename, ground-truth score.
test_pairs = np.genfromtxt(open(testset, encoding='UTF-8-sig'), delimiter=',', dtype=str)
test_aligned_pairs = np.genfromtxt(open(test_aligned_path), delimiter=',', dtype=str)
test_notaligned_pairs = np.genfromtxt(open(test_notaligned_path), delimiter=',', dtype=str)
data_test = CD_128(test_pairs[:], root_dir=datadir, test=True)
test_aligned = CD_128(test_aligned_pairs[:], root_dir=datadir, test=True)
test_notaligned = CD_128(test_notaligned_pairs[:], root_dir=datadir, test=True)
data_test_loader = DataLoader(data_test, batch_size=batch_size_test, shuffle=False, pin_memory=True, num_workers=4)
data_test_aligned_loader = DataLoader(test_aligned, batch_size=batch_size_test, shuffle=False, pin_memory=True,
                                      num_workers=4)
data_test_notaligned_loader = DataLoader(test_notaligned, batch_size=batch_size_test, shuffle=False, pin_memory=True,
                                         num_workers=4)
print('#############################################################################')
print("Testing...")
print('#############################################################################')
device = torch.device("cuda")
# The checkpoint was saved from a DataParallel-wrapped model, so the model
# must be wrapped the same way before load_state_dict.
pt = os.path.join(workspace, 'checkpoint_best', 'ModelParams_Best_val.pt')
checkpoint = torch.load(pt)
net = CDFlow().cuda()
net = torch.nn.DataParallel(net).cuda()
net.load_state_dict(checkpoint['state_dict'])
net.eval()
srocc_v1, cc_v1, stress1, dist1, y_true1 = test(data_test_loader, net)
print('All: plcc{}; srcc{}; stress{}'.format(cc_v1, srocc_v1, stress1))
srocc_v2, cc_v2, stress2, dist2, y_true2 = test(data_test_aligned_loader, net)
print('Pixel-wise aligned: plcc{}; srcc{}; stress{}'.format(cc_v2, srocc_v2, stress2))
srocc_v3, cc_v3, stress3, dist3, y_true3 = test(data_test_notaligned_loader, net)
print('Non-Pixel-wise aligned: plcc{}; srcc{}; stress{}'.format(cc_v3, srocc_v3, stress3))
| 4,109 | 41.8125 | 117 | py |
CD-Flow | CD-Flow-main/flow.py | import torch
from torch import nn
from torch.nn import functional as F
from math import log, pi, exp
import numpy as np
from scipy import linalg as la
def logabs(x):
    """Elementwise log(|x|).

    Replaces the former ``logabs = lambda x: ...`` assignment (PEP 8 E731);
    the name and behavior are unchanged for all callers in this module.
    """
    return torch.log(torch.abs(x))
class ActNorm(nn.Module):
    """Activation normalization (Glow): per-channel affine y = scale * (x + loc).

    ``loc`` and ``scale`` are data-dependently initialized from the first batch
    so that post-normalization activations have zero mean and unit variance per
    channel; afterwards they are trained as ordinary parameters.
    """

    def __init__(self, in_channel, logdet=True):
        super().__init__()
        self.loc = nn.Parameter(torch.zeros(1, in_channel, 1, 1))
        self.scale = nn.Parameter(torch.ones(1, in_channel, 1, 1))
        # Buffer (not a parameter): the "already initialized" flag is saved in
        # the state dict but never optimized.
        self.register_buffer("initialized", torch.tensor(0, dtype=torch.uint8))
        self.logdet = logdet

    def initialize(self, input):
        """Set loc/scale from the first batch's per-channel mean and std."""
        with torch.no_grad():
            # Collapse (N, C, H, W) to (C, N*H*W) so statistics are per channel.
            flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
            mean = (
                flatten.mean(1)
                .unsqueeze(1)
                .unsqueeze(2)
                .unsqueeze(3)
                .permute(1, 0, 2, 3)
            )
            std = (
                flatten.std(1)
                .unsqueeze(1)
                .unsqueeze(2)
                .unsqueeze(3)
                .permute(1, 0, 2, 3)
            )
            self.loc.data.copy_(-mean)
            # 1e-6 guards against division by zero on constant channels.
            self.scale.data.copy_(1 / (std + 1e-6))

    def forward(self, input):
        _, _, height, width = input.shape
        # Lazy data-dependent initialization on the very first forward pass.
        if self.initialized.item() == 0:
            self.initialize(input)
            self.initialized.fill_(1)
        log_abs = logabs(self.scale)
        # log|det J| of a per-channel scaling applied at every spatial position.
        logdet = height * width * torch.sum(log_abs)
        if self.logdet:
            return self.scale * (input + self.loc), logdet
        else:
            return self.scale * (input + self.loc)

    def reverse(self, output):
        """Exact inverse of forward (logdet not returned)."""
        return output / self.scale - self.loc
class InvConv2d(nn.Module):
    """Invertible 1x1 convolution (Glow) with an unconstrained weight matrix.

    The kernel is initialized to a random orthogonal matrix so that the
    initial transform is volume-preserving; the log-determinant is computed
    with ``slogdet`` in double precision for numerical stability.
    """

    def __init__(self, in_channel):
        super().__init__()
        weight = torch.randn(in_channel, in_channel)
        # ``torch.qr`` is deprecated; ``torch.linalg.qr`` returns the same
        # reduced Q factor, giving a random orthogonal initial weight.
        q, _ = torch.linalg.qr(weight)
        weight = q.unsqueeze(2).unsqueeze(3)
        self.weight = nn.Parameter(weight)

    def forward(self, input):
        """Apply the 1x1 conv; returns (output, log|det J|)."""
        _, _, height, width = input.shape
        out = F.conv2d(input, self.weight)
        # The same linear map acts at every spatial location, hence the
        # height*width multiplier on the per-location log-determinant.
        logdet = (
            height * width * torch.slogdet(self.weight.squeeze().double())[1].float()
        )
        return out, logdet

    def reverse(self, output):
        """Invert the 1x1 conv by convolving with the inverse kernel."""
        return F.conv2d(
            output, self.weight.squeeze().inverse().unsqueeze(2).unsqueeze(3)
        )
class InvConv2dLU(nn.Module):
    """Invertible 1x1 convolution with an LU-decomposed weight (Glow).

    The kernel is parameterized as W = P @ L @ (U + diag(sign(s) * exp(log|s|)))
    where P is a fixed permutation, L is lower-triangular with unit diagonal and
    U is strictly upper-triangular.  This makes log|det W| = sum(log|s|) cheap.
    """

    def __init__(self, in_channel):
        super().__init__()
        weight = np.random.randn(in_channel, in_channel)
        # Start from a random orthogonal matrix, then take its LU factors.
        q, _ = la.qr(weight)
        w_p, w_l, w_u = la.lu(q.astype(np.float32))
        w_s = np.diag(w_u)
        w_u = np.triu(w_u, 1)
        # Masks keep L strictly lower- and U strictly upper-triangular while training.
        u_mask = np.triu(np.ones_like(w_u), 1)
        l_mask = u_mask.T
        w_p = torch.from_numpy(w_p)
        w_l = torch.from_numpy(w_l)
        w_s = torch.from_numpy(w_s)
        w_u = torch.from_numpy(w_u)
        # Fixed pieces are buffers; the triangular factors and log|s| are trained.
        self.register_buffer("w_p", w_p)
        self.register_buffer("u_mask", torch.from_numpy(u_mask))
        self.register_buffer("l_mask", torch.from_numpy(l_mask))
        self.register_buffer("s_sign", torch.sign(w_s))
        self.register_buffer("l_eye", torch.eye(l_mask.shape[0]))
        self.w_l = nn.Parameter(w_l)
        self.w_s = nn.Parameter(logabs(w_s))
        self.w_u = nn.Parameter(w_u)

    def forward(self, input):
        """Apply the 1x1 conv; returns (output, log|det J|)."""
        _, _, height, width = input.shape
        weight = self.calc_weight()
        out = F.conv2d(input, weight)
        # log|det| of the same linear map applied at every spatial location.
        logdet = height * width * torch.sum(self.w_s)
        return out, logdet

    def calc_weight(self):
        """Recompose the full 1x1 conv kernel from its LU factors."""
        weight = (
            self.w_p
            @ (self.w_l * self.l_mask + self.l_eye)
            @ ((self.w_u * self.u_mask) + torch.diag(self.s_sign * torch.exp(self.w_s)))
        )
        return weight.unsqueeze(2).unsqueeze(3)

    def reverse(self, output):
        """Invert the 1x1 conv by convolving with the inverse kernel."""
        weight = self.calc_weight()
        return F.conv2d(output, weight.squeeze().inverse().unsqueeze(2).unsqueeze(3))
class ZeroConv2d(nn.Module):
    """3x3 convolution whose weights, bias, and output log-scale all start at
    zero, so the module initially outputs zeros (standard Glow prior head).

    The input is padded with the constant value 1 (not zeros) before the
    convolution; the ``padding`` argument is accepted for signature
    compatibility but the pad width is fixed at 1 on every side.
    """

    def __init__(self, in_channel, out_channel, padding=1):
        super().__init__()
        self.conv = nn.Conv2d(in_channel, out_channel, 3, padding=0)
        self.conv.weight.data.zero_()
        self.conv.bias.data.zero_()
        # Learned per-channel log-scale; exp(3 * scale) == 1 at init.
        self.scale = nn.Parameter(torch.zeros(1, out_channel, 1, 1))

    def forward(self, input):
        padded = F.pad(input, [1, 1, 1, 1], value=1)
        return self.conv(padded) * torch.exp(self.scale * 3)
class AffineCoupling(nn.Module):
    """Affine (or additive) coupling layer (Glow).

    The input is split channel-wise into halves (a, b); a small conv net on
    `a` predicts a scale/shift applied to `b`, so the transform is trivially
    invertible and has a cheap log-determinant.
    """

    def __init__(self, in_channel, filter_size=512, affine=True):
        super().__init__()
        self.affine = affine
        self.net = nn.Sequential(
            nn.Conv2d(in_channel // 2, filter_size, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(filter_size, filter_size, 1),
            nn.ReLU(inplace=True),
            # Zero-initialized head: the coupling starts as the identity.
            ZeroConv2d(filter_size, in_channel if self.affine else in_channel // 2),
        )
        self.net[0].weight.data.normal_(0, 0.05)
        self.net[0].bias.data.zero_()
        self.net[2].weight.data.normal_(0, 0.05)
        self.net[2].bias.data.zero_()

    def forward(self, input):
        """Return (coupled output, logdet); logdet is None in additive mode."""
        in_a, in_b = input.chunk(2, 1)
        if self.affine:
            log_s, t = self.net(in_a).chunk(2, 1)
            # ``torch.sigmoid`` replaces the long-deprecated ``F.sigmoid``
            # alias; the +2 shift biases the scale towards 1 at init.
            s = torch.sigmoid(log_s + 2)
            out_b = (in_b + t) * s
            logdet = torch.sum(torch.log(s).view(input.shape[0], -1), 1)
        else:
            net_out = self.net(in_a)
            out_b = in_b + net_out
            logdet = None
        return torch.cat([in_a, out_b], 1), logdet

    def reverse(self, output):
        """Exact inverse of forward: in_b = out_b / s - t (affine mode)."""
        out_a, out_b = output.chunk(2, 1)
        if self.affine:
            log_s, t = self.net(out_a).chunk(2, 1)
            s = torch.sigmoid(log_s + 2)
            in_b = out_b / s - t
        else:
            net_out = self.net(out_a)
            in_b = out_b - net_out
        return torch.cat([out_a, in_b], 1)
class Flow(nn.Module):
    """One Glow flow step: ActNorm -> invertible 1x1 conv -> affine coupling."""

    def __init__(self, in_channel, affine=True, conv_lu=True):
        super().__init__()
        self.actnorm = ActNorm(in_channel)
        # Either the LU-parameterized or the plain invertible 1x1 conv.
        self.invconv = InvConv2dLU(in_channel) if conv_lu else InvConv2d(in_channel)
        self.coupling = AffineCoupling(in_channel, affine=affine)

    def forward(self, input):
        """Apply the three sub-transforms; returns (output, total logdet)."""
        out, logdet = self.actnorm(input)
        out, conv_det = self.invconv(out)
        out, coupling_det = self.coupling(out)
        logdet = logdet + conv_det
        # Additive coupling reports no logdet (None).
        if coupling_det is not None:
            logdet = logdet + coupling_det
        return out, logdet

    def reverse(self, output):
        """Invert the step by undoing the sub-transforms in reverse order."""
        x = self.coupling.reverse(output)
        x = self.invconv.reverse(x)
        return self.actnorm.reverse(x)
def gaussian_log_p(x, mean, log_sd):
    """Elementwise log-density of ``x`` under N(mean, exp(log_sd)^2)."""
    variance = torch.exp(2 * log_sd)
    return -0.5 * log(2 * pi) - log_sd - 0.5 * (x - mean) ** 2 / variance
def gaussian_sample(eps, mean, log_sd):
    """Map standard-normal noise ``eps`` to a sample of N(mean, exp(log_sd)^2)."""
    return torch.exp(log_sd) * eps + mean
class Block(nn.Module):
    """One Glow scale: squeeze 2x2 -> n_flow flow steps -> (optional) split.

    With ``split=True`` half the channels are factored out as a latent ``z``
    under a learned conditional Gaussian prior; with ``split=False`` (the last
    scale) the whole output is modeled by a Gaussian whose parameters come
    from a zero-initialized conv applied to a constant input.
    """

    def __init__(self, in_channel, n_flow, split=True, affine=True, conv_lu=True):
        super().__init__()
        # Squeezing trades each 2x2 spatial block for 4x the channels.
        squeeze_dim = in_channel * 4
        self.flows = nn.ModuleList()
        for i in range(n_flow):
            self.flows.append(Flow(squeeze_dim, affine=affine, conv_lu=conv_lu))
        self.split = split
        if split:
            # Prior conditions on the kept half (2*in_channel channels) and
            # emits mean/log_sd (4*in_channel channels) for the split-off half.
            self.prior = ZeroConv2d(in_channel * 2, in_channel * 4)
        else:
            self.prior = ZeroConv2d(in_channel * 4, in_channel * 8)

    def forward(self, input):
        """Returns (out, logdet, log_p, z_new) for this scale."""
        b_size, n_channel, height, width = input.shape
        # Squeeze: (N, C, H, W) -> (N, 4C, H/2, W/2).
        squeezed = input.view(b_size, n_channel, height // 2, 2, width // 2, 2)
        squeezed = squeezed.permute(0, 1, 3, 5, 2, 4)
        out = squeezed.contiguous().view(b_size, n_channel * 4, height // 2, width // 2)
        logdet = 0
        for flow in self.flows:
            out, det = flow(out)
            logdet = logdet + det
        if self.split:
            out, z_new = out.chunk(2, 1)
            mean, log_sd = self.prior(out).chunk(2, 1)
            log_p = gaussian_log_p(z_new, mean, log_sd)
            log_p = log_p.view(b_size, -1).sum(1)
        else:
            # Unconditional prior: a constant input lets the zero-initialized
            # conv produce learnable, input-independent mean/log_sd maps.
            one = torch.ones_like(out)
            mean, log_sd = self.prior(one).chunk(2, 1)
            log_p = gaussian_log_p(out, mean, log_sd)
            log_p = log_p.view(b_size, -1).sum(1)
            z_new = out
        return out, logdet, log_p, z_new

    def reverse(self, output, eps=None, reconstruct=False):
        """Invert this scale.

        With ``reconstruct=True`` the given ``eps`` is treated as the exact
        latent to re-attach; otherwise ``eps`` is standard-normal noise that is
        first mapped through the learned prior via gaussian_sample.
        """
        input = output
        if reconstruct:
            if self.split:
                input = torch.cat([output, eps], 1)  # channel-wise concat
            else:
                input = eps
        else:
            if self.split:
                mean, log_sd = self.prior(input).chunk(2, 1)
                z = gaussian_sample(eps, mean, log_sd)
                input = torch.cat([output, z], 1)
            else:
                one = torch.ones_like(input)
                mean, log_sd = self.prior(one).chunk(2, 1)
                z = gaussian_sample(eps, mean, log_sd)
                input = z
        for flow in self.flows[::-1]:
            input = flow.reverse(input)
        # Unsqueeze: (N, 4C, H, W) -> (N, C, 2H, 2W), inverse of the squeeze.
        b_size, n_channel, height, width = input.shape
        unsqueezed = input.view(b_size, n_channel // 4, 2, 2, height, width)
        unsqueezed = unsqueezed.permute(0, 1, 4, 2, 5, 3)
        unsqueezed = unsqueezed.contiguous().view(
            b_size, n_channel // 4, height * 2, width * 2
        )
        return unsqueezed
class Glow(nn.Module):
    """Multi-scale Glow: n_block scales, each halving the spatial resolution;
    every scale except the last splits off half its channels as a latent."""

    def __init__(
        self, in_channel, n_flow, n_block, affine=True, conv_lu=True
    ):
        super().__init__()
        self.blocks = nn.ModuleList()
        channels = in_channel
        # Each splitting block doubles the channel count seen by the next one
        # (squeeze x4, split keeps half).
        for _ in range(n_block - 1):
            self.blocks.append(Block(channels, n_flow, affine=affine, conv_lu=conv_lu))
            channels *= 2
        self.blocks.append(Block(channels, n_flow, split=False, affine=affine))

    def forward(self, input):
        """Returns (sum of prior log-probs, total logdet, per-scale latents)."""
        log_p_sum = 0
        logdet = 0
        out = input
        z_outs = []
        for block in self.blocks:
            out, det, log_p, z_new = block(out)
            z_outs.append(z_new)
            logdet = logdet + det
            if log_p is not None:
                log_p_sum = log_p_sum + log_p
        return log_p_sum, logdet, z_outs

    def reverse(self, z_list, reconstruct=True, cd_map=False):
        """Map per-scale latents back to image space, coarsest scale first."""
        input = None
        for i, block in enumerate(self.blocks[::-1]):
            if i == 0:
                # The last (non-split) block consumes its own latent twice:
                # once as the running input, once as eps.
                input = block.reverse(z_list[-1], z_list[-1], reconstruct=reconstruct)
            else:
                input = block.reverse(input, z_list[-(i + 1)], reconstruct=reconstruct)
        return input
| 10,847 | 28.720548 | 88 | py |
CD-Flow | CD-Flow-main/DataLoader.py | import os
import torch
import random
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
from torchvision import transforms
import torchvision
class CD_128(Dataset):
    """Dataset of (reference image, test image, score) triples.

    ``jnd_info`` is an (N, 3) string array: reference filename, test filename,
    ground-truth score.  Training mode applies random rotation/crop
    augmentation; test mode uses a deterministic resize + center crop.  Both
    images of a pair receive the same transform pipeline object.
    """

    def __init__(self, jnd_info, root_dir, test=False):
        self.ref_name = jnd_info[:, 0]
        self.test_name = jnd_info[:, 1]
        self.root_dir = str(root_dir)
        self.gt = jnd_info[:, 2]
        self.test = test
        if not test:
            self.trans_org = transforms.Compose([
                transforms.Resize(1024),
                transforms.RandomRotation(3),
                transforms.RandomCrop(1000),
                transforms.Resize(768),
                transforms.ToTensor(),
            ])
        else:
            self.trans_org = transforms.Compose([
                transforms.Resize(1024),
                transforms.CenterCrop(1024),
                transforms.ToTensor(),
            ])

    def __len__(self):
        return len(self.gt)

    def __getitem__(self, idx):
        score = float(self.gt[idx])
        ref_path = os.path.join(self.root_dir, str(self.ref_name[idx]))
        reference = self.trans_org(Image.open(ref_path).convert("RGB"))
        dist_path = os.path.join(self.root_dir, str(self.test_name[idx]))
        distorted = self.trans_org(Image.open(dist_path).convert("RGB"))
        return reference, distorted, score
| 1,425 | 30 | 81 | py |
CD-Flow | CD-Flow-main/loss.py | import torch
import numpy as np
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
def createLossAndOptimizer(net, learning_rate, scheduler_step, scheduler_gamma):
    """Build the MSE criterion, an Adam optimizer over ``net``'s parameters,
    and a step-decay learning-rate scheduler; returns the three as a tuple."""
    criterion = LossFunc()
    optimizer = optim.Adam(
        [{'params': net.parameters(), 'lr': learning_rate}],
        lr=learning_rate,
        eps=1e-7,
    )
    scheduler = optim.lr_scheduler.StepLR(
        optimizer, step_size=scheduler_step, gamma=scheduler_gamma
    )
    return criterion, optimizer, scheduler
class LossFunc(torch.nn.Module):
    """Mean-squared-error criterion between squeezed predictions and labels."""

    def __init__(self):
        super(LossFunc, self).__init__()

    def mse_loss(self, score, label):
        # Squeeze so an (N, 1) score column compares against an (N,) label.
        residual = torch.squeeze(score) - label
        return torch.mean(residual ** 2)

    def forward(self, score, label):
        return self.mse_loss(score, label)
| 909 | 34 | 119 | py |
CD-Flow | CD-Flow-main/model.py | import math
import time
import torch
import torch.nn as nn
from flow import *
import os
class CDFlow(nn.Module):
    """Color-difference model: embed both images with a shared Glow and score
    them by per-sample RMS distances over nested latent concatenations.

    forward returns a 10-tuple: six RMS scores over cumulative latent
    concatenations (largest [z6..z1] first, smallest [z6] last), followed by
    (log_p_x, logdet_x, log_p_y, logdet_y) from the flow.
    """

    def __init__(self):
        super(CDFlow, self).__init__()
        self.glow = Glow(3, 8, 6, affine=True, conv_lu=True)

    def coordinate_transform(self, x_hat, rev=False):
        """Map an image into (rev=False) or out of (rev=True) the flow's
        latent space; the forward direction also returns log_p and logdet."""
        if not rev:
            log_p, logdet, x_hat = self.glow(x_hat)
            return log_p, logdet, x_hat
        else:
            x_hat = self.glow.reverse(x_hat)
            return x_hat

    @staticmethod
    def _pairwise_rms(x_feat, y_feat):
        """Per-sample RMS distance between two (batch, D) feature matrices.

        Uses the same matmul formulation as the original unrolled code:
        sqrt(1e-8 + <d, d> / D), returned with shape (batch, 1).
        """
        diff = (x_feat - y_feat).view(x_feat.shape[0], -1)
        diff = diff.unsqueeze(1)
        dist = torch.sqrt(1e-8 + torch.matmul(diff, diff.transpose(dim0=-2, dim1=-1)) / diff.shape[2])
        return dist.squeeze(2)

    def forward(self, x, y):
        log_p_x, logdet_x, x_hat = self.coordinate_transform(x, rev=False)
        log_p_y, logdet_y, y_hat = self.coordinate_transform(y, rev=False)
        # Flatten each of the six latent scales to (batch, features).
        x_feats = [z.view(z.shape[0], -1) for z in x_hat]
        y_feats = [z.view(z.shape[0], -1) for z in y_hat]
        # Cumulative concatenations from the coarsest scale (index 5, "z6")
        # back to the finest: [z6], [z6 z5], ..., [z6 ... z1] — exactly the six
        # feature groups the original hand-unrolled code built.
        scores = []
        cat_x = None
        cat_y = None
        for fx, fy in zip(reversed(x_feats), reversed(y_feats)):
            cat_x = fx if cat_x is None else torch.cat((cat_x, fx), dim=1)
            cat_y = fy if cat_y is None else torch.cat((cat_y, fy), dim=1)
            scores.append(self._pairwise_rms(cat_x, cat_y))
        # Original return order: largest concatenation first.
        scores = scores[::-1]
        return (scores[0], scores[1], scores[2], scores[3], scores[4], scores[5],
                log_p_x, logdet_x, log_p_y, logdet_y)
| 3,702 | 47.090909 | 122 | py |
CD-Flow | CD-Flow-main/function.py | import shutil
import random
import torch
import numpy as np
def setup_seed(seed):
    """Seed every RNG in use (Python `random`, NumPy, torch CPU + all GPUs)
    and force deterministic cuDNN kernels for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
def copy_codes(trainpath1, trainpath2, trainpath3, trainpath4, path1, path2, path3, path4):
    """Snapshot the four source files into the work directory so each run
    records the exact code that produced it (byte-for-byte copies)."""
    sources = (trainpath1, trainpath2, trainpath3, trainpath4)
    targets = (path1, path2, path3, path4)
    for src, dst in zip(sources, targets):
        shutil.copyfile(src, dst)
| 484 | 25.944444 | 85 | py |
CD-Flow | CD-Flow-main/trainnet.py | import time
from EMA import EMA
import torch
from torch.utils.data import DataLoader
from model import CDFlow
from DataLoader import CD_128
from coeff_func import *
import os
from loss import createLossAndOptimizer
from torch.autograd import Variable
import torch.autograd as autograd
from function import setup_seed, copy_codes
from math import log
def trainNet(config, times):
    """Train CDFlow for ``config.n_epochs`` epochs and evaluate the best model.

    Sets up the workspace (snapshotting the source files), builds loaders for
    train/val/test plus the aligned/non-aligned test subsets, optionally
    resumes from ``config.resume_path``, trains with an EMA shadow of the
    weights used for validation, keeps the checkpoint with the best validation
    SRCC, and finally reloads that checkpoint to score the three test splits.

    Args:
        config: parsed argparse namespace (paths, hyperparameters).
        times: split index, used only for checkpointing/log messages.

    Returns:
        (dist, y_true, stress, plcc, srcc) for the full test set, the
        pixel-wise-aligned subset, and the non-aligned subset — 15 values.
    """
    resume_path = config.resume_path
    learning_rate = config.learning_rate
    scheduler_step = config.scheduler_step
    scheduler_gamma = config.scheduler_gamma
    batch_size_train = config.batch_size_train
    batch_size_test = config.batch_size_test
    n_epochs = config.n_epochs
    training_datadir = config.training_datadir
    colorspace = config.colorspace
    trainpath1 = config.trainpath1
    trainpath2 = config.trainpath2
    trainpath3 = config.trainpath3
    trainpath4 = config.trainpath4
    workspace = config.work_path
    device = torch.device("cuda")
    # set random seed
    setup_seed(config.seed)
    # Workspace layout: codes/ (source snapshot), checkpoint/ (latest),
    # checkpoint_best/ (best-validation-SRCC model).
    if not os.path.exists(workspace):
        os.mkdir(workspace)
    if not os.path.exists(os.path.join(workspace, 'codes')):
        os.mkdir(os.path.join(workspace, 'codes'))
    if not os.path.exists(os.path.join(workspace, 'checkpoint')):
        os.mkdir(os.path.join(workspace, 'checkpoint'))
    if not os.path.exists(os.path.join(workspace, 'checkpoint_best')):
        os.mkdir(os.path.join(workspace, 'checkpoint_best'))
    copy_codes(trainpath1=trainpath1, trainpath2=trainpath2, trainpath3=trainpath3, trainpath4=trainpath4,
               path1=os.path.join(workspace, 'codes/trainNet.py'), path2=os.path.join(workspace, 'codes/main.py'),
               path3=os.path.join(workspace, 'codes/net.py'), path4=os.path.join(workspace, 'codes/DataLoader.py'))
    print("============ HYPERPARAMETERS ==========")
    print("batch_size_train and test=", batch_size_train, batch_size_test)
    print("epochs=", n_epochs)
    print('learning rate=', learning_rate)
    print('scheduler_step=', scheduler_step)
    print('scheduler_gamma=', scheduler_gamma)
    print('training dir=', training_datadir)
    print('colorspace=', colorspace)
    print(config.trainset)
    print(config.valset)
    print(config.testset)
    print(config.test_aligned_path)
    print(config.test_notaligned_path)
    # CSV rows: reference name, test name, ground-truth score.
    train_pairs = np.genfromtxt(open(config.trainset, encoding='UTF-8-sig'), delimiter=',', dtype=str)
    val_pairs = np.genfromtxt(open(config.valset, encoding='UTF-8-sig'), delimiter=',', dtype=str)
    test_pairs = np.genfromtxt(open(config.testset, encoding='UTF-8-sig'), delimiter=',', dtype=str)
    test_aligned_pairs = np.genfromtxt(open(config.test_aligned_path), delimiter=',', dtype=str)
    test_notaligned_pairs = np.genfromtxt(open(config.test_notaligned_path), delimiter=',', dtype=str)
    data_train = CD_128(train_pairs[:], root_dir=training_datadir, test=False)
    data_val = CD_128(val_pairs[:], root_dir=training_datadir, test=True)
    data_test = CD_128(test_pairs[:], root_dir=training_datadir, test=True)
    test_aligned = CD_128(test_aligned_pairs[:], root_dir=training_datadir, test=True)
    test_notaligned = CD_128(test_notaligned_pairs[:], root_dir=training_datadir, test=True)
    net = CDFlow().to(device)
    net = torch.nn.DataParallel(net)
    net = net.to(device)
    loss, optimizer, scheduler = createLossAndOptimizer(net, learning_rate, scheduler_step, scheduler_gamma)
    data_train_loader = DataLoader(data_train, batch_size=batch_size_train, shuffle=True,
                                   pin_memory=True, num_workers=4)
    data_val_loader = DataLoader(data_val, batch_size=batch_size_test, shuffle=True, pin_memory=True,
                                 num_workers=4)
    data_test_loader = DataLoader(data_test, batch_size=batch_size_test, shuffle=False,
                                  pin_memory=True, num_workers=4)
    data_test_aligned_loader = DataLoader(test_aligned, batch_size=batch_size_test, shuffle=False,
                                          pin_memory=True, num_workers=4)
    data_test_notaligned_loader = DataLoader(test_notaligned, batch_size=batch_size_test,
                                             shuffle=False, pin_memory=True, num_workers=4)
    # Optionally resume model + optimizer state from a checkpoint.
    if resume_path is not None:
        checkpoint = torch.load(resume_path)
        start_epoch = checkpoint['epoch'] + 1
        net.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print('continue to train: shuffle{} epoch{} '.format(times + 1, start_epoch))
    else:
        start_epoch = 0
    training_start_time = time.time()
    rows, columns = train_pairs.shape
    n_batches = rows // batch_size_train
    # Best validation SRCC seen so far; gates checkpoint_best updates.
    valsrcc = 0
    ema = EMA(net, 0.999)
    ema.register()
    autograd.set_detect_anomaly(True)
    for epoch in range(start_epoch, n_epochs):
        # initiate parameters for statistic recordings.
        dist = []
        y_true = []
        running_loss = 0.0
        total_train_loss = 0
        start_time = time.time()
        print_every = 20
        train_counter = 0
        net.train()
        print("---------------------train mode-------epoch{}--------------------------".format(epoch))
        for i, data in enumerate(data_train_loader, 0):
            train_counter = train_counter + 1
            x, y, gts = data
            y_val = gts.numpy()
            x, y, gts = \
                Variable(x).to(device), \
                Variable(y).to(device), \
                Variable(gts).to(device)
            optimizer.zero_grad()
            # score is the largest-concatenation distance; the others are
            # distances over progressively smaller latent groups.
            score, score65432, score6543, score654, score65, score6, log_p_x, logdet_x, log_p_y, logdet_y = net(x, y)
            logdet_x = logdet_x.mean()
            logdet_y = logdet_y.mean()
            # Bits-per-dimension NLL terms for both images (768px, 5-bit bins).
            loss_x, log_p_x, log_det_x = calc_loss(log_p_x, logdet_x, 768, 2.0 ** 5)
            loss_y, log_p_y, log_det_y = calc_loss(log_p_y, logdet_y, 768, 2.0 ** 5)
            # Regression loss: the full-concatenation score is weighted 10x.
            score_loss = 10 * loss(score, gts) + loss(score65432, gts) + loss(score6543, gts) + loss(score654, gts) + loss(score65, gts) + loss(score6, gts)
            loss_size = 10 * score_loss + loss_x + loss_y
            loss_size.backward()
            optimizer.step()
            ema.update()
            running_loss += loss_size.item()
            total_train_loss += loss_size.item()
            pred = (torch.squeeze(score)).cpu().detach().numpy().tolist()
            if isinstance(pred, list):
                dist.extend(pred)
                y_true.extend(y_val.tolist())
            else:
                dist.append(np.array(pred))
                y_true.append(y_val)
            if (i + 1) % (print_every + 1) == 0:
                print("Epoch {}, {:d}% \t train_loss: {:.6f} took: {:.2f}s".format(
                    epoch + 1, int(100 * (i + 1) / n_batches), running_loss / print_every, time.time() - start_time))
                running_loss = 0.0
                start_time = time.time()
        # Always save the latest (non-EMA) weights after each epoch.
        torch.save(
            {"state_dict": net.state_dict(), 'epoch': epoch, 'optimizer': optimizer.state_dict(), 'times': times}, \
            os.path.join(workspace, 'checkpoint', 'ModelParams_checkpoint.pt'))
        # Calculate correlation coefficients between the predicted values and ground truth values on training set.
        dist = np.array(dist).squeeze()
        y_true = np.array(y_true).squeeze()
        _, cc_v, srocc_v, krocc_v, rmse_v = coeff_fit(dist, y_true)
        print("Training set: PCC{:.4}, SROCC{:.4}, KROCC{:.4}, RMSE{:.4}".format(cc_v, srocc_v, krocc_v, rmse_v))
        # validation
        # EMA: swap in the shadow (averaged) weights for evaluation.
        ema.apply_shadow()
        net.eval()
        print("----------------------------validation mode---------------------------------")
        srocc_v, total_val_loss, val_counter, cc_v, krocc_v, rmse_v, stress, dist, y_true, score_val = test(
            data_val_loader, net, loss)
        # srocc_a, total_val_loss_a, val_counter_a, cc_a, krocc_a, rmse_a, stress_a, dist_a, y_true_a, score_a = test(
        #     data_test_aligned_loader, net, loss)
        # srocc_na, total_val_loss_na, val_counter_na, cc_na, krocc_na, rmse_na, stress_na, dist_na, y_true_na, score_na = test(
        #     data_test_notaligned_loader, net, loss)
        # Keep the EMA weights with the best validation SRCC.
        if srocc_v > valsrcc:
            valsrcc = srocc_v
            torch.save({"state_dict": net.state_dict()},
                       os.path.join(workspace, 'checkpoint_best', 'ModelParams_Best_val.pt'))
            print('update best model...')
        print("VALIDATION: PCC{:.4}, SROCC{:.4}, STRESS{:.4}, RMSE{:.4}".format(cc_v, srocc_v, stress, rmse_v))
        print("loss = {:.6}".format(total_val_loss / val_counter))
        # EMA: restore the live training weights before the next epoch.
        ema.restore()
        scheduler.step()
    print('#############################################################################')
    print("Training finished, took {:.2f}s".format(time.time() - training_start_time))
    # Reload the best-validation checkpoint and score the three test splits.
    pt = os.path.join(workspace, 'checkpoint_best', 'ModelParams_Best_val.pt')
    checkpoint = torch.load(pt)
    net = CDFlow().to(device)
    net = torch.nn.DataParallel(net).to(device)
    net.load_state_dict(checkpoint['state_dict'])
    net.eval()
    srocc_v1, total_val_loss, val_counter, cc_v1, krocc_v, rmse_v, stress1, dist1, y_true1, score_val = test(
        data_test_loader, net, loss)
    print('best performance: plcc{} srcc{}'.format(cc_v1, srocc_v1))
    srocc_v2, total_val_loss, val_counter, cc_v2, krocc_v, rmse_v, stress2, dist2, y_true2, score_val = test(
        data_test_aligned_loader, net, loss)
    print('best performance in Pixel-wise aligned: plcc{} srcc{}'.format(cc_v2, srocc_v2))
    srocc_v3, total_val_loss, val_counter, cc_v3, krocc_v, rmse_v, stress3, dist3, y_true3, score_val = test(
        data_test_notaligned_loader, net, loss)
    print('best performance in non-Pixel-wise aligned: plcc{} srcc{}'.format(cc_v3, srocc_v3))
    return dist1, y_true1, stress1, cc_v1, srocc_v1, dist2, y_true2, stress2, cc_v2, srocc_v2, dist3, y_true3, stress3, cc_v3, srocc_v3
def test(data_val_loader, net, loss):
    """Evaluate `net` on a data loader and compute correlation statistics.

    Returns a 10-tuple: (SROCC, summed loss, batch count, PLCC, KROCC, RMSE,
    stress, predicted scores, ground-truth scores, summed score loss).
    NOTE(review): device is hard-coded to CUDA; `Variable` is a deprecated
    no-op wrapper in modern PyTorch — candidates for cleanup.
    """
    total_val_loss = 0
    val_counter = 0
    score_val = 0
    dist = []     # accumulated model predictions over all batches
    y_true = []   # accumulated ground-truth scores over all batches
    device = torch.device("cuda")
    for i, data in enumerate(data_val_loader, 0):
        with torch.no_grad():
            x, y, gts = data
            # keep a numpy copy of the labels before moving them to the GPU
            y_val = gts.numpy()
            x, y, gts = \
                Variable(x).to(device), \
                Variable(y).to(device), \
                Variable(gts).to(device)
            # only the first of the network's ten outputs (the score) is used here
            score, _, _, _, _, _, _, _, _, _ = net(x, y)
            score_loss = loss(score, gts)
            loss_size = score_loss
            total_val_loss += loss_size.cpu().numpy()
            score_val = score_val + score_loss.item()
            val_counter += 1
            # squeeze() makes a batch of 1 a scalar, hence the list/scalar split below
            pred = (torch.squeeze(score)).cpu().detach().numpy().tolist()
            if isinstance(pred, list):
                dist.extend(pred)
                y_true.extend(y_val.tolist())
            else:
                dist.append(np.array(pred))
                y_true.append(y_val)
    # Calculate correlation coefficients between the predicted values and ground truth values on validation set.
    dist_np = np.array(dist).squeeze()
    y_true_np = np.array(y_true).squeeze()
    stress = compute_stress(dist_np, y_true_np)
    _, cc_v, srocc_v, krocc_v, rmse_v = coeff_fit(dist_np, y_true_np)
    return srocc_v, total_val_loss, val_counter, cc_v, krocc_v, rmse_v, stress, dist, y_true, score_val
def calc_loss(log_p, logdet, image_size, n_bins):
    """Glow-style negative log-likelihood in bits per dimension.

    Returns a 3-tuple of means: (total bits/dim loss, log_p contribution,
    logdet contribution), all divided by log(2) * number of pixels.
    """
    n_pixel = 3 * image_size * image_size
    bits_denom = log(2) * n_pixel
    # total log-likelihood: discretization term + flow log-det + prior log-prob
    total = log_p + logdet - log(n_bins) * n_pixel
    nll_bits = (-total / bits_denom).mean()
    log_p_bits = (log_p / bits_denom).mean()
    logdet_bits = (logdet / bits_denom).mean()
    return nll_bits, log_p_bits, logdet_bits
| 11,783 | 44.85214 | 156 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/dqn_algos/demo.py | import numpy as np
from arguments import get_args
from models import net
import torch
from rl_utils.env_wrapper.atari_wrapper import make_atari, wrap_deepmind
def get_tensors(obs):
    """Convert an HWC observation into a (1, C, H, W) float32 tensor."""
    chw = np.transpose(obs, (2, 0, 1))       # HWC -> CHW
    batched = np.expand_dims(chw, 0)         # add the batch axis
    return torch.tensor(batched, dtype=torch.float32)
if __name__ == '__main__':
    args = get_args()
    # create the environment
    env = make_atari(args.env_name)
    env = wrap_deepmind(env, frame_stack=True)
    # build the q-network; named `model` so it does not shadow the imported
    # `net` class (the original rebound the class name to an instance)
    model = net(env.action_space.n, args.use_dueling)
    # path to the trained checkpoint
    model_path = args.save_dir + args.env_name + '/model.pt'
    # load the weights on CPU regardless of where they were saved
    model.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
    # run the greedy policy for a fixed number of frames
    obs = env.reset()
    for _ in range(2000):
        env.render()
        with torch.no_grad():
            obs_tensor = get_tensors(obs)
            action_value = model(obs_tensor)
            action = torch.argmax(action_value.squeeze()).item()
        obs, reward, done, _ = env.step(action)
        if done:
            obs = env.reset()
    env.close()
| 1,134 | 30.527778 | 90 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/dqn_algos/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
# the convolution layer of deepmind
class deepmind(nn.Module):
    """DeepMind Atari feature extractor: three conv layers flattened to 32*7*7."""
    def __init__(self):
        super(deepmind, self).__init__()
        self.conv1 = nn.Conv2d(4, 32, 8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = nn.Conv2d(64, 32, 3, stride=1)
        # orthogonal weight init with the relu gain, zero biases
        for conv in (self.conv1, self.conv2, self.conv3):
            nn.init.orthogonal_(conv.weight.data, gain=nn.init.calculate_gain('relu'))
        for conv in (self.conv1, self.conv2, self.conv3):
            nn.init.constant_(conv.bias.data, 0)

    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = F.relu(self.conv2(out))
        out = F.relu(self.conv3(out))
        # flatten to (batch, 32*7*7); 84x84 input yields 7x7 feature maps
        return out.view(-1, 32 * 7 * 7)
# in the initial, just the nature CNN
class net(nn.Module):
    """Q-network head over the deepmind features; optional dueling architecture."""
    def __init__(self, num_actions, use_dueling=False):
        super(net, self).__init__()
        # whether the dueling decomposition Q = V + (A - mean A) is used
        self.use_dueling = use_dueling
        self.cnn_layer = deepmind()
        if not self.use_dueling:
            self.fc1 = nn.Linear(32 * 7 * 7, 256)
            self.action_value = nn.Linear(256, num_actions)
        else:
            # separate streams for the advantage and the state value
            self.action_fc = nn.Linear(32 * 7 * 7, 256)
            self.state_value_fc = nn.Linear(32 * 7 * 7, 256)
            self.action_value = nn.Linear(256, num_actions)
            self.state_value = nn.Linear(256, 1)

    def forward(self, inputs):
        # pixels are scaled to [0, 1] before the conv stack
        feats = self.cnn_layer(inputs / 255.0)
        if not self.use_dueling:
            return self.action_value(F.relu(self.fc1(feats)))
        # dueling head: Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a))
        adv = self.action_value(F.relu(self.action_fc(feats)))
        value = self.state_value(F.relu(self.state_value_fc(feats)))
        centered_adv = adv - torch.mean(adv, dim=1, keepdim=True)
        return value + centered_adv
| 2,596 | 37.761194 | 88 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/dqn_algos/dqn_agent.py | import sys
import numpy as np
from models import net
from utils import linear_schedule, select_actions, reward_recorder
from rl_utils.experience_replay.experience_replay import replay_buffer
import torch
from datetime import datetime
import os
import copy
# define the dqn agent
class dqn_agent:
    """Deep Q-Network agent (optional double-DQN target and dueling head).

    Collects epsilon-greedy experience into a replay buffer and periodically
    fits the online network to the TD target computed from a target network.
    """
    def __init__(self, env, args):
        # environment and hyper-parameters
        self.env = env
        self.args = args
        # online q-network
        self.net = net(self.env.action_space.n, self.args.use_dueling)
        # target network starts as an exact copy of the online network
        self.target_net = copy.deepcopy(self.net)
        self.target_net.load_state_dict(self.net.state_dict())
        if self.args.cuda:
            self.net.cuda()
            self.target_net.cuda()
        # define the optimizer
        self.optimizer = torch.optim.Adam(self.net.parameters(), lr=self.args.lr)
        # define the replay memory
        self.buffer = replay_buffer(self.args.buffer_size)
        # linearly decayed epsilon for epsilon-greedy exploration
        self.exploration_schedule = linear_schedule(int(self.args.total_timesteps * self.args.exploration_fraction),
                                                    self.args.final_ratio, self.args.init_ratio)
        # makedirs(exist_ok=True) is race-free and creates missing parents,
        # unlike the previous os.mkdir calls
        os.makedirs(self.args.save_dir, exist_ok=True)
        # per-environment subfolder for checkpoints
        self.model_path = os.path.join(self.args.save_dir, self.args.env_name)
        os.makedirs(self.model_path, exist_ok=True)

    def learn(self):
        """Main training loop: act, store transitions, and update the networks."""
        # tracks per-episode rewards and their running mean
        episode_reward = reward_recorder()
        obs = np.array(self.env.reset())
        td_loss = 0
        for timestep in range(self.args.total_timesteps):
            explore_eps = self.exploration_schedule.get_value(timestep)
            with torch.no_grad():
                obs_tensor = self._get_tensors(obs)
                action_value = self.net(obs_tensor)
            # epsilon-greedy action selection
            action = select_actions(action_value, explore_eps)
            # execute the action in the environment
            obs_, reward, done, _ = self.env.step(action)
            obs_ = np.array(obs_)
            # append the transition to the replay buffer
            self.buffer.add(obs, action, reward, obs_, float(done))
            obs = obs_
            # accumulate the episode reward
            episode_reward.add_rewards(reward)
            if done:
                obs = np.array(self.env.reset())
                # start new episode to store rewards
                episode_reward.start_new_episode()
            if timestep > self.args.learning_starts and timestep % self.args.train_freq == 0:
                # sample a minibatch from the replay buffer and do one TD update
                batch_samples = self.buffer.sample(self.args.batch_size)
                td_loss = self._update_network(batch_samples)
            if timestep > self.args.learning_starts and timestep % self.args.target_network_update_freq == 0:
                # periodically sync the target network with the online network
                self.target_net.load_state_dict(self.net.state_dict())
            if done and episode_reward.num_episodes % self.args.display_interval == 0:
                print('[{}] Frames: {}, Episode: {}, Mean: {:.3f}, Loss: {:.3f}'.format(datetime.now(), timestep,
                        episode_reward.num_episodes, episode_reward.mean, td_loss))
                torch.save(self.net.state_dict(), self.model_path + '/model.pt')

    def _update_network(self, samples):
        """One TD update on a minibatch; returns the scalar loss."""
        obses, actions, rewards, obses_next, dones = samples
        # convert the batch to tensors
        obses = self._get_tensors(obses)
        actions = torch.tensor(actions, dtype=torch.int64).unsqueeze(-1)
        rewards = torch.tensor(rewards, dtype=torch.float32).unsqueeze(-1)
        obses_next = self._get_tensors(obses_next)
        # `dones` becomes a 0/1 continuation mask (1 when the episode continues)
        dones = torch.tensor(1 - dones, dtype=torch.float32).unsqueeze(-1)
        if self.args.cuda:
            actions = actions.cuda()
            rewards = rewards.cuda()
            dones = dones.cuda()
        # compute the bootstrap target without tracking gradients
        with torch.no_grad():
            if self.args.use_double_net:
                # double DQN: online net picks the action, target net evaluates it
                q_value_ = self.net(obses_next)
                action_max_idx = torch.argmax(q_value_, dim=1, keepdim=True)
                target_action_value = self.target_net(obses_next)
                target_action_max_value = target_action_value.gather(1, action_max_idx)
            else:
                target_action_value = self.target_net(obses_next)
                target_action_max_value, _ = torch.max(target_action_value, dim=1, keepdim=True)
        # TD target: r + gamma * max_a Q_target(s', a) * continuation_mask
        expected_value = rewards + self.args.gamma * target_action_max_value * dones
        # q-values of the actions actually taken
        action_value = self.net(obses)
        real_value = action_value.gather(1, actions)
        loss = (expected_value - real_value).pow(2).mean()
        # gradient step
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def _get_tensors(self, obs):
        """Convert HWC (or NHWC) numpy observations to NCHW float tensors."""
        if obs.ndim == 3:
            obs = np.transpose(obs, (2, 0, 1))
            obs = np.expand_dims(obs, 0)
        elif obs.ndim == 4:
            obs = np.transpose(obs, (0, 3, 1, 2))
        obs = torch.tensor(obs, dtype=torch.float32)
        if self.args.cuda:
            obs = obs.cuda()
        return obs
| 5,646 | 43.81746 | 144 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/trpo/trpo_agent.py | import torch
import numpy as np
import os
from models import network
from rl_utils.running_filter.running_filter import ZFilter
from utils import select_actions, eval_actions, conjugated_gradient, line_search, set_flat_params_to
from datetime import datetime
class trpo_agent:
    """Trust Region Policy Optimization agent for continuous-control tasks.

    Collects on-policy rollouts with GAE, takes a natural-gradient step on the
    actor via conjugate gradient + backtracking line search under a KL
    constraint, and fits the critic with Adam.
    """
    def __init__(self, env, args):
        self.env = env
        self.args = args
        # current policy/value network and a frozen snapshot used as the "old" policy
        self.net = network(self.env.observation_space.shape[0], self.env.action_space.shape[0])
        self.old_net = network(self.env.observation_space.shape[0], self.env.action_space.shape[0])
        # make sure the net and old net start with the same parameters
        self.old_net.load_state_dict(self.net.state_dict())
        # Adam only updates the critic; the actor is updated by the TRPO step
        self.optimizer = torch.optim.Adam(self.net.critic.parameters(), lr=self.args.lr)
        # running mean/std filter for observations
        self.running_state = ZFilter((self.env.observation_space.shape[0],), clip=5)
        # makedirs(exist_ok=True) is race-free and creates missing parents
        os.makedirs(self.args.save_dir, exist_ok=True)
        self.model_path = self.args.save_dir + self.args.env_name + '/'
        os.makedirs(self.model_path, exist_ok=True)

    def learn(self):
        """Run the training loop: rollouts -> GAE -> TRPO actor step -> critic fit."""
        num_updates = self.args.total_timesteps // self.args.nsteps
        obs = self.running_state(self.env.reset())
        final_reward = 0
        episode_reward = 0
        self.dones = False
        for update in range(num_updates):
            mb_obs, mb_rewards, mb_actions, mb_dones, mb_values = [], [], [], [], []
            for step in range(self.args.nsteps):
                with torch.no_grad():
                    obs_tensor = self._get_tensors(obs)
                    value, pi = self.net(obs_tensor)
                # sample an action from the Gaussian policy
                actions = select_actions(pi)
                # store the transition
                mb_obs.append(np.copy(obs))
                mb_actions.append(actions)
                mb_dones.append(self.dones)
                mb_values.append(value.detach().numpy().squeeze())
                # execute the action in the environment
                obs_, reward, done, _ = self.env.step(actions)
                self.dones = done
                mb_rewards.append(reward)
                if done:
                    obs_ = self.env.reset()
                obs = self.running_state(obs_)
                episode_reward += reward
                # when an episode finishes, move its return into final_reward
                mask = 0.0 if done else 1.0
                final_reward *= mask
                final_reward += (1 - mask) * episode_reward
                episode_reward *= mask
            # process the rollouts
            mb_obs = np.asarray(mb_obs, dtype=np.float32)
            mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
            mb_actions = np.asarray(mb_actions, dtype=np.float32)
            # builtin `bool`: the np.bool alias was removed in NumPy 1.24
            mb_dones = np.asarray(mb_dones, dtype=bool)
            mb_values = np.asarray(mb_values, dtype=np.float32)
            # bootstrap value of the final observation
            with torch.no_grad():
                obs_tensor = self._get_tensors(obs)
                last_value, _ = self.net(obs_tensor)
                last_value = last_value.detach().numpy().squeeze()
            # generalized advantage estimation (GAE)
            mb_returns = np.zeros_like(mb_rewards)
            mb_advs = np.zeros_like(mb_rewards)
            lastgaelam = 0
            for t in reversed(range(self.args.nsteps)):
                if t == self.args.nsteps - 1:
                    nextnonterminal = 1.0 - self.dones
                    nextvalues = last_value
                else:
                    nextnonterminal = 1.0 - mb_dones[t + 1]
                    nextvalues = mb_values[t + 1]
                delta = mb_rewards[t] + self.args.gamma * nextvalues * nextnonterminal - mb_values[t]
                mb_advs[t] = lastgaelam = delta + self.args.gamma * self.args.tau * nextnonterminal * lastgaelam
            mb_returns = mb_advs + mb_values
            # normalize the advantages
            mb_advs = (mb_advs - mb_advs.mean()) / (mb_advs.std() + 1e-5)
            # snapshot the current policy as the "old" policy before updating
            self.old_net.load_state_dict(self.net.state_dict())
            policy_loss, value_loss = self._update_network(mb_obs, mb_actions, mb_returns, mb_advs)
            torch.save([self.net.state_dict(), self.running_state], self.model_path + 'model.pt')
            print('[{}] Update: {} / {}, Frames: {}, Reward: {:.3f}, VL: {:.3f}, PL: {:.3f}'.format(datetime.now(), update, \
                    num_updates, (update + 1)*self.args.nsteps, final_reward, value_loss, policy_loss))

    def _update_network(self, mb_obs, mb_actions, mb_returns, mb_advs):
        """One TRPO update; returns (surrogate_loss, last_value_loss) as floats."""
        mb_obs_tensor = torch.tensor(mb_obs, dtype=torch.float32)
        mb_actions_tensor = torch.tensor(mb_actions, dtype=torch.float32)
        mb_advs_tensor = torch.tensor(mb_advs, dtype=torch.float32).unsqueeze(1)
        # frozen old policy for the importance ratio and the KL constraint
        # (the original also ran an unused forward pass here; removed)
        with torch.no_grad():
            _, pi_old = self.old_net(mb_obs_tensor)
        # surrogate objective and its gradient g (we solve Ax = g, A = Fisher matrix)
        surr_loss = self._get_surrogate_loss(mb_obs_tensor, mb_advs_tensor, mb_actions_tensor, pi_old)
        surr_grad = torch.autograd.grad(surr_loss, self.net.actor.parameters())
        flat_surr_grad = torch.cat([grad.view(-1) for grad in surr_grad]).data
        # conjugate gradient gives the (unscaled) natural gradient direction
        nature_grad = conjugated_gradient(self._fisher_vector_product, -flat_surr_grad, 10, mb_obs_tensor, pi_old)
        # scale the step so the quadratic KL estimate equals max_kl
        non_scale_kl = 0.5 * (nature_grad * self._fisher_vector_product(nature_grad, mb_obs_tensor, pi_old)).sum(0, keepdim=True)
        scale_ratio = torch.sqrt(non_scale_kl / self.args.max_kl)
        final_nature_grad = nature_grad / scale_ratio[0]
        # expected improvement used by the line search acceptance test
        expected_improve = (-flat_surr_grad * nature_grad).sum(0, keepdim=True) / scale_ratio[0]
        # backtracking line search from the current (flat) actor parameters
        prev_params = torch.cat([param.data.view(-1) for param in self.net.actor.parameters()])
        success, new_params = line_search(self.net.actor, self._get_surrogate_loss, prev_params, final_nature_grad, \
                                          expected_improve, mb_obs_tensor, mb_advs_tensor, mb_actions_tensor, pi_old)
        set_flat_params_to(self.net.actor, new_params)
        # fit the critic by minibatch regression on the empirical returns
        inds = np.arange(mb_obs.shape[0])
        for _ in range(self.args.vf_itrs):
            np.random.shuffle(inds)
            for start in range(0, mb_obs.shape[0], self.args.batch_size):
                end = start + self.args.batch_size
                mbinds = inds[start:end]
                mini_obs = mb_obs[mbinds]
                mini_returns = mb_returns[mbinds]
                mini_obs = torch.tensor(mini_obs, dtype=torch.float32)
                mini_returns = torch.tensor(mini_returns, dtype=torch.float32).unsqueeze(1)
                values, _ = self.net(mini_obs)
                v_loss = (mini_returns - values).pow(2).mean()
                self.optimizer.zero_grad()
                v_loss.backward()
                self.optimizer.step()
        return surr_loss.item(), v_loss.item()

    def _get_surrogate_loss(self, obs, adv, actions, pi_old):
        """Negative importance-weighted advantage (to be minimized)."""
        _, pi = self.net(obs)
        log_prob = eval_actions(pi, actions)
        old_log_prob = eval_actions(pi_old, actions).detach()
        surr_loss = -torch.exp(log_prob - old_log_prob) * adv
        return surr_loss.mean()

    def _fisher_vector_product(self, v, obs, pi_old):
        """Fisher-vector product A v via double backprop through the KL, plus damping."""
        kl = self._get_kl(obs, pi_old)
        kl = kl.mean()
        kl_grads = torch.autograd.grad(kl, self.net.actor.parameters(), create_graph=True)
        flat_kl_grads = torch.cat([grad.view(-1) for grad in kl_grads])
        # `Variable` is a deprecated no-op wrapper; use the tensor directly
        kl_v = (flat_kl_grads * v).sum()
        kl_second_grads = torch.autograd.grad(kl_v, self.net.actor.parameters())
        flat_kl_second_grads = torch.cat([grad.contiguous().view(-1) for grad in kl_second_grads]).data
        flat_kl_second_grads = flat_kl_second_grads + self.args.damping * v
        return flat_kl_second_grads

    def _get_kl(self, obs, pi_old):
        """Analytic KL(old || current) between diagonal Gaussians, summed over dims."""
        mean_old, std_old = pi_old
        _, pi = self.net(obs)
        mean, std = pi
        kl = -torch.log(std / std_old) + (std.pow(2) + (mean - mean_old).pow(2)) / (2 * std_old.pow(2)) - 0.5
        return kl.sum(1, keepdim=True)

    def _get_tensors(self, obs):
        """Wrap a single observation as a (1, obs_dim) float tensor."""
        return torch.tensor(obs, dtype=torch.float32).unsqueeze(0)
| 9,299 | 52.142857 | 129 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/trpo/utils.py | import numpy as np
import torch
from torch.distributions.normal import Normal
# select actions
def select_actions(pi):
mean, std = pi
normal_dist = Normal(mean, std)
return normal_dist.sample().detach().numpy().squeeze()
# evaluate the actions
def eval_actions(pi, actions):
    """Log-probability of `actions` under the Gaussian `pi`, summed over action dims.

    Returns a (batch, 1) tensor.
    """
    mean, std = pi
    dist = Normal(mean, std)
    return dist.log_prob(actions).sum(dim=1, keepdim=True)
# conjugated gradient
def conjugated_gradient(fvp, b, update_steps, obs, pi_old, residual_tol=1e-10):
    """Solve A x = b by conjugate gradient, where A is applied implicitly
    through fvp(p, obs, pi_old) (the Fisher-vector product).

    Starts from x = 0 and stops after `update_steps` iterations or once the
    squared residual drops below `residual_tol`.
    """
    x = torch.zeros(b.size(), dtype=torch.float32)
    residual = b.clone()
    direction = b.clone()
    rr = torch.dot(residual, residual)
    for _ in range(update_steps):
        Ap = fvp(direction, obs, pi_old)
        step = rr / torch.dot(direction, Ap)
        x = x + step * direction
        residual = residual - step * Ap
        rr_new = torch.dot(residual, residual)
        direction = residual + (rr_new / rr) * direction
        rr = rr_new
        if rr < residual_tol:
            break
    return x
# line search
def line_search(model, loss_fn, x, full_step, expected_rate, obs, adv, actions, pi_old, max_backtracks=10, accept_ratio=0.1):
    """Backtracking line search along `full_step` from flat parameters `x`.

    Accepts the first step fraction whose actual improvement is positive and
    at least `accept_ratio` of the expected (linear) improvement. Returns
    (True, accepted_params) on success, otherwise (False, x).
    NOTE: on failure the model is left at the last tried parameters — the
    caller is expected to restore them (same behavior as before).
    """
    baseline = loss_fn(obs, adv, actions, pi_old).data
    for frac in 0.5 ** np.arange(max_backtracks):
        candidate = x + frac * full_step
        set_flat_params_to(model, candidate)
        trial = loss_fn(obs, adv, actions, pi_old).data
        actual_improve = baseline - trial
        expected_improve = expected_rate * frac
        if (actual_improve / expected_improve).item() > accept_ratio and actual_improve.item() > 0:
            return True, candidate
    return False, x
return False, x
def set_flat_params_to(model, flat_params):
    """Copy a flat parameter vector back into `model`, slice by slice."""
    offset = 0
    for param in model.parameters():
        count = param.numel()
        # reshape the slice to the parameter's shape and copy in place
        param.data.copy_(flat_params[offset:offset + count].view(param.size()))
        offset += count
| 2,026 | 33.355932 | 125 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/trpo/demo.py | import numpy as np
import torch
import gym
from arguments import get_args
from models import network
def denormalize(x, mean, std, clip=10):
    """Standardize an observation with the saved running stats and clip.

    Note: despite its (historical) name this applies normalization,
    (x - mean) / std; the name is kept so callers keep working.
    Fix: the previous version mutated the caller's array in place via
    ``x -= mean`` — this now returns a new array and leaves `x` untouched.
    """
    normed = (x - mean) / (std + 1e-8)
    return np.clip(normed, -clip, clip)
def get_tensors(x):
    """Wrap a single observation as a (1, obs_dim) float32 tensor."""
    obs_tensor = torch.tensor(x, dtype=torch.float32)
    return obs_tensor.unsqueeze(0)
if __name__ == '__main__':
    args = get_args()
    # build the environment and the actor-critic network
    env = gym.make(args.env_name)
    net = network(env.observation_space.shape[0], env.action_space.shape[0])
    # restore the trained weights together with the observation filter
    model_path = args.save_dir + args.env_name + '/model.pt'
    network_model, filters = torch.load(model_path, map_location=lambda storage, loc: storage)
    net.load_state_dict(network_model)
    net.eval()
    # play ten episodes with the deterministic (mean) action
    for _ in range(10):
        obs = denormalize(env.reset(), filters.rs.mean, filters.rs.std)
        reward_total = 0
        for _ in range(10000):
            env.render()
            obs_tensor = get_tensors(obs)
            with torch.no_grad():
                _, (mean, _) = net(obs_tensor)
            action = mean.numpy().squeeze()
            obs, reward, done, _ = env.step(action)
            reward_total += reward
            obs = denormalize(obs, filters.rs.mean, filters.rs.std)
            if done:
                break
        print('the reward of this episode is: {}'.format(reward_total))
    env.close()
| 1,383 | 31.952381 | 94 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/trpo/models.py | import torch
from torch import nn
from torch.nn import functional as F
class network(nn.Module):
    """Container bundling a state-value critic and a Gaussian actor."""
    def __init__(self, num_states, num_actions):
        super(network, self).__init__()
        self.critic = critic(num_states)
        self.actor = actor(num_states, num_actions)

    def forward(self, x):
        # returns (V(s), (mean, std)) for the given state batch
        return self.critic(x), self.actor(x)
class critic(nn.Module):
    """State-value function V(s): two 64-unit tanh hidden layers."""
    def __init__(self, num_states):
        super(critic, self).__init__()
        self.fc1 = nn.Linear(num_states, 64)
        self.fc2 = nn.Linear(64, 64)
        self.value = nn.Linear(64, 1)

    def forward(self, x):
        # torch.tanh replaces the deprecated torch.nn.functional.tanh
        x = torch.tanh(self.fc1(x))
        x = torch.tanh(self.fc2(x))
        value = self.value(x)
        return value
class actor(nn.Module):
    """Gaussian policy: two 64-unit tanh layers produce the action mean;
    a state-independent learned log-std parameter produces the std."""
    def __init__(self, num_states, num_actions):
        super(actor, self).__init__()
        self.fc1 = nn.Linear(num_states, 64)
        self.fc2 = nn.Linear(64, 64)
        self.action_mean = nn.Linear(64, num_actions)
        # log-std initialized to 0 => std starts at 1 for every action dim
        self.sigma_log = nn.Parameter(torch.zeros(1, num_actions))

    def forward(self, x):
        # torch.tanh replaces the deprecated torch.nn.functional.tanh
        x = torch.tanh(self.fc1(x))
        x = torch.tanh(self.fc2(x))
        mean = self.action_mean(x)
        sigma_log = self.sigma_log.expand_as(mean)
        sigma = torch.exp(sigma_log)
        pi = (mean, sigma)
        return pi
| 1,376 | 28.297872 | 66 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/a2c/a2c_agent.py | import numpy as np
import torch
from models import net
from datetime import datetime
from utils import select_actions, evaluate_actions, discount_with_dones
import os
class a2c_agent:
    """Synchronous advantage actor-critic (A2C) agent over vectorized envs.

    Collects `nsteps` of experience from `num_workers` parallel environments,
    computes n-step discounted returns, and performs one combined
    policy/value/entropy update per rollout.
    """
    def __init__(self, envs, args):
        self.envs = envs
        self.args = args
        # shared actor-critic network
        self.net = net(self.envs.action_space.n)
        if self.args.cuda:
            self.net.cuda()
        # RMSprop as in the original A3C/A2C papers
        self.optimizer = torch.optim.RMSprop(self.net.parameters(), lr=self.args.lr, eps=self.args.eps, alpha=self.args.alpha)
        # makedirs(exist_ok=True) is race-free and creates missing parents,
        # unlike the previous os.mkdir calls
        os.makedirs(self.args.save_dir, exist_ok=True)
        self.model_path = self.args.save_dir + self.args.env_name + '/'
        os.makedirs(self.model_path, exist_ok=True)
        # flattened rollout shape: (num_workers * nsteps, *obs_shape)
        self.batch_ob_shape = (self.args.num_workers * self.args.nsteps,) + self.envs.observation_space.shape
        self.obs = np.zeros((self.args.num_workers,) + self.envs.observation_space.shape, dtype=self.envs.observation_space.dtype.name)
        self.obs[:] = self.envs.reset()
        self.dones = [False for _ in range(self.args.num_workers)]

    def learn(self):
        """Main training loop: rollout, compute returns, update the network."""
        num_updates = self.args.total_frames // (self.args.num_workers * self.args.nsteps)
        # per-worker running episode rewards and the last finished-episode rewards
        episode_rewards = np.zeros((self.args.num_workers, ), dtype=np.float32)
        final_rewards = np.zeros((self.args.num_workers, ), dtype=np.float32)
        for update in range(num_updates):
            mb_obs, mb_rewards, mb_actions, mb_dones = [], [], [], []
            for step in range(self.args.nsteps):
                with torch.no_grad():
                    input_tensor = self._get_tensors(self.obs)
                    _, pi = self.net(input_tensor)
                # sample actions from the categorical policy
                actions = select_actions(pi)
                cpu_actions = actions.squeeze(1).cpu().numpy()
                # store the pre-step information
                mb_obs.append(np.copy(self.obs))
                mb_actions.append(cpu_actions)
                mb_dones.append(self.dones)
                # step all environments
                obs, rewards, dones, _ = self.envs.step(cpu_actions)
                self.dones = dones
                mb_rewards.append(rewards)
                # zero the stored observation of any worker that just finished
                for n, done in enumerate(dones):
                    if done:
                        self.obs[n] = self.obs[n]*0
                self.obs = obs
                episode_rewards += rewards
                # move finished-episode returns into final_rewards
                masks = np.array([0.0 if done else 1.0 for done in dones], dtype=np.float32)
                final_rewards *= masks
                final_rewards += (1 - masks) * episode_rewards
                episode_rewards *= masks
            mb_dones.append(self.dones)
            # reshape rollouts to (workers, steps, ...) then flatten obs
            mb_obs = np.asarray(mb_obs, dtype=np.uint8).swapaxes(1, 0).reshape(self.batch_ob_shape)
            mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
            mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)
            # builtin `bool`: the np.bool alias was removed in NumPy 1.24
            mb_dones = np.asarray(mb_dones, dtype=bool).swapaxes(1, 0)
            mb_masks = mb_dones[:, :-1]
            mb_dones = mb_dones[:, 1:]
            # bootstrap value for the last observation of each worker
            with torch.no_grad():
                input_tensor = self._get_tensors(self.obs)
                last_values, _ = self.net(input_tensor)
            # n-step discounted returns per worker, bootstrapping when not done
            for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values.detach().cpu().numpy().squeeze())):
                rewards = rewards.tolist()
                dones = dones.tolist()
                if dones[-1] == 0:
                    rewards = discount_with_dones(rewards+[value], dones+[0], self.args.gamma)[:-1]
                else:
                    rewards = discount_with_dones(rewards, dones, self.args.gamma)
                mb_rewards[n] = rewards
            mb_rewards = mb_rewards.flatten()
            mb_actions = mb_actions.flatten()
            # one combined policy/value/entropy update
            vl, al, ent = self._update_network(mb_obs, mb_rewards, mb_actions)
            if update % self.args.log_interval == 0:
                print('[{}] Update: {}/{}, Frames: {}, Rewards: {:.1f}, VL: {:.3f}, PL: {:.3f}, Ent: {:.2f}, Min: {}, Max:{}'.format(\
                    datetime.now(), update, num_updates, (update+1)*(self.args.num_workers * self.args.nsteps),\
                    final_rewards.mean(), vl, al, ent, final_rewards.min(), final_rewards.max()))
                torch.save(self.net.state_dict(), self.model_path + 'model.pt')

    def _update_network(self, obs, returns, actions):
        """One gradient step; returns (value_loss, action_loss, entropy) as floats."""
        input_tensor = self._get_tensors(obs)
        values, pi = self.net(input_tensor)
        # wrap targets in tensors
        returns = torch.tensor(returns, dtype=torch.float32).unsqueeze(1)
        actions = torch.tensor(actions, dtype=torch.int64).unsqueeze(1)
        if self.args.cuda:
            returns = returns.cuda()
            actions = actions.cuda()
        # log-probabilities of the taken actions and the policy entropy
        action_log_probs, dist_entropy = evaluate_actions(pi, actions)
        # advantages = returns - V(s)
        advantages = returns - values
        value_loss = advantages.pow(2).mean()
        # policy gradient loss; advantages are detached so the critic is
        # trained only through the value loss
        action_loss = -(advantages.detach() * action_log_probs).mean()
        total_loss = action_loss + self.args.value_loss_coef * value_loss - self.args.entropy_coef * dist_entropy
        self.optimizer.zero_grad()
        total_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.max_grad_norm)
        self.optimizer.step()
        return value_loss.item(), action_loss.item(), dist_entropy.item()

    def _get_tensors(self, obs):
        """NHWC numpy observations -> NCHW float tensor (on GPU if enabled)."""
        input_tensor = torch.tensor(np.transpose(obs, (0, 3, 1, 2)), dtype=torch.float32)
        if self.args.cuda:
            input_tensor = input_tensor.cuda()
        return input_tensor
| 6,370 | 47.633588 | 135 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/a2c/utils.py | import torch
import numpy as np
from torch.distributions.categorical import Categorical
# select - actions
def select_actions(pi, deterministic=False):
    """Pick actions from categorical probabilities `pi`.

    Deterministic mode returns the argmax as a python int; otherwise a
    sampled action tensor of shape (batch, 1).
    """
    dist = Categorical(pi)
    if deterministic:
        return torch.argmax(pi, dim=1).item()
    return dist.sample().unsqueeze(-1)
# get the action log prob and entropy...
def evaluate_actions(pi, actions):
    """Return per-sample log-probs of `actions` and the mean entropy of `pi`."""
    dist = Categorical(pi)
    log_probs = dist.log_prob(actions.squeeze(-1)).unsqueeze(-1)
    return log_probs, dist.entropy().mean()
def discount_with_dones(rewards, dones, gamma):
    """Discounted returns over a trajectory, resetting at episode boundaries.

    `dones[t]` truthy zeroes out the bootstrapped tail at step t.
    """
    running = 0
    out = []
    # walk the trajectory backwards, accumulating the discounted return
    for reward, done in zip(reversed(rewards), reversed(dones)):
        running = reward + gamma * running * (1. - done)
        out.append(running)
    out.reverse()
    return out
| 749 | 29 | 92 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/a2c/demo.py | from arguments import get_args
from models import net
import torch
from utils import select_actions
import cv2
import numpy as np
from rl_utils.env_wrapper.frame_stack import VecFrameStack
from rl_utils.env_wrapper.atari_wrapper import make_atari, wrap_deepmind
# update the current observation
def get_tensors(obs):
    """Convert an HWC observation into a (1, C, H, W) float32 tensor."""
    chw = np.transpose(obs, (2, 0, 1))
    return torch.tensor(chw, dtype=torch.float32).unsqueeze(0)
if __name__ == "__main__":
    args = get_args()
    # create environment
    #env = VecFrameStack(wrap_deepmind(make_atari(args.env_name)), 4)
    env = make_atari(args.env_name)
    env = wrap_deepmind(env, frame_stack=True)
    # get the model path
    model_path = args.save_dir + args.env_name + '/model.pt'
    network = net(env.action_space.n)
    network.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
    obs = env.reset()
    while True:
        env.render()
        # greedy action from the policy head
        with torch.no_grad():
            input_tensor = get_tensors(obs)
            _, pi = network(input_tensor)
            actions = select_actions(pi, True)
        obs, reward, done, _ = env.step([actions])
        # fix: the original never reset on episode end, so it kept stepping a
        # finished episode; restart the environment instead
        if done:
            obs = env.reset()
    env.close()
| 1,193 | 33.114286 | 95 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/a2c/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
# the convolution layer of deepmind
class deepmind(nn.Module):
    """DeepMind Atari encoder: three conv layers plus a 512-unit FC layer."""
    def __init__(self):
        super(deepmind, self).__init__()
        self.conv1 = nn.Conv2d(4, 32, 8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = nn.Conv2d(64, 32, 3, stride=1)
        self.fc1 = nn.Linear(32 * 7 * 7, 512)
        # orthogonal weight init with relu gain (same order as before, so the
        # RNG stream is unchanged), then zero all biases
        for layer in (self.conv1, self.conv2, self.conv3, self.fc1):
            nn.init.orthogonal_(layer.weight.data, gain=nn.init.calculate_gain('relu'))
        for layer in (self.conv1, self.conv2, self.conv3, self.fc1):
            nn.init.constant_(layer.bias.data, 0)

    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = F.relu(self.conv2(out))
        out = F.relu(self.conv3(out))
        out = out.view(-1, 32 * 7 * 7)
        return F.relu(self.fc1(out))
# in the initial, just the nature CNN
class net(nn.Module):
    """Actor-critic head over the deepmind features."""
    def __init__(self, num_actions):
        super(net, self).__init__()
        self.cnn_layer = deepmind()
        self.critic = nn.Linear(512, 1)
        self.actor = nn.Linear(512, num_actions)
        # value head: plain orthogonal init, zero bias
        nn.init.orthogonal_(self.critic.weight.data)
        nn.init.constant_(self.critic.bias.data, 0)
        # policy head: small gain keeps the initial policy near-uniform
        nn.init.orthogonal_(self.actor.weight.data, gain=0.01)
        nn.init.constant_(self.actor.bias.data, 0)

    def forward(self, inputs):
        # pixels scaled to [0, 1] before the encoder
        feats = self.cnn_layer(inputs / 255.0)
        value = self.critic(feats)
        pi = F.softmax(self.actor(feats), dim=1)
        return value, pi
| 1,959 | 37.431373 | 88 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ddpg/utils.py | import numpy as np
import torch
# add ounoise here
class ounoise():
    """Ornstein-Uhlenbeck process noise for DDPG exploration."""
    def __init__(self, std, action_dim, mean=0, theta=0.15, dt=1e-2, x0=None):
        self.std = std            # diffusion scale
        self.mean = mean          # long-run mean the process reverts to
        self.action_dim = action_dim
        self.theta = theta        # mean-reversion rate
        self.dt = dt              # time step
        self.x0 = x0              # optional starting state

    def reset(self):
        """Restart the process at x0 (or zeros when x0 is None)."""
        if self.x0 is None:
            self.x_prev = np.zeros(self.action_dim)
        else:
            self.x_prev = self.x0

    def noise(self):
        """Advance one OU step and return the new state."""
        drift = self.theta * (self.mean - self.x_prev) * self.dt
        diffusion = self.std * np.sqrt(self.dt) * np.random.normal(size=self.action_dim)
        self.x_prev = self.x_prev + drift + diffusion
        return self.x_prev
| 686 | 27.625 | 84 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ddpg/demo.py | from arguments import get_args
import gym
from models import actor
import torch
import numpy as np
def normalize(obs, mean, std, clip):
    """Standardize `obs` with the saved running stats, clipped to [-clip, clip]."""
    scaled = (obs - mean) / std
    return np.clip(scaled, -clip, clip)
if __name__ == '__main__':
    args = get_args()
    env = gym.make(args.env_name)
    # environment dimensions and the action scale
    obs_dims = env.observation_space.shape[0]
    action_dims = env.action_space.shape[0]
    action_max = env.action_space.high[0]
    # build the actor and restore its weights plus the saved obs statistics
    actor_net = actor(obs_dims, action_dims)
    model_path = args.save_dir + args.env_name + '/model.pt'
    model, mean, std = torch.load(model_path, map_location=lambda storage, loc: storage)
    actor_net.load_state_dict(model)
    # play ten episodes with the deterministic policy
    for ep in range(10):
        obs = env.reset()
        reward_sum = 0
        while True:
            env.render()
            with torch.no_grad():
                norm_obs = normalize(obs, mean, std, args.clip_range)
                norm_obs_tensor = torch.tensor(norm_obs, dtype=torch.float32).unsqueeze(0)
                actions = actor_net(norm_obs_tensor)
                actions = actions.detach().numpy().squeeze()
            # squeeze() collapses a 1-d action to a scalar; restore the array
            if action_dims == 1:
                actions = np.array([actions])
            obs_, reward, done, _ = env.step(action_max * actions)
            reward_sum += reward
            if done:
                break
            obs = obs_
        print('the episode is: {}, the reward is: {}'.format(ep, reward_sum))
    env.close()
| 1,518 | 34.325581 | 90 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ddpg/ddpg_agent.py | import numpy as np
from models import actor, critic
import torch
import os
from datetime import datetime
from mpi4py import MPI
from rl_utils.mpi_utils.normalizer import normalizer
from rl_utils.mpi_utils.utils import sync_networks, sync_grads
from rl_utils.experience_replay.experience_replay import replay_buffer
from utils import ounoise
import copy
import gym
"""
ddpg algorithms - revised baseline version
support MPI training
"""
class ddpg_agent:
    """DDPG agent (revised baseline version) with MPI-parallel data collection.

    Online actor/critic networks are mirrored by polyak-averaged target
    networks; observations are normalized with running statistics and
    exploration uses Ornstein-Uhlenbeck noise.
    """
    def __init__(self, env, args):
        self.env = env
        self.args = args
        # get the dims and action max of the environment
        obs_dims = self.env.observation_space.shape[0]
        self.action_dims = self.env.action_space.shape[0]
        # the actor outputs actions in [-1, 1]; this rescales them to the env's range
        self.action_max = self.env.action_space.high[0]
        # define the network
        self.actor_net = actor(obs_dims, self.action_dims)
        self.critic_net = critic(obs_dims, self.action_dims)
        # sync the weights across the mpi workers so all ranks start identically
        sync_networks(self.actor_net)
        sync_networks(self.critic_net)
        # build the target networks as deep copies of the online networks
        self.actor_target_net = copy.deepcopy(self.actor_net)
        self.critic_target_net = copy.deepcopy(self.critic_net)
        # create the optimizers (l2 weight decay on the critic only)
        self.actor_optim = torch.optim.Adam(self.actor_net.parameters(), self.args.lr_actor)
        self.critic_optim = torch.optim.Adam(self.critic_net.parameters(), self.args.lr_critic, weight_decay=self.args.critic_l2_reg)
        # create the replay buffer
        self.replay_buffer = replay_buffer(self.args.replay_size)
        # create the observation normalizer
        self.o_norm = normalizer(obs_dims, default_clip_range=self.args.clip_range)
        # create the exploration noise generator (ornstein-uhlenbeck process)
        self.noise_generator = ounoise(std=0.2, action_dim=self.action_dims)
        # create the dir to save models (rank 0 only, to avoid filesystem races)
        if MPI.COMM_WORLD.Get_rank() == 0:
            if not os.path.exists(self.args.save_dir):
                os.mkdir(self.args.save_dir)
            self.model_path = os.path.join(self.args.save_dir, self.args.env_name)
            if not os.path.exists(self.model_path):
                os.mkdir(self.model_path)
        # create a separate evaluation environment
        self.eval_env = gym.make(self.args.env_name)
        # per-rank seeds so each mpi worker sees a different episode stream
        self.eval_env.seed(self.args.seed * 2 + MPI.COMM_WORLD.Get_rank())
    def learn(self):
        """
        the learning part: alternate rollout collection, network updates and
        periodic evaluation / checkpointing (logging is done by rank 0 only)
        """
        self.actor_net.train()
        # reset the environment first
        obs = self.env.reset()
        self.noise_generator.reset()
        # derive the number of epochs from the total frame budget
        nb_epochs = self.args.total_frames // (self.args.nb_rollout_steps * self.args.nb_cycles)
        for epoch in range(nb_epochs):
            for _ in range(self.args.nb_cycles):
                # observations collected this cycle, used to update the normalizer
                ep_obs = []
                for _ in range(self.args.nb_rollout_steps):
                    with torch.no_grad():
                        inputs_tensor = self._preproc_inputs(obs)
                        pi = self.actor_net(inputs_tensor)
                        action = self._select_actions(pi)
                    # feed actions into the environment (rescaled to the env's range)
                    obs_, reward, done, _ = self.env.step(self.action_max * action)
                    # append the rollout information into the memory
                    self.replay_buffer.add(obs, action, reward, obs_, float(done))
                    ep_obs.append(obs.copy())
                    obs = obs_
                    # if done, reset the environment and the exploration noise
                    if done:
                        obs = self.env.reset()
                        self.noise_generator.reset()
                # then start to do the update of the normalizer
                ep_obs = np.array(ep_obs)
                self.o_norm.update(ep_obs)
                self.o_norm.recompute_stats()
                # then start to update the networks
                for _ in range(self.args.nb_train):
                    a_loss, c_loss = self._update_network()
                # polyak-average the target networks towards the online networks
                self._soft_update_target_network(self.actor_target_net, self.actor_net)
                self._soft_update_target_network(self.critic_target_net, self.critic_net)
            # start to do the evaluation
            success_rate = self._eval_agent()
            # _eval_agent switches to eval mode; convert back to training mode
            self.actor_net.train()
            if epoch % self.args.display_interval == 0:
                # only rank 0 logs progress and saves the checkpoint
                if MPI.COMM_WORLD.Get_rank() == 0:
                    print('[{}] Epoch: {} / {}, Frames: {}, Rewards: {:.3f}, Actor loss: {:.3f}, Critic Loss: {:.3f}'.format(datetime.now(), \
                            epoch, nb_epochs, (epoch+1) * self.args.nb_rollout_steps * self.args.nb_cycles, success_rate, a_loss, c_loss))
                    torch.save([self.actor_net.state_dict(), self.o_norm.mean, self.o_norm.std], self.model_path + '/model.pt')
    # preprocess a raw observation for the networks
    def _preproc_inputs(self, obs):
        """Normalize *obs* with the running statistics and return a (1, obs_dims) float tensor."""
        obs_norm = self.o_norm.normalize(obs)
        inputs_tensor = torch.tensor(obs_norm, dtype=torch.float32).unsqueeze(0)
        return inputs_tensor
    # this function will choose action for the agent and do the exploration
    def _select_actions(self, pi):
        """Add exploration noise to the deterministic policy output and clip to [-1, 1]."""
        action = pi.cpu().numpy().squeeze()
        # TODO: Noise type now - only support ounoise
        # add the gaussian noise
        #action = action + np.random.normal(0, 0.1, self.action_dims)
        # add ou noise
        action = action + self.noise_generator.noise()
        action = np.clip(action, -1, 1)
        return action
    # update the network
    def _update_network(self):
        """Do one gradient step on actor and critic from a replay minibatch.

        Returns:
            (actor_loss, critic_loss) as python floats for logging.
        """
        # sample the samples from the replay buffer
        samples = self.replay_buffer.sample(self.args.batch_size)
        obses, actions, rewards, obses_next, dones = samples
        # normalize the observations with the running statistics
        norm_obses = self.o_norm.normalize(obses)
        norm_obses_next = self.o_norm.normalize(obses_next)
        # transfer them into tensors
        norm_obses_tensor = torch.tensor(norm_obses, dtype=torch.float32)
        norm_obses_next_tensor = torch.tensor(norm_obses_next, dtype=torch.float32)
        actions_tensor = torch.tensor(actions, dtype=torch.float32)
        rewards_tensor = torch.tensor(rewards, dtype=torch.float32).unsqueeze(1)
        dones_tensor = torch.tensor(dones, dtype=torch.float32).unsqueeze(1)
        # build the bootstrapped td target with the target networks (no gradients)
        with torch.no_grad():
            actions_next = self.actor_target_net(norm_obses_next_tensor)
            q_next_value = self.critic_target_net(norm_obses_next_tensor, actions_next)
            target_q_value = rewards_tensor + (1 - dones_tensor) * self.args.gamma * q_next_value
        # the real q value
        real_q_value = self.critic_net(norm_obses_tensor, actions_tensor)
        critic_loss = (real_q_value - target_q_value).pow(2).mean()
        # the actor loss: maximize the critic's estimate of the actor's actions
        actions_real = self.actor_net(norm_obses_tensor)
        actor_loss = -self.critic_net(norm_obses_tensor, actions_real).mean()
        # start to update the actor network (gradients are averaged across ranks)
        self.actor_optim.zero_grad()
        actor_loss.backward()
        sync_grads(self.actor_net)
        self.actor_optim.step()
        # update the critic network
        self.critic_optim.zero_grad()
        critic_loss.backward()
        sync_grads(self.critic_net)
        self.critic_optim.step()
        return actor_loss.item(), critic_loss.item()
    # soft update the target network...
    def _soft_update_target_network(self, target, source):
        """Polyak update: target <- polyak * target + (1 - polyak) * source."""
        for target_param, param in zip(target.parameters(), source.parameters()):
            target_param.data.copy_((1 - self.args.polyak) * param.data + self.args.polyak * target_param.data)
    # do the evaluation
    def _eval_agent(self):
        """Run noise-free evaluation rollouts and return the mpi-averaged episode return.

        Note: this leaves the actor in eval mode; learn() restores train mode.
        """
        self.actor_net.eval()
        total_success_rate = []
        for _ in range(self.args.nb_test_rollouts):
            per_success_rate = []
            obs = self.eval_env.reset()
            while True:
                with torch.no_grad():
                    inputs_tensor = self._preproc_inputs(obs)
                    pi = self.actor_net(inputs_tensor)
                    actions = pi.detach().cpu().numpy().squeeze()
                # a 1-d action space squeezes to a scalar; wrap it back into an array
                if self.action_dims == 1:
                    actions = np.array([actions])
                obs_, reward, done, _ = self.eval_env.step(actions * self.action_max)
                per_success_rate.append(reward)
                obs = obs_
                if done:
                    break
            total_success_rate.append(np.sum(per_success_rate))
        local_success_rate = np.mean(total_success_rate)
        # average the per-rank evaluation returns across all mpi workers
        global_success_rate = MPI.COMM_WORLD.allreduce(local_success_rate, op=MPI.SUM)
        return global_success_rate / MPI.COMM_WORLD.Get_size()
| 8,833 | 45.494737 | 142 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ddpg/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
# define the actor network
class actor(nn.Module):
    """Deterministic policy: observation -> tanh-squashed action in [-1, 1]."""
    def __init__(self, obs_dims, action_dims):
        super(actor, self).__init__()
        # two hidden layers (400 -> 300), as in the original DDPG paper
        self.fc1 = nn.Linear(obs_dims, 400)
        self.fc2 = nn.Linear(400, 300)
        self.action_out = nn.Linear(300, action_dims)
    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        # tanh keeps every action component inside [-1, 1]
        return torch.tanh(self.action_out(hidden))
class critic(nn.Module):
    """Q-network: scores an (observation, action) pair with a single scalar."""
    def __init__(self, obs_dims, action_dims):
        super(critic, self).__init__()
        self.fc1 = nn.Linear(obs_dims, 400)
        # the action is concatenated in after the first hidden layer
        self.fc2 = nn.Linear(400 + action_dims, 300)
        self.q_out = nn.Linear(300, 1)
    def forward(self, x, actions):
        hidden = F.relu(self.fc1(x))
        joined = torch.cat([hidden, actions], dim=1)
        hidden = F.relu(self.fc2(joined))
        return self.q_out(hidden)
| 950 | 28.71875 | 53 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ppo/utils.py | import numpy as np
import torch
from torch.distributions.normal import Normal
from torch.distributions.beta import Beta
from torch.distributions.categorical import Categorical
import random
def select_actions(pi, dist_type, env_type):
    """Sample actions from the policy output *pi* and return a squeezed numpy array.

    For atari, *pi* is a categorical probability tensor; otherwise it is the
    parameter pair of a Gaussian (mean, std) or Beta (alpha, beta) distribution.
    """
    if env_type == 'atari':
        actions = Categorical(pi).sample()
    elif dist_type == 'gauss':
        mu, sigma = pi
        actions = Normal(mu, sigma).sample()
    elif dist_type == 'beta':
        alpha, beta = pi
        # the beta distribution is sampled on the cpu
        actions = Beta(alpha.detach().cpu(), beta.detach().cpu()).sample()
    return actions.detach().cpu().numpy().squeeze()
def evaluate_actions(pi, actions, dist_type, env_type):
    """Return (log_prob, entropy) of *actions* under the policy output *pi*."""
    if env_type == 'atari':
        dist = Categorical(pi)
        # keep a trailing singleton dim so shapes match the continuous case
        log_prob = dist.log_prob(actions).unsqueeze(-1)
        return log_prob, dist.entropy().mean()
    if dist_type == 'gauss':
        mu, sigma = pi
        dist = Normal(mu, sigma)
    elif dist_type == 'beta':
        alpha, beta = pi
        dist = Beta(alpha, beta)
    # independent action dimensions: sum the per-dimension log probabilities
    log_prob = dist.log_prob(actions).sum(dim=1, keepdim=True)
    return log_prob, dist.entropy().mean()
| 1,370 | 35.078947 | 78 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ppo/demo.py | from arguments import get_args
from models import cnn_net, mlp_net
import torch
import cv2
import numpy as np
import gym
from rl_utils.env_wrapper.frame_stack import VecFrameStack
from rl_utils.env_wrapper.atari_wrapper import make_atari, wrap_deepmind
# normalize an observation with the training-time running-filter statistics
def normalize(x, mean, std, clip=10):
    """Z-score *x* with (mean, std) and clip to [-clip, clip].

    Fixed: the previous version mutated the caller's array in place via
    `x -= mean` / `x /= ...`; the return value is unchanged, but the input
    is now left intact. (The old header comment said "denormalize", which
    was the opposite of what this does.)
    """
    return np.clip((x - mean) / (std + 1e-8), -clip, clip)
# build a batched float tensor from a raw observation
def get_tensors(obs, env_type, filters=None):
    """Convert an observation into a (1, ...) float32 tensor for the network.

    Atari frames arrive HWC and are transposed to CHW; mujoco observations
    are normalized with the running-filter statistics first.
    """
    if env_type == 'atari':
        array = np.transpose(obs, (2, 0, 1))
    elif env_type == 'mujoco':
        array = normalize(obs, filters.rs.mean, filters.rs.std)
    return torch.tensor(array, dtype=torch.float32).unsqueeze(0)
if __name__ == '__main__':
    # get the arguments
    args = get_args()
    # create the environment (atari gets the deepmind preprocessing wrappers)
    if args.env_type == 'atari':
        env = make_atari(args.env_name)
        env = wrap_deepmind(env, frame_stack=True)
    elif args.env_type == 'mujoco':
        env = gym.make(args.env_name)
    # get the model path
    model_path = args.save_dir + args.env_name + '/model.pt'
    # create the network and load the checkpoint
    if args.env_type == 'atari':
        network = cnn_net(env.action_space.n)
        network.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
        # atari checkpoints carry no observation filter
        filters = None
    elif args.env_type == 'mujoco':
        network = mlp_net(env.observation_space.shape[0], env.action_space.shape[0], args.dist)
        # mujoco checkpoints bundle (weights, observation filter)
        net_models, filters = torch.load(model_path, map_location=lambda storage, loc: storage)
        # load models
        network.load_state_dict(net_models)
    # start to play the demo
    obs = env.reset()
    reward_total = 0
    # just one episode
    while True:
        env.render()
        with torch.no_grad():
            obs_tensor = get_tensors(obs, args.env_type, filters)
            _, pi = network(obs_tensor)
        # get actions deterministically from the policy distribution
        if args.env_type == 'atari':
            # greedy action: argmax over the categorical probabilities
            actions = torch.argmax(pi, dim=1).item()
        elif args.env_type == 'mujoco':
            if args.dist == 'gauss':
                # gaussian policy: act at the mean
                mean, _ = pi
                actions = mean.numpy().squeeze()
            elif args.dist == 'beta':
                # beta policy: act at the distribution mode (alpha, beta > 1)
                alpha, beta = pi
                actions = (alpha - 1) / (alpha + beta - 2)
                actions = actions.numpy().squeeze()
                # map the [0, 1] beta support onto the [-1, 1] action range
                actions = -1 + 2 * actions
        obs_, reward, done, _ = env.step(actions)
        reward_total += reward
        if done:
            break
        obs = obs_
    print('the rewrads is: {}'.format(reward_total))
| 2,641 | 35.694444 | 112 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ppo/models.py | import torch
from torch import nn
from torch.nn import functional as F
"""
this network also include gaussian distribution and beta distribution
"""
class mlp_net(nn.Module):
    """Actor-critic MLP with separate value and policy towers.

    The policy head parameterizes either a Gaussian (mean, std) or a
    Beta (alpha, beta) distribution, selected by *dist_type*.
    """
    def __init__(self, state_size, num_actions, dist_type):
        super(mlp_net, self).__init__()
        self.dist_type = dist_type
        # value tower
        self.fc1_v = nn.Linear(state_size, 64)
        self.fc2_v = nn.Linear(64, 64)
        # policy tower
        self.fc1_a = nn.Linear(state_size, 64)
        self.fc2_a = nn.Linear(64, 64)
        if self.dist_type == 'gauss':
            # state-independent log-std parameter plus a mean head
            self.sigma_log = nn.Parameter(torch.zeros(1, num_actions))
            self.action_mean = nn.Linear(64, num_actions)
            self.action_mean.weight.data.mul_(0.1)
            self.action_mean.bias.data.zero_()
        elif self.dist_type == 'beta':
            # two heads for the beta distribution parameters
            self.action_alpha = nn.Linear(64, num_actions)
            self.action_beta = nn.Linear(64, num_actions)
            self.action_alpha.weight.data.mul_(0.1)
            self.action_alpha.bias.data.zero_()
            self.action_beta.weight.data.mul_(0.1)
            self.action_beta.bias.data.zero_()
        # scalar state-value head
        self.value = nn.Linear(64, 1)
        self.value.weight.data.mul_(0.1)
        self.value.bias.data.zero_()
    def forward(self, x):
        # value branch
        v_hidden = torch.tanh(self.fc1_v(x))
        v_hidden = torch.tanh(self.fc2_v(v_hidden))
        state_value = self.value(v_hidden)
        # policy branch
        a_hidden = torch.tanh(self.fc1_a(x))
        a_hidden = torch.tanh(self.fc2_a(a_hidden))
        if self.dist_type == 'gauss':
            mean = self.action_mean(a_hidden)
            sigma = torch.exp(self.sigma_log.expand_as(mean))
            pi = (mean, sigma)
        elif self.dist_type == 'beta':
            # softplus(...) + 1 keeps both parameters strictly above 1
            alpha = F.softplus(self.action_alpha(a_hidden)) + 1
            beta = F.softplus(self.action_beta(a_hidden)) + 1
            pi = (alpha, beta)
        return state_value, pi
# the deepmind convolutional feature extractor
class deepmind(nn.Module):
    """Convolutional encoder mapping a (4, 84, 84) frame stack to a 512-d feature."""
    def __init__(self):
        super(deepmind, self).__init__()
        self.conv1 = nn.Conv2d(4, 32, 8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = nn.Conv2d(64, 32, 3, stride=1)
        self.fc1 = nn.Linear(32 * 7 * 7, 512)
        # orthogonal weight init with the relu gain, zero biases, for every layer
        relu_gain = nn.init.calculate_gain('relu')
        for module in (self.conv1, self.conv2, self.conv3, self.fc1):
            nn.init.orthogonal_(module.weight.data, gain=relu_gain)
            nn.init.constant_(module.bias.data, 0)
    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = F.relu(self.conv2(out))
        out = F.relu(self.conv3(out))
        # flatten the (32, 7, 7) feature map before the linear layer
        out = out.view(-1, 32 * 7 * 7)
        return F.relu(self.fc1(out))
# the full actor-critic network on top of the nature CNN
class cnn_net(nn.Module):
    """Atari actor-critic: shared CNN features feed a value head and a softmax policy."""
    def __init__(self, num_actions):
        super(cnn_net, self).__init__()
        self.cnn_layer = deepmind()
        self.critic = nn.Linear(512, 1)
        self.actor = nn.Linear(512, num_actions)
        # orthogonal init: default gain for the value head...
        nn.init.orthogonal_(self.critic.weight.data)
        nn.init.constant_(self.critic.bias.data, 0)
        # ...and a small gain for the policy head so the initial policy is near-uniform
        nn.init.orthogonal_(self.actor.weight.data, gain=0.01)
        nn.init.constant_(self.actor.bias.data, 0)
    def forward(self, inputs):
        # pixel values are scaled to [0, 1] before the conv stack
        features = self.cnn_layer(inputs / 255.0)
        value = self.critic(features)
        pi = F.softmax(self.actor(features), dim=1)
        return value, pi
| 3,913 | 37 | 88 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.