# File: grabnel-master/src/attack/grad_arg_max.py
import dgl
import torch
from tqdm import tqdm
from copy import deepcopy
from .base_attack import BaseAttack
import pandas as pd
import numpy as np
from itertools import product
from functools import lru_cache
class GradArgMax(BaseAttack):
def __init__(self, classifier, loss_fn, mode='flip', **kwargs):
super().__init__(classifier, loss_fn)
self.mode = mode
def attack(self, graph: dgl.DGLGraph, label: torch.tensor, budget: int, max_queries: int, verbose=True):
"""Attack graph by flipping edge with maximum gradient (in absolute value)."""
graph = dgl.transform.remove_self_loop(graph)
# save original graph and create a fully connected graph with binary edge weights which represent membership.
unperturbed_graph = deepcopy(graph)
m = unperturbed_graph.number_of_edges()
graph, edge_weights = self.prepare_input(graph)
# fast access to edge_ids
self.graph = graph
self.edge_ids.cache_clear()
# initialise variables
flipped_edges = set()
losses = []
correct_prediction = []
queries = []
progress_bar = tqdm(range(budget), disable=not verbose)
# edge id for self loops if they exist
self_loops = self.has_self_loops(graph)
if self_loops:
            _, _, self_loop_ids = graph.edge_ids(graph.nodes(), graph.nodes(), return_uv=True)
# sequential attack
for i in progress_bar:
# forward and backward pass
predictions = self.classifier(graph, edge_weights)
label_prediction = torch.argmax(predictions)
loss = self.loss_fn(predictions, label)
loss.backward()
# update loss/query information
losses.append(loss.item())
queries.append(i)
# stop early if attack is a success
if label_prediction.item() != label.item():
correct_prediction.append(False)
break
else:
correct_prediction.append(True)
            # Negate gradients of existing edges so the argmax selects the most negative
            # gradient for edges in the original graph and the most positive gradient for
            # non-existent edges.
gradients = edge_weights.grad.detach()
gradients[:m] = -1 * gradients[:m]
            # mask gradients for already flipped edges so they can't be selected
for edge in flipped_edges:
edge_ids = self.edge_ids(edge)
gradients[edge_ids] = -np.inf
# mask self loops
if self_loops:
gradients[self_loop_ids] = -np.inf
# mask gradients based on mode
if self.mode == 'flip':
pass
elif self.mode == 'add':
gradients[:m] = -np.inf
elif self.mode == 'remove':
gradients[m:] = -np.inf
else:
raise NotImplementedError('Only supports flip, add, remove.')
# select edge to be flipped based on which flip will increase loss the most
edge_index = torch.argmax(gradients).item()
u = graph.edges()[0][edge_index].item()
v = graph.edges()[1][edge_index].item()
flipped_edge = frozenset((u, v))
flipped_edges.add(flipped_edge)
# update edge weights
edge_weights = edge_weights.detach()
edge_ids = self.edge_ids(flipped_edge)
edge_weights[edge_ids] = 1 - edge_weights[edge_ids]
edge_weights = edge_weights.requires_grad_(True)
# update tqdm progress bar
progress_bar.set_postfix({'loss': f'{loss.item():.4}', 'selected': (u, v)})
# prepare output information
df = pd.DataFrame({'losses': losses,
'correct_prediction': correct_prediction,
'queries': queries})
# construct adversarial example if the attack succeeds
if not correct_prediction[-1]:
adv_example = self.construct_perturbed_graph(unperturbed_graph, flipped_edges)
else:
adv_example = None
# print if attack succeeded
if verbose:
if not correct_prediction[-1]:
print('Attack success')
else:
print('Attack fail')
return df, adv_example
@staticmethod
def prepare_input(graph):
"""Make graph fully connected but with zero weight edges where they don't exist"""
n = graph.number_of_nodes()
m = graph.number_of_edges()
to_add_u = []
to_add_v = []
for u, v in product(range(n), range(n)):
if u != v and not graph.has_edges_between(u, v):
to_add_u.append(u)
to_add_v.append(v)
edge_weights = torch.hstack((torch.ones(m), torch.zeros(len(to_add_u))))
edge_weights.requires_grad = True
graph.add_edges(to_add_u, to_add_v)
return graph, edge_weights
def construct_perturbed_graph(self, graph, flipped_edges):
"""Takes the unperturbed graph and list of flipped edges and applys the perturbation."""
to_add_u = []
to_add_v = []
to_delete = []
for edge in flipped_edges:
u, v = edge
if graph.has_edges_between(u, v):
edge_ids = self.edge_ids(edge)
to_delete += list(edge_ids.numpy())
else:
to_add_u += [u, v]
to_add_v += [v, u]
graph.remove_edges(to_delete)
graph.add_edges(to_add_u, to_add_v)
return graph
@lru_cache(maxsize=None)
def edge_ids(self, edge: frozenset):
"""Edge ids for edges u ~ v."""
u, v = edge
_, _, edge_ids_uv = self.graph.edge_ids([u], [v], return_uv=True)
_, _, edge_ids_vu = self.graph.edge_ids([v], [u], return_uv=True)
return torch.hstack((edge_ids_uv, edge_ids_vu))
@staticmethod
def has_self_loops(graph):
"""Determine if the graph contains self loops"""
u = graph.nodes()
return graph.has_edges_between(u, u).all().item()
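
if __name__ == '__main__':
    # Minimal usage sketch (not part of the original file); run with
    # `python -m src.attack.grad_arg_max` from the repository root so the
    # relative imports resolve. It assumes a victim model with a
    # `model(graph, edge_weights)` signature, such as GCNGraphClassifier in
    # src/models/gcn.py, and uses an untrained model purely for illustration.
    import torch.nn.functional as F
    from ..models.gcn import GCNGraphClassifier

    g = dgl.rand_graph(10, 30)                  # 10 nodes, 30 random edges
    g.ndata['node_attr'] = torch.randn(10, 5)   # 5-dimensional node features
    victim = GCNGraphClassifier(input_dim=5, number_of_labels=3)
    attacker = GradArgMax(victim, F.cross_entropy, mode='flip')
    df, adv_graph = attacker.attack(g, torch.tensor([0]), budget=3, max_queries=10)
    print(df)  # per-query loss log; adv_graph is None if the attack failed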

# File: grabnel-master/src/attack/bayesopt_attack.py
from .base_attack import BaseAttack
from .genetic import Genetic
import torch
import dgl
import pandas as pd
import numpy as np
from bayesopt.bayesopt.predictors import GPWL, BayesianLinearRegression, NullSurrogate
from .utils import correct_predictions, random_sample_flip, random_sample_rewire_swap, population_graphs, extrapolate_breakeven
from copy import deepcopy
class BayesOptAttack(BaseAttack):
def __init__(self, classifier: torch.nn.Module, loss_fn: torch.nn.Module,
batch_size: int = 1, n_init: int = 10,
edit_per_stage=None,
surrogate: str = 'bayeslinregress',
mode: str = 'flip',
target_class: int = None,
surrogate_settings: dict = None,
acq_settings: dict = None,
verbose: bool = True,
terminate_after_n_fail: int = None,
n_hop_constraint: int = None,
preserve_disconnected_components: bool = False,):
"""
        Attack a classifier via Bayesian optimisation, using a GP or Bayesian linear regression surrogate with
        Weisfeiler-Lehman kernels.
:param classifier: see BaseAttack
:param loss_fn: see BaseAttack
        :param batch_size: the number of candidate adversarial samples to propose at each BO iteration. A larger
            batch size reduces the number of BO iterations and hence wall-clock time, but attack performance may
            degrade correspondingly
:param edit_per_stage: int or float. the number of edits amortised to each stage. A smaller edit_per_stage leads
to more stages which is more greedy, a larger edit_per_stage is less greedy but leads to a larger search
space.
:param surrogate: the choice of surrogate.
:param n_init: the number of initial perturbations to be sampled randomly from the search space
:param mode: str: 'flip', 'add', 'remove' or 'rewire': allowed edit operations on the edges.
:param surrogate_settings: dict: any parameters to be passed to the surrogates. See bayesopt/gp_predictor.py
:param acq_settings: dict: any parameters to be passed to the acquisition function.
:param verbose: whether to enable diagnostic information.
        :param terminate_after_n_fail: the tolerance for the BO agent failing to increase the attack loss. If this
            is not None (a positive int), the attack is aborted after this number of successive failures to
            increase the attack loss.
        :param n_hop_constraint: int. If not None (a positive int), an edge perturbation (either rewire or flip) must
be constrained within the n_hop distance of the first node.
"""
super().__init__(classifier, loss_fn)
self.target_class = target_class
if acq_settings is None:
acq_settings = {}
if 'acq_type' not in acq_settings.keys(): acq_settings['acq_type'] = 'ei'
if 'acq_optimiser' not in acq_settings.keys(): acq_settings['acq_optimiser'] = 'mutation'
if 'acq_max_step' not in acq_settings.keys(): acq_settings['acq_max_step'] = 400
if 'random_frac' not in acq_settings.keys(): acq_settings['random_frac'] = 0.5
self.acq_settings = acq_settings
self.batch_size = batch_size
self.n_init = n_init
self.edit_per_stage = edit_per_stage
if surrogate_settings is None:
surrogate_settings = {}
if surrogate == 'gpwl': self.surrogate = GPWL(**surrogate_settings)
elif surrogate == 'bayeslinregress': self.surrogate = BayesianLinearRegression(**surrogate_settings)
elif surrogate == 'null': self.surrogate = NullSurrogate()
else: raise ValueError(f'Unrecognised surrogate choice {surrogate}')
self.verbose = verbose
assert mode in ['flip', 'add', 'remove', 'rewire'], f'mode {mode} is not recognised!'
self.mode = mode
# save a record of previous query history
self.query_history = []
self.loss_history = []
self.terminate_after_n_fail = terminate_after_n_fail if terminate_after_n_fail is not None and terminate_after_n_fail > 0 else None
self.n_hop_constraint = n_hop_constraint if n_hop_constraint is not None and n_hop_constraint > 0 else None
self.preserve_disconnected_components = preserve_disconnected_components
def attack(self, graph: dgl.DGLGraph, label: torch.tensor, budget, max_queries: int):
"""
The main attack loop.
- For BO, at each iteration, we only modify one edge. If we have budget > 1, we use a greedy approach to
partition the total max_queries into int(max_queries/budget) stages. At each stage, we attack on the
*base graph*: in the first stage, it is the original graph (i.e. graph passed as an argument here);
in the subsequent stages, it is the best perturbed graph of the previous stage that led to the largest
classifier loss.
- The optimisation terminates once it detects a successful attack.
For the rest, see documentation for Genetic and BaseAttack
"""
if isinstance(budget, float):
assert 0 < budget < 1., f'if a float is supplied, this number must be within 0 and 1 but got {budget}'
            budget = np.round(budget * graph.num_edges()).astype(int)
if isinstance(self.edit_per_stage, float):
            self.edit_per_stage = np.round(self.edit_per_stage * graph.num_edges()).astype(int)
stages, edits_per_stage = self.get_stage_statistics(max_queries, budget)
if self.verbose:
            print(f'Total of {max_queries} queries divided into stages at {stages}')
print(f'Edits per stage is {edits_per_stage}')
self.query_history = []
self.loss_history = []
dfs = []
self.committed_edits = []
base_graph = graph
i = 0
adv_example = None
is_edge_weighted = 'weight' in graph.edata.keys()
best_loss = -np.inf
n_fail = 0
while i < max_queries:
curr_stage = np.digitize(i, stages) - 1
prev_stage = np.digitize(max(0, i - self.batch_size), stages) - 1
edit_allowed_this_stage = edits_per_stage[curr_stage]
if curr_stage != prev_stage or i == 0:
if i > 0:
best_idx = torch.argmax(self.surrogate.y)
base_graph = deepcopy(self.surrogate.X[best_idx])
# update the list of prohibited edges
if len(self.query_history) > 0:
self.committed_edits += self.query_history[-self.surrogate.y.shape[0] + int(best_idx)]
if self.verbose:
print(f'Entering Stage {curr_stage}. ')
print(f'Committed edge edits={self.committed_edits}')
# sample randomly at the start of each stage
n_init = min(self.n_init, stages[curr_stage + 1] - stages[curr_stage])
if self.mode == 'rewire':
samples = [random_sample_rewire_swap(base_graph, edit_allowed_this_stage, rewire_only=not is_edge_weighted, n_hop=self.n_hop_constraint,
preserve_disconnected_components=self.preserve_disconnected_components
) for _
in range(n_init)]
else:
samples = [
random_sample_flip(base_graph, edit_allowed_this_stage, remove_edge_only=self.mode == 'remove',
add_edge_only=self.mode == 'add', n_hop=self.n_hop_constraint,
committed_edges=self.committed_edits,
preserve_disconnected_components=self.preserve_disconnected_components,)
for _ in range(n_init)]
if not len(samples):
print('Patience reached. Terminating the current run')
break
perturbed_graphs = population_graphs(base_graph, samples, self.mode)
self.query_history += samples
i += n_init
else:
perturbed_graphs = self.suggest(base_graph, edit_allowed_this_stage, )
i += self.batch_size
with torch.no_grad():
try:
preds = self.classifier(dgl.batch(perturbed_graphs))
                except Exception:
preds = torch.cat([self.classifier(g) for g in perturbed_graphs])
if preds.ndimension() == 1:
                    preds = preds.reshape(-1, 1)
# dgl.batch and dgl.unbatch create lots of problems. use this as a fallback option
# see reference in github issue:
# https://github.com/dmlc/dgl/issues/2409
if preds.shape[0] != len(perturbed_graphs):
preds = self.classifier(perturbed_graphs)
if len(perturbed_graphs) == 1 and preds.shape[1] == 1:
labels = label[0].reshape(1)
else:
labels = torch.repeat_interleave(label, len(perturbed_graphs))
losses = self.loss_fn(preds, labels, reduction='none')
if losses.ndimension() == 0:
losses = losses.reshape(1)
self.loss_history += losses.detach().numpy().tolist()
if self.verbose:
print(f'Iteration {i}. Loss: {losses.detach().numpy()}.')
dfs.append(self.construct_dataframe(losses, preds, label.squeeze(), i + 1))
if len(self.loss_history) > 200 and extrapolate_breakeven(self.loss_history) > 1e5:
print(f'Predicted breakeven point {extrapolate_breakeven(self.loss_history)} and run terminated')
break
if (self.target_class is None and np.sum(correct_predictions(preds.numpy(), labels.numpy())) < len(perturbed_graphs)) \
or (self.target_class is not None and (np.argmax(preds.numpy(), axis=1) == self.target_class).any()):
print('Attack succeeded!')
if self.target_class is None:
comps = correct_predictions(preds.numpy(), labels.numpy())
                    for j, comp in enumerate(comps):  # j, not i: don't shadow the query counter
                        if not comp:
                            adv_example = perturbed_graphs[j]
                            break
else:
                    for j, pred in enumerate(preds):
                        if np.argmax(pred.numpy()) == self.target_class:
                            adv_example = perturbed_graphs[j]
                            break
break
reset_surrogate = False
self.observe(perturbed_graphs, losses, reset_surrogate=reset_surrogate)
if np.max(losses.numpy()) > best_loss:
n_fail = 0
best_loss = torch.max(losses).detach().numpy()
else:
n_fail += len(perturbed_graphs)
if self.terminate_after_n_fail is not None and n_fail > self.terminate_after_n_fail:
print('Patience reached. Terminating the current run')
break
return pd.concat(dfs), adv_example
def suggest(self, base_graph: dgl.DGLGraph, n_edit: int, prohibited_edges: list = None):
"""
The BO function to suggest perturbations to be queried from self.classifier
:param base_graph: the graph on which we perform perturbations
:param n_edit: number of edge edit allowed per perturbation
:param prohibited_edges: list of edge edits that are not allowed.
:return: a list of dgl graphs of shape self.batch_size
"""
is_edge_weighted = 'weight' in base_graph.edata.keys()
candidate_graphs = None
n_samples = self.acq_settings['acq_max_step']
if self.acq_settings['acq_optimiser'] == 'random':
if self.mode == 'rewire':
candidate_samples = [random_sample_rewire_swap(base_graph,
n_edit,
rewire_only=not is_edge_weighted,
n_hop=self.n_hop_constraint,
preserve_disconnected_components=self.preserve_disconnected_components,
) for _
in range(n_samples)]
else:
candidate_samples = [random_sample_flip(base_graph, n_edit, remove_edge_only=self.mode == 'remove',
add_edge_only=self.mode == 'add', n_hop=self.n_hop_constraint,
committed_edges=self.committed_edits,
preserve_disconnected_components=self.preserve_disconnected_components,
)
for _ in range(n_samples)]
elif self.acq_settings['acq_optimiser'] in ['genetic', 'mutation']:
n_round = 10
top_k = 3
pop_size = max(n_samples // n_round, 100)
# optionally set the fraction of randomly generated samples
            n_rand = np.round(pop_size * self.acq_settings['random_frac']).astype(int)
n_mutate = pop_size - n_rand
genetic_optimiser = Genetic(classifier=lambda x_: 0, loss_fn=lambda x_: 0,
population_size=pop_size,
mutation_rate=1., mode=self.mode)
if self.mode == 'rewire':
candidate_samples = [
random_sample_rewire_swap(base_graph, n_edit, rewire_only=not is_edge_weighted,
n_hop=self.n_hop_constraint,
preserve_disconnected_components=self.preserve_disconnected_components,
) for _ in
range(n_rand)]
else:
candidate_samples = [
random_sample_flip(base_graph, n_edit, remove_edge_only=self.mode == 'remove',
add_edge_only=self.mode == 'add',
n_hop=self.n_hop_constraint,
committed_edges=self.committed_edits,
preserve_disconnected_components=self.preserve_disconnected_components,
) for _ in range(n_rand)]
self.query_history += candidate_samples
topk_indices = torch.topk(self.surrogate.y, min(self.surrogate.y.shape[0], top_k))[1]
while len(candidate_samples) < pop_size:
selected_index = topk_indices[np.random.randint(len(topk_indices))]
candidate_samples.append(
genetic_optimiser.mutate_sample(base_graph,
self.query_history[-len(self.surrogate.y) + selected_index],
)
)
candidate_graphs = population_graphs(base_graph, candidate_samples, self.mode)
acq_values = self.surrogate.acquisition(candidate_graphs, acq_func=self.acq_settings['acq_type'], bias=None)
            # run several rounds of mutation, using the current top-k candidates (by acquisition value) as parents
for r in range(n_round):
topk_indices = torch.topk(acq_values, min(len(candidate_graphs), top_k))[1]
while len(candidate_samples) < pop_size:
                    selected_sample = candidate_samples[int(topk_indices[np.random.randint(len(topk_indices))])]
candidate_samples.append(
genetic_optimiser.mutate_sample(base_graph, selected_sample,))
candidate_samples = candidate_samples[n_mutate:]
candidate_graphs = population_graphs(base_graph, candidate_samples, self.mode)
acq_values = self.surrogate.acquisition(candidate_graphs, acq_func=self.acq_settings['acq_type'], bias=None)
else:
raise NotImplementedError(f'Unable to parse the acq_optimiser {self.acq_settings["acq_optimiser"]}')
if candidate_graphs is None:
candidate_graphs = population_graphs(base_graph, candidate_samples, self.mode)
acq_values = self.surrogate.acquisition(candidate_graphs, acq_func=self.acq_settings['acq_type'])
acq_values_np = acq_values.detach().numpy().flatten()
acq_values_np_, unique_idx = np.unique(acq_values_np, return_index=True)
i = np.argpartition(acq_values_np_, -min(acq_values_np_.shape[0], self.batch_size))[
-min(acq_values_np_.shape[0], self.batch_size):]
indices = np.array([unique_idx[j] for j in i])
suggested = [candidate_graphs[j] for j in indices]
self.query_history += [candidate_samples[j] for j in indices]
return suggested
def observe(self, X, y, reset_surrogate=False):
"""
        Update the BO with the new sample-target pair(s) obtained from querying the classifier
:param X: a list of dgl graphs. The list of dgl graphs we queried from the classifier
:param y: a Tensor of shape[0] = len(X). The tensor of the classifier loss
:param reset_surrogate: whether to reset the surrogate (clearing all previous fitted (X, y)).
"""
nan_idx = (y != y).nonzero().view(-1)
if nan_idx.shape[0] > 0:
            # pop in descending index order so earlier removals don't shift later ones
            for i in sorted(nan_idx.tolist(), reverse=True):
                X.pop(i)
y = y[y == y]
if self.surrogate.X is None or reset_surrogate:
self.surrogate.fit(X, y)
else:
self.surrogate.update(X, y)
@staticmethod
def construct_dataframe(losses: np.array, predictions: torch.tensor, label: torch.tensor, queries: int) \
-> pd.DataFrame:
"""Construct a pandas dataframe consistent with the base class. This dataframe is for all samples evaluated
after exactly `queries` queries."""
labels = np.tile(label, len(predictions))
df = pd.DataFrame({'losses': losses,
'correct_prediction': correct_predictions(predictions.numpy(), labels),
'queries': queries})
return df
def get_stage_statistics(self, max_queries: int, budget: int):
if self.edit_per_stage is None:
self.edit_per_stage = budget
if budget % self.edit_per_stage:
num_stages = budget // self.edit_per_stage + 1
else:
num_stages = budget // self.edit_per_stage
query_per_edit = max_queries // budget
stage_length = self.edit_per_stage * query_per_edit
stages = []
edits_per_stages = []
for i in range(num_stages):
stages.append(min(max_queries, i * stage_length))
if sum(edits_per_stages) + self.edit_per_stage < budget:
edits_per_stages.append(self.edit_per_stage)
else:
edits_per_stages.append(budget - sum(edits_per_stages))
stages.append(max_queries)
return np.array(stages), np.array(edits_per_stages)
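
if __name__ == '__main__':
    # Worked example of the staging logic above (not part of the original
    # file); a dummy instance is created with __new__ purely to call
    # get_stage_statistics, which only reads self.edit_per_stage.
    # With budget=5 edits, edit_per_stage=2 and max_queries=100:
    #   query_per_edit = 100 // 5 = 20 and stage_length = 2 * 20 = 40,
    # so the stage boundaries are [0, 40, 80, 100] and the per-stage edit
    # allowances are [2, 2, 1] (the final stage receives the remainder).
    dummy = BayesOptAttack.__new__(BayesOptAttack)
    dummy.edit_per_stage = 2
    stages, edits = dummy.get_stage_statistics(max_queries=100, budget=5)
    print(stages, edits)  # [  0  40  80 100] [2 2 1]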

# File: grabnel-master/src/models/gin.py
"""
Adapted from https://github.com/dmlc/dgl/blob/9a0511c8e91a7f633c9c3292fccbcbad5281d1f5/examples/mxnet/gin/gin.py.
The only change is in the forward we take a graph and extract the features, rather than accept a graph and features as
two separate inputs.
How Powerful are Graph Neural Networks
https://arxiv.org/abs/1810.00826
https://openreview.net/forum?id=ryGs6iA5Km
Author's implementation: https://github.com/weihua916/powerful-gnns
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch.conv import GINConv
from dgl.nn.pytorch.glob import AvgPooling, MaxPooling, SumPooling
from .base import BaseGraphClassifier
class ApplyNodeFunc(nn.Module):
"""Update the node feature hv with MLP, BN and ReLU."""
def __init__(self, mlp):
super(ApplyNodeFunc, self).__init__()
self.mlp = mlp
self.bn = nn.BatchNorm1d(self.mlp.output_dim)
def forward(self, h):
h = self.mlp(h)
h = self.bn(h)
h = F.relu(h)
return h
class MLP(nn.Module):
"""MLP with linear output"""
def __init__(self, num_layers, input_dim, hidden_dim, output_dim):
"""MLP layers construction
        Parameters
        ----------
num_layers: int
The number of linear layers
input_dim: int
The dimensionality of input features
hidden_dim: int
The dimensionality of hidden units at ALL layers
output_dim: int
The number of classes for prediction
"""
super(MLP, self).__init__()
self.linear_or_not = True # default is linear model
self.num_layers = num_layers
if output_dim == 2:
self.output_dim = 1
else:
self.output_dim = output_dim
if num_layers < 1:
raise ValueError("number of layers should be positive!")
elif num_layers == 1:
# Linear model
self.linear = nn.Linear(input_dim, output_dim)
else:
# Multi-layer model
self.linear_or_not = False
self.linears = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
self.linears.append(nn.Linear(input_dim, hidden_dim))
for layer in range(num_layers - 2):
self.linears.append(nn.Linear(hidden_dim, hidden_dim))
self.linears.append(nn.Linear(hidden_dim, output_dim))
for layer in range(num_layers - 1):
                self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
def forward(self, x):
if self.linear_or_not:
# If linear model
return self.linear(x)
else:
# If MLP
h = x
for i in range(self.num_layers - 1):
h = F.relu(self.batch_norms[i](self.linears[i](h)))
return self.linears[-1](h)
class GINGraphClassifier(BaseGraphClassifier):
"""GIN model"""
def __init__(self, input_dim, output_dim, num_layers=5, num_mlp_layers=2, hidden_dim=64,
final_dropout=0.5, learn_eps=True, graph_pooling_type='sum',
neighbor_pooling_type='sum'):
"""model parameters setting
Parameters
---------
num_layers: int
The number of linear layers in the neural network
num_mlp_layers: int
The number of linear layers in mlps
input_dim: int
The dimensionality of input features
hidden_dim: int
The dimensionality of hidden units at ALL layers
output_dim: int
The number of classes for prediction
final_dropout: float
dropout ratio on the final linear layer
learn_eps: boolean
If True, learn epsilon to distinguish center nodes from neighbors
If False, aggregate neighbors and center nodes altogether.
neighbor_pooling_type: str
how to aggregate neighbors (sum, mean, or max)
graph_pooling_type: str
how to aggregate entire nodes in a graph (sum, mean or max)
"""
super(BaseGraphClassifier, self).__init__()
self.num_layers = num_layers
self.learn_eps = learn_eps
# List of MLPs
self.ginlayers = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
for layer in range(self.num_layers - 1):
if layer == 0:
mlp = MLP(num_mlp_layers, input_dim, hidden_dim, hidden_dim)
else:
mlp = MLP(num_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
self.ginlayers.append(
GINConv(ApplyNodeFunc(mlp), neighbor_pooling_type, 0, self.learn_eps))
self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
# Linear function for graph poolings of output of each layer
# which maps the output of different layers into a prediction score
self.linears_prediction = torch.nn.ModuleList()
for layer in range(num_layers):
if layer == 0:
self.linears_prediction.append(
nn.Linear(input_dim, output_dim))
else:
self.linears_prediction.append(
nn.Linear(hidden_dim, output_dim))
self.drop = nn.Dropout(final_dropout)
if graph_pooling_type == 'sum':
self.pool = SumPooling()
elif graph_pooling_type == 'mean':
self.pool = AvgPooling()
elif graph_pooling_type == 'max':
self.pool = MaxPooling()
else:
raise NotImplementedError
def forward(self, g, edge_weight=None):
# list of hidden representation at each layer (including input)
h = g.ndata['node_attr']
hidden_rep = [h]
for i in range(self.num_layers - 1):
h = self.ginlayers[i](g, h, edge_weight)
h = self.batch_norms[i](h)
h = F.relu(h)
hidden_rep.append(h)
score_over_layer = 0
# perform pooling over all nodes in each graph in every layer
for i, h in enumerate(hidden_rep):
pooled_h = self.pool(g, h)
score_over_layer += self.drop(self.linears_prediction[i](pooled_h))
return score_over_layer
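
if __name__ == '__main__':
    # Minimal smoke test (not part of the original file): an untrained model
    # on a toy graph with random 7-dimensional node features, checking only
    # the logit shape. eval() keeps BatchNorm well-behaved on one small graph.
    import dgl

    g = dgl.rand_graph(12, 40)
    g.ndata['node_attr'] = torch.randn(12, 7)
    model = GINGraphClassifier(input_dim=7, output_dim=3)
    model.eval()
    with torch.no_grad():
        logits = model(g)
    print(logits.shape)  # torch.Size([1, 3])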

# File: grabnel-master/src/models/base.py
"""Base model"""
import dgl
import torch
import torch.nn as nn
class BaseGraphClassifier(nn.Module):
"""Base class."""
def __init__(self, input_dim: int, number_of_labels: int, **kwargs):
"""
Args:
input_dim: Number of feature maps
number_of_labels: Number of labels in the classification task
"""
super(BaseGraphClassifier, self).__init__()
self.input_dim = input_dim
self.number_of_labels = number_of_labels
def forward(self, graph: dgl.DGLGraph) -> torch.tensor:
"""
Args:
graph: a DGL graph with attributes stored in graph.ndata['node_attr']
Returns: a torch tensor containing logits
"""

# File: grabnel-master/src/models/chebygin.py
# Implementation of the ChebyGIN model from Knyazev et al., "Understanding Attention and
# Generalization in Graph Neural Networks" (NeurIPS 2019);
# implemented here because the author provides the pre-trained model upon which we can attack directly (MNIST-75sp). We
# might also train this model on other datasets
# Xingchen Wan <xwan@robots.ox.ac.uk>
import sys
sys.path.append('..')
import torch.sparse
# sorry about the asterisks but those are present in the original implementation
from .chebygin_base.attention_pooling import *
from .chebygin_base.utils import *
from .base import BaseGraphClassifier
import dgl
class ChebyGINLayer(nn.Module):
"""
General Graph Neural Network layer that depending on arguments can be:
1. Graph Convolution Layer (T. Kipf and M. Welling, ICLR 2017)
2. Chebyshev Graph Convolution Layer (M. Defferrard et al., NeurIPS 2017)
3. GIN Layer (K. Xu et al., ICLR 2019)
4. ChebyGIN Layer (B. Knyazev et al., ICLR 2019 Workshop on Representation Learning on Graphs and Manifolds)
The first three types (1-3) of layers are particular cases of the fourth (4) case.
"""
def __init__(self,
in_features,
out_features,
K,
n_hidden=0,
aggregation='mean',
activation=nn.ReLU(True),
n_relations=1):
super(ChebyGINLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.n_relations = n_relations
assert K > 0, 'order is assumed to be > 0'
self.K = K
assert n_hidden >= 0, ('invalid n_hidden value', n_hidden)
self.n_hidden = n_hidden
assert aggregation in ['mean', 'sum'], ('invalid aggregation', aggregation)
self.aggregation = aggregation
self.activation = activation
n_in = self.in_features * self.K * n_relations
if self.n_hidden == 0:
fc = [nn.Linear(n_in, self.out_features)]
else:
fc = [nn.Linear(n_in, n_hidden),
nn.ReLU(True),
nn.Linear(n_hidden, self.out_features)]
if activation is not None:
fc.append(activation)
self.fc = nn.Sequential(*fc)
print('ChebyGINLayer', list(self.fc.children())[0].weight.shape,
torch.norm(list(self.fc.children())[0].weight, dim=1)[:10])
def __repr__(self):
return 'ChebyGINLayer(in_features={}, out_features={}, K={}, n_hidden={}, aggregation={})\nfc={}'.format(
self.in_features,
self.out_features,
self.K,
self.n_hidden,
self.aggregation,
str(self.fc))
def chebyshev_basis(self, L, X, K):
'''
Return T_k X where T_k are the Chebyshev polynomials of order up to K.
:param L: graph Laplacian, batch (B), nodes (N), nodes (N)
:param X: input of size batch (B), nodes (N), features (F)
        :param K: Chebyshev polynomial order, i.e. filter size (number of hops)
:return: Tensor of size (B,N,K,F) as a result of multiplying T_k(L) by X for each order
'''
if K > 1:
Xt = [X]
Xt.append(torch.bmm(L, X)) # B,N,F
for k in range(2, K):
Xt.append(2 * torch.bmm(L, Xt[k - 1]) - Xt[k - 2]) # B,N,F
Xt = torch.stack(Xt, 2) # B,N,K,F
return Xt
else:
# GCN
assert K == 1, K
return torch.bmm(L, X).unsqueeze(2) # B,N,1,F
def laplacian_batch(self, A, add_identity=False):
'''
Computes normalized Laplacian transformed so that its eigenvalues are in range [-1, 1].
Note that sum of all eigenvalues = trace(L) = 0.
:param A: Tensor of size (B,N,N) containing batch (B) of adjacency matrices of shape N,N
:return: Normalized Laplacian of size (B,N,N)
'''
B, N = A.shape[:2]
if add_identity:
A = A + torch.eye(N, device=A.get_device() if A.is_cuda else 'cpu').unsqueeze(0)
D = torch.sum(A, 1) # nodes degree (B,N)
D_hat = (D + 1e-5) ** (-0.5)
L = D_hat.view(B, N, 1) * A * D_hat.view(B, 1, N) # B,N,N
if not add_identity:
L = -L # for ChebyNet to make a valid Chebyshev basis
return D, L
def forward(self, data):
x, A, mask = data[:3]
B, N, F = x.shape
assert N == A.shape[1] == A.shape[2], ('invalid shape', N, x.shape, A.shape)
if len(A.shape) == 3:
A = A.unsqueeze(3)
y_out = []
for rel in range(A.shape[3]):
D, L = self.laplacian_batch(A[:, :, :, rel],
add_identity=self.K == 1) # for the first layer this can be done at the preprocessing stage
y = self.chebyshev_basis(L, x, self.K) # B,N,K,F
if self.aggregation == 'sum':
# Sum features of neighbors
if self.K == 1:
# GIN
y = y * D.view(B, N, 1, 1)
else:
# ChebyGIN
D_GIN = torch.ones(B, N, self.K, device=x.get_device() if x.is_cuda else 'cpu')
D_GIN[:, :, 1:] = D.view(B, N, 1).expand(-1, -1, self.K - 1) # keep self-loop features the same
y = y * D_GIN.view(B, N, self.K, 1) # apply summation for other scales
y_out.append(y)
y = torch.cat(y_out, dim=2)
y = self.fc(y.view(B, N, -1)) # B,N,F
if len(mask.shape) == 2:
mask = mask.unsqueeze(2)
y = y * mask.float()
output = [y, A, mask]
output.extend(data[3:] + [x]) # for python2
return output
class GraphReadout(nn.Module):
"""
Global pooling layer applied after the last graph layer.
"""
def __init__(self,
pool_type):
super(GraphReadout, self).__init__()
self.pool_type = pool_type
dim = 1 # pooling over nodes
if pool_type == 'max':
self.readout_layer = lambda x, mask: torch.max(x, dim=dim)[0]
elif pool_type in ['avg', 'mean']:
# sum over all nodes, then divide by the number of valid nodes in each sample of the batch
self.readout_layer = lambda x, mask: torch.sum(x, dim=dim) / torch.sum(mask, dim=dim).float()
elif pool_type in ['sum']:
self.readout_layer = lambda x, mask: torch.sum(x, dim=dim)
else:
raise NotImplementedError(pool_type)
def __repr__(self):
return 'GraphReadout({})'.format(self.pool_type)
def forward(self, data):
x, A, mask = data[:3]
B, N = x.shape[:2]
x = self.readout_layer(x, mask.view(B, N, 1))
output = [x]
output.extend(data[1:]) # [x, *data[1:]] doesn't work in Python2
return output
class ChebyGIN(BaseGraphClassifier):
"""
Implementation of the ChebyGIN with Attention Mechanisms in Boris Knyazev, Graham W. Taylor, Mohamed R. Amer.
Understanding Attention and Generalization in Graph Neural Networks. NeurIPS 2019.
This repo has been adapted with our interface, which reads dgl graph and outputs the logits as expected.
"""
def __init__(self,
in_features,
number_of_labels,
filters,
K=1,
n_hidden=0,
aggregation='mean',
dropout=0,
readout='max',
pool=None, # Example: 'attn_gt_threshold_0_skip_skip'.split('_'),
pool_arch='fc_prev'.split('_'),
large_graph=False, # > ~500 graphs
kl_weight=None,
graph_layer_fn=None,
init='normal',
scale=None,
debug=False):
super().__init__(in_features, number_of_labels)
self.out_features = number_of_labels
assert len(filters) > 0, 'filters must be an iterable object with at least one element'
assert K > 0, 'filter scale must be a positive integer'
self.pool = pool
self.pool_arch = pool_arch
self.debug = debug
n_prev = None
attn_gnn = None
if graph_layer_fn is None:
graph_layer_fn = lambda n_in, n_out, K_, n_hidden_, activation: ChebyGINLayer(in_features=n_in,
out_features=n_out,
K=K_,
n_hidden=n_hidden_,
aggregation=aggregation,
activation=activation)
if self.pool_arch is not None and self.pool_arch[0] == 'gnn':
attn_gnn = lambda n_in: ChebyGIN(in_features=n_in,
number_of_labels=0,
filters=[32, 32, 1],
K=np.min((K, 2)),
n_hidden=0,
graph_layer_fn=graph_layer_fn)
graph_layers = []
for layer, f in enumerate(filters + [None]):
n_in = in_features if layer == 0 else filters[layer - 1]
# Pooling layers
# It's a non-standard way to put pooling before convolution, but it's important for our work
if self.pool is not None and len(self.pool) > len(filters) + layer and self.pool[layer + 3] != 'skip':
graph_layers.append(AttentionPooling(in_features=n_in, in_features_prev=n_prev,
pool_type=self.pool[:3] + [self.pool[layer + 3]],
pool_arch=self.pool_arch,
large_graph=large_graph,
kl_weight=kl_weight,
attn_gnn=attn_gnn,
init=init,
scale=scale,
debug=debug))
if f is not None:
# Graph "convolution" layers
# no ReLU if the last layer and no fc layer after that
graph_layers.append(graph_layer_fn(n_in, f, K, n_hidden,
None if self.out_features == 0 and layer == len(
filters) - 1 else nn.ReLU(True)))
n_prev = n_in
if self.out_features > 0:
# Global pooling over nodes
graph_layers.append(GraphReadout(readout))
self.graph_layers = nn.Sequential(*graph_layers)
if self.out_features > 0:
# Fully connected (classification/regression) layers
self.fc = nn.Sequential(
*(([nn.Dropout(p=dropout)] if dropout > 0 else []) + [nn.Linear(filters[-1], number_of_labels)]))
def _forward(self, data):
data = self.graph_layers(data)
if self.out_features > 0:
y = self.fc(data[0]) # B,out_features
else:
y = data[0] # B,N,out_features
return y, data[4]
def forward(self, graph) -> torch.tensor:
parsed_data = parse_dgl_graph(graph)
res = torch.empty(size=(len(parsed_data), self.out_features))
for i, g in enumerate(parsed_data):
res[i] = self._forward(g)[0].detach()
return res
def parse_dgl_graph(graph):
if isinstance(graph, list):
graphs = graph
elif isinstance(graph, dgl.DGLGraph):
try:
graphs = dgl.unbatch(graph)
except RuntimeError:
graphs = [graph]
res = []
for graph in graphs:
N_nodes = graph.number_of_nodes()
u, v = graph.all_edges(order='eid')
wadj = np.zeros((N_nodes, N_nodes))
if 'weight' in graph.edata.keys():
wadj[u.numpy(), v.numpy()] = graph.edata['weight'].numpy()
else:
wadj[u.numpy(), v.numpy()] = 1.
adj = torch.tensor(wadj).float().unsqueeze(0)
if 'node_attr' in graph.ndata.keys():
node_feat = graph.ndata['node_attr'].unsqueeze(0)
else:
            node_feat = graph.in_degrees().float().view(1, -1, 1)  # match the (B, N, F) layout of the node_attr branch
mask = torch.ones(1, N_nodes, dtype=torch.uint8)
res.append([node_feat, adj, mask, None, {'N_nodes': torch.zeros(1, 1) + N_nodes}])
return res
class ChebyGINOriginal(nn.Module):
'''
Graph Neural Network class.
'''
def __init__(self,
in_features,
out_features,
filters,
K=1,
n_hidden=0,
aggregation='mean',
dropout=0,
readout='max',
pool=None, # Example: 'attn_gt_threshold_0_skip_skip'.split('_'),
pool_arch='fc_prev'.split('_'),
large_graph=False, # > ~500 graphs
kl_weight=None,
graph_layer_fn=None,
init='normal',
scale=None,
debug=False):
super().__init__()
self.out_features = out_features
assert len(filters) > 0, 'filters must be an iterable object with at least one element'
assert K > 0, 'filter scale must be a positive integer'
self.pool = pool
self.pool_arch = pool_arch
self.debug = debug
n_prev = None
attn_gnn = None
if graph_layer_fn is None:
graph_layer_fn = lambda n_in, n_out, K_, n_hidden_, activation: ChebyGINLayer(in_features=n_in,
out_features=n_out,
K=K_,
n_hidden=n_hidden_,
aggregation=aggregation,
activation=activation)
if self.pool_arch is not None and self.pool_arch[0] == 'gnn':
attn_gnn = lambda n_in: ChebyGINOriginal(in_features=n_in,
out_features=0,
filters=[32, 32, 1],
K=np.min((K, 2)),
n_hidden=0,
graph_layer_fn=graph_layer_fn)
graph_layers = []
for layer, f in enumerate(filters + [None]):
n_in = in_features if layer == 0 else filters[layer - 1]
# Pooling layers
# It's a non-standard way to put pooling before convolution, but it's important for our work
if self.pool is not None and len(self.pool) > len(filters) + layer and self.pool[layer + 3] != 'skip':
graph_layers.append(AttentionPooling(in_features=n_in, in_features_prev=n_prev,
pool_type=self.pool[:3] + [self.pool[layer + 3]],
pool_arch=self.pool_arch,
large_graph=large_graph,
kl_weight=kl_weight,
attn_gnn=attn_gnn,
init=init,
scale=scale,
debug=debug))
if f is not None:
# Graph "convolution" layers
# no ReLU if the last layer and no fc layer after that
graph_layers.append(graph_layer_fn(n_in, f, K, n_hidden,
None if self.out_features == 0 and layer == len(
filters) - 1 else nn.ReLU(True)))
n_prev = n_in
if self.out_features > 0:
# Global pooling over nodes
graph_layers.append(GraphReadout(readout))
self.graph_layers = nn.Sequential(*graph_layers)
if self.out_features > 0:
# Fully connected (classification/regression) layers
self.fc = nn.Sequential(
*(([nn.Dropout(p=dropout)] if dropout > 0 else []) + [nn.Linear(filters[-1], out_features)]))
def forward(self, data):
data = self.graph_layers(data)
if self.out_features > 0:
y = self.fc(data[0]) # B,out_features
else:
y = data[0] # B,N,out_features
return y, data[4]
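
if __name__ == '__main__':
    # Shape illustration for parse_dgl_graph (not part of the original file):
    # each dgl graph becomes a [features, adjacency, mask, labels, params]
    # list in the format the ChebyGIN layers consume.
    g = dgl.rand_graph(6, 14)
    g.ndata['node_attr'] = torch.randn(6, 3)
    x, adj, mask, _, params = parse_dgl_graph(g)[0]
    print(x.shape, adj.shape, mask.shape, params['N_nodes'])
    # torch.Size([1, 6, 3]) torch.Size([1, 6, 6]) torch.Size([1, 6]) tensor([[6.]])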

# File: grabnel-master/src/models/s2v.py
import torch.nn as nn
import dgl
import torch
import torch.nn.functional as F
from .base import BaseGraphClassifier
from pytorch_structure2vec.s2v_lib.embedding import EmbedMeanField, EmbedLoopyBP
#from pytorch_structure2vec.graph_classification.util import S2VGraph
import numpy as np
import networkx as nx
class S2VGraph(object):
def __init__(self, g, label, node_tags=None):
self.num_nodes = len(g)
self.node_tags = node_tags
x, y = zip(*g.edges())
self.num_edges = len(x)
self.label = label
self.edge_pairs = np.ndarray(shape=(self.num_edges, 2), dtype=np.int32)
self.edge_pairs[:, 0] = x
self.edge_pairs[:, 1] = y
self.edge_pairs = self.edge_pairs.flatten()
def to_networkx(self):
edges = np.reshape(self.edge_pairs, (self.num_edges, 2))
g = nx.Graph()
g.add_edges_from(edges)
return g
class S2VClassifier(BaseGraphClassifier):
def __init__(self, input_dim, number_of_labels, latent_dim=64, embedding_output_dim=64, max_lv=2, gm='mean_field'):
super(S2VClassifier, self).__init__(input_dim, number_of_labels)
# select model class
if gm == 'mean_field':
model = EmbedMeanField
elif gm == 'loopy_bp':
model = EmbedLoopyBP
print('input_dim', input_dim, 'max_lv', max_lv)
# initialise embedding model
self.s2v = model(latent_dim=latent_dim, output_dim=0, num_node_feats=input_dim,
num_edge_feats=0, max_lv=max_lv)
# MLP layer
self.mlp = MLP(input_size=embedding_output_dim, hidden_size=32, number_of_labels=number_of_labels)
# patched by xingchen to avoid the batching / unbatching error in s2v
# def forward(self, graph: dgl.DGLGraph) -> torch.tensor:
# s2v_graphs, features = self.prepare_batch(graph)
# embedding = self.s2v(s2v_graphs, features, None)
# return self.mlp(embedding)
def forward(self, graphs) -> torch.Tensor:
s2v_graphs, features = self.prepare_batch(graphs)
embedding = self.s2v(s2v_graphs, features, None)
return self.mlp(embedding)
# @staticmethod
# def prepare_batch(graph_batch: dgl.DGLGraph) -> (list, torch.tensor):
# features = graph_batch.ndata['node_attr']
# graphs = dgl.unbatch(graph_batch)
# s2v_graphs = []
# for graph in graphs:
# label = number_connected_components(graph)
# g = graph.to_networkx()
# s2v_graphs.append(S2VGraph(g, label))
# return s2v_graphs, features
#
@staticmethod
def prepare_batch(graphs) -> (list, torch.tensor):
if not isinstance(graphs, list):
graphs = [graphs] # one element
features = torch.cat([graph.ndata['node_attr'] for graph in graphs])
# graphs = dgl.unbatch(graph_batch)
s2v_graphs = []
for graph in graphs:
label = number_connected_components(graph)
g = graph.to_networkx()
s2v_graphs.append(S2VGraph(g, label))
return s2v_graphs, features
class MLP(nn.Module):
def __init__(self, input_size, hidden_size, number_of_labels):
super(MLP, self).__init__()
self.h1_weights = nn.Linear(input_size, hidden_size)
self.last_weights = nn.Linear(hidden_size, number_of_labels)
def forward(self, x):
x = self.h1_weights(x)
x = F.relu(x)
x = self.last_weights(x)
#x = F.log_softmax(x, dim=1)
return x
from scipy.sparse.csgraph import connected_components
def number_connected_components(dglgraph):
    return connected_components(dglgraph.adjacency_matrix(scipy_fmt="csr"))[0]
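
if __name__ == '__main__':
    # Quick check of number_connected_components (not part of the original
    # file), assuming the dgl version pinned by this repository: a 4-node
    # graph with the single edge 0->1 has three weakly connected components
    # ({0, 1}, {2} and {3}).
    g = dgl.graph((torch.tensor([0]), torch.tensor([1])), num_nodes=4)
    print(number_connected_components(g))  # 3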

# File: grabnel-master/src/models/utils.py
from . import GCNGraphClassifier, GINGraphClassifier, ChebyGIN
try:
from pytorch_structure2vec.s2v_lib.embedding import EmbedMeanField, EmbedLoopyBP
from . import S2VClassifier
s2v_available = True
except:
print('Failed to import S2V surrogate!')
s2v_available = False
def get_model_class(model_name):
"""Returns a model class which implements `BaseGraphClassifier`."""
if model_name == 'gcn':
model_class = GCNGraphClassifier
elif model_name == 'gin':
model_class = GINGraphClassifier
elif model_name == 'chebygin':
model_class = ChebyGIN
elif model_name == 's2v' and s2v_available:
model_class = S2VClassifier
    else:
        raise ValueError(f'Unrecognised model name {model_name}')
return model_class
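
if __name__ == '__main__':
    # Example lookup (not part of the original file); run with
    # `python -m src.models.utils` so the relative import resolves.
    model_cls = get_model_class('gcn')
    model = model_cls(input_dim=5, number_of_labels=2)
    print(type(model).__name__)  # GCNGraphClassifier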

# File: grabnel-master/src/models/gcn.py
"""GCN based classification model."""
import dgl
import torch
import torch.nn as nn
from dgl.nn.pytorch.conv import GraphConv
from dgl.nn.pytorch.glob import MaxPooling
from .base import BaseGraphClassifier
class GCNGraphClassifier(BaseGraphClassifier):
"""A GCN based graph classifier. Outputs are logits.
This model is based off the ReWatt model.
"""
def __init__(self, input_dim: int, number_of_labels: int, hidden_dim: int = 16):
super(GCNGraphClassifier, self).__init__(input_dim, number_of_labels)
self.hidden_dim = hidden_dim
self.graph_conv1 = GraphConv(input_dim, hidden_dim, allow_zero_in_degree=True)
self.graph_conv2 = GraphConv(hidden_dim, hidden_dim, allow_zero_in_degree=True)
self.graph_conv3 = GraphConv(hidden_dim, hidden_dim, allow_zero_in_degree=True)
self.pooling = MaxPooling()
        if number_of_labels == 2:
            self.MLP = nn.Linear(hidden_dim, 1)
        else:
            self.MLP = nn.Linear(hidden_dim, number_of_labels)
def forward(self, graph: dgl.DGLGraph, edge_weight=None) -> torch.tensor:
"""Produce logits.
Edit: added edge_weight options (for dgl > 0.6.0)
"""
x = graph.ndata['node_attr']
x = torch.relu(self.graph_conv1(graph, x, edge_weight=edge_weight))
x = torch.relu(self.graph_conv2(graph, x, edge_weight=edge_weight))
x = torch.relu(self.graph_conv3(graph, x, edge_weight=edge_weight))
x = self.pooling(graph, x)
x = self.MLP(x)
return x
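
if __name__ == '__main__':
    # Minimal smoke test (not part of the original file), illustrating the
    # optional edge_weight argument used by the gradient-based attack; an
    # all-ones weight vector reproduces the unweighted output. The model is
    # untrained and used purely for illustration.
    g = dgl.rand_graph(8, 20)
    g.ndata['node_attr'] = torch.randn(8, 4)
    model = GCNGraphClassifier(input_dim=4, number_of_labels=3)
    with torch.no_grad():
        logits = model(g, edge_weight=torch.ones(g.number_of_edges()))
    print(logits.shape)  # torch.Size([1, 3])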

# File: grabnel-master/src/models/gunet/network.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.models.gunet.utils.ops import GCN, GraphUnet, Initializer, norm_g
from src.models.base import BaseGraphClassifier
import dgl
import numpy as np
def parse_dgl_graph(graph):
"""Parse the dgl graph"""
if isinstance(graph, list):
graphs = graph
elif isinstance(graph, dgl.DGLGraph):
try:
graphs = dgl.unbatch(graph)
except RuntimeError:
graphs = [graph]
res = []
for graph in graphs:
N_nodes = graph.number_of_nodes()
u, v = graph.all_edges(order='eid')
wadj = np.zeros((N_nodes, N_nodes))
if 'weight' in graph.edata.keys():
wadj[u.numpy(), v.numpy()] = graph.edata['weight'].numpy()
else:
wadj[u.numpy(), v.numpy()] = 1.
adj = torch.tensor(wadj).float().unsqueeze(0)
if 'node_attr' in graph.ndata.keys():
node_feat = graph.ndata['node_attr'].unsqueeze(0)
else:
            node_feat = graph.in_degrees().float().view(1, -1, 1)  # match the (B, N, F) layout of the node_attr branch
mask = torch.ones(1, N_nodes, dtype=torch.uint8)
res.append([node_feat, adj, mask, None, {'N_nodes': torch.zeros(1, 1) + N_nodes}])
return res
class GUNet(BaseGraphClassifier):
def __init__(self, net, input_dim=3, number_of_labels=2):
"""Wrapper around GUNet into a BaseGraphClassifier. Used as a victim for the BO attack"""
super().__init__(input_dim, number_of_labels)
self.net = net
self.net.eval()
def forward(self, graph: dgl.DGLGraph):
parsed_data = parse_dgl_graph(graph)
res = torch.empty(size=(len(parsed_data), self.number_of_labels))
for i, g in enumerate(parsed_data):
res[i] = self.net.get_logit(g[1], g[0])
return res
def is_correct(self, graph: dgl.DGLGraph, labels):
parsed_data = parse_dgl_graph(graph)
acc = torch.empty(size=(len(parsed_data), 1))
for i, g in enumerate(parsed_data):
acc[i] = self.net(g[1], g[0], labels[i])[1]
return acc
class GNet(nn.Module):
"""The original GNet module. Used for model training"""
def __init__(self, in_dim, n_classes, args):
super(GNet, self).__init__()
self.n_act = getattr(nn, args.act_n)()
self.c_act = getattr(nn, args.act_c)()
self.s_gcn = GCN(in_dim, args.l_dim, self.n_act, args.drop_n)
self.g_unet = GraphUnet(
args.ks, args.l_dim, args.l_dim, args.l_dim, self.n_act,
args.drop_n)
self.out_l_1 = nn.Linear(3*args.l_dim*(args.l_num+1), args.h_dim)
self.out_l_2 = nn.Linear(args.h_dim, n_classes)
self.out_drop = nn.Dropout(p=args.drop_c)
Initializer.weights_init(self)
def forward(self, gs, hs, labels):
hs = self.embed(gs, hs)
logits = self.classify(hs)
return self.metric(logits, labels)
def get_logit(self, gs, hs):
"""Get logits in eval mode"""
with torch.no_grad():
hs = self.embed(gs, hs)
return self.classify(hs)
def embed(self, gs, hs):
o_hs = []
for g, h in zip(gs, hs):
h = self.embed_one(g, h)
o_hs.append(h)
hs = torch.stack(o_hs, 0)
return hs
def embed_one(self, g, h):
g = norm_g(g)
h = self.s_gcn(g, h)
hs = self.g_unet(g, h)
h = self.readout(hs)
return h
def readout(self, hs):
h_max = [torch.max(h, 0)[0] for h in hs]
h_sum = [torch.sum(h, 0) for h in hs]
h_mean = [torch.mean(h, 0) for h in hs]
h = torch.cat(h_max + h_sum + h_mean)
return h
def classify(self, h):
h = self.out_drop(h)
h = self.out_l_1(h)
h = self.c_act(h)
h = self.out_drop(h)
h = self.out_l_2(h)
return F.log_softmax(h, dim=1)
def metric(self, logits, labels):
loss = F.nll_loss(logits, labels)
_, preds = torch.max(logits, 1)
acc = torch.mean((preds == labels).float())
return loss, acc
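
if __name__ == '__main__':
    # Illustration of the readout dimensionality (not part of the original
    # file): GraphUnet returns l_num + 1 per-layer feature matrices, and
    # readout concatenates the max/sum/mean pool of each, giving a vector of
    # size 3 * l_dim * (l_num + 1), exactly the input size of out_l_1 above.
    l_dim, l_num, n_nodes = 48, 4, 10
    hs = [torch.randn(n_nodes, l_dim) for _ in range(l_num + 1)]
    h = GNet.readout(None, hs)  # readout never touches `self`
    print(h.shape)  # torch.Size([720]) == 3 * 48 * 5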

# File: grabnel-master/src/models/gunet/trainer.py
import torch
from tqdm import tqdm
import torch.optim as optim
from src.models.gunet.utils.dataset import GraphData
import pandas as pd
class Trainer:
def __init__(self, args, net, G_data, save_path, log_path):
self.args = args
self.net = net
self.feat_dim = G_data.feat_dim
self.fold_idx = G_data.fold_idx
self.init(args, G_data.train_gs, G_data.test_gs)
# specify the model save and logging paths
self.save_path = save_path
self.log_path = log_path
self.model_file_name = f'gunet_{args.data}_{args.seed}.pt'
if torch.cuda.is_available():
self.net.cuda()
self.training_log = []
def init(self, args, train_gs, test_gs):
print('#train: %d, #test: %d' % (len(train_gs), len(test_gs)))
train_data = GraphData(train_gs, self.feat_dim)
test_data = GraphData(test_gs, self.feat_dim)
self.train_d = train_data.loader(self.args.batch, True)
self.test_d = test_data.loader(self.args.batch, False)
self.optimizer = optim.Adam(
self.net.parameters(), lr=self.args.lr, amsgrad=True,
weight_decay=0.0008)
def to_cuda(self, gs):
if torch.cuda.is_available():
if type(gs) == list:
return [g.cuda() for g in gs]
return gs.cuda()
return gs
def run_epoch(self, epoch, data, model, optimizer):
losses, accs, n_samples = [], [], 0
for batch in tqdm(data, desc=str(epoch), unit='b'):
cur_len, gs, hs, ys = batch
gs, hs, ys = map(self.to_cuda, [gs, hs, ys])
loss, acc = model(gs, hs, ys)
losses.append(loss*cur_len)
accs.append(acc*cur_len)
n_samples += cur_len
if optimizer is not None:
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_loss, avg_acc = sum(losses) / n_samples, sum(accs) / n_samples
return avg_loss.item(), avg_acc.item()
def train(self):
max_acc = 0.0
train_str = 'Train epoch %d: loss %.5f acc %.5f'
test_str = 'Test epoch %d: loss %.5f acc %.5f max %.5f'
# line_str = '%d:\t%.5f\n'
for e_id in range(self.args.num_epochs):
self.net.train()
train_loss, train_acc = self.run_epoch(
e_id, self.train_d, self.net, self.optimizer)
print(train_str % (e_id, train_loss, train_acc))
with torch.no_grad():
self.net.eval()
loss, acc = self.run_epoch(e_id, self.test_d, self.net, None)
max_acc = max(max_acc, acc)
print(test_str % (e_id, loss, acc, max_acc))
                if acc == max_acc and self.save_path is not None:
print('Best model')
torch.save(self.net.state_dict(), f'{self.save_path}/{self.model_file_name}')
# torch.save(self.net.state_dict(), f'{self.save_dir}/model.pickle')
self.training_log.append([e_id, train_loss, loss, train_acc, acc])
if self.log_path is not None:
logs = pd.DataFrame(self.training_log, columns=['epoch', 'train_loss', 'valid_loss', 'train_acc', 'valid_acc'])
logs.to_csv(f'{self.log_path}/gunet_{self.args.data}_{self.args.seed}.csv')
# with open(self.args.acc_file, 'a+') as f:
# f.write(line_str % (self.fold_idx, max_acc))
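
if __name__ == '__main__':
    # Sketch of the configuration this class expects (not part of the
    # original file). `net` and `G_data` would come from GNet and the
    # FileLoader respectively; only the argument fields actually read by
    # Trainer are shown, with illustrative values.
    from argparse import Namespace
    args = Namespace(batch=32, lr=1e-3, num_epochs=100, data='IMDB-BINARY', seed=0)
    # trainer = Trainer(args, net, G_data, save_path='models', log_path='logs')
    # trainer.train()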

# File: grabnel-master/src/models/gunet/utils/data_loader.py
import torch
from tqdm import tqdm
import networkx as nx
import numpy as np
import torch.nn.functional as F
from sklearn.model_selection import StratifiedKFold
from functools import partial
import os
import pickle
class G_data(object):
def __init__(self, num_class, feat_dim, g_list, seed, task_name, split_save_path=None):
self.num_class = num_class
self.feat_dim = feat_dim
self.g_list = g_list
self.split_save_path = split_save_path
self.seed = seed
self.task_name = task_name
self.sep_data()
def sep_data(self,):
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=self.seed)
labels = [g.label for g in self.g_list]
self.idx_list = list(skf.split(np.zeros(len(labels)), labels))
def use_fold_data(self, fold_idx):
self.fold_idx = fold_idx+1
train_idx, test_idx = self.idx_list[fold_idx]
self.train_gs = [self.g_list[i] for i in train_idx]
self.test_gs = [self.g_list[i] for i in test_idx]
def pickle_data(self):
if self.split_save_path is not None:
# pickle.dump(self.train_gs, open(f'{self.split_save_path}/train_data.pickle', 'wb'))
save_dict = {
'num_class': self.num_class,
'feat_dim': self.feat_dim,
'seed': self.seed,
'test_split': self.test_gs
}
pickle.dump(save_dict, open(f'{self.split_save_path}/gunet_split_{self.task_name}_test_data_{self.seed}.pickle', 'wb'))
class FileLoader(object):
def __init__(self, args):
self.args = args
def line_genor(self, lines):
for line in lines:
yield line
def gen_graph(self, f, i, label_dict, feat_dict, deg_as_tag):
row = next(f).strip().split()
n, label = [int(w) for w in row]
if label not in label_dict:
label_dict[label] = len(label_dict)
g = nx.Graph()
g.add_nodes_from(list(range(n)))
node_tags = []
for j in range(n):
row = next(f).strip().split()
tmp = int(row[1]) + 2
row = [int(w) for w in row[:tmp]]
if row[0] not in feat_dict:
feat_dict[row[0]] = len(feat_dict)
for k in range(2, len(row)):
if j != row[k]:
g.add_edge(j, row[k])
if len(row) > 2:
node_tags.append(feat_dict[row[0]])
g.label = label
g.remove_nodes_from(list(nx.isolates(g)))
if deg_as_tag:
g.node_tags = list(dict(g.degree).values())
else:
g.node_tags = node_tags
return g
def process_g(self, label_dict, tag2index, tagset, g):
g.label = label_dict[g.label]
g.feas = torch.tensor([tag2index[tag] for tag in g.node_tags])
g.feas = F.one_hot(g.feas, len(tagset))
A = torch.FloatTensor(nx.to_numpy_matrix(g))
g.A = A + torch.eye(g.number_of_nodes())
return g
def load_data(self, preamble_path=None):
args = self.args
print('loading data ...')
g_list = []
label_dict = {}
feat_dict = {}
if preamble_path is None: fp = f'data/{args.data}/{args.data}.txt'
else: fp = os.path.join(preamble_path, f'{args.data}/{args.data}.txt')
with open(fp, 'r') as f:
lines = f.readlines()
f = self.line_genor(lines)
n_g = int(next(f).strip())
for i in tqdm(range(n_g), desc="Create graph", unit='graphs'):
g = self.gen_graph(f, i, label_dict, feat_dict, args.deg_as_tag)
g_list.append(g)
tagset = set([])
for g in g_list:
tagset = tagset.union(set(g.node_tags))
tagset = list(tagset)
tag2index = {tagset[i]: i for i in range(len(tagset))}
f_n = partial(self.process_g, label_dict, tag2index, tagset)
new_g_list = []
for g in tqdm(g_list, desc="Process graph", unit='graphs'):
new_g_list.append(f_n(g))
num_class = len(label_dict)
feat_dim = len(tagset)
print('# classes: %d' % num_class, '# maximum node tag: %d' % feat_dim)
print(f'Split save path: {args.split_save_path}, Seed = {args.seed}')
gdata = G_data(num_class, feat_dim, new_g_list,
task_name=args.data,
split_save_path=args.split_save_path,
seed=args.seed)
# gdata.pickle_data()
# if args.split_save_path is not None:
# gdata.split_save_path = args.split_save_path
# gdata.pickle_data()
# gdata = G_data(num_class, feat_dim, new_g_list, task_name=args.data, split_save_path=args.split_save_path)
return gdata
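
# For reference (not part of the original file): the text format parsed by
# gen_graph above, inferred from the parsing code, is the standard
# graph-kernel dataset layout:
#
#   <number of graphs>
#   <n_nodes> <graph_label>                                      # first line of each graph
#   <node_tag> <n_neighbours> <neighbour_0> <neighbour_1> ...    # one line per node
#
# For example, a file containing a single labelled triangle:
#   1
#   3 0
#   0 2 1 2
#   0 2 0 2
#   0 2 0 1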

# File: grabnel-master/src/models/gunet/utils/dataset.py
import random
import torch
import networkx as nx
import dgl
class GraphData(object):
def __init__(self, data, feat_dim):
super(GraphData, self).__init__()
self.data = data
self.feat_dim = feat_dim
self.idx = list(range(len(data)))
self.pos = 0
def __reset__(self):
self.pos = 0
if self.shuffle:
random.shuffle(self.idx)
def __len__(self):
        return (len(self.data) + self.batch - 1) // self.batch  # ceil division
def __getitem__(self, idx):
g = self.data[idx]
return g.A, g.feas.float(), g.label
def __iter__(self):
return self
def __next__(self):
if self.pos >= len(self.data):
self.__reset__()
raise StopIteration
cur_idx = self.idx[self.pos: self.pos+self.batch]
data = [self.__getitem__(idx) for idx in cur_idx]
self.pos += len(cur_idx)
gs, hs, labels = map(list, zip(*data))
return len(gs), gs, hs, torch.LongTensor(labels)
def loader(self, batch, shuffle, *args):
self.batch = batch
self.shuffle = shuffle
if shuffle:
random.shuffle(self.idx)
return self
def gunet_graph2dgl(graph):
"""Convert the GUnet native data format to one compatible with this repository"""
adj = graph.A.numpy()
G = nx.from_numpy_array(adj)
feat = graph.feas.float().numpy()
G = G.to_directed()
node_features = {'node_attr': feat}
for n in G.nodes():
for k, v in node_features.items():
G.nodes[n][k] = v[n]
# print(G.nodes(data=True))
g = dgl.from_networkx(G, node_attrs=['node_attr'])
return dgl.to_simple(g)
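
if __name__ == '__main__':
    # Minimal conversion example (not part of the original file): any object
    # exposing an adjacency tensor `A` and node features `feas` (as produced
    # by FileLoader.process_g) can be converted to a dgl graph.
    from types import SimpleNamespace
    A = torch.tensor([[1., 1., 0.],
                      [1., 1., 1.],
                      [0., 1., 1.]])  # 3-node path plus self-loops
    toy = SimpleNamespace(A=A, feas=torch.eye(3))
    g = gunet_graph2dgl(toy)
    print(g)  # directed simple graph carrying 'node_attr' features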

# File: grabnel-master/src/models/gunet/utils/ops.py
import torch
import torch.nn as nn
import numpy as np
class GraphUnet(nn.Module):
def __init__(self, ks, in_dim, out_dim, dim, act, drop_p):
super(GraphUnet, self).__init__()
self.ks = ks
self.bottom_gcn = GCN(dim, dim, act, drop_p)
self.down_gcns = nn.ModuleList()
self.up_gcns = nn.ModuleList()
self.pools = nn.ModuleList()
self.unpools = nn.ModuleList()
self.l_n = len(ks)
for i in range(self.l_n):
self.down_gcns.append(GCN(dim, dim, act, drop_p))
self.up_gcns.append(GCN(dim, dim, act, drop_p))
self.pools.append(Pool(ks[i], dim, drop_p))
self.unpools.append(Unpool(dim, dim, drop_p))
def forward(self, g, h):
adj_ms = []
indices_list = []
down_outs = []
hs = []
org_h = h
for i in range(self.l_n):
h = self.down_gcns[i](g, h)
adj_ms.append(g)
down_outs.append(h)
g, h, idx = self.pools[i](g, h)
indices_list.append(idx)
h = self.bottom_gcn(g, h)
for i in range(self.l_n):
up_idx = self.l_n - i - 1
g, idx = adj_ms[up_idx], indices_list[up_idx]
g, h = self.unpools[i](g, h, down_outs[up_idx], idx)
h = self.up_gcns[i](g, h)
h = h.add(down_outs[up_idx])
hs.append(h)
h = h.add(org_h)
hs.append(h)
return hs
class GCN(nn.Module):
def __init__(self, in_dim, out_dim, act, p):
super(GCN, self).__init__()
self.proj = nn.Linear(in_dim, out_dim)
self.act = act
self.drop = nn.Dropout(p=p) if p > 0.0 else nn.Identity()
def forward(self, g, h):
h = self.drop(h)
h = torch.matmul(g, h)
h = self.proj(h)
h = self.act(h)
return h
class Pool(nn.Module):
def __init__(self, k, in_dim, p):
super(Pool, self).__init__()
self.k = k
self.sigmoid = nn.Sigmoid()
self.proj = nn.Linear(in_dim, 1)
self.drop = nn.Dropout(p=p) if p > 0 else nn.Identity()
def forward(self, g, h):
Z = self.drop(h)
weights = self.proj(Z).squeeze()
scores = self.sigmoid(weights)
return top_k_graph(scores, g, h, self.k)
class Unpool(nn.Module):
def __init__(self, *args):
super(Unpool, self).__init__()
def forward(self, g, h, pre_h, idx):
new_h = h.new_zeros([g.shape[0], h.shape[1]])
new_h[idx] = h
return g, new_h
def top_k_graph(scores, g, h, k):
num_nodes = g.shape[0]
values, idx = torch.topk(scores, max(2, int(k*num_nodes)))
new_h = h[idx, :]
values = torch.unsqueeze(values, -1)
new_h = torch.mul(new_h, values)
un_g = g.bool().float()
un_g = torch.matmul(un_g, un_g).bool().float()
un_g = un_g[idx, :]
un_g = un_g[:, idx]
g = norm_g(un_g)
return g, new_h, idx
def norm_g(g):
degrees = torch.sum(g, 1)
g = g / degrees
return g
class Initializer(object):
@classmethod
def _glorot_uniform(cls, w):
if len(w.size()) == 2:
fan_in, fan_out = w.size()
elif len(w.size()) == 3:
fan_in = w.size()[1] * w.size()[2]
fan_out = w.size()[0] * w.size()[2]
else:
fan_in = np.prod(w.size())
fan_out = np.prod(w.size())
limit = np.sqrt(6.0 / (fan_in + fan_out))
w.uniform_(-limit, limit)
@classmethod
def _param_init(cls, m):
if isinstance(m, nn.parameter.Parameter):
cls._glorot_uniform(m.data)
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
cls._glorot_uniform(m.weight.data)
@classmethod
def weights_init(cls, m):
for p in m.modules():
if isinstance(p, nn.ParameterList):
for pp in p:
cls._param_init(pp)
else:
cls._param_init(p)
for name, p in m.named_parameters():
if '.' not in name:
cls._param_init(p)
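# Usage sketch (illustration only; the hyperparameter values below are
# placeholders, not canonical settings from this repo):
#
#   net = GraphUnet(ks=[0.9, 0.7], in_dim=16, out_dim=16, dim=48,
#                   act=nn.ReLU(), drop_p=0.3)
#   Initializer.weights_init(net)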
| 4,097 | 27.068493 | 65 | py |
grabnel | grabnel-master/src/models/chebygin_base/graphdata.py | import numpy as np
import os
from os.path import join as pjoin
import pickle
import copy
import torch
import torch.utils
import torch.utils.data
import torch.nn.functional as F
import torchvision
from scipy.spatial.distance import cdist
from .utils import *
def compute_adjacency_matrix_images(coord, sigma=0.1):
coord = coord.reshape(-1, 2)
dist = cdist(coord, coord)
A = np.exp(- dist / (sigma * np.pi) ** 2)
A[np.diag_indices_from(A)] = 0
return A
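# Worked example (illustration only): for two superpixels at coordinates
# (0, 0) and (0.5, 0.5), the pairwise distance is ~0.707, so with the default
# sigma=0.1 the off-diagonal weight is exp(-0.707 / (0.1 * pi) ** 2) ~ 7.7e-4,
# i.e. spatially distant superpixels are almost disconnected:
#
#   A = compute_adjacency_matrix_images(np.array([[0., 0.], [0.5, 0.5]]))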
def precompute_graph_images(img_size):
col, row = np.meshgrid(np.arange(img_size), np.arange(img_size))
coord = np.stack((col, row), axis=2) / img_size # 28,28,2
A = torch.from_numpy(compute_adjacency_matrix_images(coord)).float().unsqueeze(0)
coord = torch.from_numpy(coord).float().unsqueeze(0).view(1, -1, 2)
mask = torch.ones(1, img_size * img_size, dtype=torch.uint8)
return A, coord, mask
def collate_batch_images(batch, A, mask, use_mean_px=True, coord=None,
gt_attn_threshold=0, replicate_features=True):
B = len(batch)
C, H, W = batch[0][0].shape
N_nodes = H * W
params_dict = {'N_nodes': torch.zeros(B, dtype=torch.long) + N_nodes, 'node_attn_eval': None}
has_WS_attn = len(batch[0]) > 2
if has_WS_attn:
WS_attn = torch.from_numpy(np.stack([batch[b][2].reshape(N_nodes) for b in range(B)]).astype(np.float32)).view(B, N_nodes)
WS_attn = normalize_batch(WS_attn)
params_dict.update({'node_attn': WS_attn}) # use these scores for training
if use_mean_px:
x = torch.stack([batch[b][0].view(C, N_nodes).t() for b in range(B)]).float()
if gt_attn_threshold == 0:
GT_attn = (x > 0).view(B, N_nodes).float()
else:
GT_attn = x.view(B, N_nodes).float().clone()
GT_attn[GT_attn < gt_attn_threshold] = 0
GT_attn = normalize_batch(GT_attn)
params_dict.update({'node_attn_eval': GT_attn}) # use this for evaluation of attention
if not has_WS_attn:
params_dict.update({'node_attn': GT_attn}) # use this to train attention
else:
raise NotImplementedError('this case is not well supported')
if coord is not None:
if use_mean_px:
x = torch.cat((x, coord.expand(B, -1, -1)), dim=2)
else:
x = coord.expand(B, -1, -1)
if x is None:
x = torch.ones(B, N_nodes, 1) # dummy features
if replicate_features:
x = F.pad(x, (2, 0), 'replicate')
    # labels may arrive as plain ints or as 0-d tensors depending on the dataset
    try:
        labels = torch.Tensor([batch[b][1] for b in range(B)]).long()
    except Exception:
        labels = torch.stack([batch[b][1] for b in range(B)]).long()
return [x, A.expand(B, -1, -1), mask.expand(B, -1), labels, params_dict]
def collate_batch(batch):
'''
Creates a batch of same size graphs by zero-padding node features and adjacency matrices up to
the maximum number of nodes in the current batch rather than in the entire dataset.
'''
B = len(batch)
N_nodes = [batch[b][2] for b in range(B)]
C = batch[0][0].shape[1]
N_nodes_max = int(np.max(N_nodes))
mask = torch.zeros(B, N_nodes_max, dtype=torch.bool) # use byte for older PyTorch
A = torch.zeros(B, N_nodes_max, N_nodes_max)
x = torch.zeros(B, N_nodes_max, C)
has_GT_attn = len(batch[0]) > 4 and batch[0][4] is not None
if has_GT_attn:
GT_attn = torch.zeros(B, N_nodes_max)
has_WS_attn = len(batch[0]) > 5 and batch[0][5] is not None
if has_WS_attn:
WS_attn = torch.zeros(B, N_nodes_max)
for b in range(B):
x[b, :N_nodes[b]] = batch[b][0]
A[b, :N_nodes[b], :N_nodes[b]] = batch[b][1]
mask[b][:N_nodes[b]] = 1 # mask with values of 0 for dummy (zero padded) nodes, otherwise 1
if has_GT_attn:
GT_attn[b, :N_nodes[b]] = batch[b][4].squeeze()
if has_WS_attn:
WS_attn[b, :N_nodes[b]] = batch[b][5].squeeze()
N_nodes = torch.from_numpy(np.array(N_nodes)).long()
params_dict = {'N_nodes': N_nodes}
if has_WS_attn:
params_dict.update({'node_attn': WS_attn}) # use this to train attention
if has_GT_attn:
params_dict.update({'node_attn_eval': GT_attn}) # use this for evaluation of attention
if not has_WS_attn:
params_dict.update({'node_attn': GT_attn}) # use this to train attention
elif has_WS_attn:
params_dict.update({'node_attn_eval': WS_attn}) # use this for evaluation of attention
labels = torch.from_numpy(np.array([batch[b][3] for b in range(B)])).long()
return [x, A, mask, labels, params_dict]
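# Usage sketch (illustration only): `collate_batch` is intended as the
# `collate_fn` of a DataLoader over one of the graph datasets defined below.
#
#   loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True,
#                                        collate_fn=collate_batch)
#   for x, A, mask, labels, params_dict in loader:
#       ...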
class MNIST(torchvision.datasets.MNIST):
'''
Wrapper around MNIST to use predefined attention coefficients
'''
def __init__(self, root, train=True, transform=None, target_transform=None, download=False, attn_coef=None):
super(MNIST, self).__init__(root, train, transform, target_transform, download)
self.alpha_WS = None
if attn_coef is not None and train:
print('loading weakly-supervised labels from %s' % attn_coef)
with open(attn_coef, 'rb') as f:
self.alpha_WS = pickle.load(f)
print(train, len(self.alpha_WS))
def __getitem__(self, index):
img, target = super(MNIST, self).__getitem__(index)
if self.alpha_WS is None:
return img, target
else:
return img, target, self.alpha_WS[index]
class MNIST75sp(torch.utils.data.Dataset):
def __init__(self,
data_dir,
split,
use_mean_px=True,
use_coord=True,
gt_attn_threshold=0,
attn_coef=None):
self.data_dir = data_dir
self.split = split
self.is_test = split.lower() in ['test', 'val']
with open(pjoin(data_dir, 'mnist_75sp_%s.pkl' % split), 'rb') as f:
self.labels, self.sp_data = pickle.load(f)
self.use_mean_px = use_mean_px
self.use_coord = use_coord
self.n_samples = len(self.labels)
self.img_size = 28
self.gt_attn_threshold = gt_attn_threshold
self.alpha_WS = None
if attn_coef is not None and not self.is_test:
with open(attn_coef, 'rb') as f:
self.alpha_WS = pickle.load(f)
print('using weakly-supervised labels from %s (%d samples)' % (attn_coef, len(self.alpha_WS)))
def train_val_split(self, samples_idx):
self.sp_data = [self.sp_data[i] for i in samples_idx]
self.labels = self.labels[samples_idx]
self.n_samples = len(self.labels)
def precompute_graph_data(self, replicate_features, threads=0):
print('precompute all data for the %s set...' % self.split.upper())
self.Adj_matrices, self.node_features, self.GT_attn, self.WS_attn = [], [], [], []
for index, sample in enumerate(self.sp_data):
mean_px, coord = sample[:2]
coord = coord / self.img_size
A = compute_adjacency_matrix_images(coord)
N_nodes = A.shape[0]
x = None
if self.use_mean_px:
x = mean_px.reshape(N_nodes, -1)
if self.use_coord:
coord = coord.reshape(N_nodes, 2)
if self.use_mean_px:
x = np.concatenate((x, coord), axis=1)
else:
x = coord
            if x is None:
                x = np.ones((N_nodes, 1))  # dummy features; np.ones expects a shape tuple
if replicate_features:
x = np.pad(x, ((0, 0), (2, 0)), 'edge') # replicate features to make it possible to test on colored images
if self.gt_attn_threshold == 0:
gt_attn = (mean_px > 0).astype(np.float32)
else:
gt_attn = mean_px.copy()
gt_attn[gt_attn < self.gt_attn_threshold] = 0
self.GT_attn.append(normalize(gt_attn))
if self.alpha_WS is not None:
self.WS_attn.append(normalize(self.alpha_WS[index]))
self.node_features.append(x)
self.Adj_matrices.append(A)
def __len__(self):
return self.n_samples
def __getitem__(self, index):
data = [self.node_features[index],
self.Adj_matrices[index],
self.Adj_matrices[index].shape[0],
self.labels[index],
self.GT_attn[index]]
if self.alpha_WS is not None:
data.append(self.WS_attn[index])
data = list_to_torch(data) # convert to torch
return data
class SyntheticGraphs(torch.utils.data.Dataset):
def __init__(self,
data_dir,
dataset,
split,
degree_feature=True,
attn_coef=None,
threads=12):
self.is_test = split.lower() in ['test', 'val']
self.split = split
self.degree_feature = degree_feature
        if dataset.find('colors') >= 0:
            dim = int(dataset.split('-')[1])
            data_file = 'random_graphs_colors_dim%d_%s.pkl' % (dim, split)
            is_triangles = False
            self.feature_dim = dim + 1
        elif dataset.find('triangles') >= 0:
            data_file = 'random_graphs_triangles_%s.pkl' % split
            is_triangles = True
        else:
            raise NotImplementedError(dataset)  # the exception must actually be raised
with open(pjoin(data_dir, data_file), 'rb') as f:
data = pickle.load(f)
for key in data:
if not isinstance(data[key], list) and not isinstance(data[key], np.ndarray):
print(split, key, data[key])
else:
print(split, key, len(data[key]))
self.Node_degrees = [np.sum(A, 1).astype(np.int32) for A in data['Adj_matrices']]
if is_triangles:
# use one-hot degree features as node features
self.feature_dim = data['Max_degree'] + 1
self.node_features = []
for i in range(len(data['Adj_matrices'])):
N = data['Adj_matrices'][i].shape[0]
if degree_feature:
D_onehot = np.zeros((N, self.feature_dim ))
D_onehot[np.arange(N), self.Node_degrees[i]] = 1
else:
D_onehot = np.zeros((N, 1))
self.node_features.append(D_onehot)
if not degree_feature:
self.feature_dim = 1
else:
# Add 1 feature to support new colors at test time
self.node_features = []
for i in range(len(data['node_features'])):
features = data['node_features'][i]
if features.shape[1] < self.feature_dim:
features = np.pad(features, ((0, 0), (0, 1)), 'constant')
self.node_features.append(features)
self.alpha_WS = None
if attn_coef is not None and not self.is_test:
with open(attn_coef, 'rb') as f:
self.alpha_WS = pickle.load(f)
print('using weakly-supervised labels from %s (%d samples)' % (attn_coef, len(self.alpha_WS)))
self.WS_attn = []
for index in range(len(self.alpha_WS)):
self.WS_attn.append(normalize(self.alpha_WS[index]))
N_nodes = np.array([A.shape[0] for A in data['Adj_matrices']])
self.Adj_matrices = data['Adj_matrices']
self.GT_attn = data['GT_attn']
# Normalizing ground truth attention so that it sums to 1
for i in range(len(self.GT_attn)):
self.GT_attn[i] = normalize(self.GT_attn[i])
#assert np.sum(self.GT_attn[i]) == 1, (i, np.sum(self.GT_attn[i]), self.GT_attn[i])
self.labels = data['graph_labels'].astype(np.int32)
self.classes = np.unique(self.labels)
self.n_classes = len(self.classes)
R = np.corrcoef(self.labels, N_nodes)[0, 1]
degrees = []
for i in range(len(self.Node_degrees)):
degrees.extend(list(self.Node_degrees[i]))
degrees = np.array(degrees, np.int32)
print('N nodes avg/std/min/max: \t{:.2f}/{:.2f}/{:d}/{:d}'.format(*stats(N_nodes)))
print('N edges avg/std/min/max: \t{:.2f}/{:.2f}/{:d}/{:d}'.format(*stats(data['N_edges'])))
print('Node degree avg/std/min/max: \t{:.2f}/{:.2f}/{:d}/{:d}'.format(*stats(degrees)))
print('Node features dim: \t\t%d' % self.feature_dim)
print('N classes: \t\t\t%d' % self.n_classes)
print('Correlation of labels with graph size: \t%.2f' % R)
print('Classes: \t\t\t%s' % str(self.classes))
for lbl in self.classes:
idx = self.labels == lbl
print('Class {}: \t\t\t{} samples, N_nodes: avg/std/min/max: \t{:.2f}/{:.2f}/{:d}/{:d}'.format(lbl, np.sum(idx), *stats(N_nodes[idx])))
def __len__(self):
return len(self.Adj_matrices)
def __getitem__(self, index):
data = [self.node_features[index],
self.Adj_matrices[index],
self.Adj_matrices[index].shape[0],
self.labels[index],
self.GT_attn[index]]
if self.alpha_WS is not None:
data.append(self.WS_attn[index])
data = list_to_torch(data) # convert to torch
return data
class GraphData(torch.utils.data.Dataset):
def __init__(self,
datareader,
fold_id,
split, # train, val, train_val, test
degree_feature=True,
attn_labels=None):
self.fold_id = fold_id
self.split = split
self.w_sup_signal_attn = None
        print('The degree_feature argument is ignored for this dataset. '
              'It will automatically be set to True if nodes do not have any features; '
              'otherwise it will be set to False.')
if attn_labels is not None:
if isinstance(attn_labels, str) and os.path.isfile(attn_labels):
with open(attn_labels, 'rb') as f:
self.w_sup_signal_attn = pickle.load(f)
else:
self.w_sup_signal_attn = attn_labels
for i in range(len(self.w_sup_signal_attn)):
alpha = self.w_sup_signal_attn[i]
alpha[alpha < 1e-3] = 0 # assuming that some nodes should have zero importance
self.w_sup_signal_attn[i] = normalize(alpha)
print(('!!!using weakly supervised labels (%d samples)!!!' % len(self.w_sup_signal_attn)).upper())
self.set_fold(datareader.data, fold_id)
def set_fold(self, data, fold_id):
self.total = len(data['targets'])
self.N_nodes_max = data['N_nodes_max']
self.num_classes = data['num_classes']
self.num_features = data['num_features']
if self.split in ['train', 'val']:
self.idx = data['splits'][self.split][fold_id]
else:
assert self.split in ['train_val', 'test'], ('unexpected split', self.split)
self.idx = data['splits'][self.split]
# use deepcopy to make sure we don't alter objects in folds
self.labels = np.array(copy.deepcopy([data['targets'][i] for i in self.idx]))
self.adj_list = copy.deepcopy([data['adj_list'][i] for i in self.idx])
self.features_onehot = copy.deepcopy([data['features_onehot'][i] for i in self.idx])
self.N_edges = np.array([A.sum() // 2 for A in self.adj_list]) # assume undirected graph with binary edges
print('%s: %d/%d' % (self.split.upper(), len(self.labels), len(data['targets'])))
classes = np.unique(self.labels)
for lbl in classes:
print('Class %d: \t\t\t%d samples' % (lbl, np.sum(self.labels == lbl)))
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
if isinstance(index, str):
# To make data format consistent with SyntheticGraphs
if index == 'Adj_matrices':
return self.adj_list
elif index == 'GT_attn':
print('Ground truth attention is unavailable for this dataset: weakly-supervised labels will be returned')
return self.w_sup_signal_attn
elif index == 'graph_labels':
return self.labels
elif index == 'node_features':
return self.features_onehot
elif index == 'N_edges':
return self.N_edges
else:
raise KeyError(index)
else:
data = [self.features_onehot[index],
self.adj_list[index],
self.adj_list[index].shape[0],
self.labels[index],
None] # no GT attention
if self.w_sup_signal_attn is not None:
data.append(self.w_sup_signal_attn[index])
data = list_to_torch(data) # convert to torch
return data
class DataReader():
'''
Class to read the txt files containing all data of the dataset
Should work for any dataset from https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets
'''
def __init__(self,
data_dir, # folder with txt files
N_nodes=None, # maximum number of nodes in the training set
rnd_state=None,
use_cont_node_attr=False, # use or not additional float valued node attributes available in some datasets
folds=10,
fold_id=None):
self.data_dir = data_dir
self.rnd_state = np.random.RandomState() if rnd_state is None else rnd_state
self.use_cont_node_attr = use_cont_node_attr
self.N_nodes = N_nodes
if os.path.isfile('%s/data.pkl' % data_dir):
print('loading data from %s/data.pkl' % data_dir)
with open('%s/data.pkl' % data_dir, 'rb') as f:
data = pickle.load(f)
else:
files = os.listdir(self.data_dir)
data = {}
nodes, graphs = self.read_graph_nodes_relations(
list(filter(lambda f: f.find('graph_indicator') >= 0, files))[0])
lst = list(filter(lambda f: f.find('node_labels') >= 0, files))
if len(lst) > 0:
data['features'] = self.read_node_features(lst[0], nodes, graphs, fn=lambda s: int(s.strip()))
else:
data['features'] = None
data['adj_list'] = self.read_graph_adj(list(filter(lambda f: f.find('_A') >= 0, files))[0], nodes, graphs)
data['targets'] = np.array(
self.parse_txt_file(list(filter(lambda f: f.find('graph_labels') >= 0, files))[0],
line_parse_fn=lambda s: int(float(s.strip()))))
if self.use_cont_node_attr:
data['attr'] = self.read_node_features(list(filter(lambda f: f.find('node_attributes') >= 0, files))[0],
nodes, graphs,
fn=lambda s: np.array(list(map(float, s.strip().split(',')))))
features, n_edges, degrees = [], [], []
for sample_id, adj in enumerate(data['adj_list']):
N = len(adj) # number of nodes
if data['features'] is not None:
assert N == len(data['features'][sample_id]), (N, len(data['features'][sample_id]))
n = np.sum(adj) # total sum of edges
# assert n % 2 == 0, n
n_edges.append(int(n / 2)) # undirected edges, so need to divide by 2
if not np.allclose(adj, adj.T):
print(sample_id, 'not symmetric')
degrees.extend(list(np.sum(adj, 1)))
if data['features'] is not None:
features.append(np.array(data['features'][sample_id]))
# Create features over graphs as one-hot vectors for each node
if data['features'] is not None:
features_all = np.concatenate(features)
features_min = features_all.min()
num_features = int(features_all.max() - features_min + 1) # number of possible values
features_onehot = []
for i, x in enumerate(features):
feature_onehot = np.zeros((len(x), num_features))
for node, value in enumerate(x):
feature_onehot[node, value - features_min] = 1
if self.use_cont_node_attr:
feature_onehot = np.concatenate((feature_onehot, np.array(data['attr'][i])), axis=1)
features_onehot.append(feature_onehot)
if self.use_cont_node_attr:
num_features = features_onehot[0].shape[1]
else:
degree_max = int(np.max([np.sum(A, 1).max() for A in data['adj_list']]))
num_features = degree_max + 1
features_onehot = []
for A in data['adj_list']:
n = A.shape[0]
                D = np.sum(A, 1).astype(int)  # np.int is deprecated/removed in recent NumPy
D_onehot = np.zeros((n, num_features))
D_onehot[np.arange(n), D] = 1
features_onehot.append(D_onehot)
shapes = [len(adj) for adj in data['adj_list']]
labels = data['targets'] # graph class labels
labels -= np.min(labels) # to start from 0
classes = np.unique(labels)
num_classes = len(classes)
if not np.all(np.diff(classes) == 1):
print('making labels sequential, otherwise pytorch might crash')
labels_new = np.zeros(labels.shape, dtype=labels.dtype) - 1
for lbl in range(num_classes):
labels_new[labels == classes[lbl]] = lbl
labels = labels_new
classes = np.unique(labels)
assert len(np.unique(labels)) == num_classes, np.unique(labels)
def stats(x):
return (np.mean(x), np.std(x), np.min(x), np.max(x))
print('N nodes avg/std/min/max: \t%.2f/%.2f/%d/%d' % stats(shapes))
print('N edges avg/std/min/max: \t%.2f/%.2f/%d/%d' % stats(n_edges))
print('Node degree avg/std/min/max: \t%.2f/%.2f/%d/%d' % stats(degrees))
print('Node features dim: \t\t%d' % num_features)
print('N classes: \t\t\t%d' % num_classes)
print('Classes: \t\t\t%s' % str(classes))
for lbl in classes:
print('Class %d: \t\t\t%d samples' % (lbl, np.sum(labels == lbl)))
if data['features'] is not None:
for u in np.unique(features_all):
print('feature {}, count {}/{}'.format(u, np.count_nonzero(features_all == u), len(features_all)))
N_graphs = len(labels) # number of samples (graphs) in data
assert N_graphs == len(data['adj_list']) == len(features_onehot), 'invalid data'
data['features_onehot'] = features_onehot
data['targets'] = labels
data['N_nodes_max'] = np.max(shapes) # max number of nodes
data['num_features'] = num_features
data['num_classes'] = num_classes
# Save preprocessed data for faster loading
with open('%s/data.pkl' % data_dir, 'wb') as f:
pickle.dump(data, f, protocol=2)
labels = data['targets']
# Create test sets first
N_graphs = len(labels)
shapes = np.array([len(adj) for adj in data['adj_list']])
train_ids, val_ids, train_val_ids, test_ids = self.split_ids_shape(np.arange(N_graphs), shapes, N_nodes, folds=folds)
# Create train sets
splits = {'train': [], 'val': [], 'train_val': train_val_ids, 'test': test_ids}
for fold in range(folds):
splits['train'].append(train_ids[fold])
splits['val'].append(val_ids[fold])
data['splits'] = splits
self.data = data
def split_ids_shape(self, ids_all, shapes, N_nodes, folds=1, fold_id=0):
        if N_nodes is not None and N_nodes > 0:
small_graphs_ind = np.where(shapes <= N_nodes)[0]
print('{}/{} graphs with at least {} nodes'.format(len(small_graphs_ind), len(shapes), N_nodes))
idx = self.rnd_state.permutation(len(small_graphs_ind))
if len(idx) > 1000:
n = 1000
else:
n = 500
train_val_ids = small_graphs_ind[idx[:n]]
test_ids = small_graphs_ind[idx[n:]]
large_graphs_ind = np.where(shapes > N_nodes)[0]
test_ids = np.concatenate((test_ids, large_graphs_ind))
else:
            idx = self.rnd_state.permutation(len(ids_all))
            n = len(ids_all) // folds  # number of test samples per fold
            # the slice end must be None (not -1) for the last fold, otherwise the final sample is silently dropped
            test_ids = ids_all[idx[fold_id * n: (fold_id + 1) * n if fold_id < folds - 1 else None]]
train_val_ids = []
for i in ids_all:
if i not in test_ids:
train_val_ids.append(i)
train_val_ids = np.array(train_val_ids)
assert np.all(
np.unique(np.concatenate((train_val_ids, test_ids))) == sorted(ids_all)), 'some graphs are missing in the test sets'
if folds > 0:
print('generating %d-fold cross-validation splits' % folds)
train_ids, val_ids = self.split_ids(train_val_ids, folds=folds)
# Sanity checks
for fold in range(folds):
ind = np.concatenate((train_ids[fold], val_ids[fold]))
print(fold, len(train_ids[fold]), len(val_ids[fold]))
assert len(train_ids[fold]) + len(val_ids[fold]) == len(np.unique(ind)) == len(ind) == len(train_val_ids), 'invalid splits'
else:
train_ids, val_ids = [], []
return train_ids, val_ids, train_val_ids, test_ids
def split_ids(self, ids, folds=10):
n = len(ids)
stride = int(np.ceil(n / float(folds)))
test_ids = [ids[i: i + stride] for i in range(0, n, stride)]
assert np.all(
np.unique(np.concatenate(test_ids)) == sorted(ids)), 'some graphs are missing in the test sets'
assert len(test_ids) == folds, 'invalid test sets'
train_ids = []
for fold in range(folds):
train_ids.append(np.array([e for e in ids if e not in test_ids[fold]]))
assert len(train_ids[fold]) + len(test_ids[fold]) == len(
np.unique(list(train_ids[fold]) + list(test_ids[fold]))) == n, 'invalid splits'
return train_ids, test_ids
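    # Illustrative check (not in the original file): the folds returned by
    # split_ids partition `ids`, so train and validation ids are disjoint and
    # jointly cover everything. `reader` denotes any constructed DataReader.
    #
    #   train_ids, val_ids = reader.split_ids(np.arange(100), folds=10)
    #   assert all(len(tr) + len(va) == 100 for tr, va in zip(train_ids, val_ids))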
def parse_txt_file(self, fpath, line_parse_fn=None):
with open(pjoin(self.data_dir, fpath), 'r') as f:
lines = f.readlines()
data = [line_parse_fn(s) if line_parse_fn is not None else s for s in lines]
return data
def read_graph_adj(self, fpath, nodes, graphs):
edges = self.parse_txt_file(fpath, line_parse_fn=lambda s: s.split(','))
adj_dict = {}
for edge in edges:
node1 = int(edge[0].strip()) - 1 # -1 because of zero-indexing in our code
node2 = int(edge[1].strip()) - 1
graph_id = nodes[node1]
assert graph_id == nodes[node2], ('invalid data', graph_id, nodes[node2])
if graph_id not in adj_dict:
n = len(graphs[graph_id])
adj_dict[graph_id] = np.zeros((n, n))
ind1 = np.where(graphs[graph_id] == node1)[0]
ind2 = np.where(graphs[graph_id] == node2)[0]
assert len(ind1) == len(ind2) == 1, (ind1, ind2)
adj_dict[graph_id][ind1, ind2] = 1
adj_list = [adj_dict[graph_id] for graph_id in sorted(list(graphs.keys()))]
return adj_list
def read_graph_nodes_relations(self, fpath):
graph_ids = self.parse_txt_file(fpath, line_parse_fn=lambda s: int(s.rstrip()))
nodes, graphs = {}, {}
for node_id, graph_id in enumerate(graph_ids):
if graph_id not in graphs:
graphs[graph_id] = []
graphs[graph_id].append(node_id)
nodes[node_id] = graph_id
graph_ids = np.unique(list(graphs.keys()))
for graph_id in graph_ids:
graphs[graph_id] = np.array(graphs[graph_id])
return nodes, graphs
def read_node_features(self, fpath, nodes, graphs, fn):
node_features_all = self.parse_txt_file(fpath, line_parse_fn=fn)
node_features = {}
for node_id, x in enumerate(node_features_all):
graph_id = nodes[node_id]
if graph_id not in node_features:
node_features[graph_id] = [None] * len(graphs[graph_id])
ind = np.where(graphs[graph_id] == node_id)[0]
assert len(ind) == 1, ind
assert node_features[graph_id][ind[0]] is None, node_features[graph_id][ind[0]]
node_features[graph_id][ind[0]] = x
node_features_lst = [node_features[graph_id] for graph_id in sorted(list(graphs.keys()))]
return node_features_lst
| 29,129 | 42.155556 | 147 | py |
grabnel | grabnel-master/src/models/chebygin_base/utils.py | import numpy as np
import os
import torch
import copy
from .graphdata import *
import torch.nn.functional as F
from torchvision import datasets, transforms
from sklearn.metrics import roc_auc_score
import numbers
import random
def load_save_noise(f, noise_shape):
if os.path.isfile(f):
print('loading noise from %s' % f)
noises = torch.load(f)
else:
noises = torch.randn(noise_shape, dtype=torch.float)
# np.save(f, noises.numpy())
torch.save(noises, f)
return noises
def list_to_torch(data):
for i in range(len(data)):
if data[i] is None:
continue
elif isinstance(data[i], np.ndarray):
            if data[i].dtype == np.bool_:  # np.bool is deprecated/removed in recent NumPy
data[i] = data[i].astype(np.float32)
data[i] = torch.from_numpy(data[i]).float()
elif isinstance(data[i], list):
data[i] = list_to_torch(data[i])
return data
def data_to_device(data, device):
if isinstance(data, dict):
keys = list(data.keys())
else:
keys = range(len(data))
for i in keys:
if isinstance(data[i], list) or isinstance(data[i], dict):
data[i] = data_to_device(data[i], device)
else:
if isinstance(data[i], torch.Tensor):
try:
data[i] = data[i].to(device)
except:
print('error', i, data[i], type(data[i]))
raise
return data
def count_correct(output, target, N_nodes=None, N_nodes_min=0, N_nodes_max=25):
if output.shape[1] == 1:
# Regression
pred = output.round().long()
else:
# Classification
pred = output.max(1, keepdim=True)[1]
target = target.long().squeeze().cpu() # for older pytorch
pred = pred.squeeze().cpu() # for older pytorch
if N_nodes is not None:
idx = (N_nodes >= N_nodes_min) & (N_nodes <= N_nodes_max)
if idx.sum() > 0:
correct = pred[idx].eq(target[idx]).sum().item()
for lbl in torch.unique(target, sorted=True):
idx_lbl = target[idx] == lbl
eq = (pred[idx][idx_lbl] == target[idx][idx_lbl]).float()
print('lbl: {}, avg acc: {:2.2f}% ({}/{})'.format(lbl, 100 * eq.mean(), int(eq.sum()),
int(idx_lbl.float().sum())))
eq = (pred[idx] == target[idx]).float()
print('{} <= N_nodes <= {} (min={}, max={}), avg acc: {:2.2f}% ({}/{})'.format(N_nodes_min,
N_nodes_max,
N_nodes[idx].min(),
N_nodes[idx].max(),
100 * eq.mean(),
int(eq.sum()), int(idx.sum())))
else:
correct = 0
print('no graphs with nodes >= {} and <= {}'.format(N_nodes_min, N_nodes_max))
else:
correct = pred.eq(target).sum().item()
return correct
def attn_AUC(alpha_GT, alpha):
auc = []
if len(alpha) > 0 and alpha_GT is not None and len(alpha_GT) > 0:
for layer in alpha:
alpha_gt = np.concatenate([a.flatten() for a in alpha_GT[layer]]) > 0
if len(np.unique(alpha_gt)) <= 1:
print('Only one class ({}) present in y_true. ROC AUC score is not defined in that case.'.format(np.unique(alpha_gt)))
auc.append(np.nan)
else:
auc.append(100 * roc_auc_score(y_true=alpha_gt,
y_score=np.concatenate([a.flatten() for a in alpha[layer]])))
return auc
def stats(arr):
return np.mean(arr), np.std(arr), np.min(arr), np.max(arr)
def normalize(x, eps=1e-7):
return x / (x.sum() + eps)
def normalize_batch(x, dim=1, eps=1e-7):
return x / (x.sum(dim=dim, keepdim=True) + eps)
def normalize_zero_one(im, eps=1e-7):
m1 = im.min()
m2 = im.max()
return (im - m1) / (m2 - m1 + eps)
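# Quick illustration (not in the original file) of the normalisation helpers:
#
#   >>> normalize(torch.tensor([1., 3.]))   # sums to ~1
#   tensor([0.2500, 0.7500])
#   >>> normalize_batch(torch.ones(2, 4))   # each row sums to ~1
#   tensor([[0.2500, 0.2500, 0.2500, 0.2500],
#           [0.2500, 0.2500, 0.2500, 0.2500]])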
def mse_loss(target, output, reduction='mean', reduce=None):
    loss = (target.float().squeeze() - output.float().squeeze()) ** 2
    if reduce is None:
        if reduction == 'mean':
            return torch.mean(loss)
        elif reduction == 'sum':
            return torch.sum(loss)
        elif reduction == 'none':
            return loss
        else:
            raise NotImplementedError(reduction)  # previously constructed but never raised
    elif not reduce:
        return loss
    else:
        raise NotImplementedError('use reduction if reduce=True')
def shuffle_nodes(batch):
x, A, mask, labels, params_dict = batch
for b in range(x.shape[0]):
idx = np.random.permutation(x.shape[1])
x[b] = x[b, idx]
A[b] = A[b, :, idx][idx, :]
mask[b] = mask[b, idx]
if 'node_attn' in params_dict:
params_dict['node_attn'][b] = params_dict['node_attn'][b, idx]
return [x, A, mask, labels, params_dict]
def copy_batch(data):
data_cp = []
for i in range(len(data)):
if isinstance(data[i], dict):
data_cp.append({key: data[i][key].clone() for key in data[i]})
else:
data_cp.append(data[i].clone())
return data_cp
def sanity_check(model, data):
with torch.no_grad():
output1 = model(copy_batch(data))[0]
output2 = model(shuffle_nodes(copy_batch(data)))[0]
if not torch.allclose(output1, output2, rtol=1e-02, atol=1e-03):
print('WARNING: model outputs different depending on the nodes order', (torch.norm(output1 - output2),
torch.max(output1 - output2),
torch.max(output1),
torch.max(output2)))
print('model is checked for nodes shuffling')
def set_seed(seed, seed_data=None):
random.seed(seed) # for some libraries
rnd = np.random.RandomState(seed)
if seed_data is not None:
rnd_data = np.random.RandomState(seed_data)
else:
rnd_data = rnd
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True  # note: benchmark=True can trade exact reproducibility for speed
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
return rnd, rnd_data
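# Usage sketch (illustration only): seed everything once at program start and
# keep separate numpy RandomStates for model-related and data-related sampling.
#
#   rnd, rnd_data = set_seed(111, seed_data=222)
#   idx = rnd_data.permutation(1000)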
def compute_feature_stats(model, train_loader, device, n_batches=100):
print('computing mean and std of input features')
model.eval()
x = []
with torch.no_grad():
for batch_idx, data in enumerate(train_loader):
x.append(data[0].data.cpu().numpy()) # B,N,F
if batch_idx > n_batches:
break
x = np.concatenate(x, axis=1).reshape(-1, x[0].shape[-1])
print('features shape loaded', x.shape)
mn = x.mean(axis=0, keepdims=True)
sd = x.std(axis=0, keepdims=True)
print('mn', mn)
print('std', sd)
sd[sd < 1e-2] = 1 # to prevent dividing by a small number
    print('corrected (non-zero) std', sd)
mn = torch.from_numpy(mn).float().to(device).unsqueeze(0)
sd = torch.from_numpy(sd).float().to(device).unsqueeze(0)
return mn, sd
def copy_data(data, idx):
data_new = {}
for key in data:
if key == 'Max_degree':
data_new[key] = data[key]
print(key, data_new[key])
else:
data_new[key] = copy.deepcopy([data[key][i] for i in idx])
if key in ['graph_labels', 'N_edges']:
data_new[key] = np.array(data_new[key], np.int32)
print(key, len(data_new[key]))
return data_new
def concat_data(data):
data_new = {}
for key in data[0]:
if key == 'Max_degree':
data_new[key] = np.max(np.array([ d[key] for d in data ]))
print(key, data_new[key])
else:
if key in ['graph_labels', 'N_edges']:
data_new[key] = np.concatenate([ d[key] for d in data ])
else:
lst = []
for d in data:
lst.extend(d[key])
data_new[key] = lst
print(key, len(data_new[key]))
return data_new
| 8,523 | 33.934426 | 134 | py |
grabnel | grabnel-master/src/models/chebygin_base/attention_pooling.py | import torch.nn as nn
import torch.sparse
from .utils import *
class AttentionPooling(nn.Module):
"""
Graph pooling layer implementing top-k and threshold-based pooling.
"""
def __init__(self,
in_features, # feature dimensionality in the current graph layer
in_features_prev, # feature dimensionality in the previous graph layer
pool_type,
pool_arch,
large_graph,
attn_gnn=None,
kl_weight=None,
drop_nodes=True,
init='normal',
scale=None,
debug=False):
super(AttentionPooling, self).__init__()
self.pool_type = pool_type
self.pool_arch = pool_arch
self.large_graph = large_graph
self.kl_weight = kl_weight
self.proj = None
self.drop_nodes = drop_nodes
self.is_topk = self.pool_type[2].lower() == 'topk'
        self.scale = scale
self.init = init
self.debug = debug
self.clamp_value = 60
self.torch = torch.__version__
if self.is_topk:
self.topk_ratio = float(self.pool_type[3]) # r
assert self.topk_ratio > 0 and self.topk_ratio <= 1, ('invalid top-k ratio', self.topk_ratio, self.pool_type)
else:
self.threshold = float(self.pool_type[3]) # \tilde{alpha}
assert self.threshold >= 0 and self.threshold <= 1, ('invalid pooling threshold', self.threshold, self.pool_type)
if self.pool_type[1] in ['unsup', 'sup']:
assert self.pool_arch not in [None, 'None'], self.pool_arch
n_in = in_features_prev if self.pool_arch[1] == 'prev' else in_features
if self.pool_arch[0] == 'fc':
p_optimal = torch.from_numpy(np.pad(np.array([0, 1]), (0, n_in - 2), 'constant')).float().view(1, n_in)
if len(self.pool_arch) == 2:
# single layer projection
self.proj = nn.Linear(n_in, 1, bias=False)
p = self.proj.weight.data
if scale is not None:
if init == 'normal':
p = torch.randn(n_in) # std=1, seed 9753 for optimal initialization
elif init == 'uniform':
p = torch.rand(n_in) * 2 - 1 # [-1,1]
else:
raise NotImplementedError(init)
p *= scale # multiply std for normal or change range for uniform
else:
print('Default PyTorch init is used for layer %s, std=%.3f' % (str(p.shape), p.std()))
self.proj.weight.data = p.view_as(self.proj.weight.data)
p = self.proj.weight.data.view(1, n_in)
else:
# multi-layer projection
filters = list(map(int, self.pool_arch[2:]))
self.proj = []
for layer in range(len(filters)):
self.proj.append(nn.Linear(in_features=n_in if layer == 0 else filters[layer - 1],
out_features=filters[layer]))
if layer == 0:
p = self.proj[0].weight.data
if scale is not None:
if init == 'normal':
p = torch.randn(filters[layer], n_in)
elif init == 'uniform':
p = torch.rand(filters[layer], n_in) * 2 - 1 # [-1,1]
else:
raise NotImplementedError(init)
p *= scale # multiply std for normal or change range for uniform
else:
print('Default PyTorch init is used for layer %s, std=%.3f' % (str(p.shape), p.std()))
self.proj[0].weight.data = p.view_as(self.proj[0].weight.data)
p = self.proj[0].weight.data.view(-1, n_in)
self.proj.append(nn.ReLU(True))
self.proj.append(nn.Linear(filters[-1], 1))
self.proj = nn.Sequential(*self.proj)
# Compute cosine similarity with the optimal vector and print values
# ignore the last dimension, because it does not receive gradients during training
# n_in=4 for colors-3 because some of our test subsets have 4 dimensional features
cos_sim = self.cosine_sim(p[:, :-1], p_optimal[:, :-1])
if p.shape[0] == 1:
print('p values', p[0].data.cpu().numpy())
print('cos_sim', cos_sim.item())
else:
for fn in [torch.max, torch.min, torch.mean, torch.std]:
print('cos_sim', fn(cos_sim).item())
elif self.pool_arch[0] == 'gnn':
self.proj = attn_gnn(n_in)
else:
raise ValueError('invalid pooling layer architecture', self.pool_arch)
elif self.pool_type[1] == 'gt':
if not self.is_topk and self.threshold > 0:
print('For ground truth attention threshold should be 0, but it is %f' % self.threshold)
else:
raise NotImplementedError(self.pool_type[1])
def __repr__(self):
return 'AttentionPooling(pool_type={}, pool_arch={}, topk={}, kl_weight={}, init={}, scale={}, proj={})'.format(
self.pool_type,
self.pool_arch,
self.is_topk,
self.kl_weight,
self.init,
self.scale,
self.proj)
def cosine_sim(self, a, b):
return torch.mm(a, b.t()) / (torch.norm(a, dim=1, keepdim=True) * torch.norm(b, dim=1, keepdim=True))
def mask_out(self, x, mask):
return x.view_as(mask) * mask
def drop_nodes_edges(self, x, A, mask):
N_nodes = torch.sum(mask, dim=1).long() # B
N_nodes_max = N_nodes.max()
idx = None
if N_nodes_max > 0:
B, N, C = x.shape
# Drop nodes
mask, idx = torch.topk(mask.byte(), N_nodes_max, dim=1, largest=True, sorted=False)
x = torch.gather(x, dim=1, index=idx.unsqueeze(2).expand(-1, -1, C))
# Drop edges
A = torch.gather(A, dim=1, index=idx.unsqueeze(2).expand(-1, -1, N))
A = torch.gather(A, dim=2, index=idx.unsqueeze(1).expand(-1, N_nodes_max, -1))
return x, A, mask, N_nodes, idx
def forward(self, data):
KL_loss = None
x, A, mask, _, params_dict = data[:5]
mask_float = mask.float()
N_nodes_float = params_dict['N_nodes'].float()
B, N, C = x.shape
A = A.view(B, N, N)
alpha_gt = None
if 'node_attn' in params_dict:
if not isinstance(params_dict['node_attn'], list):
params_dict['node_attn'] = [params_dict['node_attn']]
alpha_gt = params_dict['node_attn'][-1].view(B, N)
if 'node_attn_eval' in params_dict:
if not isinstance(params_dict['node_attn_eval'], list):
params_dict['node_attn_eval'] = [params_dict['node_attn_eval']]
if (self.pool_type[1] == 'gt' or (self.pool_type[1] == 'sup' and self.training)) and alpha_gt is None:
raise ValueError('ground truth node attention values node_attn required for %s' % self.pool_type)
if self.pool_type[1] in ['unsup', 'sup']:
attn_input = data[-1] if self.pool_arch[1] == 'prev' else x.clone()
if self.pool_arch[0] == 'fc':
alpha_pre = self.proj(attn_input).view(B, N)
            else:
                # pack the inputs into a list (kept from the original python2-compatible code);
                # renamed from `input` to avoid shadowing the builtin
                gnn_input = [attn_input]
                gnn_input.extend(data[1:])
                alpha_pre = self.proj(gnn_input)[0].view(B, N)
# softmax with masking out dummy nodes
alpha_pre = torch.clamp(alpha_pre, -self.clamp_value, self.clamp_value)
alpha = normalize_batch(self.mask_out(torch.exp(alpha_pre), mask_float).view(B, N))
if self.pool_type[1] == 'sup' and self.training:
if self.torch.find('1.') == 0:
KL_loss_per_node = self.mask_out(F.kl_div(torch.log(alpha + 1e-14), alpha_gt, reduction='none'),
mask_float.view(B,N))
else:
KL_loss_per_node = self.mask_out(F.kl_div(torch.log(alpha + 1e-14), alpha_gt, reduce=False),
mask_float.view(B, N))
KL_loss = self.kl_weight * torch.mean(KL_loss_per_node.sum(dim=1) / (N_nodes_float + 1e-7)) # mean over nodes, then mean over batches
else:
alpha = alpha_gt
x = x * alpha.view(B, N, 1)
if self.large_graph:
# For large graphs during training, all alpha values can be very small hindering training
x = x * N_nodes_float.view(B, 1, 1)
if self.is_topk:
N_remove = torch.round(N_nodes_float * (1 - self.topk_ratio)).long() # number of nodes to be removed for each graph
idx = torch.sort(alpha, dim=1, descending=False)[1] # indices of alpha in ascending order
mask = mask.clone().view(B, N)
for b in range(B):
idx_b = idx[b, mask[b, idx[b]]] # take indices of non-dummy nodes for current data example
mask[b, idx_b[:N_remove[b]]] = 0
else:
mask = (mask & (alpha.view_as(mask) > self.threshold)).view(B, N)
if self.drop_nodes:
x, A, mask, N_nodes_pooled, idx = self.drop_nodes_edges(x, A, mask)
if idx is not None and 'node_attn' in params_dict:
# update ground truth (or weakly labeled) attention for a reduced graph
params_dict['node_attn'].append(normalize_batch(self.mask_out(torch.gather(alpha_gt, dim=1, index=idx), mask.float())))
if idx is not None and 'node_attn_eval' in params_dict:
# update ground truth (or weakly labeled) attention for a reduced graph
params_dict['node_attn_eval'].append(normalize_batch(self.mask_out(torch.gather(params_dict['node_attn_eval'][-1], dim=1, index=idx), mask.float())))
else:
N_nodes_pooled = torch.sum(mask, dim=1).long() # B
if 'node_attn' in params_dict:
params_dict['node_attn'].append((self.mask_out(params_dict['node_attn'][-1], mask.float())))
if 'node_attn_eval' in params_dict:
params_dict['node_attn_eval'].append((self.mask_out(params_dict['node_attn_eval'][-1], mask.float())))
params_dict['N_nodes'] = N_nodes_pooled
mask_matrix = mask.unsqueeze(2) & mask.unsqueeze(1)
A = A * mask_matrix.float() # or A[~mask_matrix] = 0
# Add additional losses regularizing the model
if KL_loss is not None:
if 'reg' not in params_dict:
params_dict['reg'] = []
params_dict['reg'].append(KL_loss)
# Keep attention coefficients for evaluation
for key, value in zip(['alpha', 'mask'], [alpha, mask]):
if key not in params_dict:
params_dict[key] = []
params_dict[key].append(value.detach())
if self.debug and alpha_gt is not None:
idx_correct_pool = (alpha_gt > 0)
idx_correct_drop = (alpha_gt == 0)
alpha_correct_pool = alpha[idx_correct_pool].sum() / N_nodes_float.sum()
alpha_correct_drop = alpha[idx_correct_drop].sum() / N_nodes_float.sum()
ratio_avg = (N_nodes_pooled.float() / N_nodes_float).mean()
for key, values in zip(['alpha_correct_pool_debug', 'alpha_correct_drop_debug', 'ratio_avg_debug'],
[alpha_correct_pool, alpha_correct_drop, ratio_avg]):
if key not in params_dict:
params_dict[key] = []
params_dict[key].append(values.detach())
output = [x, A, mask]
output.extend(data[3:])
return output
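# Usage sketch (illustration only; the argument values are assumptions based on
# how the fields are parsed above, not canonical configs from this repo):
#
#   pool = AttentionPooling(in_features=64, in_features_prev=64,
#                           pool_type=['attn', 'sup', 'topk', '0.8'],
#                           pool_arch=['fc', 'cur'],
#                           large_graph=False, kl_weight=100)
#   x, A, mask, labels, params_dict = pool([x, A, mask, labels, params_dict])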
| 12,358 | 48.634538 | 165 | py |
grabnel | grabnel-master/scripts/run_bo_image_classification.py | import sys
sys.path.append('../')
import argparse
import os
import torch
from functools import partial
from src.attack.data import Data
from src.attack.bayesopt_attack import BayesOptAttack
from src.attack.utils import (classification_loss, get_dataset_split, get_device,
setseed, nettack_loss)
from src.models.utils import get_model_class
import numpy as np
import datetime
import pickle
import warnings
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description='Run BO attack on Image Classification datasets')
parser.add_argument('--dataset', type=str, default='mnist')
parser.add_argument('-m', '--method', type=str, default='bo')
parser.add_argument('--loss', type=str, default='nettack')
parser.add_argument('--mode', type=str, default='rewire', choices=['rewire', 'flip'])
parser.add_argument('--model', type=str, default='chebygin', choices=['chebygin'])
parser.add_argument('--seed', type=int, default=0, help='RNG seed.')
parser.add_argument('--gpu', type=str, default=None, help='A gpu device number if available.')
parser.add_argument('--n_trials', type=int, default=1)
parser.add_argument('--batch_size', type=int, default=5)
parser.add_argument('--n_init', type=int, default=5)
parser.add_argument('--budget', type=float, default=0.015, )
parser.add_argument('--query_per_perturb', type=int, default=40, )
parser.add_argument('--n_samples', type=int, default=100, help='number of samples to attack')
parser.add_argument('--save_path', type=str, default='../src/output/attack_logs/',
help='save path for the output logs and adversarial examples.')
parser.add_argument('--model_path', type=str, default='../src/output/models/',
help='path for the trained classifier (victim models)')
parser.add_argument('--patience', type=int, default=200)
parser.add_argument('--target_class', type=int, default=None)
parser.add_argument('--loss_threshold', type=float, default=-3)
parser.add_argument('--density_threshold', type=float, default=0.5)
parser.add_argument('--max_h', type=int, default=1)
parser.add_argument('--no_greedy', action='store_true')
parser.add_argument('--acq', type=str, default='mutation', choices=['mutation', 'random'])
parser.add_argument('--exp_name', type=str, default=None, help='')
args = parser.parse_args()
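# Example invocation (hypothetical values; adjust paths to your own checkpoints):
#   python run_bo_image_classification.py --dataset mnist --method bo \
#       --budget 0.015 --query_per_perturb 40 --seed 0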
setseed(args.seed)
seed = args.seed
n_trials = args.n_trials
n_samples = args.n_samples
n_perturb = args.budget
model_name = args.model
dataset = args.dataset
dataset_split = get_dataset_split(dataset)
# Time string will be used as the directory name
time_string = datetime.datetime.now()
time_string = time_string.strftime('%Y%m%d_%H%M%S')
data = Data(dataset_name=dataset, dataset_split=dataset_split, seed=seed)
if args.target_class is not None:
if args.exp_name is not None:
save_path = args.save_path + f'/{args.exp_name}_{model_name}_{dataset}_{args.method}_{seed}_target_{args.target_class}/'
else:
save_path = args.save_path + f'/{model_name}_{dataset}_{args.method}_{seed}_{time_string}_target_{args.target_class}/'
else:
if args.exp_name is not None:
save_path = args.save_path + f'/{args.exp_name}_{model_name}_{dataset}_{args.method}_{seed}_untargeted/'
else:
save_path = args.save_path + f'/{model_name}_{dataset}_{args.method}_{seed}_{time_string}_untargeted/'
if not os.path.exists(save_path):
os.makedirs(save_path)
options = vars(args)
print(options)
option_file = open(save_path + "/command.txt", "w+")
option_file.write(str(options))
option_file.close()
model_class = get_model_class(model_name)
model_path = os.path.join(args.model_path, "checkpoint_mnist-75sp_139255_epoch30_seed0000111.pth.tar")
state = torch.load(model_path, map_location='cpu')
state_args = state['args']
model = model_class(data.feature_dim, data.number_of_labels, filters=state_args.filters, K=state_args.filter_scale,
n_hidden=state_args.n_hidden, aggregation=state_args.aggregation, dropout=state_args.dropout,
readout=state_args.readout, pool=state_args.pool, pool_arch=state_args.pool_arch)
model.load_state_dict(state['state_dict'])
model.eval()
correct_indices = []
for i in range(len(data.dataset_c)):
sample = data.dataset_c[i]
graph, label = sample
preds = model(graph)
if preds.argmax() == label:
correct_indices.append(i)
print(f' Correctly classified samples: {len(correct_indices)} / {len(data.dataset_c)}')
n_success = 0
dfs, adv_examples = [], []
for trial in range(n_trials):
selected_indices = np.random.RandomState(args.seed).choice(len(data.dataset_c), min(len(data.dataset_c), args.n_samples), replace=False).tolist()
print(f'Starting trial {trial}/{n_trials}')
for i, sample_id in enumerate(selected_indices):
        print(f'Starting sample {i} (Dataset ID={sample_id})')
        if sample_id in correct_indices:
            graph, label = data.dataset_c[sample_id]  # index by the sampled dataset id, not the loop counter
if args.target_class is not None and label == args.target_class:
continue
if args.mode == 'rewire': # each rewire edit = 2 x flip edit. divide by 2
edit = max(np.round(n_perturb * graph.num_nodes() ** 2).astype(int) // 2, 1)
queries_per_perturb = args.query_per_perturb * 2
elif args.mode == 'flip':
edit = max(np.round(n_perturb * graph.num_nodes() ** 2).astype(int), 1)
queries_per_perturb = args.query_per_perturb
elif args.mode == 'linf': # continuous perturbation
assert 0. < n_perturb < 1.
edit = n_perturb
if args.target_class is not None: nettack = partial(nettack_loss, target_class=args.target_class)
else: nettack = nettack_loss
if args.method == 'bo':
attacker = BayesOptAttack(model, nettack, surrogate='bayeslinregress',
surrogate_settings={'h': args.max_h, 'extractor_mode': 'continuous', 'node_attr': 'node_attr'},
batch_size=args.batch_size,
edit_per_stage=min(5, edit) if args.no_greedy else 1,
target_class=args.target_class,
mode=args.mode,
acq_settings={'acq_optimiser': args.acq},
verbose=True,
n_init=args.n_init,
terminate_after_n_fail=args.patience)
else:
attacker = BayesOptAttack(model, nettack, surrogate='null',
surrogate_settings={'h': args.max_h, 'extractor_mode': 'continuous', 'node_attr': 'node_attr'},
batch_size=args.batch_size,
edit_per_stage=min(5, edit) if args.no_greedy else 1,
target_class=args.target_class,
mode=args.mode,
acq_settings={'acq_optimiser': args.acq},
verbose=True,
n_init=min(edit * queries_per_perturb, int(2e4)),
terminate_after_n_fail=args.patience)
df, adv_example = attacker.attack(graph, label, edit, min(edit * queries_per_perturb, int(2e4)))
if adv_example is not None:
n_success += 1
dfs.append(df)
adv_examples.append(adv_example)
else:
adv_examples.append(None)
dfs.append(None)
to_save = {
'dataframes': dfs,
'adv_example': adv_examples,
}
pickle.dump(dfs, open(os.path.join(save_path, f'trial-{trial}.pickle'), 'wb'))
pickle.dump(adv_examples, open(os.path.join(save_path, f'trial-{trial}-adv_example.pickle'), 'wb')) | 8,092 | 46.605882 | 149 | py |
grabnel | grabnel-master/scripts/run_bo_gunet.py | import sys
sys.path.append('../')
from src.models.gunet.network import GNet, GUNet
import numpy as np
import pickle
import argparse
import datetime
import os
import torch
from src.attack.genetic import Genetic
from src.attack.bayesopt_attack import BayesOptAttack
from src.attack.randomattack import RandomFlip
from src.attack.utils import (nettack_loss_gunet, setseed)
from src.models.gunet.utils.dataset import gunet_graph2dgl
from src.models.gunet.config import get_parser, update_args_with_default
# parser for the GUNet settings
parser = argparse.ArgumentParser(description='Args for graph prediction of GUNet classifier on TU datasets')
parser.add_argument('-m', '--method', type=str, default='bo')
parser.add_argument('--dataset', type=str, default='PROTEINS', choices=['COLLAB', 'IMDBMULTI', 'PROTEINS'])
# note: here we use the original designation by the GUNet authors
parser.add_argument('--seed', type=int, default=1, help='RNG seed.')
parser.add_argument('--n_trials', type=int, default=1)
parser.add_argument('--batch_size', type=int, default=5)
parser.add_argument('--n_init', type=int, default=5)
parser.add_argument('--budget', type=float, default=0.03, )
parser.add_argument('--budget_by', type=str, choices=['nnodes_sq', 'nedges'], default='nnodes_sq',
help='computing method of the budget. nnodes_sq: budgets will be computed by nnodes^2.')
parser.add_argument('-qpp', '--query_per_perturb', type=int, default=40, )
parser.add_argument('--n_samples', type=int, default=100, help='number of samples to attack')
parser.add_argument('--save_path', type=str, default='../src/output/attack_logs/',
help='save path for the output logs and adversarial examples.')
parser.add_argument('--model_path', type=str, default='../src/output/models/',
help='path for the trained classifier (victim models)')
parser.add_argument('--split_info_path', type=str, default='../data/', help='path to load the hyperparameters and the'
'test set split of the experiment.')
parser.add_argument('--patience', type=int, default=200)
parser.add_argument('--max_h', type=int, default=1)
parser.add_argument('--no_greedy', action='store_true')
parser.add_argument('--acq', type=str, default='mutation', choices=['mutation', 'random'])
parser.add_argument('--exp_name', type=str, default=None, help='')
parser.add_argument('--mode', type=str, default='flip')
parser.add_argument('--resume', action='store_true')
args, _ = parser.parse_known_args()
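# Example invocation (hypothetical values):
#   python run_bo_gunet.py --dataset PROTEINS --method bo --budget 0.03 \
#       --query_per_perturb 40 --seed 1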
setseed(args.seed)
# -- 0. Set-up the saving paths, etc -- #
time_string = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')  # args has no time_string attribute
if args.exp_name is None or len(args.exp_name) == 0:
    save_path = args.save_path + f'/gunet_{args.dataset}_{args.method}_{args.seed}/'
else:
    save_path = args.save_path + f'/{args.exp_name}_gunet_{args.dataset}_{args.method}_{args.seed}_{time_string}/'
if not os.path.exists(save_path):
os.makedirs(save_path)
print(f'Save path is {save_path}')
# -- 1. Load data splits and the hyperparameters of the victim model -- #
model_data = pickle.load(open(f'{args.split_info_path}/gunet_split_{args.dataset}_test_data_{args.seed}.pickle', 'rb'))
# get the default information of the victim models by loading the defaults provided by GUNet authors
config_parser = get_parser()
config_parser.add_argument('-seed', '--seed', type=int, default=args.seed, help='seed')
config_parser.add_argument('-data', '--data', default=args.dataset, help='data folder name')
configs = config_parser.parse_known_args()[0]
configs = update_args_with_default(configs)
model = GNet(model_data['feat_dim'], model_data['num_class'], configs)
# load the victim model and the test split generated during training of the victim model
model.load_state_dict(torch.load(f'{args.model_path}/gunet_{args.dataset}_{args.seed}.pt', map_location='cpu'))
test_data = model_data['test_split']
# initialise a classifier that can be used as victim model
m = GUNet(model, number_of_labels=model_data['num_class'], input_dim=model_data['feat_dim'])
# load and convert the graphs and labels into compatible data format
test_data_dgl = [gunet_graph2dgl(test_data[i]) for i in range(len(test_data))]
test_labels = torch.tensor([int(g.label) for g in test_data]).reshape(-1, 1)  # cast labels once instead of nesting torch.tensor calls
accs = m.is_correct(test_data_dgl, test_labels)
accs = accs.numpy().flatten()
print('Victim Model Accuracy:', np.sum(accs) / len(accs))
# statistics on the indices
correct_indices = np.argwhere(accs > 0).flatten()
is_successful = [0] * len(correct_indices)
dfs, adv_examples = [], []
correctly_classified_graphs = [test_data_dgl[i] for i in correct_indices]
n_nodes = [g.num_nodes() for g in correctly_classified_graphs]
n_edges = [g.num_edges() // 2 for g in correctly_classified_graphs]
offset = 0
if args.resume and os.path.exists(os.path.join(save_path, 'trial-0.pickle')):
print('Existing records found. Resuming from past runs')
try:
dfs = pickle.load(open(os.path.join(save_path, 'trial-0.pickle'), 'rb'))
adv_examples = pickle.load(open(os.path.join(save_path, 'trial-0-adv_example.pickle'), 'rb'))
stats = pickle.load(open(os.path.join(save_path, 'trial-0-stats.pickle'), 'rb'))
is_successful = stats['is_successful']
n_nodes = stats['nnodes']
n_edges = stats['nedges']
correct_indices = stats['selected_samples'][len(dfs):]
offset = len(dfs)
except Exception as e:
print(f'Loading failed with exception = {e}... restarting attacks from scratch')
offset = 0
for i, selected_idx in enumerate(correct_indices):
    print(f'Starting sample {i+offset} (Dataset ID={selected_idx}). Nedges={n_edges[i+offset]}, Nnodes={n_nodes[i+offset]}')
# selected_indices = correct_indices[0]
graph, label = test_data_dgl[selected_idx], test_labels[selected_idx]
if args.budget >= 1: edit = int(min(args.budget, 1))
else:
if args.budget_by == 'nnodes_sq':
edit = 1 + min(int(2e4 / args.query_per_perturb), np.round(args.budget * graph.num_nodes() ** 2).astype(int))
else:
edit = 1 + min(int(2e4 / args.query_per_perturb), np.round(args.budget * graph.num_edges() / 2).astype(int))
if args.method == 'bo':
attacker = BayesOptAttack(m, nettack_loss_gunet, surrogate='bayeslinregress',
surrogate_settings={'h': args.max_h, 'extractor_mode': 'continuous'},
batch_size=args.batch_size,
edit_per_stage=1,
terminate_after_n_fail=args.patience,
verbose=True,
n_init=args.n_init, )
elif args.method == 'seq_random':
# note there we use the BO interface but the batch_size == n_init (all query points are randomly sampled!)
attacker = BayesOptAttack(m, nettack_loss_gunet, surrogate='null',
surrogate_settings={'h': args.max_h, 'extractor_mode': 'continuous'},
batch_size=args.query_per_perturb,
edit_per_stage=1,
terminate_after_n_fail=args.patience,
verbose=True,
n_init=args.query_per_perturb, )
elif args.method == 'random': attacker = RandomFlip(m, nettack_loss_gunet, args.mode)
elif args.method == 'ga': attacker = Genetic(m, nettack_loss_gunet, population_size=50, )
else: raise ValueError(f'Unknown method: {args.method}')
df, adv_example = attacker.attack(graph, label, edit, edit * args.query_per_perturb)
dfs.append(df)
adv_examples.append(adv_example)
if adv_example is not None:
is_successful[i+offset] = 1
stats = {
'selected_samples': correct_indices,
'is_successful': is_successful,
'nnodes': n_nodes,
'nedges': n_edges
}
pickle.dump(dfs, open(os.path.join(save_path, f'trial-0.pickle'), 'wb'))
pickle.dump(adv_examples, open(os.path.join(save_path, f'trial-0-adv_example.pickle'), 'wb'))
pickle.dump(stats, open(os.path.join(save_path, f'trial-0-stats.pickle'), 'wb'))
| 8,227 | 51.74359 | 123 | py |
grabnel | grabnel-master/scripts/run_bo_tu.py | import sys
sys.path.append('../')
sys.path.append('../pytorch_structure2vec/s2v_lib') # if doing s2v attack on er graphs.
import argparse
import os
from os.path import join
import pandas as pd
import torch
from src.attack.data import Data, ERData
from src.attack.bayesopt_attack import BayesOptAttack
from src.attack.genetic import Genetic
from src.attack.randomattack import RandomFlip
from src.attack.grad_arg_max import GradArgMax
from src.attack.utils import (classification_loss, get_dataset_split, get_device, setseed, nettack_loss)
from src.models.utils import get_model_class
import numpy as np
import datetime
import pickle
import dgl
parser = argparse.ArgumentParser(description='Run BO attack on TU datasets')
parser.add_argument('--dataset', type=str, default='COLLAB')
parser.add_argument('-m', '--method', type=str, default='bo')
parser.add_argument('--loss', type=str, default='nettack')
parser.add_argument('--model', type=str, default='gcn', choices=['gcn', 'gin', 's2v'])
parser.add_argument('--seed', type=int, default=0, help='RNG seed.')
parser.add_argument('--gpu', type=str, default=None, help='A gpu device number if available.')
parser.add_argument('--n_trials', type=int, default=1)
parser.add_argument('--batch_size', type=int, default=5)
parser.add_argument('--n_init', type=int, default=5)
parser.add_argument('--budget', type=float, default=0.03, )
parser.add_argument('--budget_by', type=str, choices=['nnodes_sq', 'nedges'], default='nnodes_sq',
help='computing method of the budget. nnodes_sq: budgets will be computed by nnodes^2.')
parser.add_argument('-qpp', '--query_per_perturb', type=int, default=40, )
parser.add_argument('--n_samples', type=int, default=100, help='number of samples to attack')
parser.add_argument('--save_path', type=str, default='../src/output/attack_logs/',
help='save path for the output logs and adversarial examples.')
parser.add_argument('--model_path', type=str, default='../src/output/models/',
help='path for the trained classifier (victim models)')
parser.add_argument('--patience', type=int, default=200)
parser.add_argument('--max_h', type=int, default=0)
parser.add_argument('--mode', type=str, default='flip', choices=['flip', 'rewire'])
parser.add_argument('--constrain_n_hop', type=int, default=None)
parser.add_argument('--no_greedy', action='store_true')
parser.add_argument('--acq', type=str, default='mutation', choices=['mutation', 'random'])
parser.add_argument('--exp_name', type=str, default=None, help='')
parser.add_argument('-pdc', '--preserve_disconnected_components', action='store_true',
help='whether constrain the attacks such as the number of disconnected components in the graph '
'does not change. ')
args = parser.parse_args()
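# Example invocation (hypothetical values):
#   python run_bo_tu.py --dataset COLLAB --model gcn --method bo \
#       --budget 0.03 --query_per_perturb 40 --seed 0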
setseed(args.seed)
seed = args.seed
n_trials = args.n_trials
n_samples = args.n_samples
n_perturb = args.budget
model_name = args.model
dataset = args.dataset
dataset_split = get_dataset_split(dataset)
# Time string will be used as the directory name
time_string = datetime.datetime.now()
time_string = time_string.strftime('%Y%m%d_%H%M%S')
if dataset == 'er_graphs':
data = ERData(seed=seed)
else:
data = Data(dataset_name=dataset, dataset_split=dataset_split, seed=seed)
if args.exp_name is None:
save_path = args.save_path + f'/{model_name}_{dataset}_{args.method}_{seed}/'
else:
save_path = args.save_path + f'/{args.exp_name}_{model_name}_{dataset}_{args.method}_{seed}_{time_string}/'
if not os.path.exists(save_path):
os.makedirs(save_path)
options = vars(args)
print(options)
option_file = open(save_path + "/command.txt", "w+")
option_file.write(str(options))
option_file.close()
model_class = get_model_class(model_name)
model = model_class(data.feature_dim, data.number_of_labels)
model_path = join(args.model_path, f'{args.model}_{dataset}_{seed}.pt')
model.load_state_dict(torch.load(model_path, map_location='cpu'))
model.eval()
evaluation_logs = pd.read_csv(join('../src/output', 'evaluation_logs', f'{args.model}_{dataset}_{seed}.csv'))
evaluation_logs = evaluation_logs.query('dataset == "c"')
correct_indices = []
dataset_c_loader = data.adversarial_dataloaders()[1]
all_graphs = []
all_labels = []
for i, (graphs, labels) in enumerate(dataset_c_loader):
with torch.no_grad():
graphs = dgl.unbatch(graphs)
all_graphs += graphs
all_labels += labels.numpy().tolist()
for i in range(len(all_labels)):
sample = all_graphs[i]
preds = model(sample).detach()
label = torch.tensor(all_labels[i])
if data.is_binary and preds.shape[1] > 1:
preds = preds[:, :1]
if (preds.shape[1] == 1 and (preds > 0) == label) or (preds.shape[1] > 1 and preds.argmax() == label):
assert evaluation_logs.iloc[i]['correct_prediction']
if evaluation_logs.iloc[i]['correct_prediction']:
correct_indices.append(i)
graph = sample
preds = model(graph).detach()
print(f' Correctly classified samples: {len(correct_indices)}')
all_labels = torch.tensor(all_labels)
n_success = 0
dfs, adv_examples = [], []
for trial in range(n_trials):
selected_indices = np.random.RandomState(args.seed).choice(len(data.dataset_c), min(len(data.dataset_c), args.n_samples), replace=False).tolist()
is_successful = [0] * len(selected_indices)
is_attacked = [0] * len(selected_indices)
is_correct = [0] * len(selected_indices)
n_edges = [0] * len(selected_indices)
n_nodes = [0] * len(selected_indices)
print(f'Starting trial {trial}/{n_trials}')
for i, sample_id in enumerate(selected_indices):
n_stagnation = 0
best_loss = -np.inf
# try:
n_nodes[i] = int(all_graphs[sample_id].num_nodes())
n_edges[i] = int(all_graphs[sample_id].num_edges() // 2)
        print(f'Starting sample {i} (Dataset ID={sample_id}, Nnodes={n_nodes[i]}, Nedges={n_edges[i]})')
if sample_id in correct_indices:
is_attacked[i] = 1
is_correct[i] = 1
graph, label = all_graphs[sample_id], all_labels[sample_id].reshape(1, 1)
if n_perturb >= 1:
edit = int(min(n_perturb, 1))
queries_per_perturb = args.query_per_perturb
else: # if expressed as fraction
if args.mode == 'rewire': # each rewire edit = 2 x flip edit. divide by 2
queries_per_perturb = args.query_per_perturb * 2
if args.budget_by == 'nnodes_sq':
edit = 1 + min(int(2e4 / queries_per_perturb),
np.round(n_perturb * graph.num_nodes() ** 2 // 2).astype(int) // 2)
else:
edit = 1 + min(int(2e4 / queries_per_perturb),
np.round(n_perturb * graph.num_edges() // 2 // 2).astype(int) // 2)
else:
queries_per_perturb = args.query_per_perturb
if args.budget_by == 'nnodes_sq':
edit = 1 + min(int(2e4 / queries_per_perturb), np.round(n_perturb * graph.num_nodes() ** 2).astype(int))
else:
edit = 1 + min(int(2e4 / queries_per_perturb), np.round(n_perturb * graph.num_edges() // 2).astype(int))
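            # Worked example of the budget arithmetic above (values assumed, not from
            # the repo): with the defaults budget=0.03, budget_by='nnodes_sq',
            # query_per_perturb=40 and a 50-node graph in flip mode,
            # edit = 1 + min(int(2e4 / 40), round(0.03 * 50**2)) = 1 + min(500, 75) = 76,
            # so the attacker below receives max_queries = 76 * 40 = 3040.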
if args.method == 'bo':
attacker = BayesOptAttack(model, nettack_loss,
surrogate_settings={'h': args.max_h, 'extractor_mode': 'continuous'},
batch_size=args.batch_size,
edit_per_stage=min(5, edit) if args.no_greedy else 1,
acq_settings={'acq_optimiser': args.acq,'rand_frac': 0., },
verbose=True, mode=args.mode,
n_init=args.n_init,
terminate_after_n_fail=args.patience,
n_hop_constraint=args.constrain_n_hop,
preserve_disconnected_components=args.preserve_disconnected_components,)
        elif args.method == 'ga':
            attacker = Genetic(model, nettack_loss, population_size=100, mode=args.mode)
        elif args.method == 'rs':
            attacker = RandomFlip(model, nettack_loss, args.mode,
                                  preserve_disconnected_components=args.preserve_disconnected_components,)
        elif args.method == 'grad':
            attacker = GradArgMax(model, nettack_loss, args.mode)
        else:
            raise ValueError(f'Unknown method: {args.method}.')
df, adv_example = attacker.attack(graph, label, edit, edit * queries_per_perturb)
if adv_example is not None:
n_success += 1
is_successful[i] = 1
dfs.append(df)
adv_examples.append(adv_example)
else:
adv_examples.append(None)
dfs.append(None)
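        # Per the attack implementations (e.g. GradArgMax), `df` is a DataFrame with
        # one row per query ('losses', 'correct_prediction', 'queries') and
        # `adv_example` is the perturbed dgl graph on success, or None otherwise.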
stats = {
'selected_samples': selected_indices,
'is_successful': is_successful,
'is_attacked': is_attacked,
'is_correct': is_correct,
'nnodes': n_nodes,
'nedges': n_edges,
}
pickle.dump(dfs, open(os.path.join(save_path, f'trial-{trial}.pickle'), 'wb'))
pickle.dump(adv_examples, open(os.path.join(save_path, f'trial-{trial}-adv_example.pickle'), 'wb'))
pickle.dump(stats, open(os.path.join(save_path, f'trial-{trial}-stats.pickle'), 'wb'))
| 9,603 | 44.733333 | 165 | py |
grabnel | grabnel-master/bayesopt/bayesopt/acquisitions.py | import torch
from torch.distributions import Normal
def graph_expected_improvement(x_star: list, predictor, xi: float = 0.0, in_fill: str = 'best',
augmented=False,
                               bias=None):
    """Expected improvement (EI) acquisition evaluated over candidate graphs x_star."""
mean, variance = predictor.predict(x_star)
std = torch.sqrt(variance)
if in_fill == 'best':
mu_star = torch.max(predictor.y)
elif in_fill == 'posterior':
best_idx = torch.argmax(predictor.y)
mu_star = predictor.predict(predictor.X[best_idx], full_covariance=False)[0]
else:
raise NotImplementedError(f'Unknown in fill criterion {in_fill}.')
gauss = Normal(torch.zeros(1, device=mean.device), torch.ones(1, device=mean.device))
u = (mean - mu_star - xi) / std
ucdf = gauss.cdf(u)
updf = torch.exp(gauss.log_prob(u))
ei = std * updf + (mean - mu_star - xi) * ucdf
if augmented:
sigma_n = predictor.gp.likelihood.noise.detach()
ei *= (1. - torch.sqrt(torch.tensor(sigma_n, device=mean.device)) / torch.sqrt(sigma_n + variance))
if bias is not None:
ei += bias
return ei
def graph_ucb(x_star: list, predictor, beta: float = None, bias=None):
    """Upper confidence bound (UCB) acquisition: posterior mean + beta * posterior std."""
mu, variance = predictor.predict(x_star, full_covariance=False)
std = torch.sqrt(variance)
beta = 2. if beta is None else beta
ucb = mu + beta * std
if bias is not None:
ucb += bias
return ucb
def best_mean(x_star: list, predictor):
    """Greedy acquisition: return only the posterior mean of the candidates."""
mu, _ = predictor.predict(x_star, full_covariance=False)
return mu
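# Hedged usage sketch (not part of the original module; `predictor` and
# `candidates` are assumed names): with a fitted surrogate exposing
# .predict/.X/.y, EI above follows the closed form
#   EI(x) = sigma(x) * phi(u) + (mu(x) - mu* - xi) * Phi(u),
#   u = (mu(x) - mu* - xi) / sigma(x),
# and a batch of candidate graphs can be scored as
#   ei = graph_expected_improvement(candidates, predictor, xi=0.01)
#   ucb = graph_ucb(candidates, predictor, beta=2.0)
#   best = candidates[int(torch.argmax(ei))]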
| 1,533 | 33.088889 | 107 | py |
grabnel | grabnel-master/bayesopt/bayesopt/utils.py | # Xingchen Wan | 5 March 2020
import networkx as nx
from grakel.utils import graph_from_networkx
from typing import List
import dgl
import torch
import numpy as np
def dgl2networkx(g_list: List[dgl.DGLGraph], attr_name='node_attr'):
"""Convert a list of dgl graphs to a networkx graph"""
def convert_single_graph(g):
g_nx = g.to_networkx().to_undirected()
nx.set_node_attributes(g_nx, dict(g_nx.degree()), attr_name)
return g_nx
graphs = [convert_single_graph(g) for g in g_list]
return graphs
def dgl2grakel(g_list: List[dgl.DGLGraph], attr_name='node_attr'):
"""Convert a list of DGL graphs to a list of graphs understood by the grakel interface"""
nx_graph = dgl2networkx(g_list, attr_name)
graphs = graph_from_networkx(nx_graph, attr_name)
return graphs
def to_unit_cube(x, lb, ub):
"""Project to [0, 1]^d from hypercube with bounds lb and ub"""
# assert lb.ndimension() == 1 and ub.ndimension() == 1 and x.ndimension() == 2
xx = (x - lb) / (ub - lb)
return xx
def from_unit_cube(x, lb, ub):
"""Project from [0, 1]^d to hypercube with bounds lb and ub"""
# assert lb.ndimension() == 1 and ub.ndimension() == 1 and x.ndimension() == 2
xx = x * (ub - lb) + lb
return xx
def to_unit_normal(y, mean, std):
"""Normalise targets into the range ~N(0, 1)"""
return (y - mean) / std
def from_unit_normal(y, mean, std, scale_variance=False):
"""Project the ~N(0, 1) to the original range.
:param scale_variance: whether we are scaling variance instead of mean.
"""
if not scale_variance:
return y * std + mean
else:
return y * (std ** 2)
def latin_hypercube(n_pts, dim):
"""Basic Latin hypercube implementation with center perturbation."""
X = np.zeros((n_pts, dim))
centers = (1.0 + 2.0 * np.arange(0.0, n_pts)) / float(2 * n_pts)
    for i in range(dim):  # Shuffle the center locations for each dimension.
X[:, i] = centers[np.random.permutation(n_pts)]
# Add some perturbations within each box
pert = np.random.uniform(-1.0, 1.0, (n_pts, dim)) / float(2 * n_pts)
X += pert
return X
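# Hedged self-check sketch (toy values assumed): the two cube transforms are
# inverses and latin_hypercube returns stratified points in [0, 1]^d, e.g.
#   lb, ub = np.zeros(3), 10. * np.ones(3)
#   x = np.random.uniform(lb, ub, size=(5, 3))
#   assert np.allclose(from_unit_cube(to_unit_cube(x, lb, ub), lb, ub), x)
#   lhs = latin_hypercube(8, 3)   # shape (8, 3), one point per axis stratum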
# To be used by the TuRBO model -- taken from the TuRBO code
import math

from gpytorch.constraints.constraints import Interval
from gpytorch.distributions import MultivariateNormal
from gpytorch.kernels import MaternKernel, ScaleKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.means import ConstantMean
from gpytorch.mlls import ExactMarginalLogLikelihood
from gpytorch.models import ExactGP
# GP Model
class GP(ExactGP):
def __init__(self, train_x, train_y, likelihood, lengthscale_constraint, outputscale_constraint, ard_dims):
super(GP, self).__init__(train_x, train_y, likelihood)
self.ard_dims = ard_dims
self.mean_module = ConstantMean()
base_kernel = MaternKernel(lengthscale_constraint=lengthscale_constraint, ard_num_dims=ard_dims, nu=2.5)
self.covar_module = ScaleKernel(base_kernel, outputscale_constraint=outputscale_constraint)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return MultivariateNormal(mean_x, covar_x)
def train_gp(train_x, train_y, use_ard, num_steps, hypers={}):
"""Fit a GP model where train_x is in [0, 1]^d and train_y is standardized."""
assert train_x.ndim == 2
assert train_y.ndim == 1
assert train_x.shape[0] == train_y.shape[0]
# Create hyper parameter bounds
noise_constraint = Interval(5e-4, 0.2)
if use_ard:
lengthscale_constraint = Interval(0.005, 2.0)
else:
lengthscale_constraint = Interval(0.005, math.sqrt(train_x.shape[1])) # [0.005, sqrt(dim)]
outputscale_constraint = Interval(0.05, 20.0)
# Create models
likelihood = GaussianLikelihood(noise_constraint=noise_constraint).to(device=train_x.device, dtype=train_y.dtype)
ard_dims = train_x.shape[1] if use_ard else None
model = GP(
train_x=train_x,
train_y=train_y,
likelihood=likelihood,
lengthscale_constraint=lengthscale_constraint,
outputscale_constraint=outputscale_constraint,
ard_dims=ard_dims,
).to(device=train_x.device, dtype=train_x.dtype)
# Find optimal model hyperparameters
model.train()
likelihood.train()
# "Loss" for GPs - the marginal log likelihood
mll = ExactMarginalLogLikelihood(likelihood, model)
# Initialize model hypers
if hypers:
model.load_state_dict(hypers)
else:
hypers = {}
hypers["covar_module.outputscale"] = 1.0
hypers["covar_module.base_kernel.lengthscale"] = 0.5
hypers["likelihood.noise"] = 0.005
model.initialize(**hypers)
# Use the adam optimizer
optimizer = torch.optim.Adam([{"params": model.parameters()}], lr=0.1)
for _ in range(num_steps):
optimizer.zero_grad()
output = model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.step()
# Switch to eval mode
model.eval()
likelihood.eval()
return model
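# Hedged usage sketch (toy data assumed, not from the repo): fit the TuRBO-style
# GP on random inputs in the unit cube with standardized targets, then query it.
#   train_x = torch.rand(20, 3)
#   train_y = torch.randn(20)
#   gp = train_gp(train_x, (train_y - train_y.mean()) / train_y.std(),
#                 use_ard=True, num_steps=50)
#   with torch.no_grad():
#       posterior = gp(train_x)   # gpytorch MultivariateNormal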
| 5,127 | 32.298701 | 117 | py |
grabnel | grabnel-master/bayesopt/bayesopt/predictors/gp_predictor.py | import gpytorch
import torch
from bayesopt.bayesopt.wl_extractor import WeisfeilerLehmanExtractor
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.constraints.constraints import Interval
import numpy as np
from copy import deepcopy
from bayesopt.bayesopt.utils import to_unit_cube, from_unit_normal, to_unit_normal
from .base_predictor import BasePredictor
class GP(gpytorch.models.ExactGP):
"""Implementation of the exact GP in the gpytorch framework"""
def __init__(self, train_x, train_y, kernel: gpytorch.kernels.Kernel, likelihood):
super(GP, self).__init__(train_x, train_y, likelihood)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.ScaleKernel(kernel)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
class OptimalAssignment(gpytorch.kernels.Kernel):
"""Implementation of the optimal assignment kernel as histogram intersection between two vectors"""
def forward(self, x1, x2, diag=False, last_dim_is_batch=False, **params):
"""x1 shape = [N x d], x2_shape = [M x d]. This computes the pairwise histogram intersection between the two"""
ker = torch.zeros(x1.shape[0], x2.shape[0])
for n in range(x1.shape[0]):
for m in range(x2.shape[0]):
ker[n, m] = torch.sum(torch.minimum(x1[n, :].reshape(-1, 1), x2[m, :].reshape(-1, 1)))
if diag:
return torch.diag(ker)
return ker
def train_gp(train_x, train_y, training_iter=50, kernel='linear', verbose=False, init_noise_var=None, hypers={}):
"""Train a GP model. Since in our case we do not have lengthscale, the optimisation is about finding the optimal
noise only.
train_x, train_y: the training input/targets (in torch.Tensors) for the GP
training_iter: the number of optimiser iterations. Set to 0 if you do not wish to optimise
kernel: 'linear' for the original WL kernel. 'oa' for the optimal assignment variant
verbose: if True, display diagnostic information during optimisation
init_noise_var: Initial noise variance. Supply a value here and set training_iter=0 when you have a good knowledge
of the noise variance a-priori to skip inferring noise from the data.
hypers: Optional dict of hyperparameters for the GP.
Return: a trained GP object.
"""
assert train_x.ndim == 2
assert train_y.ndim == 1
assert train_x.shape[0] == train_y.shape[0]
noise_constraint = Interval(1e-6, 0.1)
likelihood = GaussianLikelihood(noise_constraint=noise_constraint).to(device=train_x.device, dtype=train_y.dtype)
if kernel == 'linear':
k = gpytorch.kernels.LinearKernel()
elif kernel == 'oa':
k = OptimalAssignment()
elif kernel == 'rbf':
k = gpytorch.kernels.RBFKernel()
else:
raise NotImplementedError
# model
model = GP(train_x, train_y, k, likelihood).to(device=train_x.device, dtype=train_x.dtype)
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam([
{'params': model.parameters()}, # Includes GaussianLikelihood parameters
], lr=0.1)
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
if hypers:
model.load_state_dict(hypers)
else:
hypers = {}
hypers["covar_module.outputscale"] = 1.0
if model.covar_module.has_lengthscale:
hypers["covar_module.base_kernel.lengthscale"] = np.sqrt(0.01 * 0.5)
hypers["likelihood.noise"] = 0.005 if init_noise_var is None else init_noise_var
model.initialize(**hypers)
for i in range(training_iter):
# Zero gradients from previous iteration
optimizer.zero_grad()
# Output from model
output = model(train_x)
# Calc loss and backprop gradients
loss = -mll(output, train_y)
loss.backward()
if verbose:
print('Iter %d/%d - Loss: %.3f. Noise %.3f' % (
i + 1, training_iter, loss.item(), model.likelihood.noise.item()
))
optimizer.step()
model.eval()
likelihood.eval()
return model
class GPWL(BasePredictor):
def __init__(self, kernel: str = 'linear', h: int = 1, noise_var: float = None,
extractor_mode: str = 'categorical',
node_attr: str = 'node_attr1',):
"""
        A simple GPWL interface which uses a GP with the WL kernel (note that when the original WL kernel is used,
        due to the linear kernel formulation it is simply the WL kernel + Bayesian linear regression).
Note: for linear kernel, it is actually preferable to use bayesian linear regression explicitly
(bayes_linregress_predictor.py) which saves some overhead from invoking gpytorch, and there seems to be
problems with gpytorch handling very high-dimensional vectors associated with very large graphs.
        :param kernel: "linear" or "oa" (optimal assignment)
        :param h: maximum number of Weisfeiler-Lehman iterations
:param noise_var: the noise variance known a-priori. If None, the noise_variance will be inferred from the
data via maximum likelihood estimation.
:param extractor_mode: See extractor documentation
"""
super().__init__(h)
self.extractor = WeisfeilerLehmanExtractor(h=h, mode=extractor_mode, node_attr=node_attr)
self.kernel = kernel
self.noise_var = noise_var
self.gp = None
def fit(self, x_train: list, y_train: torch.Tensor):
"""See BasePredictor"""
if len(y_train.shape) == 0: # y_train is a scalar
y_train = y_train.reshape(1)
assert len(x_train) == y_train.shape[0]
assert y_train.ndim == 1
# Fit the feature extractor with the graph input
self.extractor.fit(x_train)
self.X = deepcopy(x_train)
self.y = deepcopy(y_train)
# Get the vector representation out
x_feat_vector = torch.tensor(self.extractor.get_train_features(), dtype=torch.float32)
        # standardise x_feat_vector into unit hypercube [0, 1]^d
self.lb, self.ub = torch.min(x_feat_vector, dim=0)[0]+1e-3, torch.max(x_feat_vector, dim=0)[0]-1e-3
x_feat_vector_gp = to_unit_cube(x_feat_vector, self.lb, self.ub)
# normalise y vector into unit normal distribution
self.ymean, self.ystd = torch.mean(y_train), torch.std(y_train)
y_train_normal = to_unit_normal(y_train, self.ymean, self.ystd)
if self.noise_var is not None:
self.gp = train_gp(x_feat_vector_gp, y_train_normal, training_iter=0, kernel=self.kernel, init_noise_var=self.noise_var)
else:
self.gp = train_gp(x_feat_vector_gp, y_train_normal, kernel=self.kernel)
def update(self, x_update: list, y_update: torch.Tensor):
"""See BasePredictor"""
if len(y_update.shape) == 0: # y_train is a scalar
y_update = y_update.reshape(1)
assert len(x_update) == y_update.shape[0]
assert y_update.ndim == 1
self.extractor.update(x_update)
x_feat_vector = torch.tensor(self.extractor.get_train_features(), dtype=torch.float32)
# update the lb and ub, in case new information changes those
self.lb, self.ub = torch.min(x_feat_vector, dim=0)[0]+1e-3, torch.max(x_feat_vector, dim=0)[0]-1e-3
x_feat_vector_gp = to_unit_cube(x_feat_vector, self.lb, self.ub)
self.X += deepcopy(x_update)
self.y = torch.cat((self.y, y_update))
self.ymean, self.ystd = torch.mean(self.y), torch.std(self.y)
y = to_unit_normal(deepcopy(self.y), self.ymean, self.ystd)
if self.noise_var is not None:
self.gp = train_gp(x_feat_vector_gp, y, training_iter=0, kernel=self.kernel, init_noise_var=self.noise_var)
else:
self.gp = train_gp(x_feat_vector_gp, y, kernel=self.kernel)
def predict(self, x_eval: list, full_covariance=False, include_noise_variance=False):
"""
        See BasePredictor
:param full_covariance: bool. Whether return the full covariance (shape N x N where N = len(x_eval)
:param include_noise_variance: bool. Whether include noise variance in the prediction. This does not impact
the posterior mean inference, but the posterior variance inference will be enlarged accordingly if this flag
is True
:return: (mean, variance (if full_covariance=True) or the full covariance matrix (if full_covariance=True))
"""
if self.gp is None:
raise ValueError("The GPWL object is not fitted to any data yet! Call fit or update to do so first.")
x_feat_vector = torch.tensor(self.extractor.transform(x_eval), dtype=torch.float32)
x_feat_vector = to_unit_cube(x_feat_vector, self.lb, self.ub)
self.gp.eval()
pred = self.gp(x_feat_vector)
# print(pred.mean)
if include_noise_variance:
self.gp.likelihood.eval()
pred = self.gp.likelihood(pred)
mean, variance = pred.mean.detach(), pred.variance.detach()
mean = from_unit_normal(mean, self.ymean, self.ystd)
variance = from_unit_normal(variance, self.ymean, self.ystd, scale_variance=True)
if full_covariance:
covar = pred.covariance_matrix.detach()
covar *= (self.ystd ** 2)
return mean, covar
return mean, variance
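# Hedged usage sketch (assumed graph construction; the extractor is expected to
# read the node attribute named by `node_attr`):
#   import dgl
#   graphs = [dgl.rand_graph(10, 20) for _ in range(8)]
#   for g in graphs:
#       g.ndata['node_attr1'] = torch.randn(10, 1)
#   surrogate = GPWL(kernel='linear', h=1)
#   surrogate.fit(graphs, torch.randn(8))
#   mean, var = surrogate.predict(graphs[:2])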
| 9,679 | 46.219512 | 132 | py |
grabnel | grabnel-master/bayesopt/bayesopt/predictors/bayes_linregress_predictor.py | from .base_predictor import BasePredictor
import torch
from copy import deepcopy
from bayesopt.bayesopt.utils import to_unit_cube, to_unit_normal, from_unit_normal
from bayesopt.bayesopt.wl_extractor import WeisfeilerLehmanExtractor
from sklearn import linear_model
import numpy as np
class BayesianLinearRegression(BasePredictor):
def __init__(self, h=1, max_step=1000, verbose=False, ard=True,
extractor_mode='categorical',
node_attr='node_attr1',
**linregress_params):
"""
Bayesian Linear Regression predictor with the WL feature extractor. This function uses the scikit-learn
implementation of the Bayesian Linear regression internally.
:param h: maximum number of weisfeiler-lehman iterations
:param max_step: maximum number of training steps for ELBO optimisation for the linear regressor
:param verbose: whether to enable verbose mode
:param ard: whether to use automatic relevance determination regression.
If True, we use the instance of sklearn.linear.ARDRegression, which relaxes the assumption that the Gaussian
distribution of parameter weights is spherical, and variance might vary per parameter. Empirically this
leads to more sparse representation and thus potentially better results.
If False, we use the sklearn.linear.BayesianRidge regressor
:param linregress_params: Any keyword parameters to be passed to the sklearn regressor. See Sklearn documentation
for more information
:param extractor_mode: See the extractor documentation.
"""
super().__init__(h=h)
self.extractor = WeisfeilerLehmanExtractor(h=h, mode=extractor_mode, node_attr=node_attr)
self.max_step = max_step
self.verbose = verbose
self.ard = ard
# save the BayesLinRegress model
self.model = None
# save the feature vector of WL in case of use
self.X_feat = None
self.linregress_params = linregress_params
def fit(self, x_train: list, y_train: torch.Tensor):
# if len(y_train.shape) == 0: # y_train is a scalar
# y_train = y_train.reshape(1)
y_train = y_train.reshape(-1)
assert len(x_train) == y_train.shape[0]
assert y_train.ndim == 1
# Fit the feature extractor with the graph input
self.extractor.fit(x_train)
self.X = deepcopy(x_train)
self.y = deepcopy(y_train)
# Get the vector representation out
x_feat_vector = self.extractor.get_train_features().astype(np.float32)
        # remove any rows containing NaNs
x_feat_vector = x_feat_vector[~np.isnan(x_feat_vector).any(axis=1)]
# self.v = x_feat_vector
# standardise x_feat_vector into unit hypercube [0, 1]^d
self.lb, self.ub = np.min(x_feat_vector, axis=0)+1e-3, np.max(x_feat_vector, axis=0)-1e-3
x_feat_vector_gp = to_unit_cube(x_feat_vector, self.lb, self.ub)
self.X_feat = deepcopy(x_feat_vector_gp)
# normalise y vector into unit normal distribution
self.ymean, self.ystd = torch.mean(y_train), torch.std(y_train)
if self.ystd > 0:
y_train_normal = to_unit_normal(y_train, self.ymean, self.ystd)
else:
y_train_normal = y_train
if self.ard: self.model = linear_model.ARDRegression(**self.linregress_params)
else: self.model = linear_model.BayesianRidge(**self.linregress_params)
self.model.fit(x_feat_vector_gp, y_train_normal.numpy())
# pyro.clear_param_store()
# self.model = PyroBayesianRegression(x_feat_vector_gp.shape[1], 1)
# guide = AutoDiagonalNormal(self.model)
# optimiser = pyro.optim.Adam({'lr': 0.03})
# svi = SVI(self.model, guide, optimiser, loss=Trace_ELBO())
#
# for j in range(self.max_step):
# # calculate the loss and take a gradient step
# loss = svi.step(x_feat_vector_gp, y_train_normal)
# if self.verbose and j % 100 == 0:
# print("[iteration %04d] loss: %.4f" % (j + 1, loss / y_train_normal.shape[0]))
def update(self, x_update: list, y_update: torch.Tensor):
# if len(y_update.shape) == 0: # y_train is a scalar
# y_update = y_update.reshape(1)
y_update = y_update.reshape(-1)
assert len(x_update) == y_update.shape[0]
assert y_update.ndim == 1
self.extractor.update(x_update)
x_feat_vector = self.extractor.get_train_features()
self.X_feat = deepcopy(x_feat_vector)
        # remove any rows containing NaNs
x_feat_vector = x_feat_vector[~np.isnan(x_feat_vector).any(axis=1)]
# update the lb and ub, in case new information changes those
self.lb, self.ub = np.min(x_feat_vector, axis=0)+1e-03, np.max(x_feat_vector, axis=0)-1e-3
x_feat_vector_gp = to_unit_cube(x_feat_vector, self.lb, self.ub)
self.X += deepcopy(x_update)
self.y = torch.cat((self.y, y_update))
self.ymean, self.ystd = torch.mean(self.y), torch.std(self.y)
if self.ystd > 0:
y = to_unit_normal(self.y, self.ymean, self.ystd)
else:
y = self.y
# pyro.clear_param_store()
# self.model = PyroBayesianRegression(x_feat_vector_gp.shape[1], 1)
# guide = AutoDiagonalNormal(self.model)
# optimiser = pyro.optim.Adam({'lr': 0.03})
# svi = SVI(self.model, guide, optimiser, loss=Trace_ELBO())
#
# for j in range(self.max_step):
# # calculate the loss and take a gradient step
# loss = svi.step(x_feat_vector_gp, y)
# if self.verbose and j % 100 == 0:
# print("[iteration %04d] loss: %.4f" % (j + 1, loss / y.shape[0]))
if self.ard:
self.model = linear_model.ARDRegression(**self.linregress_params)
else:
self.model = linear_model.BayesianRidge(**self.linregress_params)
self.model.fit(x_feat_vector_gp, y)
def predict(self, x_eval: list, include_noise_variance=False, **kwargs) -> (torch.Tensor, torch.Tensor):
if self.model is None:
raise ValueError("The GPWL object is not fitted to any data yet! Call fit or update to do so first.")
x_feat_vector = self.extractor.transform(x_eval)
x_feat_vector = to_unit_cube(x_feat_vector, self.lb, self.ub)
mean, std = self.model.predict(x_feat_vector, return_std=True)
variance = std ** 2
        if not include_noise_variance:
            # shift so the smallest predictive variance stays (just) positive
            variance -= np.min(variance) - 1e-5
            estimate_noise_var = np.mean(self.model.predict(self.X_feat, return_std=True)[1] ** 2)
            # estimate_noise_var = 1. / self.model.alpha_  # alpha_ is the imputed noise precision
            variance -= estimate_noise_var
mean = from_unit_normal(mean, self.ymean.numpy(), self.ystd.numpy())
variance = from_unit_normal(variance, self.ymean.numpy(), self.ystd.numpy(), scale_variance=True)
std = np.sqrt(variance)
return torch.tensor(mean, dtype=torch.float32), torch.tensor(std, dtype=torch.float32)
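# Hedged usage sketch (mirrors the GPWL interface; graph construction assumed):
#   import dgl
#   graphs = [dgl.rand_graph(10, 20) for _ in range(8)]
#   for g in graphs:
#       g.ndata['node_attr1'] = torch.randn(10, 1)
#   blr = BayesianLinearRegression(h=1, ard=True)
#   blr.fit(graphs, torch.randn(8))
#   mean, std = blr.predict(graphs[:2])   # note: returns (mean, std), not variance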
| 7,359 | 48.395973 | 121 | py |
grabnel | grabnel-master/bayesopt/bayesopt/predictors/base_predictor.py | import torch
from abc import abstractmethod
from bayesopt.bayesopt.acquisitions import graph_expected_improvement, graph_ucb, best_mean
import dgl
class BasePredictor:
def __init__(self, h: int = 1):
"""
The base class for predictors based on WL feature extractor.
:param h: int. Number of Weisfeiler-Lehman Iterations
"""
self.h = h
# Save history for the input and targets for the current predictor class
self.X, self.y = None, None
# the lower bound and upper bound for feature vector X and the mean and std of target vector Y
# will be initialised when the GPWL model is fitted to
# some data.
self.lb, self.ub = None, None
self.ymean, self.ystd = None, None
@abstractmethod
def fit(self, x_train: list, y_train: torch.Tensor):
"""
Train the predictor on x_train and y_train. note that fit overwrites any data already fit to the predictor
:param x_train: a list of dgl graphs
:param y_train: torch.Tensor representing the training targets
:return: None
"""
raise NotImplementedError
@abstractmethod
def update(self, x_update: list, y_update: torch.Tensor):
"""
        Similar to fit, but appends x_update and y_update to the existing training data (if there
        is no training data yet, this simply performs a fit).
:param x_update: a list of dgl graphs
:param y_update: torch.Tensor representing the training targets
:return: None
"""
raise NotImplementedError
@abstractmethod
def predict(self, x_eval: list, **kwargs) -> (torch.Tensor, torch.Tensor):
"""
Predict the graphs in x_eval using the predictor
:param x_eval: list of dgl graphs. The test graphs on which we predict
:param kwargs:
:return: (mean, variance) torch.Tensor of the same shape of x_eval
"""
raise NotImplementedError
def acquisition(self, x_eval: list, acq_func='ei', bias=None, **kwargs) -> torch.Tensor:
"""
Computes the acquisition function value at the test graphs at x_eval.
:param x_eval: list of dgl graphs.
:param acq_func: (str) The acquisition function to be used. Currently supports 'ei' and 'ucb'
:param bias: any constant, a-priori offset to be added to the acquisition function value of each graph
in x_eval. If specified, this must be a float or a tensor of the same length as x_eval
:param kwargs: any additional keyword arguments to passed to the acquisition functions
:return: acquisition function value evaluated at each graph input of x_eval
"""
assert acq_func in ['ei', 'ucb', 'mean'], f'Unknown acq function choice {acq_func}'
if acq_func == 'ei':
return graph_expected_improvement(x_eval, self, bias=bias, **kwargs)
elif acq_func == 'ucb':
return graph_ucb(x_eval, self, bias=bias, **kwargs)
elif acq_func == 'mean':
            return best_mean(x_eval, self)
| 3,094 | 43.855072 | 114 | py |
grabnel | grabnel-master/bayesopt/bayesopt/predictors/null_surrogate.py | # null surrogate
from .base_predictor import BasePredictor
import torch
from copy import deepcopy
class NullSurrogate(BasePredictor):
def __init__(self, h: int = None, ):
"""
Null surrogate
        :param h: not required or used; kept for API consistency.
"""
super().__init__(h=h)
def fit(self, x_train: list, y_train: torch.Tensor):
self.X = deepcopy(x_train)
self.y = deepcopy(y_train)
def update(self, x_update: list, y_update: torch.Tensor):
self.X += deepcopy(x_update)
self.y = torch.cat((self.y, y_update))
def predict(self, x_eval: list, **kwargs) -> (torch.Tensor, torch.Tensor):
        mean = torch.zeros(len(x_eval))
        variance = torch.ones(len(x_eval))
        return mean, variance
| 778 | 26.821429 | 78 | py |
grabnel | grabnel-master/data/utils.py | import numpy as np
import random
import torch
def setseed(seed):
"""Sets the seed for rng."""
np.random.seed(seed)
random.seed(seed)
if seed is not None:
        torch.random.manual_seed(seed)
| 210 | 18.181818 | 38 | py |
grabnel | grabnel-master/data/build_mnist.py | # generate the MNIST-75sp data in DGL format
from torchvision import datasets
import scipy.ndimage
from skimage.segmentation import slic
from scipy.spatial.distance import cdist
import argparse
import numpy as np
import datetime
import os
import random
import pickle
import multiprocessing as mp
import networkx as nx
import dgl
import torch
from tqdm import tqdm
parser = argparse.ArgumentParser(description='Extract SLIC superpixels from images')
parser.add_argument('-D', '--dataset', type=str, default='mnist', choices=['mnist', 'cifar10'])
parser.add_argument('-d', '--data_dir', type=str, default='./data', help='path to the dataset')
parser.add_argument('-o', '--out_dir', type=str, default='.', help='path where to save superpixels')
parser.add_argument('-s', '--split', type=str, default='train', choices=['train', 'val', 'test'])
parser.add_argument('-t', '--threads', type=int, default=0, help='number of parallel threads')
parser.add_argument('-n', '--n_sp', type=int, default=75, help='max number of superpixels per image')
parser.add_argument('-c', '--compactness', type=int, default=0.25, help='compactness of the SLIC algorithm '
'(Balances color proximity and space proximity): '
'0.25 is a good value for MNIST '
'and 10 for color images like CIFAR-10')
parser.add_argument('--seed', type=int, default=111, help='seed for shuffling nodes')
parser.add_argument('--n_images', type=int, default=None, help='number of images to process. If none, process all MNIST'
'images.')
args = parser.parse_args()
def process_image(params):
"""Extract superpixels from the MNIST images using SLIC"""
img, index, n_images, args, to_print, shuffle = params
assert img.dtype == np.uint8, img.dtype
img = (img / 255.).astype(np.float32)
n_sp_extracted = args.n_sp + 1 # number of actually extracted superpixels (can be different from requested in SLIC)
n_sp_query = args.n_sp + (
20 if args.dataset == 'mnist' else 50) # number of superpixels we ask to extract (larger to extract more superpixels - closer to the desired n_sp)
while n_sp_extracted > args.n_sp:
superpixels = slic(img, n_segments=n_sp_query, compactness=args.compactness, multichannel=len(img.shape) > 2)
sp_indices = np.unique(superpixels)
n_sp_extracted = len(sp_indices)
n_sp_query -= 1 # reducing the number of superpixels until we get <= n superpixels
assert args.n_sp >= n_sp_extracted > 0, (args.split, index, n_sp_extracted, args.n_sp)
assert n_sp_extracted == np.max(superpixels) + 1, (
'superpixel indices', np.unique(superpixels)) # make sure superpixel indices are numbers from 0 to n-1
if shuffle:
ind = np.random.permutation(n_sp_extracted)
else:
ind = np.arange(n_sp_extracted)
sp_order = sp_indices[ind].astype(np.int32)
if len(img.shape) == 2:
img = img[:, :, None]
n_ch = 1 if img.shape[2] == 1 else 3
sp_intensity, sp_coord = [], []
for seg in sp_order:
mask = (superpixels == seg).squeeze()
avg_value = np.zeros(n_ch)
for c in range(n_ch):
avg_value[c] = np.mean(img[:, :, c][mask])
cntr = np.array(scipy.ndimage.measurements.center_of_mass(mask)) # row, col
sp_intensity.append(avg_value)
sp_coord.append(cntr)
sp_intensity = np.array(sp_intensity, np.float32)
sp_coord = np.array(sp_coord, np.float32)
# if to_print:
# print('image={}/{}, shape={}, min={:.2f}, max={:.2f}, n_sp={}'.format(index + 1, n_images, img.shape,
# img.min(), img.max(),
# sp_intensity.shape[0]))
    # Create edges between nodes in the form of an adjacency matrix
sp_coord = sp_coord / img.shape[1]
dist = cdist(sp_coord, sp_coord)
sigma = 0.1 * np.pi
A = np.exp(- dist / sigma ** 2)
A[np.diag_indices_from(A)] = 0
mn = torch.tensor([0.11225057, 0.11225057, 0.11225057, 0.44206527, 0.43950436]).view(1, 1, -1)
sd = torch.tensor([0.2721889, 0.2721889, 0.2721889, 0.2987583, 0.30080357]).view(1, 1, -1)
node_features = ((torch.from_numpy(np.pad(np.concatenate((sp_intensity, sp_coord), axis=1),
((0, 0), (2, 0)), 'edge')).unsqueeze(0) - mn) / sd).numpy().squeeze()
graph = build_graph(A, node_attributes={'node_attr': node_features}, graph_type='dgl')
return graph, sp_intensity, sp_coord, sp_order, superpixels
# return sp_intensity, sp_coord, sp_order, superpixels
def build_graph(adjacency_matrix: np.array, node_attributes: dict = None, graph_type='nx'):
"""
Build a networkx or dgl graph from adjacency - node_attributes representation
:param adjacency_matrix: numpy array
:param node_attributes: optional. node attributes
:param graph_type: 'nx' for networkx. every other string will yield dgl graph
:return: graph (nx.Graph if graph_type == 'nx', otherwise a dgl.DGLGraph)
"""
G = nx.from_numpy_array(adjacency_matrix)
if node_attributes is not None:
for n in G.nodes():
for k, v in node_attributes.items():
# print(k, v)
G.nodes[n][k] = v[n]
if graph_type == 'nx':
return G
G = G.to_directed()
    if node_attributes is not None:
node_attrs = list(node_attributes.keys())
else:
node_attrs = []
g = dgl.from_networkx(G, node_attrs=node_attrs, edge_attrs=['weight'])
return g
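# Hedged usage sketch of build_graph (toy triangle graph, values assumed):
#   A = np.array([[0., 1., 1.],
#                 [1., 0., 1.],
#                 [1., 1., 0.]])
#   attrs = {'node_attr': np.eye(3, dtype=np.float32)}
#   g_nx = build_graph(A, attrs, graph_type='nx')     # undirected, 3 edges
#   g_dgl = build_graph(A, attrs, graph_type='dgl')   # directed, 6 edges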
dt = datetime.datetime.now()
print('start time:', dt)
if not os.path.isdir(args.out_dir):
os.mkdir(args.out_dir)
random.seed(args.seed)
np.random.seed(args.seed) # to make node random permutation reproducible (not tested)
# Read image data using torchvision
is_train = args.split.lower() == 'train'
if args.dataset == 'mnist':
data = datasets.MNIST(args.data_dir, train=is_train, download=True)
assert args.compactness < 10, 'high compactness can result in bad superpixels on MNIST'
assert 1 < args.n_sp < 28 * 28, 'the number of superpixels cannot exceed the total number of pixels or be too small'
elif args.dataset == 'cifar10':
data = datasets.CIFAR10(args.data_dir, train=is_train, download=True)
assert args.compactness > 1, 'low compactness can result in bad superpixels on CIFAR-10'
assert 1 < args.n_sp < 32 * 32, 'the number of superpixels cannot exceed the total number of pixels or be too small'
else:
raise NotImplementedError('unsupported dataset: ' + args.dataset)
images = data.train_data if is_train else data.test_data
labels = data.train_labels if is_train else data.test_labels
if not isinstance(images, np.ndarray):
images = images.numpy()
if isinstance(labels, list):
labels = np.array(labels)
if not isinstance(labels, np.ndarray):
labels = labels.numpy()
n_images = args.n_images if args.n_images is not None else len(labels)
labels = labels[:n_images] if args.n_images is not None else labels
if args.threads <= 0:
sp_data = []
for i in tqdm(range(n_images)):
sp_data.append(process_image((images[i], i, n_images, args, True, True)))
else:
with mp.Pool(processes=args.threads) as pool:
sp_data = pool.map(process_image, [(images[i], i, n_images, args, True, True) for i in range(n_images)])
superpixels = [sp_data[i][1:] for i in range(n_images)]
sp_data = [sp_data[i][0] for i in range(n_images)]
# structure the data into [(G1, y1), (G2, y2), ... ] format
save_data = list(zip(sp_data, torch.tensor(labels, dtype=torch.int32)))
with open('%s/%s_%dsp.p' % (args.out_dir, args.dataset, args.n_sp, ), 'wb') as f:
pickle.dump(save_data, f, protocol=2)
with open('%s/%s_%dsp_superpixels.p' % (args.out_dir, args.dataset, args.n_sp, ), 'wb') as f:
pickle.dump(superpixels, f, protocol=2)
print('done in {}'.format(datetime.datetime.now() - dt))
| 8,186 | 44.994382 | 155 | py |
grabnel | grabnel-master/data/generate_er.py | """Generate partitions of erdos-renyi graphs (following methodologies of Dai et al 2018).
The data is stored as a list of tuples where each tuple is of the form (x, y) where x is a DGLGraph and y is a torch
tensor containing the label."""
import argparse
import os
import pickle
from os.path import join
import dgl
import networkx as nx
import numpy as np
import torch
import tqdm
from utils import setseed
parser = argparse.ArgumentParser()
parser.add_argument('--min_n', type=int, default=90, help='Minimum number of nodes in each component.')
parser.add_argument('--max_n', type=int, default=100, help='Maximum number of nodes in each component.')
parser.add_argument('--p', type=float, default=0.05, help='Probability of connection of Erdos-Renyi Model.')
parser.add_argument('--number_of_components', type=int, nargs='+', default=[1, 2, 3],
help='Number of connected components.')
parser.add_argument('--number_of_graphs', type=int, default=5000, help='Number of graphs per class.')
parser.add_argument('--artificially_connect', dest='artificially_connect', action='store_true',
help='Connect components using the original methodology of Dai et al. (2018).')
parser.add_argument('--no-artificially_connect', dest='artificially_connect', action='store_false',
help='Keep sampling components until a connected one is sampled.')
parser.set_defaults(artificially_connect=False)
parser.add_argument('--seed', type=int, default=0, help='RNG seed.')
args = parser.parse_args()
setseed(args.seed)
def erdos_renyi_graph(min_n: int, max_n: int, p: float, connected_components: int, artificially_connect: bool) \
-> dgl.DGLHeteroGraph:
""" A graph with `connected_components` connected components. Each component is built by generating an ER(n, p)
where min_n <= n <= max_n. Each component is then connected. The way the component is connected depends on
the value of `artificially_connect`.
Args:
min_n: Minimum number of nodes in each connected component
max_n: Maximum number of nodes in each connected component
p: probability to connect nodes when generating an ER graph
connected_components: number of connected components in the final graph
artificially_connect: If true components are connected by adding an edge between successive connected
components. If false new samples will be generated until a connected one is found.
"""
components = [erdos_renyi_component(min_n, max_n, p, artificially_connect) for _ in range(connected_components)]
graph = nx.disjoint_union_all(components)
graph = dgl.from_networkx(graph)
return graph
def erdos_renyi_component(min_n: int, max_n: int, p: float, artificially_connect: bool) -> nx.classes.graph.Graph:
"""Generates a single connected component."""
n = np.random.randint(min_n, max_n+1)
graph = nx.erdos_renyi_graph(n, p)
while not nx.is_connected(graph):
if artificially_connect:
graph = artificially_connect_graph(graph)
else:
graph = nx.erdos_renyi_graph(n, p)
return graph
def artificially_connect_graph(graph: nx.classes.graph.Graph) -> nx.classes.graph.Graph:
"""Connects a graph using the methodology of Dai et al. 2018."""
edges = []
all_components = list(nx.connected_components(graph))
for i in range(len(all_components)-1):
u = np.random.choice(list(all_components[i]))
v = np.random.choice(list(all_components[i+1]))
edges.append((u, v))
graph.add_edges_from(edges)
return graph
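# Hedged usage sketch (parameter values assumed): sample one graph with two
# components and verify the component count after the dgl round-trip.
#   g = erdos_renyi_graph(min_n=10, max_n=15, p=0.3, connected_components=2,
#                         artificially_connect=False)
#   g_nx = g.to_networkx().to_undirected()
#   assert nx.number_connected_components(g_nx) == 2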
# generate data
total_number_of_samples = len(args.number_of_components) * args.number_of_graphs # number of samples in dataset
dataset = []
with tqdm.tqdm(total=total_number_of_samples) as progress_bar:
for label in args.number_of_components:
for _ in range(args.number_of_graphs):
graph = erdos_renyi_graph(args.min_n, args.max_n, args.p, label, args.artificially_connect)
dataset.append((graph, torch.tensor([label])))
progress_bar.update(1)
# save file
pickle.dump(dataset, open('erdos_renyi.pl', 'wb'))
| 4,169 | 45.333333 | 116 | py |
clam | clam-main/baselines.py | import numpy as np
import scipy.io
import time
from collections import Counter
import math
import json
from scipy.spatial import distance
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from parser import *
from helper import *
from result_helper import *
from draw_helper import *
from hyper_params import *
from mhn import *
from s_kmeans import *
if __name__ == '__main__':
start_time = time.time()
fn, fext = os.path.splitext(filename)
if filename == 'fmnist.csv':
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
M = x_train
M = tf.reshape(M, shape=(M.shape[0], M.shape[1]*M.shape[2]))
Y_true = y_train
clcnt = len(set(Y_true))
elif filename == 'Yale.mat':
# for .mat/.sci files
M, Y = read_file(filename, delimiter)
(n, d) = np.shape(M)
# for .mat file
Y -= 1
Y_true = Y.flatten()
clcnt = len(set(Y_true))
elif fext == ".npz":
data = np.load('data/' + filename, allow_pickle=True)
M = data['X']
Y = data['Y']
Y_true = Y.flatten()
clcnt = len(set(Y_true))
else:
# read data file
D = read_file(filename, delimiter, label_filename)
(n, d) = np.shape(D) # get input dimensions
d = d - 1
M = D[:, 0:d]
Y_true = D[:, d].astype(int)
clcnt = get_class_count()
print("M", np.shape(M), "Y_true", np.shape(Y_true), 'clcnt', clcnt)
# StandardScaler
scaler = StandardScaler()
M = scaler.fit_transform(M)
for baseline in baseline_list:
dic = []
iter = 0
if baseline == 'kmeans':
sort_key = 'silhouette_score_kemans_euclidean'
for kmeans_n_init in kmeans_n_init_list:
for kmeans_random_state in kmeans_random_state_list:
print('baseline:', baseline, ' iter:', iter)
s_time = time.time()
kmeans = KMeans(n_clusters=clcnt, init='k-means++', random_state=kmeans_random_state, n_init=kmeans_n_init, max_iter=1000).fit(M)
kmeans_labels = kmeans.labels_.tolist()
e_time = time.time()
print("Took", (e_time - s_time), "seconds to complete kmeans with", kmeans_n_init, "init")
scores = {
"iter": iter,
"kemans_n_init": kmeans_n_init,
"kemans_random_state": kmeans_random_state,
"number_of_cluster_found": len(set(kmeans_labels)),
"needed_time": (e_time - s_time),
"nmi_score_kemans": normalized_mutual_info_score(Y_true, kmeans_labels),
"ari_score_kemans": adjusted_rand_score(Y_true, kmeans_labels),
"silhouette_score_kemans_mahalanobis": -1,
"silhouette_score_kemans_cosine": -1,
"silhouette_score_kemans_euclidean": -1
}
# compute cluster quality matrices
try:
silhouette_score_kemans_euclidean = silhouette_score(M, kmeans_labels, metric='euclidean')
scores['silhouette_score_kemans_euclidean'] = silhouette_score_kemans_euclidean
except (ValueError):
print("Kmeans:: Oops! Only one cluster found!")
dic.append(scores)
iter += 1
elif baseline == 'nkmeans':
sort_key = 'silhouette_score_kemans_euclidean'
for kmeans_n_init in kmeans_n_init_list:
for kmeans_random_state in kmeans_random_state_list:
for dataset in dataset_list:
for std in std_list:
print('baseline:', baseline, ' iter:', iter)
if dataset == 'noisy':
M += np.random.normal(loc=0, scale=std, size=M.shape)
s_time = time.time()
kmeans = KMeans(n_clusters=clcnt, init='k-means++', random_state=kmeans_random_state, n_init=kmeans_n_init).fit(M)
kmeans_labels = kmeans.labels_.tolist()
e_time = time.time()
print("Took", (e_time - s_time), "seconds to complete kmeans with", kmeans_n_init, "init")
scores = {
"iter": iter,
"kemans_n_init": kmeans_n_init,
"kemans_random_state": kmeans_random_state,
"dataset": dataset,
"std": -1,
"number_of_cluster_found": len(set(kmeans_labels)),
"needed_time": (e_time - s_time),
"nmi_score_kemans": normalized_mutual_info_score(Y_true, kmeans_labels),
"ari_score_kemans": adjusted_rand_score(Y_true, kmeans_labels),
"nmi_score_with_original": -1,
"ari_score_with_original": -1,
"silhouette_score_kemans_euclidean": -1
}
# compute cluster quality matrices
try:
silhouette_score_kemans_euclidean = silhouette_score(M, kmeans_labels, metric='euclidean')
scores['silhouette_score_kemans_euclidean'] = silhouette_score_kemans_euclidean
except (ValueError):
print("Kmeans:: Oops! Only one cluster found!")
if dataset == 'noisy':
scores['nmi_score_with_original'] = normalized_mutual_info_score(Y_original, kmeans_labels)
scores['ari_score_with_original'] = adjusted_rand_score(Y_original, kmeans_labels)
scores['std'] = std
else:
Y_original = kmeans_labels
dic.append(scores)
iter += 1
break
dic.append(scores)
iter += 1
elif baseline == 'skmeans':
sort_key = 'silhouette_score_skemans'
for skmeans_update_interval in skmeans_update_interval_list:
for init in init_list:
s_time = time.time()
# skmeans clustering
skmeans_labels, cluster_centers = DCEC_clustering(M, clcnt, skmeans_update_interval, init)
e_time = time.time()
print("Took", (e_time - s_time), "seconds to complete skmeans with", skmeans_update_interval, "skmeans_update_interval")
scores = {
"iter": iter,
"skmeans_maxiter": skmeans_maxiter,
"skmeans_update_interval": skmeans_update_interval,
"init": init,
"number_of_cluster_found": len(set(skmeans_labels)),
"needed_time": (e_time - s_time),
"nmi_score_skemans": normalized_mutual_info_score(Y_true, skmeans_labels),
"ari_score_skemans": adjusted_rand_score(Y_true, skmeans_labels),
"silhouette_score_skemans": -1
}
                # compute cluster quality matrices
                try:
                    silhouette_score_skemans = silhouette_score(M, skmeans_labels, metric='euclidean')
                    scores['silhouette_score_skemans'] = silhouette_score_skemans
                except (ValueError):
                    print("Skmeans:: Oops! Only one cluster found!")
                print('baseline:', baseline, ' iter:', iter, ' skmeans_update_interval:', skmeans_update_interval, ' init:', init, ' silhouette_score_skemans:', scores['silhouette_score_skemans'])
dic.append(scores)
iter += 1
elif baseline == 'spectral':
sort_key = 'silhouette_score_spectral_euclidean'
for spectral_affinity in spectral_affinity_list:
for spectral_gamma in spectral_gamma_list:
for spectral_n_neighbors in spectral_n_neighbors_list:
for spectral_assign_labels in spectral_assign_labels_list:
for spectral_n_init in spectral_n_init_list:
print('baseline:', baseline, ' iter:', iter)
s_time = time.time()
# Spectral clustering
spectral = SpectralClustering(n_clusters=clcnt, n_init=spectral_n_init, gamma=spectral_gamma, affinity=spectral_affinity, n_neighbors=spectral_n_neighbors, assign_labels=spectral_assign_labels, random_state=0).fit(M)
spectral_labels = spectral.labels_.tolist()
e_time = time.time()
print("Took", (e_time - s_time), "seconds to complete spectral with", spectral_n_init, "init and affinity", spectral_affinity)
scores = {
"iter": iter,
"spectral_n_init": spectral_n_init,
"spectral_gamma": spectral_gamma,
"spectral_affinity": spectral_affinity,
"spectral_n_neighbors": spectral_n_neighbors,
"spectral_assign_labels": spectral_assign_labels,
"number_of_cluster_found": len(set(spectral_labels)),
"needed_time": (e_time - s_time),
"nmi_score_spectral": normalized_mutual_info_score(Y_true, spectral_labels),
"ari_score_spectral": adjusted_rand_score(Y_true, spectral_labels),
"silhouette_score_spectral_mahalanobis": -1,
"silhouette_score_spectral_cosine": -1,
"silhouette_score_spectral_euclidean": -1
}
# compute cluster quality matrices
try:
silhouette_score_spectral_cosine = silhouette_score(M, spectral_labels, metric='cosine')
scores['silhouette_score_spectral_cosine'] = silhouette_score_spectral_cosine
silhouette_score_spectral_euclidean = silhouette_score(M, spectral_labels, metric='euclidean')
scores['silhouette_score_spectral_euclidean'] = silhouette_score_spectral_euclidean
except (ValueError):
print("Spectral:: Oops! Only one cluster found!")
                            except np.linalg.LinAlgError:
                                print("Found numpy.linalg.LinAlgError: Singular matrix")
dic.append(scores)
iter += 1
if spectral_affinity == 'rbf':
break
if spectral_affinity == 'nearest_neighbors':
break
elif baseline == 'agglomerative':
sort_key = 'silhouette_score_agglomerative_euclidean'
for agglomerative_affinity in agglomerative_affinity_list:
for agglomerative_linkage in agglomerative_linkage_list:
if agglomerative_affinity != 'euclidean' and agglomerative_linkage == 'ward':
continue
print('baseline:', baseline, ' iter:', iter)
s_time = time.time()
# Agglomerative clustering
agglomerative = AgglomerativeClustering(n_clusters=clcnt, affinity=agglomerative_affinity, linkage=agglomerative_linkage).fit(M)
agglomerative_labels = agglomerative.labels_.tolist()
e_time = time.time()
print("Took", (e_time - s_time), "seconds to complete agglomerative with affinity", agglomerative_affinity, "and linkage", agglomerative_linkage)
scores = {
"iter": iter,
"agglomerative_affinity": agglomerative_affinity,
"agglomerative_linkage": agglomerative_linkage,
"number_of_cluster_found": len(set(agglomerative_labels)),
"needed_time": (e_time - s_time),
"nmi_score_agglomerative": normalized_mutual_info_score(Y_true, agglomerative_labels),
"ari_score_agglomerative": adjusted_rand_score(Y_true, agglomerative_labels),
"silhouette_score_agglomerative_mahalanobis": -1,
"silhouette_score_agglomerative_cosine": -1,
"silhouette_score_agglomerative_euclidean": -1
}
# compute cluster quality matrices
try:
silhouette_score_agglomerative_cosine = silhouette_score(M, agglomerative_labels, metric='cosine')
scores['silhouette_score_agglomerative_cosine'] = silhouette_score_agglomerative_cosine
silhouette_score_agglomerative_euclidean = silhouette_score(M, agglomerative_labels, metric='euclidean')
scores['silhouette_score_agglomerative_euclidean'] = silhouette_score_agglomerative_euclidean
except (ValueError):
print("Agglomerative:: Oops! Only one cluster found!")
                except np.linalg.LinAlgError:
                    print("Found numpy.linalg.LinAlgError: Singular matrix")
dic.append(scores)
iter += 1
mem_dic = {
"x_train": "x_train",
"y_pred": "y_pred",
}
# write dictionary to file
write_dic_to_file(dic, mem_dic, sort_key, directory, filename, baseline, ext)
end_time = time.time()
print("Took", (end_time - start_time)/60, "minutes to complete all baseline configs for", filename, "dataset.")
| 15,147 | 50.699659 | 248 | py |
clam | clam-main/plot_2d_cluster.py | import numpy as np;
import matplotlib.pyplot as plt
from scipy.spatial import Voronoi, voronoi_plot_2d
import tensorflow as tf
from tensorflow.keras.initializers import RandomNormal, RandomUniform
from tensorflow.keras.layers import Input, Dense, Lambda
from tensorflow.keras import Model
from scipy.spatial import distance
from matplotlib.pyplot import figure
import time
import os
import json
def get_labels(M, memories):
labels = []
for i in range(M.shape[0]):
min_dist = 100000
for j in range(len(memories)):
euclid_dist = distance.euclidean(M[i], memories[j])
if (euclid_dist < min_dist):
min_dist = euclid_dist
label = j
labels.append(label)
return labels
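# Hedged note (equivalent numpy form, assuming `M` and `memories` are arrays):
# the nearest-memory loop above can be vectorised as
#   labels = np.argmin(((M[:, None, :] - np.asarray(memories)[None, :, :]) ** 2).sum(-1), axis=1)
# which yields the same assignment, since squared and plain Euclidean distance
# share the same argmin.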
def generate_data(xpoints, ypoints):
M = []
xstep = 1/xpoints
ystep = 1/ypoints
x = 0
for i in range(xpoints):
grid_x = []
grid_y = []
y = 0
for j in range(ypoints):
grid_x.append(x)
grid_y.append(y)
y += ystep
M.append(np.stack((grid_x, grid_y), axis=1))
x += xstep
M = np.array(M)
M = np.reshape(M, (xpoints*ypoints, 2))
return M
def voronoi_tessellation(xx, yy, M):
numbPoints = len(xx)
xxyy = np.stack((xx,yy), axis=1); #combine x and y coordinates
print('xxyy:', xxyy)
    ## Perform Voronoi tessellation using the built-in function
voronoiData=Voronoi(xxyy)
#create voronoi diagram on the point pattern
voronoi_plot_2d(voronoiData, ax=ax, show_points=False, show_vertices=False, line_width=line_width, point_size=1);
plt.xlim(-.01, 1)
plt.ylim(-.01, 1)
class MHN_WITH_1_HIDDEN_LAYER(tf.keras.layers.Layer):
def __init__(self, N1, N2, beta, alpha, memories, c=1, **kwargs):
super().__init__(**kwargs)
self.N1 = N1
self.N2 = N2
self.c = c
self.beta = beta
self.alpha = alpha
self.memories = memories
def build(self, input_shape):
self.kernel = tf.expand_dims(self.memories, axis=-1)
super().build(input_shape)
def call(self, v, mask):
Mem = self.kernel
v = tf.expand_dims(v, axis=-1)
v = tf.transpose(v, perm=[2, 1, 0])
diff = Mem - v
# original clam
exp_sum_diff = tf.exp(-self.beta/2*tf.reduce_sum(diff**2, axis=1))
den = tf.expand_dims(tf.reduce_sum(exp_sum_diff, axis=0),axis=0)
num = tf.reduce_sum(diff*tf.expand_dims(exp_sum_diff,axis=1),axis=0)
update = num/den
# with mask
mask = tf.transpose(tf.expand_dims(mask, axis=0), perm=[0, 2, 1])
v += self.alpha*tf.expand_dims(update, axis=0) * mask
v = tf.transpose(v, perm=[2, 1, 0])
v = tf.squeeze(v)
return v
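# The layer above performs one damped update of the attractor dynamics
#   v <- v + alpha * sum_mu softmax_mu(-beta/2 * ||xi_mu - v||^2) * (xi_mu - v),
# i.e. each point moves toward the softmax-weighted average of the stored
# memories xi_mu (masked per coordinate); larger beta sharpens the weights
# toward the single nearest memory, approaching a hard k-means-style assignment.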
def evolution(model, memories, index, beta, alpha):
labels = []
for step, (x_train, y_train) in enumerate(test_dataset):
x_train = tf.cast(x_train,dtype=tf.float32)
y_train = tf.cast(y_train,dtype=tf.float32)
mask = tf.cast(tf.equal(x_train, y_train), dtype=tf.float32)
y_pred = model([x_train, mask])
labels += get_labels(y_pred, memories)
filename = data_dir + 'voronoi_am_' + str(index) + '_' + str(int(1/alpha)) + '_' + str(beta)
dic = {}
dic['data'] = M.tolist()
dic['labels'] = labels
dic['memories'] = memories.numpy().tolist()
json_object = json.dumps(dic, indent=4)
# Writing to data.json
with open(filename + '.json', "w") as outfile:
outfile.write(json_object)
print("----starting to plot the points-----")
filename = fig_dir + 'voronoi_am_' + str(index) + '_' + str(int(1/alpha)) + '_' + str(beta)
colors = []
for i in range(len(labels)):
colors.append(color_list[labels[i]])
    M_transpose = np.transpose(M)
    plt.scatter(M_transpose[0], M_transpose[1], color=colors, s=point_size)
for ii in range(len(memories)):
plt.scatter(memories[ii][0], memories[ii][1], c=color_list[len(memories)], s=mem_size)
plt.axis('off')
plt.savefig(filename + '.png', bbox_inches='tight')
def am_evolution(xx, yy, beta, alpha, index):
# # read data from json file and plot the graphs
# filename = data_dir + 'voronoi_am_' + str(index) + '_' + str(int(1/alpha)) + '_' + str(beta)
# data = json.load(open(filename + '.json'))
# M_ = data['data']
# labels_ = data['labels']
# memories_ = data['memories']
# filename = fig_dir + 'voronoi_am_' + str(index) + '_' + str(int(1/alpha)) + '_' + str(beta)
# colors = []
# for i in range(len(labels_)):
# colors.append(color_list[labels_[i]])
# M_tanspose = np.transpose(M_)
# plt.scatter(M_tanspose[0], M_tanspose[1], color=colors, s=point_size)
# for ii in range(len(memories_)):
# plt.scatter(memories_[ii][0], memories_[ii][1], c=color_list[len(memories_)], s=mem_size)
# # plt.text(memories_[ii][0]+diff, memories_[ii][1]+diff, str(ii+1), color=color_list[len(memories_)+1], fontsize=12)
# # plt.title('AM Partition: step = ' + str(1/alpha) + ', beta = ' + str(beta))
# plt.axis('off')
# plt.savefig(filename + '.png', bbox_inches='tight')
# training code
N1 = 2
N2 = len(xx)
input_shape = N1
N_steps = int(1/alpha)
memories = np.stack((xx,yy), axis=1); #combine x and y coordinates
memories = tf.cast(memories, dtype='float32')
# define model
input_mask = Input(shape=[input_shape])
input1 = Input(shape=[input_shape])
MHN_cell = MHN_WITH_1_HIDDEN_LAYER(N1, N2, beta, alpha, memories)
x = MHN_cell(input1, input_mask)
for i in range(N_steps-1):
x = MHN_cell(x, input_mask)
model = Model(inputs=[input1, input_mask], outputs=x)
# inference
evolution(model, memories, index, beta, alpha)
## Start of the main function
start_time = time.time()
# Simulation window parameters
xMin = 0
xMax = 1
yMin = 0
yMax = 1
# rectangle dimensions
xDelta = xMax - xMin; #width
yDelta = yMax - yMin #height
areaTotal = xDelta * yDelta;
# parameters
diff = 0.005
color_list = ['green', 'blue', 'orange', 'brown', 'm', 'black', 'red']
xx_list = [[.1, .2, .4, .6, .9]]
yy_list = [[.3, .1, .7, .5, .2]]
xpoints = 500
ypoints = 500
point_size = .4
mem_size = 128
line_width = 3
fig_size = 5
data_dir = 'plots/2d_plots/_fixed_memory_voronoi_am/' + str(xpoints) + 'x' + str(ypoints) + '/memory_' + str(len(xx_list[0])) + '/data/'
fig_dir = 'plots/2d_plots/_fixed_memory_voronoi_am/' + str(xpoints) + 'x' + str(ypoints) + '/memory_' + str(len(xx_list[0])) + '/figures/'
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
M = generate_data(xpoints, ypoints)
print("len(xx_list):", len(xx_list))
print("M.shape:", M.shape)
beta_list = [5, 15, 30, 40, 75] #[.0001, .001, .01, .1, 1, 10, 20, 50, 100, 150, 200]
alpha_list = [.1]
batch_size = 512
dataset = tf.data.Dataset.from_tensor_slices((M, M))
test_dataset = dataset.batch(batch_size)
for i in range(len(xx_list)):
for alpha in alpha_list:
for beta in beta_list:
print("alpha:", alpha, " beta:",beta)
fig, ax = plt.subplots(1, 1, figsize=(fig_size, fig_size))
s_time = time.time()
# voronoi tessellation
voronoi_tessellation(xx_list[i], yy_list[i], M)
# am evolution
am_evolution(xx_list[i], yy_list[i], beta, alpha, i)
e_time = time.time()
print("Took", e_time - s_time, "seconds to complete one config")
end_time = time.time()
print("Took", (end_time - start_time)/60, "minutes to complete all", len(xx_list)*len(alpha_list)*len(beta_list), "config")
| 7,822 | 30.292 | 138 | py |
clam | clam-main/plot_gaussian.py | import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import Voronoi, voronoi_plot_2d
import tensorflow as tf
from tensorflow.keras.initializers import RandomNormal, RandomUniform
from tensorflow.keras.layers import Input, Dense, Lambda
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
from scipy.spatial import distance
from matplotlib.pyplot import figure
import time
from scipy.stats import multivariate_normal
import random
import math
import os
import json
from sklearn.cluster import KMeans
from s_kmeans import *
from sklearn.metrics import normalized_mutual_info_score, adjusted_rand_score, silhouette_score
from collections import Counter
nmi = normalized_mutual_info_score
ari = adjusted_rand_score
sc = silhouette_score
def get_labels(M, memories):
labels = []
for i in range(M.shape[0]):
        min_dist = float('inf')  # guarantees 'label' is assigned on the first comparison
for j in range(len(memories)):
euclid_dist = distance.euclidean(M[i], memories[j])
if (euclid_dist < min_dist):
min_dist = euclid_dist
label = j
labels.append(label)
return labels
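# A vectorized equivalent of get_labels (sketch only, not called below): with the
# full (n_points, n_memories) distance matrix from scipy's cdist, the nearest
# memory per point is a single argmin along axis 1.
def get_labels_vectorized(M, memories):
    # distance.cdist computes all pairwise Euclidean distances in one call
    return np.argmin(distance.cdist(M, memories), axis=1).tolist()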
def generate_data(xpoints, ypoints):
M = []
xstep = 1/xpoints
ystep = 1/ypoints
x = 0
for i in range(xpoints):
grid_x = []
grid_y = []
y = 0
for j in range(ypoints):
grid_x.append(x)
grid_y.append(y)
y += ystep
M.append(np.stack((grid_x, grid_y), axis=1))
x += xstep
M = np.array(M)
M = np.reshape(M, (xpoints*ypoints, 2))
return M
def plot_graph(labels, true_memories, learned_memories, algo, sc_score, d_type):
if d_type == 'original':
data = M
d_dir = data_dir
f_dir = fig_dir
else:
data = M_noisy
d_dir = noisy_data_dir
f_dir = noisy_fig_dir
# Writing to data.json
dic = {}
dic['data'] = data.tolist()
dic['labels'] = labels
    dic['true_memories'] = np.asarray(true_memories).tolist()
if type(learned_memories) is list:
dic['learned_memories'] = learned_memories
else:
dic['learned_memories'] = learned_memories.numpy().tolist()
json_object = json.dumps(dic, indent=4)
with open(d_dir + algo + '.json', "w") as outfile:
outfile.write(json_object)
# plotting
n_memories = len(true_memories)
for i in range(data.shape[0]):
plt.scatter(data[i][0], data[i][1], edgecolor=color_list[labels[i]], facecolor=color_list[labels[i]], s=point_size)
for ii in range(n_memories):
plt.scatter(learned_memories[ii][0], learned_memories[ii][1], c=color_list[n_memories], s=mem_size, marker='x')
plt.scatter(true_memories[ii][0], true_memories[ii][1], c=color_list[n_memories], s=mem_size, marker='o')
plt.xticks([0 , 0.5, 1])
plt.yticks([0 , 0.5, 1])
plt.savefig(f_dir + algo + '_' + str(round(sc_score, 3)) + '.png')
plt.show()
mu = 0.2
sigma = 0.1
class MHN_WITH_1_HIDDEN_LAYER(tf.keras.layers.Layer):
def __init__(self, N1, N2, beta, alpha, memories, c=1, **kwargs):
super().__init__(**kwargs)
self.N1 = N1
self.N2 = N2
self.c = c
self.beta = beta
self.alpha = alpha
self.memories = memories
def build(self, input_shape):
self.kernel = self.add_weight(
"kernel",
shape=[self.N2, self.N1, 1],
initializer=RandomNormal(mean=mu, stddev=sigma, seed=None)
)
super().build(input_shape)
def call(self, v, mask):
Mem = self.kernel
v = tf.expand_dims(v, axis=-1)
v = tf.transpose(v, perm=[2, 1, 0])
diff = Mem - v
# original clam
exp_sum_diff = tf.exp(-self.beta/2*tf.reduce_sum(diff**2, axis=1))
den = tf.expand_dims(tf.reduce_sum(exp_sum_diff, axis=0),axis=0)
num = tf.reduce_sum(diff*tf.expand_dims(exp_sum_diff,axis=1),axis=0)
update = num/den
# with mask
mask = tf.transpose(tf.expand_dims(mask, axis=0), perm=[0, 2, 1])
v += self.alpha*tf.expand_dims(update, axis=0) * mask
v = tf.transpose(v, perm=[2, 1, 0])
v = tf.squeeze(v)
return v
def mean_squared_error(y_true, y_pred, mask):
loss = tf.reduce_mean(tf.square(y_true-y_pred)*mask)
return loss
def train_am(model, learning_rate_kernel):
learning_rate_threshold = 0.000001
optimizer = Adam(learning_rate=learning_rate_kernel)
prev_mean_loss = 100000
patience = 0
optimum_lr = learning_rate_kernel
optimum_epoch = 1
losses_schedular = []
losses = []
epochs = []
min_loss = 10000
sum_losses = 0
initial_loss = 0
nan_flag = False
N_ep = 10
max_patience = 5
loss_threshold = 0.001
reduce_frac = 0.8
clip_low = 0.001
clip_high = 1000
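    # The loop below is a hand-rolled reduce-on-plateau schedule: a 10-epoch
    # moving average of the summed loss is tracked via losses_schedular; if the
    # average improves by less than loss_threshold for max_patience consecutive
    # epochs, the kernel learning rate is scaled by reduce_frac, and training
    # stops once the rate drops under learning_rate_threshold. The best model
    # (lowest loss so far) is checkpointed to model_directory along the way.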
for epoch in range(1, N_ep+1):
# print(f'Epoch {epoch}/{N_ep}')
sum_loss = 0
isFirst = True
for step, (x_train, y_train) in enumerate(train_dataset):
#for tabular data
x_train = tf.cast(x_train,dtype=tf.float32)
y_train = tf.cast(y_train,dtype=tf.float32)
            # input jitter: x_train == y_train here, so the mask below is all ones
x_train_masked = x_train + np.random.normal(0, 0.01, size=(x_train.shape[0], 2))
mask = tf.cast(tf.equal(x_train, y_train), dtype=tf.float32)
# update weights
with tf.GradientTape() as tape:
y_pred = model([x_train_masked, mask])
loss = mean_squared_error(y_train, y_pred, mask)
sum_loss += loss
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.learning_rate.assign(learning_rate_kernel) # for optimizing memories
optimizer.apply_gradients(zip(gradients[:1], model.trainable_variables[:1]))
for _ in range(1, len(model.trainable_variables)):
model.trainable_variables[_].assign(tf.clip_by_value(model.trainable_variables[_], clip_low, clip_high))
sum_loss = tf.sqrt(sum_loss)
metrics = ' - '.join(["{}: {:.4f}".format('sum_loss', m) for m in [sum_loss]])
# print(metrics)
if math.isnan(sum_loss):
nan_flag = True
print('----------- Got nan value for beta', beta, ': Aborting the config.. ----------- ')
return False
sum_losses += sum_loss
losses_schedular.append(sum_loss)
losses.append(sum_loss)
epochs.append(epoch)
if epoch == 1:
initial_loss = sum_loss
if sum_loss < min_loss:
min_loss = sum_loss
model.save(model_directory)
optimum_lr = learning_rate_kernel
optimum_epoch = epoch
if epoch >= 10:
mean_loss = sum_losses/10
sum_losses -= losses_schedular.pop(0)
loss_diff = prev_mean_loss - mean_loss
if loss_diff < loss_threshold:
patience += 1
if patience == max_patience:
learning_rate_kernel *= reduce_frac
patience = 0
else:
patience = 0
prev_mean_loss = mean_loss
if learning_rate_kernel < learning_rate_threshold:
print('Learning Rate is too low! Breaking the loop!')
break
return True
def evolution(model, memories, d_type):
labels = []
for step, (x_train, y_train) in enumerate(test_dataset):
x_train = tf.cast(x_train,dtype=tf.float32)
y_train = tf.cast(y_train,dtype=tf.float32)
mask = tf.cast(tf.equal(x_train, y_train), dtype=tf.float32)
y_pred = model([x_train, mask])
KS = model.layers[2].get_weights()[0]
KS = tf.squeeze(KS)
labels += get_labels(y_pred, KS)
num_of_cluster_found = len(Counter(labels).keys())
if(num_of_cluster_found == 1):
print('----------- Found only one cluster! -----------')
return False
if d_type == 'original':
data = M
else:
data = M_noisy
sc_score = sc(data, labels)
print('----------- sc_am = %.4f' % sc_score, 'alpha:', alpha, 'learning_rate:', learning_rate_kernel, 'beta:', beta)
sc_list.append(sc_score)
algo = 'AM_' + d_type + str(alpha) + '_' + str(learning_rate_kernel) + '_' + str(beta)
plot_graph(labels, memories, KS, algo, sc_score, d_type)
return True
def am_evolution(xx, yy, beta, alpha, index, learning_rate_kernel, d_type):
N1 = 2
N2 = len(xx)
input_shape = N1
N_steps = int(1/alpha)
memories = np.stack((xx,yy), axis=1); #combine x and y coordinates
memories = tf.cast(memories, dtype='float32')
# define model
input_mask = Input(shape=[input_shape])
input1 = Input(shape=[input_shape])
MHN_cell = MHN_WITH_1_HIDDEN_LAYER(N1, N2, beta, alpha, memories)
x = MHN_cell(input1, input_mask)
for i in range(N_steps-1):
x = MHN_cell(x, input_mask)
model = Model(inputs=[input1, input_mask], outputs=x)
# training
if(not train_am(model, learning_rate_kernel)): return False
# inference
if not os.path.exists(model_directory):
return False
model = tf.keras.models.load_model(model_directory, compile=False)
return evolution(model, memories, d_type)
def read_file(fname, delimiter):
f = open('data/' + fname, "r")
X = []
for l in f.readlines():
a = l.strip().split(delimiter)
X.append(list(map(float, [float(x) for x in a])))
return np.array(X)
def voronoi_tessellation(xx, yy, M):
numbPoints = len(xx)
    xxyy = np.stack((xx, yy), axis=1)  # combine x and y coordinates
    ## Perform Voronoi tessellation using the built-in function
    voronoiData = Voronoi(xxyy)
    # create voronoi diagram on the point pattern
    voronoi_plot_2d(voronoiData, ax=ax, show_points=False, show_vertices=False, point_size=1)
plt.xlim(-.5, 1.5)
plt.ylim(-.5, 1.5)
def calculate_scores(data_dir, files):
data = json.load(open(data_dir + 'original.json'))
M_ = data['data']
true_labels = data['labels']
for i in range(len(files)):
data = json.load(open(data_dir + files[i] + '.json'))
labels = data['labels']
sc_score = sc(M_, labels)
nmi_score = nmi(true_labels, labels)
ari_score = ari(true_labels, labels)
print(files[i], ':: sc:', sc_score, 'nmi:', nmi_score, 'ari:', ari_score)
def plot_graphs(data_dir, fig_dir, algos):
true_labels = []
for i in range(len(algos)):
fig, ax = plt.subplots(1, 1, figsize=(5,5))
data = json.load(open(data_dir + algos[i] + '.json'))
M_ = data['data']
labels_ = data['labels']
if algos[i] == 'original':
true_memories_ = data['memories']
true_labels = labels_
else:
true_memories_ = data['true_memories']
learned_memories_ = data['learned_memories']
# # for noisy data, we need to remove the outlier
# del M_[first_ind_outlier:]
colors = []
for j in range(len(labels_)):
colors.append(color_list[labels_[j]])
        M_transpose = np.transpose(M_)
        plt.scatter(M_transpose[0], M_transpose[1], color=colors, s=point_size)
for ii in range(len(true_memories_)):
# plt.scatter(true_memories_[ii][0], true_memories_[ii][1], c=color_list[len(true_memories_)], s=mem_size, marker='o')
if algos[i] != 'original':
plt.scatter(learned_memories_[ii][0], learned_memories_[ii][1], c=color_list[len(true_memories_)], s=mem_size, marker='o', alpha=.8)
nmi_score = nmi(true_labels, labels_)
ari_score = ari(true_labels, labels_)
sc_score = sc(M_, labels_, metric='mahalanobis')
# sc_score = sc(M_, labels_)
text = 'NMI: ' + str(round(nmi_score, 3))
plt.text(.3, .9, text, fontsize = 14)
text = 'ARI: ' + str(round(ari_score, 3))
plt.text(.3, .84, text, fontsize = 14)
text = 'SC: ' + str(round(sc_score, 3))
plt.text(.3, .78, text, fontsize = 14)
plt.xlim([0, 1]) #original
plt.ylim([0, 1])
plt.title('ClAM_clean', fontsize=14)
plt.tick_params(left = False, right = False , labelleft = False , labelbottom = False, bottom = False)
plt.savefig(fig_dir + algos[i] + '.png', bbox_inches='tight')
## main function starts here
start_time = time.time()
diff = 0.005
random_seed=1000
color_list = ['green', 'orange', 'm', 'black', '#f2ab15', 'black', 'm', 'orange', 'brown', 'black', 'red']
xx_list = [[.2, .5, .8]]
yy_list = [[.5, .5, .5]]
# xx_list = [[.3, .45, .65]]
# yy_list = [[.6, .15, .6]]
memories = np.stack((xx_list[0], yy_list[0]), axis=1)  # combine x and y coordinates
print('len(memories):', len(memories))
print('memories:', memories)
point_size = 10 #30
mem_size = 70 #210
point_range_start_list = [250, 100, 200]
point_range_end_list = [300, 150, 250]
# cov_val = [.01, .01] # working covariances
# fixed_cov_val = 0.0105 # working fixed_cov_val
cov_val = [.01, .01, .01] # working
fixed_cov_val = 0.0105 # working
fig, ax = plt.subplots(1, 1, figsize=(5,5))
labels = []
for idx, val in enumerate(cov_val):
cov = np.array([[fixed_cov_val, val], [val, fixed_cov_val]])
# Generating a Gaussian bivariate distribution with given mean and covariance matrix
distr = multivariate_normal(cov = cov, mean = memories[idx], allow_singular=True, seed = random_seed)
# Generating samples out of the distribution
data = distr.rvs(size = random.randrange(point_range_start_list[idx], point_range_end_list[idx]))
if idx == 0:
M = data
else:
M = np.concatenate((M, data), axis = 0)
labels.append([idx]*len(data))
plt.scatter(data[:,0], data[:,1], c=color_list[idx], s=point_size)
for ii in range(len(memories)):
plt.scatter(memories[ii][0], memories[ii][1], c=color_list[len(memories)], s=mem_size)
# create noisy data
outliers = []
# num_of_outliers = int(M.shape[0]*.05)
num_of_outliers = 3
print('num_of_outliers:', num_of_outliers)
outliers.append((-.05, -.7))
outliers.append((.1, .1))
outliers.append((.3, -.5))
outliers = np.array(outliers)
M_noisy= np.concatenate((M, outliers), axis = 0)
label_noisy = labels.copy()
label_noisy.append([len(cov_val)+1]*len(outliers))
print('M.shape:', M.shape)
print('M_noisy.shape:', M_noisy.shape)
# AM parameters
alpha_list = [.1, .05] # [.1, .05]
learning_rate_kernel_list = [0.001, .01, .1] # [0.001, .01, .1]
beta_list = [.001, .01, .05, .1, .5, 1, 2, 5, 10, 20, 30, 50, 100, 200] # [.001, .01, .05, .1, 1, 5, 10, 30, 50, 100, 200]
batch_size = 32
number_of_points = '(.3k-.5k)_'
suffix = str(len(alpha_list)) + '_' + str(len(learning_rate_kernel_list)) + '_' + str(len(beta_list))
subdir = 'plots/2d_plots/_soft_kmeans/3_clusters/train_' + number_of_points + suffix + '/'
model_base = 'plots/2d_plots/saved_models/train_' + number_of_points + suffix
sc_list = []
data_dir = subdir + 'original/data/'
fig_dir = subdir + 'original/figures/'
noisy_data_dir = subdir + 'noisy/data/'
noisy_fig_dir = subdir + 'noisy/figures/'
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
if not os.path.exists(noisy_data_dir):
os.makedirs(noisy_data_dir)
if not os.path.exists(noisy_fig_dir):
os.makedirs(noisy_fig_dir)
# # read data from json file and plot the graphs
# # for clean data
# data_dir = 'plots/2d_plots/_rebuttal/2_clusters/train_(.3k-.5k)_2_3_11/original/data/'
# fig_dir = 'plots/2d_plots/_rebuttal/2_clusters/train_(.3k-.5k)_2_3_11/original/figures/new/'
# algos = ['original', 'AM_original0.05_0.1_0.01'] # ['AM_original0.05_0.1_0.01', 'Kmeans', , '_AM_0.2_0.1_0.05']
# plot_graphs(data_dir, fig_dir, algos)
# # for noisy data
# data_dir = 'plots/2d_plots/_rebuttal/2_clusters/train_(.3k-.5k)_2_3_11/noisy/data/'
# fig_dir = 'plots/2d_plots/_rebuttal/2_clusters/train_(.3k-.5k)_2_3_11/noisy/figures/new/'
# algos = ['noisy', 'AM_noisy0.05_0.1_0.001'] # ['noisy', 'AM_noisy0.05_0.1_0.001']
# plot_graphs(data_dir, fig_dir, algos)
# # calculate sc, nmi, ari scores from data
# data_dir = 'plots/2d_plots/_soft_kmeans/workable_final_train_(.2k-.4k)_3_3_11/data/'
# files = ['Kmeans', 'Soft-Kmeans (DCEC version)', '_AM_0.2_0.1_0.05']
# calculate_scores(data_dir, files)
# plt original data
plt.xticks([0, 0.5, 1])
plt.yticks([0, 0.5, 1])
plt.savefig(fig_dir + 'original.png')
# Writing original data to data.json
labels = [item for sublist in labels for item in sublist]
dic = {}
dic['data'] = M.tolist()
dic['labels'] = labels
dic['memories'] = memories.tolist()
json_object = json.dumps(dic, indent=4)
with open(data_dir + 'original.json', "w") as outfile:
outfile.write(json_object)
# # plot noisy data
# label_noisy = [item for sublist in label_noisy for item in sublist]
# fig, ax = plt.subplots(1, 1, figsize=(5,5))
# for i in range(M_noisy.shape[0]):
# plt.scatter(M_noisy[i,0], M_noisy[i,1], c=color_list[label_noisy[i]], s=point_size)
# for ii in range(len(memories)):
# plt.scatter(memories[ii][0], memories[ii][1], c=color_list[len(memories)], s=mem_size)
# # plt.scatter(outliers[:,0], outliers[:,1], c=color_list[i], s=point_size)
# plt.xticks([0 , 0.5, 1, 1.5])
# plt.yticks([0, 0.5, 1, 1.5])
# plt.savefig(noisy_fig_dir + 'noisy.png')
# # plt.show()
# # Writing noisy data to data.json
# dic = {}
# dic['data'] = M_noisy.tolist()
# dic['labels'] = label_noisy
# dic['memories'] = memories.tolist()
# json_object = json.dumps(dic, indent=4)
# with open(noisy_data_dir + 'noisy.json', "w") as outfile:
# outfile.write(json_object)
# Kmeans
print('Kmeans original begins')
s_time = time.time()
kmeans_n_init = 1000
kmeans = KMeans(n_clusters=len(memories), init='k-means++', random_state=0, n_init=kmeans_n_init, max_iter=1000).fit(M)
kmeans_labels = kmeans.labels_.tolist()
kmeans_cluster_centers = kmeans.cluster_centers_.tolist()
print('---- sc_kmeans = %.4f' % sc(M, kmeans_labels))
fig, ax = plt.subplots(1, 1, figsize=(5,5))
plot_graph(kmeans_labels, memories, kmeans_cluster_centers, 'Kmeans_original', sc(M, kmeans_labels), 'original')
e_time = time.time()
print("Took", (e_time - s_time), "seconds to complete kmeans with", kmeans_n_init, "init")
# print('Kmeans noisy begins')
# s_time = time.time()
# kmeans_n_init = 1000
# kmeans = KMeans(n_clusters=len(memories), init='k-means++', random_state=0, n_init=kmeans_n_init, max_iter=1000).fit(M_noisy)
# kmeans_labels = kmeans.labels_.tolist()
# kmeans_cluster_centers = kmeans.cluster_centers_.tolist()
# print('---- sc_kmeans = %.4f' % sc(M_noisy, kmeans_labels))
# fig, ax = plt.subplots(1, 1, figsize=(5,5))
# plot_graph(kmeans_labels, memories, kmeans_cluster_centers, 'Kmeans_noisy', sc(M_noisy, kmeans_labels), 'noisy')
# e_time = time.time()
# print("Took", (e_time - s_time), "seconds to complete kmeans with", kmeans_n_init, "init")
# # Soft-Kmeans (DCEC version)
# data_dir = 'plots/2d_plots/_soft_kmeans/3_clusters/train_(.3k-.5k)_2_3_11/data/'
# fig_dir = 'plots/2d_plots/_soft_kmeans/3_clusters/train_(.3k-.5k)_2_3_11/figures/'
# data = json.load(open(data_dir + 'original.json'))
# M = np.array(data['data'])
# print('Soft-Kmeans begins')
# s_time = time.time()
# labels, cluster_centers = DCEC_clustering(M, len(memories), skmeans_update_interval_list[0], init_list[0])
# soft_kmeans_labels = labels.tolist()
# print('---- sc_skmeans = %.4f' % sc(M, soft_kmeans_labels))
# cluster_centers = np.asarray(cluster_centers).reshape([len(memories), 2])
# soft_kmeans_cluster_centers = cluster_centers.tolist()
# fig, ax = plt.subplots(1, 1, figsize=(5,5))
# plot_graph(soft_kmeans_labels, memories, soft_kmeans_cluster_centers, 'Soft-Kmeans (DCEC version)', sc(M, soft_kmeans_labels))
# e_time = time.time()
# print("Took", (e_time - s_time), "seconds to complete Soft-Kmeans DCEC version")
# AM evolution
# for original data
dataset = tf.data.Dataset.from_tensor_slices((M, M))
train_dataset = dataset.shuffle(M.shape[0]).batch(batch_size)
test_dataset = dataset.batch(batch_size)
for i in range(len(xx_list)):
for alpha in alpha_list:
for learning_rate_kernel in learning_rate_kernel_list:
for beta in beta_list:
print("alpha:", alpha, " beta:", beta, " learning_rate_kernel:",learning_rate_kernel)
model_directory = model_base + str(alpha) + '_' + str(learning_rate_kernel) + '_' + str(beta)
fig, ax = plt.subplots(1, 1, figsize=(5,5))
s_time = time.time()
# am evolution
if(not am_evolution(xx_list[i], yy_list[i], beta, alpha, i, learning_rate_kernel, 'original')):
continue
e_time = time.time()
# # for noisy data
# dataset = tf.data.Dataset.from_tensor_slices((M_noisy, M_noisy))
# train_dataset = dataset.shuffle(M.shape[0]).batch(batch_size)
# test_dataset = dataset.batch(batch_size)
# for i in range(len(xx_list)):
# for alpha in alpha_list:
# for learning_rate_kernel in learning_rate_kernel_list:
# for beta in beta_list:
# print("alpha:", alpha, " beta:", beta, " learning_rate_kernel:",learning_rate_kernel)
# model_directory = model_base + str(alpha) + '_' + str(learning_rate_kernel) + '_' + str(beta)
# fig, ax = plt.subplots(1, 1, figsize=(5,5))
# s_time = time.time()
# # am evolution
# if(not am_evolution(xx_list[i], yy_list[i], beta, alpha, i, learning_rate_kernel, 'noisy')):
# continue
# e_time = time.time()
end_time = time.time()
print("Took", (end_time - start_time)/60, "minutes to complete all", len(learning_rate_kernel_list)*len(alpha_list)*len(beta_list), "config") | 22,110 | 34.491172 | 148 | py |
clam | clam-main/mhn.py | import tensorflow as tf
from tensorflow.keras.initializers import RandomNormal, RandomUniform
from hyper_params import *
import numpy as np
class MHN_WITH_1_HIDDEN_LAYER(tf.keras.layers.Layer):
def __init__(self, N1, N2, beta, alpha, init_mem=None, c=1, **kwargs):
super().__init__(**kwargs)
self.N1 = N1
self.N2 = N2
self.c = c
self.beta = beta
self.alpha = alpha
self.init_mem = init_mem
def build(self, input_shape):
# for original kmeans
self.kernel = self.add_weight(
"kernel",
shape=[self.N2, self.N1, 1],
initializer=RandomNormal(mean=mu, stddev=sigma, seed=None)
# initializer=tf.constant_initializer(self.init_mem)
)
# for weighted clam
# self.weight = self.add_weight(
# "weight",
# shape=[self.N2, 1],
# initializer=RandomNormal(mean=mu, stddev=sigma, seed=None)
# )
super().build(input_shape)
def call(self, v, mask):
Mem = self.kernel
v = tf.expand_dims(v, axis=-1)
v = tf.transpose(v, perm=[2, 1, 0])
diff = Mem - v
# original clam
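        # exp_sum_diff[j, b] = exp(-beta/2 * ||Mem_j - v_b||^2): unnormalized
        # softmax weights over the N2 memories for every sample in the batch.
        # num sums the weighted displacements (Mem_j - v_b) and den normalizes,
        # so update is a softmax-weighted mean of (Mem - v): one soft k-means /
        # mean-shift style step pulling each point toward a temperature-beta
        # weighted average of the memories.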
exp_sum_diff = tf.exp((-self.beta/2)*tf.reduce_sum(diff**2, axis=1))
den = tf.expand_dims(tf.reduce_sum(exp_sum_diff, axis=0),axis=0)
num = tf.reduce_sum(diff*tf.expand_dims(exp_sum_diff,axis=1),axis=0)
## for weighted clam
# exp_sum_diff = tf.exp((-self.beta/2)*tf.reduce_sum(diff**2, axis=1))
# num = tf.reduce_sum(diff*tf.expand_dims(self.weight*exp_sum_diff,axis=1),axis=0)
# den = tf.expand_dims(tf.reduce_sum(self.weight*exp_sum_diff, axis=0),axis=0)
update = num/den
# with mask
mask = tf.transpose(tf.expand_dims(mask, axis=0), perm=[0, 2, 1])
v += self.alpha*tf.expand_dims(update, axis=0) * mask
v = tf.transpose(v, perm=[2, 1, 0])
v = tf.squeeze(v, axis=2)
return v | 2,017 | 33.20339 | 90 | py |
clam | clam-main/s_kmeans.py | # from time import time
import numpy as np
import keras.backend as K
from tensorflow.keras.layers import Input, Layer, InputSpec
from keras.models import Model
from keras.utils.vis_utils import plot_model
from sklearn.cluster import KMeans
import tensorflow as tf
from helper import *
class ClusteringLayer(Layer):
"""
    Clustering layer converts an input sample (feature) to a soft label, i.e. a vector that represents the probability of the
    sample belonging to each cluster. The probability is calculated with Student's t-distribution.
# Example
```
model.add(ClusteringLayer(n_clusters=10))
```
# Arguments
n_clusters: number of clusters.
weights: list of Numpy array with shape `(n_clusters, n_features)` which represents the initial cluster centers.
alpha: parameter in Student's t-distribution. Default to 1.0.
# Input shape
2D tensor with shape: `(n_samples, n_features)`.
# Output shape
2D tensor with shape: `(n_samples, n_clusters)`.
"""
def __init__(self, n_clusters, weights=None, alpha=1.0, **kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(ClusteringLayer, self).__init__(**kwargs)
self.n_clusters = n_clusters
self.alpha = alpha
self.initial_weights = weights
self.input_spec = InputSpec(ndim=2)
def build(self, input_shape):
assert len(input_shape) == 2
input_dim = input_shape[1]
self.input_spec = InputSpec(dtype=K.floatx(), shape=(None, input_dim))
self.clusters = self.add_weight(shape=(self.n_clusters, input_dim), initializer='glorot_uniform', name='clusters')
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def call(self, inputs, **kwargs):
""" student t-distribution, as same as used in t-SNE algorithm.
q_ij = 1/(1+dist(x_i, u_j)^2), then normalize it.
Arguments:
inputs: the variable containing data, shape=(n_samples, n_features)
Return:
q: student's t-distribution, or soft labels for each sample. shape=(n_samples, n_clusters)
"""
q = 1.0 / (1.0 + (K.sum(K.square(K.expand_dims(inputs, axis=1) - self.clusters), axis=2) / self.alpha))
q **= (self.alpha + 1.0) / 2.0
q = K.transpose(K.transpose(q) / K.sum(q, axis=1))
return q
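    # Worked example of the soft assignment above (alpha = 1): a sample at
    # squared distances [0, 1] from two centers gives q ~ [1, 1/2] before
    # normalization, i.e. q = [2/3, 1/3] - closer centers receive
    # proportionally larger soft labels.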
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) == 2
return input_shape[0], self.n_clusters
def get_config(self):
config = {'n_clusters': self.n_clusters}
base_config = super(ClusteringLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class DCEC(object):
def __init__(self,
input_shape,
n_clusters=10,
alpha=1.0):
super(DCEC, self).__init__()
self.n_clusters = n_clusters
self.input_shape = input_shape
self.alpha = alpha
self.pretrained = False
self.y_pred = []
input = Input(shape=[input_shape[0]])
clustering_layer = ClusteringLayer(self.n_clusters, name='clustering')
self.model = Model(inputs=input, outputs=clustering_layer(input))
def load_weights(self, weights_path):
self.model.load_weights(weights_path)
def predict(self, x):
q = self.model.predict(x, verbose=0)
return q.argmax(1)
@staticmethod
def target_distribution(q):
weight = q ** 2 / q.sum(0)
return (weight.T / weight.sum(1)).T
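    # This is the standard DEC auxiliary target: squaring q and renormalizing
    # first per cluster (q.sum(0)) and then per sample sharpens confident
    # assignments while down-weighting clusters that already absorb many
    # points, so training pushes q toward a crisper version of itself.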
def compile(self, loss=['kld'], loss_weights=[1], optimizer='adam'):
self.model.compile(loss=loss, loss_weights=loss_weights, optimizer=optimizer)
def fit(self, x, init, y=None, batch_size=256, maxiter=2e4, tol=1e-5,
update_interval=140, cae_weights=None, save_dir='./results/temp'):
if init == 'kmeans':
# Step 2: initialize cluster centers using k-means
kmeans = KMeans(n_clusters=self.n_clusters, n_init=kmeans_n_init_list[0])
self.y_pred = kmeans.fit_predict(x)
y_pred_last = np.copy(self.y_pred)
self.model.get_layer(name='clustering').set_weights([kmeans.cluster_centers_])
else:
cluster_centers = np.random.normal(mu, sigma, size=(self.n_clusters, x.shape[1]))
self.model.get_layer(name='clustering').set_weights([cluster_centers])
loss = [0]
index = 0
for ite in range(int(maxiter)):
if ite % update_interval == 0:
q = self.model.predict(x, verbose=0)
p = self.target_distribution(q) # update the auxiliary target distribution p
# evaluate the clustering performance
self.y_pred = q.argmax(1)
loss = np.round(loss, 5)
print('Iter', ite, '; loss=', loss)
if ite == 0:
y_pred_last = np.copy(self.y_pred)
continue
# check stop criterion
delta_label = np.sum(self.y_pred != y_pred_last).astype(np.float32) / self.y_pred.shape[0]
y_pred_last = np.copy(self.y_pred)
if ite > 0 and delta_label < tol:
print('delta_label ', delta_label, '< tol ', tol)
print('Reached tolerance threshold. Stopping training.')
break
# train on batch
if (index + 1) * batch_size > x.shape[0]:
loss = self.model.train_on_batch(x=x[index * batch_size::], y=p[index * batch_size::])
index = 0
else:
loss = self.model.train_on_batch(x=x[index * batch_size:(index + 1) * batch_size], y=p[index * batch_size:(index + 1) * batch_size])
index += 1
ite += 1
return self.model.get_layer(name='clustering').get_weights()
def DCEC_clustering(x, n_clusters, skmeans_update_interval, init):
# prepare the DCEC model
dcec = DCEC(input_shape=x.shape[1:], n_clusters=n_clusters)
optimizer = 'adam'
dcec.compile(loss=['kld'], loss_weights=[1], optimizer=optimizer)
cluster_centers = dcec.fit(x, init, y=None, tol=skmeans_tolerance, maxiter=skmeans_maxiter, update_interval=skmeans_update_interval)
y_pred = dcec.y_pred
return y_pred, cluster_centers | 6,577 | 38.866667 | 148 | py |
clam | clam-main/amc_t.py | import numpy as np
import scipy.io
import time
from collections import Counter
import math
import tensorflow as tf
# import tensorflow_addons as tfa
from tensorflow.keras.layers import Input, Dense, Lambda
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
# from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.python.client import device_lib
from scipy.spatial import distance
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.spatial.distance import cdist
from joblib import Parallel, delayed
from parser import *
from helper import *
from result_helper import *
from hyper_params import *
from mhn import *
if __name__ == '__main__':
tf.get_logger().setLevel('ERROR')
fn, fext = os.path.splitext(filename)
start_time = time.time()
if filename == 'fmnist.csv':
# load fashion-mnist dataset
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
M = x_train
M = tf.reshape(M, shape=(M.shape[0], M.shape[1]*M.shape[2]))
Y_true = y_train
clcnt = len(set(Y_true))
elif filename == 'Yale.mat':
# for .mat/.sci files
M, Y = read_file(filename, delimiter)
(n, d) = np.shape(M)
# for .mat file
Y -= 1
Y_true = Y.flatten()
clcnt = len(set(Y_true))
elif fext == ".npz":
data = np.load('data/' + filename, allow_pickle=True)
M = data['X']
Y = data['Y']
Y_true = np.array(list(map(int, Y)))
Y_true -= 1
clcnt = len(set(Y_true))
else:
D = read_file(filename, delimiter, label_filename)
(n, d) = np.shape(D) # get input dimensions
d = d - 1
M = D[:, 0:d]
Y_true = D[:, d].astype(int)
clcnt = get_class_count()
print("M", np.shape(M), "Y_true", np.shape(Y_true), 'clcnt', clcnt)
print_distribution(Y_true.tolist(), clcnt)
# StandardScaler
scaler = StandardScaler()
M = scaler.fit_transform(M)
# set model params
N1 = M.shape[1]
N2 = clcnt
input_shape = N1
c = 1
Ns = M.shape[0] # len of train_data
dic = []
mem_dic = []
iter = 0
printed_flag = False
for initial_learning_rate_kernel in learning_rate_kernel_list:
for beta in beta_list:
for alpha in alpha_list:
for batch_size in batch_size_list:
for mask_prob in mask_prob_list:
for N_steps_add in N_steps_add_list:
for dim_change_cycle in dim_change_cycle_list:
for mask_value in mask_value_list:
iter += 1
# get the exact mask_values
if mask_value == '0':
mask_values = np.zeros(N1)
elif mask_value == 'min':
mask_values = np.amin(M, axis=0)
elif mask_value == 'max':
mask_values = np.amax(M, axis=0)
else:
mask_values = np.mean(M, axis=0)
for init in range(N_init):
print('-----------iter:', iter, ' init:', init+1, '-----------')
N_steps = int(1/alpha) + N_steps_add
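                                        # N_steps is the unrolled recurrence depth: int(1/alpha) steps of
                                        # size alpha roughly cover one unit of total displacement, and
                                        # N_steps_add (from hyper_params) adds extra refinement steps.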
learning_rate_kernel = initial_learning_rate_kernel
# define model
input_mask = Input(shape=[input_shape])
input1 = Input(shape=[input_shape])
MHN_cell = MHN_WITH_1_HIDDEN_LAYER(N1, N2, beta, alpha)
x = MHN_cell(input1, input_mask)
for i in range(N_steps-1):
x = MHN_cell(x, input_mask)
model = Model(inputs=[input1, input_mask], outputs=x)
# set optimizer
optimizer = Adam(learning_rate=learning_rate_kernel)
# create train and test dataset
dataset = tf.data.Dataset.from_tensor_slices((M, M))
train_dataset = dataset.shuffle(Ns).batch(batch_size)
test_dataset = dataset.batch(batch_size)
# set initial variables
prev_mean_loss = 100000
patience = 0
optimum_lr = learning_rate_kernel
optimum_epoch = 1
optimum_memory = []
losses_schedular = []
losses = []
epochs = []
min_loss = 10000
sum_losses = 0
initial_loss = 0
nan_flag = False
# start training the model
                                        # track training time
start_train_time = time.time()
for epoch in range(1, N_ep+1):
# print(f'Epoch {epoch}/{N_ep}')
sum_loss = 0
is_entire_dim = False
if epoch % dim_change_cycle == 0:
is_entire_dim = True
for step, (x_train, y_train) in enumerate(train_dataset):
x_train = tf.cast(x_train,dtype=tf.float32)
y_train = tf.cast(y_train,dtype=tf.float32)
# # without mask training
# x_train_masked = x_train
# mask = tf.cast(tf.equal(x_train, y_train), dtype=tf.float32)
# with mask training
x_train_masked, mask = construct_masked_data(x_train, N1, mask_prob, mask_values, is_entire_dim)
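                                                # construct_masked_data is defined in helper.py (not shown
                                                # here); from its use it returns a corrupted copy of the
                                                # batch (entries replaced with mask_values at rate mask_prob,
                                                # or across all dimensions when is_entire_dim is set)
                                                # together with a mask that weights the reconstruction
                                                # loss below per dimension.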
KS = model.layers[2].get_weights()[0]
# update weights
with tf.GradientTape() as tape:
y_pred = model([x_train_masked, mask])
loss = mean_squared_loss(y_train, y_pred, mask)
sum_loss += loss
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.learning_rate.assign(learning_rate_kernel) # for optimizing memories
optimizer.apply_gradients(zip(gradients[:1], model.trainable_variables[:1]))
for _ in range(1, len(model.trainable_variables)):
model.trainable_variables[_].assign(tf.clip_by_value(model.trainable_variables[_], clip_low, clip_high))
if math.isnan(sum_loss):
nan_flag = True
print('Got nan value for beta', beta, ': Aborting the config..')
break
sum_losses += sum_loss
losses_schedular.append(sum_loss)
losses.append(sum_loss)
epochs.append(epoch)
if epoch == 1:
initial_loss = sum_loss
if sum_loss < min_loss:
min_loss = sum_loss
if not os.path.exists(model_directory):
os.makedirs(model_directory)
model.save(model_directory + filename)
optimum_lr = learning_rate_kernel
optimum_epoch = epoch
optimum_memory = model.layers[2].get_weights()[0].tolist()
if epoch >= 10:
mean_loss = sum_losses/10
sum_losses -= losses_schedular.pop(0)
loss_diff = prev_mean_loss - mean_loss
if loss_diff < loss_threshold:
patience += 1
if patience == max_patience:
learning_rate_kernel *= reduce_frac
patience = 0
else:
patience = 0
prev_mean_loss = mean_loss
if learning_rate_kernel < learning_rate_threshold:
print('Learning Rate is too low! Breaking the loop!')
break
if nan_flag:
break
end_train_time = time.time()
print("Training: Took", (end_train_time - start_train_time), "seconds to complete one config for epoch", epoch, "for", filename, "dataset.")
# inference step
# get the best model from saved directory in respect to minimum loss
model = tf.keras.models.load_model(model_directory + filename, compile=False)
start_test_time = time.time()
label_pred = []
for step, (x_train, y_train) in enumerate(test_dataset):
x_train = tf.cast(x_train,dtype=tf.float32)
y_train = tf.cast(y_train,dtype=tf.float32)
mask = tf.cast(tf.equal(x_train, y_train), dtype=tf.float32)
y_pred = model([x_train, mask])
KS = model.layers[2].get_weights()[0]
# for euclidian am
KS = tf.squeeze(KS)
label_pred += get_final_clusters(KS, y_pred)
end_test_time = time.time()
print("Inference: Took", (end_test_time - start_test_time), "seconds to complete one config for", filename, "dataset.")
# # get baseline labels
# if not printed_flag:
# kmeans_labels, spectral_labels, agglomerative_labels = get_basline_cluster_labels(M, N2)
# # # print baseline results for testing purpose
# # print('kmeans: number_of_cluster_found:', len(set(kmeans_labels)))
# # print('Silhouette_score of kmeans_labels:', silhouette_score(M, kmeans_labels, metric='euclidean'))
# # print('spectral_labels: number_of_cluster_found:', len(set(spectral_labels)))
# # print('Silhouette_score of spectral_labels:', silhouette_score(M, spectral_labels, metric='euclidean'))
# # print('agglomerative_labels: number_of_cluster_found:', len(set(agglomerative_labels)))
# # print('Silhouette_score of agglomerative_labels:', silhouette_score(M, agglomerative_labels, metric='euclidean'))
# print distributions
# print_distributions_of_clusters(Y_true, label_pred, kmeans_labels, spectral_labels, agglomerative_labels, N2, printed_flag)
# print cluster similarities matrices
# print_cluster_similarities_matrices(Y_true, label_pred, kmeans_labels, spectral_labels, agglomerative_labels, printed_flag)
# printed_flag = True
sc_euclidean = -1
num_of_cluster = len(Counter(label_pred).keys())
if num_of_cluster != 1:
sc_euclidean = silhouette_score(M, label_pred, metric='euclidean', sample_size=100000)
# JSON object
dictionary = {
"iter": iter,
"N_steps": N_steps,
"beta": beta,
"alpha": alpha,
"batch_size": batch_size,
"initial_learning_rate_kernel": initial_learning_rate_kernel,
"learning_rate_kernel": learning_rate_kernel,
"mask_prob": mask_prob,
"mask_value": mask_value,
"dim_change_cycle": dim_change_cycle,
"initial_loss": str(initial_loss.numpy()),
"min_loss": str(min_loss.numpy()),
"optimum_lr": optimum_lr,
"optimum_epoch": optimum_epoch,
"init": init,
"number_of_cluster_found": num_of_cluster,
"training_time": end_train_time - start_train_time,
"assignment_time": end_test_time - start_test_time,
"num_of_cluster_after_adjustment": -1,
"nmi_after_adjustment": -1,
"ari_after_adjustment": -1,
"sc_cosine_after_adjustment": -1,
"sc_euclidean_after_adjustment": -1,
"nmi": normalized_mutual_info_score(Y_true, label_pred),
"ari": adjusted_rand_score(Y_true, label_pred),
"sc_euclidean": sc_euclidean
}
dic.append(dictionary)
# write dictionary to file
sort_key = 'sc_euclidean'
write_dic_to_file(dic, mem_dic, sort_key, directory, filename, suffix, ext)
end_time = time.time()
print("Took", (end_time - start_time)/3600, "hours to complete", total_config, "iterations with", N_init, "re-start for", filename, "dataset.")
| 17,473 | 56.669967 | 180 | py |
DenseUnet_Esophagus_Segmentation | DenseUnet_Esophagus_Segmentation-master/functions/networks/dense_unet2_attention.py | import tensorflow as tf
import SimpleITK as sitk
# import math as math
import numpy as np
import os
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt
import time
# !!
class _densenet_unet:
def __init__(self, densnet_unet_config,compression_coefficient, growth_rate, class_no=2):
print('create object _densenet_unet')
self.compres_coef = compression_coefficient
self.class_no = class_no
self.growth_rate = growth_rate
self.kernel_size1 = 1
self.kernel_size2 = 3
self.config=densnet_unet_config
self.log_ext = '_'+''.join(map(str, self.config)) + '_' + str(
self.compres_coef) + '_' + str(self.growth_rate)
self.seed=200
def transition_layer(self,
dense_out1,
transition_name,
conv_name,
is_training_bn,
conv_pool_name,
db_size,crop_size,
kernel_size=[1, 1, 1],
padding='same',
activation=None,
dilation_rate=(1, 1,1),
pool_size=[2, 2, 2],
strides=(2, 2, 2),
bn_flag = False):
with tf.name_scope(transition_name):
filter = int(dense_out1.get_shape()[4].value * self.compres_coef)
if bn_flag==False:
conv1 = tf.layers.conv3d(inputs=dense_out1, filters=filter, kernel_size=kernel_size, padding=padding,
activation=activation,
name=conv_name + self.log_ext, dilation_rate=dilation_rate)
else:
conv1 = tf.layers.conv3d(inputs=dense_out1, filters=filter, kernel_size=kernel_size, padding=padding,
activation=None,
name=conv_name + self.log_ext, dilation_rate=dilation_rate)
bn1 = tf.layers.batch_normalization(conv1, training=is_training_bn,renorm=False)
bn1 = tf.nn.leaky_relu(bn1)
conv1=bn1
cropped = conv1[:,
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2), :]
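            # 'cropped' keeps only the central crop_size^3 region of the feature
            # map; these centered crops (conc1/conc2) are what the decoder later
            # concatenates with its valid-padded transposed convolutions, so the
            # skip connections line up spatially.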
pool1 = tf.layers.max_pooling3d(inputs=conv1, pool_size=pool_size, strides=strides)
# conv_pool1 = tf.layers.conv3d(inputs=pool1, filters=filter, kernel_size=[3,3], padding='valid',
# activation=activation,
# name=conv_pool_name+self.log_ext, dilation_rate=dilation_rate)
return pool1,cropped
# ========================
def dense_block(self, input,
feature_size,
is_training_bn,
padding='same',
activation=None,
name='dense_sub_block',
flag=0,
concat_flag=0,
bn_flag=False,
dilation_rate=1):
if bn_flag==False:
with tf.name_scope(name):
db_conv1 = tf.layers.conv3d(input,
filters=feature_size[0] * 4,
kernel_size=self.kernel_size1,
padding=padding,
activation=activation)
db_conv2 = tf.layers.conv3d(db_conv1,
filters=feature_size[1],
kernel_size=self.kernel_size2,
padding=padding,
activation=activation)
else:
with tf.name_scope(name):
db_conv1 = tf.layers.conv3d(input,
filters=feature_size[0] * 4,
kernel_size=self.kernel_size1,
padding=padding,
activation=None)
bn1 = tf.layers.batch_normalization(db_conv1, training=is_training_bn,renorm=False)
bn1 = tf.nn.leaky_relu(bn1)
db_conv2 = tf.layers.conv3d(bn1,
filters=feature_size[1],
kernel_size=self.kernel_size2,
padding=padding,
activation=None,
dilation_rate=2)
bn2 = tf.layers.batch_normalization(db_conv2, training=is_training_bn,renorm=False)
bn2 = tf.nn.leaky_relu(bn2)
db_conv2=bn2
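        # Squeeze-and-excitation style channel attention: global max pooling
        # yields one scalar per channel, a two-layer bottleneck MLP (width
        # halved, then restored) followed by a sigmoid produces per-channel
        # gates in (0, 1), and the gates rescale db_conv2 channel-wise before
        # the dense concatenation.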
with tf.name_scope(name+'_attention'):
g_max_pool1 =tf.keras.layers.GlobalMaxPool3D(data_format='channels_last')(db_conv2)
dense1= tf.layers.dense(g_max_pool1, int(int(g_max_pool1.shape[-1])/2), tf.nn.relu)
dense2= tf.layers.dense(dense1, g_max_pool1.shape[-1], tf.nn.relu)
sigmoid= tf.nn.sigmoid(dense2)
result=tf.multiply(tf.expand_dims(tf.expand_dims(tf.expand_dims(sigmoid,1),1) ,1) ,db_conv2)
db_concat = tf.concat([input, result], 4)
return db_concat
# ========================
def dense_loop(self, loop, input, crop_size,
db_size,is_training_bn,
padding='same',
activation=None,
name='dense_block',
flag=0,
concat_flag=0,
feature_size=[],bn_flag=False,dilation_rate=1):
with tf.name_scope(name):
output = input
for i in range(loop):
output = self.dense_block(output,
feature_size=feature_size,
padding=padding,
activation=activation,
name='dense_sub_block' +self.log_ext+ str(i),
flag=flag,
concat_flag=concat_flag,
is_training_bn=is_training_bn,
bn_flag=bn_flag,
dilation_rate=dilation_rate)
cropped = output[:,
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2), :]
# #
# cropped = output[:,
# np.int32(db_size / 2) - np.int32(crop_size / 2) - 1:
# np.int32(db_size / 2) + np.int32(crop_size / 2),
# np.int32(db_size / 2) - np.int32(crop_size / 2) - 1:
# np.int32(db_size / 2) + np.int32(crop_size / 2),
# np.int32(db_size / 2) - np.int32(crop_size / 2) - 1:
# np.int32(db_size / 2) + np.int32(crop_size / 2), :]
return output, cropped
# ========================
def dens_net(self, image, is_training, dropout_rate1,dropout_rate2, dim,is_training_bn,dilation_rate=(1,1,1)):
# x = 527
# l2 = x / 2
# l3 = l2 / 2
# l4 = l3 / 2
#
# l4_1 = l4 - 2
#
# dl4 = int(l4_1) * 2 + 1
# dl4_1 = dl4 - 2
# dl3 = dl4_1 * 2 + 1
# dl3_1 = dl3 - 2
#
# dl2 = dl3_1 * 2 + 1
# dl2_1 = dl2 - 2
# dim2 = 63
# db_size1 = np.int32(dim2)
# db_size2 = np.int32(db_size1 / 2)
# db_size3 = np.int32(db_size2 / 2)
# crop_size1 = np.int32(((db_size3 - 2) * 2 + 1.0))
# crop_size2 = np.int32((crop_size1 - 2) * 2 + 1)
# db_size0 = 0
# crop_size0 = 0
db_size1 = tf.to_int32(dim)
db_size2 = tf.to_int32(db_size1 / 2)
db_size3 = tf.to_int32(db_size2 / 2)
crop_size1 = tf.add(tf.multiply(db_size3-2, 2), 1)
crop_size2 = tf.add(tf.multiply(tf.add(crop_size1, -2), 2), 1)
db_size0 = tf.to_int32(0)
crop_size0 = tf.to_int32(0)
activation=tf.nn.leaky_relu
self.seed+=1
with tf.Session() as s:
rnd = s.run(tf.random_uniform([1], 0, 5, dtype=tf.int32,seed=self.seed))#, seed=int(time.time())))
noisy_img = tf.cond(is_training,
lambda: image + tf.round(tf.random_normal(tf.shape(image), mean=0,
stddev=rnd,
seed=self.seed+2,#int(time.time()),
dtype=tf.float32))
, lambda: image)
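        # Training-time augmentation: rounded Gaussian noise with stddev 'rnd'
        # is added to the input only when is_training is true. Note that rnd is
        # evaluated once in a temporary Session while the graph is built, so
        # the noise level is fixed per constructed graph; only the noise values
        # themselves are resampled on each forward pass.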
conv0 = tf.layers.conv3d(inputs=noisy_img, filters=8, kernel_size=3,
padding='same',
activation=None,
name='conv_deconv_0' + self.log_ext,
dilation_rate=(1, 1, 1))
bn1 = tf.layers.batch_normalization(conv0, training=is_training_bn,renorm=False)
bn1 = tf.nn.leaky_relu(bn1)
conv1 = tf.layers.conv3d(inputs=bn1, filters=8, kernel_size=3,
padding='same',
activation=None,
name='conv_deconv_00' + self.log_ext,
dilation_rate=(1, 1, 1))
bn2 = tf.layers.batch_normalization(conv1, training=is_training_bn,renorm=False)
bn2 = tf.nn.leaky_relu(bn2)
# conv2 = tf.layers.conv3d(inputs=bn2, filters=8, kernel_size=3,
# padding='same',
# activation=None,
# name='conv_deconv_000' + self.log_ext,
# dilation_rate=(1, 1, 1))
# bn3 = tf.layers.batch_normalization(conv2, training=is_training_bn, renorm=False)
# bn3 = tf.nn.leaky_relu(bn3)
bn3= tf.concat([bn1, bn2], 4)
conv2 = tf.layers.conv3d(inputs=bn3, filters=int(bn3.shape[4].value* 0.75), kernel_size=1,
padding='same',
activation=None,
name='conv_deconv_000' + self.log_ext,
dilation_rate=(1, 1, 1))
bn4 = tf.layers.batch_normalization(conv2, training=is_training_bn, renorm=False)
bn4 = tf.nn.leaky_relu(bn4)
bn4 = tf.concat([noisy_img, bn4], 4)
[dense_out1, conc1] = self.dense_loop(loop=self.config[0],
input=bn4,
crop_size=crop_size2,
db_size=db_size1,
padding='same',
activation=activation,
name='dense_block_1'+self.log_ext,
concat_flag=1,
feature_size=[8,8],
is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=1)
[pool1, conc1] = self.transition_layer(dense_out1, 'transition_1',
conv_name='conv1'+self.log_ext,
conv_pool_name='conv_pool_name1'+self.log_ext,
db_size=db_size1, crop_size=crop_size2,
kernel_size=[1, 1, 1], padding='same',
activation=activation,
dilation_rate=(1, 1, 1),
pool_size=[2, 2, 2],
strides=(2,2,2),
is_training_bn=is_training_bn,
bn_flag=True)
# ========================
[dense_out2, conc2] = self.dense_loop(loop=self.config[1],
input=pool1,
crop_size=crop_size1,
db_size=db_size2,
padding='same',
activation=activation,
name='dense_block_2'+self.log_ext,
feature_size=[8,8],
is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=2)
[pool2,conc2] = self.transition_layer(dense_out2, 'transition_2',
conv_name='conv2'+self.log_ext,
conv_pool_name='conv_pool_name2'+self.log_ext,
db_size=db_size2, crop_size=crop_size1,
kernel_size=[1, 1, 1],
padding='same',
activation=activation,
dilation_rate=(1, 1, 1),
pool_size=[2, 2, 2],
strides=(2,2,2),
is_training_bn=is_training_bn,
bn_flag=True)
# ========================
[dense_out3, conc3] = self.dense_loop(loop=self.config[2],
input=pool2,
crop_size=crop_size0,
db_size=db_size0,
padding='same',
activation=activation,
name='dense_block_3'+self.log_ext,
feature_size=[8,8]
, is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=2)
conv1 = tf.layers.conv3d(inputs=dense_out3,
filters=int(dense_out3.shape[4].value),
kernel_size=[3, 3, 3],
padding='valid',
activation=None,
name='conv_deconv_1'+self.log_ext,
dilation_rate=(1, 1, 1))
bn2 = tf.layers.batch_normalization(conv1, training=is_training_bn,renorm=False)
bn2 = tf.nn.leaky_relu(bn2)
# ========================
deconv1 = tf.layers.conv3d_transpose(bn2,
filters=int(conv1.shape[4].value/2),
kernel_size=3,
strides=(2, 2, 2),
padding='valid',
use_bias=False)
conc11=tf.concat([conc2, deconv1], 4)
[dense_out5, conctmp] = self.dense_loop(loop=self.config[3],
input=conc11,
crop_size=crop_size0,
db_size=db_size0,
padding='same',
activation=activation,
name='dense_block_5'+self.log_ext,
feature_size=[8,8]
, is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=2)
conv2 = tf.layers.conv3d(inputs=dense_out5,
filters=int(dense_out5.shape[4].value/2),
kernel_size=3,
padding='valid',
activation=None,
name='conv_deconv_2'+self.log_ext,
dilation_rate=(1, 1, 1))
bn3 = tf.layers.batch_normalization(conv2, training=is_training_bn,renorm=False)
bn3 = tf.nn.leaky_relu(bn3)
# bn_conv2 = tf.layers.batch_normalization(conv2, training=is_training_bn,renorm=False)
# bn_conv2 = tf.nn.leaky_relu(bn_conv2)
# =========================================================
deconv2 = tf.layers.conv3d_transpose(bn3, filters=int(conv2.shape[4].value/2), kernel_size=[3, 3, 3], strides=(2, 2, 2),
padding='valid', use_bias=False)
conc22 = tf.concat([conc1, deconv2], 4)
[dense_out6, conctmp] = self.dense_loop(loop=self.config[4],
input=conc22,
crop_size=crop_size0,
db_size=db_size0,
padding='same',
activation=activation,
name='dense_block_6'+self.log_ext,
feature_size=[8,8],
is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=1)
conv3 = tf.layers.conv3d(inputs=dense_out6,
filters=int(dense_out6.shape[4].value / 2),
kernel_size=3,
padding='valid',
activation=None,
name='conv_deconv_tmp' + self.log_ext,
dilation_rate=(1, 1, 1))
bn4 = tf.layers.batch_normalization(conv3, training=is_training_bn,renorm=False)
bn4 = tf.nn.leaky_relu(bn4)
# bn_conv3=tf.layers.batch_normalization(conv3,training=is_training_bn,renorm=False)
# bn_conv3=tf.nn.leaky_relu(bn_conv3)
# =========================================================
# classification layer:
with tf.name_scope('classification_layer'):
# post_rm = tf.reduce_mean(pool3, [1, 2], name='global_avg_pool'+self.log_ext, keep_dims=True)
# fc1 = tf.layers.conv3d(conv3, filters=32, kernel_size=[3,3,3], padding='same', strides=(1, 1, 1),
# activation=activation, dilation_rate=(1, 1,1),name='fc1'+self.log_ext)
# dropout1 = tf.layers.dropout(inputs=fc1, rate=dropout_rate, training=is_training,name='droup_out1'+self.log_ext)
# fc2 = tf.layers.conv3d(dropout1, filters=128, kernel_size=[3, 3, 3], padding='same', strides=(1, 1, 1),
# activation=tf.nn.leaky_relu, dilation_rate=(1, 1, 1), name='fc2' + self.log_ext)
#
# dropout2 = tf.layers.dropout(inputs=fc2, rate=dropout_rate, training=is_training,
# name='droup_out2' + self.log_ext)
y = tf.layers.conv3d(bn4, filters=self.class_no, kernel_size=1, padding='same', strides=(1, 1, 1),
activation=None, dilation_rate=(1, 1,1), name='fc3'+self.log_ext)
print(' total number of variables %s' % (
np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))
'''h_fc1 = tf.contrib.layers.fully_connected(post_rm,20,activation_fn=tf.nn.leaky_relu)
#Fully connected layer #2
#h_fc2 = tf.layers.conv3d(h_fc1,512,[3,3],padding="valid", strides=(1,1),activation=tf.nn.leaky_relu, dilation_rate=(1, 1))
h_fc2 = tf.contrib.layers.fully_connected(h_fc1,8,activation_fn=tf.nn.leaky_relu)
#Fully connected layer #3
#y = tf.layers.conv3d(h_fc2,2,[3,3],padding="valid", strides=(1,1),activation=tf.nn.leaky_relu, dilation_rate=(1, 1))
y = tf.contrib.layers.fully_connected(h_fc2,class_no,activation_fn=tf.nn.leaky_relu)
'''
        return y  # ,dense_out1,dense_out2,dense_out3,dense_out5,dense_out6
| 21,684 | 48.965438 | 131 | py |
DenseUnet_Esophagus_Segmentation | DenseUnet_Esophagus_Segmentation-master/functions/networks/dense_unet2_attention_spatial.py | import tensorflow as tf
import SimpleITK as sitk
# import math as math
import numpy as np
import os
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt
import time
# !!
class _densenet_unet:
def __init__(self, densnet_unet_config,compression_coefficient, growth_rate, class_no=2):
print('create object _densenet_unet')
self.compres_coef = compression_coefficient
self.class_no = class_no
self.growth_rate = growth_rate
self.kernel_size1 = 1
self.kernel_size2 = 3
self.config=densnet_unet_config
self.log_ext = '_'+''.join(map(str, self.config)) + '_' + str(
self.compres_coef) + '_' + str(self.growth_rate)
self.seed=200
def transition_layer(self,
dense_out1,
transition_name,
conv_name,
is_training_bn,
conv_pool_name,
db_size,crop_size,
kernel_size=[1, 1, 1],
padding='same',
activation=None,
dilation_rate=(1, 1,1),
pool_size=[2, 2, 2],
strides=(2, 2, 2),
bn_flag = False):
with tf.name_scope(transition_name):
filter = int(dense_out1.get_shape()[4].value * self.compres_coef)
if bn_flag==False:
conv1 = tf.layers.conv3d(inputs=dense_out1, filters=filter, kernel_size=kernel_size, padding=padding,
activation=activation,
name=conv_name + self.log_ext, dilation_rate=dilation_rate)
else:
conv1 = tf.layers.conv3d(inputs=dense_out1, filters=filter, kernel_size=kernel_size, padding=padding,
activation=None,
name=conv_name + self.log_ext, dilation_rate=dilation_rate)
bn1 = tf.layers.batch_normalization(conv1, training=is_training_bn,renorm=False)
bn1 = tf.nn.leaky_relu(bn1)
conv1=bn1
cropped = conv1[:,
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2), :]
pool1 = tf.layers.max_pooling3d(inputs=conv1, pool_size=pool_size, strides=strides)
# conv_pool1 = tf.layers.conv3d(inputs=pool1, filters=filter, kernel_size=[3,3], padding='valid',
# activation=activation,
# name=conv_pool_name+self.log_ext, dilation_rate=dilation_rate)
return pool1,cropped
# ========================
def dense_block(self, input,
feature_size,
is_training_bn,
padding='same',
activation=None,
name='dense_sub_block',
flag=0,
concat_flag=0,
bn_flag=False,
dilation_rate=1):
if bn_flag==False:
with tf.name_scope(name):
db_conv1 = tf.layers.conv3d(input,
filters=feature_size[0] * 4,
kernel_size=self.kernel_size1,
padding=padding,
activation=activation)
db_conv2 = tf.layers.conv3d(db_conv1,
filters=feature_size[1],
kernel_size=self.kernel_size2,
padding=padding,
activation=activation)
else:
with tf.name_scope(name):
db_conv1 = tf.layers.conv3d(input,
filters=feature_size[0] * 4,
kernel_size=self.kernel_size1,
padding=padding,
activation=None)
bn1 = tf.layers.batch_normalization(db_conv1, training=is_training_bn,renorm=False)
bn1 = tf.nn.leaky_relu(bn1)
db_conv2 = tf.layers.conv3d(bn1,
filters=feature_size[1],
kernel_size=self.kernel_size2,
padding=padding,
activation=None,
dilation_rate=2)
bn2 = tf.layers.batch_normalization(db_conv2, training=is_training_bn,renorm=False)
bn2 = tf.nn.leaky_relu(bn2)
db_conv2=bn2
# with tf.name_scope(name+'_channel_attention'):
# g_max_pool1 =tf.keras.layers.GlobalMaxPool3D(data_format='channels_last')(db_conv2)
# dense1= tf.layers.dense(g_max_pool1, int(int(g_max_pool1.shape[-1])/2), tf.nn.relu)
# dense2= tf.layers.dense(dense1, g_max_pool1.shape[-1], tf.nn.relu)
# sigmoid1= tf.nn.sigmoid(dense2)
# result1=tf.multiply(tf.expand_dims(tf.expand_dims(tf.expand_dims(sigmoid1,1),1) ,1) ,db_conv2)
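        # CBAM-style spatial attention replaces the (commented) channel gate:
        # channel-wise mean and max maps are stacked, a single 3x3x3 convolution
        # followed by batch norm, leaky ReLU and a sigmoid collapses them to one
        # attention map, and that map rescales db_conv2 at every voxel before
        # the dense concatenation.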
with tf.name_scope(name + '_spatial_attention'):
ave_max_pool1 = tf.concat([tf.expand_dims(tf.reduce_mean(db_conv2,axis=-1),-1),tf.expand_dims(tf.reduce_max(db_conv2,-1),-1)],4)
ave_max_pool_conv = tf.layers.conv3d(ave_max_pool1,
filters=1,
kernel_size=3,
padding='same',
activation=None,
dilation_rate=1)
bn3 = tf.layers.batch_normalization(ave_max_pool_conv, training=is_training_bn, renorm=False)
bn3 = tf.nn.leaky_relu(bn3)
sigmoid2 = tf.nn.sigmoid(bn3)
result2 = tf.multiply(sigmoid2, db_conv2)
db_concat = tf.concat([input, result2], 4)
return db_concat
# ========================
def dense_loop(self, loop, input, crop_size,
db_size,is_training_bn,
padding='same',
activation=None,
name='dense_block',
flag=0,
concat_flag=0,
feature_size=[],bn_flag=False,dilation_rate=1):
with tf.name_scope(name):
output = input
for i in range(loop):
output = self.dense_block(output,
feature_size=feature_size,
padding=padding,
activation=activation,
name='dense_sub_block' +self.log_ext+ str(i),
flag=flag,
concat_flag=concat_flag,
is_training_bn=is_training_bn,
bn_flag=bn_flag,
dilation_rate=dilation_rate)
cropped = output[:,
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2), :]
# #
# cropped = output[:,
# np.int32(db_size / 2) - np.int32(crop_size / 2) - 1:
# np.int32(db_size / 2) + np.int32(crop_size / 2),
# np.int32(db_size / 2) - np.int32(crop_size / 2) - 1:
# np.int32(db_size / 2) + np.int32(crop_size / 2),
# np.int32(db_size / 2) - np.int32(crop_size / 2) - 1:
# np.int32(db_size / 2) + np.int32(crop_size / 2), :]
return output, cropped
# ========================
def dens_net(self, image, is_training, dropout_rate1,dropout_rate2, dim,is_training_bn,dilation_rate=(1,1,1)):
# x = 527
# l2 = x / 2
# l3 = l2 / 2
# l4 = l3 / 2
#
# l4_1 = l4 - 2
#
# dl4 = int(l4_1) * 2 + 1
# dl4_1 = dl4 - 2
# dl3 = dl4_1 * 2 + 1
# dl3_1 = dl3 - 2
#
# dl2 = dl3_1 * 2 + 1
# dl2_1 = dl2 - 2
# dim2 = 63
# db_size1 = np.int32(dim2)
# db_size2 = np.int32(db_size1 / 2)
# db_size3 = np.int32(db_size2 / 2)
# crop_size1 = np.int32(((db_size3 - 2) * 2 + 1.0))
# crop_size2 = np.int32((crop_size1 - 2) * 2 + 1)
# db_size0 = 0
# crop_size0 = 0
db_size1 = tf.to_int32(dim)
db_size2 = tf.to_int32(db_size1 / 2)
db_size3 = tf.to_int32(db_size2 / 2)
crop_size1 = tf.add(tf.multiply(db_size3-2, 2), 1)
crop_size2 = tf.add(tf.multiply(tf.add(crop_size1, -2), 2), 1)
db_size0 = tf.to_int32(0)
crop_size0 = tf.to_int32(0)
activation=tf.nn.leaky_relu
self.seed+=1
with tf.Session() as s:
rnd = s.run(tf.random_uniform([1], 0, 5, dtype=tf.int32,seed=self.seed))#, seed=int(time.time())))
noisy_img = tf.cond(is_training,
lambda: image + tf.round(tf.random_normal(tf.shape(image), mean=0,
stddev=rnd,
seed=self.seed+2,#int(time.time()),
dtype=tf.float32))
, lambda: image)
conv0 = tf.layers.conv3d(inputs=noisy_img, filters=8, kernel_size=3,
padding='same',
activation=None,
name='conv_deconv_0' + self.log_ext,
dilation_rate=(1, 1, 1))
bn1 = tf.layers.batch_normalization(conv0, training=is_training_bn,renorm=False)
bn1 = tf.nn.leaky_relu(bn1)
conv1 = tf.layers.conv3d(inputs=bn1, filters=8, kernel_size=3,
padding='same',
activation=None,
name='conv_deconv_00' + self.log_ext,
dilation_rate=(1, 1, 1))
bn2 = tf.layers.batch_normalization(conv1, training=is_training_bn,renorm=False)
bn2 = tf.nn.leaky_relu(bn2)
# conv2 = tf.layers.conv3d(inputs=bn2, filters=8, kernel_size=3,
# padding='same',
# activation=None,
# name='conv_deconv_000' + self.log_ext,
# dilation_rate=(1, 1, 1))
# bn3 = tf.layers.batch_normalization(conv2, training=is_training_bn, renorm=False)
# bn3 = tf.nn.leaky_relu(bn3)
bn3= tf.concat([bn1, bn2], 4)
conv2 = tf.layers.conv3d(inputs=bn3, filters=int(bn3.shape[4].value* 0.75), kernel_size=1,
padding='same',
activation=None,
name='conv_deconv_000' + self.log_ext,
dilation_rate=(1, 1, 1))
bn4 = tf.layers.batch_normalization(conv2, training=is_training_bn, renorm=False)
bn4 = tf.nn.leaky_relu(bn4)
bn4 = tf.concat([noisy_img, bn4], 4)
[dense_out1, conc1] = self.dense_loop(loop=self.config[0],
input=bn4,
crop_size=crop_size2,
db_size=db_size1,
padding='same',
activation=activation,
name='dense_block_1'+self.log_ext,
concat_flag=1,
feature_size=[8,8],
is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=1)
[pool1, conc1] = self.transition_layer(dense_out1, 'transition_1',
conv_name='conv1'+self.log_ext,
conv_pool_name='conv_pool_name1'+self.log_ext,
db_size=db_size1, crop_size=crop_size2,
kernel_size=[1, 1, 1], padding='same',
activation=activation,
dilation_rate=(1, 1, 1),
pool_size=[2, 2, 2],
strides=(2,2,2),
is_training_bn=is_training_bn,
bn_flag=True)
# ========================
[dense_out2, conc2] = self.dense_loop(loop=self.config[1],
input=pool1,
crop_size=crop_size1,
db_size=db_size2,
padding='same',
activation=activation,
name='dense_block_2'+self.log_ext,
feature_size=[8,8],
is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=2)
[pool2,conc2] = self.transition_layer(dense_out2, 'transition_2',
conv_name='conv2'+self.log_ext,
conv_pool_name='conv_pool_name2'+self.log_ext,
db_size=db_size2, crop_size=crop_size1,
kernel_size=[1, 1, 1],
padding='same',
activation=activation,
dilation_rate=(1, 1, 1),
pool_size=[2, 2, 2],
strides=(2,2,2),
is_training_bn=is_training_bn,
bn_flag=True)
# ========================
[dense_out3, conc3] = self.dense_loop(loop=self.config[2],
input=pool2,
crop_size=crop_size0,
db_size=db_size0,
padding='same',
activation=activation,
name='dense_block_3'+self.log_ext,
feature_size=[8,8]
, is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=2)
conv1 = tf.layers.conv3d(inputs=dense_out3,
filters=int(dense_out3.shape[4].value),
kernel_size=[3, 3, 3],
padding='valid',
activation=None,
name='conv_deconv_1'+self.log_ext,
dilation_rate=(1, 1, 1))
bn2 = tf.layers.batch_normalization(conv1, training=is_training_bn,renorm=False)
bn2 = tf.nn.leaky_relu(bn2)
# ========================
deconv1 = tf.layers.conv3d_transpose(bn2,
filters=int(conv1.shape[4].value/2),
kernel_size=3,
strides=(2, 2, 2),
padding='valid',
use_bias=False)
conc11=tf.concat([conc2, deconv1], 4)
[dense_out5, conctmp] = self.dense_loop(loop=self.config[3],
input=conc11,
crop_size=crop_size0,
db_size=db_size0,
padding='same',
activation=activation,
name='dense_block_5'+self.log_ext,
feature_size=[8,8]
, is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=2)
conv2 = tf.layers.conv3d(inputs=dense_out5,
filters=int(dense_out5.shape[4].value/2),
kernel_size=3,
padding='valid',
activation=None,
name='conv_deconv_2'+self.log_ext,
dilation_rate=(1, 1, 1))
bn3 = tf.layers.batch_normalization(conv2, training=is_training_bn,renorm=False)
bn3 = tf.nn.leaky_relu(bn3)
# =========================================================
deconv2 = tf.layers.conv3d_transpose(bn3, filters=int(conv2.shape[4].value/2), kernel_size=[3, 3, 3], strides=(2, 2, 2),
padding='valid', use_bias=False)
conc22 = tf.concat([conc1, deconv2], 4)
[dense_out6, conctmp] = self.dense_loop(loop=self.config[4],
input=conc22,
crop_size=crop_size0,
db_size=db_size0,
padding='same',
activation=activation,
name='dense_block_6'+self.log_ext,
feature_size=[8,8],
is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=1)
conv3 = tf.layers.conv3d(inputs=dense_out6,
filters=int(dense_out6.shape[4].value / 2),
kernel_size=3,
padding='valid',
activation=None,
name='conv_deconv_tmp' + self.log_ext,
dilation_rate=(1, 1, 1))
bn4 = tf.layers.batch_normalization(conv3, training=is_training_bn,renorm=False)
bn4 = tf.nn.leaky_relu(bn4)
# =========================================================
# classification layer:
with tf.name_scope('classification_layer'):
y = tf.layers.conv3d(bn4, filters=self.class_no, kernel_size=1, padding='same', strides=(1, 1, 1),
activation=None, dilation_rate=(1, 1,1), name='fc3'+self.log_ext)
print(' total number of variables %s' % (
np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))
        return y
| 22,489 | 49.313199 | 140 | py |
DenseUnet_Esophagus_Segmentation | DenseUnet_Esophagus_Segmentation-master/functions/networks/dense_unet2_attention_spatial_skip_ch_attention.py | import tensorflow as tf
import SimpleITK as sitk
import numpy as np
import os
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt
import time
class _densenet_unet:
def __init__(self, densnet_unet_config,compression_coefficient, growth_rate, class_no=2):
print('create object _densenet_unet')
self.compres_coef = compression_coefficient
self.class_no = class_no
self.growth_rate = growth_rate
self.kernel_size1 = 1
self.kernel_size2 = 3
self.config=densnet_unet_config
self.log_ext = '_'+''.join(map(str, self.config)) + '_' + str(
self.compres_coef) + '_' + str(self.growth_rate)
self.seed=200
def transition_layer(self,
dense_out1,
transition_name,
conv_name,
is_training_bn,
conv_pool_name,
db_size,crop_size,
kernel_size=[1, 1, 1],
padding='same',
activation=None,
dilation_rate=(1, 1,1),
pool_size=[2, 2, 2],
strides=(2, 2, 2),
bn_flag = False):
with tf.name_scope(transition_name):
filter = int(dense_out1.get_shape()[4].value * self.compres_coef)
if bn_flag==False:
conv1 = tf.layers.conv3d(inputs=dense_out1, filters=filter, kernel_size=kernel_size, padding=padding,
activation=activation,
name=conv_name + self.log_ext, dilation_rate=dilation_rate)
else:
conv1 = tf.layers.conv3d(inputs=dense_out1, filters=filter, kernel_size=kernel_size, padding=padding,
activation=None,
name=conv_name + self.log_ext, dilation_rate=dilation_rate)
bn1 = tf.layers.batch_normalization(conv1, training=is_training_bn,renorm=False)
bn1 = tf.nn.leaky_relu(bn1)
conv1=bn1
cropped = conv1[:,
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2), :]
pool1 = tf.layers.max_pooling3d(inputs=conv1, pool_size=pool_size, strides=strides)
return pool1,cropped
# ========================
def dense_block(self, input,
feature_size,
is_training_bn,
padding='same',
activation=None,
name='dense_sub_block',
flag=0,
concat_flag=0,
bn_flag=False,
dilation_rate=1):
if bn_flag==False:
with tf.name_scope(name):
db_conv1 = tf.layers.conv3d(input,
filters=feature_size[0] * 4,
kernel_size=self.kernel_size1,
padding=padding,
activation=activation)
db_conv2 = tf.layers.conv3d(db_conv1,
filters=feature_size[1],
kernel_size=self.kernel_size2,
padding=padding,
activation=activation)
else:
with tf.name_scope(name):
db_conv1 = tf.layers.conv3d(input,
filters=feature_size[0] * 4,
kernel_size=self.kernel_size1,
padding=padding,
activation=None)
bn1 = tf.layers.batch_normalization(db_conv1, training=is_training_bn,renorm=False)
bn1 = tf.nn.leaky_relu(bn1)
db_conv2 = tf.layers.conv3d(bn1,
filters=feature_size[1],
kernel_size=self.kernel_size2,
padding=padding,
activation=None,
dilation_rate=2)
bn2 = tf.layers.batch_normalization(db_conv2, training=is_training_bn,renorm=False)
bn2 = tf.nn.leaky_relu(bn2)
db_conv2=bn2
with tf.name_scope(name + '_spatial_attention'):
ave_max_pool1 = tf.concat([tf.expand_dims(tf.reduce_mean(db_conv2,axis=-1),-1),tf.expand_dims(tf.reduce_max(db_conv2,-1),-1)],4)
ave_max_pool_conv = tf.layers.conv3d(ave_max_pool1,
filters=1,
kernel_size=3,
padding='same',
activation=None,
dilation_rate=1)
bn3 = tf.layers.batch_normalization(ave_max_pool_conv, training=is_training_bn, renorm=False)
bn3 = tf.nn.leaky_relu(bn3)
sigmoid2 = tf.nn.sigmoid(bn3)
result2 = tf.multiply(sigmoid2, db_conv2)
db_concat = tf.concat([input, result2], 4)
return db_concat
# ========================
def dense_loop(self, loop, input, crop_size,
db_size,is_training_bn,
padding='same',
activation=None,
name='dense_block',
flag=0,
concat_flag=0,
feature_size=[],bn_flag=False,dilation_rate=1):
with tf.name_scope(name):
output = input
for i in range(loop):
output = self.dense_block(output,
feature_size=feature_size,
padding=padding,
activation=activation,
name='dense_sub_block' +self.log_ext+ str(i),
flag=flag,
concat_flag=concat_flag,
is_training_bn=is_training_bn,
bn_flag=bn_flag,
dilation_rate=dilation_rate)
cropped = output[:,
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2), :]
return output, cropped
def spatial_attention(self,input,is_training_bn,name):
with tf.name_scope(name+'skip_attention'):
ave_max_pool = tf.concat([tf.expand_dims(tf.reduce_mean(input, axis=-1), -1),
tf.expand_dims(tf.reduce_max(input, -1), -1)], 4)
ave_max_pool_conv = tf.layers.conv3d(ave_max_pool,
filters=1,
kernel_size=3,
padding='same',
activation=None,
dilation_rate=1)
bn = tf.layers.batch_normalization(ave_max_pool_conv, training=is_training_bn, renorm=False)
bn = tf.nn.leaky_relu(bn)
sigmoid = tf.nn.sigmoid(bn)
skip_att = tf.multiply(sigmoid, input)
return skip_att
def channel_attention(self,input,name):
with tf.name_scope(name+'_channel_attention'):
g_max_pool =tf.keras.layers.GlobalMaxPool3D(data_format='channels_last')(input)
dense1= tf.layers.dense(g_max_pool, int(int(g_max_pool.shape[-1])/2), tf.nn.relu)
dense2= tf.layers.dense(dense1, g_max_pool.shape[-1], tf.nn.relu)
sigmoid1= tf.nn.sigmoid(dense2)
result=tf.multiply(tf.expand_dims(tf.expand_dims(tf.expand_dims(sigmoid1,1),1) ,1) ,input)
return result
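    # --- Illustrative sketch (editor addition, not part of the original file) ---
    # channel_attention above is a squeeze-and-excitation gate:
    #   s = sigmoid(relu(W2 @ relu(W1 @ GlobalMaxPool(x)))),  out = x * s.
    # A minimal NumPy version of the gating arithmetic is sketched below; the
    # method name and the hypothetical weight matrices w1 (C x C/2) and
    # w2 (C/2 x C) are assumptions for illustration only.
    @staticmethod
    def _channel_gate_demo(x, w1, w2):
        """x: array of shape (N, D, H, W, C); returns the gated features."""
        s = x.max(axis=(1, 2, 3))                          # global max pool -> (N, C)
        s = np.maximum(s @ w1, 0)                          # dense + ReLU
        s = 1.0 / (1.0 + np.exp(-np.maximum(s @ w2, 0)))   # dense + ReLU + sigmoid
        return x * s[:, None, None, None, :]               # broadcast over channels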
# ========================
def dens_net(self, image, is_training, dropout_rate1,dropout_rate2, dim,is_training_bn,dilation_rate=(1,1,1)):
        # U-Net crop-size arithmetic: each encoder level halves the spatial
        # size, and each decoder level maps n -> 2*n + 1 via a transposed
        # convolution after losing 2 voxels to a 'valid' 3x3x3 convolution,
        # so crop_size1 = (db_size3 - 2) * 2 + 1 and
        # crop_size2 = (crop_size1 - 2) * 2 + 1 below.
db_size1 = tf.to_int32(dim)
db_size2 = tf.to_int32(db_size1 / 2)
db_size3 = tf.to_int32(db_size2 / 2)
crop_size1 = tf.add(tf.multiply(db_size3-2, 2), 1)
crop_size2 = tf.add(tf.multiply(tf.add(crop_size1, -2), 2), 1)
db_size0 = tf.to_int32(0)
crop_size0 = tf.to_int32(0)
activation=tf.nn.leaky_relu
self.seed+=1
        # NOTE: opening a temporary Session here evaluates the random op once,
        # so the noise stddev is sampled at graph-construction time and then
        # baked into the graph as a constant.
        with tf.Session() as s:
            rnd = s.run(tf.random_uniform([1], 0, 5, dtype=tf.int32, seed=self.seed))
        # During training, add rounded Gaussian noise to the input image.
        noisy_img = tf.cond(is_training,
                            lambda: image + tf.round(tf.random_normal(tf.shape(image), mean=0,
                                                                      stddev=rnd,
                                                                      seed=self.seed + 2,
                                                                      dtype=tf.float32)),
                            lambda: image)
conv0 = tf.layers.conv3d(inputs=noisy_img, filters=8, kernel_size=3,
padding='same',
activation=None,
name='conv_deconv_0' + self.log_ext,
dilation_rate=(1, 1, 1))
bn1 = tf.layers.batch_normalization(conv0, training=is_training_bn,renorm=False)
bn1 = tf.nn.leaky_relu(bn1)
conv1 = tf.layers.conv3d(inputs=bn1, filters=8, kernel_size=3,
padding='same',
activation=None,
name='conv_deconv_00' + self.log_ext,
dilation_rate=(1, 1, 1))
bn2 = tf.layers.batch_normalization(conv1, training=is_training_bn,renorm=False)
bn2 = tf.nn.leaky_relu(bn2)
bn3= tf.concat([bn1, bn2], 4)
conv2 = tf.layers.conv3d(inputs=bn3, filters=int(bn3.shape[4].value* 0.75), kernel_size=1,
padding='same',
activation=None,
name='conv_deconv_000' + self.log_ext,
dilation_rate=(1, 1, 1))
bn4 = tf.layers.batch_normalization(conv2, training=is_training_bn, renorm=False)
bn4 = tf.nn.leaky_relu(bn4)
bn4 = tf.concat([noisy_img, bn4], 4)
[dense_out1, conc1] = self.dense_loop(loop=self.config[0],
input=bn4,
crop_size=crop_size2,
db_size=db_size1,
padding='same',
activation=activation,
name='dense_block_1'+self.log_ext,
concat_flag=1,
feature_size=[8,8],
is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=1)
[pool1, conc1] = self.transition_layer(dense_out1, 'transition_1',
conv_name='conv1'+self.log_ext,
conv_pool_name='conv_pool_name1'+self.log_ext,
db_size=db_size1, crop_size=crop_size2,
kernel_size=[1, 1, 1], padding='same',
activation=activation,
dilation_rate=(1, 1, 1),
pool_size=[2, 2, 2],
strides=(2,2,2),
is_training_bn=is_training_bn,
bn_flag=True)
# ========================
[dense_out2, conc2] = self.dense_loop(loop=self.config[1],
input=pool1,
crop_size=crop_size1,
db_size=db_size2,
padding='same',
activation=activation,
name='dense_block_2'+self.log_ext,
feature_size=[8,8],
is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=2)
[pool2,conc2] = self.transition_layer(dense_out2, 'transition_2',
conv_name='conv2'+self.log_ext,
conv_pool_name='conv_pool_name2'+self.log_ext,
db_size=db_size2, crop_size=crop_size1,
kernel_size=[1, 1, 1],
padding='same',
activation=activation,
dilation_rate=(1, 1, 1),
pool_size=[2, 2, 2],
strides=(2,2,2),
is_training_bn=is_training_bn,
bn_flag=True)
# ========================
[dense_out3, conc3] = self.dense_loop(loop=self.config[2],
input=pool2,
crop_size=crop_size0,
db_size=db_size0,
padding='same',
activation=activation,
name='dense_block_3'+self.log_ext,
feature_size=[8,8]
, is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=2)
conv1 = tf.layers.conv3d(inputs=dense_out3,
filters=int(dense_out3.shape[4].value),
kernel_size=[3, 3, 3],
padding='valid',
activation=None,
name='conv_deconv_1'+self.log_ext,
dilation_rate=(1, 1, 1))
bn2 = tf.layers.batch_normalization(conv1, training=is_training_bn,renorm=False)
bn2 = tf.nn.leaky_relu(bn2)
# ========================
deconv1 = tf.layers.conv3d_transpose(bn2,
filters=int(conv1.shape[4].value/2),
kernel_size=3,
strides=(2, 2, 2),
padding='valid',
use_bias=False)
skip_attention1 = self.channel_attention( input=conc2, name='skip_ch_attention1')
conc11=tf.concat([skip_attention1, deconv1], 4)
[dense_out5, conctmp] = self.dense_loop(loop=self.config[3],
input=conc11,
crop_size=crop_size0,
db_size=db_size0,
padding='same',
activation=activation,
name='dense_block_5'+self.log_ext,
feature_size=[8,8]
, is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=2)
conv2 = tf.layers.conv3d(inputs=dense_out5,
filters=int(dense_out5.shape[4].value/2),
kernel_size=3,
padding='valid',
activation=None,
name='conv_deconv_2'+self.log_ext,
dilation_rate=(1, 1, 1))
bn3 = tf.layers.batch_normalization(conv2, training=is_training_bn,renorm=False)
bn3 = tf.nn.leaky_relu(bn3)
# =========================================================
deconv2 = tf.layers.conv3d_transpose(bn3, filters=int(conv2.shape[4].value/2), kernel_size=[3, 3, 3], strides=(2, 2, 2),
padding='valid', use_bias=False)
        skip_attention2 = self.channel_attention(input=conc1, name='skip_ch_attention2')  # fixed: was duplicated 'skip_ch_attention1'
conc22 = tf.concat([skip_attention2, deconv2], 4)
[dense_out6, conctmp] = self.dense_loop(loop=self.config[4],
input=conc22,
crop_size=crop_size0,
db_size=db_size0,
padding='same',
activation=activation,
name='dense_block_6'+self.log_ext,
feature_size=[8,8],
is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=1)
conv3 = tf.layers.conv3d(inputs=dense_out6,
filters=int(dense_out6.shape[4].value / 2),
kernel_size=3,
padding='valid',
activation=None,
name='conv_deconv_tmp' + self.log_ext,
dilation_rate=(1, 1, 1))
bn4 = tf.layers.batch_normalization(conv3, training=is_training_bn,renorm=False)
bn4 = tf.nn.leaky_relu(bn4)
# =========================================================
# classification layer:
with tf.name_scope('classification_layer'):
y = tf.layers.conv3d(bn4, filters=self.class_no, kernel_size=1, padding='same', strides=(1, 1, 1),
activation=None, dilation_rate=(1, 1,1), name='fc3'+self.log_ext)
print(' total number of variables %s' % (
np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))
        return y
| 24,155 | 49.962025 | 140 | py |
DenseUnet_Esophagus_Segmentation | DenseUnet_Esophagus_Segmentation-master/functions/networks/dense_unet2_attention_spatial_skip_attention.py | import tensorflow as tf
import SimpleITK as sitk
import numpy as np
import os
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt
import time
class _densenet_unet:
def __init__(self, densnet_unet_config,compression_coefficient, growth_rate, class_no=2):
print('create object _densenet_unet')
self.compres_coef = compression_coefficient
self.class_no = class_no
self.growth_rate = growth_rate
self.kernel_size1 = 1
self.kernel_size2 = 3
self.config=densnet_unet_config
self.log_ext = '_'+''.join(map(str, self.config)) + '_' + str(
self.compres_coef) + '_' + str(self.growth_rate)
self.seed=200
def transition_layer(self,
dense_out1,
transition_name,
conv_name,
is_training_bn,
conv_pool_name,
db_size,crop_size,
kernel_size=[1, 1, 1],
padding='same',
activation=None,
dilation_rate=(1, 1,1),
pool_size=[2, 2, 2],
strides=(2, 2, 2),
bn_flag = False):
with tf.name_scope(transition_name):
filter = int(dense_out1.get_shape()[4].value * self.compres_coef)
if bn_flag==False:
conv1 = tf.layers.conv3d(inputs=dense_out1, filters=filter, kernel_size=kernel_size, padding=padding,
activation=activation,
name=conv_name + self.log_ext, dilation_rate=dilation_rate)
else:
conv1 = tf.layers.conv3d(inputs=dense_out1, filters=filter, kernel_size=kernel_size, padding=padding,
activation=None,
name=conv_name + self.log_ext, dilation_rate=dilation_rate)
bn1 = tf.layers.batch_normalization(conv1, training=is_training_bn,renorm=False)
bn1 = tf.nn.leaky_relu(bn1)
conv1=bn1
cropped = conv1[:,
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2), :]
pool1 = tf.layers.max_pooling3d(inputs=conv1, pool_size=pool_size, strides=strides)
return pool1,cropped
# ========================
def dense_block(self, input,
feature_size,
is_training_bn,
padding='same',
activation=None,
name='dense_sub_block',
flag=0,
concat_flag=0,
bn_flag=False,
dilation_rate=1):
if bn_flag==False:
with tf.name_scope(name):
db_conv1 = tf.layers.conv3d(input,
filters=feature_size[0] * 4,
kernel_size=self.kernel_size1,
padding=padding,
activation=activation)
db_conv2 = tf.layers.conv3d(db_conv1,
filters=feature_size[1],
kernel_size=self.kernel_size2,
padding=padding,
activation=activation)
else:
with tf.name_scope(name):
db_conv1 = tf.layers.conv3d(input,
filters=feature_size[0] * 4,
kernel_size=self.kernel_size1,
padding=padding,
activation=None)
bn1 = tf.layers.batch_normalization(db_conv1, training=is_training_bn,renorm=False)
bn1 = tf.nn.leaky_relu(bn1)
db_conv2 = tf.layers.conv3d(bn1,
filters=feature_size[1],
kernel_size=self.kernel_size2,
padding=padding,
activation=None,
dilation_rate=2)
bn2 = tf.layers.batch_normalization(db_conv2, training=is_training_bn,renorm=False)
bn2 = tf.nn.leaky_relu(bn2)
db_conv2=bn2
with tf.name_scope(name + '_spatial_attention'):
ave_max_pool1 = tf.concat([tf.expand_dims(tf.reduce_mean(db_conv2,axis=-1),-1),tf.expand_dims(tf.reduce_max(db_conv2,-1),-1)],4)
ave_max_pool_conv = tf.layers.conv3d(ave_max_pool1,
filters=1,
kernel_size=3,
padding='same',
activation=None,
dilation_rate=1)
bn3 = tf.layers.batch_normalization(ave_max_pool_conv, training=is_training_bn, renorm=False)
bn3 = tf.nn.leaky_relu(bn3)
sigmoid2 = tf.nn.sigmoid(bn3)
result2 = tf.multiply(sigmoid2, db_conv2)
db_concat = tf.concat([input, result2], 4)
return db_concat
# ========================
def dense_loop(self, loop, input, crop_size,
db_size,is_training_bn,
padding='same',
activation=None,
name='dense_block',
flag=0,
concat_flag=0,
feature_size=[],bn_flag=False,dilation_rate=1):
with tf.name_scope(name):
output = input
for i in range(loop):
output = self.dense_block(output,
feature_size=feature_size,
padding=padding,
activation=activation,
name='dense_sub_block' +self.log_ext+ str(i),
flag=flag,
concat_flag=concat_flag,
is_training_bn=is_training_bn,
bn_flag=bn_flag,
dilation_rate=dilation_rate)
cropped = output[:,
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2), :]
return output, cropped
def spatial_attention(self,input,is_training_bn,name):
with tf.name_scope(name+'skip_attention'):
ave_max_pool = tf.concat([tf.expand_dims(tf.reduce_mean(input, axis=-1), -1),
tf.expand_dims(tf.reduce_max(input, -1), -1)], 4)
ave_max_pool_conv = tf.layers.conv3d(ave_max_pool,
filters=1,
kernel_size=3,
padding='same',
activation=None,
dilation_rate=1)
bn = tf.layers.batch_normalization(ave_max_pool_conv, training=is_training_bn, renorm=False)
bn = tf.nn.leaky_relu(bn)
sigmoid = tf.nn.sigmoid(bn)
skip_att = tf.multiply(sigmoid, input)
return skip_att
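    # --- Illustrative sketch (editor addition, not part of the original file) ---
    # spatial_attention above computes a CBAM-style spatial gate: per voxel,
    #   a = sigmoid(BN(Conv3D([mean_c(x), max_c(x)]))),  out = x * a.
    # The channel-pooling step can be written in NumPy as below (the learned
    # 3x3x3 convolution and batch norm are omitted); the method name is an
    # assumption for illustration only.
    @staticmethod
    def _spatial_pool_demo(x):
        """x: array of shape (N, D, H, W, C); returns (N, D, H, W, 2)."""
        avg = np.mean(x, axis=-1, keepdims=True)   # channel-wise mean map
        mx = np.max(x, axis=-1, keepdims=True)     # channel-wise max map
        return np.concatenate([avg, mx], axis=-1)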
# ========================
def dens_net(self, image, is_training, dropout_rate1,dropout_rate2, dim,is_training_bn,dilation_rate=(1,1,1)):
        # U-Net crop-size arithmetic: each encoder level halves the spatial
        # size, and each decoder level maps n -> 2*n + 1 via a transposed
        # convolution after losing 2 voxels to a 'valid' 3x3x3 convolution,
        # so crop_size1 = (db_size3 - 2) * 2 + 1 and
        # crop_size2 = (crop_size1 - 2) * 2 + 1 below.
db_size1 = tf.to_int32(dim)
db_size2 = tf.to_int32(db_size1 / 2)
db_size3 = tf.to_int32(db_size2 / 2)
crop_size1 = tf.add(tf.multiply(db_size3-2, 2), 1)
crop_size2 = tf.add(tf.multiply(tf.add(crop_size1, -2), 2), 1)
db_size0 = tf.to_int32(0)
crop_size0 = tf.to_int32(0)
activation=tf.nn.leaky_relu
self.seed+=1
        # NOTE: opening a temporary Session here evaluates the random op once,
        # so the noise stddev is sampled at graph-construction time and then
        # baked into the graph as a constant.
        with tf.Session() as s:
            rnd = s.run(tf.random_uniform([1], 0, 5, dtype=tf.int32, seed=self.seed))
        # During training, add rounded Gaussian noise to the input image.
        noisy_img = tf.cond(is_training,
                            lambda: image + tf.round(tf.random_normal(tf.shape(image), mean=0,
                                                                      stddev=rnd,
                                                                      seed=self.seed + 2,
                                                                      dtype=tf.float32)),
                            lambda: image)
conv0 = tf.layers.conv3d(inputs=noisy_img, filters=8, kernel_size=3,
padding='same',
activation=None,
name='conv_deconv_0' + self.log_ext,
dilation_rate=(1, 1, 1))
bn1 = tf.layers.batch_normalization(conv0, training=is_training_bn,renorm=False)
bn1 = tf.nn.leaky_relu(bn1)
conv1 = tf.layers.conv3d(inputs=bn1, filters=8, kernel_size=3,
padding='same',
activation=None,
name='conv_deconv_00' + self.log_ext,
dilation_rate=(1, 1, 1))
bn2 = tf.layers.batch_normalization(conv1, training=is_training_bn,renorm=False)
bn2 = tf.nn.leaky_relu(bn2)
bn3= tf.concat([bn1, bn2], 4)
conv2 = tf.layers.conv3d(inputs=bn3, filters=int(bn3.shape[4].value* 0.75), kernel_size=1,
padding='same',
activation=None,
name='conv_deconv_000' + self.log_ext,
dilation_rate=(1, 1, 1))
bn4 = tf.layers.batch_normalization(conv2, training=is_training_bn, renorm=False)
bn4 = tf.nn.leaky_relu(bn4)
bn4 = tf.concat([noisy_img, bn4], 4)
[dense_out1, conc1] = self.dense_loop(loop=self.config[0],
input=bn4,
crop_size=crop_size2,
db_size=db_size1,
padding='same',
activation=activation,
name='dense_block_1'+self.log_ext,
concat_flag=1,
feature_size=[8,8],
is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=1)
[pool1, conc1] = self.transition_layer(dense_out1, 'transition_1',
conv_name='conv1'+self.log_ext,
conv_pool_name='conv_pool_name1'+self.log_ext,
db_size=db_size1, crop_size=crop_size2,
kernel_size=[1, 1, 1], padding='same',
activation=activation,
dilation_rate=(1, 1, 1),
pool_size=[2, 2, 2],
strides=(2,2,2),
is_training_bn=is_training_bn,
bn_flag=True)
# ========================
[dense_out2, conc2] = self.dense_loop(loop=self.config[1],
input=pool1,
crop_size=crop_size1,
db_size=db_size2,
padding='same',
activation=activation,
name='dense_block_2'+self.log_ext,
feature_size=[8,8],
is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=2)
[pool2,conc2] = self.transition_layer(dense_out2, 'transition_2',
conv_name='conv2'+self.log_ext,
conv_pool_name='conv_pool_name2'+self.log_ext,
db_size=db_size2, crop_size=crop_size1,
kernel_size=[1, 1, 1],
padding='same',
activation=activation,
dilation_rate=(1, 1, 1),
pool_size=[2, 2, 2],
strides=(2,2,2),
is_training_bn=is_training_bn,
bn_flag=True)
# ========================
[dense_out3, conc3] = self.dense_loop(loop=self.config[2],
input=pool2,
crop_size=crop_size0,
db_size=db_size0,
padding='same',
activation=activation,
name='dense_block_3'+self.log_ext,
feature_size=[8,8]
, is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=2)
conv1 = tf.layers.conv3d(inputs=dense_out3,
filters=int(dense_out3.shape[4].value),
kernel_size=[3, 3, 3],
padding='valid',
activation=None,
name='conv_deconv_1'+self.log_ext,
dilation_rate=(1, 1, 1))
bn2 = tf.layers.batch_normalization(conv1, training=is_training_bn,renorm=False)
bn2 = tf.nn.leaky_relu(bn2)
# ========================
deconv1 = tf.layers.conv3d_transpose(bn2,
filters=int(conv1.shape[4].value/2),
kernel_size=3,
strides=(2, 2, 2),
padding='valid',
use_bias=False)
skip_attention1 = self.spatial_attention( input=conc2, is_training_bn=is_training_bn, name='skip_attention1')
conc11=tf.concat([skip_attention1, deconv1], 4)
[dense_out5, conctmp] = self.dense_loop(loop=self.config[3],
input=conc11,
crop_size=crop_size0,
db_size=db_size0,
padding='same',
activation=activation,
name='dense_block_5'+self.log_ext,
feature_size=[8,8]
, is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=2)
conv2 = tf.layers.conv3d(inputs=dense_out5,
filters=int(dense_out5.shape[4].value/2),
kernel_size=3,
padding='valid',
activation=None,
name='conv_deconv_2'+self.log_ext,
dilation_rate=(1, 1, 1))
bn3 = tf.layers.batch_normalization(conv2, training=is_training_bn,renorm=False)
bn3 = tf.nn.leaky_relu(bn3)
# =========================================================
deconv2 = tf.layers.conv3d_transpose(bn3, filters=int(conv2.shape[4].value/2), kernel_size=[3, 3, 3], strides=(2, 2, 2),
padding='valid', use_bias=False)
skip_attention2 = self.spatial_attention(input=conc1, is_training_bn=is_training_bn, name='skip_attention2')
conc22 = tf.concat([skip_attention2, deconv2], 4)
[dense_out6, conctmp] = self.dense_loop(loop=self.config[4],
input=conc22,
crop_size=crop_size0,
db_size=db_size0,
padding='same',
activation=activation,
name='dense_block_6'+self.log_ext,
feature_size=[8,8],
is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=1)
conv3 = tf.layers.conv3d(inputs=dense_out6,
filters=int(dense_out6.shape[4].value / 2),
kernel_size=3,
padding='valid',
activation=None,
name='conv_deconv_tmp' + self.log_ext,
dilation_rate=(1, 1, 1))
bn4 = tf.layers.batch_normalization(conv3, training=is_training_bn,renorm=False)
bn4 = tf.nn.leaky_relu(bn4)
# =========================================================
# classification layer:
with tf.name_scope('classification_layer'):
y = tf.layers.conv3d(bn4, filters=self.class_no, kernel_size=1, padding='same', strides=(1, 1, 1),
activation=None, dilation_rate=(1, 1,1), name='fc3'+self.log_ext)
print(' total number of variables %s' % (
np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))
        return y
| 23,677 | 49.811159 | 140 | py |
DenseUnet_Esophagus_Segmentation | DenseUnet_Esophagus_Segmentation-master/functions/networks/dense_unet2_attention_channel_spatial.py | import tensorflow as tf
import SimpleITK as sitk
import numpy as np
import os
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt
import time
class _densenet_unet:
def __init__(self, densnet_unet_config,compression_coefficient, growth_rate, class_no=2):
print('create object _densenet_unet')
self.compres_coef = compression_coefficient
self.class_no = class_no
self.growth_rate = growth_rate
self.kernel_size1 = 1
self.kernel_size2 = 3
self.config=densnet_unet_config
self.log_ext = '_'+''.join(map(str, self.config)) + '_' + str(
self.compres_coef) + '_' + str(self.growth_rate)
self.seed=200
def transition_layer(self,
dense_out1,
transition_name,
conv_name,
is_training_bn,
db_size,crop_size,
kernel_size=[1, 1, 1],
padding='same',
activation=None,
dilation_rate=(1, 1,1),
pool_size=[2, 2, 2],
strides=(2, 2, 2),
bn_flag = False):
        '''
        :param dense_out1: output of the previous dense block
        :param transition_name: scope name
        :param conv_name: name of the conv layer
        :param is_training_bn: batch-norm training flag
        :param db_size: spatial size at this level (used for the U-Net crop)
        :param crop_size: size of the cropped skip tensor (U-Net structure)
        :param kernel_size: kernel size
        :param padding: either 'same' or 'valid'
        :param activation: activation function
        :param dilation_rate: dilation rate
        :param pool_size: pooling window size
        :param strides: stride used for downsampling
        :param bn_flag: whether to use batch normalization
        :return: (pooled output, cropped skip tensor)
        '''
with tf.name_scope(transition_name):
filter = int(dense_out1.get_shape()[4].value * self.compres_coef)
if bn_flag==False:
conv1 = tf.layers.conv3d(inputs=dense_out1, filters=filter, kernel_size=kernel_size, padding=padding,
activation=activation,
name=conv_name + self.log_ext, dilation_rate=dilation_rate)
else:
conv1 = tf.layers.conv3d(inputs=dense_out1, filters=filter, kernel_size=kernel_size, padding=padding,
activation=None,
name=conv_name + self.log_ext, dilation_rate=dilation_rate)
bn1 = tf.layers.batch_normalization(conv1, training=is_training_bn,renorm=False)
bn1 = tf.nn.leaky_relu(bn1)
conv1=bn1
cropped = conv1[:,
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2), :]
pool1 = tf.layers.max_pooling3d(inputs=conv1, pool_size=pool_size, strides=strides)
return pool1,cropped
# ========================
def dense_block(self, input,
feature_size,
is_training_bn,
padding='same',
activation=None,
name='dense_sub_block',
flag=0,
concat_flag=0,
bn_flag=False,
dilation_rate=1):
        '''
        :param input: output of the previous block
        :param feature_size: list with the sizes of the feature maps
        :param is_training_bn: batch-norm training flag
        :param padding: either 'same' or 'valid'
        :param activation: activation function
        :param name: scope name
        :param flag: unused
        :param concat_flag: unused
        :param bn_flag: whether to use batch normalization
        :param dilation_rate: unused (the 3x3x3 conv uses a hard-coded dilation of 2)
        :return: the block input concatenated with the attention-gated features
        '''
if bn_flag==False:
with tf.name_scope(name):
db_conv1 = tf.layers.conv3d(input,
filters=feature_size[0] * 4,
kernel_size=self.kernel_size1,
padding=padding,
activation=activation)
db_conv2 = tf.layers.conv3d(db_conv1,
filters=feature_size[1],
kernel_size=self.kernel_size2,
padding=padding,
activation=activation)
else:
with tf.name_scope(name):
db_conv1 = tf.layers.conv3d(input,
filters=feature_size[0] * 4,
kernel_size=self.kernel_size1,
padding=padding,
activation=None)
bn1 = tf.layers.batch_normalization(db_conv1, training=is_training_bn,renorm=False)
bn1 = tf.nn.leaky_relu(bn1)
db_conv2 = tf.layers.conv3d(bn1,
filters=feature_size[1],
kernel_size=self.kernel_size2,
padding=padding,
activation=None,
dilation_rate=2)
bn2 = tf.layers.batch_normalization(db_conv2, training=is_training_bn,renorm=False)
bn2 = tf.nn.leaky_relu(bn2)
db_conv2=bn2
with tf.name_scope(name+'_channel_attention'):
g_max_pool1 =tf.keras.layers.GlobalMaxPool3D(data_format='channels_last')(db_conv2)
dense1= tf.layers.dense(g_max_pool1, int(int(g_max_pool1.shape[-1])/2), tf.nn.relu)
dense2= tf.layers.dense(dense1, g_max_pool1.shape[-1], tf.nn.relu)
sigmoid1= tf.nn.sigmoid(dense2)
result1=tf.multiply(tf.expand_dims(tf.expand_dims(tf.expand_dims(sigmoid1,1),1) ,1) ,db_conv2)
with tf.name_scope(name + '_spatial_attention'):
ave_max_pool1 = tf.concat([tf.expand_dims(tf.reduce_mean(result1,axis=-1),-1),tf.expand_dims(tf.reduce_max(result1,-1),-1)],4)
ave_max_pool_conv = tf.layers.conv3d(ave_max_pool1,
filters=1,
kernel_size=3,
padding='same',
activation=None,
dilation_rate=1)
bn3 = tf.layers.batch_normalization(ave_max_pool_conv, training=is_training_bn, renorm=False)
bn3 = tf.nn.leaky_relu(bn3)
sigmoid2 = tf.nn.sigmoid(bn3)
result2 = tf.multiply(sigmoid2, result1)
db_concat = tf.concat([input, result2], 4)
return db_concat
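    # Editor note (illustrative): this dense_block applies CBAM-style attention
    # in channel-then-spatial order,
    #   x1 = x * sigmoid(MLP(GlobalMaxPool(x)))              (channel gate)
    #   x2 = x1 * sigmoid(Conv3D([mean_c(x1), max_c(x1)]))   (spatial gate)
    # and returns concat([input, x2]), i.e. the dense connectivity pattern.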
# ========================
def dense_loop(self, loop, input, crop_size,
db_size,is_training_bn,
padding='same',
activation=None,
name='dense_block',
flag=0,
concat_flag=0,
feature_size=[],bn_flag=False,dilation_rate=1):
        '''
        :param loop: number of dense sub-blocks to stack
        :param input: input of the block
        :param crop_size: size of the cropped skip tensor (U-Net structure)
        :param db_size: spatial size at this level
        :param is_training_bn: batch-norm training flag
        :param padding: either 'same' or 'valid'
        :param activation: activation function
        :param name: scope name
        :param flag: unused
        :param concat_flag: unused
        :param feature_size: list with the sizes of the feature maps
        :param bn_flag: whether to use batch normalization
        :param dilation_rate: dilation rate
        :return: (block output, cropped skip tensor)
        '''
with tf.name_scope(name):
output = input
for i in range(loop):
output = self.dense_block(output,
feature_size=feature_size,
padding=padding,
activation=activation,
name='dense_sub_block' +self.log_ext+ str(i),
flag=flag,
concat_flag=concat_flag,
is_training_bn=is_training_bn,
bn_flag=bn_flag,
dilation_rate=dilation_rate)
            # In the U-Net structure this cropped tensor is concatenated with
            # the corresponding decoder level.
cropped = output[:,
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2),
tf.to_int32(db_size / 2) - tf.to_int32(crop_size / 2) - 1:
tf.to_int32(db_size / 2) + tf.to_int32(crop_size / 2), :]
return output, cropped
# ========================
def dens_net(self, image, is_training, dropout_rate1,dropout_rate2, dim,is_training_bn,dilation_rate=(1,1,1)):
        # U-Net crop-size arithmetic: each encoder level halves the spatial
        # size, and each decoder level maps n -> 2*n + 1 via a transposed
        # convolution after losing 2 voxels to a 'valid' 3x3x3 convolution,
        # so crop_size1 = (db_size3 - 2) * 2 + 1 and
        # crop_size2 = (crop_size1 - 2) * 2 + 1 below.
db_size1 = tf.to_int32(dim)
db_size2 = tf.to_int32(db_size1 / 2)
db_size3 = tf.to_int32(db_size2 / 2)
crop_size1 = tf.add(tf.multiply(db_size3-2, 2), 1)
crop_size2 = tf.add(tf.multiply(tf.add(crop_size1, -2), 2), 1)
db_size0 = tf.to_int32(0)
crop_size0 = tf.to_int32(0)
activation=tf.nn.leaky_relu
self.seed+=1
        # NOTE: opening a temporary Session here evaluates the random op once,
        # so the noise stddev is sampled at graph-construction time and then
        # baked into the graph as a constant.
        with tf.Session() as s:
            rnd = s.run(tf.random_uniform([1], 0, 5, dtype=tf.int32, seed=self.seed))
        # During training, add rounded Gaussian noise to the input image.
        noisy_img = tf.cond(is_training,
                            lambda: image + tf.round(tf.random_normal(tf.shape(image), mean=0,
                                                                      stddev=rnd,
                                                                      seed=self.seed + 2,
                                                                      dtype=tf.float32)),
                            lambda: image)
conv0 = tf.layers.conv3d(inputs=noisy_img, filters=8, kernel_size=3,
padding='same',
activation=None,
name='conv_deconv_0' + self.log_ext,
dilation_rate=(1, 1, 1))
bn1 = tf.layers.batch_normalization(conv0, training=is_training_bn,renorm=False)
bn1 = tf.nn.leaky_relu(bn1)
conv1 = tf.layers.conv3d(inputs=bn1, filters=8, kernel_size=3,
padding='same',
activation=None,
name='conv_deconv_00' + self.log_ext,
dilation_rate=(1, 1, 1))
bn2 = tf.layers.batch_normalization(conv1, training=is_training_bn,renorm=False)
bn2 = tf.nn.leaky_relu(bn2)
bn3= tf.concat([bn1, bn2], 4)
conv2 = tf.layers.conv3d(inputs=bn3, filters=int(bn3.shape[4].value* 0.75), kernel_size=1,
padding='same',
activation=None,
name='conv_deconv_000' + self.log_ext,
dilation_rate=(1, 1, 1))
bn4 = tf.layers.batch_normalization(conv2, training=is_training_bn, renorm=False)
bn4 = tf.nn.leaky_relu(bn4)
bn4 = tf.concat([noisy_img, bn4], 4)
[dense_out1, conc1] = self.dense_loop(loop=self.config[0],
input=bn4,
crop_size=crop_size2,
db_size=db_size1,
padding='same',
activation=activation,
name='dense_block_1'+self.log_ext,
concat_flag=1,
feature_size=[8,8],
is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=1)
[pool1, conc1] = self.transition_layer(dense_out1, 'transition_1',
conv_name='conv1'+self.log_ext,
db_size=db_size1, crop_size=crop_size2,
kernel_size=[1, 1, 1], padding='same',
activation=activation,
dilation_rate=(1, 1, 1),
pool_size=[2, 2, 2],
strides=(2,2,2),
is_training_bn=is_training_bn,
bn_flag=True)
# ========================
[dense_out2, conc2] = self.dense_loop(loop=self.config[1],
input=pool1,
crop_size=crop_size1,
db_size=db_size2,
padding='same',
activation=activation,
name='dense_block_2'+self.log_ext,
feature_size=[8,8],
is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=2)
[pool2,conc2] = self.transition_layer(dense_out2, 'transition_2',
conv_name='conv2'+self.log_ext,
db_size=db_size2, crop_size=crop_size1,
kernel_size=[1, 1, 1],
padding='same',
activation=activation,
dilation_rate=(1, 1, 1),
pool_size=[2, 2, 2],
strides=(2,2,2),
is_training_bn=is_training_bn,
bn_flag=True)
# ========================
[dense_out3, conc3] = self.dense_loop(loop=self.config[2],
input=pool2,
crop_size=crop_size0,
db_size=db_size0,
padding='same',
activation=activation,
name='dense_block_3'+self.log_ext,
feature_size=[8,8]
, is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=2)
conv1 = tf.layers.conv3d(inputs=dense_out3,
filters=int(dense_out3.shape[4].value),
kernel_size=[3, 3, 3],
padding='valid',
activation=None,
name='conv_deconv_1'+self.log_ext,
dilation_rate=(1, 1, 1))
bn2 = tf.layers.batch_normalization(conv1, training=is_training_bn,renorm=False)
bn2 = tf.nn.leaky_relu(bn2)
# ========================
deconv1 = tf.layers.conv3d_transpose(bn2,
filters=int(conv1.shape[4].value/2),
kernel_size=3,
strides=(2, 2, 2),
padding='valid',
use_bias=False)
conc11=tf.concat([conc2, deconv1], 4)
[dense_out5, conctmp] = self.dense_loop(loop=self.config[3],
input=conc11,
crop_size=crop_size0,
db_size=db_size0,
padding='same',
activation=activation,
name='dense_block_5'+self.log_ext,
feature_size=[8,8]
, is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=2)
conv2 = tf.layers.conv3d(inputs=dense_out5,
filters=int(dense_out5.shape[4].value/2),
kernel_size=3,
padding='valid',
activation=None,
name='conv_deconv_2'+self.log_ext,
dilation_rate=(1, 1, 1))
bn3 = tf.layers.batch_normalization(conv2, training=is_training_bn,renorm=False)
bn3 = tf.nn.leaky_relu(bn3)
# =========================================================
deconv2 = tf.layers.conv3d_transpose(bn3, filters=int(conv2.shape[4].value/2), kernel_size=[3, 3, 3], strides=(2, 2, 2),
padding='valid', use_bias=False)
conc22 = tf.concat([conc1, deconv2], 4)
[dense_out6, conctmp] = self.dense_loop(loop=self.config[4],
input=conc22,
crop_size=crop_size0,
db_size=db_size0,
padding='same',
activation=activation,
name='dense_block_6'+self.log_ext,
feature_size=[8,8],
is_training_bn=is_training_bn,
bn_flag=True,
dilation_rate=1)
conv3 = tf.layers.conv3d(inputs=dense_out6,
filters=int(dense_out6.shape[4].value / 2),
kernel_size=3,
padding='valid',
activation=None,
name='conv_deconv_tmp' + self.log_ext,
dilation_rate=(1, 1, 1))
bn4 = tf.layers.batch_normalization(conv3, training=is_training_bn,renorm=False)
bn4 = tf.nn.leaky_relu(bn4)
# =========================================================
# classification layer:
with tf.name_scope('classification_layer'):
y = tf.layers.conv3d(bn4, filters=self.class_no, kernel_size=1, padding='same', strides=(1, 1, 1),
activation=None, dilation_rate=(1, 1,1), name='fc3'+self.log_ext)
print(' total number of variables %s' % (
np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))
        return y
| 24,091 | 48.368852 | 138 | py |
icp-block-mdp | icp-block-mdp-master/reinforcement_learning/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
import math
import os
import random
from collections import deque
import gym
import numpy as np
import torch
import torch.nn.functional as F
from dm_control import suite
from numpy import linalg as LA
from torch import distributions as pyd
from torch import nn
import dmc2gym
def create_orthonormal_matrix(p):
    # Build a symmetric positive-definite matrix P; the eigenvectors of a
    # symmetric matrix form an orthonormal basis of R^p. (np.linalg.eigh
    # would be the more idiomatic routine for symmetric matrices.)
    A = np.random.rand(p, p)
    P = (A + np.transpose(A)) / 2 + p * np.eye(p)
    vals, vecs = LA.eig(P)
    w = vecs[:, 0:p]  # all p eigenvectors, stacked as columns
    return w
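# Illustrative check (editor addition; the helper name is an assumption): an
# orthonormal matrix w satisfies w.T @ w = I up to floating-point error.
def _check_orthonormal(w, atol=1e-8):
    return np.allclose(w.T @ w, np.eye(w.shape[0]), atol=atol)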
def make_env(cfg):
"""Helper function to create dm_control environment"""
if cfg.env == "ball_in_cup_catch":
domain_name = "ball_in_cup"
task_name = "catch"
else:
domain_name = cfg.env.split("_")[0]
task_name = "_".join(cfg.env.split("_")[1:])
env = suite.load(domain_name, task_name)
obs_space = int(sum([np.prod(s.shape) for s in env.observation_spec().values()]))
train_factors = [np.eye(cfg.noise_dims) + i for i in range(cfg.num_train_envs)]
test_factors = [create_orthonormal_matrix(cfg.noise_dims)]
train_envs = [
dmc2gym.make(
domain_name=domain_name,
task_name=task_name,
noise=cfg.noise,
mult_factor=train_factors[idx],
idx=idx,
seed=cfg.seed,
visualize_reward=True,
)
for idx in range(cfg.num_train_envs)
]
test_envs = [
dmc2gym.make(
domain_name=domain_name,
task_name=task_name,
noise=cfg.noise,
mult_factor=test_factors[idx],
idx=idx + cfg.num_train_envs,
seed=cfg.seed,
visualize_reward=True,
)
for idx in range(len(test_factors))
]
[env.seed(cfg.seed) for env in train_envs]
assert train_envs[0].action_space.low.min() >= -1
assert train_envs[0].action_space.high.max() <= 1
return train_envs, test_envs
class eval_mode(object):
def __init__(self, *models):
self.models = models
def __enter__(self):
self.prev_states = []
for model in self.models:
self.prev_states.append(model.training)
model.train(False)
def __exit__(self, *args):
for model, state in zip(self.models, self.prev_states):
model.train(state)
return False
class train_mode(object):
def __init__(self, *models):
self.models = models
def __enter__(self):
self.prev_states = []
for model in self.models:
self.prev_states.append(model.training)
model.train(True)
def __exit__(self, *args):
for model, state in zip(self.models, self.prev_states):
model.train(state)
return False
def soft_update_params(net, target_net, tau):
for param, target_param in zip(net.parameters(), target_net.parameters()):
target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
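# Illustrative sketch (editor addition): soft_update_params above performs the
# Polyak update  theta_target <- tau * theta + (1 - tau) * theta_target.
# The demo below, with hypothetical toy linear layers, verifies one update
# step; the function name and layer sizes are assumptions.
def _soft_update_demo(tau=0.005):
    net, target = nn.Linear(2, 2), nn.Linear(2, 2)
    before = target.weight.detach().clone()
    soft_update_params(net, target, tau)
    expected = tau * net.weight.detach() + (1 - tau) * before
    assert torch.allclose(target.weight.detach(), expected)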
def set_seed_everywhere(seed):
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
def make_dir(*path_parts):
dir_path = os.path.join(*path_parts)
try:
os.mkdir(dir_path)
except OSError:
pass
return dir_path
def weight_init(m):
"""Custom weight init for Conv2D and Linear layers."""
if isinstance(m, nn.Linear):
nn.init.orthogonal_(m.weight.data)
if hasattr(m.bias, "data"):
m.bias.data.fill_(0.0)
class MLP(nn.Module):
def __init__(
self, input_dim, hidden_dim, output_dim, hidden_depth, output_mod=None
):
super().__init__()
self.trunk = mlp(input_dim, hidden_dim, output_dim, hidden_depth, output_mod)
self.apply(weight_init)
def forward(self, x):
return self.trunk(x)
def mlp(input_dim, hidden_dim, output_dim, hidden_depth, output_mod=None):
if hidden_depth == 0:
mods = [nn.Linear(input_dim, output_dim)]
else:
mods = [nn.Linear(input_dim, hidden_dim), nn.ReLU(inplace=True)]
for i in range(hidden_depth - 1):
mods += [nn.Linear(hidden_dim, hidden_dim), nn.ReLU(inplace=True)]
mods.append(nn.Linear(hidden_dim, output_dim))
if output_mod is not None:
mods.append(output_mod)
trunk = nn.Sequential(*mods)
return trunk
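# Illustrative usage (editor addition; the numbers are assumptions): build a
# two-hidden-layer trunk mapping 10 -> 4 with a final Tanh, which is what the
# MLP wrapper above constructs internally.
def _mlp_demo():
    trunk = mlp(input_dim=10, hidden_dim=64, output_dim=4,
                hidden_depth=2, output_mod=nn.Tanh())
    return trunk(torch.randn(8, 10)).shape  # torch.Size([8, 4])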
def to_np(t):
if t is None:
return None
elif t.nelement() == 0:
return np.array([])
else:
return t.cpu().detach().numpy()
| 4,694 | 26.296512 | 85 | py |
icp-block-mdp | icp-block-mdp-master/reinforcement_learning/encoder.py | # Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
def tie_weights(src, trg):
assert type(src) == type(trg)
trg.weight = src.weight
trg.bias = src.bias
OUT_DIM = {2: 39, 4: 35, 6: 31}
class PixelEncoder(nn.Module):
"""Convolutional encoder of pixels observations."""
def __init__(self, obs_shape, feature_dim, num_layers=2, num_filters=32):
super().__init__()
assert len(obs_shape) == 3
self.feature_dim = feature_dim
self.num_layers = num_layers
self.convs = nn.ModuleList([nn.Conv2d(obs_shape[0], num_filters, 3, stride=2)])
for i in range(num_layers - 1):
self.convs.append(nn.Conv2d(num_filters, num_filters, 3, stride=1))
out_dim = OUT_DIM[num_layers]
self.fc = nn.Linear(num_filters * out_dim * out_dim, self.feature_dim)
self.ln = nn.LayerNorm(self.feature_dim)
self.outputs = dict()
def reparameterize(self, mu, logstd):
std = torch.exp(logstd)
eps = torch.randn_like(std)
return mu + eps * std
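    # Editor note (illustrative): reparameterize implements the standard
    # reparameterization trick, z = mu + exp(logstd) * eps with eps ~ N(0, I),
    # which keeps sampling differentiable w.r.t. mu and logstd.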
def forward_conv(self, obs):
obs = obs / 255.0
self.outputs["obs"] = obs
conv = torch.relu(self.convs[0](obs))
self.outputs["conv1"] = conv
for i in range(1, self.num_layers):
conv = torch.relu(self.convs[i](conv))
self.outputs["conv%s" % (i + 1)] = conv
h = conv.view(conv.size(0), -1)
return h
def forward(self, obs, detach=False):
h = self.forward_conv(obs)
if detach:
h = h.detach()
h_fc = self.fc(h)
self.outputs["fc"] = h_fc
h_norm = self.ln(h_fc)
self.outputs["ln"] = h_norm
out = torch.tanh(h_norm)
self.outputs["tanh"] = out
return out
def copy_conv_weights_from(self, source):
"""Tie convolutional layers"""
# only tie conv layers
for i in range(self.num_layers):
tie_weights(src=source.convs[i], trg=self.convs[i])
def log(self, L, step, log_freq):
if step % log_freq != 0:
return
for k, v in self.outputs.items():
L.log_histogram("train_encoder/%s_hist" % k, v, step)
if len(v.shape) > 2:
L.log_image("train_encoder/%s_img" % k, v[0], step)
for i in range(self.num_layers):
L.log_param("train_encoder/conv%s" % (i + 1), self.convs[i], step)
L.log_param("train_encoder/fc", self.fc, step)
L.log_param("train_encoder/ln", self.ln, step)
class IdentityEncoder(nn.Module):
def __init__(self, obs_shape, feature_dim, num_layers, num_filters):
super().__init__()
assert len(obs_shape) == 1
self.feature_dim = obs_shape[0]
def forward(self, obs, detach=False):
return obs
def copy_conv_weights_from(self, source):
pass
def log(self, L, step, log_freq):
pass
class LinearEncoder(nn.Module):
def __init__(self, obs_shape, feature_dim, num_layers, num_filters):
super().__init__()
self.feature_dim = feature_dim
self.linear = nn.Linear(obs_shape, feature_dim)
def forward(self, obs, detach=False):
if detach:
return self.linear(obs).detach()
else:
return self.linear(obs)
def copy_conv_weights_from(self, source):
pass
def log(self, L, step, log_freq):
pass
class MLPEncoder(nn.Module):
def __init__(self, obs_shape, feature_dim, num_layers, num_filters):
super().__init__()
self.feature_dim = feature_dim
self.model = nn.ModuleList()
self.model.append(nn.Linear(obs_shape, 512))
for i in range(num_layers - 1):
self.model.append(nn.Linear(512, 512))
self.model.append(nn.Linear(512, feature_dim))
def forward(self, obs, detach=False):
for i in range(len(self.model)):
obs = torch.relu(self.model[i](obs))
if detach:
return obs.detach()
else:
return obs
def copy_conv_weights_from(self, source):
pass
def log(self, L, step, log_freq):
pass
class VariationalEncoder(nn.Module):
def __init__(self, obs_shape, feature_dim, num_layers, num_filters):
super().__init__()
self.feature_dim = feature_dim
self.model = nn.ModuleList()
self.model.append(nn.Linear(obs_shape, 200))
for i in range(num_layers - 1):
self.model.append(nn.Linear(200, 200))
self.model.append(nn.Linear(200, feature_dim * 2))
def forward(self, obs, deterministic=False, detach=False):
mu, logvar = self.encode(obs)
if deterministic:
obs = mu
else:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
obs = mu + eps * std
if detach:
return obs.detach()
else:
return obs
    def encode(self, obs):
        for i in range(len(self.model) - 1):
            obs = torch.relu(self.model[i](obs))
        # no activation on the output layer: it parameterises mu and logvar,
        # both of which must be free to take negative values
        obs = self.model[-1](obs)
        return obs[:, : self.feature_dim], obs[:, self.feature_dim :]
def copy_conv_weights_from(self, source):
pass
def log(self, L, step, log_freq):
pass
_AVAILABLE_ENCODERS = {
"pixel": PixelEncoder,
"identity": IdentityEncoder,
"linear": LinearEncoder,
"mlp": MLPEncoder,
"variational": VariationalEncoder,
}
def make_encoder(encoder_type, obs_shape, feature_dim, num_layers, num_filters):
assert encoder_type in _AVAILABLE_ENCODERS
return _AVAILABLE_ENCODERS[encoder_type](
obs_shape, feature_dim, num_layers, num_filters
)
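if __name__ == "__main__":
    # Hedged smoke test: the 9x84x84 shape (three stacked RGB frames) and the
    # 50-d feature size are illustrative assumptions, not repo settings.
    enc = make_encoder("pixel", (9, 84, 84), feature_dim=50, num_layers=4, num_filters=32)
    print(enc(torch.zeros(8, 9, 84, 84)).shape)  # expected: torch.Size([8, 50])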
| 5,700 | 26.541063 | 87 | py |
icp-block-mdp | icp-block-mdp-master/reinforcement_learning/logger.py | # Copyright (c) Facebook, Inc. and its affiliates.
import csv
import json
import os
import shutil
from collections import defaultdict
import numpy as np
import torch
from termcolor import colored
from torch.utils.tensorboard import SummaryWriter
COMMON_TRAIN_FORMAT = [
("episode", "E", "int"),
("step", "S", "int"),
("episode_reward", "R", "float"),
("duration", "D", "time"),
]
COMMON_EVAL_FORMAT = [
("episode", "E", "int"),
("step", "S", "int"),
("eval_episode_reward", "ER", "float"),
("train_episode_reward", "TR", "float"),
]
AGENT_TRAIN_FORMAT = {
"sac": [
("batch_reward", "BR", "float"),
("actor_loss", "ALOSS", "float"),
("critic_loss", "CLOSS", "float"),
("alpha_loss", "TLOSS", "float"),
("alpha_value", "TVAL", "float"),
("actor_entropy", "AENT", "float"),
],
"irm": [
("batch_reward", "BR", "float"),
("actor_loss", "ALOSS", "float"),
("critic_loss", "CLOSS", "float"),
("alpha_loss", "TLOSS", "float"),
("alpha_value", "TVAL", "float"),
("actor_entropy", "AENT", "float"),
("encoder_penalty", "ENCP", "float"),
],
"causal": [
("batch_reward", "BR", "float"),
("actor_loss", "ALOSS", "float"),
("critic_loss", "CLOSS", "float"),
("alpha_loss", "TLOSS", "float"),
("alpha_value", "TVAL", "float"),
("actor_entropy", "AENT", "float"),
("classifier_entropy", "CENT", "float"),
("encoder_KLD", "KLD", "float"),
("encoder_rc_loss", "RCLOSS", "float"),
],
}
class AverageMeter(object):
def __init__(self):
self._sum = 0
self._count = 0
def update(self, value, n=1):
self._sum += value
self._count += n
def value(self):
return self._sum / max(1, self._count)
class MetersGroup(object):
def __init__(self, file_name, formating):
self._csv_file_name = self._prepare_file(file_name, "csv")
self._formating = formating
self._meters = defaultdict(AverageMeter)
self._csv_file = open(self._csv_file_name, "w")
self._csv_writer = None
def _prepare_file(self, prefix, suffix):
file_name = f"{prefix}.{suffix}"
if os.path.exists(file_name):
os.remove(file_name)
return file_name
def log(self, key, value, n=1):
self._meters[key].update(value, n)
def _prime_meters(self):
data = dict()
for key, meter in self._meters.items():
if key.startswith("train"):
key = key[len("train") + 1 :]
else:
key = key[len("eval") + 1 :]
key = key.replace("/", "_")
data[key] = meter.value()
return data
def _dump_to_csv(self, data):
if self._csv_writer is None:
self._csv_writer = csv.DictWriter(
self._csv_file, fieldnames=sorted(data.keys()), restval=0.0
)
self._csv_writer.writeheader()
self._csv_writer.writerow(data)
self._csv_file.flush()
def _format(self, key, value, ty):
if ty == "int":
value = int(value)
return f"{key}: {value}"
elif ty == "float":
return f"{key}: {value:.04f}"
elif ty == "time":
return f"{key}: {value:04.1f} s"
else:
raise f"invalid format type: {ty}"
def _dump_to_console(self, data, prefix):
prefix = colored(prefix, "yellow" if prefix == "train" else "green")
pieces = [f"| {prefix: <14}"]
for key, disp_key, ty in self._formating:
value = data.get(key, 0)
pieces.append(self._format(disp_key, value, ty))
print(" | ".join(pieces))
def dump(self, step, prefix, save=True):
if len(self._meters) == 0:
return
if save:
data = self._prime_meters()
data["step"] = step
self._dump_to_csv(data)
self._dump_to_console(data, prefix)
self._meters.clear()
class Logger(object):
def __init__(self, log_dir, save_tb=False, log_frequency=10000, agent="sac"):
self._log_dir = log_dir
self._log_frequency = log_frequency
if save_tb:
tb_dir = os.path.join(log_dir, "tb")
if os.path.exists(tb_dir):
                try:
                    shutil.rmtree(tb_dir)
                except OSError:
                    print("logger.py warning: Unable to remove tb directory")
self._sw = SummaryWriter(tb_dir)
else:
self._sw = None
# each agent has specific output format for training
assert agent in AGENT_TRAIN_FORMAT
train_format = COMMON_TRAIN_FORMAT + AGENT_TRAIN_FORMAT[agent]
self._train_mg = MetersGroup(
os.path.join(log_dir, "train"), formating=train_format
)
self._eval_mg = MetersGroup(
os.path.join(log_dir, "eval"), formating=COMMON_EVAL_FORMAT
)
def _should_log(self, step, log_frequency):
log_frequency = log_frequency or self._log_frequency
return step % log_frequency == 0
def _try_sw_log(self, key, value, step):
if self._sw is not None:
self._sw.add_scalar(key, value, step)
def _try_sw_log_video(self, key, frames, step):
if self._sw is not None:
frames = torch.from_numpy(np.array(frames))
frames = frames.unsqueeze(0)
self._sw.add_video(key, frames, step, fps=30)
def _try_sw_log_histogram(self, key, histogram, step):
if self._sw is not None:
self._sw.add_histogram(key, histogram, step)
def log(self, key, value, step, n=1, log_frequency=1):
if not self._should_log(step, log_frequency):
return
assert key.startswith("train") or key.startswith("eval")
if type(value) == torch.Tensor:
value = value.item()
self._try_sw_log(key, value / n, step)
mg = self._train_mg if key.startswith("train") else self._eval_mg
mg.log(key, value, n)
def log_param(self, key, param, step, log_frequency=None):
if not self._should_log(step, log_frequency):
return
self.log_histogram(key + "_w", param.weight.data, step)
if hasattr(param.weight, "grad") and param.weight.grad is not None:
self.log_histogram(key + "_w_g", param.weight.grad.data, step)
if hasattr(param, "bias") and hasattr(param.bias, "data"):
self.log_histogram(key + "_b", param.bias.data, step)
if hasattr(param.bias, "grad") and param.bias.grad is not None:
self.log_histogram(key + "_b_g", param.bias.grad.data, step)
def log_video(self, key, frames, step, log_frequency=None):
if not self._should_log(step, log_frequency):
return
assert key.startswith("train") or key.startswith("eval")
self._try_sw_log_video(key, frames, step)
def log_histogram(self, key, histogram, step, log_frequency=None):
if not self._should_log(step, log_frequency):
return
assert key.startswith("train") or key.startswith("eval")
self._try_sw_log_histogram(key, histogram, step)
def dump(self, step, save=True, ty=None):
if ty is None:
self._train_mg.dump(step, "train", save)
self._eval_mg.dump(step, "eval", save)
elif ty == "eval":
self._eval_mg.dump(step, "eval", save)
elif ty == "train":
self._train_mg.dump(step, "train", save)
else:
raise f"invalid log type: {ty}"
| 7,709 | 32.815789 | 81 | py |
icp-block-mdp | icp-block-mdp-master/reinforcement_learning/decoder.py | # Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
from encoder import OUT_DIM
class PixelDecoder(nn.Module):
def __init__(self, obs_shape, feature_dim, num_layers=2, num_filters=32):
super().__init__()
self.num_layers = num_layers
self.num_filters = num_filters
self.out_dim = OUT_DIM[num_layers]
self.fc = nn.Linear(feature_dim, num_filters * self.out_dim * self.out_dim)
self.deconvs = nn.ModuleList()
for i in range(self.num_layers - 1):
self.deconvs.append(
nn.ConvTranspose2d(num_filters, num_filters, 3, stride=1)
)
self.deconvs.append(
nn.ConvTranspose2d(num_filters, obs_shape[0], 3, stride=2, output_padding=1)
)
self.outputs = dict()
def forward(self, h):
h = torch.relu(self.fc(h))
self.outputs["fc"] = h
deconv = h.view(-1, self.num_filters, self.out_dim, self.out_dim)
self.outputs["deconv1"] = deconv
for i in range(0, self.num_layers - 1):
deconv = torch.relu(self.deconvs[i](deconv))
self.outputs["deconv%s" % (i + 1)] = deconv
obs = self.deconvs[-1](deconv)
self.outputs["obs"] = obs
return obs
def log(self, L, step, log_freq):
if step % log_freq != 0:
return
for k, v in self.outputs.items():
L.log_histogram("train_decoder/%s_hist" % k, v, step)
if len(v.shape) > 2:
L.log_image("train_decoder/%s_i" % k, v[0], step)
for i in range(self.num_layers):
L.log_param("train_decoder/deconv%s" % (i + 1), self.deconvs[i], step)
L.log_param("train_decoder/fc", self.fc, step)
class MLPDecoder(nn.Module):
def __init__(self, obs_shape, feature_dim, num_layers=2, num_filters=32):
super().__init__()
self.num_layers = num_layers
self.num_filters = num_filters
self.deconvs = nn.ModuleList()
self.deconvs.append(nn.Linear(feature_dim, 200))
for i in range(self.num_layers - 1):
self.deconvs.append(nn.Linear(200, 200))
self.deconvs.append(nn.Linear(200, obs_shape[0]))
    def forward(self, h):
        for i in range(len(self.deconvs) - 1):
            h = torch.relu(self.deconvs[i](h))
        # linear output layer so reconstructions can take negative values
        return self.deconvs[-1](h)
def log(self, L, step, log_freq):
pass
_AVAILABLE_DECODERS = {"pixel": PixelDecoder, "mlp": MLPDecoder}
def make_decoder(decoder_type, obs_shape, feature_dim, num_layers, num_filters):
assert decoder_type in _AVAILABLE_DECODERS
return _AVAILABLE_DECODERS[decoder_type](
obs_shape, feature_dim, num_layers, num_filters
)
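if __name__ == "__main__":
    # Hedged round-trip sketch mirroring the encoder's conv stack; the shapes
    # (three stacked RGB frames at 84x84, 50-d latent) are assumptions.
    dec = make_decoder("pixel", (9, 84, 84), feature_dim=50, num_layers=4, num_filters=32)
    print(dec(torch.zeros(8, 50)).shape)  # expected: torch.Size([8, 9, 84, 84])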
| 2,728 | 28.344086 | 88 | py |
icp-block-mdp | icp-block-mdp-master/reinforcement_learning/replay_buffer.py | # Copyright (c) Facebook, Inc. and its affiliates.
import random
from typing import List, Optional, Union
import numpy as np
import torch
class MultiEnvReplayBuffer(object):
"""Buffer to store environment transitions for multiple environments"""
def __init__(self, obs_shape, action_shape, capacity, device, num_envs: int):
self.env_id_to_replay_buffer_map = [
ReplayBuffer(
obs_shape=obs_shape,
action_shape=action_shape,
capacity=int(capacity / num_envs),
device=device,
)
for _ in range(num_envs)
]
self.num_envs = num_envs
def add(self, env_id, obs, action, reward, next_obs, done, done_no_max):
self.env_id_to_replay_buffer_map[env_id].add(
obs, action, reward, next_obs, done, done_no_max
)
def add_loop(self, obs, action, reward, next_obs, done):
for env_id in range(self.num_envs):
self.env_id_to_replay_buffer_map[env_id].add(
obs=obs[env_id],
action=action[env_id],
reward=reward[env_id],
next_obs=next_obs[env_id],
done=done[env_id],
)
def sample(self, batch_size, env_id: Optional[int] = None):
if env_id is None:
env_id = random.randint(0, self.num_envs - 1)
return self.env_id_to_replay_buffer_map[env_id].sample(batch_size)
def save(self, save_dir):
for replay_buffer in self.env_id_to_replay_buffer_map:
replay_buffer.save(save_dir)
def load(self, save_dir):
for replay_buffer in self.env_id_to_replay_buffer_map:
replay_buffer.load(save_dir)
class ReplayBuffer(object):
"""Buffer to store environment transitions."""
def __init__(self, obs_shape, action_shape, capacity, device):
self.capacity = capacity
self.device = device
# the proprioceptive obs is stored as float32, pixels obs as uint8
obs_dtype = np.float32 if len(obs_shape) == 1 else np.uint8
self.obses = np.empty((capacity, *obs_shape), dtype=obs_dtype)
self.next_obses = np.empty((capacity, *obs_shape), dtype=obs_dtype)
self.actions = np.empty((capacity, *action_shape), dtype=np.float32)
self.rewards = np.empty((capacity, 1), dtype=np.float32)
self.not_dones = np.empty((capacity, 1), dtype=np.float32)
self.not_dones_no_max = np.empty((capacity, 1), dtype=np.float32)
self.idx = 0
self.last_save = 0
self.full = False
def __len__(self):
return self.capacity if self.full else self.idx
def add(self, obs, action, reward, next_obs, done, done_no_max):
np.copyto(self.obses[self.idx], obs)
np.copyto(self.actions[self.idx], action)
np.copyto(self.rewards[self.idx], reward)
np.copyto(self.next_obses[self.idx], next_obs)
np.copyto(self.not_dones[self.idx], not done)
np.copyto(self.not_dones_no_max[self.idx], not done_no_max)
self.idx = (self.idx + 1) % self.capacity
self.full = self.full or self.idx == 0
def sample(self, batch_size):
idxs = np.random.randint(
0, self.capacity if self.full else self.idx, size=batch_size
)
obses = torch.as_tensor(self.obses[idxs], device=self.device).float()
actions = torch.as_tensor(self.actions[idxs], device=self.device)
rewards = torch.as_tensor(self.rewards[idxs], device=self.device)
next_obses = torch.as_tensor(self.next_obses[idxs], device=self.device).float()
not_dones = torch.as_tensor(self.not_dones[idxs], device=self.device)
not_dones_no_max = torch.as_tensor(
self.not_dones_no_max[idxs], device=self.device
)
return obses, actions, rewards, next_obses, not_dones, not_dones_no_max
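if __name__ == "__main__":
    # Hedged smoke test with toy shapes (assumptions, not repo configs).
    buf = ReplayBuffer(obs_shape=(4,), action_shape=(2,), capacity=100, device="cpu")
    for _ in range(10):
        buf.add(np.zeros(4), np.zeros(2), 0.0, np.zeros(4), False, False)
    obs, act, rew, next_obs, nd, nd_no_max = buf.sample(batch_size=5)
    print(len(buf), obs.shape)  # expected: 10 torch.Size([5, 4])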
| 3,900 | 36.509615 | 87 | py |
icp-block-mdp | icp-block-mdp-master/reinforcement_learning/train.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import math
import os
import pickle as pkl
import sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import dmc2gym
import hydra
import utils
from logger import Logger
from replay_buffer import MultiEnvReplayBuffer
from video import VideoRecorder
class Workspace(object):
def __init__(self, cfg):
self.work_dir = os.getcwd()
print(f"workspace: {self.work_dir}")
self.cfg = cfg
self.logger = Logger(
self.work_dir,
save_tb=cfg.log_save_tb,
log_frequency=cfg.log_frequency,
agent=cfg.agent.name,
)
utils.set_seed_everywhere(cfg.seed)
self.device = torch.device(cfg.device)
self.train_envs, self.test_envs = utils.make_env(cfg)
if cfg.noise_dims == -1:
cfg.noise_dims = self.train_envs[0].observation_space.shape[0]
cfg.agent.params.obs_dim = (
self.train_envs[0].observation_space.shape[0] + cfg.noise_dims + 1
)
cfg.agent.params.action_dim = self.train_envs[0].action_space.shape[0]
if cfg.agent.name != "sac":
cfg.agent.params.num_envs = cfg.num_train_envs
cfg.agent.params.action_range = [
float(self.train_envs[0].action_space.low.min()),
float(self.train_envs[0].action_space.high.max()),
]
self.agent = hydra.utils.instantiate(cfg.agent)
self.replay_buffer = MultiEnvReplayBuffer(
(cfg.agent.params.obs_dim,), # hard coded
self.train_envs[0].action_space.shape,
int(cfg.replay_buffer_capacity),
self.device,
num_envs=cfg.num_train_envs,
)
self.video_recorder = VideoRecorder(self.work_dir if cfg.save_video else None)
self.step = [0] * cfg.num_train_envs
def evaluate(self, env, train=False):
for episode in range(self.cfg.num_eval_episodes):
obs = env.reset()
self.agent.reset()
self.video_recorder.init(enabled=(episode == 0))
done = False
episode_reward = 0
while not done:
with utils.eval_mode(self.agent):
action = self.agent.act(obs, sample=False)
obs, reward, done, _ = env.step(action)
self.video_recorder.record(env)
episode_reward += reward
self.video_recorder.save(f"{self.step}.mp4")
if train:
self.logger.log(
"eval/train_episode_reward", episode_reward, self.step[0]
)
else:
self.logger.log(
"eval/eval_episode_reward", episode_reward, self.step[0]
)
def run(self):
episode, episode_reward, episode_step, done = (
[0] * self.cfg.num_train_envs,
[0] * self.cfg.num_train_envs,
[0] * self.cfg.num_train_envs,
[True] * self.cfg.num_train_envs,
)
obs, next_obs = (
[self.train_envs[0].reset()] * self.cfg.num_train_envs,
[self.train_envs[0].reset()] * self.cfg.num_train_envs,
)
start_time = time.time()
while self.step[0] < self.cfg.num_train_steps:
for e_idx, env in enumerate(self.train_envs):
if done[e_idx]:
if self.step[e_idx] > 0:
self.logger.log(
"train/duration", time.time() - start_time, self.step[e_idx]
)
start_time = time.time()
self.logger.dump(
self.step[e_idx],
save=(self.step[e_idx] > self.cfg.num_seed_steps),
)
# evaluate agent periodically
if self.step[0] > 0 and self.step[0] % self.cfg.eval_frequency == 0:
self.logger.log(
"eval/episode", episode[e_idx], self.step[e_idx]
)
self.evaluate(env, train=True)
self.evaluate(self.test_envs[0], train=False)
self.logger.dump(self.step[e_idx])
self.logger.log(
"train/episode_reward", episode_reward[e_idx], self.step[e_idx]
)
obs[e_idx] = env.reset()
self.agent.reset()
done[e_idx] = False
episode_reward[e_idx] = 0
episode_step[e_idx] = 0
episode[e_idx] += 1
self.logger.log("train/episode", episode[e_idx], self.step[e_idx])
# sample action for data collection
if self.step[e_idx] < self.cfg.num_seed_steps:
action = env.action_space.sample()
else:
with utils.eval_mode(self.agent):
action = self.agent.act(obs[e_idx], sample=True)
# run training update
if self.step[e_idx] >= self.cfg.num_seed_steps:
self.agent.update(self.replay_buffer, self.logger, self.step[e_idx])
try:
next_obs[e_idx], reward, done[e_idx], _ = env.step(action)
                except Exception:
next_obs[e_idx] = obs[e_idx]
reward = 0
print("Invalid action. Terminating episode.")
done[e_idx] = True
# allow infinite bootstrap
done[e_idx] = float(done[e_idx])
done_no_max = (
0
if episode_step[e_idx] + 1 == env._max_episode_steps
else done[e_idx]
)
episode_reward[e_idx] += reward
self.replay_buffer.add(
e_idx,
obs[e_idx],
action,
reward,
next_obs[e_idx],
done[e_idx],
done_no_max,
)
obs[e_idx] = next_obs[e_idx]
episode_step[e_idx] += 1
self.step[e_idx] += 1
@hydra.main(config_path="config/train.yaml", strict=True)
def main(cfg):
workspace = Workspace(cfg)
workspace.run()
if __name__ == "__main__":
main()
| 6,586 | 33.851852 | 88 | py |
icp-block-mdp | icp-block-mdp-master/reinforcement_learning/agent/sac.py | # Copyright (c) Facebook, Inc. and its affiliates.
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd, optim
import hydra
import utils
from agent import Agent
from decoder import make_decoder
from encoder import make_encoder
def make_dynamics_model(feature_dim, hidden_dim, action_shape):
model = nn.Sequential(
nn.Linear(feature_dim + action_shape, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, feature_dim),
)
return model
def irm_penalty(logits, labels):
scale = torch.tensor(1.0).cuda().requires_grad_()
loss = F.mse_loss(logits * scale, labels)
grad = autograd.grad(loss, [scale], create_graph=True)[0]
return torch.sum(grad ** 2)
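# Hedged note on irm_penalty (the IRMv1 penalty of Arjovsky et al., 2019):
# `scale` is a dummy multiplier fixed at 1.0, so the gradient of
# MSE(scale * logits, labels) w.r.t. `scale` measures how far the predictor is
# from optimal on this environment's data; its squared norm is the invariance
# penalty. Illustrative call (requires CUDA because of the hard-coded .cuda()
# above; variable names are hypothetical):
#   penalty = irm_penalty(current_q, target_q)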
class SACAgent(Agent):
"""SAC algorithm."""
def __init__(
self,
obs_dim,
action_dim,
action_range,
device,
encoder_type,
encoder_feature_dim,
critic_cfg,
actor_cfg,
discount,
init_temperature,
alpha_lr,
alpha_betas,
actor_lr,
actor_betas,
actor_update_frequency,
critic_lr,
critic_betas,
critic_tau,
critic_target_update_frequency,
batch_size,
):
super().__init__()
self.action_range = action_range
self.device = torch.device(device)
self.discount = discount
self.critic_tau = critic_tau
self.actor_update_frequency = actor_update_frequency
self.critic_target_update_frequency = critic_target_update_frequency
self.batch_size = batch_size
self.encoder = make_encoder(
encoder_type, obs_dim, encoder_feature_dim, 2, 32
).to(self.device)
self.critic = hydra.utils.instantiate(critic_cfg).to(self.device)
self.critic.encoder = self.encoder
self.critic_target = hydra.utils.instantiate(critic_cfg).to(self.device)
self.critic_target.encoder = self.encoder
self.critic_target.load_state_dict(self.critic.state_dict())
self.actor = hydra.utils.instantiate(actor_cfg).to(self.device)
self.actor.encoder = self.encoder
self.log_alpha = torch.tensor(np.log(init_temperature)).to(self.device)
self.log_alpha.requires_grad = True
# set target entropy to -|A|
self.target_entropy = -action_dim
# optimizers
self.actor_optimizer = torch.optim.Adam(
self.actor.parameters(), lr=actor_lr, betas=actor_betas
)
self.critic_optimizer = torch.optim.Adam(
self.critic.parameters(), lr=critic_lr, betas=critic_betas
)
self.log_alpha_optimizer = torch.optim.Adam(
[self.log_alpha], lr=alpha_lr, betas=alpha_betas
)
self.train()
self.critic_target.train()
def train(self, training=True):
self.training = training
self.actor.train(training)
self.critic.train(training)
@property
def alpha(self):
return self.log_alpha.exp()
def act(self, obs, sample=False):
obs = torch.FloatTensor(obs).to(self.device)
obs = obs.unsqueeze(0)
dist = self.actor(obs)
action = dist.sample() if sample else dist.mean
action = action.clamp(*self.action_range)
assert action.ndim == 2 and action.shape[0] == 1
return utils.to_np(action[0])
def update_critic(self, obs, action, reward, next_obs, not_done, logger, step):
# with torch.no_grad():
dist = self.actor(next_obs)
next_action = dist.rsample()
log_prob = dist.log_prob(next_action).sum(-1, keepdim=True)
target_Q1, target_Q2 = self.critic_target(next_obs, next_action)
target_V = torch.min(target_Q1, target_Q2) - self.alpha.detach() * log_prob
target_Q = reward + (not_done * self.discount * target_V)
target_Q = target_Q.detach()
# get current Q estimates
current_Q1, current_Q2 = self.critic(obs, action)
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(
current_Q2, target_Q
)
logger.log("train_critic/loss", critic_loss, step)
# add L1 penalty
L1_reg = torch.tensor(0.0, requires_grad=True).to(self.device)
for name, param in self.critic.encoder.named_parameters():
if "weight" in name:
L1_reg = L1_reg + torch.norm(param, 1)
# Optimize the critic
self.critic_optimizer.zero_grad()
(critic_loss + 1e-5 * L1_reg).backward()
self.critic_optimizer.step()
self.critic.log(logger, step)
def update_actor_and_alpha(self, obs, logger, step):
dist = self.actor(obs, detach=True)
action = dist.rsample()
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
actor_Q1, actor_Q2 = self.critic(obs, action, detach=True)
actor_Q = torch.min(actor_Q1, actor_Q2)
actor_loss = (self.alpha.detach() * log_prob - actor_Q).mean()
logger.log("train_actor/loss", actor_loss, step)
logger.log("train_actor/target_entropy", self.target_entropy, step)
logger.log("train_actor/entropy", -log_prob.mean(), step)
# optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
self.actor.log(logger, step)
self.log_alpha_optimizer.zero_grad()
alpha_loss = (self.alpha * (-log_prob - self.target_entropy).detach()).mean()
logger.log("train_alpha/loss", alpha_loss, step)
logger.log("train_alpha/value", self.alpha, step)
alpha_loss.backward()
self.log_alpha_optimizer.step()
def update(self, replay_buffer, logger, step):
obs, action, reward, next_obs, not_done, not_done_no_max = replay_buffer.sample(
self.batch_size
)
logger.log("train/batch_reward", reward.mean(), step)
self.update_critic(obs, action, reward, next_obs, not_done_no_max, logger, step)
if step % self.actor_update_frequency == 0:
self.update_actor_and_alpha(obs, logger, step)
if step % self.critic_target_update_frequency == 0:
utils.soft_update_params(self.critic, self.critic_target, self.critic_tau)
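# Hedged driver sketch for SACAgent (hypothetical names; see train.py in this
# repo for the real loop):
#   action = agent.act(obs, sample=True)
#   next_obs, reward, done, _ = env.step(action)
#   replay_buffer.add(env_id, obs, action, reward, next_obs, done, done_no_max)
#   agent.update(replay_buffer, logger, step)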
class CausalAgent(Agent):
"""SAC algorithm."""
def __init__(
self,
obs_dim,
action_dim,
action_range,
device,
encoder_type,
num_envs,
c_ent,
kld,
critic_cfg,
actor_cfg,
discount,
init_temperature,
alpha_lr,
encoder_lr,
c_ent_iters,
alpha_betas,
actor_lr,
actor_betas,
actor_update_frequency,
decoder_lr,
decoder_weight_lambda,
critic_lr,
critic_betas,
critic_tau,
encoder_feature_dim,
decoder_latent_lambda,
critic_target_update_frequency,
batch_size,
):
super().__init__()
self.action_range = action_range
self.device = torch.device(device)
self.discount = discount
self.critic_tau = critic_tau
self.actor_update_frequency = actor_update_frequency
self.critic_target_update_frequency = critic_target_update_frequency
self.batch_size = batch_size
self.num_envs = num_envs
self.encoder_tau = 0.005
self.decoder_latent_lambda = decoder_latent_lambda
self.encoder_type = encoder_type
self.c_ent = c_ent
self.c_ent_iters = c_ent_iters
self.kld = kld
self.encoder = make_encoder(
encoder_type, obs_dim, encoder_feature_dim, 2, 32
).to(self.device)
self.task_specific_encoders = [
make_encoder(encoder_type, obs_dim, encoder_feature_dim, 2, 32).to(device)
for i in range(self.num_envs)
]
self.model = make_dynamics_model(encoder_feature_dim, 200, action_dim).to(
device
)
self.task_specific_models = [
make_dynamics_model(encoder_feature_dim, 200, action_dim).to(device)
for i in range(self.num_envs)
]
self.reward_model = nn.Sequential(
nn.Linear(encoder_feature_dim, 200),
nn.ReLU(),
nn.Linear(200, 200),
nn.ReLU(),
nn.Linear(200, 1),
).to(device)
self.decoder = nn.Sequential(
nn.Linear(encoder_feature_dim * 2, 200),
nn.ReLU(),
nn.Linear(200, 200),
nn.ReLU(),
nn.Linear(200, obs_dim),
).to(device)
self.classifier = nn.Sequential(
nn.Linear(encoder_feature_dim, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, self.num_envs),
).to(device)
self.critic = hydra.utils.instantiate(critic_cfg).to(self.device)
self.critic.encoder = self.encoder
self.critic_target = hydra.utils.instantiate(critic_cfg).to(self.device)
self.critic_target.encoder = make_encoder(
encoder_type, obs_dim, encoder_feature_dim, 2, 32
).to(self.device)
self.critic_target.load_state_dict(self.critic.state_dict())
self.actor = hydra.utils.instantiate(actor_cfg).to(self.device)
self.actor.encoder = self.encoder
self.log_alpha = torch.tensor(np.log(init_temperature)).to(self.device)
self.log_alpha.requires_grad = True
# set target entropy to -|A|
self.target_entropy = -action_dim
# optimizers
self.actor_optimizer = torch.optim.Adam(
self.actor.parameters(), lr=actor_lr, betas=actor_betas
)
self.critic_optimizer = torch.optim.Adam(
list(self.critic.parameters()) + list(self.encoder.parameters()),
lr=critic_lr,
betas=critic_betas,
)
self.log_alpha_optimizer = torch.optim.Adam(
[self.log_alpha], lr=alpha_lr, betas=alpha_betas
)
self.classifier_optimizer = torch.optim.Adam(
self.classifier.parameters(), lr=actor_lr, betas=actor_betas
)
# optimizer for critic encoder for reconstruction loss
self.encoder_optimizer = torch.optim.Adam(
self.critic.encoder.parameters(), lr=encoder_lr
)
# optimizer for decoder
task_specific_parameters = [
params
for t in (self.task_specific_encoders + self.task_specific_models)
for params in list(t.parameters())
]
self.decoder_optimizer = torch.optim.Adam(
list(self.decoder.parameters())
+ list(self.model.parameters())
+ list(self.reward_model.parameters())
+ task_specific_parameters
+ list(self.encoder.parameters()),
lr=decoder_lr,
weight_decay=decoder_weight_lambda,
)
self.train()
self.critic_target.train()
def train(self, training=True):
self.training = training
self.actor.train(training)
self.critic.train(training)
@property
def alpha(self):
return self.log_alpha.exp()
def act(self, obs, sample=False):
obs = torch.FloatTensor(obs).to(self.device)
obs = obs.unsqueeze(0)
dist = self.actor(obs, detach=True)
action = dist.sample() if sample else dist.mean
action = action.clamp(*self.action_range)
assert action.ndim == 2 and action.shape[0] == 1
return utils.to_np(action[0])
def update_critic(self, obs, action, reward, next_obs, not_done, logger, step):
dist = self.actor(next_obs, detach=False)
next_action = dist.rsample()
log_prob = dist.log_prob(next_action).sum(-1, keepdim=True)
target_Q1, target_Q2 = self.critic_target(next_obs, next_action)
target_V = torch.min(target_Q1, target_Q2) - self.alpha.detach() * log_prob
target_Q = reward + (not_done * self.discount * target_V)
target_Q = target_Q.detach()
# get current Q estimates
current_Q1, current_Q2 = self.critic(obs, action, detach=False)
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(
current_Q2, target_Q
)
logger.log("train_critic/loss", critic_loss, step)
return critic_loss
def update_actor_and_alpha(self, obs, logger, step):
dist = self.actor(obs, detach=True)
action = dist.rsample()
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
actor_Q1, actor_Q2 = self.critic(obs, action, detach=True)
actor_Q = torch.min(actor_Q1, actor_Q2)
actor_loss = (self.alpha.detach() * log_prob - actor_Q).mean()
logger.log("train_actor/loss", actor_loss, step)
logger.log("train_actor/target_entropy", self.target_entropy, step)
logger.log("train_actor/entropy", -log_prob.mean(), step)
self.actor.log(logger, step)
self.log_alpha_optimizer.zero_grad()
alpha_loss = (self.alpha * (-log_prob - self.target_entropy).detach()).mean()
logger.log("train_alpha/loss", alpha_loss, step)
logger.log("train_alpha/value", self.alpha, step)
return actor_loss, alpha_loss
def update_decoder(self, obs, action, reward, target_obs, logger, step, i):
h = self.critic.encoder(obs)
task_specific_h = self.task_specific_encoders[i](obs)
next_h = self.model(torch.cat([h, action], dim=-1))
next_task_specific_h = self.task_specific_models[i](
torch.cat([task_specific_h, action], dim=-1)
)
r_hat = self.reward_model(next_h)
rec_obs = self.decoder(torch.cat([next_h, next_task_specific_h], dim=-1))
rec_loss = F.mse_loss(target_obs, rec_obs)
rew_loss = F.mse_loss(r_hat, reward)
logger.log("train_encoder/rc_loss", rec_loss.item(), step)
# autoencoder loss
# rec_obs = self.decoder(torch.cat([h, task_specific_h], dim=-1))
# rec_loss = F.mse_loss(obs, rec_obs)
# add L2 penalty on latent representation
# see https://arxiv.org/pdf/1903.12436.pdf
latent_loss = (0.5 * h.pow(2).sum(1)).mean()
# add L1 penalty
L1_reg = torch.tensor(0.0, requires_grad=True).to(self.device)
for name, param in self.critic.encoder.named_parameters():
if "weight" in name:
L1_reg = L1_reg + torch.norm(param, 1)
# get classifier entropy
h = self.critic.encoder(obs)
        probs = F.softmax(self.classifier(h), dim=1)
entropy = -1.0 * (probs * torch.log2(probs + 1e-12)).sum(dim=1).mean()
logger.log("train_classifier/entropy", entropy, step)
# compute information bottleneck
KLD = 0.0
if self.encoder_type == "variational":
mu, logvar = self.critic.encoder.encode(obs)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
logger.log("train_encoder/KLD", self.kld * KLD.item(), step)
self.encoder_optimizer.zero_grad()
self.decoder_optimizer.zero_grad()
if step > self.c_ent_iters:
c_ent = self.c_ent
else:
c_ent = 0
(
rec_loss
+ self.decoder_latent_lambda * L1_reg
- c_ent * entropy
+ self.kld * KLD
).backward()
self.encoder_optimizer.step()
self.decoder_optimizer.step()
logger.log("train/model_loss", rec_loss, step)
logger.log("train/reward_loss", rew_loss, step)
def update_classifier(self, obs, env_id):
h = self.critic.encoder(obs)
pred_labels = self.classifier(h)
classifier_loss = F.cross_entropy(pred_labels, env_id)
self.classifier_optimizer.zero_grad()
classifier_loss.backward()
self.classifier_optimizer.step()
def update(self, replay_buffer, logger, step):
total_actor_loss, total_alpha_loss, total_critic_loss, obses, env_ids = (
[],
[],
[],
[],
[],
)
for env_id in range(self.num_envs):
(
obs,
action,
reward,
next_obs,
not_done,
not_done_no_max,
) = replay_buffer.sample(self.batch_size, env_id)
obses.append(obs)
env_ids.append(torch.ones_like(reward).long() * env_id)
logger.log("train/batch_reward", reward.mean(), step)
critic_loss = self.update_critic(
obs, action, reward, next_obs, not_done_no_max, logger, step
)
total_critic_loss.append(critic_loss)
if step % self.actor_update_frequency == 0:
actor_loss, alpha_loss = self.update_actor_and_alpha(obs, logger, step)
total_actor_loss.append(actor_loss)
total_alpha_loss.append(alpha_loss)
self.update_decoder(obs, action, reward, next_obs, logger, step, env_id)
# Optimize the critic
self.critic_optimizer.zero_grad()
torch.stack(total_critic_loss).mean().backward()
self.critic_optimizer.step()
self.critic.log(logger, step)
# Optimize classifier
self.update_classifier(
torch.cat(obses, dim=0), torch.cat(env_ids, dim=0).squeeze()
)
if step % self.actor_update_frequency == 0:
# optimize the actor
self.actor_optimizer.zero_grad()
torch.stack(total_actor_loss).mean().backward()
self.actor_optimizer.step()
self.actor.log(logger, step)
self.log_alpha_optimizer.zero_grad()
torch.stack(total_alpha_loss).mean().backward()
self.log_alpha_optimizer.step()
if step % self.critic_target_update_frequency == 0:
utils.soft_update_params(self.critic, self.critic_target, self.critic_tau)
class IRMAgent(Agent):
"""IRM algorithm."""
def __init__(
self,
obs_dim,
action_dim,
action_range,
device,
encoder_type,
critic_cfg,
actor_cfg,
discount,
init_temperature,
alpha_lr,
l2_regularizer_weight,
alpha_betas,
actor_lr,
actor_betas,
actor_update_frequency,
critic_lr,
critic_betas,
critic_tau,
num_envs,
encoder_feature_dim,
critic_target_update_frequency,
batch_size,
penalty_anneal_iters,
penalty_weight,
):
super().__init__()
self.action_range = action_range
self.device = torch.device(device)
self.discount = discount
self.critic_tau = critic_tau
self.actor_update_frequency = actor_update_frequency
self.critic_target_update_frequency = critic_target_update_frequency
self.batch_size = batch_size
self.num_envs = num_envs
self.l2_regularizer_weight = l2_regularizer_weight
self.penalty_anneal_iters = penalty_anneal_iters
self.penalty_weight = penalty_weight
self.encoder = make_encoder(
encoder_type, obs_dim, encoder_feature_dim, 2, 32
).to(self.device)
self.critic = hydra.utils.instantiate(critic_cfg).to(self.device)
self.critic.encoder = self.encoder
self.critic_target = hydra.utils.instantiate(critic_cfg).to(self.device)
self.critic_target.encoder = make_encoder(
encoder_type, obs_dim, encoder_feature_dim, 2, 32
).to(self.device)
self.critic_target.load_state_dict(self.critic.state_dict())
self.actor = hydra.utils.instantiate(actor_cfg).to(self.device)
self.actor.encoder = self.encoder
self.log_alpha = torch.tensor(np.log(init_temperature)).to(self.device)
self.log_alpha.requires_grad = True
# set target entropy to -|A|
self.target_entropy = -action_dim
# optimizers
self.actor_optimizer = torch.optim.Adam(
self.actor.parameters(), lr=actor_lr, betas=actor_betas
)
self.critic_optimizer = torch.optim.Adam(
list(self.critic.parameters()) + list(self.critic.encoder.parameters()),
lr=critic_lr,
betas=critic_betas,
)
self.log_alpha_optimizer = torch.optim.Adam(
[self.log_alpha], lr=alpha_lr, betas=alpha_betas
)
self.train()
self.critic_target.train()
def train(self, training=True):
self.training = training
self.actor.train(training)
self.critic.train(training)
@property
def alpha(self):
return self.log_alpha.exp()
def act(self, obs, sample=False):
obs = torch.FloatTensor(obs).to(self.device)
obs = obs.unsqueeze(0)
dist = self.actor(obs)
action = dist.sample() if sample else dist.mean
action = action.clamp(*self.action_range)
assert action.ndim == 2 and action.shape[0] == 1
return utils.to_np(action[0])
def update_critic(self, obs, action, reward, next_obs, not_done, logger, step):
dist = self.actor(next_obs)
next_action = dist.rsample()
log_prob = dist.log_prob(next_action).sum(-1, keepdim=True)
target_Q1, target_Q2 = self.critic_target(next_obs, next_action)
target_V = torch.min(target_Q1, target_Q2) - self.alpha.detach() * log_prob
target_Q = reward + (not_done * self.discount * target_V)
target_Q = target_Q.detach()
# get current Q estimates
current_Q1, current_Q2, h = self.critic(obs, action, return_latent=True)
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(
current_Q2, target_Q
)
self.irm_penalty = irm_penalty(current_Q1, target_Q) + irm_penalty(
current_Q2, target_Q
)
logger.log("train_critic/loss", critic_loss, step)
# add L2 penalty on latent representation
# see https://arxiv.org/pdf/1903.12436.pdf
latent_loss = (0.5 * h.pow(2).sum(1)).mean()
# add L1 penalty
L1_reg = torch.tensor(0.0, requires_grad=True).to(self.device)
for name, param in self.critic.encoder.named_parameters():
if "weight" in name:
L1_reg = L1_reg + torch.norm(param, 1)
return critic_loss + self.l2_regularizer_weight * L1_reg, target_V.mean()
def update_actor_and_alpha(self, obs, logger, step):
dist = self.actor(obs, detach=True)
action = dist.rsample()
log_prob = dist.log_prob(action).sum(-1, keepdim=True)
actor_Q1, actor_Q2 = self.critic(obs, action, detach=True)
actor_Q = torch.min(actor_Q1, actor_Q2)
actor_loss = (self.alpha.detach() * log_prob - actor_Q).mean()
logger.log("train_actor/loss", actor_loss, step)
logger.log("train_actor/target_entropy", self.target_entropy, step)
logger.log("train_actor/entropy", -log_prob.mean(), step)
self.actor.log(logger, step)
self.log_alpha_optimizer.zero_grad()
alpha_loss = (self.alpha * (-log_prob - self.target_entropy).detach()).mean()
logger.log("train_alpha/loss", alpha_loss, step)
logger.log("train_alpha/value", self.alpha, step)
return actor_loss, alpha_loss
def update(self, replay_buffer, logger, step):
total_actor_loss, total_alpha_loss, total_critic_loss = [], [], []
target_vs = []
irm_penalties = []
for env_id in range(self.num_envs):
(
obs,
action,
reward,
next_obs,
not_done,
not_done_no_max,
) = replay_buffer.sample(self.batch_size, env_id)
logger.log("train/batch_reward", reward.mean(), step)
critic_loss, target_v = self.update_critic(
obs, action, reward, next_obs, not_done_no_max, logger, step
)
total_critic_loss.append(critic_loss)
target_vs.append(target_v)
if step % self.actor_update_frequency == 0:
actor_loss, alpha_loss = self.update_actor_and_alpha(obs, logger, step)
total_actor_loss.append(actor_loss)
total_alpha_loss.append(alpha_loss)
irm_penalties.append(self.irm_penalty)
# Optimize the critic
train_penalty = torch.stack(irm_penalties).mean()
penalty_weight = (
self.penalty_weight if step >= self.penalty_anneal_iters else 1.0
)
logger.log("train_encoder/penalty", train_penalty, step)
total_critic_loss = torch.stack(total_critic_loss).mean()
total_critic_loss += penalty_weight * train_penalty
if penalty_weight > 1.0:
# Rescale the entire loss to keep gradients in a reasonable range
total_critic_loss /= penalty_weight
self.critic_optimizer.zero_grad()
total_critic_loss.backward()
self.critic_optimizer.step()
self.critic.log(logger, step)
if step % self.actor_update_frequency == 0:
# optimize the actor
self.actor_optimizer.zero_grad()
torch.stack(total_actor_loss).mean().backward()
self.actor_optimizer.step()
self.actor.log(logger, step)
self.log_alpha_optimizer.zero_grad()
torch.stack(total_alpha_loss).mean().backward()
self.log_alpha_optimizer.step()
if step % self.critic_target_update_frequency == 0:
utils.soft_update_params(self.critic, self.critic_target, self.critic_tau)
| 25,968 | 33.90457 | 88 | py |
icp-block-mdp | icp-block-mdp-master/reinforcement_learning/agent/actor.py | # Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
import math
from torch import nn
import torch.nn.functional as F
from torch import distributions as pyd
import utils
class TanhTransform(pyd.transforms.Transform):
domain = pyd.constraints.real
codomain = pyd.constraints.interval(-1.0, 1.0)
bijective = True
sign = +1
def __init__(self, cache_size=1):
super().__init__(cache_size=cache_size)
@staticmethod
def atanh(x):
return 0.5 * (x.log1p() - (-x).log1p())
def __eq__(self, other):
return isinstance(other, TanhTransform)
def _call(self, x):
return x.tanh()
def _inverse(self, y):
        # We do not clamp to the boundary here as it may degrade the performance
        # of certain algorithms; one should use `cache_size=1` instead.
return self.atanh(y)
def log_abs_det_jacobian(self, x, y):
# We use a formula that is more numerically stable, see details in the following link
# https://github.com/tensorflow/probability/commit/ef6bb176e0ebd1cf6e25c6b5cecdd2428c22963f#diff-e120f70e92e6741bca649f04fcd907b7
return 2. * (math.log(2.) - x - F.softplus(-2. * x))
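        # Hedged derivation check of the stable formula above:
        #   log(1 - tanh(x)^2) = log(4 e^{-2x} / (1 + e^{-2x})^2)
        #                      = 2 (log 2 - x - log(1 + e^{-2x}))
        #                      = 2 (log 2 - x - softplus(-2x))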
class SquashedNormal(pyd.transformed_distribution.TransformedDistribution):
def __init__(self, loc, scale):
self.loc = loc
self.scale = scale
self.base_dist = pyd.Normal(loc, scale)
transforms = [TanhTransform()]
super().__init__(self.base_dist, transforms)
@property
def mean(self):
mu = self.loc
for tr in self.transforms:
mu = tr(mu)
return mu
class DiagGaussianActor(nn.Module):
"""torch.distributions implementation of an diagonal Gaussian policy."""
def __init__(self, obs_dim, action_dim, hidden_dim, hidden_depth,
log_std_bounds):
super().__init__()
self.log_std_bounds = log_std_bounds
self.trunk = utils.mlp(obs_dim, hidden_dim, 2 * action_dim,
hidden_depth)
self.encoder = None
self.outputs = dict()
self.apply(utils.weight_init)
def forward(self, obs, detach=False):
if self.encoder is not None:
if detach:
obs = self.encoder(obs).detach()
else:
obs = self.encoder(obs)
mu, log_std = self.trunk(obs).chunk(2, dim=-1)
# constrain log_std inside [log_std_min, log_std_max]
log_std = torch.tanh(log_std)
log_std_min, log_std_max = self.log_std_bounds
log_std = log_std_min + 0.5 * (log_std_max - log_std_min) * (log_std +
1)
std = log_std.exp()
self.outputs['mu'] = mu
self.outputs['std'] = std
dist = SquashedNormal(mu, std)
return dist
def log(self, logger, step):
for k, v in self.outputs.items():
logger.log_histogram(f'train_actor/{k}_hist', v, step)
for i, m in enumerate(self.trunk):
if type(m) == nn.Linear:
                logger.log_param(f'train_actor/fc{i}', m, step)
| 3,183 | 30.215686 | 137 | py |
icp-block-mdp | icp-block-mdp-master/reinforcement_learning/agent/critic.py | # Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils
class DoubleQCritic(nn.Module):
"""Critic network, employes double Q-learning."""
def __init__(self, obs_dim, action_dim, hidden_dim, hidden_depth):
super().__init__()
self.Q1 = utils.mlp(obs_dim + action_dim, hidden_dim, 1, hidden_depth)
self.Q2 = utils.mlp(obs_dim + action_dim, hidden_dim, 1, hidden_depth)
self.encoder = None
self.outputs = dict()
self.apply(utils.weight_init)
def forward(self, obs, action, detach=False, return_latent=False):
assert obs.size(0) == action.size(0)
if self.encoder is not None:
if detach:
obs = self.encoder(obs).detach()
else:
obs = self.encoder(obs)
obs_action = torch.cat([obs, action], dim=-1)
q1 = self.Q1(obs_action)
q2 = self.Q2(obs_action)
self.outputs["q1"] = q1
self.outputs["q2"] = q2
if return_latent:
return q1, q2, obs
return q1, q2
def log(self, logger, step):
for k, v in self.outputs.items():
logger.log_histogram(f"train_critic/{k}_hist", v, step)
assert len(self.Q1) == len(self.Q2)
for i, (m1, m2) in enumerate(zip(self.Q1, self.Q2)):
assert type(m1) == type(m2)
if type(m1) is nn.Linear:
logger.log_param(f"train_critic/q1_fc{i}", m1, step)
logger.log_param(f"train_critic/q2_fc{i}", m2, step)
| 1,620 | 30.173077 | 78 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/utils.py | # The different vectorized envs have been taken from: https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail/blob/master/a2c_ppo_acktr/envs.py
import glob
import os
import random
from collections import defaultdict, deque
from random import sample
from typing import List, Optional, Union
import gym
import numpy as np
import skvideo.io
import torch
import torch.nn as nn
import dmc2gym
from baselines.common.vec_env import VecEnvWrapper
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.common.vec_env.shmem_vec_env import ShmemVecEnv
from sacae.sacae import SacAeAgent
class MultiEnvReplayBuffer(object):
"""Buffer to store environment transitions for multiple environments"""
def __init__(
self, obs_shape, action_shape, capacity, batch_size, device, num_envs: int
):
self.env_id_to_replay_buffer_map = [
ReplayBuffer(
obs_shape=obs_shape,
action_shape=action_shape,
capacity=int(capacity / num_envs),
batch_size=batch_size,
device=device,
)
for _ in range(num_envs)
]
self.num_envs = num_envs
def add(self, env_id, obs, action, reward, next_obs, done):
self.env_id_to_replay_buffer_map[env_id].add(
obs, action, reward, next_obs, done
)
def add_loop(self, obs, action, reward, next_obs, done):
for env_id in range(self.num_envs):
self.env_id_to_replay_buffer_map[env_id].add(
obs=obs[env_id],
action=action[env_id],
reward=reward[env_id],
next_obs=next_obs[env_id],
done=done[env_id],
)
def sample(self, env_id: Optional[int] = None):
if env_id is None:
env_id = random.randint(0, self.num_envs - 1)
return self.env_id_to_replay_buffer_map[env_id].sample()
def save(self, save_dir):
for idx, replay_buffer in enumerate(self.env_id_to_replay_buffer_map):
replay_buffer.save(f"{save_dir}/{idx}")
def load(self, save_dir):
for idx, replay_buffer in enumerate(self.env_id_to_replay_buffer_map):
replay_buffer.load(f"{save_dir}/{idx}")
class ReplayBuffer(object):
"""Buffer to store environment transitions."""
def __init__(self, obs_shape, action_shape, capacity, batch_size, device):
self.capacity = capacity
self.batch_size = batch_size
self.device = device
# the proprioceptive obs is stored as float32, pixels obs as uint8
obs_dtype = np.float32 if len(obs_shape) == 1 else np.uint8
self.obses = np.empty((capacity, *obs_shape), dtype=obs_dtype)
self.next_obses = np.empty((capacity, *obs_shape), dtype=obs_dtype)
self.actions = np.empty((capacity, *action_shape), dtype=np.float32)
self.rewards = np.empty((capacity, 1), dtype=np.float32)
self.not_dones = np.empty((capacity, 1), dtype=np.float32)
self.idx = 0
self.last_save = 0
self.full = False
def add(self, obs, action, reward, next_obs, done):
np.copyto(self.obses[self.idx], obs)
np.copyto(self.actions[self.idx], action)
np.copyto(self.rewards[self.idx], reward)
np.copyto(self.next_obses[self.idx], next_obs)
np.copyto(self.not_dones[self.idx], not done)
self.idx = (self.idx + 1) % self.capacity
self.full = self.full or self.idx == 0
def sample(self):
idxs = np.random.randint(
0, self.capacity if self.full else self.idx, size=self.batch_size
)
obses = torch.as_tensor(self.obses[idxs], device=self.device).float()
actions = torch.as_tensor(self.actions[idxs], device=self.device)
rewards = torch.as_tensor(self.rewards[idxs], device=self.device)
next_obses = torch.as_tensor(self.next_obses[idxs], device=self.device).float()
not_dones = torch.as_tensor(self.not_dones[idxs], device=self.device)
return obses, actions, rewards, next_obses, not_dones
def save(self, save_dir):
if self.idx == self.last_save:
return
path = os.path.join(save_dir, "%d_%d.pt" % (self.last_save, self.idx))
payload = [
self.obses[self.last_save : self.idx],
self.next_obses[self.last_save : self.idx],
self.actions[self.last_save : self.idx],
self.rewards[self.last_save : self.idx],
self.not_dones[self.last_save : self.idx],
]
self.last_save = self.idx
torch.save(payload, path)
def load(self, save_dir):
chunks = os.listdir(save_dir)
        chunks = sorted(chunks, key=lambda x: int(x.split("_")[0]))
        for chunk in chunks:
start, end = [int(x) for x in chunk.split(".")[0].split("_")]
path = os.path.join(save_dir, chunk)
payload = torch.load(path)
assert self.idx == start
self.obses[start:end] = payload[0]
self.next_obses[start:end] = payload[1]
self.actions[start:end] = payload[2]
self.rewards[start:end] = payload[3]
self.not_dones[start:end] = payload[4]
self.idx = end
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
gym.Wrapper.__init__(self, env)
self._k = k
self._frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=1,
shape=((shp[0] * k,) + shp[1:]),
dtype=env.observation_space.dtype,
)
self._max_episode_steps = env._max_episode_steps
def reset(self):
obs, info = self.env.reset()
for _ in range(self._k):
self._frames.append(obs)
return self._get_obs(), info
def step(self, action):
obs, reward, done, info = self.env.step(action)
self._frames.append(obs)
return self._get_obs(), reward, done, info
def _get_obs(self):
assert len(self._frames) == self._k
return np.concatenate(list(self._frames), axis=0)
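# Hedged shape note for FrameStack: with k=3 and per-frame observations of
# shape (3, 84, 84) (illustrative numbers), stacked observations come out as
# (9, 84, 84) -- frames are concatenated along the channel axis.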
class VecPyTorch(VecEnvWrapper):
def __init__(self, venv, device, num_envs):
"""Return only every `skip`-th frame"""
super(VecPyTorch, self).__init__(venv)
self.device = device
self.num_envs = num_envs
# TODO: Fix data types
def reset(self):
obs, info = self.venv.reset()
# obs = torch.from_numpy(obs).float().to(self.device)
obs = torch.from_numpy(obs).to(self.device)
state = torch.cat([torch.from_numpy(x["state"]).unsqueeze(0) for x in info]).to(
self.device
)
return obs, state
def step_async(self, actions):
if isinstance(actions, torch.LongTensor):
# Squeeze the dimension for discrete actions
actions = actions.squeeze(1)
actions = actions.cpu().numpy()
self.venv.step_async(actions)
def step_wait(self):
obs, reward, done, info = self.venv.step_wait()
# obs = torch.from_numpy(obs).float().to(self.device)
obs = torch.from_numpy(obs).to(self.device)
reward = torch.from_numpy(reward).unsqueeze(dim=1).float()
state = torch.cat([torch.from_numpy(x["state"]).unsqueeze(0) for x in info]).to(
self.device
)
return obs, reward, done, state
# Derived from
# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_frame_stack.py
class VecPyTorchFrameStack(VecEnvWrapper):
def __init__(self, venv, nstack, device=None):
self.venv = venv
self.nstack = nstack
wos = venv.observation_space # wrapped ob space
self.shape_dim0 = wos.shape[0]
low = np.repeat(wos.low, self.nstack, axis=0)
high = np.repeat(wos.high, self.nstack, axis=0)
if device is None:
device = torch.device("cpu")
self.stacked_obs = torch.zeros((venv.num_envs,) + low.shape).to(device)
observation_space = gym.spaces.Box(
low=low, high=high, dtype=venv.observation_space.dtype
)
VecEnvWrapper.__init__(self, venv, observation_space=observation_space)
def step_wait(self):
obs, rews, news, infos = self.venv.step_wait()
self.stacked_obs[:, : -self.shape_dim0] = self.stacked_obs[:, self.shape_dim0 :]
for (i, new) in enumerate(news):
if new:
self.stacked_obs[i] = 0
self.stacked_obs[:, -self.shape_dim0 :] = obs
return self.stacked_obs, rews, news, infos
def reset(self):
obs = self.venv.reset()
if torch.backends.cudnn.deterministic:
self.stacked_obs = torch.zeros(self.stacked_obs.shape)
else:
self.stacked_obs.zero_()
self.stacked_obs[:, -self.shape_dim0 :] = obs
return self.stacked_obs
def close(self):
self.venv.close()
def make_env(
args, seed: int, resource_files: Optional[Union[List[str], str]], camera_id: int
):
env = dmc2gym.make(
domain_name=args.domain_name,
task_name=args.task_name,
resource_files=resource_files,
img_source=None,
total_frames=None,
seed=args.seed + seed,
visualize_reward=False,
from_pixels=(args.encoder_type == "pixel"),
height=args.image_size,
width=args.image_size,
frame_skip=args.action_repeat,
camera_id=camera_id,
)
env.seed(args.seed)
if args.encoder_type == "pixel":
env = FrameStack(env, k=args.frame_stack)
return env
def make_dummy_env(args):
env = dmc2gym.make(
domain_name=args.domain_name,
task_name=args.task_name,
resource_files=None,
img_source=None,
total_frames=None,
seed=args.seed + 1234,
visualize_reward=False,
from_pixels=(args.encoder_type == "pixel"),
height=args.image_size,
width=args.image_size,
frame_skip=args.action_repeat,
)
env.seed(args.seed)
if args.encoder_type == "pixel":
env = FrameStack(env, k=args.frame_stack)
return env
def fn_to_make_env(
args, seed: int, resource_files: Union[List[str], str], camera_id: int
):
def fn():
return make_env(
args=args, seed=seed, resource_files=resource_files, camera_id=camera_id
)
return fn
def make_vec_envs(fns_to_make_envs, device):
if len(fns_to_make_envs) > 1:
envs = ShmemVecEnv(fns_to_make_envs, context="spawn")
else:
envs = DummyVecEnv(fns_to_make_envs)
envs = VecPyTorch(envs, device, len(fns_to_make_envs))
return envs
class eval_mode(object):
def __init__(self, *models):
self.models = models
def __enter__(self):
self.prev_states = []
for model in self.models:
self.prev_states.append(model.training)
model.train(False)
def __exit__(self, *args):
for model, state in zip(self.models, self.prev_states):
model.train(state)
return False
def soft_update_params(net, target_net, tau):
for param, target_param in zip(net.parameters(), target_net.parameters()):
target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
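# Hedged note: soft_update_params is Polyak/EMA averaging,
#   theta_target <- tau * theta + (1 - tau) * theta_target,
# so with e.g. tau = 0.005 the target network tracks the online network over
# an effective horizon of roughly 1 / tau = 200 updates.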
def set_seed_everywhere(seed):
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
def module_hash(module):
result = 0
for tensor in module.state_dict().values():
result += tensor.sum().item()
return result
def make_dir(dir_path):
try:
os.makedirs(dir_path, exist_ok=True)
except OSError:
pass
return dir_path
def collect_data_using_fn(
env: FrameStack,
env_id: int,
num_samples: int,
replay_buffer: MultiEnvReplayBuffer,
fn_to_get_action,
save_video=False,
):
obs = env.reset()
if save_video:
frames = [obs]
for i in range(num_samples):
action = fn_to_get_action(obs)
next_obs, reward, done, _ = env.step(action)
replay_buffer.add(env_id, obs, action, reward, next_obs, done)
if save_video:
frames.append(next_obs)
obs = next_obs
if done:
obs = env.reset()
if save_video:
skvideo.io.vwrite(
f"video/{str(env)}_{env.unwrapped._img_source}.mp4", frames
)
save_video = False
return replay_buffer
def collect_random_data(
env: FrameStack,
env_id: int,
num_samples: int,
replay_buffer: MultiEnvReplayBuffer,
save_video: bool = False,
):
fn_to_get_action = lambda obs: env.action_space.sample()
return collect_data_using_fn(
env, env_id, num_samples, replay_buffer, fn_to_get_action, save_video
)
def collect_data_using_policy(
env: FrameStack,
env_id: int,
num_samples: int,
replay_buffer: MultiEnvReplayBuffer,
policy: SacAeAgent,
save_video: bool = False,
):
fn_to_get_action = lambda obs: policy.sample_action(obs)
return collect_data_using_fn(
env, env_id, num_samples, replay_buffer, fn_to_get_action, save_video
)
def collect_both_state_and_obs_using_policy_vec(
vec_env: VecPyTorch,
num_samples: int,
replay_buffer: MultiEnvReplayBuffer,
policy: SacAeAgent,
save_video: bool = False,
):
def fn_to_get_action(obs):
return policy.sample_action(obs.float())
return collect_data_using_fn_vec(
vec_env, num_samples, replay_buffer, fn_to_get_action, save_video
)
def collect_data_using_fn_vec(
vec_env: VecPyTorch,
num_samples: int,
replay_buffer: MultiEnvReplayBuffer,
fn_to_get_action,
save_video: bool = False,
):
make_vector_using_val = lambda val: np.full(vec_env.num_envs, val)
obs, state = vec_env.reset()
if save_video:
frames = [obs]
    save_video = make_vector_using_val(save_video)
    save_frame = np.any(save_video)
for i in range(num_samples):
action = fn_to_get_action(state)
next_obs, reward, done, next_state = vec_env.step(action)
replay_buffer.add_loop(obs, action, reward, next_obs, done)
obs = next_obs
state = next_state
if save_frame:
frames.append(next_obs)
for env_id in range(vec_env.num_envs):
if done[env_id] and save_video[env_id]:
env_frames = torch.cat(
[x[env_id].unsqueeze(0) for x in frames], dim=0
)
for env_id in range(vec_env.num_envs):
skvideo.io.vwrite(
f"video/env_id_{env_id}.mp4",
env_frames
# f"video/{str(env)}_{env.unwrapped._img_source}.mp4", frames
)
save_video[env_id] = False
save_frame = np.any(save_video)
return replay_buffer
def collect_random_data_vec(
vec_env: VecPyTorch,
num_samples: int,
replay_buffer: MultiEnvReplayBuffer,
save_video: bool = False,
):
make_tensor_using_fn = lambda fn: torch.tensor(
[fn() for _ in range(vec_env.num_envs)]
)
fn_to_get_action = lambda obs: make_tensor_using_fn(vec_env.action_space.sample)
return collect_data_using_fn_vec(
vec_env, num_samples, replay_buffer, fn_to_get_action, save_video
)
def collect_data_using_policy_vec(
vec_env: VecPyTorch,
num_samples: int,
replay_buffer: MultiEnvReplayBuffer,
policy: SacAeAgent,
save_video: bool = False,
):
fn_to_get_action = lambda obs: policy.sample_action(obs.float())
return collect_data_using_fn_vec(
vec_env, num_samples, replay_buffer, fn_to_get_action, save_video
)
def preprocess_obs(obs, bits=5):
"""Preprocessing image, see https://arxiv.org/abs/1807.03039."""
bins = 2 ** bits
assert obs.dtype == torch.float32
if bits < 8:
obs = torch.floor(obs / 2 ** (8 - bits))
obs = obs / bins
obs = obs + torch.rand_like(obs) / bins
obs = obs - 0.5
return obs
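# Hedged worked example for preprocess_obs: with bits=5, a uint8 pixel value v
# maps to floor(v / 8) / 32 + U[0, 1/32) - 0.5, i.e. one of 32 bins centred in
# [-0.5, 0.5); e.g. v = 255 lands in [0.469, 0.5).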
def populate_buffer_with_random_data(
envs: List[FrameStack],
buffer: MultiEnvReplayBuffer,
save_video: bool,
num_samples: int,
):
for env_id, env in enumerate(envs):
buffer = collect_random_data(
env=env,
env_id=env_id,
num_samples=num_samples,
replay_buffer=buffer,
save_video=save_video,
)
return buffer
| 16,718 | 30.545283 | 144 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/collect_data_using_expert_policy.py | # Copyright (c) Facebook, Inc. and its affiliates.
"""Method to train the one encoder baseline for imitation learning"""
import json
import os
from argparse import Namespace
from time import time
import torch
import utils
from argument_parser import parse_args
from model_utils.bootstrap.common import create_multi_env_replay_buffer, make_logbook
from model_utils.env import make_fns_to_make_train_and_eval_envs
from sacae.vec_logger import VecLogger
from sacae_utils import bootstrap as sacae_bootstrap
def bootstrap_envs_and_buffer(args: Namespace):
"""Method to bootstrap the envs, buffer and related objects"""
logbook = make_logbook(args=args)
device = "cuda" if torch.cuda.is_available() else "cpu"
utils.make_dir(args.work_dir)
with open(os.path.join(args.work_dir, "args.json"), "w") as f:
json.dump(vars(args), f, sort_keys=True, indent=4)
dummy_env = utils.make_dummy_env(args=args)
pixel_space_obs = dummy_env.env.env._get_observation_space_for_pixel_space(
args.image_size, args.image_size
)
state_space_obs = dummy_env.env.env._get_observation_space_for_state_space()
action_size = dummy_env.action_space.shape[0]
train_replay_buffer = create_multi_env_replay_buffer(
args=args, env=dummy_env, device=device, num_envs=args.num_train_envs
)
eval_replay_buffer = create_multi_env_replay_buffer(
args=args, env=dummy_env, device=device, num_envs=args.num_eval_envs
)
(
fns_to_make_train_envs,
fns_to_make_eval_envs,
) = make_fns_to_make_train_and_eval_envs(args=args)
max_episode_steps = dummy_env._max_episode_steps
vec_train_envs = utils.make_vec_envs(
fns_to_make_envs=fns_to_make_train_envs, device=None,
)
vec_eval_envs = utils.make_vec_envs(
fns_to_make_envs=fns_to_make_eval_envs, device=None,
)
logging_dict = {
"steps": [],
"model_error_in_latent_state": [],
"model_error_in_eta_state": [],
"reward_error": [],
"decoding_error": [],
"test_model_error_in_latent_state": [],
"test_model_error_in_eta_state": [],
"test_reward_error": [],
"test_decoding_error": [],
"discriminator_loss": [],
"encoder_discriminator_loss": [],
"test_encoder_discriminator_loss": [],
}
return (
logbook,
device,
vec_train_envs,
vec_eval_envs,
state_space_obs,
pixel_space_obs,
action_size,
train_replay_buffer,
eval_replay_buffer,
logging_dict,
max_episode_steps,
)
def bootstrap_agent(args: Namespace, obs_shape, action_size, device):
video_dir, model_dir, buffer_dir, video = sacae_bootstrap.make_dirs_and_recorders(
args=args
)
agent = sacae_bootstrap.make_expert(
obs_shape=obs_shape, action_shape=(action_size,), args=args, device=device,
)
L = VecLogger(args.work_dir, use_tb=args.save_tb, num_envs=args.num_train_envs)
return (
video_dir,
model_dir,
buffer_dir,
video,
device,
agent,
L,
)
def main():
args = parse_args(should_use_model=True, should_use_rl=True)
args.load_model = True
args.return_both_pixel_and_state = True
args.change_angle = True
args.num_train_envs = 2
args.num_eval_envs = 1
(
logbook,
device,
vec_train_envs,
vec_eval_envs,
state_space_obs,
pixel_space_obs,
action_size,
train_replay_buffer,
eval_replay_buffer,
logging_dict,
max_episode_steps,
) = bootstrap_envs_and_buffer(args)
args.encoder_type = "identity"
args.decoder_type = "identity"
(video_dir, model_dir, buffer_dir, video, device, agent, L,) = bootstrap_agent(
args, state_space_obs.shape, action_size, device
)
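# args.load_model_path is expected to end in "_<step>" (e.g. a hypothetical
# ".../model_100000"), so rsplit below recovers the model directory and the
# step suffix used in the checkpoint names (actor_<step>.pt, critic_<step>.pt).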
model_dir, step = args.load_model_path.rsplit("_", 1)
agent.load(
model_dir=model_dir, step=step,
)
start = time()
# collect data across environments
train_replay_buffer = utils.collect_both_state_and_obs_using_policy_vec(
vec_env=vec_train_envs,
num_samples=50000,
replay_buffer=train_replay_buffer,
policy=agent,
save_video=args.save_video,
)
train_replay_buffer.save(f"{args.save_buffer_path}/train")
eval_replay_buffer = utils.collect_both_state_and_obs_using_policy_vec(
vec_env=vec_eval_envs,
num_samples=50000,
replay_buffer=eval_replay_buffer,
policy=agent,
save_video=False,
)
eval_replay_buffer.save(f"{args.save_buffer_path}/eval")
end = time()
print(f" Time to collect {args.num_samples} datapoints = {end - start}")
if __name__ == "__main__":
main()
| 4,838 | 26.185393 | 86 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/sacae/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
import os
import random
from collections import deque
import gym
import numpy as np
import torch
import torch.nn as nn
class eval_mode(object):
def __init__(self, *models):
self.models = models
def __enter__(self):
self.prev_states = []
for model in self.models:
self.prev_states.append(model.training)
model.train(False)
def __exit__(self, *args):
for model, state in zip(self.models, self.prev_states):
model.train(state)
return False
def soft_update_params(net, target_net, tau):
for param, target_param in zip(net.parameters(), target_net.parameters()):
target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
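# Polyak averaging: e.g. with tau=0.005 each call nudges the target network
# toward the online one, target <- 0.005 * param + 0.995 * target.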
def set_seed_everywhere(seed):
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
def module_hash(module):
result = 0
for tensor in module.state_dict().values():
result += tensor.sum().item()
return result
def make_dir(dir_path):
try:
os.mkdir(dir_path)
except OSError:
pass
return dir_path
def preprocess_obs(obs, bits=5):
"""Preprocessing image, see https://arxiv.org/abs/1807.03039."""
bins = 2 ** bits
assert obs.dtype == torch.float32
if bits < 8:
obs = torch.floor(obs / 2 ** (8 - bits))
obs = obs / bins
obs = obs + torch.rand_like(obs) / bins
obs = obs - 0.5
return obs
class ReplayBuffer(object):
"""Buffer to store environment transitions."""
def __init__(self, obs_shape, action_shape, capacity, batch_size, device):
self.capacity = capacity
self.batch_size = batch_size
self.device = device
# the proprioceptive obs is stored as float32, pixels obs as uint8
obs_dtype = np.float32 if len(obs_shape) == 1 else np.uint8
self.obses = np.empty((capacity, *obs_shape), dtype=obs_dtype)
self.next_obses = np.empty((capacity, *obs_shape), dtype=obs_dtype)
self.actions = np.empty((capacity, *action_shape), dtype=np.float32)
self.rewards = np.empty((capacity, 1), dtype=np.float32)
self.not_dones = np.empty((capacity, 1), dtype=np.float32)
self.idx = 0
self.last_save = 0
self.full = False
def add(self, obs, action, reward, next_obs, done):
np.copyto(self.obses[self.idx], obs)
np.copyto(self.actions[self.idx], action)
np.copyto(self.rewards[self.idx], reward)
np.copyto(self.next_obses[self.idx], next_obs)
np.copyto(self.not_dones[self.idx], not done)
self.idx = (self.idx + 1) % self.capacity
self.full = self.full or self.idx == 0
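# Ring-buffer behaviour of add(): with capacity=3, idx goes 1, 2, 0, 1, ...
# across successive calls, and `full` latches to True once idx wraps to 0.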
def sample(self):
idxs = np.random.randint(
0, self.capacity if self.full else self.idx, size=self.batch_size
)
obses = torch.as_tensor(self.obses[idxs], device=self.device).float()
actions = torch.as_tensor(self.actions[idxs], device=self.device)
rewards = torch.as_tensor(self.rewards[idxs], device=self.device)
next_obses = torch.as_tensor(self.next_obses[idxs], device=self.device).float()
not_dones = torch.as_tensor(self.not_dones[idxs], device=self.device)
return obses, actions, rewards, next_obses, not_dones
def save(self, save_dir):
if self.idx == self.last_save:
return
path = os.path.join(save_dir, "%d_%d.pt" % (self.last_save, self.idx))
payload = [
self.obses[self.last_save : self.idx],
self.next_obses[self.last_save : self.idx],
self.actions[self.last_save : self.idx],
self.rewards[self.last_save : self.idx],
self.not_dones[self.last_save : self.idx],
]
self.last_save = self.idx
torch.save(payload, path)
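# save() emits incremental chunks named "<start>_<end>.pt"; load() below
# replays them in order and asserts each chunk starts where idx left off.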
def load(self, save_dir):
chunks = os.listdir(save_dir)
chunks = sorted(chunks, key=lambda x: int(x.split("_")[0]))
for chunk in chunks:
start, end = [int(x) for x in chunk.split(".")[0].split("_")]
path = os.path.join(save_dir, chunk)
payload = torch.load(path)
assert self.idx == start
self.obses[start:end] = payload[0]
self.next_obses[start:end] = payload[1]
self.actions[start:end] = payload[2]
self.rewards[start:end] = payload[3]
self.not_dones[start:end] = payload[4]
self.idx = end
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
gym.Wrapper.__init__(self, env)
self._k = k
self._frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=1,
shape=((shp[0] * k,) + shp[1:]),
dtype=env.observation_space.dtype,
)
self._max_episode_steps = env._max_episode_steps
def reset(self):
obs = self.env.reset()
for _ in range(self._k):
self._frames.append(obs)
return self._get_obs()
def step(self, action):
obs, reward, done, info = self.env.step(action)
self._frames.append(obs)
return self._get_obs(), reward, done, info
def _get_obs(self):
assert len(self._frames) == self._k
return np.concatenate(list(self._frames), axis=0)
| 5,490 | 31.3 | 87 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/sacae/encoder.py | # Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
def tie_weights(src, trg):
assert type(src) == type(trg)
trg.weight = src.weight
trg.bias = src.bias
OUT_DIM = {2: 39, 4: 35, 6: 31}
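# These spatial sizes assume 84x84 inputs: the first 3x3 conv with stride 2
# gives floor((84 - 3) / 2) + 1 = 41, and each of the (num_layers - 1)
# stride-1 3x3 convs subtracts 2, e.g. num_layers=4 -> 41 - 6 = 35.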
class PixelEncoder(nn.Module):
"""Convolutional encoder of pixels observations."""
def __init__(self, obs_shape, feature_dim, num_layers=2, num_filters=32):
super().__init__()
assert len(obs_shape) == 3
self.feature_dim = feature_dim
self.num_layers = num_layers
self.convs = nn.ModuleList([nn.Conv2d(obs_shape[0], num_filters, 3, stride=2)])
# self.convs = nn.ModuleList([nn.Conv2d(3, num_filters, 3, stride=2)])
for i in range(num_layers - 1):
self.convs.append(nn.Conv2d(num_filters, num_filters, 3, stride=1))
out_dim = OUT_DIM[num_layers]
self.fc = nn.Linear(num_filters * out_dim * out_dim, self.feature_dim)
self.ln = nn.LayerNorm(self.feature_dim)
self.outputs = dict()
def reparameterize(self, mu, logstd):
std = torch.exp(logstd)
eps = torch.randn_like(std)
return mu + eps * std
def forward_conv(self, obs):
obs = obs / 255.0
self.outputs["obs"] = obs
conv = torch.relu(self.convs[0](obs))
self.outputs["conv1"] = conv
for i in range(1, self.num_layers):
conv = torch.relu(self.convs[i](conv))
self.outputs["conv%s" % (i + 1)] = conv
h = conv.view(conv.size(0), -1)
return h
def forward(self, obs, detach=False):
if detach:
with torch.no_grad():
return self._forward(obs)
return self._forward(obs)
def _forward(self, obs):
h = self.forward_conv(obs)
h_fc = self.fc(h)
self.outputs["fc"] = h_fc
h_norm = self.ln(h_fc)
self.outputs["ln"] = h_norm
out = torch.tanh(h_norm)
self.outputs["tanh"] = out
return out
# def forward(self, obs, detach=False):
# h = self.forward_conv(obs)
# if detach:
# h = h.detach()
# h_fc = self.fc(h)
# self.outputs["fc"] = h_fc
# h_norm = self.ln(h_fc)
# self.outputs["ln"] = h_norm
# out = torch.tanh(h_norm)
# self.outputs["tanh"] = out
# return out
def copy_conv_weights_from(self, source):
"""Tie convolutional layers"""
# only tie conv layers
for i in range(self.num_layers):
tie_weights(src=source.convs[i], trg=self.convs[i])
def log(self, L, step, log_freq):
if step % log_freq != 0:
return
for k, v in self.outputs.items():
L.log_histogram("train_encoder/%s_hist" % k, v, step)
if len(v.shape) > 2:
L.log_image("train_encoder/%s_img" % k, v[0], step)
for i in range(self.num_layers):
L.log_param("train_encoder/conv%s" % (i + 1), self.convs[i], step)
L.log_param("train_encoder/fc", self.fc, step)
L.log_param("train_encoder/ln", self.ln, step)
class VecPixelEncoder(PixelEncoder):
"""Convolutional encoder of pixels observations."""
def __init__(
self, obs_shape, feature_dim: int, num_layers: int = 2, num_filters: int = 32
):
super().__init__(
obs_shape=obs_shape,
feature_dim=feature_dim,
num_layers=num_layers,
num_filters=num_filters,
)
def log(self, L, step, log_freq, env_idx):
if step % log_freq != 0:
return
for k, v in self.outputs.items():
L.log_histogram("train_encoder/%s_hist" % k, v, step, env_idx=env_idx)
if len(v.shape) > 2:
L.log_image("train_encoder/%s_img" % k, v[0], step, env_idx=env_idx)
for i in range(self.num_layers):
L.log_param(
"train_encoder/conv%s" % (i + 1), self.convs[i], step, env_idx=env_idx
)
L.log_param("train_encoder/fc", self.fc, step, env_idx=env_idx)
L.log_param("train_encoder/ln", self.ln, step, env_idx=env_idx)
class IdentityEncoder(nn.Module):
def __init__(self, obs_shape, feature_dim, num_layers, num_filters):
super().__init__()
assert len(obs_shape) == 1
self.feature_dim = obs_shape[0]
def forward(self, obs, detach=False):
return obs
def copy_conv_weights_from(self, source):
pass
def log(self, L, step, log_freq):
pass
class VecIdentityEncoder(IdentityEncoder):
def __init__(self, obs_shape, feature_dim, num_layers, num_filters):
super().__init__(
obs_shape=obs_shape,
feature_dim=feature_dim,
num_layers=num_layers,
num_filters=num_filters,
)
def forward(self, obs, detach=False):
return obs
def copy_conv_weights_from(self, source):
pass
def log(self, L, step, log_freq, env_idx):
pass
_AVAILABLE_ENCODERS = {"pixel": PixelEncoder, "identity": IdentityEncoder}
_AVAILABLE_VEC_ENCODERS = {"pixel": VecPixelEncoder, "identity": VecIdentityEncoder}
def make_encoder(encoder_type, obs_shape, feature_dim, num_layers, num_filters):
assert encoder_type in _AVAILABLE_ENCODERS
return _AVAILABLE_ENCODERS[encoder_type](
obs_shape, feature_dim, num_layers, num_filters
)
def make_vec_encoder(encoder_type, obs_shape, feature_dim, num_layers, num_filters):
assert encoder_type in _AVAILABLE_VEC_ENCODERS
return _AVAILABLE_VEC_ENCODERS[encoder_type](
obs_shape, feature_dim, num_layers, num_filters
)
| 5,688 | 27.445 | 87 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/sacae/logger.py | # Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
import shutil
from collections import defaultdict
import numpy as np
import torch
import torchvision
from termcolor import colored
from torch.utils.tensorboard import SummaryWriter
FORMAT_CONFIG = {
"rl": {
"train": [
("episode", "E", "int"),
("step", "S", "int"),
("duration", "D", "time"),
("episode_reward", "R", "float"),
("batch_reward", "BR", "float"),
("actor_loss", "ALOSS", "float"),
("critic_loss", "CLOSS", "float"),
("ae_loss", "RLOSS", "float"),
],
"eval": [("step", "S", "int"), ("episode_reward", "ER", "float")],
}
}
class AverageMeter(object):
def __init__(self):
self._sum = 0
self._count = 0
def update(self, value, n=1):
self._sum += value
self._count += n
def value(self):
return self._sum / max(1, self._count)
class MetersGroup(object):
def __init__(self, file_name, formating):
self._file_name = file_name
if os.path.exists(file_name):
os.remove(file_name)
self._formating = formating
self._meters = defaultdict(AverageMeter)
def log(self, key, value, n=1):
self._meters[key].update(value, n)
def _prime_meters(self):
data = dict()
for key, meter in self._meters.items():
if key.startswith("train"):
key = key[len("train") + 1 :]
else:
key = key[len("eval") + 1 :]
key = key.replace("/", "_")
data[key] = meter.value()
return data
def _dump_to_file(self, data):
with open(self._file_name, "a") as f:
f.write(json.dumps(data) + "\n")
def _format(self, key, value, ty):
template = "%s: "
if ty == "int":
template += "%d"
elif ty == "float":
template += "%.04f"
elif ty == "time":
template += "%.01f s"
else:
raise "invalid format type: %s" % ty
return template % (key, value)
def _dump_to_console(self, data, prefix):
# prefix = colored(prefix, "yellow" if prefix == "train" else "green")
pieces = ["{:5}".format(prefix)]
for key, disp_key, ty in self._formating:
value = data.get(key, 0)
pieces.append(self._format(disp_key, value, ty))
print("| %s" % (" | ".join(pieces)))
def dump(self, step, prefix):
if len(self._meters) == 0:
return
data = self._prime_meters()
data["step"] = step
self._dump_to_file(data)
self._dump_to_console(data, prefix)
self._meters.clear()
class Logger(object):
def __init__(self, log_dir, use_tb=True, config="rl"):
self._log_dir = log_dir
if use_tb:
tb_dir = os.path.join(log_dir, "tb")
if os.path.exists(tb_dir):
shutil.rmtree(tb_dir)
self._sw = SummaryWriter(tb_dir)
else:
self._sw = None
self._train_mg = MetersGroup(
os.path.join(log_dir, "train.log"), formating=FORMAT_CONFIG[config]["train"]
)
self._eval_mg = MetersGroup(
os.path.join(log_dir, "eval.log"), formating=FORMAT_CONFIG[config]["eval"]
)
def _try_sw_log(self, key, value, step):
if self._sw is not None:
self._sw.add_scalar(key, value, step)
def _try_sw_log_image(self, key, image, step):
if self._sw is not None:
assert image.dim() == 3
grid = torchvision.utils.make_grid(image.unsqueeze(1))
self._sw.add_image(key, grid, step)
def _try_sw_log_video(self, key, frames, step):
if self._sw is not None:
frames = torch.from_numpy(np.array(frames))
frames = frames.unsqueeze(0)
self._sw.add_video(key, frames, step, fps=30)
def _try_sw_log_histogram(self, key, histogram, step):
if self._sw is not None:
self._sw.add_histogram(key, histogram, step)
def log(self, key, value, step, n=1):
assert key.startswith("train") or key.startswith("eval")
if type(value) == torch.Tensor:
value = value.item()
self._try_sw_log(key, value / n, step)
mg = self._train_mg if key.startswith("train") else self._eval_mg
mg.log(key, value, n)
def log_param(self, key, param, step):
self.log_histogram(key + "_w", param.weight.data, step)
if hasattr(param.weight, "grad") and param.weight.grad is not None:
self.log_histogram(key + "_w_g", param.weight.grad.data, step)
if hasattr(param, "bias"):
self.log_histogram(key + "_b", param.bias.data, step)
if hasattr(param.bias, "grad") and param.bias.grad is not None:
self.log_histogram(key + "_b_g", param.bias.grad.data, step)
def log_image(self, key, image, step):
assert key.startswith("train") or key.startswith("eval")
self._try_sw_log_image(key, image, step)
def log_video(self, key, frames, step):
assert key.startswith("train") or key.startswith("eval")
self._try_sw_log_video(key, frames, step)
def log_histogram(self, key, histogram, step):
assert key.startswith("train") or key.startswith("eval")
self._try_sw_log_histogram(key, histogram, step)
def dump(self, step):
self._train_mg.dump(step, "train")
self._eval_mg.dump(step, "eval")
| 5,597 | 32.12426 | 88 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/sacae/sacae.py | # Copyright (c) Facebook, Inc. and its affiliates.
import copy
import math
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import sacae.utils as utils
from sacae.decoder import make_decoder
from sacae.encoder import make_encoder
LOG_FREQ = 10000
def gaussian_logprob(noise, log_std):
"""Compute Gaussian log probability."""
residual = (-0.5 * noise.pow(2) - log_std).sum(-1, keepdim=True)
return residual - 0.5 * np.log(2 * np.pi) * noise.size(-1)
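# Identity used above for a diagonal Gaussian with noise = (x - mu) / std:
#   log p(x) = sum_i (-0.5 * noise_i ** 2 - log_std_i) - 0.5 * d * log(2 * pi)
# where d = noise.size(-1) is the action dimensionality.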
def squash(mu, pi, log_pi):
"""Apply squashing function.
See appendix C from https://arxiv.org/pdf/1812.05905.pdf.
"""
mu = torch.tanh(mu)
if pi is not None:
pi = torch.tanh(pi)
if log_pi is not None:
log_pi -= torch.log(F.relu(1 - pi.pow(2)) + 1e-6).sum(-1, keepdim=True)
return mu, pi, log_pi
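# The log_pi correction above is the tanh change of variables,
# log p(a) = log p(u) - sum_i log(1 - tanh(u_i) ** 2); relu(...) + 1e-6 keeps
# the argument of the log strictly positive in float32.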
def weight_init(m):
"""Custom weight init for Conv2D and Linear layers."""
if isinstance(m, nn.Linear):
nn.init.orthogonal_(m.weight.data)
m.bias.data.fill_(0.0)
elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
# delta-orthogonal init from https://arxiv.org/pdf/1806.05393.pdf
assert m.weight.size(2) == m.weight.size(3)
m.weight.data.fill_(0.0)
m.bias.data.fill_(0.0)
mid = m.weight.size(2) // 2
gain = nn.init.calculate_gain("relu")
nn.init.orthogonal_(m.weight.data[:, :, mid, mid], gain)
class Actor(nn.Module):
"""MLP actor network."""
def __init__(
self,
obs_shape,
action_shape,
hidden_dim,
encoder_type,
encoder_feature_dim,
log_std_min,
log_std_max,
num_layers,
num_filters,
encoder: Optional[nn.Module] = None,
encoder_cls=None,
update_encoder_via_rl: bool = True,
):
super().__init__()
if encoder is None:
if encoder_cls is None:
self.encoder = make_encoder(
encoder_type,
obs_shape,
encoder_feature_dim,
num_layers,
num_filters,
)
else:
self.encoder = encoder_cls(
encoder_type,
obs_shape,
encoder_feature_dim,
num_layers,
num_filters,
)
else:
self.encoder = encoder
self.log_std_min = log_std_min
self.log_std_max = log_std_max
self.trunk = nn.Sequential(
nn.Linear(self.encoder.feature_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, 2 * action_shape[0]),
)
self.outputs = dict()
self.apply(weight_init)
def forward(self, obs, compute_pi=True, compute_log_pi=True, detach_encoder=False):
obs = self.encoder(obs, detach=detach_encoder)
mu, log_std = self.trunk(obs).chunk(2, dim=-1)
# constrain log_std inside [log_std_min, log_std_max]
log_std = torch.tanh(log_std)
log_std = self.log_std_min + 0.5 * (self.log_std_max - self.log_std_min) * (
log_std + 1
)
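# e.g. with defaults (log_std_min, log_std_max) = (-10, 2): a tanh output of
# -1 maps to -10, 0 maps to -4, and +1 maps to 2.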
self.outputs["mu"] = mu
self.outputs["std"] = log_std.exp()
if compute_pi:
std = log_std.exp()
noise = torch.randn_like(mu)
pi = mu + noise * std
else:
pi = None
entropy = None
if compute_log_pi:
log_pi = gaussian_logprob(noise, log_std)
else:
log_pi = None
mu, pi, log_pi = squash(mu, pi, log_pi)
return mu, pi, log_pi, log_std
def log(self, L, step, log_freq=LOG_FREQ):
if step % log_freq != 0:
return
for k, v in self.outputs.items():
L.log_histogram("train_actor/%s_hist" % k, v, step)
L.log_param("train_actor/fc1", self.trunk[0], step)
L.log_param("train_actor/fc2", self.trunk[2], step)
L.log_param("train_actor/fc3", self.trunk[4], step)
class QFunction(nn.Module):
"""MLP for q-function."""
def __init__(self, obs_dim, action_dim, hidden_dim):
super().__init__()
self.trunk = nn.Sequential(
nn.Linear(obs_dim + action_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, 1),
)
def forward(self, obs, action):
assert obs.size(0) == action.size(0)
obs_action = torch.cat([obs, action], dim=1)
return self.trunk(obs_action)
class Critic(nn.Module):
"""Critic network, employes two q-functions."""
def __init__(
self,
obs_shape,
action_shape,
hidden_dim,
encoder_type,
encoder_feature_dim,
num_layers,
num_filters,
encoder_cls=None,
):
super().__init__()
if encoder_cls is None:
self.encoder = make_encoder(
encoder_type, obs_shape, encoder_feature_dim, num_layers, num_filters
)
else:
self.encoder = encoder_cls(
encoder_type, obs_shape, encoder_feature_dim, num_layers, num_filters
)
self.Q1 = QFunction(self.encoder.feature_dim, action_shape[0], hidden_dim)
self.Q2 = QFunction(self.encoder.feature_dim, action_shape[0], hidden_dim)
self.outputs = dict()
self.apply(weight_init)
def forward(self, obs, action, detach_encoder=False):
# detach_encoder allows to stop gradient propagation to the encoder
if detach_encoder:
with torch.no_grad():
obs = self.encoder(obs, detach=detach_encoder)
else:
obs = self.encoder(obs, detach=detach_encoder)
# obs = self.encoder(obs, detach=detach_encoder)
q1 = self.Q1(obs, action)
q2 = self.Q2(obs, action)
self.outputs["q1"] = q1
self.outputs["q2"] = q2
return q1, q2
def log(self, L, step, log_freq=LOG_FREQ):
if step % log_freq != 0:
return
self.encoder.log(L, step, log_freq)
for k, v in self.outputs.items():
L.log_histogram("train_critic/%s_hist" % k, v, step)
for i in range(3):
L.log_param("train_critic/q1_fc%d" % i, self.Q1.trunk[i * 2], step)
L.log_param("train_critic/q2_fc%d" % i, self.Q2.trunk[i * 2], step)
class SacAeAgent(object):
"""SAC+AE algorithm."""
def __init__(
self,
obs_shape,
action_shape,
device,
hidden_dim=256,
discount=0.99,
init_temperature=0.01,
alpha_lr=1e-3,
alpha_beta=0.9,
actor_lr=1e-3,
actor_beta=0.9,
actor_log_std_min=-10,
actor_log_std_max=2,
actor_update_freq=2,
critic_lr=1e-3,
critic_beta=0.9,
critic_tau=0.005,
critic_target_update_freq=2,
encoder_type="pixel",
encoder_feature_dim=50,
encoder_lr=1e-3,
encoder_tau=0.005,
decoder_type="pixel",
decoder_lr=1e-3,
decoder_update_freq=1,
decoder_latent_lambda=0.0,
decoder_weight_lambda=0.0,
num_layers=4,
num_filters=32,
update_encoder_via_rl: bool = False,
):
self.device = device
self.discount = discount
self.critic_tau = critic_tau
self.encoder_tau = encoder_tau
self.actor_update_freq = actor_update_freq
self.critic_target_update_freq = critic_target_update_freq
self.decoder_update_freq = decoder_update_freq
self.decoder_latent_lambda = decoder_latent_lambda
self.update_encoder_via_rl = update_encoder_via_rl
self.actor = Actor(
obs_shape,
action_shape,
hidden_dim,
encoder_type,
encoder_feature_dim,
actor_log_std_min,
actor_log_std_max,
num_layers,
num_filters,
).to(device)
self.critic = Critic(
obs_shape,
action_shape,
hidden_dim,
encoder_type,
encoder_feature_dim,
num_layers,
num_filters,
).to(device)
self.critic_target = Critic(
obs_shape,
action_shape,
hidden_dim,
encoder_type,
encoder_feature_dim,
num_layers,
num_filters,
).to(device)
self.critic_target.load_state_dict(self.critic.state_dict())
# tie encoders between actor and critic
self.actor.encoder.copy_conv_weights_from(self.critic.encoder)
self.log_alpha = torch.tensor(np.log(init_temperature)).to(device)
self.log_alpha.requires_grad = True
# set target entropy to -|A|
self.target_entropy = -np.prod(action_shape)
self.decoder = None
if decoder_type != "identity":
# create decoder
self.decoder = make_decoder(
decoder_type, obs_shape, encoder_feature_dim, num_layers, num_filters
).to(device)
self.decoder.apply(weight_init)
# optimizer for critic encoder for reconstruction loss
self.encoder_optimizer = torch.optim.Adam(
self.critic.encoder.parameters(), lr=encoder_lr
)
# optimizer for decoder
self.decoder_optimizer = torch.optim.Adam(
self.decoder.parameters(),
lr=decoder_lr,
weight_decay=decoder_weight_lambda,
)
# optimizers
self.actor_optimizer = torch.optim.Adam(
self.actor.parameters(), lr=actor_lr, betas=(actor_beta, 0.999)
)
self.critic_optimizer = torch.optim.Adam(
self.critic.parameters(), lr=critic_lr, betas=(critic_beta, 0.999)
)
self.log_alpha_optimizer = torch.optim.Adam(
[self.log_alpha], lr=alpha_lr, betas=(alpha_beta, 0.999)
)
self.train()
self.critic_target.train()
def train(self, training=True):
self.training = training
self.actor.train(training)
self.critic.train(training)
if self.decoder is not None:
self.decoder.train(training)
@property
def alpha(self):
return self.log_alpha.exp()
def select_action(self, obs):
with torch.no_grad():
obs = torch.FloatTensor(obs).to(self.device)
if len(obs.shape) == 3:
obs = obs.unsqueeze(0)
mu, _, _, _ = self.actor(obs, compute_pi=False, compute_log_pi=False)
return mu.cpu()
# return mu.cpu().data.numpy().flatten()
def sample_action(self, obs):
with torch.no_grad():
obs = torch.FloatTensor(obs).to(self.device)
if len(obs.shape) == 3:
obs = obs.unsqueeze(0)
mu, pi, _, _ = self.actor(obs, compute_log_pi=False)
return pi.cpu()
# return pi.cpu().data.numpy().flatten()
def update_critic(self, obs, action, reward, next_obs, not_done, L, step):
with torch.no_grad():
_, policy_action, log_pi, _ = self.actor(next_obs)
target_Q1, target_Q2 = self.critic_target(next_obs, policy_action)
target_V = torch.min(target_Q1, target_Q2) - self.alpha.detach() * log_pi
target_Q = reward + (not_done * self.discount * target_V)
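# Soft Bellman target computed above:
#   y = r + gamma * not_done * (min(Q1', Q2') - alpha * log pi(a'|s'))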
# get current Q estimates
current_Q1, current_Q2 = self.critic(obs, action)
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(
current_Q2, target_Q
)
L.log("train_critic/loss", critic_loss, step)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
self.critic.log(L, step)
def update_actor_and_alpha(self, obs, L, step):
# detach encoder, so we don't update it with the actor loss
_, pi, log_pi, log_std = self.actor(obs, detach_encoder=True)
actor_Q1, actor_Q2 = self.critic(obs, pi, detach_encoder=True)
actor_Q = torch.min(actor_Q1, actor_Q2)
actor_loss = (self.alpha.detach() * log_pi - actor_Q).mean()
L.log("train_actor/loss", actor_loss, step)
L.log("train_actor/target_entropy", self.target_entropy, step)
entropy = 0.5 * log_std.shape[1] * (1.0 + np.log(2 * np.pi)) + log_std.sum(
dim=-1
)
L.log("train_actor/entropy", entropy.mean(), step)
# optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
self.actor.log(L, step)
self.log_alpha_optimizer.zero_grad()
alpha_loss = (self.alpha * (-log_pi - self.target_entropy).detach()).mean()
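# Temperature objective: J(alpha) = E[alpha * (-log_pi - target_entropy)];
# gradient descent on it increases alpha whenever the policy entropy estimate
# (-log_pi) falls below the target entropy.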
L.log("train_alpha/loss", alpha_loss, step)
L.log("train_alpha/value", self.alpha, step)
alpha_loss.backward()
self.log_alpha_optimizer.step()
def update_decoder(self, obs, target_obs, L, step):
h = self.critic.encoder(obs)
if target_obs.dim() == 4:
# preprocess images to be in [-0.5, 0.5] range
target_obs = utils.preprocess_obs(target_obs)
rec_obs = self.decoder(h)
rec_loss = F.mse_loss(target_obs, rec_obs)
# add L2 penalty on latent representation
# see https://arxiv.org/pdf/1903.12436.pdf
latent_loss = (0.5 * h.pow(2).sum(1)).mean()
loss = rec_loss + self.decoder_latent_lambda * latent_loss
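# Autoencoder objective: reconstruction MSE plus an L2 penalty on the latent
# code h (see the paper linked above), weighted by decoder_latent_lambda.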
self.encoder_optimizer.zero_grad()
self.decoder_optimizer.zero_grad()
loss.backward()
self.encoder_optimizer.step()
self.decoder_optimizer.step()
L.log("train/ae_loss", loss, step)
self.decoder.log(L, step, log_freq=LOG_FREQ)
def update(self, replay_buffer, L, step):
obs, action, reward, next_obs, not_done = replay_buffer.sample()
L.log("train/batch_reward", reward.mean(), step)
self.update_critic(obs, action, reward, next_obs, not_done, L, step)
if step % self.actor_update_freq == 0:
self.update_actor_and_alpha(obs, L, step)
if step % self.critic_target_update_freq == 0:
utils.soft_update_params(
self.critic.Q1, self.critic_target.Q1, self.critic_tau
)
utils.soft_update_params(
self.critic.Q2, self.critic_target.Q2, self.critic_tau
)
utils.soft_update_params(
self.critic.encoder, self.critic_target.encoder, self.encoder_tau
)
if self.decoder is not None and step % self.decoder_update_freq == 0:
self.update_decoder(obs, obs, L, step)
def save(self, model_dir, step):
torch.save(self.actor.state_dict(), "%s/actor_%s.pt" % (model_dir, step))
torch.save(self.critic.state_dict(), "%s/critic_%s.pt" % (model_dir, step))
torch.save(
self.critic_target.state_dict(),
"%s/critic_target_%s.pt" % (model_dir, step),
)
if self.decoder is not None:
torch.save(
self.decoder.state_dict(), "%s/decoder_%s.pt" % (model_dir, step)
)
def load(self, model_dir, step):
self.actor.load_state_dict(torch.load("%s/actor_%s.pt" % (model_dir, step)))
self.critic.load_state_dict(torch.load("%s/critic_%s.pt" % (model_dir, step)))
self.critic_target.load_state_dict(
torch.load("%s/critic_target_%s.pt" % (model_dir, step))
)
if self.decoder is not None:
self.decoder.load_state_dict(
torch.load("%s/decoder_%s.pt" % (model_dir, step))
)
| 16,040 | 30.890656 | 87 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/sacae/decoder.py | # Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
from sacae.encoder import OUT_DIM
class PixelDecoder(nn.Module):
def __init__(self, obs_shape, feature_dim, num_layers=2, num_filters=32):
super().__init__()
self.num_layers = num_layers
self.num_filters = num_filters
self.out_dim = OUT_DIM[num_layers]
self.fc = nn.Linear(feature_dim, num_filters * self.out_dim * self.out_dim)
self.deconvs = nn.ModuleList()
for i in range(self.num_layers - 1):
self.deconvs.append(
nn.ConvTranspose2d(num_filters, num_filters, 3, stride=1)
)
self.deconvs.append(
nn.ConvTranspose2d(num_filters, obs_shape[0], 3, stride=2, output_padding=1)
)
# self.deconvs.append(
# nn.ConvTranspose2d(num_filters, 3, 3, stride=2, output_padding=1)
# )
self.outputs = dict()
def forward(self, h):
h = torch.relu(self.fc(h))
self.outputs["fc"] = h
deconv = h.view(-1, self.num_filters, self.out_dim, self.out_dim)
self.outputs["deconv1"] = deconv
for i in range(0, self.num_layers - 1):
deconv = torch.relu(self.deconvs[i](deconv))
self.outputs["deconv%s" % (i + 1)] = deconv
obs = self.deconvs[-1](deconv)
self.outputs["obs"] = obs
return obs
def log(self, L, step, log_freq):
if step % log_freq != 0:
return
for k, v in self.outputs.items():
L.log_histogram("train_decoder/%s_hist" % k, v, step)
if len(v.shape) > 2:
L.log_image("train_decoder/%s_i" % k, v[0], step)
for i in range(self.num_layers):
L.log_param("train_decoder/deconv%s" % (i + 1), self.deconvs[i], step)
L.log_param("train_decoder/fc", self.fc, step)
class VecPixelDecoder(PixelDecoder):
def __init__(
self, obs_shape, feature_dim: int, num_layers: int = 2, num_filters: int = 32
):
super().__init__(
obs_shape=obs_shape,
feature_dim=feature_dim,
num_layers=num_layers,
num_filters=num_filters,
)
def log(self, L, step, log_freq, env_idx: int):
if step % log_freq != 0:
return
for k, v in self.outputs.items():
L.log_histogram("train_decoder/%s_hist" % k, v, step, env_idx=env_idx)
if len(v.shape) > 2:
L.log_image("train_decoder/%s_i" % k, v[0], step, env_idx=env_idx)
for i in range(self.num_layers):
L.log_param(
"train_decoder/deconv%s" % (i + 1),
self.deconvs[i],
step,
env_idx=env_idx,
)
L.log_param("train_decoder/fc", self.fc, step, env_idx=env_idx)
_AVAILABLE_DECODERS = {"pixel": PixelDecoder}
_AVAILABLE_VEC_DECODERS = {"pixel": VecPixelDecoder}
def make_decoder(decoder_type, obs_shape, feature_dim, num_layers, num_filters):
assert decoder_type in _AVAILABLE_DECODERS
return _AVAILABLE_DECODERS[decoder_type](
obs_shape, feature_dim, num_layers, num_filters
)
def make_vec_decoder(decoder_type, obs_shape, feature_dim, num_layers, num_filters):
assert decoder_type in _AVAILABLE_VEC_DECODERS
return _AVAILABLE_VEC_DECODERS[decoder_type](
obs_shape, feature_dim, num_layers, num_filters
)
| 3,455 | 30.418182 | 88 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/sacae/vec_logger.py | # Copyright (c) Facebook, Inc. and its affiliates.
import os
import shutil
from typing import Optional
import numpy as np
import torch
import torchvision
from torch.utils.tensorboard import SummaryWriter
from sacae.logger import MetersGroup
FORMAT_CONFIG = {
"rl": {
"train": [
("env_idx", "ENV_ID", "int"),
("episode", "E", "int"),
("step", "S", "int"),
("duration", "D", "time"),
("episode_reward", "R", "float"),
("batch_reward", "BR", "float"),
("actor_loss", "ALOSS", "float"),
("critic_loss", "CLOSS", "float"),
("ae_loss", "RLOSS", "float"),
],
"eval": [
("env_idx", "ENV_ID", "int"),
("step", "S", "int"),
("episode_reward", "ER", "float"),
],
}
}
class MetersGroupWithIdx(MetersGroup):
def __init__(self, file_name, formating, env_idx):
super().__init__(file_name=file_name, formating=formating)
self.env_idx = env_idx
def dump(self, step, prefix):
if len(self._meters) == 0:
return
data = self._prime_meters()
data["step"] = step
data["env_idx"] = self.env_idx
self._dump_to_file(data)
self._dump_to_console(data, prefix)
self._meters.clear()
class VecLogger(object):
"""Vectorized Logger"""
def __init__(
self, log_dir: str, use_tb: bool = True, config: str = "rl", num_envs: int = 1
):
self._log_dir = log_dir
if use_tb:
tb_dir = os.path.join(log_dir, "tb")
if os.path.exists(tb_dir):
shutil.rmtree(tb_dir)
self._sw = [SummaryWriter(tb_dir) for _ in range(num_envs)]
else:
self._sw = None
self._train_mg = [
MetersGroupWithIdx(
os.path.join(log_dir, "train.log"),
formating=FORMAT_CONFIG[config]["train"],
env_idx=env_idx,
)
for env_idx in range(num_envs)
]
self._eval_mg = [
MetersGroupWithIdx(
os.path.join(log_dir, "eval.log"),
formating=FORMAT_CONFIG[config]["eval"],
env_idx=env_idx,
)
for env_idx in range(num_envs)
]
def _try_sw_log(self, key, value, step, env_idx):
if self._sw is not None:
self._sw[env_idx].add_scalar(key, value, step)
def _try_sw_log_image(self, key, image, step, env_idx):
if self._sw is not None:
assert image.dim() == 3
grid = torchvision.utils.make_grid(image.unsqueeze(1))
self._sw[env_idx].add_image(key, grid, step)
def _try_sw_log_video(self, key, frames, step, env_idx):
if self._sw is not None:
frames = torch.from_numpy(np.array(frames))
frames = frames.unsqueeze(0)
self._sw[env_idx].add_video(key, frames, step, fps=30)
def _try_sw_log_histogram(self, key, histogram, step, env_idx):
if self._sw is not None:
self._sw[env_idx].add_histogram(key, histogram, step)
def log(self, key, value, step, n=1, env_idx=None):
assert key.startswith("train") or key.startswith("eval")
if type(value) == torch.Tensor:
value = value.item()
self._try_sw_log(key, value / n, step, env_idx=env_idx)
mg = (
self._train_mg[env_idx]
if key.startswith("train")
else self._eval_mg[env_idx]
)
mg.log(key, value, n)
def log_param(self, key, param, step, env_idx):
self.log_histogram(key + "_w", param.weight.data, step, env_idx=env_idx)
if hasattr(param.weight, "grad") and param.weight.grad is not None:
self.log_histogram(
key + "_w_g", param.weight.grad.data, step, env_idx=env_idx
)
if hasattr(param, "bias"):
self.log_histogram(key + "_b", param.bias.data, step, env_idx=env_idx)
if hasattr(param.bias, "grad") and param.bias.grad is not None:
self.log_histogram(
key + "_b_g", param.bias.grad.data, step, env_idx=env_idx
)
def log_image(self, key, image, step, env_idx):
assert key.startswith("train") or key.startswith("eval")
self._try_sw_log_image(key, image, step, env_idx=env_idx)
def log_video(self, key, frames, step, env_idx):
assert key.startswith("train") or key.startswith("eval")
self._try_sw_log_video(key, frames, step, env_idx=env_idx)
def log_histogram(self, key, histogram, step, env_idx):
assert key.startswith("train") or key.startswith("eval")
self._try_sw_log_histogram(key, histogram, step, env_idx=env_idx)
def dump(self, step, env_idx, mode: Optional[str] = None) -> None:
if mode is None:
self._train_mg[env_idx].dump(step, "train")
self._eval_mg[env_idx].dump(step, "eval")
elif mode == "train":
self._train_mg[env_idx].dump(step, "train")
elif mode == "eval":
self._eval_mg[env_idx].dump(step, "eval")
| 5,190 | 34.312925 | 86 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/sacae/sacae_vec.py | # Copyright (c) Facebook, Inc. and its affiliates.
from typing import Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
import sacae.utils as utils
from sacae import sacae
from sacae.decoder import make_vec_decoder
from sacae.encoder import make_vec_encoder
from sacae.logger import Logger
from sacae.vec_logger import VecLogger
LoggerType = Union[Logger, VecLogger]
class Actor(sacae.Actor):
"""MLP actor network."""
def __init__(
self,
obs_shape,
action_shape,
hidden_dim: int,
encoder_type: str,
encoder_feature_dim: int,
log_std_min: float,
log_std_max: float,
num_layers: int,
num_filters: int,
encoder: Optional[torch.nn.Module] = None,
):
if encoder is None:
encoder_cls = make_vec_encoder
else:
encoder_cls = None
super().__init__(
obs_shape=obs_shape,
action_shape=action_shape,
hidden_dim=hidden_dim,
encoder_type=encoder_type,
encoder_feature_dim=encoder_feature_dim,
log_std_min=log_std_min,
log_std_max=log_std_max,
num_layers=num_layers,
num_filters=num_filters,
encoder=encoder,
encoder_cls=encoder_cls,
)
def log(
self,
L: LoggerType,
step: int,
log_freq: int = sacae.LOG_FREQ,
env_idx: Optional[int] = None,
) -> None:
if step % log_freq != 0:
return
for k, v in self.outputs.items():
L.log_histogram("train_actor/%s_hist" % k, v, step, env_idx=env_idx)
L.log_param("train_actor/fc1", self.trunk[0], step, env_idx=env_idx)
L.log_param("train_actor/fc2", self.trunk[2], step, env_idx=env_idx)
L.log_param("train_actor/fc3", self.trunk[4], step, env_idx=env_idx)
class Critic(sacae.Critic):
"""Critic network, employes two q-functions."""
def __init__(
self,
obs_shape,
action_shape,
hidden_dim: int,
encoder_type: str,
encoder_feature_dim: int,
num_layers: int,
num_filters: int,
):
super().__init__(
obs_shape=obs_shape,
action_shape=action_shape,
hidden_dim=hidden_dim,
encoder_type=encoder_type,
encoder_feature_dim=encoder_feature_dim,
num_layers=num_layers,
num_filters=num_filters,
encoder_cls=make_vec_encoder,
)
def log(
self,
L: LoggerType,
step: int,
log_freq: int = sacae.LOG_FREQ,
env_idx: Optional[int] = None,
) -> None:
if step % log_freq != 0:
return
self.encoder.log(L, step, log_freq, env_idx=env_idx)
for k, v in self.outputs.items():
L.log_histogram("train_critic/%s_hist" % k, v, step, env_idx=env_idx)
for i in range(3):
L.log_param(
"train_critic/q1_fc%d" % i, self.Q1.trunk[i * 2], step, env_idx=env_idx
)
L.log_param(
"train_critic/q2_fc%d" % i, self.Q2.trunk[i * 2], step, env_idx=env_idx
)
class SacAeAgent(sacae.SacAeAgent):
"""SAC+AE algorithm."""
def __init__(
self,
obs_shape,
action_shape,
device,
hidden_dim: int = 256,
discount: float = 0.99,
init_temperature: float = 0.01,
alpha_lr: float = 1e-3,
alpha_beta: float = 0.9,
actor_lr: float = 1e-3,
actor_beta: float = 0.9,
actor_log_std_min: int = -10,
actor_log_std_max: int = 2,
actor_update_freq: int = 2,
critic_lr: float = 1e-3,
critic_beta: float = 0.9,
critic_tau: float = 0.005,
critic_target_update_freq: int = 2,
encoder_type: str = "pixel",
encoder_feature_dim: int = 50,
encoder_lr: float = 1e-3,
encoder_tau: float = 0.005,
decoder_type: str = "pixel",
decoder_lr: float = 1e-3,
decoder_update_freq: int = 1,
decoder_latent_lambda: float = 0.0,
decoder_weight_lambda: float = 0.0,
num_layers: int = 4,
num_filters: int = 32,
update_encoder_via_rl: bool = False,
):
self.device = device
self.discount = discount
self.critic_tau = critic_tau
self.encoder_tau = encoder_tau
self.actor_update_freq = actor_update_freq
self.critic_target_update_freq = critic_target_update_freq
self.decoder_update_freq = decoder_update_freq
self.decoder_latent_lambda = decoder_latent_lambda
self.update_encoder_via_rl = update_encoder_via_rl
self.actor = Actor(
obs_shape,
action_shape,
hidden_dim,
encoder_type,
encoder_feature_dim,
actor_log_std_min,
actor_log_std_max,
num_layers,
num_filters,
).to(device)
self.critic = Critic(
obs_shape,
action_shape,
hidden_dim,
encoder_type,
encoder_feature_dim,
num_layers,
num_filters,
).to(device)
self.critic_target = Critic(
obs_shape,
action_shape,
hidden_dim,
encoder_type,
encoder_feature_dim,
num_layers,
num_filters,
).to(device)
self.critic_target.load_state_dict(self.critic.state_dict())
# tie encoders between actor and critic
self.actor.encoder.copy_conv_weights_from(self.critic.encoder)
self.log_alpha = torch.tensor(np.log(init_temperature)).to(device)
self.log_alpha.requires_grad = True
# set target entropy to -|A|
self.target_entropy = -np.prod(action_shape)
self.decoder = None
if decoder_type != "identity":
# create decoder
self.decoder = make_vec_decoder(
decoder_type, obs_shape, encoder_feature_dim, num_layers, num_filters
).to(device)
self.decoder.apply(sacae.weight_init)
# optimizer for critic encoder for reconstruction loss
self.encoder_optimizer = torch.optim.Adam(
self.critic.encoder.parameters(), lr=encoder_lr
)
# optimizer for decoder
self.decoder_optimizer = torch.optim.Adam(
self.decoder.parameters(),
lr=decoder_lr,
weight_decay=decoder_weight_lambda,
)
# optimizers
self.actor_optimizer = torch.optim.Adam(
self.actor.parameters(), lr=actor_lr, betas=(actor_beta, 0.999)
)
self.critic_optimizer = torch.optim.Adam(
self.critic.parameters(), lr=critic_lr, betas=(critic_beta, 0.999)
)
self.log_alpha_optimizer = torch.optim.Adam(
[self.log_alpha], lr=alpha_lr, betas=(alpha_beta, 0.999)
)
self.train()
self.critic_target.train()
def update_critic(
self,
obs,
action,
reward,
next_obs,
not_done,
L: LoggerType,
step: int,
env_idx: int,
):
with torch.no_grad():
_, policy_action, log_pi, _ = self.actor(next_obs)
target_Q1, target_Q2 = self.critic_target(next_obs, policy_action)
target_V = torch.min(target_Q1, target_Q2) - self.alpha.detach() * log_pi
target_Q = reward + (not_done * self.discount * target_V)
# get current Q estimates
current_Q1, current_Q2 = self.critic(
obs, action, detach_encoder=not self.update_encoder_via_rl
)
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(
current_Q2, target_Q
)
L.log("train_critic/loss", critic_loss, step, env_idx=env_idx)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
self.critic.log(L, step, env_idx=env_idx)
def update_actor_and_alpha(self, obs, L: LoggerType, step: int, env_idx: int):
# detach encoder, so we don't update it with the actor loss
_, pi, log_pi, log_std = self.actor(obs, detach_encoder=True)
actor_Q1, actor_Q2 = self.critic(obs, pi, detach_encoder=True)
actor_Q = torch.min(actor_Q1, actor_Q2)
actor_loss = (self.alpha.detach() * log_pi - actor_Q).mean()
L.log("train_actor/loss", actor_loss, step, env_idx=env_idx)
L.log("train_actor/target_entropy", self.target_entropy, step, env_idx=env_idx)
entropy = 0.5 * log_std.shape[1] * (1.0 + np.log(2 * np.pi)) + log_std.sum(
dim=-1
)
L.log("train_actor/entropy", entropy.mean(), step, env_idx=env_idx)
# optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
self.actor.log(L, step, env_idx=env_idx)
self.log_alpha_optimizer.zero_grad()
alpha_loss = (self.alpha * (-log_pi - self.target_entropy).detach()).mean()
L.log("train_alpha/loss", alpha_loss, step, env_idx=env_idx)
L.log("train_alpha/value", self.alpha, step, env_idx=env_idx)
alpha_loss.backward()
self.log_alpha_optimizer.step()
def update_decoder(self, obs, target_obs, L: LoggerType, step: int, env_idx: int):
h = self.critic.encoder(obs)
if target_obs.dim() == 4:
# preprocess images to be in [-0.5, 0.5] range
target_obs = utils.preprocess_obs(target_obs)
rec_obs = self.decoder(h)
rec_loss = F.mse_loss(target_obs, rec_obs)
# add L2 penalty on latent representation
# see https://arxiv.org/pdf/1903.12436.pdf
latent_loss = (0.5 * h.pow(2).sum(1)).mean()
loss = rec_loss + self.decoder_latent_lambda * latent_loss
self.encoder_optimizer.zero_grad()
self.decoder_optimizer.zero_grad()
loss.backward()
self.encoder_optimizer.step()
self.decoder_optimizer.step()
L.log("train/ae_loss", loss, step, env_idx=env_idx)
self.decoder.log(L, step, log_freq=sacae.LOG_FREQ, env_idx=env_idx)
def update(self, replay_buffer, L: LoggerType, step: int, env_idx: int):
obs, action, reward, next_obs, not_done = replay_buffer.sample(env_id=env_idx)
L.log("train/batch_reward", reward.mean(), step, env_idx=env_idx)
self.update_critic(
obs, action, reward, next_obs, not_done, L, step, env_idx=env_idx
)
if step % self.actor_update_freq == 0:
self.update_actor_and_alpha(obs, L, step, env_idx=env_idx)
if step % self.critic_target_update_freq == 0:
utils.soft_update_params(
self.critic.Q1, self.critic_target.Q1, self.critic_tau
)
utils.soft_update_params(
self.critic.Q2, self.critic_target.Q2, self.critic_tau
)
utils.soft_update_params(
self.critic.encoder, self.critic_target.encoder, self.encoder_tau,
)
if self.decoder is not None and step % self.decoder_update_freq == 0:
self.update_decoder(obs, obs, L, step, env_idx=env_idx)
| 11,471 | 31.590909 | 87 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/rl/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import glob
import json
import os
from typing import Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import autograd, nn, optim
from torch.utils.data import DataLoader, Dataset
import argument_parser
import utils
from main import make_fns_to_make_train_and_eval_envs
from main_aux import bootstrap_models_and_optimizers, bootstrap_setup
from ml_logger.logbook import LogBook
from ml_logger.logbook import make_config as make_logbook_config
from model import Decoder, DynamicsModel, Encoder, RewardModel
from rl.logger import Logger
from sac_ae.sac_ae import SacAeAgent
# from video import VideoRecorder
def compute_encoder_and_dynamics_loss(obs, action, next_obs, encoder, dynamics_model):
state = encoder(obs)
pred_next_state = dynamics_model(state, action)
true_next_state = encoder(next_obs).detach()
return F.mse_loss(pred_next_state, true_next_state), state
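# i.e. the one-step latent model loss ||f(phi(o), a) - phi(o')||^2, with the
# target encoding detached so it acts as a fixed regression target.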
def bootstrap_setup_for_rl(args: argparse.Namespace):
"""Method to bootstrap the setup"""
utils.set_seed_everywhere(args.seed)
(
logbook,
device,
train_envs,
eval_envs,
obs_shape,
action_size,
train_replay_buffer,
eval_replay_buffer,
logging_dict,
) = bootstrap_setup(args)
args.video_dir = utils.make_dir(os.path.join(args.work_dir, "video"))
args.model_dir = utils.make_dir(os.path.join(args.work_dir, "model"))
args.buffer_dir = utils.make_dir(os.path.join(args.work_dir, "buffer"))
# video = VideoRecorder(video_dir if args.save_video else None)
logging_dict = {
"steps": [],
"model_error_in_latent_state": [],
"model_error_in_eta_state": [],
"reward_error": [],
"decoding_error": [],
"test_model_error_in_latent_state": [],
"test_model_error_in_eta_state": [],
"test_reward_error": [],
"test_decoding_error": [],
}
logger = Logger(args.work_dir, use_tb=args.save_tb, logbook=logbook)
# train_envs = utils.make_vec_envs(envs = train_envs,
# device=None,
# num_frame_stack=args.frame_stack)
# eval_envs = utils.make_vec_envs(envs = eval_envs,
# device=None,
# num_frame_stack=args.frame_stack)
(
fns_to_make_train_envs,
fns_to_make_eval_envs,
) = make_fns_to_make_train_and_eval_envs(args=args)
max_episode_steps = train_envs[0]._max_episode_steps
train_envs = utils.make_vec_envs(
fns_to_make_envs=fns_to_make_train_envs,
device=None,
num_frame_stack=args.frame_stack,
)
eval_envs = utils.make_vec_envs(
fns_to_make_envs=fns_to_make_eval_envs,
device=None,
num_frame_stack=args.frame_stack,
)
return (
logbook,
device,
train_envs,
eval_envs,
obs_shape,
action_size,
train_replay_buffer,
eval_replay_buffer,
logging_dict,
logger,
max_episode_steps,
)
def bootstrap_agent(
obs_shape: Tuple[int, int, int],
action_shape,
args: argparse.Namespace,
device: torch.device,
phi_encoder: torch.nn.Module,
) -> SacAeAgent:
if args.agent == "sac_ae":
return SacAeAgent(
obs_shape=obs_shape,
action_shape=action_shape,
device=device,
hidden_dim=args.hidden_dim,
discount=args.discount,
init_temperature=args.init_temperature,
alpha_lr=args.alpha_lr,
alpha_beta=args.alpha_beta,
actor_lr=args.actor_lr,
actor_beta=args.actor_beta,
actor_log_std_min=args.actor_log_std_min,
actor_log_std_max=args.actor_log_std_max,
actor_update_freq=args.actor_update_freq,
critic_lr=args.critic_lr,
critic_beta=args.critic_beta,
critic_tau=args.critic_tau,
critic_target_update_freq=args.critic_target_update_freq,
encoder_type=args.encoder_type,
encoder_feature_dim=args.encoder_feature_dim,
encoder_lr=args.encoder_lr,
encoder_tau=args.encoder_tau,
decoder_type=args.decoder_type,
decoder_lr=args.decoder_lr,
decoder_update_freq=args.decoder_update_freq,
decoder_latent_lambda=args.decoder_latent_lambda,
decoder_weight_lambda=args.decoder_weight_lambda,
num_layers=args.num_layers,
num_filters=args.num_filters,
phi_encoder=phi_encoder,
)
else:
assert "agent is not supported: %s" % args.agent
| 4,741 | 29.792208 | 86 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/rl/logger.py | # Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
import shutil
from collections import defaultdict
import numpy as np
import torch
import torchvision
from termcolor import colored
from torch.utils.tensorboard import SummaryWriter
FORMAT_CONFIG = {
"rl": {
"train": [
("episode", "E", "int"),
("step", "S", "int"),
("duration", "D", "time"),
("episode_reward", "R", "float"),
("batch_reward", "BR", "float"),
("actor_loss", "ALOSS", "float"),
("critic_loss", "CLOSS", "float"),
("ae_loss", "RLOSS", "float"),
],
"eval": [("step", "S", "int"), ("episode_reward", "ER", "float")],
}
}
class AverageMeter(object):
def __init__(self):
self._sum = 0
self._count = 0
def update(self, value, n=1):
self._sum += value
self._count += n
def value(self):
return self._sum / max(1, self._count)
class MetersGroup(object):
def __init__(self, file_name, formating):
self._file_name = file_name
if os.path.exists(file_name):
os.remove(file_name)
self._formating = formating
self._meters = defaultdict(AverageMeter)
def log(self, key, value, n=1):
self._meters[key].update(value, n)
def _prime_meters(self, key_list=None):
data = dict()
if key_list is None:
key_list = self._meters.keys()
for key in key_list:
meter = self._meters[key]
if key.startswith("train"):
key = key[len("train") + 1 :]
else:
key = key[len("eval") + 1 :]
key = key.replace("/", "_")
data[key] = meter.value()
return data
def _dump_to_file(self, data):
with open(self._file_name, "a") as f:
f.write(json.dumps(data) + "\n")
def _format(self, key, value, ty):
template = "%s: "
if ty == "int":
template += "%d"
elif ty == "float":
template += "%.04f"
elif ty == "time":
template += "%.01f s"
else:
raise "invalid format type: %s" % ty
return template % (key, value)
def _dump_to_console(self, data, prefix, suffix):
prefix = colored(prefix, "yellow" if prefix == "train" else "green")
pieces = ["{:5}".format(prefix)]
for key, disp_key, ty in self._formating:
if key + suffix in data:
value = data[key + suffix]
elif key in data:
value = data[key]
else:
value = 0
pieces.append(self._format(disp_key, value, ty))
print("| %s" % (" | ".join(pieces)))
def dump(self, step, prefix, logbook):
if len(self._meters) == 0:
return
data = self._prime_meters()
data["step"] = step
data["mode"] = prefix
logbook.write_metric_logs(data)
self._dump_to_file(data)
self._dump_to_console(data, prefix, suffix="")
self._meters.clear()
def filter_meter_keys(self, suffix):
"""Method to select the meters whose key ends with a given string"""
return [key for key in self._meters if key.endswith(suffix)]
def dump_for_one_env(self, step, prefix, logbook, env_id):
suffix = f"env_id_{env_id}"
meter_keys = self.filter_meter_keys(suffix)
if len(meter_keys) == 0:
return
data = self._prime_meters(key_list=meter_keys)
data["step"] = step
data["mode"] = prefix
logbook.write_metric_logs(data)
self._dump_to_file(data)
self._dump_to_console(data, prefix, suffix="_" + suffix)
for key in meter_keys:
self._meters.pop(key)
class Logger(object):
def __init__(self, log_dir, use_tb=True, config="rl", logbook=None):
self._log_dir = log_dir
if use_tb:
tb_dir = os.path.join(log_dir, "tb")
if os.path.exists(tb_dir):
shutil.rmtree(tb_dir)
self._sw = SummaryWriter(tb_dir)
else:
self._sw = None
self._train_mg = MetersGroup(
os.path.join(log_dir, "train.log"), formating=FORMAT_CONFIG[config]["train"]
)
self._eval_mg = MetersGroup(
os.path.join(log_dir, "eval.log"), formating=FORMAT_CONFIG[config]["eval"]
)
self.logbook = logbook
def _try_sw_log(self, key, value, step):
if self._sw is not None:
self._sw.add_scalar(key, value, step)
def _try_sw_log_image(self, key, image, step):
if self._sw is not None:
assert image.dim() == 3
grid = torchvision.utils.make_grid(image.unsqueeze(1))
self._sw.add_image(key, grid, step)
def _try_sw_log_video(self, key, frames, step):
if self._sw is not None:
frames = torch.from_numpy(np.array(frames))
frames = frames.unsqueeze(0)
self._sw.add_video(key, frames, step, fps=30)
def _try_sw_log_histogram(self, key, histogram, step):
if self._sw is not None:
self._sw.add_histogram(key, histogram, step)
def log(self, key, value, step, n=1):
assert key.startswith("train") or key.startswith("eval")
if type(value) == torch.Tensor:
value = value.item()
self._try_sw_log(key, value / n, step)
mg = self._train_mg if key.startswith("train") else self._eval_mg
mg.log(key, value, n)
def log_param(self, key, param, step):
self.log_histogram(key + "_w", param.weight.data, step)
if hasattr(param.weight, "grad") and param.weight.grad is not None:
self.log_histogram(key + "_w_g", param.weight.grad.data, step)
if hasattr(param, "bias"):
self.log_histogram(key + "_b", param.bias.data, step)
if hasattr(param.bias, "grad") and param.bias.grad is not None:
self.log_histogram(key + "_b_g", param.bias.grad.data, step)
def log_image(self, key, image, step):
assert key.startswith("train") or key.startswith("eval")
self._try_sw_log_image(key, image, step)
def log_video(self, key, frames, step):
assert key.startswith("train") or key.startswith("eval")
self._try_sw_log_video(key, frames, step)
def log_histogram(self, key, histogram, step):
assert key.startswith("train") or key.startswith("eval")
self._try_sw_log_histogram(key, histogram, step)
def dump(self, step):
self._train_mg.dump(step, "train", logbook=self.logbook)
self._eval_mg.dump(step, "eval", logbook=self.logbook)
def dump_for_one_env(self, step, env_id):
self._train_mg.dump_for_one_env(
step, "train", logbook=self.logbook, env_id=env_id
)
self._eval_mg.dump_for_one_env(
step, "eval", logbook=self.logbook, env_id=env_id
)
| 6,941 | 33.197044 | 88 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/model_utils/utils_baseline.py | # Copyright (c) Facebook, Inc. and its affiliates.
from argparse import Namespace
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
import utils
from ml_logger.logbook import LogBook
Envs_Type = Union[List[utils.FrameStack], utils.VecPyTorch]
def train_model(
args: Namespace,
logbook: LogBook,
device: torch.device,
train_envs: Envs_Type,
eval_envs: Envs_Type,
obs_shape: Tuple[int, int, int],
action_size: int,
train_replay_buffer: utils.MultiEnvReplayBuffer,
eval_replay_buffer: utils.MultiEnvReplayBuffer,
logging_dict: Dict,
models: Dict,
num_iters: Optional[int] = None,
iteration_start_index: int = 0,
):
# Iteration start index is used for logging
if num_iters is None:
num_iters = args.num_iters
for iteration in range(iteration_start_index, num_iters + iteration_start_index):
train_metrics = train_iter(
args=args, train_replay_buffer=train_replay_buffer, models=models
)
if iteration % args.log_interval == 0:
test_metrics = eval_iter(
args=args, eval_replay_buffer=eval_replay_buffer, models=models
)
metrics_to_log = {"steps": iteration}
for key in train_metrics:
if train_metrics[key] is not None:
metrics_to_log[f"train_{key}"] = train_metrics[key].item()
for key in test_metrics:
if test_metrics[key] is not None:
metrics_to_log[f"test_{key}"] = test_metrics[key].item()
logbook.write_metric_log(metric=metrics_to_log)
def compute_encoder_and_dynamics_loss(
args, obs, action, next_obs, encoder, dynamics_model
):
state = encoder(obs)
pred_next_state = dynamics_model(state, action)
true_next_state = encoder(next_obs).detach()
penalty = None
return (F.mse_loss(pred_next_state, true_next_state), state)
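# A self-contained sketch of the latent one-step consistency loss above, using
# toy linear stand-ins for the encoder and dynamics model (illustrative only;
# not called by the training code):
def _example_latent_loss():
    from torch import nn

    enc = nn.Linear(8, 4)                      # stand-in encoder
    dyn = nn.Linear(4 + 2, 4)                  # stand-in dynamics model
    obs, next_obs = torch.randn(16, 8), torch.randn(16, 8)
    action = torch.randn(16, 2)
    state = enc(obs)
    pred_next_state = dyn(torch.cat([state, action], dim=-1))
    true_next_state = enc(next_obs).detach()   # stop-gradient target
    return F.mse_loss(pred_next_state, true_next_state)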
def train_iter(
args: Namespace, train_replay_buffer: utils.MultiEnvReplayBuffer, models: Dict,
):
metrics = get_default_metrics_dict()
for env_idx in range(args.num_train_envs):
obses, actions, rewards, next_obses, not_dones = train_replay_buffer.sample(
env_idx
)
if len(models["decoders"]) == 1:
current_decoder = models["decoders"][0] # only use one decoder
else:
current_decoder = models["decoders"][env_idx]
current_eta_encoder = None
current_eta_dynamics_model = None
if models["eta_encoders"] is not None:
if len(models["eta_encoders"]) == 1:
current_eta_encoder = models["eta_encoders"][0]
else:
current_eta_encoder = models["eta_encoders"][env_idx]
if models["eta_dynamics_models"] is not None:
if len(models["eta_dynamics_models"]) == 1:
current_eta_dynamics_model = models["eta_dynamics_models"][0]
else:
current_eta_dynamics_model = models["eta_dynamics_models"][env_idx]
current_metrics = compute_loss_using_buffer(
args=args,
obs=obses,
actions=actions,
next_obs=next_obses,
rewards=rewards,
shared_models=models,
eta_encoder=current_eta_encoder,
eta_dynamics_model=current_eta_dynamics_model,
decoder=current_decoder,
env_idx=env_idx,
)
for key in current_metrics:
if current_metrics[key] is not None:
if metrics[key] is None:
metrics[key] = current_metrics[key]
else:
metrics[key] += current_metrics[key]
    if models["discriminator_model"] is not None:
models["discriminator_opt"].zero_grad()
metrics["discriminator_error"].backward()
models["discriminator_opt"].step()
list_of_loss_for_opt = [
"model_error_in_latent_state",
"reward_error",
"decoder_error",
"actor_error",
"encoder_discriminator_error",
]
loss = 0
for loss_for_opt in list_of_loss_for_opt:
if metrics[loss_for_opt] is not None:
loss += metrics[loss_for_opt]
models["opt"].zero_grad()
loss.backward()
models["opt"].step()
return metrics
def eval_iter(
args: Namespace, eval_replay_buffer: utils.MultiEnvReplayBuffer, models: Dict
):
metrics = get_default_metrics_dict()
with torch.no_grad():
for i in range(args.num_eval_envs):
obses, actions, rewards, next_obses, not_dones = eval_replay_buffer.sample(
i
)
current_metrics = compute_loss_using_buffer(
args=args,
obs=obses,
actions=actions,
next_obs=next_obses,
rewards=rewards,
shared_models=models,
eta_encoder=None,
eta_dynamics_model=None,
decoder=None,
env_idx=None,
)
for key in current_metrics:
if current_metrics[key] is not None:
if metrics[key] is None:
metrics[key] = current_metrics[key]
else:
metrics[key] += current_metrics[key]
return metrics
def compute_loss_using_buffer(
args,
obs,
actions,
next_obs,
rewards,
shared_models,
eta_encoder,
eta_dynamics_model,
decoder,
env_idx,
):
phi_encoder = shared_models["phi_encoder"]
phi_dynamics_model = shared_models["phi_dynamics_model"]
reward_model = shared_models["reward_model"]
actor_model = shared_models["actor_model"]
discriminator_model = shared_models["discriminator_model"]
metrics = get_default_metrics_dict()
(
metrics["model_error_in_latent_state"],
latent_state,
) = compute_encoder_and_dynamics_loss(
args=args,
obs=obs,
action=actions,
next_obs=next_obs,
encoder=phi_encoder,
dynamics_model=phi_dynamics_model,
)
if reward_model is not None:
predicted_reward = reward_model(latent_state, actions)
metrics["reward_error"] = F.mse_loss(predicted_reward, rewards)
if actor_model is not None:
predicted_action = actor_model(latent_state)
metrics["actor_error"] = F.mse_loss(predicted_action, actions)
if discriminator_model is not None:
discriminator_pred = discriminator_model(latent_state.detach())
batch_size = discriminator_pred.shape[0]
device = discriminator_pred.device
if env_idx is not None:
metrics["discriminator_error"] = F.cross_entropy(
discriminator_pred,
torch.ones(batch_size, dtype=torch.long, device=device) * env_idx,
)
discriminator_pred = discriminator_model(latent_state)
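        # softmax * log_softmax averaged over the batch is the negative entropy
        # of the discriminator's prediction, so adding this term to the encoder
        # loss (see train_iter) pushes the encoder toward representations the
        # discriminator cannot tell apart across environments.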
metrics["encoder_discriminator_error"] = torch.mean(
F.softmax(discriminator_pred, dim=1)
* F.log_softmax(discriminator_pred, dim=1)
)
if eta_encoder is None or eta_dynamics_model is None:
inp_for_decoder = phi_encoder(next_obs)
else:
_, eta_state = compute_encoder_and_dynamics_loss(
args=args,
obs=obs,
action=actions,
next_obs=next_obs,
encoder=eta_encoder,
dynamics_model=eta_dynamics_model,
)
inp_for_decoder = torch.cat(
[encoder(next_obs) for encoder in [phi_encoder, eta_encoder]], dim=1,
)
if decoder is not None:
pred_next_obs = decoder(inp_for_decoder)
metrics["decoder_error"] = F.mse_loss(pred_next_obs, next_obs)
return metrics
def get_default_metrics_dict():
return {
"model_error_in_latent_state": None,
"reward_error": None,
"actor_error": None,
"decoder_error": None,
"encoder_discriminator_error": None,
"discriminator_error": None,
}
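# Sketch of the accumulation pattern used by train_iter/eval_iter above
# (illustrative helper; tensor losses sum across environments, None means
# "not computed for this configuration"):
def _example_accumulate(per_env_metrics):
    totals = get_default_metrics_dict()
    for current in per_env_metrics:
        for key, value in current.items():
            if value is not None:
                totals[key] = value if totals[key] is None else totals[key] + value
    return totals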
| 8,130 | 30.153257 | 87 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/model_utils/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
import os
from argparse import Namespace
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
import utils
from ml_logger.logbook import LogBook
Envs_Type = Union[List[utils.FrameStack], utils.VecPyTorch]
def run_model_with_aux_loss(
args: Namespace,
logbook: LogBook,
device: torch.device,
train_envs: Envs_Type,
eval_envs: Envs_Type,
obs_shape: Tuple[int, int, int],
action_size: int,
train_replay_buffer: utils.MultiEnvReplayBuffer,
eval_replay_buffer: utils.MultiEnvReplayBuffer,
logging_dict: Dict,
phi_encoder: torch.nn.Module,
phi_dynamics_model: torch.nn.Module,
reward_model: torch.nn.Module,
eta_encoders: List[torch.nn.Module],
eta_dynamics_models: List[torch.nn.Module],
decoders: List[torch.nn.Module],
opt: torch.optim.Optimizer,
num_iters: Optional[int] = None,
iteration_start_index: int = 0,
):
# Iteration start index is used during IRM training
if num_iters is None:
num_iters = args.num_iters
for iteration in range(iteration_start_index, num_iters + iteration_start_index):
(
model_error_in_latent_state,
reward_error,
model_error_in_eta_state,
decoder_error,
) = train_iter(
args=args,
train_replay_buffer=train_replay_buffer,
phi_encoder=phi_encoder,
phi_dynamics_model=phi_dynamics_model,
reward_model=reward_model,
eta_encoders=eta_encoders,
eta_dynamics_models=eta_dynamics_models,
decoders=decoders,
opt=opt,
)
if iteration % args.log_interval == 0:
print(
f"Iteration {iteration}: Mean train set model error: {model_error_in_latent_state.mean()}, decoding error: {decoder_error.mean()}%%"
)
(
test_model_error_in_latent_state,
test_reward_error,
test_model_error_in_eta_state,
test_decoder_error,
) = eval_iter(
args=args,
eval_replay_buffer=eval_replay_buffer,
phi_encoder=phi_encoder,
phi_dynamics_model=phi_dynamics_model,
reward_model=reward_model,
eta_encoders=eta_encoders,
eta_dynamics_models=eta_dynamics_models,
decoders=decoders,
opt=opt,
)
# test_model_error_in_latent_state = 0
# test_reward_error = 0
# test_model_error_in_eta_state = 0
# test_decoder_error = 0
current_metrics = {
"steps": iteration,
"model_error_in_latent_state": model_error_in_latent_state.item(),
"model_error_in_eta_state": model_error_in_eta_state.item(),
"reward_error": reward_error.item(),
"decoding_error": decoder_error.item(),
"test_model_error_in_latent_state": test_model_error_in_latent_state.item(),
# "test_model_error_in_eta_state": test_model_error_in_eta_state.item(),
"test_reward_error": test_reward_error.item(),
# "test_decoding_error": test_decoder_error.item(),
}
for key in current_metrics:
logging_dict[key].append(current_metrics[key])
# logging_dict["eval_model_error"].append(test_error.item())
print(
f"Mean test set model error: {test_model_error_in_latent_state.item()}"
)
logbook.write_metric_logs(metrics=current_metrics)
torch.save(logging_dict, os.path.join(args.work_dir, "logging_dict.pt"))
if args.save_model:
state_dict = {
"phi_encoder": phi_encoder.state_dict(),
"phi_dynamics_model": phi_dynamics_model.state_dict(),
"reward_model": reward_model.state_dict(),
"eta_encoders": [
eta_encoder.state_dict() for eta_encoder in eta_encoders
],
"eta_dynamics_models": [
eta_dynamics_model.state_dict()
for eta_dynamics_model in eta_dynamics_models
],
"decoders": [decoder.state_dict() for decoder in decoders],
"opt": opt.state_dict(),
"epoch": iteration,
}
if args.save_model_path.endswith(".pt"):
path_to_save_model = args.save_model_path
else:
path_to_save_model = os.path.join(
args.save_model_path, f"{iteration}.pt"
)
torch.save(state_dict, path_to_save_model)
print(f"Saved model at {path_to_save_model}")
def compute_encoder_and_dynamics_loss(obs, action, next_obs, encoder, dynamics_model):
state = encoder(obs)
pred_next_state = dynamics_model(state, action)
true_next_state = encoder(next_obs).detach()
return F.mse_loss(pred_next_state, true_next_state), state
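# Design note: the `.detach()` above freezes the encoding of next_obs as a
# regression target, so gradients reach the encoder only through the predicted
# branch. A quick illustrative check:
#
#     target = encoder(next_obs).detach()
#     assert not target.requires_grad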
def train_iter(
args: Namespace,
train_replay_buffer: utils.MultiEnvReplayBuffer,
phi_encoder: nn.Module,
phi_dynamics_model: nn.Module,
reward_model: nn.Module,
eta_encoders: List[nn.Module],
eta_dynamics_models: List[nn.Module],
decoders: List[nn.Module],
    opt: torch.optim.Optimizer,
):
should_use_one_decoder = args.one_decoder or len(decoders) == 1
model_error_in_latent_state = 0
reward_error = 0
model_error_in_eta_state = 0
decoder_error = 0
for i in range(args.num_train_envs):
obses, actions, rewards, next_obses, not_dones = train_replay_buffer.sample(i)
if should_use_one_decoder:
current_decoder = decoders[0] # only use one decoder
else:
current_decoder = decoders[i]
current_eta_encoder = None
current_eta_dynamics_model = None
if eta_encoders is not None:
if len(eta_encoders) == 1:
current_eta_encoder = eta_encoders[0]
else:
current_eta_encoder = eta_encoders[i]
if eta_dynamics_models is not None:
if len(eta_dynamics_models) == 1:
current_eta_dynamics_model = eta_dynamics_models[0]
else:
current_eta_dynamics_model = eta_dynamics_models[i]
(
current_error_in_latent_state,
current_reward_error,
current_error_in_eta_state,
current_decoder_error,
) = compute_loss_using_buffer(
obs=obses,
actions=actions,
next_obs=next_obses,
rewards=rewards,
phi_encoder=phi_encoder,
phi_dynamics_model=phi_dynamics_model,
reward_model=reward_model,
eta_encoder=current_eta_encoder,
eta_dynamics_model=current_eta_dynamics_model,
decoder=current_decoder,
)
model_error_in_latent_state += current_error_in_latent_state
reward_error += current_reward_error
model_error_in_eta_state += current_error_in_eta_state
decoder_error += current_decoder_error
opt.zero_grad()
(
model_error_in_latent_state
+ reward_error
+ model_error_in_eta_state
+ decoder_error
).backward()
opt.step()
return (
model_error_in_latent_state,
reward_error,
model_error_in_eta_state,
decoder_error,
)
def eval_iter(
args: Namespace,
eval_replay_buffer: utils.MultiEnvReplayBuffer,
phi_encoder: nn.Module,
phi_dynamics_model: nn.Module,
reward_model: nn.Module,
eta_encoders: List[nn.Module],
eta_dynamics_models: List[nn.Module],
decoders: List[nn.Module],
    opt: torch.optim.Optimizer,
):
should_use_one_decoder = args.one_decoder or len(decoders) == 1
model_error_in_latent_state = 0
reward_error = 0
model_error_in_eta_state = 0
decoder_error = 0
with torch.no_grad():
for i in range(args.num_eval_envs):
obses, actions, rewards, next_obses, not_dones = eval_replay_buffer.sample(
i
)
if should_use_one_decoder:
current_decoder = decoders[0] # only use one decoder
else:
current_decoder = decoders[i]
# (
# current_error_in_latent_state,
# current_reward_error,
# current_error_in_eta_state,
# current_decoder_error,
# ) = compute_loss_using_buffer(
# obs=obses,
# actions=actions,
# next_obs=next_obses,
# rewards=rewards,
# phi_encoder=phi_encoder,
# phi_dynamics_model=phi_dynamics_model,
# reward_model=reward_model,
# eta_encoder=eta_encoders[i],
# eta_dynamics_model=eta_dynamics_models[i],
# decoder=current_decoder,
# )
(
current_error_in_latent_state,
current_reward_error,
current_error_in_eta_state,
current_decoder_error,
) = compute_loss_using_buffer(
obs=obses,
actions=actions,
next_obs=next_obses,
rewards=rewards,
phi_encoder=phi_encoder,
phi_dynamics_model=phi_dynamics_model,
reward_model=reward_model,
eta_encoder=None,
eta_dynamics_model=None,
decoder=None,
)
model_error_in_latent_state += current_error_in_latent_state
reward_error += current_reward_error
model_error_in_eta_state += current_error_in_eta_state
decoder_error += current_decoder_error
return (
model_error_in_latent_state,
reward_error,
model_error_in_eta_state,
decoder_error,
)
def compute_loss_using_buffer(
obs,
actions,
next_obs,
rewards,
phi_encoder,
phi_dynamics_model,
reward_model,
eta_encoder,
eta_dynamics_model,
decoder,
):
(current_error_in_latent_state, latent_state,) = compute_encoder_and_dynamics_loss(
obs=obs,
action=actions,
next_obs=next_obs,
encoder=phi_encoder,
dynamics_model=phi_dynamics_model,
)
predicted_reward = reward_model(latent_state, actions)
current_reward_error = F.mse_loss(predicted_reward, rewards)
if eta_encoder is None or eta_dynamics_model is None:
current_error_in_eta_state = torch.tensor(0)
inp_for_decoder = phi_encoder(next_obs)
else:
current_error_in_eta_state, eta_state = compute_encoder_and_dynamics_loss(
obs=obs,
action=actions,
next_obs=next_obs,
encoder=eta_encoder,
dynamics_model=eta_dynamics_model,
)
inp_for_decoder = torch.cat(
[encoder(next_obs) for encoder in [phi_encoder, eta_encoder]], dim=1,
)
if decoder is not None:
pred_next_obs = decoder(inp_for_decoder)
current_decoder_error = F.mse_loss(pred_next_obs, next_obs)
else:
current_decoder_error = torch.tensor(0)
return (
current_error_in_latent_state,
current_reward_error,
current_error_in_eta_state,
current_decoder_error,
)
| 11,782 | 31.821727 | 148 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/model_utils/model.py | # Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
class DynamicsModel(nn.Module):
def __init__(self, representation_size, action_shape):
super().__init__()
self.action_linear = nn.Linear(action_shape, representation_size)
self.trunk = nn.Sequential(
nn.Linear(representation_size * 2, 128),
nn.ReLU(),
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, representation_size),
)
def forward(self, state, action):
action_emb = self.action_linear(action)
return self.trunk(torch.cat([state, action_emb], dim=-1))
class RewardModel(nn.Module):
def __init__(self, representation_size, action_shape):
super().__init__()
self.action_linear = nn.Linear(action_shape, representation_size)
self.trunk = nn.Sequential(
nn.Linear(representation_size * 2, 128),
nn.ReLU(),
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, 1),
)
def forward(self, state, action):
action_emb = self.action_linear(action)
return self.trunk(torch.cat([state, action_emb], dim=-1))
class ActorModel(nn.Module):
def __init__(self, representation_size, action_shape):
super().__init__()
self.trunk = nn.Sequential(
nn.Linear(representation_size, 512),
nn.ReLU(),
nn.Linear(512, 256),
nn.ReLU(),
nn.Linear(256, 256),
nn.ReLU(),
nn.Linear(256, 128),
nn.ReLU(),
nn.Linear(128, action_shape),
)
def forward(self, state):
return self.trunk(state)
class DiscriminatorModel(nn.Module):
def __init__(self, representation_size, num_envs):
super().__init__()
self.trunk = nn.Sequential(
nn.Linear(representation_size, 128),
nn.ReLU(),
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, num_envs),
)
def forward(self, state):
return self.trunk(state)
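# Quick shape check for the modules above (illustrative; not used in training):
def _example_shapes():
    state = torch.randn(5, 50)                 # batch of latent states
    action = torch.randn(5, 6)                 # batch of actions
    dyn = DynamicsModel(representation_size=50, action_shape=6)
    rew = RewardModel(representation_size=50, action_shape=6)
    disc = DiscriminatorModel(representation_size=50, num_envs=3)
    assert dyn(state, action).shape == (5, 50)
    assert rew(state, action).shape == (5, 1)
    assert disc(state).shape == (5, 3)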
| 2,125 | 26.61039 | 73 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/model_utils/bootstrap/common.py | # Copyright (c) Facebook, Inc. and its affiliates.
import argparse
from argparse import Namespace
import torch
import utils
from ml_logger.logbook import LogBook
from ml_logger.logbook import make_config as make_logbook_config
def make_logbook(args: Namespace) -> LogBook:
logbook_config = make_logbook_config(
logger_file_path=args.logger_file_path, id="0",
)
logbook = LogBook(config=logbook_config)
logbook.write_config_log(config=vars(args))
return logbook
def create_multi_env_replay_buffer(
args: argparse.Namespace, env: utils.FrameStack, device: torch.device, num_envs: int
) -> utils.ReplayBuffer:
""""Method to create a multi env replay buffer"""
return utils.MultiEnvReplayBuffer(
obs_shape=env.observation_space.shape,
action_shape=env.action_space.shape,
capacity=args.replay_buffer_capacity,
batch_size=args.batch_size,
device=device,
num_envs=num_envs,
)
def create_replay_buffer(
args: argparse.Namespace, env: utils.FrameStack, device: torch.device
) -> utils.ReplayBuffer:
""""Method to create a replay buffer"""
return utils.ReplayBuffer(
obs_shape=env.observation_space.shape,
action_shape=env.action_space.shape,
capacity=args.replay_buffer_capacity,
batch_size=args.batch_size,
device=device,
)
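# Illustrative call (hedged): `args` only needs the two fields read above, and
# `env` can be anything exposing observation_space/action_space shapes.
#
#     from types import SimpleNamespace
#     args = SimpleNamespace(replay_buffer_capacity=100_000, batch_size=128)
#     buffer = create_replay_buffer(args=args, env=env, device=torch.device("cpu"))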
| 1,375 | 27.666667 | 88 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/model_utils/bootstrap/basic_model.py | # Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
from argparse import Namespace
from typing import Tuple
import numpy as np
import torch
import utils
from model_utils.bootstrap.common import create_multi_env_replay_buffer, make_logbook
from model_utils.env import make_train_and_eval_envs
from model_utils.model import DynamicsModel
from sacae.decoder import make_decoder
from sacae.encoder import make_encoder
def bootstrap_envs_and_buffer(args: Namespace):
"""Method to bootstrap the envs, buffer and related objects"""
logbook = make_logbook(args=args)
device = "cuda" if torch.cuda.is_available() else "cpu"
args.work_dir = os.path.join(
args.work_dir,
args.domain_name + "_" + args.task_name,
args.exp_name,
str(args.seed),
)
utils.make_dir(args.work_dir)
with open(os.path.join(args.work_dir, "args.json"), "w") as f:
json.dump(vars(args), f, sort_keys=True, indent=4)
train_envs, eval_envs = make_train_and_eval_envs(args=args)
# print('Train env backgrounds: ', [train_env.bg_color for train_env in train_envs])
# print('Eval env backgrounds: ', [eval_env.bg_color for eval_env in eval_envs])
dummy_env = train_envs[0]
obs_shape = dummy_env.observation_space.shape
action_size = dummy_env.action_space.shape[0]
train_replay_buffer = create_multi_env_replay_buffer(
args=args, env=train_envs[0], device=device, num_envs=args.num_train_envs
)
eval_replay_buffer = create_multi_env_replay_buffer(
args=args, env=train_envs[0], device=device, num_envs=args.num_eval_envs
)
logging_dict = {
"model_error": [],
"decoding_error": [],
"eval_model_error": [],
"steps": [],
}
return (
logbook,
device,
train_envs,
eval_envs,
obs_shape,
action_size,
train_replay_buffer,
eval_replay_buffer,
logging_dict,
)
def bootstrap_models_and_optimizers(
args: Namespace,
obs_shape: Tuple[int, int, int],
action_size: int,
device: torch.device,
):
"""Method to bootstrap the models and optimizers"""
phi = make_encoder(
encoder_type=args.encoder_type,
obs_shape=obs_shape,
feature_dim=args.encoder_feature_dim,
num_layers=args.num_layers,
num_filters=args.num_filters,
).to(device)
dynamics_model = DynamicsModel(
representation_size=args.encoder_feature_dim, action_shape=action_size
).to(device)
decoders = [
make_decoder(
decoder_type=args.decoder_type,
obs_shape=obs_shape,
feature_dim=args.encoder_feature_dim,
num_layers=args.num_layers,
num_filters=args.num_filters,
).to(device)
for i in range(args.num_train_envs)
]
opt = torch.optim.Adam(
list(phi.parameters()) + list(dynamics_model.parameters()), lr=args.lr
)
    decoder_opt = torch.optim.Adam(
        # flatten the per-decoder parameter lists; np.concatenate mangles
        # Parameter objects and breaks on inhomogeneous shapes
        [p for decoder in decoders for p in decoder.parameters()],
        lr=args.lr,
    )
return phi, dynamics_model, decoders, opt, decoder_opt
| 3,179 | 27.648649 | 88 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/model_utils/bootstrap/model_with_aux_loss.py | # Copyright (c) Facebook, Inc. and its affiliates.
import os
from argparse import Namespace
from typing import Optional, Tuple
import torch
import utils
from ml_logger.logbook import LogBook
from model_utils.bootstrap import basic_model as basic_bootstrap
from model_utils.bootstrap.common import create_multi_env_replay_buffer, make_logbook
from model_utils.env import make_train_and_eval_envs
from model_utils.model import ActorModel, DiscriminatorModel, DynamicsModel, RewardModel
from sacae.decoder import make_decoder
from sacae.encoder import make_encoder
def bootstrap_envs_and_buffer(args: Namespace):
"""Method to bootstrap the envs, buffer and related objects"""
(
logbook,
device,
train_envs,
eval_envs,
obs_shape,
action_size,
train_replay_buffer,
eval_replay_buffer,
logging_dict,
) = basic_bootstrap.bootstrap_envs_and_buffer(args=args)
logging_dict = {
"steps": [],
"model_error_in_latent_state": [],
"model_error_in_eta_state": [],
"reward_error": [],
"decoding_error": [],
"test_model_error_in_latent_state": [],
"test_model_error_in_eta_state": [],
"test_reward_error": [],
"test_decoding_error": [],
}
return (
logbook,
device,
train_envs,
eval_envs,
obs_shape,
action_size,
train_replay_buffer,
eval_replay_buffer,
logging_dict,
)
def bootstrap_models_and_optimizers(
args: Namespace,
obs_shape: Tuple[int, int, int],
action_size: int,
device: torch.device,
logbook: Optional[LogBook],
):
"""Method to bootstrap the models and optimizers"""
phi_encoder = make_encoder(
encoder_type=args.encoder_type,
obs_shape=obs_shape,
feature_dim=args.encoder_feature_dim,
num_layers=args.num_layers,
num_filters=args.num_filters,
).to(device)
params_to_add = list(phi_encoder.parameters())
phi_dynamics_model = DynamicsModel(
representation_size=args.encoder_feature_dim, action_shape=action_size
).to(device)
params_to_add += list(phi_dynamics_model.parameters())
if args.use_discriminator:
discriminator = DiscriminatorModel(
representation_size=args.encoder_feature_dim, num_envs=args.num_train_envs
).to(device)
discriminator_opt = torch.optim.Adam(
list(discriminator.parameters()), lr=args.lr,
)
else:
discriminator = None
discriminator_opt = None
if args.use_reward:
reward_model = RewardModel(
representation_size=args.encoder_feature_dim, action_shape=action_size
).to(device)
params_to_add += list(reward_model.parameters())
else:
reward_model = None
if args.use_actor:
actor_model = ActorModel(
representation_size=args.encoder_feature_dim, action_shape=action_size
).to(device)
params_to_add += list(actor_model.parameters())
else:
actor_model = None
def flatten(_list):
# Taken from https://stackoverflow.com/questions/952914/how-to-make-a-flat-list-out-of-list-of-lists
return [item for sublist in _list for item in sublist]
if args.use_single_encoder_decoder:
eta_encoders = None
eta_dynamics_models = None
decoders = [
make_decoder(
decoder_type=args.decoder_type,
obs_shape=obs_shape,
feature_dim=args.encoder_feature_dim,
num_layers=args.num_layers,
num_filters=args.num_filters,
).to(device)
]
params_to_add += flatten([list(decoder.parameters()) for decoder in decoders])
else:
num_models_to_make = args.num_train_envs
eta_encoders = [
make_encoder(
encoder_type=args.encoder_type,
obs_shape=obs_shape,
feature_dim=args.encoder_feature_dim,
num_layers=args.num_layers,
num_filters=args.num_filters,
).to(device)
for i in range(num_models_to_make)
]
eta_dynamics_models = [
DynamicsModel(
representation_size=args.encoder_feature_dim, action_shape=action_size
).to(device)
for i in range(num_models_to_make)
]
decoders = [
make_decoder(
decoder_type=args.decoder_type,
obs_shape=obs_shape,
feature_dim=args.encoder_feature_dim * 2,
num_layers=args.num_layers,
num_filters=args.num_filters,
).to(device)
for i in range(num_models_to_make)
]
params_to_add += (
flatten([list(decoder.parameters()) for decoder in decoders])
+ flatten(
[
list(dynamics_model.parameters())
for dynamics_model in eta_dynamics_models
]
)
+ flatten([list(encoder.parameters()) for encoder in eta_encoders])
)
opt = torch.optim.Adam(list(params_to_add), lr=args.lr,)
if logbook:
logbook.write_message(f"args.load_model: {args.load_model}")
if args.load_model:
if os.path.exists(args.load_model_path):
if args.load_model_path.endswith(".pt"):
path_to_load_model = args.load_model_path
else:
epochs = [
int(x.split(".pt")[0]) for x in os.listdir(args.load_model_path)
]
epoch_to_select = max(epochs)
path_to_load_model = os.path.join(
args.load_model_path, f"{epoch_to_select}.pt"
)
state_dict = torch.load(path_to_load_model)
phi_encoder.load_state_dict(state_dict["phi_encoder"])
phi_dynamics_model.load_state_dict(state_dict["phi_dynamics_model"])
reward_model.load_state_dict(state_dict["reward_model"])
for idx, eta_encoder in enumerate(eta_encoders):
eta_encoder.load_state_dict(state_dict["eta_encoders"][idx])
for idx, eta_dynamics_model in enumerate(eta_dynamics_models):
eta_dynamics_model.load_state_dict(
state_dict["eta_dynamics_models"][idx]
)
for idx, decoder in enumerate(decoders):
decoder.load_state_dict(state_dict["decoders"][idx])
opt.load_state_dict(state_dict["opt"])
if logbook:
logbook.write_message_logs(
{"message": f"Loading model from {path_to_load_model}"}
)
model = {
"phi_encoder": phi_encoder,
"phi_dynamics_model": phi_dynamics_model,
"reward_model": reward_model,
"eta_encoders": eta_encoders,
"eta_dynamics_models": eta_dynamics_models,
"decoders": decoders,
"discriminator_model": discriminator,
"opt": opt,
"discriminator_opt": discriminator_opt,
"actor_model": actor_model,
}
return model
| 7,253 | 31.675676 | 108 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/model_utils/bootstrap/model_with_aux_loss_vec.py | # Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
from argparse import Namespace
import torch
import utils
from model_utils.bootstrap.common import (create_multi_env_replay_buffer,
make_logbook)
from model_utils.env import make_fns_to_make_train_and_eval_envs
def bootstrap_envs_and_buffer(args: Namespace):
"""Method to bootstrap the envs, buffer and related objects"""
logbook = make_logbook(args=args)
device = "cuda" if torch.cuda.is_available() else "cpu"
utils.make_dir(args.work_dir)
with open(os.path.join(args.work_dir, "args.json"), "w") as f:
json.dump(vars(args), f, sort_keys=True, indent=4)
dummy_env = utils.make_dummy_env(args=args)
obs_shape = dummy_env.observation_space.shape
action_size = dummy_env.action_space.shape[0]
train_replay_buffer = create_multi_env_replay_buffer(
args=args, env=dummy_env, device=device, num_envs=args.num_train_envs
)
eval_replay_buffer = create_multi_env_replay_buffer(
args=args, env=dummy_env, device=device, num_envs=args.num_eval_envs
)
(
fns_to_make_train_envs,
fns_to_make_eval_envs,
) = make_fns_to_make_train_and_eval_envs(args=args)
max_episode_steps = dummy_env._max_episode_steps
vec_train_envs = utils.make_vec_envs(
fns_to_make_envs=fns_to_make_train_envs, device=None,
)
vec_eval_envs = utils.make_vec_envs(
fns_to_make_envs=fns_to_make_eval_envs, device=None,
)
logging_dict = {
"steps": [],
"model_error_in_latent_state": [],
"model_error_in_eta_state": [],
"reward_error": [],
"decoding_error": [],
"test_model_error_in_latent_state": [],
"test_model_error_in_eta_state": [],
"test_reward_error": [],
"test_decoding_error": [],
"discriminator_loss": [],
"encoder_discriminator_loss": [],
"test_encoder_discriminator_loss": [],
"actor_error": [],
"test_actor_error": [],
"discriminator_error": [],
"encoder_discriminator_error": [],
"test_encoder_discriminator_error": [],
}
return (
logbook,
device,
vec_train_envs,
vec_eval_envs,
obs_shape,
action_size,
train_replay_buffer,
eval_replay_buffer,
logging_dict,
max_episode_steps,
)
| 2,445 | 27.44186 | 77 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/sacae_utils/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
import time
from argparse import Namespace
from typing import Optional
import numpy as np
import torch
import utils
from sacae.sacae_vec import SacAeAgent
from sacae.vec_logger import VecLogger
# def evaluate_one_env_from_list_of_envs(
# env: utils.FrameStack,
# agent: sac_ae_vec.SacAeAgent,
# video,
# num_episodes: int,
# L: vec_logger.VecLogger,
# step: int,
# env_idx: int,
# ):
# """Evaluate one env from a list of envs"""
# for i in range(num_episodes):
# obs = env.reset()
# video.init(enabled=(i == 0))
# done = False
# episode_reward = 0
# while not done:
# with utils.eval_mode(agent):
# action = agent.select_action(obs)
# obs, reward, done, _ = env.step(action)
# video.record(env)
# episode_reward += reward
# video.save("%d.mp4" % step)
# L.log(f"eval/episode_reward", episode_reward, step, env_idx=env_idx)
# L.dump(step, env_idx=env_idx)
# def evaluate_list_of_envs(
# envs: List[utils.FrameStack],
# agent: sac_ae_vec.SacAeAgent,
# video,
# num_episodes: int,
# L: vec_logger.VecLogger,
# step: int,
# episode: int,
# ):
# for env_idx, env in enumerate(envs):
# L.log("eval/episode", episode, step, env_idx=env_idx)
# evaluate_one_env_from_list_of_envs(
# env, agent, video, num_episodes, L, step, env_idx
# )
def evaluate_agent(
vec_eval_envs: utils.VecPyTorch,
agent: SacAeAgent,
replay_buffer: utils.MultiEnvReplayBuffer,
video,
num_episodes: int,
L: VecLogger,
step: int,
args: Namespace,
max_episode_steps: int = 1000,
):
num_envs = args.num_eval_envs
mode = "eval"
def make_vector_using_val(val):
return np.full(num_envs, val)
def make_tensor_using_fn(fn):
return torch.tensor([fn() for _ in range(num_envs)])
def make_tensor_using_val(val):
return torch.tensor(make_vector_using_val(val))
episode, episode_reward, done = [make_vector_using_val(x) for x in [0, 0.0, True]]
obs = vec_eval_envs.reset()
if isinstance(obs, tuple):
obs = obs[0]
episode_reward = make_vector_using_val(0.0)
episode_step = make_vector_using_val(0)
start_time = make_vector_using_val(time.time())
should_add_reward = make_vector_using_val(1.0)
    # This mask ensures that no environment accumulates reward for more than num_episodes episodes
for _ in range(num_episodes * max_episode_steps):
with utils.eval_mode(agent):
action = agent.sample_action(obs.float())
next_obs, reward, done, _ = vec_eval_envs.step(action)
for env_idx in range(num_envs):
if done[env_idx] and episode[env_idx] <= num_episodes:
done[env_idx] = False
episode[env_idx] += 1
should_add_reward = (episode < num_episodes).astype(float)
reward = reward.numpy()[:, 0]
episode_reward += reward * should_add_reward
obs = next_obs
if isinstance(obs, tuple):
obs = obs[0]
condition = episode_step + 1 == max_episode_steps
done_bool = condition * 0 + (1 - condition) * done.astype(float)
if replay_buffer is not None:
for env_idx in range(num_envs):
replay_buffer.add(
obs=obs[env_idx],
action=action[env_idx],
reward=reward[env_idx],
next_obs=next_obs[env_idx],
done=done_bool[env_idx],
env_id=env_idx,
)
(
start_time,
episode_reward,
episode_step,
episode,
done,
L,
) = log_metrics_and_update_state(
num_envs=num_envs,
step=step,
mode=mode,
L=L,
start_time=start_time,
episode_reward=episode_reward / num_episodes,
episode=episode,
episode_step=episode_step,
done=done,
should_log_env_idx=make_vector_using_val(True),
)
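# Illustrative sketch of the per-env episode masking used above: once an env
# has finished num_episodes episodes, its rewards stop accumulating.
def _example_reward_masking():
    episode = np.array([0, 2, 3])              # episodes completed per env
    reward = np.array([1.0, 1.0, 1.0])
    num_episodes = 3
    should_add_reward = (episode < num_episodes).astype(float)
    return reward * should_add_reward          # -> array([1., 1., 0.])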
def train_agent(
args: Namespace,
vec_train_envs: utils.VecPyTorch,
vec_eval_envs: utils.VecPyTorch,
L: VecLogger,
agent: SacAeAgent,
video,
model_dir,
train_replay_buffer: utils.MultiEnvReplayBuffer,
eval_replay_buffer: utils.MultiEnvReplayBuffer,
buffer_dir,
max_episode_steps: int,
num_train_steps: Optional[int] = None,
step_start_index: int = 0,
episode_start_index: int = 0,
):
num_envs = args.num_train_envs
def make_vector_using_val(val):
return np.full(num_envs, val)
def make_tensor_using_fn(fn):
return torch.tensor([fn() for _ in range(num_envs)])
def make_tensor_using_val(val):
return torch.tensor(make_vector_using_val(val))
if num_train_steps is None:
num_train_steps = args.num_train_steps
episode, episode_reward, done = [
make_vector_using_val(x) for x in [episode_start_index, 0.0, True]
]
obs = vec_train_envs.reset()
if isinstance(obs, tuple):
obs = obs[0]
episode_reward = make_vector_using_val(0.0)
episode_step = make_vector_using_val(0)
start_time = make_vector_using_val(time.time())
for step in range(step_start_index, step_start_index + num_train_steps):
# evaluate agent periodically
if step > 0 and args.eval_freq > 0 and step % args.eval_freq == 0:
evaluate_agent(
vec_eval_envs=vec_eval_envs,
agent=agent,
replay_buffer=eval_replay_buffer,
video=video,
num_episodes=args.num_eval_episodes,
L=L,
step=step,
args=args,
max_episode_steps=max_episode_steps,
)
if args.save_model:
agent.save(model_dir, step)
print(f"Saving the model at {model_dir} after {step} steps.")
# sample action for data collection
if step < args.init_steps:
action = make_tensor_using_fn(vec_train_envs.action_space.sample)
else:
with utils.eval_mode(agent):
action = agent.sample_action(obs.float())
# run training update
if step >= args.init_steps:
num_updates = args.init_steps if step == args.init_steps else 1
for _ in range(num_updates):
for env_idx in range(args.num_train_envs):
agent.update(train_replay_buffer, L, step, env_idx=env_idx)
next_obs, reward, done, _ = vec_train_envs.step(action)
        # allow infinite bootstrap
condition = episode_step + 1 == max_episode_steps
done_bool = condition * 0 + (1 - condition) * done.astype(float)
reward = reward.numpy()[:, 0]
episode_reward += reward
for env_idx in range(args.num_train_envs):
train_replay_buffer.add(
obs=obs[env_idx],
action=action[env_idx],
reward=reward[env_idx],
next_obs=next_obs[env_idx],
done=done_bool[env_idx],
env_id=env_idx,
)
(
start_time,
episode_reward,
episode_step,
episode,
done,
L,
) = log_metrics_and_update_state(
num_envs=args.num_train_envs,
step=step,
mode="train",
L=L,
start_time=start_time,
episode_reward=episode_reward,
episode=episode,
episode_step=episode_step,
done=done,
should_log_env_idx=done * (step > 0),
)
obs = next_obs
if isinstance(obs, tuple):
obs = obs[0]
episode_step += 1
def log_metrics_and_update_state(
num_envs: int,
step: int,
mode: str,
L: VecLogger,
start_time: np.array,
episode_reward: np.array,
episode: np.array,
episode_step: np.array,
done: np.array,
should_log_env_idx: np.array,
):
for env_idx in range(num_envs):
if should_log_env_idx[env_idx]:
L.log(
f"{mode}/duration",
time.time() - start_time[env_idx],
step,
env_idx=env_idx,
)
start_time[env_idx] = time.time()
L.log(
f"{mode}/episode_reward",
episode_reward[env_idx],
step,
env_idx=env_idx,
)
done[env_idx] = False
episode_reward[env_idx] = 0
episode_step[env_idx] = 0
episode[env_idx] += 1
L.log(f"{mode}/episode", episode[env_idx], step, env_idx=env_idx)
L.dump(step, env_idx=env_idx, mode=mode)
return (start_time, episode_reward, episode_step, episode, done, L)
| 9,022 | 28.680921 | 105 | py |
icp-block-mdp | icp-block-mdp-master/imitation_learning/sacae_utils/bootstrap.py | # Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import json
import os
from argparse import Namespace
from typing import Optional, Union
import torch
from torch import nn
import dmc2gym
import utils
from ml_logger.logbook import LogBook
from ml_logger.logbook import make_config as make_logbook_config
from sacae import sacae, sacae_vec
from sacae.logger import Logger
from sacae.vec_logger import VecLogger
from sacae.video import VideoRecorder
AgentType = Union[sacae.SacAeAgent, sacae_vec.SacAeAgent]
def validate_env(env):
# the dmc2gym wrapper standardizes actions
assert env.action_space.low.min() >= -1
assert env.action_space.high.max() <= 1
def make_dirs_and_recorders(args: Namespace):
utils.make_dir(args.work_dir)
video_dir = utils.make_dir(os.path.join(args.work_dir, "video"))
model_dir = utils.make_dir(os.path.join(args.work_dir, args.save_model_path))
buffer_dir = utils.make_dir(os.path.join(args.work_dir, args.save_buffer_path))
video = VideoRecorder(video_dir if args.save_video else None)
with open(os.path.join(args.work_dir, "args.json"), "w") as f:
json.dump(vars(args), f, sort_keys=True, indent=4)
return video_dir, model_dir, buffer_dir, video
def make_expert(
obs_shape, action_shape, args: Namespace, device: torch.device,
) -> AgentType:
return sacae_vec.SacAeAgent(
obs_shape=obs_shape,
action_shape=action_shape,
device=device,
hidden_dim=args.hidden_dim,
discount=args.discount,
init_temperature=args.init_temperature,
alpha_lr=args.alpha_lr,
alpha_beta=args.alpha_beta,
actor_lr=args.actor_lr,
actor_beta=args.actor_beta,
actor_log_std_min=args.actor_log_std_min,
actor_log_std_max=args.actor_log_std_max,
actor_update_freq=args.actor_update_freq,
critic_lr=args.critic_lr,
critic_beta=args.critic_beta,
critic_tau=args.critic_tau,
critic_target_update_freq=args.critic_target_update_freq,
encoder_type=args.encoder_type,
encoder_feature_dim=args.encoder_feature_dim,
encoder_lr=args.encoder_lr,
encoder_tau=args.encoder_tau,
decoder_type=args.decoder_type,
decoder_lr=args.decoder_lr,
decoder_update_freq=args.decoder_update_freq,
decoder_latent_lambda=args.decoder_latent_lambda,
decoder_weight_lambda=args.decoder_weight_lambda,
num_layers=args.num_layers,
num_filters=args.num_filters,
)
def bootstrap_expert(args: Namespace):
utils.set_seed_everywhere(args.seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
fns_to_make_train_envs = [
utils.fn_to_make_env(args=args, seed=seed, resource_files=None, camera_id=0)
for seed in range(args.num_train_envs)
]
fns_to_make_eval_envs = [
utils.fn_to_make_env(args=args, seed=seed, resource_files=None, camera_id=0)
for seed in range(args.num_eval_envs)
]
vec_train_envs = utils.make_vec_envs(
fns_to_make_envs=fns_to_make_train_envs, device=None
)
vec_eval_envs = utils.make_vec_envs(
fns_to_make_envs=fns_to_make_eval_envs, device=None
)
dummy_env = utils.make_env(args, 0, resource_files=None, camera_id=0)
video_dir, model_dir, buffer_dir, video = make_dirs_and_recorders(args=args)
validate_env(dummy_env)
replay_buffer = utils.MultiEnvReplayBuffer(
obs_shape=dummy_env.observation_space.shape,
action_shape=dummy_env.action_space.shape,
capacity=args.replay_buffer_capacity,
batch_size=args.batch_size,
device=device,
num_envs=args.num_train_envs,
)
agent = make_expert(
obs_shape=dummy_env.observation_space.shape,
action_shape=dummy_env.action_space.shape,
args=args,
device=device,
)
L = VecLogger(args.work_dir, use_tb=args.save_tb, num_envs=args.num_train_envs)
max_episode_steps = dummy_env._max_episode_steps
return (
vec_train_envs,
vec_eval_envs,
max_episode_steps,
video_dir,
model_dir,
buffer_dir,
video,
device,
replay_buffer,
agent,
L,
)
def bootstrap_agent(args: Namespace, obs_shape, action_size, device, encoder):
video_dir, model_dir, buffer_dir, video = make_dirs_and_recorders(args=args)
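    # NOTE: `sacae_bootstrap` is not imported in this module; the call below
    # assumes a bootstrap helper exposing make_agent(...) is available.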
agent = sacae_bootstrap.make_agent(
obs_shape=obs_shape,
action_shape=(action_size,),
args=args,
device=device,
is_vec=True,
is_irm=True,
encoder=encoder,
)
L = VecLogger(args.work_dir, use_tb=args.save_tb, num_envs=args.num_train_envs)
return (
video_dir,
model_dir,
buffer_dir,
video,
device,
agent,
L,
)
| 4,898 | 27.988166 | 84 | py |
icp-block-mdp | icp-block-mdp-master/model_learning/main.py | # Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import json
import os
import numpy as np
import torch
import torch.nn.functional as F
from torch import autograd, nn, optim
from torch.utils.data import DataLoader, Dataset
import utils
from model import Decoder, DynamicsModel, Encoder
def parse_args():
parser = argparse.ArgumentParser()
# environment
parser.add_argument("--domain_name", default="cheetah")
parser.add_argument("--task_name", default="run")
parser.add_argument("--image_size", default=84, type=int)
parser.add_argument("--action_repeat", default=1, type=int)
parser.add_argument("--frame_stack", default=3, type=int)
parser.add_argument("--num_envs", default=2, type=int)
# replay buffer
parser.add_argument("--replay_buffer_capacity", default=1000000, type=int)
parser.add_argument("--num_samples", default=50000, type=int)
# training
parser.add_argument("--num_iters", default=100000, type=int)
parser.add_argument("--lr", default=1e-3, type=float)
parser.add_argument("--batch_size", default=128, type=int)
parser.add_argument(
"--one_decoder", action="store_true", help="baseline with single decoder"
)
# encoder/decoder
parser.add_argument("--encoder_type", default="identity", type=str)
parser.add_argument("--encoder_feature_dim", default=50, type=int)
parser.add_argument("--encoder_lr", default=1e-3, type=float)
parser.add_argument("--encoder_tau", default=0.05, type=float)
parser.add_argument("--num_layers", default=4, type=int)
parser.add_argument("--num_filters", default=32, type=int)
# misc
parser.add_argument("--seed", default=1, type=int)
parser.add_argument("--exp_name", default="local", type=str)
parser.add_argument("--log_interval", default=1000, type=int)
parser.add_argument("--work_dir", default=".", type=str)
parser.add_argument("--save_tb", default=False, action="store_true")
parser.add_argument("--save_model", default=False, action="store_true")
parser.add_argument("--save_buffer", default=False, action="store_true")
parser.add_argument("--save_video", default=False, action="store_true")
args = parser.parse_args()
return args
def main(args):
device = "cuda" if torch.cuda.is_available() else "cpu"
args.work_dir = os.path.join(
args.work_dir,
args.domain_name + "_" + args.task_name,
args.exp_name,
str(args.seed),
)
os.makedirs(args.work_dir, exist_ok=True)
with open(os.path.join(args.work_dir, "args.json"), "w") as f:
json.dump(vars(args), f, sort_keys=True, indent=4)
train_envs = [
utils.make_env(np.random.randint(0, 255), args) for i in range(args.num_envs)
]
eval_envs = [utils.make_env(np.random.randint(0, 255), args) for i in range(5)]
print("Train env backgrounds: ", [train_env.bg_color for train_env in train_envs])
print("Eval env backgrounds: ", [eval_env.bg_color for eval_env in eval_envs])
obs_shape = train_envs[0].observation_space.shape
action_size = train_envs[0].action_space.shape[0]
phi = Encoder(obs_shape, args.encoder_feature_dim).to(device)
model = DynamicsModel(args.encoder_feature_dim, action_size).to(device)
decoders = [
Decoder(obs_shape, args.encoder_feature_dim).to(device)
for i in range(args.num_envs)
]
opt = torch.optim.Adam(
list(phi.parameters()) + list(model.parameters()), lr=args.lr
)
    decoder_opt = torch.optim.Adam(
        # flatten the per-decoder parameter lists; np.concatenate mangles
        # Parameter objects and breaks on inhomogeneous shapes
        [p for decoder in decoders for p in decoder.parameters()],
        lr=args.lr,
    )
train_replay_buffer = utils.ReplayBuffer(
obs_shape=train_envs[0].observation_space.shape,
action_shape=train_envs[0].action_space.shape,
capacity=args.replay_buffer_capacity,
batch_size=args.batch_size,
device=device,
)
eval_replay_buffer = utils.ReplayBuffer(
obs_shape=train_envs[0].observation_space.shape,
action_shape=train_envs[0].action_space.shape,
capacity=args.replay_buffer_capacity,
batch_size=args.batch_size,
device=device,
)
logging_dict = {
"model_error": [],
"decoding_error": [],
"eval_model_error": [],
"steps": [],
}
# collect data across environments
for env_id in range(args.num_envs):
train_replay_buffer = utils.collect_random_data(
train_envs[env_id],
env_id,
args.num_samples,
train_replay_buffer,
save_video=args.save_video,
)
eval_replay_buffer = utils.collect_random_data(
eval_envs[env_id], env_id, args.num_samples, eval_replay_buffer
)
# Train loop
for iteration in range(args.num_iters):
model_error = 0
decoder_error = 0
for i in range(args.num_envs):
obses, actions, rewards, next_obses, not_dones = train_replay_buffer.sample(
i
)
latent = phi(obses)
pred_next_latent = model(latent, actions)
true_next_latent = phi(next_obses).detach()
error_e = F.mse_loss(pred_next_latent, true_next_latent)
model_error += error_e
if args.one_decoder:
pred_next_obses = decoders[0](pred_next_latent) # only use one decoder
else:
pred_next_obses = decoders[i](pred_next_latent)
decoder_error_e = F.mse_loss(pred_next_obses, next_obses)
decoder_error += decoder_error_e
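        # Two-stage update: retain_graph=True below keeps the graph alive so
        # that decoder_error (which also depends on pred_next_latent) can
        # backpropagate through the same forward pass afterwards.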
opt.zero_grad()
model_error.backward(retain_graph=True)
opt.step()
decoder_opt.zero_grad()
decoder_error.backward()
decoder_opt.step()
if iteration % args.log_interval == 0:
with torch.no_grad():
logging_dict["steps"].append(iteration)
logging_dict["model_error"].append(model_error.item())
logging_dict["decoding_error"].append(decoder_error.item())
print(
f"Iteration {iteration}: Mean train set model error: {model_error.mean()}, decoding error: {decoder_error.mean()}%%"
)
# Evaluate on test environment
(
obses,
actions,
rewards,
next_obses,
not_dones,
) = eval_replay_buffer.sample()
with torch.no_grad():
latent = phi(obses)
pred_next_latent = model(latent, actions)
true_next_latent = phi(next_obses).detach()
test_error = F.mse_loss(pred_next_latent, true_next_latent)
logging_dict["eval_model_error"].append(test_error.item())
print(f"Mean test set error: {test_error}")
torch.save(logging_dict, os.path.join(args.work_dir, "logging_dict.pt"))
if __name__ == "__main__":
args = parse_args()
print(args)
main(args)
| 7,094 | 37.145161 | 136 | py |
icp-block-mdp | icp-block-mdp-master/model_learning/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
import os
import random
from collections import defaultdict, deque
import gym
import numpy as np
import skvideo.io
import torch
import torch.nn as nn
import dmc2gym
def make_env(bg, args):
env = dmc2gym.make(
domain_name=args.domain_name,
task_name=args.task_name,
seed=args.seed,
visualize_reward=False,
from_pixels=True,
height=args.image_size,
width=args.image_size,
frame_skip=args.action_repeat,
bg_color=bg,
)
# env.seed(args.seed)
env = FrameStack(env, k=args.frame_stack)
return env
class eval_mode(object):
def __init__(self, *models):
self.models = models
def __enter__(self):
self.prev_states = []
for model in self.models:
self.prev_states.append(model.training)
model.train(False)
def __exit__(self, *args):
for model, state in zip(self.models, self.prev_states):
model.train(state)
return False
def soft_update_params(net, target_net, tau):
for param, target_param in zip(net.parameters(), target_net.parameters()):
target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
def set_seed_everywhere(seed):
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
def module_hash(module):
result = 0
for tensor in module.state_dict().values():
result += tensor.sum().item()
return result
def make_dir(dir_path):
try:
os.mkdir(dir_path)
except OSError:
pass
return dir_path
def collect_random_data(env, env_id, num_samples, replay_buffer, save_video=False):
obs = env.reset()
if save_video:
frames = [obs]
for i in range(num_samples):
action = env.action_space.sample()
next_obs, reward, done, _ = env.step(action)
replay_buffer.add(env_id, obs, action, reward, next_obs, done)
if save_video:
frames.append(next_obs)
obs = next_obs
if done:
obs = env.reset()
if save_video:
skvideo.io.vwrite(f"video/{str(env)}_{env.bg_color}.mp4", frames)
save_video = False
return replay_buffer
def preprocess_obs(obs, bits=5):
"""Preprocessing image, see https://arxiv.org/abs/1807.03039."""
bins = 2 ** bits
assert obs.dtype == torch.float32
if bits < 8:
obs = torch.floor(obs / 2 ** (8 - bits))
obs = obs / bins
obs = obs + torch.rand_like(obs) / bins
obs = obs - 0.5
return obs
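# Illustrative example: with bits=5, a float image in [0, 255] is quantized to
# 32 levels, dithered with uniform noise, and centered around zero:
#
#     obs = torch.randint(0, 256, (3, 84, 84)).float()
#     out = preprocess_obs(obs, bits=5)
#     assert out.min() >= -0.5 and out.max() < 0.5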
class ReplayBuffer(object):
"""Buffer to store environment transitions."""
def __init__(self, obs_shape, action_shape, capacity, batch_size, device):
self.capacity = capacity
self.batch_size = batch_size
self.device = device
# the proprioceptive obs is stored as float32, pixels obs as uint8
obs_dtype = np.float32 if len(obs_shape) == 1 else np.uint8
self.env_ids = np.empty((capacity, 1), dtype=np.int32)
self.obses = np.empty((capacity, *obs_shape), dtype=obs_dtype)
self.next_obses = np.empty((capacity, *obs_shape), dtype=obs_dtype)
self.actions = np.empty((capacity, *action_shape), dtype=np.float32)
self.rewards = np.empty((capacity, 1), dtype=np.float32)
self.not_dones = np.empty((capacity, 1), dtype=np.float32)
self.idx = 0
self.last_save = 0
self.full = False
self.env_id_dict = defaultdict(list)
def add(self, env_id, obs, action, reward, next_obs, done):
        if self.full:
            # env_ids[idx] is a 1-element array; cast to the int key used by env_id_dict
            self.env_id_dict[int(self.env_ids[self.idx][0])].remove(self.idx)
np.copyto(self.env_ids[self.idx], env_id)
np.copyto(self.obses[self.idx], obs)
np.copyto(self.actions[self.idx], action)
np.copyto(self.rewards[self.idx], reward)
np.copyto(self.next_obses[self.idx], next_obs)
np.copyto(self.not_dones[self.idx], not done)
self.env_id_dict[env_id].append(self.idx)
self.idx = (self.idx + 1) % self.capacity
self.full = self.full or self.idx == 0
def sample(self, env_id=None):
if env_id is not None:
idxs = np.random.choice(self.env_id_dict[env_id], size=self.batch_size)
else:
idxs = np.random.randint(
0, self.capacity if self.full else self.idx, size=self.batch_size
)
obses = torch.as_tensor(self.obses[idxs], device=self.device).float()
actions = torch.as_tensor(self.actions[idxs], device=self.device)
rewards = torch.as_tensor(self.rewards[idxs], device=self.device)
next_obses = torch.as_tensor(self.next_obses[idxs], device=self.device).float()
not_dones = torch.as_tensor(self.not_dones[idxs], device=self.device)
return obses, actions, rewards, next_obses, not_dones
def save(self, save_dir):
if self.idx == self.last_save:
return
path = os.path.join(save_dir, "%d_%d.pt" % (self.last_save, self.idx))
payload = [
self.obses[self.last_save : self.idx],
self.next_obses[self.last_save : self.idx],
self.actions[self.last_save : self.idx],
self.rewards[self.last_save : self.idx],
self.not_dones[self.last_save : self.idx],
]
self.last_save = self.idx
torch.save(payload, path)
def load(self, save_dir):
chunks = os.listdir(save_dir)
        chunks = sorted(chunks, key=lambda x: int(x.split("_")[0]))
        for chunk in chunks:
start, end = [int(x) for x in chunk.split(".")[0].split("_")]
path = os.path.join(save_dir, chunk)
payload = torch.load(path)
assert self.idx == start
self.obses[start:end] = payload[0]
self.next_obses[start:end] = payload[1]
self.actions[start:end] = payload[2]
self.rewards[start:end] = payload[3]
self.not_dones[start:end] = payload[4]
self.idx = end
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
gym.Wrapper.__init__(self, env)
self._k = k
self._frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=1,
shape=((shp[0] * k,) + shp[1:]),
dtype=env.observation_space.dtype,
)
self._max_episode_steps = env._max_episode_steps
def reset(self):
obs = self.env.reset()
for _ in range(self._k):
self._frames.append(obs)
return self._get_obs()
def step(self, action):
obs, reward, done, info = self.env.step(action)
self._frames.append(obs)
return self._get_obs(), reward, done, info
def _get_obs(self):
assert len(self._frames) == self._k
return np.concatenate(list(self._frames), axis=0)
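# Usage sketch (hedged; any gym env with image observations should work):
#
#     env = FrameStack(base_env, k=3)
#     obs = env.reset()                        # shape: (3 * C, H, W)
#     obs, reward, done, info = env.step(env.action_space.sample())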
| 7,067 | 31.422018 | 87 | py |
icp-block-mdp | icp-block-mdp-master/model_learning/model.py | # Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
def tie_weights(src, trg):
assert type(src) == type(trg)
trg.weight = src.weight
trg.bias = src.bias
OUT_DIM = {2: 39, 4: 35, 6: 31}
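# OUT_DIM maps num_layers -> conv output spatial size, assuming 84x84 inputs:
# the first 3x3 stride-2 conv gives (84 - 3) // 2 + 1 = 41, and each following
# 3x3 stride-1 conv removes 2 pixels, so 2/4/6 layers yield 39/35/31.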
class Encoder(nn.Module):
"""Convolutional encoder of pixels observations."""
def __init__(self, obs_shape, feature_dim, num_layers=2, num_filters=32):
super().__init__()
assert len(obs_shape) == 3
self.feature_dim = feature_dim
self.num_layers = num_layers
self.convs = nn.ModuleList([nn.Conv2d(obs_shape[0], num_filters, 3, stride=2)])
for i in range(num_layers - 1):
self.convs.append(nn.Conv2d(num_filters, num_filters, 3, stride=1))
out_dim = OUT_DIM[num_layers]
self.fc = nn.Linear(num_filters * out_dim * out_dim, self.feature_dim)
self.ln = nn.LayerNorm(self.feature_dim)
self.outputs = dict()
def forward_conv(self, obs):
obs = obs / 255.0
self.outputs["obs"] = obs
conv = torch.relu(self.convs[0](obs))
self.outputs["conv1"] = conv
for i in range(1, self.num_layers):
conv = torch.relu(self.convs[i](conv))
self.outputs["conv%s" % (i + 1)] = conv
h = conv.view(conv.size(0), -1)
return h
def forward(self, obs, detach=False):
h = self.forward_conv(obs)
if detach:
h = h.detach()
h_fc = self.fc(h)
self.outputs["fc"] = h_fc
h_norm = self.ln(h_fc)
self.outputs["ln"] = h_norm
out = torch.tanh(h_norm)
self.outputs["tanh"] = out
return out
def copy_conv_weights_from(self, source):
"""Tie convolutional layers"""
# only tie conv layers
for i in range(self.num_layers):
tie_weights(src=source.convs[i], trg=self.convs[i])
def log(self, L, step, log_freq):
if step % log_freq != 0:
return
for k, v in self.outputs.items():
L.log_histogram("train_encoder/%s_hist" % k, v, step)
if len(v.shape) > 2:
L.log_image("train_encoder/%s_img" % k, v[0], step)
for i in range(self.num_layers):
L.log_param("train_encoder/conv%s" % (i + 1), self.convs[i], step)
L.log_param("train_encoder/fc", self.fc, step)
L.log_param("train_encoder/ln", self.ln, step)
class DynamicsModel(nn.Module):
def __init__(self, representation_size, action_shape):
super().__init__()
self.action_linear = nn.Linear(action_shape, representation_size)
self.trunk = nn.Sequential(
nn.Linear(representation_size * 2, 128),
nn.ReLU(),
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, representation_size),
)
def forward(self, state, action):
action_emb = self.action_linear(action)
return self.trunk(torch.cat([state, action_emb], dim=-1))
class Decoder(nn.Module):
def __init__(self, obs_shape, feature_dim, num_layers=2, num_filters=32):
super().__init__()
self.num_layers = num_layers
self.num_filters = num_filters
self.out_dim = OUT_DIM[num_layers]
self.fc = nn.Linear(feature_dim, num_filters * self.out_dim * self.out_dim)
self.deconvs = nn.ModuleList()
for i in range(self.num_layers - 1):
self.deconvs.append(
nn.ConvTranspose2d(num_filters, num_filters, 3, stride=1)
)
self.deconvs.append(
nn.ConvTranspose2d(num_filters, obs_shape[0], 3, stride=2, output_padding=1)
)
self.outputs = dict()
def forward(self, h):
h = torch.relu(self.fc(h))
self.outputs["fc"] = h
deconv = h.view(-1, self.num_filters, self.out_dim, self.out_dim)
self.outputs["deconv1"] = deconv
for i in range(0, self.num_layers - 1):
deconv = torch.relu(self.deconvs[i](deconv))
self.outputs["deconv%s" % (i + 1)] = deconv
obs = self.deconvs[-1](deconv)
self.outputs["obs"] = obs
return obs
def log(self, L, step, log_freq):
if step % log_freq != 0:
return
for k, v in self.outputs.items():
L.log_histogram("train_decoder/%s_hist" % k, v, step)
if len(v.shape) > 2:
L.log_image("train_decoder/%s_i" % k, v[0], step)
for i in range(self.num_layers):
L.log_param("train_decoder/deconv%s" % (i + 1), self.deconvs[i], step)
L.log_param("train_decoder/fc", self.fc, step)
| 4,649 | 28.807692 | 88 | py |
SlothSpeech | SlothSpeech-main/main.py | import logging
import warnings
from datetime import datetime
from pathlib import Path
from time import time
import torch
from attasr.attack_loop import EnergyAttack, EnergyAttackConfig
from attasr.attack_losses import l2_norm, linf_norm
from attasr.experiment_datasets import ExprDataset
from attasr.experiment_models import ExprModel
warnings.filterwarnings("ignore")
CUDA_DEVICE = "cuda:3"
MODEL = "whisper" # "s2t", "s2t2"
DATASET = "libri" # "vctk", "openslr"
DIST_CRITERION = "l2" # "linf"
DATASET_DICT = {
"libri": ExprDataset.LibriSpeech,
"vctk": ExprDataset.VCTK,
"openslr": ExprDataset.OpenSLR,
}
MODEL_DICT = {
"whisper": ExprModel.Whisper,
"s2t2": ExprModel.Speech2Text2,
"s2t": ExprModel.Speech2Text,
}
DIST_CRITERIA = {"l2": l2_norm, "linf": linf_norm}
logging.basicConfig(
filename=(
f"logs/{MODEL.capitalize()}{DATASET.capitalize()}"
f"{DIST_CRITERION.capitalize()}.log"
),
filemode="w",
level=logging.INFO,
)
logger = logging.getLogger()
try:
start = time()
dataset_type = DATASET_DICT[DATASET]
dataset = ExprDataset.get_dataset(dataset_type)
load_time = time() - start
logger.info(
"Completed loading the dataset in "
f"{load_time//60:.0f}m {load_time%60:.0f}s\n"
)
    model_type = MODEL_DICT[MODEL]
    start = time()
    attack_base = EnergyAttack.for_model(model_type, device=CUDA_DEVICE)
    load_time = time() - start
    logger.info(
        f"Completed instantiating the attack for {MODEL} in "
        f"{load_time//60:.0f}m {load_time%60:.0f}s\n"
    )
dist_criterion = DIST_CRITERIA[DIST_CRITERION]
conf = EnergyAttackConfig(
max_iter=101,
learning_rate=1e-1,
optimizer_class=torch.optim.Adam,
adv_dist_criterion=dist_criterion,
adv_dist_factor=0.1,
num_datapoints=100,
dataset_name=dataset_type,
dataset=dataset,
storage_frequency=20,
storage_location=str(Path("out").absolute()),
)
logger.info(
f"Starting Attacks... "
f"{datetime.now().isoformat(timespec='seconds', sep=' ')}"
)
attack_base.launch(conf)
except Exception as e:
logger.exception(e.args)
| 2,172 | 23.977011 | 72 | py |
SlothSpeech | SlothSpeech-main/setup.py | from setuptools import find_packages, setup
setup(
name="attasr",
version="1.1.0",
package_dir=find_packages(),
include_package_data=True,
install_requires=[
"datasets>=2.10.1",
"pandas>=1.5.3",
"setuptools>=65.5.0",
"tqdm>=4.64.1",
"transformers>=4.26.1",
"torch>=1.12.1",
"torchaudio>=0.12.1",
],
entry_points={
"console_scripts": [],
},
)
| 439 | 19.952381 | 43 | py |
SlothSpeech | SlothSpeech-main/src/attasr/noise.py | import torch
def get_white_noise(
signal_embedding: torch.Tensor, signal_to_noise_ratio: float
) -> torch.Tensor:
    # Scale the noise power so that 10 * log10(P_signal / P_noise) equals the
    # requested SNR in dB; for zero-mean white noise the std equals its RMS.
    rms_signal = torch.sqrt(torch.mean(signal_embedding**2))
    rms_noise = torch.sqrt(
        rms_signal**2
        / torch.pow(torch.tensor(10), signal_to_noise_ratio / 10)
    )
    std_noise = rms_noise
    size = tuple(signal_embedding.shape)
    return torch.normal(0.0, std_noise.cpu().item(), (size[-2], size[-1]))
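# A minimal usage sketch (hypothetical log-mel feature shape); the returned
# noise matches the trailing two dimensions and is generated at the requested
# SNR in dB:
# feats = torch.randn(1, 80, 3000)
# noise = get_white_noise(feats, signal_to_noise_ratio=10.0)
# noisy = feats + noise.to(feats.device)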
| 447 | 25.352941 | 74 | py |
SlothSpeech | SlothSpeech-main/src/attasr/attack_losses.py | from typing import Callable, Protocol
import torch
def linf_norm(x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
return torch.linalg.norm((target - x), ord=float("inf"), dim=1).sum(dim=-1)
def l2_norm(x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
return torch.sum((target - x) ** 2, dim=-1, keepdim=True).sum()
class AttackLossProtocol(Protocol):
def __call__(
self,
batch_logits: torch.Tensor,
adv_x: torch.Tensor,
input_feats: torch.Tensor,
) -> torch.Tensor:
...
def get_attack_loss(
skip_tokens: set[int],
eos_token_id: int,
adv_dist_factor: float,
adv_dist_criterion: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
minimize_eos_all_pos: bool = False,
) -> AttackLossProtocol:
def _attack_loss(
batch_logits: torch.Tensor,
adv_x: torch.Tensor,
input_feats: torch.Tensor,
) -> torch.Tensor:
        logits = batch_logits[0]
        sfmax = torch.softmax(logits[-1], dim=0)
        tokens_sorted = torch.argsort(sfmax)
        # walk back from the runner-up token at the last position, skipping
        # special tokens the decoder should never be pushed towards
        tail = -2
        while tokens_sorted[tail].item() in skip_tokens:
            tail -= 1
        if minimize_eos_all_pos:
            eos_penalty = logits[:, eos_token_id].sum()
        else:
            eos_penalty = logits[-1][eos_token_id]
        # push EOS down and the runner-up token up, plus a distance penalty
        # that keeps the adversarial input close to the original features
        l1 = eos_penalty - logits[-1][tokens_sorted[tail]]
        l2 = adv_dist_criterion(adv_x, input_feats)
        return l1 + adv_dist_factor * l2
return _attack_loss
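# A minimal sketch of building and applying the closure; the token ids below
# are placeholders, not the real tokenizer values:
# loss_fn = get_attack_loss(skip_tokens={0}, eos_token_id=2,
#                           adv_dist_factor=0.1, adv_dist_criterion=l2_norm)
# loss = loss_fn(batch_logits=logits, adv_x=adv_feats, input_feats=input_feats)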
| 1,493 | 27.188679 | 79 | py |
SlothSpeech | SlothSpeech-main/src/attasr/attack_loop.py | import logging
from dataclasses import dataclass
from pathlib import Path
from time import time
from typing import Any, Callable, Optional, Type
import datasets
import pandas as pd
import torch
from torch.autograd import Variable
from tqdm.auto import tqdm
from attasr.attack_losses import AttackLossProtocol, get_attack_loss
from attasr.experiment_datasets import ExprDataset
from attasr.experiment_models import (
EXPR_BACKBONE,
EXPR_FEAT_EXTRACTOR,
EXPR_TOKENIZER,
ExprModel,
)
from attasr.noise import get_white_noise
SKIP_TOKENS: dict[ExprModel, set[int]] = {
ExprModel.Speech2Text: set(),
ExprModel.Speech2Text2: set(),
ExprModel.Whisper: {50256, 50257, 50362},
}
NUM_REPS = 2
logger = logging.getLogger()
@dataclass
class EnergyAttackConfig:
max_iter: int
learning_rate: float
optimizer_class: Type[torch.optim.Optimizer]
adv_dist_criterion: Callable[[torch.Tensor, torch.Tensor], torch.Tensor]
adv_dist_factor: float
dataset_name: ExprDataset
num_datapoints: int
storage_frequency: int
storage_location: str
shard_id: int = -1
minimize_eos_all_pos: bool = True
dataset: Optional[datasets.arrow_dataset.Dataset] = None
def __post_init__(self):
if not self.dataset:
self.dataset = ExprDataset.get_dataset(self.dataset_name)
self.num_datapoints = min(self.num_datapoints, len(self.dataset))
self.sampling_rate = 16000
@dataclass
class EnergyAttack:
model: EXPR_BACKBONE
tokenizer: EXPR_TOKENIZER
ftex: EXPR_FEAT_EXTRACTOR
device: str
skip_tokens: set[int]
model_name: str
max_new_tokens: int = 500
def __post_init__(self):
self.eos_token_id = self.tokenizer.eos_token_id
self.model.to(self.device, non_blocking=True)
self.stats = self._create_stats_table()
self.tensor_dict = self._create_tensor_dict()
@classmethod
def for_model(
cls,
expr_model: ExprModel,
device: str,
max_new_tokens: int = 500,
) -> "EnergyAttack":
return cls(
model=ExprModel.get_pretained_model(expr_model),
tokenizer=ExprModel.get_tokenizer(expr_model),
ftex=ExprModel.get_feature_extractor(expr_model),
max_new_tokens=max_new_tokens,
device=device,
model_name=expr_model.name,
skip_tokens=SKIP_TOKENS[expr_model],
)
def _create_stats_table(self) -> dict[str, list]:
return {
"idx": [],
"max_token_delta": [],
"awgn_token_delta": [],
"og_latency": [],
"adv_latency": [],
"awgn_latency": [],
"perturb_dist": [],
"awgn_dist": [],
"og_transcript": [],
"awgn_transcript": [],
"og_output_shape": [],
"adv_output_shape": [],
"awgn_output_shape": [],
}
def _create_tensor_dict(self) -> dict[int, dict[str, Any]]:
return dict()
def clear_stats(self):
self.stats = self._create_stats_table()
self.tensor_dict = self._create_tensor_dict()
def get_stats_df(self) -> pd.DataFrame:
df = pd.DataFrame(self.stats)
df.set_index("idx", inplace=True)
return df
def save_stats(self, conf: EnergyAttackConfig, final: bool = False):
df = self.get_stats_df()
mname, dname, dcname = (
self.model_name,
conf.dataset_name.name,
conf.adv_dist_criterion.__name__,
)
df["model"] = mname
df["dataset"] = dname
df["dist_criteria"] = dcname
sid = str(conf.shard_id) if conf.shard_id != -1 else ""
save_path_base = f"{mname}_{dname}_{dcname}{sid}"
save_path = Path(conf.storage_location).joinpath(f"{save_path_base}")
df.to_csv(save_path.with_suffix(".csv"))
torch.save(self.tensor_dict, save_path.with_suffix(".pt"))
def _append_stats(
self,
idx: int,
og_output_shape: tuple[int, ...],
adv_output_shape: tuple[int, ...],
max_token_delta: int,
og_latency: int,
adv_latency: int,
perturb_dist: float,
og_transcript: str,
longest_transcript: str,
awgn_token_delta: int,
awgn_latency: int,
awgn_dist: float,
awgn_transcript: str,
awgn_output_shape: tuple[int, ...],
raw_perturbation: torch.Tensor,
):
self.stats["idx"].append(idx)
self.stats["og_output_shape"].append(og_output_shape)
self.stats["adv_output_shape"].append(adv_output_shape)
self.stats["max_token_delta"].append(max_token_delta)
self.stats["og_latency"].append(og_latency)
self.stats["perturb_dist"].append(perturb_dist)
self.stats["og_transcript"].append(og_transcript)
self.stats["awgn_token_delta"].append(awgn_token_delta)
self.stats["awgn_latency"].append(awgn_latency)
self.stats["adv_latency"].append(adv_latency)
self.stats["awgn_dist"].append(awgn_dist)
self.stats["awgn_transcript"].append(awgn_transcript)
self.stats["awgn_output_shape"].append(awgn_output_shape)
self.tensor_dict[idx] = {
"longest_transcript": longest_transcript,
"raw_perturbation": raw_perturbation,
}
def launch(self, config: EnergyAttackConfig, prog_pos: int = -1) -> None:
datapoints = tqdm(
enumerate(config.dataset), # type: ignore
total=config.num_datapoints,
desc="Datapoints",
position=prog_pos + 1,
)
num_proc = 0
for i, datapoint in datapoints:
if num_proc >= config.num_datapoints:
break
if num_proc > 0 and num_proc % config.storage_frequency == 0:
self.save_stats(config)
inputs = self.ftex(
datapoint["audio"]["array"],
sampling_rate=config.sampling_rate,
return_tensors="pt",
)
ip_raw = (
inputs.input_features
if "input_features" in inputs
else inputs.input_values
)
if (
self.model_name == ExprModel.Speech2Text2.name
and ip_raw.shape[1] > 400_000
):
continue
num_proc += 1
input_features: torch.Tensor = Variable(
ip_raw,
requires_grad=True,
).to(self.device, non_blocking=True)
            logger.debug(
                "Input features shape: %s", input_features.shape
            )
modifier_var: torch.Tensor = Variable(
torch.zeros_like(input_features), requires_grad=True
).to(self.device, non_blocking=True)
optimizer = config.optimizer_class( # type: ignore
[modifier_var], lr=config.learning_rate
)
time_totals = 0.0
skip_dp = False
for _ in range(NUM_REPS):
start = time()
try:
generated_ids = self.model.generate(
inputs=input_features,
max_new_tokens=self.max_new_tokens,
)
time_totals += time() - start
except Exception:
skip_dp = True
break
if skip_dp:
continue
og_latency = time_totals / NUM_REPS
original_output_shape = generated_ids.shape
            original_output_length = original_output_shape[1]
best_adv_x = modifier_var + input_features
best_output_shape = original_output_shape
best_perturbation = modifier_var.detach().cpu()
max_token_delta = float("-inf")
attack_loss_criterion: AttackLossProtocol = get_attack_loss(
skip_tokens=self.skip_tokens,
eos_token_id=self.eos_token_id,
adv_dist_factor=config.adv_dist_factor,
adv_dist_criterion=config.adv_dist_criterion,
minimize_eos_all_pos=config.minimize_eos_all_pos,
)
attack_iterations = tqdm(
range(config.max_iter),
desc="Attack Iteration",
position=prog_pos + 2,
)
for j in attack_iterations:
adv_x = modifier_var + input_features
try:
generated_ids_j = self.model.generate(
inputs=adv_x, max_new_tokens=self.max_new_tokens
)
except Exception:
break
logits = self.model(
adv_x, decoder_input_ids=generated_ids_j
).logits
optimizer.zero_grad()
loss = attack_loss_criterion(logits, adv_x, input_features)
loss.backward()
optimizer.step()
current_output_shape = generated_ids_j.shape
current_output_length = generated_ids_j.shape[1]
                token_delta = current_output_length - original_output_length
if token_delta > max_token_delta:
best_adv_x = adv_x.detach().cpu()
best_output_shape = current_output_shape
max_token_delta = token_delta
best_perturbation = modifier_var.detach().cpu()
                if int(max_token_delta) >= 500:
                    # output already grew past the generation cap; stop early
                    break
time_totals = 0
skip_dp = False
for _ in range(NUM_REPS):
start = time()
try:
generated_ids_best = self.model.generate(
inputs=best_adv_x.to(self.device),
max_new_tokens=self.max_new_tokens,
)
time_totals += time() - start
except Exception:
skip_dp = True
break
if skip_dp:
continue
adv_latency = time_totals / NUM_REPS
noise = get_white_noise(input_features, signal_to_noise_ratio=10)
noisy_input = (
noise.to(self.device, non_blocking=True) + input_features
)
time_totals = 0
skip_dp = False
for _ in range(NUM_REPS):
start = time()
try:
generated_ids_noisy = self.model.generate(
noisy_input.to(self.device, non_blocking=True),
max_new_tokens=self.max_new_tokens,
)
time_totals += time() - start
except Exception:
skip_dp = True
break
if skip_dp:
continue
awgn_latency = time_totals / NUM_REPS
awgn_distance = (
torch.nn.functional.l1_loss(
noisy_input.to(self.device), input_features
)
.detach()
.cpu()
.item()
)
awgn_output_shape = generated_ids_noisy.shape
            awgn_token_delta = awgn_output_shape[1] - original_output_length
awgn_xscript = self.tokenizer.batch_decode(
generated_ids_noisy, skip_special_tokens=True
)[0]
og_xscript = self.tokenizer.batch_decode(
generated_ids, skip_special_tokens=True
)[0]
best_xscript = self.tokenizer.batch_decode(
generated_ids_best, skip_special_tokens=True
)[0]
perturb_dist = (
torch.nn.functional.l1_loss(
best_adv_x.to(self.device), input_features
)
.detach()
.cpu()
.item()
)
logger.info(
f"\n{'-' * 70}"
f"\nLargest Change: {int(max_token_delta)}"
f"\nOriginal Transcription: {og_xscript}"
f"\nLongest Transcription: {best_xscript[:50]}"
f"\n\n{'-' * 70}\n\n"
)
self._append_stats(
idx=i,
og_output_shape=tuple(original_output_shape),
adv_output_shape=tuple(best_output_shape),
max_token_delta=int(max_token_delta),
perturb_dist=perturb_dist,
og_transcript=og_xscript,
longest_transcript=best_xscript,
adv_latency=int(adv_latency * 1000),
og_latency=int(og_latency * 1000),
awgn_token_delta=awgn_token_delta,
awgn_latency=int(awgn_latency * 1000),
awgn_dist=awgn_distance,
awgn_transcript=awgn_xscript,
                awgn_output_shape=tuple(awgn_output_shape),
raw_perturbation=best_perturbation.flatten().detach().cpu(),
)
self.save_stats(conf=config, final=True)
self.clear_stats()
return
| 13,276 | 32.69797 | 77 | py |
eda_nlp | eda_nlp-master/experiments/d_2_tsne.py | from methods import *
from numpy.random import seed
from keras import backend as K
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
seed(0)
################################
#### get dense layer output ####
################################
#getting the x inputs in numpy array form from the text file
def train_x(train_txt, word2vec_len, input_size, word2vec):
#read in lines
train_lines = open(train_txt, 'r').readlines()
num_lines = len(train_lines)
x_matrix = np.zeros((num_lines, input_size, word2vec_len))
#insert values
for i, line in enumerate(train_lines):
parts = line[:-1].split('\t')
label = int(parts[0])
sentence = parts[1]
#insert x
words = sentence.split(' ')
words = words[:x_matrix.shape[1]] #cut off if too long
for j, word in enumerate(words):
if word in word2vec:
x_matrix[i, j, :] = word2vec[word]
return x_matrix
def get_dense_output(model_checkpoint, file, num_classes):
x = train_x(file, word2vec_len, input_size, word2vec)
model = load_model(model_checkpoint)
get_3rd_layer_output = K.function([model.layers[0].input], [model.layers[4].output])
layer_output = get_3rd_layer_output([x])[0]
return layer_output
def get_tsne_labels(file):
labels = []
alphas = []
lines = open(file, 'r').readlines()
for i, line in enumerate(lines):
parts = line[:-1].split('\t')
_class = int(parts[0])
alpha = i % 10
labels.append(_class)
alphas.append(alpha)
return labels, alphas
def get_plot_vectors(layer_output):
tsne = TSNE(n_components=2).fit_transform(layer_output)
return tsne
def plot_tsne(tsne, labels, output_path):
label_to_legend_label = { 'outputs_f4/pc_tsne.png':{ 0:'Con (augmented)',
100:'Con (original)',
1: 'Pro (augmented)',
101:'Pro (original)'},
'outputs_f4/trec_tsne.png':{0:'Description (augmented)',
100:'Description (original)',
1:'Entity (augmented)',
101:'Entity (original)',
2:'Abbreviation (augmented)',
102:'Abbreviation (original)',
3:'Human (augmented)',
103:'Human (original)',
4:'Location (augmented)',
104:'Location (original)',
5:'Number (augmented)',
105:'Number (original)'}}
plot_to_legend_size = {'outputs_f4/pc_tsne.png':11, 'outputs_f4/trec_tsne.png':6}
labels = labels.tolist()
big_groups = [label for label in labels if label < 100]
big_groups = list(sorted(set(big_groups)))
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '#ff1493', '#FF4500']
fig, ax = plt.subplots()
for big_group in big_groups:
for group in [big_group, big_group+100]:
x, y = [], []
for j, label in enumerate(labels):
if label == group:
x.append(tsne[j][0])
y.append(tsne[j][1])
#params
color = colors[int(group % 100)]
marker = 'x' if group < 100 else 'o'
size = 1 if group < 100 else 27
legend_label = label_to_legend_label[output_path][group]
ax.scatter(x, y, color=color, marker=marker, s=size, label=legend_label)
plt.axis('off')
legend_size = plot_to_legend_size[output_path]
plt.legend(prop={'size': legend_size})
plt.savefig(output_path, dpi=1000)
plt.clf()
if __name__ == "__main__":
#global variables
word2vec_len = 300
input_size = 25
datasets = ['pc'] #['pc', 'trec']
num_classes_list =[2] #[2, 6]
for i, dataset in enumerate(datasets):
#load parameters
model_checkpoint = 'outputs_f4/' + dataset + '.h5'
file = 'special_f4/' + dataset + '/test_short_aug.txt'
num_classes = num_classes_list[i]
word2vec_pickle = 'special_f4/' + dataset + '/word2vec.p'
word2vec = load_pickle(word2vec_pickle)
#do tsne
layer_output = get_dense_output(model_checkpoint, file, num_classes)
print(layer_output.shape)
t = get_plot_vectors(layer_output)
labels, alphas = get_tsne_labels(file)
print(labels, alphas)
writer = open("outputs_f4/new_tsne.txt", 'w')
label_to_mark = {0:'x', 1:'o'}
        for i, label in enumerate(labels):
            alpha = alphas[i]
            line = str(t[i, 0]) + ' ' + str(t[i, 1]) + ' ' + str(label_to_mark[label]) + ' ' + str(alpha/10)
            writer.write(line + '\n')
        writer.close()
| 4,213 | 26.187097 | 99 | py |
eda_nlp | eda_nlp-master/experiments/methods.py | from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.layers import Bidirectional
import keras.layers as layers
from keras.models import Sequential
from keras.models import load_model
from keras.callbacks import EarlyStopping
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score
import math
import time
import numpy as np
import random
from random import randint
random.seed(3)
import datetime, re, operator
from random import shuffle #note: shadows sklearn.utils.shuffle imported above
from time import gmtime, strftime
import gc
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' #get rid of warnings
from os import listdir
from os.path import isfile, join, isdir
import pickle
#import data augmentation methods
from nlp_aug import *
###################################################
######### loading folders and txt files ###########
###################################################
#loading a pickle file
def load_pickle(file):
return pickle.load(open(file, 'rb'))
#create an output folder if it does not already exist
def confirm_output_folder(output_folder):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
#get full image paths
def get_txt_paths(folder):
txt_paths = [join(folder, f) for f in listdir(folder) if isfile(join(folder, f)) and '.txt' in f]
if join(folder, '.DS_Store') in txt_paths:
txt_paths.remove(join(folder, '.DS_Store'))
txt_paths = sorted(txt_paths)
return txt_paths
#get subfolders
def get_subfolder_paths(folder):
subfolder_paths = [join(folder, f) for f in listdir(folder) if (isdir(join(folder, f)) and '.DS_Store' not in f)]
if join(folder, '.DS_Store') in subfolder_paths:
subfolder_paths.remove(join(folder, '.DS_Store'))
subfolder_paths = sorted(subfolder_paths)
return subfolder_paths
#get all image paths
def get_all_txt_paths(master_folder):
all_paths = []
subfolders = get_subfolder_paths(master_folder)
if len(subfolders) > 1:
for subfolder in subfolders:
all_paths += get_txt_paths(subfolder)
else:
all_paths = get_txt_paths(master_folder)
return all_paths
###################################################
################ data processing ##################
###################################################
#get the pickle file for the word2vec so you don't have to load the entire huge file each time
def gen_vocab_dicts(folder, output_pickle_path, huge_word2vec):
vocab = set()
text_embeddings = open(huge_word2vec, 'r').readlines()
word2vec = {}
#get all the vocab
all_txt_paths = get_all_txt_paths(folder)
print(all_txt_paths)
#loop through each text file
for txt_path in all_txt_paths:
# get all the words
try:
all_lines = open(txt_path, "r").readlines()
for line in all_lines:
words = line[:-1].split(' ')
for word in words:
vocab.add(word)
except:
print(txt_path, "has an error")
print(len(vocab), "unique words found")
# load the word embeddings, and only add the word to the dictionary if we need it
for line in text_embeddings:
items = line.split(' ')
word = items[0]
if word in vocab:
vec = items[1:]
word2vec[word] = np.asarray(vec, dtype = 'float32')
print(len(word2vec), "matches between unique words and word2vec dictionary")
pickle.dump(word2vec, open(output_pickle_path, 'wb'))
print("dictionaries outputted to", output_pickle_path)
#getting the x and y inputs in numpy array form from the text file
def get_x_y(train_txt, num_classes, word2vec_len, input_size, word2vec, percent_dataset):
#read in lines
train_lines = open(train_txt, 'r').readlines()
shuffle(train_lines)
train_lines = train_lines[:int(percent_dataset*len(train_lines))]
num_lines = len(train_lines)
#initialize x and y matrix
x_matrix = None
y_matrix = None
try:
x_matrix = np.zeros((num_lines, input_size, word2vec_len))
except:
print("Error!", num_lines, input_size, word2vec_len)
y_matrix = np.zeros((num_lines, num_classes))
#insert values
for i, line in enumerate(train_lines):
parts = line[:-1].split('\t')
label = int(parts[0])
sentence = parts[1]
#insert x
words = sentence.split(' ')
words = words[:x_matrix.shape[1]] #cut off if too long
for j, word in enumerate(words):
if word in word2vec:
x_matrix[i, j, :] = word2vec[word]
#insert y
y_matrix[i][label] = 1.0
return x_matrix, y_matrix
###################################################
############### data augmentation #################
###################################################
def gen_tsne_aug(train_orig, output_file):
writer = open(output_file, 'w')
lines = open(train_orig, 'r').readlines()
for i, line in enumerate(lines):
parts = line[:-1].split('\t')
label = parts[0]
sentence = parts[1]
writer.write(line)
for alpha in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
aug_sentence = eda_4(sentence, alpha_sr=alpha, alpha_ri=alpha, alpha_rs=alpha, p_rd=alpha, num_aug=2)[0]
writer.write(label + "\t" + aug_sentence + '\n')
writer.close()
print("finished eda for tsne for", train_orig, "to", output_file)
#generate more data with standard augmentation
def gen_standard_aug(train_orig, output_file, num_aug=9):
writer = open(output_file, 'w')
lines = open(train_orig, 'r').readlines()
for i, line in enumerate(lines):
parts = line[:-1].split('\t')
label = parts[0]
sentence = parts[1]
aug_sentences = eda_4(sentence, num_aug=num_aug)
for aug_sentence in aug_sentences:
writer.write(label + "\t" + aug_sentence + '\n')
writer.close()
print("finished eda for", train_orig, "to", output_file)
#generate more data with only synonym replacement (SR)
def gen_sr_aug(train_orig, output_file, alpha_sr, n_aug):
writer = open(output_file, 'w')
lines = open(train_orig, 'r').readlines()
for i, line in enumerate(lines):
parts = line[:-1].split('\t')
label = parts[0]
sentence = parts[1]
aug_sentences = SR(sentence, alpha_sr=alpha_sr, n_aug=n_aug)
for aug_sentence in aug_sentences:
writer.write(label + "\t" + aug_sentence + '\n')
writer.close()
print("finished SR for", train_orig, "to", output_file, "with alpha", alpha_sr)
#generate more data with only random insertion (RI)
def gen_ri_aug(train_orig, output_file, alpha_ri, n_aug):
writer = open(output_file, 'w')
lines = open(train_orig, 'r').readlines()
for i, line in enumerate(lines):
parts = line[:-1].split('\t')
label = parts[0]
sentence = parts[1]
aug_sentences = RI(sentence, alpha_ri=alpha_ri, n_aug=n_aug)
for aug_sentence in aug_sentences:
writer.write(label + "\t" + aug_sentence + '\n')
writer.close()
print("finished RI for", train_orig, "to", output_file, "with alpha", alpha_ri)
#generate more data with only random swap (RS)
def gen_rs_aug(train_orig, output_file, alpha_rs, n_aug):
writer = open(output_file, 'w')
lines = open(train_orig, 'r').readlines()
for i, line in enumerate(lines):
parts = line[:-1].split('\t')
label = parts[0]
sentence = parts[1]
aug_sentences = RS(sentence, alpha_rs=alpha_rs, n_aug=n_aug)
for aug_sentence in aug_sentences:
writer.write(label + "\t" + aug_sentence + '\n')
writer.close()
print("finished RS for", train_orig, "to", output_file, "with alpha", alpha_rs)
#generate more data with only random deletion (RD)
def gen_rd_aug(train_orig, output_file, alpha_rd, n_aug):
writer = open(output_file, 'w')
lines = open(train_orig, 'r').readlines()
for i, line in enumerate(lines):
parts = line[:-1].split('\t')
label = parts[0]
sentence = parts[1]
aug_sentences = RD(sentence, alpha_rd=alpha_rd, n_aug=n_aug)
for aug_sentence in aug_sentences:
writer.write(label + "\t" + aug_sentence + '\n')
writer.close()
print("finished RD for", train_orig, "to", output_file, "with alpha", alpha_rd)
###################################################
##################### model #######################
###################################################
#building the model in keras
def build_model(sentence_length, word2vec_len, num_classes):
model = None
model = Sequential()
model.add(Bidirectional(LSTM(64, return_sequences=True), input_shape=(sentence_length, word2vec_len)))
model.add(Dropout(0.5))
model.add(Bidirectional(LSTM(32, return_sequences=False)))
model.add(Dropout(0.5))
model.add(Dense(20, activation='relu'))
model.add(Dense(num_classes, kernel_initializer='normal', activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
#print(model.summary())
return model
#building the cnn in keras
def build_cnn(sentence_length, word2vec_len, num_classes):
model = None
model = Sequential()
model.add(layers.Conv1D(128, 5, activation='relu', input_shape=(sentence_length, word2vec_len)))
model.add(layers.GlobalMaxPooling1D())
model.add(Dense(20, activation='relu'))
model.add(Dense(num_classes, kernel_initializer='normal', activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
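#usage sketch (hypothetical sizes): both builders return a compiled keras
#model ready for model.fit on the matrices produced by get_x_y
#rnn = build_model(sentence_length=25, word2vec_len=300, num_classes=2)
#cnn = build_cnn(sentence_length=25, word2vec_len=300, num_classes=2)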
#one hot to categorical
def one_hot_to_categorical(y):
assert len(y.shape) == 2
return np.argmax(y, axis=1)
def get_now_str():
return str(strftime("%Y-%m-%d_%H:%M:%S", gmtime()))
| 9,650 | 33.223404 | 117 | py |
Graft-PSMNet | Graft-PSMNet-main/test_kitti.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Variable
from torch.autograd import grad as Grad
from torchvision import transforms
import skimage.io
import os
import copy
from collections import OrderedDict
from tqdm import tqdm, trange
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import argparse
from dataloader import KITTIloader as kt
from dataloader import KITTI2012loader as kt2012
import networks.Aggregator as Agg
import networks.feature_extraction as FE
import networks.U_net as un
parser = argparse.ArgumentParser(description='GraftNet')
parser.add_argument('--no_cuda', action='store_true', default=False)
parser.add_argument('--gpu_id', type=str, default='2')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--kitti', type=str, default='2015')
parser.add_argument('--data_path', type=str, default='/media/data/dataset/KITTI/data_scene_flow/training/')
parser.add_argument('--load_path', type=str, default='trained_models/checkpoint_final_10epoch.tar')
parser.add_argument('--max_disp', type=int, default=192)
args = parser.parse_args()
if not args.no_cuda:
os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
cuda = torch.cuda.is_available()
if args.kitti == '2015':
all_limg, all_rimg, all_ldisp, test_limg, test_rimg, test_ldisp = kt.kt_loader(args.data_path)
else:
all_limg, all_rimg, all_ldisp, test_limg, test_rimg, test_ldisp = kt2012.kt2012_loader(args.data_path)
test_limg = all_limg + test_limg
test_rimg = all_rimg + test_rimg
test_ldisp = all_ldisp + test_ldisp
fe_model = FE.VGG_Feature(fixed_param=True).eval()
adaptor = un.U_Net_v4(img_ch=256, output_ch=64).eval()
agg_model = Agg.PSMAggregator(args.max_disp, udc=True).eval()
if cuda:
fe_model = nn.DataParallel(fe_model.cuda())
adaptor = nn.DataParallel(adaptor.cuda())
agg_model = nn.DataParallel(agg_model.cuda())
adaptor.load_state_dict(torch.load(args.load_path)['fa_net'])
agg_model.load_state_dict(torch.load(args.load_path)['net'])
pred_mae = 0
pred_op = 0
for i in trange(len(test_limg)):
limg = Image.open(test_limg[i]).convert('RGB')
rimg = Image.open(test_rimg[i]).convert('RGB')
w, h = limg.size
m = 16
wi, hi = (w // m + 1) * m, (h // m + 1) * m
limg = limg.crop((w - wi, h - hi, w, h))
rimg = rimg.crop((w - wi, h - hi, w, h))
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
limg_tensor = transform(limg)
rimg_tensor = transform(rimg)
limg_tensor = limg_tensor.unsqueeze(0).cuda()
rimg_tensor = rimg_tensor.unsqueeze(0).cuda()
disp_gt = Image.open(test_ldisp[i])
disp_gt = np.ascontiguousarray(disp_gt, dtype=np.float32) / 256
gt_tensor = torch.FloatTensor(disp_gt).unsqueeze(0).unsqueeze(0).cuda()
with torch.no_grad():
left_fea = fe_model(limg_tensor)
right_fea = fe_model(rimg_tensor)
left_fea = adaptor(left_fea)
right_fea = adaptor(right_fea)
pred_disp = agg_model(left_fea, right_fea, gt_tensor, training=False)
pred_disp = pred_disp[:, hi - h:, wi - w:]
predict_np = pred_disp.squeeze().cpu().numpy()
op_thresh = 3
mask = (disp_gt > 0) & (disp_gt < args.max_disp)
    pred_error = np.abs(predict_np * mask.astype(np.float32) - disp_gt * mask.astype(np.float32))
pred_op += np.sum((pred_error > op_thresh)) / np.sum(mask)
pred_mae += np.mean(pred_error[mask])
print(pred_mae / len(test_limg))
print(pred_op / len(test_limg)) | 3,733 | 33.897196 | 107 | py |
Graft-PSMNet | Graft-PSMNet-main/test_middlebury.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.autograd import Variable
from torch.autograd import grad as Grad
import skimage.io
import os
import copy
from collections import OrderedDict
from tqdm import tqdm, trange
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import cv2
import argparse
from dataloader import middlebury_loader as mb
from dataloader import readpfm as rp
import networks.Aggregator as Agg
import networks.U_net as un
import networks.feature_extraction as FE
parser = argparse.ArgumentParser(description='GraftNet')
parser.add_argument('--no_cuda', action='store_true', default=False)
parser.add_argument('--gpu_id', type=str, default='2')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--resolution', type=str, default='H')
parser.add_argument('--data_path', type=str, default='/media/data/dataset/MiddEval3-data-H/')
parser.add_argument('--load_path', type=str, default='trained_models/checkpoint_final_10epoch.tar')
parser.add_argument('--max_disp', type=int, default=192)
args = parser.parse_args()
if not args.no_cuda:
os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
cuda = torch.cuda.is_available()
train_limg, train_rimg, train_gt, test_limg, test_rimg = mb.mb_loader(args.data_path, res=args.resolution)
fe_model = FE.VGG_Feature(fixed_param=True).eval()
adaptor = un.U_Net_v4(img_ch=256, output_ch=64).eval()
agg_model = Agg.PSMAggregator(args.max_disp, udc=True).eval()
if cuda:
fe_model = nn.DataParallel(fe_model.cuda())
adaptor = nn.DataParallel(adaptor.cuda())
agg_model = nn.DataParallel(agg_model.cuda())
adaptor.load_state_dict(torch.load(args.load_path)['fa_net'])
agg_model.load_state_dict(torch.load(args.load_path)['net'])
def test_trainset():
op = 0
mae = 0
for i in trange(len(train_limg)):
limg_path = train_limg[i]
rimg_path = train_rimg[i]
limg = Image.open(limg_path).convert('RGB')
rimg = Image.open(rimg_path).convert('RGB')
w, h = limg.size
wi, hi = (w // 16 + 1) * 16, (h // 16 + 1) * 16
limg = limg.crop((w - wi, h - hi, w, h))
rimg = rimg.crop((w - wi, h - hi, w, h))
limg_tensor = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])(limg)
rimg_tensor = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])(rimg)
limg_tensor = limg_tensor.unsqueeze(0).cuda()
rimg_tensor = rimg_tensor.unsqueeze(0).cuda()
with torch.no_grad():
left_fea = fe_model(limg_tensor)
right_fea = fe_model(rimg_tensor)
left_fea = adaptor(left_fea)
right_fea = adaptor(right_fea)
pred_disp = agg_model(left_fea, right_fea, limg_tensor, training=False)
pred_disp = pred_disp[:, hi - h:, wi - w:]
pred_np = pred_disp.squeeze().cpu().numpy()
torch.cuda.empty_cache()
disp_gt, _ = rp.readPFM(train_gt[i])
disp_gt = np.ascontiguousarray(disp_gt, dtype=np.float32)
disp_gt[disp_gt == np.inf] = 0
occ_mask = Image.open(train_gt[i].replace('disp0GT.pfm', 'mask0nocc.png')).convert('L')
occ_mask = np.ascontiguousarray(occ_mask, dtype=np.float32)
mask = (disp_gt <= 0) | (occ_mask != 255) | (disp_gt >= args.max_disp)
# mask = (disp_gt <= 0) | (disp_gt >= maxdisp)
error = np.abs(pred_np - disp_gt)
error[mask] = 0
        # per-image weighting disabled: every image contributes equally
        k = 1
op += np.sum(error > 2.0) / (w * h - np.sum(mask)) * k
mae += np.sum(error) / (w * h - np.sum(mask)) * k
print(op / 15 * 100)
print(mae / 15)
if __name__ == '__main__':
test_trainset()
# test_testset() | 4,011 | 31.617886 | 106 | py |
Graft-PSMNet | Graft-PSMNet-main/loss_functions.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import cv2
from PIL import Image
import matplotlib.pyplot as plt
def disp2distribute(disp_gt, max_disp, b=2):
disp_gt = disp_gt.unsqueeze(1)
disp_range = torch.arange(0, max_disp).view(1, -1, 1, 1).float().cuda()
gt_distribute = torch.exp(-torch.abs(disp_range - disp_gt) / b)
gt_distribute = gt_distribute / (torch.sum(gt_distribute, dim=1, keepdim=True) + 1e-8)
return gt_distribute
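# Sanity-check sketch (hypothetical 4x4 disparity map, GPU assumed): the
# ground-truth distribution is a discrete Laplacian over disparity bins,
# p(d) ~ exp(-|d - d_gt| / b), normalized along the disparity dimension.
# gt = torch.full((1, 4, 4), 10.0).cuda()
# dist = disp2distribute(gt, max_disp=192, b=2)  # -> (1, 192, 4, 4)
# assert torch.allclose(dist.sum(dim=1), torch.ones(1, 4, 4).cuda())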
def CEloss(disp_gt, max_disp, gt_distribute, pred_distribute):
mask = (disp_gt > 0) & (disp_gt < max_disp)
pred_distribute = torch.log(pred_distribute + 1e-8)
ce_loss = torch.sum(-gt_distribute * pred_distribute, dim=1)
ce_loss = torch.mean(ce_loss[mask])
return ce_loss
def gradient_x(img):
img = F.pad(img, (0, 1, 0, 0), mode="replicate")
gx = img[:, :, :, :-1] - img[:, :, :, 1:]
return gx
def gradient_y(img):
img = F.pad(img, (0, 0, 0, 1), mode="replicate")
gy = img[:, :, :-1, :] - img[:, :, 1:, :]
return gy
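# Edge-aware smoothness below: mean of |dx(disp)| * exp(-|dx(img)|) plus the
# analogous y term, so disparity gradients are penalized less across image
# edges where large jumps are expected.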
def smooth_loss(img, disp):
img_gx = gradient_x(img)
img_gy = gradient_y(img)
disp_gx = gradient_x(disp)
disp_gy = gradient_y(disp)
weight_x = torch.exp(-torch.mean(torch.abs(img_gx), dim=1, keepdim=True))
weight_y = torch.exp(-torch.mean(torch.abs(img_gy), dim=1, keepdim=True))
smoothness_x = torch.abs(disp_gx * weight_x)
smoothness_y = torch.abs(disp_gy * weight_y)
smoothness_loss = smoothness_x + smoothness_y
return torch.mean(smoothness_loss)
def occlusion_mask(left_disp, right_disp, threshold=1):
# left_disp = left_disp.unsqueeze(1)
# right_disp = right_disp.unsqueeze(1)
B, _, H, W = left_disp.size()
x_base = torch.linspace(0, 1, W).repeat(B, H, 1).type_as(right_disp)
y_base = torch.linspace(0, 1, H).repeat(B, W, 1).transpose(1, 2).type_as(right_disp)
flow_field = torch.stack((x_base - left_disp.squeeze(1) / W, y_base), dim=3)
recon_left_disp = F.grid_sample(right_disp, 2 * flow_field - 1, mode='bilinear', padding_mode='zeros')
lr_check = torch.abs(recon_left_disp - left_disp)
mask = lr_check > threshold
return mask
def reconstruction(right, disp):
b, _, h, w = right.size()
x_base = torch.linspace(0, 1, w).repeat(b, h, 1).type_as(right)
y_base = torch.linspace(0, 1, h).repeat(b, w, 1).transpose(1, 2).type_as(right)
flow_field = torch.stack((x_base - disp / w, y_base), dim=3)
recon_left = F.grid_sample(right, 2 * flow_field - 1, mode='bilinear', padding_mode='zeros')
return recon_left
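# Usage sketch (hypothetical shapes): warp the right feature map into the
# left view with a known left disparity, given in pixels at this resolution.
# right = torch.rand(2, 64, 64, 128)
# disp = torch.full((2, 64, 128), 5.0)
# recon_left = reconstruction(right, disp)  # -> (2, 64, 64, 128)
# NT-Xent (InfoNCE) with temperature t, applied per pixel below:
#   loss = -log(exp(s_pos / t) / (exp(s_pos / t) + sum_k exp(s_neg_k / t)))
# where s_pos / s_neg are similarities of the left feature with the positive
# and negative reconstructions.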
def NT_Xent_loss(positive_simi, negative_simi, t):
loss = torch.exp(positive_simi / t) / \
(torch.exp(positive_simi / t) + torch.sum(torch.exp(negative_simi / t), dim=4))
loss = -torch.log(loss + 1e-9)
return loss
class FeatureSimilarityLoss(nn.Module):
def __init__(self, max_disp):
super(FeatureSimilarityLoss, self).__init__()
self.max_disp = max_disp
self.m = 0.3
self.nega_num = 1
def forward(self, left_fea, right_fea, left_disp, right_disp):
B, _, H, W = left_fea.size()
down_disp = F.interpolate(left_disp, (H, W), mode='nearest') / 4.
# down_img = F.interpolate(left_img, (H, W), mode='nearest')
# down_img = torch.mean(down_img, dim=1, keepdim=True)
# t_map = self.t_net(left_fea)
# create negative samples
random_offset = torch.rand(B, self.nega_num, H, W).cuda() * 2 + 1
random_sign = torch.sign(torch.rand(B, self.nega_num, H, W).cuda() - 0.5)
random_offset *= random_sign
negative_disp = down_disp + random_offset
positive_recon = reconstruction(right_fea, down_disp.squeeze(1))
negative_recon = []
for i in range(self.nega_num):
negative_recon.append(reconstruction(right_fea, negative_disp[:, i]))
negative_recon = torch.stack(negative_recon, dim=4)
left_fea = F.normalize(left_fea, dim=1)
positive_recon = F.normalize(positive_recon, dim=1)
negative_recon = F.normalize(negative_recon, dim=1)
positive_simi = (torch.sum(left_fea * positive_recon, dim=1, keepdim=True) + 1) / 2
negative_simi = (torch.sum(left_fea.unsqueeze(4) * negative_recon, dim=1, keepdim=True) + 1) / 2
judge_mat_p = torch.zeros_like(positive_simi)
judge_mat_n = torch.zeros_like(negative_simi)
if torch.sum(positive_simi < judge_mat_p) > 0 or torch.sum(negative_simi < judge_mat_n) > 0:
print('cosine_simi < 0')
# hinge loss
# dist = self.m + negative_simi - positive_simi
# criteria = torch.zeros_like(dist)
# loss, _ = torch.max(torch.cat((dist, criteria), dim=1), dim=1, keepdim=True)
# NT-Xent loss
# loss = NT_Xent_loss(positive_simi, negative_simi, t=t_map)
loss = NT_Xent_loss(positive_simi, negative_simi, t=0.2)
# img_grad = torch.sqrt(gradient_x(down_img) ** 2 + gradient_y(down_img) ** 2)
# weight = torch.exp(-img_grad)
# loss = loss * weight
occ_mask = occlusion_mask(left_disp, right_disp, threshold=1)
occ_mask = F.interpolate(occ_mask.float(), (H, W), mode='nearest')
valid_mask = (down_disp > 0) & (down_disp < self.max_disp // 4) & (occ_mask == 0)
return torch.mean(loss[valid_mask])
def gram_matrix(feature):
B, C, H, W = feature.size()
feature = feature.view(B, C, H * W)
feature_t = feature.transpose(1, 2)
gram_m = torch.bmm(feature, feature_t) / (H * W)
return gram_m
def gram_matrix_v2(feature):
B, C, H, W = feature.size()
feature = feature.view(B * C, H * W)
gram_m = torch.mm(feature, feature.t()) / (B * C * H * W)
return gram_m
if __name__ == '__main__':
a = torch.rand(2, 256, 64, 128)
b = torch.rand(2, 256, 64, 128)
gram_a = gram_matrix(a)
gram_b = gram_matrix(b)
print(F.mse_loss(gram_a, gram_b))
ga_2 = gram_matrix_v2(a)
gb_2 = gram_matrix_v2(b)
print(F.mse_loss(ga_2, gb_2))
| 6,062 | 32.497238 | 106 | py |
Graft-PSMNet | Graft-PSMNet-main/retrain_CostAggregation.py | import torch
import torch.utils.data
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
import os
import copy
from tqdm import tqdm, trange
import matplotlib.pyplot as plt
import argparse
from dataloader import sceneflow_loader as sf
import networks.Aggregator as Agg
import networks.submodule as sm
import networks.U_net as un
import networks.feature_extraction as FE
import loss_functions as lf
parser = argparse.ArgumentParser(description='GraftNet')
parser.add_argument('--no_cuda', action='store_true', default=False)
parser.add_argument('--gpu_id', type=str, default='0, 1')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--batch_size', type=int, default=6)
parser.add_argument('--epoch', type=int, default=10)
parser.add_argument('--data_path', type=str, default='/media/data/dataset/SceneFlow/')
parser.add_argument('--save_path', type=str, default='trained_models/')
parser.add_argument('--load_path', type=str, default='trained_models/checkpoint_adaptor_1epoch.tar')
parser.add_argument('--max_disp', type=int, default=192)
parser.add_argument('--color_transform', action='store_true', default=False)
args = parser.parse_args()
if not args.no_cuda:
os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if cuda:
torch.cuda.manual_seed(args.seed)
all_limg, all_rimg, all_ldisp, all_rdisp, test_limg, test_rimg, test_ldisp, test_rdisp = sf.sf_loader(args.data_path)
trainLoader = torch.utils.data.DataLoader(
sf.myDataset(all_limg, all_rimg, all_ldisp, all_rdisp, training=True, color_transform=args.color_transform),
batch_size=args.batch_size, shuffle=True, num_workers=4, drop_last=False)
fe_model = FE.VGG_Feature(fixed_param=True).eval()
adaptor = un.U_Net_v4(img_ch=256, output_ch=64).eval()
model = Agg.PSMAggregator(args.max_disp, udc=True).train()
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
if cuda:
fe_model = nn.DataParallel(fe_model.cuda())
adaptor = nn.DataParallel(adaptor.cuda())
model = nn.DataParallel(model.cuda())
adaptor.load_state_dict(torch.load(args.load_path)['fa_net'])
for p in adaptor.parameters():
p.requires_grad = False
optimizer = optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999))
def train(imgL, imgR, gt_left, gt_right):
imgL = torch.FloatTensor(imgL)
imgR = torch.FloatTensor(imgR)
gt_left = torch.FloatTensor(gt_left)
gt_right = torch.FloatTensor(gt_right)
if cuda:
imgL, imgR, gt_left, gt_right = imgL.cuda(), imgR.cuda(), gt_left.cuda(), gt_right.cuda()
optimizer.zero_grad()
with torch.no_grad():
left_fea = fe_model(imgL)
right_fea = fe_model(imgR)
left_fea = adaptor(left_fea)
right_fea = adaptor(right_fea)
loss1, loss2 = model(left_fea, right_fea, gt_left, training=True)
loss1 = torch.mean(loss1)
loss2 = torch.mean(loss2)
loss = 0.1 * loss1 + loss2
# loss = loss1
loss.backward()
optimizer.step()
return loss1.item(), loss2.item()
def adjust_learning_rate(optimizer, epoch):
if epoch <= 5:
lr = 0.001
else:
lr = 0.0001
# print(lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def main():
# start_total_time = time.time()
start_epoch = 1
# checkpoint = torch.load('trained_ft_costAgg/checkpoint_1_v4.tar')
# CostAggregator.load_state_dict(checkpoint['net'])
# optimizer.load_state_dict(checkpoint['optimizer'])
# start_epoch = checkpoint['epoch'] + 1
for epoch in range(start_epoch, args.epoch + start_epoch):
print('This is %d-th epoch' % (epoch))
total_train_loss1 = 0
total_train_loss2 = 0
adjust_learning_rate(optimizer, epoch)
#
for batch_id, (imgL, imgR, disp_L, disp_R) in enumerate(tqdm(trainLoader)):
train_loss1, train_loss2 = train(imgL, imgR, disp_L, disp_R)
total_train_loss1 += train_loss1
total_train_loss2 += train_loss2
avg_train_loss1 = total_train_loss1 / len(trainLoader)
avg_train_loss2 = total_train_loss2 / len(trainLoader)
print('Epoch %d average training loss1 = %.3f, average training loss2 = %.3f' %
(epoch, avg_train_loss1, avg_train_loss2))
state = {'fa_net': adaptor.state_dict(),
'net': model.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch}
if not os.path.exists(args.save_path):
os.mkdir(args.save_path)
save_model_path = args.save_path + 'checkpoint_final_{}epoch.tar'.format(epoch)
torch.save(state, save_model_path)
torch.cuda.empty_cache()
if __name__ == '__main__':
main()
| 4,896 | 32.312925 | 117 | py |
Graft-PSMNet | Graft-PSMNet-main/train_adaptor.py | import torch
import torch.utils.data
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
import os
import copy
from tqdm import tqdm
import matplotlib.pyplot as plt
import argparse
from dataloader import sceneflow_loader as sf
import networks.Aggregator as Agg
import networks.U_net as un
import networks.feature_extraction as FE
import loss_functions as lf
parser = argparse.ArgumentParser(description='GraftNet')
parser.add_argument('--no_cuda', action='store_true', default=False)
parser.add_argument('--gpu_id', type=str, default='0, 1')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--epoch', type=int, default=1)
parser.add_argument('--data_path', type=str, default='/media/data/dataset/SceneFlow/')
parser.add_argument('--save_path', type=str, default='trained_models/')
parser.add_argument('--load_path', type=str, default='trained_models/checkpoint_baseline_8epoch.tar')
parser.add_argument('--max_disp', type=int, default=192)
parser.add_argument('--color_transform', action='store_true', default=False)
args = parser.parse_args()
if not args.no_cuda:
os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if cuda:
torch.cuda.manual_seed(args.seed)
all_limg, all_rimg, all_ldisp, all_rdisp, test_limg, test_rimg, test_ldisp, test_rdisp = sf.sf_loader(args.data_path)
trainLoader = torch.utils.data.DataLoader(
sf.myDataset(all_limg, all_rimg, all_ldisp, all_rdisp, training=True, color_transform=args.color_transform),
batch_size=args.batch_size, shuffle=True, num_workers=4, drop_last=False)
fe_model = FE.VGG_Feature(fixed_param=True).eval()
model = un.U_Net_v4(img_ch=256, output_ch=64).train()
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
agg_model = Agg.PSMAggregator(args.max_disp, udc=True).eval()
if cuda:
fe_model = nn.DataParallel(fe_model.cuda())
model = nn.DataParallel(model.cuda())
agg_model = nn.DataParallel(agg_model.cuda())
agg_model.load_state_dict(torch.load(args.load_path)['net'])
for p in agg_model.parameters():
p.requires_grad = False
optimizer = optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999))
def train(imgL, imgR, gt_left, gt_right):
imgL = torch.FloatTensor(imgL)
imgR = torch.FloatTensor(imgR)
gt_left = torch.FloatTensor(gt_left)
gt_right = torch.FloatTensor(gt_right)
if cuda:
imgL, imgR, gt_left, gt_right = imgL.cuda(), imgR.cuda(), gt_left.cuda(), gt_right.cuda()
optimizer.zero_grad()
with torch.no_grad():
left_fea = fe_model(imgL)
right_fea = fe_model(imgR)
agg_left_fea = model(left_fea)
agg_right_fea = model(right_fea)
loss1, loss2 = agg_model(agg_left_fea, agg_right_fea, gt_left, training=True)
loss1 = torch.mean(loss1)
loss2 = torch.mean(loss2)
loss = 0.1 * loss1 + loss2
# loss = loss1
loss.backward()
optimizer.step()
return loss1.item(), loss2.item()
def adjust_learning_rate(optimizer, epoch):
if epoch <= 10:
lr = 0.001
else:
lr = 0.0001
# print(lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def main():
# start_total_time = time.time()
start_epoch = 1
# checkpoint = torch.load('trained_ft_CA_8.12/checkpoint_3_DA.tar')
# agg_model.load_state_dict(checkpoint['net'])
# optimizer.load_state_dict(checkpoint['optimizer'])
# start_epoch = checkpoint['epoch'] + 1
for epoch in range(start_epoch, args.epoch + start_epoch):
print('This is %d-th epoch' % (epoch))
total_train_loss1 = 0
total_train_loss2 = 0
adjust_learning_rate(optimizer, epoch)
for batch_id, (imgL, imgR, disp_L, disp_R) in enumerate(tqdm(trainLoader)):
train_loss1, train_loss2 = train(imgL, imgR, disp_L, disp_R)
total_train_loss1 += train_loss1
total_train_loss2 += train_loss2
avg_train_loss1 = total_train_loss1 / len(trainLoader)
avg_train_loss2 = total_train_loss2 / len(trainLoader)
print('Epoch %d average training loss1 = %.3f, average training loss2 = %.3f' %
(epoch, avg_train_loss1, avg_train_loss2))
state = {'fa_net': model.state_dict(),
'net': agg_model.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch}
if not os.path.exists(args.save_path):
os.mkdir(args.save_path)
save_model_path = args.save_path + 'checkpoint_adaptor_{}epoch.tar'.format(epoch)
torch.save(state, save_model_path)
torch.cuda.empty_cache()
if __name__ == '__main__':
main()
| 4,861 | 32.531034 | 117 | py |
Graft-PSMNet | Graft-PSMNet-main/test_eth3d.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Variable
from torch.autograd import grad as Grad
from torchvision import transforms
import os
import copy
import skimage.io
from collections import OrderedDict
from tqdm import tqdm, trange
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import argparse
from dataloader import ETH3D_loader as et
from dataloader.readpfm import readPFM
import networks.Aggregator as Agg
import networks.feature_extraction as FE
import networks.U_net as un
parser = argparse.ArgumentParser(description='GraftNet')
parser.add_argument('--no_cuda', action='store_true', default=False)
parser.add_argument('--gpu_id', type=str, default='2')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--data_path', type=str, default='/media/data/dataset/ETH3D/')
parser.add_argument('--load_path', type=str, default='trained_models/checkpoint_final_10epoch.tar')
parser.add_argument('--max_disp', type=int, default=192)
args = parser.parse_args()
if not args.no_cuda:
os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
cuda = torch.cuda.is_available()
all_limg, all_rimg, all_disp, all_mask = et.et_loader(args.data_path)
fe_model = FE.VGG_Feature(fixed_param=True).eval()
adaptor = un.U_Net_v4(img_ch=256, output_ch=64).eval()
agg_model = Agg.PSMAggregator(args.max_disp, udc=True).eval()
if cuda:
fe_model = nn.DataParallel(fe_model.cuda())
adaptor = nn.DataParallel(adaptor.cuda())
agg_model = nn.DataParallel(agg_model.cuda())
adaptor.load_state_dict(torch.load(args.load_path)['fa_net'])
agg_model.load_state_dict(torch.load(args.load_path)['net'])
pred_mae = 0
pred_op = 0
for i in trange(len(all_limg)):
limg = Image.open(all_limg[i]).convert('RGB')
rimg = Image.open(all_rimg[i]).convert('RGB')
w, h = limg.size
wi, hi = (w // 16 + 1) * 16, (h // 16 + 1) * 16
limg = limg.crop((w - wi, h - hi, w, h))
rimg = rimg.crop((w - wi, h - hi, w, h))
limg_tensor = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])(limg)
rimg_tensor = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])(rimg)
limg_tensor = limg_tensor.unsqueeze(0).cuda()
rimg_tensor = rimg_tensor.unsqueeze(0).cuda()
disp_gt, _ = readPFM(all_disp[i])
disp_gt = np.ascontiguousarray(disp_gt, dtype=np.float32)
disp_gt[disp_gt == np.inf] = 0
gt_tensor = torch.FloatTensor(disp_gt).unsqueeze(0).unsqueeze(0).cuda()
occ_mask = np.ascontiguousarray(Image.open(all_mask[i]))
with torch.no_grad():
left_fea = fe_model(limg_tensor)
right_fea = fe_model(rimg_tensor)
left_fea = adaptor(left_fea)
right_fea = adaptor(right_fea)
pred_disp = agg_model(left_fea, right_fea, gt_tensor, training=False)
pred_disp = pred_disp[:, hi - h:, wi - w:]
predict_np = pred_disp.squeeze().cpu().numpy()
op_thresh = 1
mask = (disp_gt > 0) & (occ_mask == 255)
# mask = disp_gt > 0
    pred_error = np.abs(predict_np * mask.astype(np.float32) - disp_gt * mask.astype(np.float32))
pred_op += np.sum(pred_error > op_thresh) / np.sum(mask)
pred_mae += np.mean(pred_error[mask])
print(pred_mae / len(all_limg))
print(pred_op / len(all_limg)) | 3,559 | 32.904762 | 99 | py |
Graft-PSMNet | Graft-PSMNet-main/train_baseline.py | import argparse
import torch
import torch.utils.data
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
import os
import copy
from tqdm import tqdm
from dataloader import sceneflow_loader as sf
import networks.submodule as sm
import networks.U_net as un
import networks.Aggregator as Agg
import networks.feature_extraction as FE
import loss_functions as lf
parser = argparse.ArgumentParser(description='GraftNet')
parser.add_argument('--no_cuda', action='store_true', default=False)
parser.add_argument('--gpu_id', type=str, default='0, 1')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--batch_size', type=int, default=6)
parser.add_argument('--epoch', type=int, default=8)
parser.add_argument('--data_path', type=str, default='/media/data/dataset/SceneFlow/')
parser.add_argument('--save_path', type=str, default='trained_models/')
parser.add_argument('--max_disp', type=int, default=192)
parser.add_argument('--color_transform', action='store_true', default=False)
args = parser.parse_args()
if not args.no_cuda:
os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if cuda:
torch.cuda.manual_seed(args.seed)
all_limg, all_rimg, all_ldisp, all_rdisp, test_limg, test_rimg, test_ldisp, test_rdisp = sf.sf_loader(args.data_path)
trainLoader = torch.utils.data.DataLoader(
sf.myDataset(all_limg, all_rimg, all_ldisp, all_rdisp, training=True, color_transform=args.color_transform),
batch_size=args.batch_size, shuffle=True, num_workers=4, drop_last=False)
fe_model = sm.GwcFeature(out_c=64).train()
model = Agg.PSMAggregator(args.max_disp, udc=True).train()
if cuda:
fe_model = nn.DataParallel(fe_model.cuda())
model = nn.DataParallel(model.cuda())
params = [
{'params': fe_model.parameters(), 'lr': 1e-3},
{'params': model.parameters(), 'lr': 1e-3},
]
optimizer = optim.Adam(params, lr=1e-3, betas=(0.9, 0.999))
def train(imgL, imgR, gt_left, gt_right):
imgL = torch.FloatTensor(imgL)
imgR = torch.FloatTensor(imgR)
gt_left = torch.FloatTensor(gt_left)
gt_right = torch.FloatTensor(gt_right)
if cuda:
imgL, imgR = imgL.cuda(), imgR.cuda()
gt_left, gt_right = gt_left.cuda(), gt_right.cuda()
optimizer.zero_grad()
left_fea = fe_model(imgL)
right_fea = fe_model(imgR)
loss1, loss2 = model(left_fea, right_fea, gt_left, training=True)
loss1 = torch.mean(loss1)
loss2 = torch.mean(loss2)
loss = 0.1 * loss1 + loss2
loss.backward()
optimizer.step()
return loss1.item(), loss2.item()
def adjust_learning_rate(optimizer, epoch):
if epoch <= 10:
lr = 0.001
else:
lr = 0.0001
# print(lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def main():
# start_total_time = time.time()
start_epoch = 1
# checkpoint = torch.load('trained_gwcAgg/checkpoint_5_v1.tar')
# model.load_state_dict(checkpoint['net'])
# optimizer.load_state_dict(checkpoint['optimizer'])
# start_epoch = checkpoint['epoch'] + 1
# new_dict = {}
# for k, v in checkpoint['fe_net'].items():
# k = "module." + k
# new_dict[k] = v
# fe_model.load_state_dict(new_dict)
# optimizer_fe.load_state_dict(checkpoint['fe_optimizer'])
for epoch in range(start_epoch, args.epoch + start_epoch):
print('This is %d-th epoch' % (epoch))
total_train_loss1 = 0
total_train_loss2 = 0
adjust_learning_rate(optimizer, epoch)
for batch_id, (imgL, imgR, disp_L, disp_R) in enumerate(tqdm(trainLoader)):
train_loss1, train_loss2 = train(imgL, imgR, disp_L, disp_R)
total_train_loss1 += train_loss1
total_train_loss2 += train_loss2
avg_train_loss1 = total_train_loss1 / len(trainLoader)
avg_train_loss2 = total_train_loss2 / len(trainLoader)
print('Epoch %d average training loss1 = %.3f, average training loss2 = %.3f' %
(epoch, avg_train_loss1, avg_train_loss2))
state = {'net': model.state_dict(),
'fe_net': fe_model.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch}
if not os.path.exists(args.save_path):
os.mkdir(args.save_path)
save_model_path = args.save_path + 'checkpoint_baseline_{}epoch.tar'.format(epoch)
torch.save(state, save_model_path)
torch.cuda.empty_cache()
if __name__ == '__main__':
main()
| 4,611 | 30.589041 | 117 | py |
Graft-PSMNet | Graft-PSMNet-main/networks/U_net.py | import torch
import torch.nn as nn
import math
class conv_block(nn.Module):
def __init__(self, ch_in, ch_out):
super(conv_block, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(ch_out),
nn.ReLU(inplace=True),
nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(ch_out),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.conv(x)
return x
class up_conv(nn.Module):
def __init__(self, ch_in, ch_out):
super(up_conv, self).__init__()
self.up = nn.Sequential(
nn.Upsample(scale_factor=2),
nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
nn.BatchNorm2d(ch_out),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.up(x)
return x
class U_Net(nn.Module):
def __init__(self, img_ch=3, output_ch=1):
super(U_Net, self).__init__()
self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.Conv1 = conv_block(ch_in=img_ch, ch_out=32)
self.Conv2 = conv_block(ch_in=32, ch_out=64)
self.Conv3 = conv_block(ch_in=64, ch_out=128)
self.Conv4 = conv_block(ch_in=128, ch_out=256)
# self.Conv5 = conv_block(ch_in=256, ch_out=512)
self.Conv5 = conv_block(ch_in=256, ch_out=256)
# self.Up5 = up_conv(ch_in=512, ch_out=256)
self.Up5 = up_conv(ch_in=256, ch_out=256)
self.Up_conv5 = conv_block(ch_in=512, ch_out=256)
self.Up4 = up_conv(ch_in=256, ch_out=128)
self.Up_conv4 = conv_block(ch_in=256, ch_out=128)
self.Up3 = up_conv(ch_in=128, ch_out=64)
self.Up_conv3 = conv_block(ch_in=128, ch_out=64)
self.Up2 = up_conv(ch_in=64, ch_out=32)
self.Up_conv2 = conv_block(ch_in=64, ch_out=32)
self.Conv_1x1 = nn.Conv2d(32, output_ch, kernel_size=1, stride=1, padding=0)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
x1 = self.Conv1(x)
x2 = self.Maxpool(x1)
x2 = self.Conv2(x2)
x3 = self.Maxpool(x2)
x3 = self.Conv3(x3)
x4 = self.Maxpool(x3)
x4 = self.Conv4(x4)
x5 = self.Maxpool(x4)
x5 = self.Conv5(x5)
d5 = self.Up5(x5)
d5 = torch.cat((x4, d5), dim=1)
d5 = self.Up_conv5(d5)
d4 = self.Up4(d5)
d4 = torch.cat((x3, d4), dim=1)
d4 = self.Up_conv4(d4)
d3 = self.Up3(d4)
d3 = torch.cat((x2, d3), dim=1)
d3 = self.Up_conv3(d3)
d2 = self.Up2(d3)
d2 = torch.cat((x1, d2), dim=1)
d2 = self.Up_conv2(d2)
d1 = self.Conv_1x1(d2)
return d1
class U_Net_v2(nn.Module):
def __init__(self, img_ch=3, output_ch=1):
super(U_Net_v2, self).__init__()
self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.Conv1 = conv_block(ch_in=img_ch, ch_out=32)
self.Conv2 = conv_block(ch_in=32, ch_out=64)
self.Conv3 = conv_block(ch_in=64, ch_out=128)
self.Conv4 = conv_block(ch_in=128, ch_out=256)
self.Up4 = up_conv(ch_in=256, ch_out=128)
self.Up_conv4 = conv_block(ch_in=256, ch_out=128)
self.Up3 = up_conv(ch_in=128, ch_out=64)
self.Up_conv3 = conv_block(ch_in=128, ch_out=64)
self.Up2 = up_conv(ch_in=64, ch_out=32)
self.Up_conv2 = conv_block(ch_in=64, ch_out=32)
self.Conv_1x1 = nn.Conv2d(32, output_ch, kernel_size=1, stride=1, padding=0)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
x1 = self.Conv1(x)
x2 = self.Maxpool(x1)
x2 = self.Conv2(x2)
x3 = self.Maxpool(x2)
x3 = self.Conv3(x3)
x4 = self.Maxpool(x3)
x4 = self.Conv4(x4)
d4 = self.Up4(x4)
d4 = torch.cat((x3, d4), dim=1)
d4 = self.Up_conv4(d4)
d3 = self.Up3(d4)
d3 = torch.cat((x2, d3), dim=1)
d3 = self.Up_conv3(d3)
d2 = self.Up2(d3)
d2 = torch.cat((x1, d2), dim=1)
d2 = self.Up_conv2(d2)
d1 = self.Conv_1x1(d2)
return d1
class U_Net_v3(nn.Module):
def __init__(self, img_ch=3, output_ch=1):
super(U_Net_v3, self).__init__()
self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.Conv0 = conv_block(ch_in=img_ch, ch_out=64)
self.Conv1 = conv_block(ch_in=64, ch_out=128)
self.Conv2 = conv_block(ch_in=128, ch_out=256)
self.Up5 = up_conv(ch_in=256, ch_out=128)
self.Up_conv5 = conv_block(ch_in=256, ch_out=128)
self.Up4 = up_conv(ch_in=128, ch_out=64)
self.Up_conv4 = conv_block(ch_in=128, ch_out=64)
self.Up3 = up_conv(ch_in=64, ch_out=32)
self.Up_conv3 = conv_block(ch_in=32, ch_out=32)
self.Up2 = up_conv(ch_in=32, ch_out=32)
self.Up_conv2 = conv_block(ch_in=32, ch_out=32)
self.Conv_1x1 = nn.Conv2d(32, output_ch, kernel_size=1, stride=1, padding=0)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
x0 = self.Conv0(x) # 64 channels
x1 = self.Conv1(x0) # 128 channels
x1 = self.Maxpool(x1) # 1/8 resolution
x2 = self.Conv2(x1) # 256 channels
x2 = self.Maxpool(x2) # 1/16 resolution
d4 = self.Up5(x2) # 1/8 resolution
d4 = torch.cat((x1, d4), dim=1)
d4 = self.Up_conv5(d4) # 128 channels
d3 = self.Up4(d4) # 1/4 resolution
d3 = torch.cat((x0, d3), dim=1)
d3 = self.Up_conv4(d3) # 64 channels
d2 = self.Up3(d3) # 1/2 resolution
d2 = self.Up_conv3(d2) # 32 channels
        d1 = self.Up2(d2)  # full resolution
d1 = self.Up_conv2(d1) # 32 channels
d0 = self.Conv_1x1(d1)
return d0
class U_Net_v4(nn.Module):
def __init__(self, img_ch, output_ch):
super(U_Net_v4, self).__init__()
self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.Conv1 = conv_block(ch_in=img_ch, ch_out=32)
self.Conv2 = conv_block(ch_in=32, ch_out=64)
self.Conv3 = conv_block(ch_in=64, ch_out=128)
self.Conv4 = conv_block(ch_in=128, ch_out=128)
self.Up4 = conv_block(ch_in=128, ch_out=128)
self.Up_conv4 = up_conv(ch_in=256, ch_out=64)
self.Up3 = conv_block(ch_in=64, ch_out=64)
self.Up_conv3 = up_conv(ch_in=128, ch_out=32)
self.last_conv = nn.Conv2d(64, output_ch, 1, 1, 0, 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
# nn.init.kaiming_normal_(m, mode='fan_in', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
x1 = self.Conv1(x) # 32, 1/4
x2 = self.Maxpool(x1)
x2 = self.Conv2(x2) # 64, 1/8
x3 = self.Maxpool(x2)
x3 = self.Conv3(x3) # 128, 1/16
x4 = self.Conv4(x3) # 128, 1/16
d4 = self.Up4(x4) # 128, 1/16
d4 = torch.cat((x3, d4), dim=1)
d4 = self.Up_conv4(d4) # 64, 1/8
d3 = self.Up3(d4) # 64, 1/8
d3 = torch.cat((x2, d3), dim=1)
d3 = self.Up_conv3(d3) # 32, 1/4
d2 = torch.cat((x1, d3), dim=1)
d2 = self.last_conv(d2)
return d2
class LinearProj(nn.Module):
def __init__(self, in_c, out_c):
super(LinearProj, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_c, out_c, 1, 1, 0, 1),
nn.ReLU(inplace=True),
nn.Conv2d(out_c, out_c, 1, 1, 0, 1))
# self.conv = nn.Conv2d(in_c, out_c, 1, 1, 0, 1)
def forward(self, x):
x = self.conv(x)
return x
if __name__ == '__main__':
a = torch.rand(2, 3, 64, 128).cuda()
net = U_Net_v3(img_ch=3, output_ch=4).cuda()
b = net(a)
print(b.shape) | 9,607 | 30.093851 | 86 | py |
Graft-PSMNet | Graft-PSMNet-main/networks/resnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock_Res(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None, use_relu=True):
super(BasicBlock_Res, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
self.use_relu = use_relu
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
if self.use_relu:
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep Residual Learning for Image Recognition" (https://arxiv.org/abs/1512.03385).
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None, use_relu=True):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.use_relu = use_relu
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
if self.use_relu:
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# if DenseCl, comment this line
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock_Res):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
# stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
if i == blocks - 1:
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer, use_relu=False))
else:
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer, use_relu=True))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
# x = self.relu(x)
# x = self.layer2(x)
# x = self.relu(x)
# x = self.layer3(x)
# x = self.layer4(x)
#
# x = self.avgpool(x)
# x = torch.flatten(x, 1)
# x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
| 8,386 | 35.947137 | 106 | py |
Graft-PSMNet | Graft-PSMNet-main/networks/vgg.py | import torch
import torch.nn as nn
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
return model
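# Note: URL-based weight downloading has been stripped from this copy, so
# pretrained=True only skips the random init; callers are expected to load a
# local checkpoint themselves (see feature_extraction.VGG_Feature).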
def vgg11(pretrained=False, progress=True, **kwargs):
r"""VGG 11-layer model (configuration "A") from
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)
def vgg11_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 11-layer model (configuration "A") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)
def vgg13(pretrained=False, progress=True, **kwargs):
r"""VGG 13-layer model (configuration "B")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)
def vgg13_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 13-layer model (configuration "B") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)
def vgg16(pretrained=False, progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)
def vgg16_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)
def vgg19(pretrained=False, progress=True, **kwargs):
r"""VGG 19-layer model (configuration "E")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)
def vgg19_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 19-layer model (configuration 'E') with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)
| 6,388 | 37.257485 | 113 | py |
Graft-PSMNet | Graft-PSMNet-main/networks/stackhourglass.py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import math
from networks.submodule import convbn, convbn_3d, DisparityRegression
class hourglass(nn.Module):
def __init__(self, inplanes):
super(hourglass, self).__init__()
self.conv1 = nn.Sequential(convbn_3d(inplanes, inplanes*2, kernel_size=3, stride=2, pad=1),
nn.ReLU(inplace=True))
self.conv2 = convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1)
self.conv3 = nn.Sequential(convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=2, pad=1),
nn.ReLU(inplace=True))
self.conv4 = nn.Sequential(convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1),
nn.ReLU(inplace=True))
self.conv5 = nn.Sequential(nn.ConvTranspose3d(inplanes*2, inplanes*2, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),
nn.BatchNorm3d(inplanes*2)) #+conv2
self.conv6 = nn.Sequential(nn.ConvTranspose3d(inplanes*2, inplanes, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),
nn.BatchNorm3d(inplanes)) #+x
    def forward(self, x, presqu, postsqu):
out = self.conv1(x) #in:1/4 out:1/8
pre = self.conv2(out) #in:1/8 out:1/8
if postsqu is not None:
pre = F.relu(pre + postsqu, inplace=True)
else:
pre = F.relu(pre, inplace=True)
# print('pre2', pre.size())
out = self.conv3(pre) #in:1/8 out:1/16
out = self.conv4(out) #in:1/16 out:1/16
# print('out', out.size())
if presqu is not None:
post = F.relu(self.conv5(out)+presqu, inplace=True) #in:1/16 out:1/8
else:
post = F.relu(self.conv5(out)+pre, inplace=True)
out = self.conv6(post) #in:1/8 out:1/4
return out, pre, post
class hourglass_gwcnet(nn.Module):
def __init__(self, inplanes):
super(hourglass_gwcnet, self).__init__()
self.conv1 = nn.Sequential(convbn_3d(inplanes, inplanes * 2, kernel_size=3, stride=2, pad=1),
nn.ReLU(inplace=True))
self.conv2 = nn.Sequential(convbn_3d(inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1),
nn.ReLU(inplace=True))
self.conv3 = nn.Sequential(convbn_3d(inplanes * 2, inplanes * 4, kernel_size=3, stride=2, pad=1),
nn.ReLU(inplace=True))
self.conv4 = nn.Sequential(convbn_3d(inplanes * 4, inplanes * 4, 3, 1, 1),
nn.ReLU(inplace=True))
self.conv5 = nn.Sequential(nn.ConvTranspose3d(inplanes * 4, inplanes * 2, kernel_size=3, padding=1,
output_padding=1, stride=2, bias=False),
nn.BatchNorm3d(inplanes * 2))
self.conv6 = nn.Sequential(nn.ConvTranspose3d(inplanes * 2, inplanes, kernel_size=3, padding=1,
output_padding=1, stride=2, bias=False),
nn.BatchNorm3d(inplanes))
self.redir1 = convbn_3d(inplanes, inplanes, kernel_size=1, stride=1, pad=0)
self.redir2 = convbn_3d(inplanes * 2, inplanes * 2, kernel_size=1, stride=1, pad=0)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
conv5 = F.relu(self.conv5(conv4) + self.redir2(conv2), inplace=True)
conv6 = F.relu(self.conv6(conv5) + self.redir1(x), inplace=True)
return conv6
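def _hourglass_shape_check():
    # Minimal sanity check added for illustration (not part of the original
    # code): the 3D hourglass downsamples twice internally and upsamples back
    # with skip connections, so the output shape matches the input. The cost
    # volume dimensions must be divisible by 4.
    net = hourglass_gwcnet(32)
    out = net(torch.rand(1, 32, 12, 16, 32))
    return out.shape  # torch.Size([1, 32, 12, 16, 32])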
| 3,858 | 40.053191 | 143 | py |
Graft-PSMNet | Graft-PSMNet-main/networks/submodule.py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
from torchvision import models
import math
import numpy as np
import torchvision.transforms as transforms
import PIL
import os
import matplotlib.pyplot as plt
from networks.resnet import ResNet, Bottleneck, BasicBlock_Res
def convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):
return nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=dilation if dilation > 1 else pad, dilation = dilation, bias=False),
nn.BatchNorm2d(out_planes))
def convbn_3d(in_planes, out_planes, kernel_size, stride, pad):
return nn.Sequential(nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, padding=pad, stride=stride,bias=False),
nn.BatchNorm3d(out_planes))
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, downsample, pad, dilation):
super(BasicBlock, self).__init__()
self.conv1 = nn.Sequential(convbn(inplanes, planes, 3, stride, pad, dilation),
nn.ReLU(inplace=True))
self.conv2 = convbn(planes, planes, 3, 1, pad, dilation)
self.downsample = downsample
self.stride = stride
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
if self.downsample is not None:
x = self.downsample(x)
out += x
return out
class DisparityRegression(nn.Module):
def __init__(self, maxdisp, win_size):
super(DisparityRegression, self).__init__()
self.max_disp = maxdisp
self.win_size = win_size
def forward(self, x):
disp = torch.arange(0, self.max_disp).view(1, -1, 1, 1).float().to(x.device)
if self.win_size > 0:
max_d = torch.argmax(x, dim=1, keepdim=True)
d_value = []
prob_value = []
for d in range(-self.win_size, self.win_size + 1):
index = max_d + d
index[index < 0] = 0
index[index > x.shape[1] - 1] = x.shape[1] - 1
d_value.append(index)
prob = torch.gather(x, dim=1, index=index)
prob_value.append(prob)
part_x = torch.cat(prob_value, dim=1)
part_x = part_x / (torch.sum(part_x, dim=1, keepdim=True) + 1e-8)
part_d = torch.cat(d_value, dim=1).float()
out = torch.sum(part_x * part_d, dim=1)
else:
out = torch.sum(x * disp, 1)
return out
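def _disparity_regression_example():
    # Illustrative sketch added for clarity (not part of the original code):
    # with win_size=0 the regression is a plain soft-argmax over the whole
    # distribution, while win_size>0 keeps only the 2*win_size+1 bins around
    # the mode and renormalises them, suppressing multi-modal tails.
    prob = torch.softmax(torch.rand(1, 192, 8, 8), dim=1)
    full = DisparityRegression(192, win_size=0)(prob)
    windowed = DisparityRegression(192, win_size=5)(prob)
    return full.shape, windowed.shape  # both torch.Size([1, 8, 8])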
class GwcFeature(nn.Module):
def __init__(self, out_c, fuse_mode='add'):
super(GwcFeature, self).__init__()
self.inplanes = 32
self.fuse_mode = fuse_mode
self.firstconv = nn.Sequential(convbn(3, 32, 3, 2, 1, 1),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True))
self.layer1 = self._make_layer(BasicBlock, 32, 3, 1, 1, 1)
self.layer2 = self._make_layer(BasicBlock, 64, 16, 2, 1, 1)
self.layer3 = self._make_layer(BasicBlock, 128, 3, 1, 1, 1)
self.layer4 = self._make_layer(BasicBlock, 128, 3, 1, 1, 2)
if self.fuse_mode == 'cat':
self.lastconv = nn.Sequential(convbn(320, 128, 3, 1, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(128, out_c, kernel_size=1, padding=0, stride=1, bias=False))
elif self.fuse_mode == 'add':
self.l1_conv = nn.Conv2d(32, out_c, 1, stride=1, padding=0, bias=False)
self.l2_conv = nn.Conv2d(64, out_c, 1, stride=1, padding=0, bias=False)
self.l4_conv = nn.Conv2d(128, out_c, 1, stride=1, padding=0, bias=False)
elif self.fuse_mode == 'add_sa':
            # in_channels must match the layer1 (32) and layer4 (128) outputs used in forward
            self.l1_conv = nn.Conv2d(32, out_c, 1, stride=1, padding=0, bias=False)
            self.l4_conv = nn.Conv2d(128, out_c, 1, stride=1, padding=0, bias=False)
self.sa = nn.Sequential(convbn(2 * out_c, 2 * out_c, 3, 1, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(2 * out_c, 2, 3, stride=1, padding=1, bias=False))
def _make_layer(self, block, planes, blocks, stride, pad, dilation):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, pad, dilation))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes,1,None,pad,dilation))
return nn.Sequential(*layers)
def forward(self, x):
output = self.firstconv(x)
output_l1 = self.layer1(output)
output_l2 = self.layer2(output_l1)
output_l3 = self.layer3(output_l2)
output_l4 = self.layer4(output_l3)
output_l1 = F.interpolate(output_l1, (output_l4.size()[2], output_l4.size()[3]),
mode='bilinear', align_corners=True)
if self.fuse_mode == 'cat':
cat_feature = torch.cat((output_l2, output_l3, output_l4), dim=1)
output_feature = self.lastconv(cat_feature)
elif self.fuse_mode == 'add':
output_l1 = self.l1_conv(output_l1)
output_l4 = self.l4_conv(output_l4)
output_feature = output_l1 + output_l4
elif self.fuse_mode == 'add_sa':
output_l1 = self.l1_conv(output_l1)
output_l4 = self.l4_conv(output_l4)
attention_map = self.sa(torch.cat((output_l1, output_l4), dim=1))
attention_map = torch.sigmoid(attention_map)
output_feature = output_l1 * attention_map[:, 0].unsqueeze(1) + \
output_l4 * attention_map[:, 1].unsqueeze(1)
return output_feature
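def _gwc_feature_example():
    # Usage sketch added for illustration (not part of the original code):
    # whichever fuse_mode is chosen, the extractor maps a full-resolution image
    # to a 1/4-resolution feature map with out_c channels, since firstconv and
    # layer2 each apply stride 2.
    net = GwcFeature(out_c=64, fuse_mode='add')
    fea = net(torch.rand(1, 3, 256, 512))
    return fea.shape  # torch.Size([1, 64, 64, 128])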
| 6,460 | 37.921687 | 174 | py |
Graft-PSMNet | Graft-PSMNet-main/networks/Aggregator.py | import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import math
from networks.submodule import convbn, convbn_3d, DisparityRegression
from networks.stackhourglass import hourglass_gwcnet, hourglass
import matplotlib.pyplot as plt
import loss_functions as lf
def build_cost_volume(left_fea, right_fea, max_disp, cost_type):
if cost_type == 'cor':
left_fea_norm = F.normalize(left_fea, dim=1)
right_fea_norm = F.normalize(right_fea, dim=1)
cost = torch.zeros(left_fea.size()[0], 1, max_disp // 4,
left_fea.size()[2], left_fea.size()[3]).cuda()
for i in range(max_disp // 4):
if i > 0:
cost[:, :, i, :, i:] = (torch.sum(left_fea_norm[:, :, :, i:] * right_fea_norm[:, :, :, :-i],
dim=1, keepdim=True) + 1) / 2
else:
cost[:, :, i, :, :] = (torch.sum(left_fea_norm * right_fea_norm, dim=1, keepdim=True) + 1) / 2
elif cost_type == 'l2':
cost = torch.zeros(left_fea.size()[0], 1, max_disp // 4,
left_fea.size()[2], left_fea.size()[3]).cuda()
for i in range(max_disp // 4):
if i > 0:
cost[:, :, i, :, i:] = torch.sqrt(torch.sum(
(left_fea[:, :, :, i:] - right_fea[:, :, :, :-i]) ** 2, dim=1, keepdim=True))
else:
cost[:, :, i, :, :] = torch.sqrt(torch.sum((left_fea - right_fea) ** 2, dim=1, keepdim=True))
elif cost_type == 'cat':
cost = torch.zeros(left_fea.size()[0], left_fea.size()[1] * 2, max_disp // 4,
left_fea.size()[2], left_fea.size()[3]).cuda()
for i in range(max_disp // 4):
if i > 0:
cost[:, :left_fea.size()[1], i, :, i:] = left_fea[:, :, :, i:]
cost[:, left_fea.size()[1]:, i, :, i:] = right_fea[:, :, :, :-i]
else:
cost[:, :left_fea.size()[1], i, :, :] = left_fea
cost[:, left_fea.size()[1]:, i, :, :] = right_fea
elif cost_type == 'ncat':
left_fea = F.normalize(left_fea, dim=1)
right_fea = F.normalize(right_fea, dim=1)
cost = torch.zeros(left_fea.size()[0], left_fea.size()[1] * 2, max_disp // 4,
left_fea.size()[2], left_fea.size()[3]).cuda()
for i in range(max_disp // 4):
if i > 0:
cost[:, :left_fea.size()[1], i, :, i:] = left_fea[:, :, :, i:]
cost[:, left_fea.size()[1]:, i, :, i:] = right_fea[:, :, :, :-i]
else:
cost[:, :left_fea.size()[1], i, :, :] = left_fea
cost[:, left_fea.size()[1]:, i, :, :] = right_fea
cost = cost.contiguous()
return cost
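def _cost_volume_shape_example():
    # Added sanity check (assumes a CUDA device, matching the .cuda() calls
    # above): 'cor' and 'l2' build a single-channel similarity/distance volume,
    # while 'cat'/'ncat' stack both feature maps and double the channel count.
    # All variants use max_disp // 4 disparity planes at 1/4 resolution.
    left = torch.rand(1, 64, 32, 64).cuda()
    right = torch.rand(1, 64, 32, 64).cuda()
    cor = build_cost_volume(left, right, 192, 'cor')    # (1, 1, 48, 32, 64)
    ncat = build_cost_volume(left, right, 192, 'ncat')  # (1, 128, 48, 32, 64)
    return cor.shape, ncat.shape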
class GwcAggregator(nn.Module):
def __init__(self, maxdisp):
super(GwcAggregator, self).__init__()
self.maxdisp = maxdisp
self.dres0 = nn.Sequential(convbn_3d(64, 32, 3, 1, 1),
nn.ReLU(inplace=True),
convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True))
self.dres1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True))
self.hg1 = hourglass_gwcnet(32)
self.hg2 = hourglass_gwcnet(32)
self.hg3 = hourglass_gwcnet(32)
self.classify1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
self.classify2 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
self.classify3 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.Conv3d):
n = m.kernel_size[0] * m.kernel_size[1]*m.kernel_size[2] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, left_fea, right_fea, gt_left, gt_right):
cost = build_cost_volume(left_fea, right_fea, self.maxdisp, cost_type='ncat')
cost0 = self.dres0(cost)
cost1 = self.dres1(cost0) + cost0
out1 = self.hg1(cost1)
out2 = self.hg2(out1)
out3 = self.hg3(out2)
win_s = 5
if self.training:
cost1 = self.classify1(out1)
cost1 = F.interpolate(cost1, scale_factor=4, mode='trilinear', align_corners=True)
cost1 = torch.squeeze(cost1, 1)
distribute1 = F.softmax(cost1, dim=1)
pred1 = DisparityRegression(self.maxdisp, win_size=win_s)(distribute1)
cost2 = self.classify2(out2)
cost2 = F.interpolate(cost2, scale_factor=4, mode='trilinear', align_corners=True)
cost2 = torch.squeeze(cost2, 1)
distribute2 = F.softmax(cost2, dim=1)
pred2 = DisparityRegression(self.maxdisp, win_size=win_s)(distribute2)
cost3 = self.classify3(out3)
cost3 = F.interpolate(cost3, scale_factor=4, mode='trilinear', align_corners=True)
cost3 = torch.squeeze(cost3, 1)
distribute3 = F.softmax(cost3, dim=1)
pred3 = DisparityRegression(self.maxdisp, win_size=win_s)(distribute3)
if self.training:
mask = (gt_left < self.maxdisp) & (gt_left > 0)
loss1 = 0.5 * F.smooth_l1_loss(pred1[mask], gt_left[mask]) + \
0.7 * F.smooth_l1_loss(pred2[mask], gt_left[mask]) + \
F.smooth_l1_loss(pred3[mask], gt_left[mask])
gt_distribute = lf.disp2distribute(gt_left, self.maxdisp, b=2)
loss2 = 0.5 * lf.CEloss(gt_left, self.maxdisp, gt_distribute, distribute1) + \
0.7 * lf.CEloss(gt_left, self.maxdisp, gt_distribute, distribute2) + \
lf.CEloss(gt_left, self.maxdisp, gt_distribute, distribute3)
loss3 = lf.FeatureSimilarityLoss(self.maxdisp)(left_fea, right_fea, gt_left, gt_right)
return loss1, loss2, loss3
else:
return pred3
class PSMAggregator(nn.Module):
def __init__(self, maxdisp, udc):
super(PSMAggregator, self).__init__()
self.maxdisp = maxdisp
self.udc = udc
self.dres0 = nn.Sequential(convbn_3d(1, 32, 3, 1, 1),
nn.ReLU(inplace=True),
convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True))
self.dres1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True))
self.hg1 = hourglass(32)
self.hg2 = hourglass(32)
self.hg3 = hourglass(32)
self.classify1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
self.classify2 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
self.classify3 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.Conv3d):
n = m.kernel_size[0] * m.kernel_size[1]*m.kernel_size[2] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, left_fea, right_fea, gt_left, training):
cost = build_cost_volume(left_fea, right_fea, self.maxdisp, cost_type='cor')
cost0 = self.dres0(cost)
cost1 = self.dres1(cost0) + cost0
out1, pre1, post1 = self.hg1(cost1, None, None)
out1 = out1+cost0
out2, pre2, post2 = self.hg2(out1, pre1, post1)
out2 = out2+cost0
out3, pre3, post3 = self.hg3(out2, pre1, post2)
out3 = out3+cost0
cost1 = self.classify1(out1)
cost2 = self.classify2(out2) + cost1
cost3 = self.classify3(out3) + cost2
if self.udc:
win_s = 5
else:
win_s = 0
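        # udc switches disparity regression from a full soft-argmax (win_s=0)
        # to the windowed variant in submodule.DisparityRegression, which
        # averages only the 2*win_s+1 bins around the distribution mode.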
if self.training:
cost1 = F.interpolate(cost1, scale_factor=4, mode='trilinear', align_corners=True)
cost1 = torch.squeeze(cost1, 1)
distribute1 = F.softmax(cost1, dim=1)
pred1 = DisparityRegression(self.maxdisp, win_size=win_s)(distribute1)
cost2 = F.interpolate(cost2, scale_factor=4, mode='trilinear', align_corners=True)
cost2 = torch.squeeze(cost2, 1)
distribute2 = F.softmax(cost2, dim=1)
pred2 = DisparityRegression(self.maxdisp, win_size=win_s)(distribute2)
cost3 = F.interpolate(cost3, scale_factor=4, mode='trilinear', align_corners=True)
cost3 = torch.squeeze(cost3, 1)
distribute3 = F.softmax(cost3, dim=1)
pred3 = DisparityRegression(self.maxdisp, win_size=win_s)(distribute3)
if self.training:
mask = (gt_left < self.maxdisp) & (gt_left > 0)
loss1 = 0.5 * F.smooth_l1_loss(pred1[mask], gt_left[mask]) + \
0.7 * F.smooth_l1_loss(pred2[mask], gt_left[mask]) + \
F.smooth_l1_loss(pred3[mask], gt_left[mask])
gt_distribute = lf.disp2distribute(gt_left, self.maxdisp, b=2)
loss2 = 0.5 * lf.CEloss(gt_left, self.maxdisp, gt_distribute, distribute1) + \
0.7 * lf.CEloss(gt_left, self.maxdisp, gt_distribute, distribute2) + \
lf.CEloss(gt_left, self.maxdisp, gt_distribute, distribute3)
return loss1, loss2
else:
if training:
mask = (gt_left < self.maxdisp) & (gt_left > 0)
loss1 = F.smooth_l1_loss(pred3[mask], gt_left[mask])
# loss2 = loss1
gt_distribute = lf.disp2distribute(gt_left, self.maxdisp, b=2)
loss2 = lf.CEloss(gt_left, self.maxdisp, gt_distribute, distribute3)
return loss1, loss2
else:
return pred3
| 11,953 | 42 | 110 | py |
Graft-PSMNet | Graft-PSMNet-main/networks/feature_extraction.py | import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
from torchvision import models
import math
import numpy as np
import torchvision.transforms as transforms
import PIL
import os
import matplotlib.pyplot as plt
from networks.resnet import ResNet, Bottleneck, BasicBlock_Res
from networks.vgg import vgg16
from collections import OrderedDict
class VGG_Feature(nn.Module):
def __init__(self, fixed_param):
super(VGG_Feature, self).__init__()
self.fe = vgg16(pretrained=False)
self.fe.load_state_dict(
torch.load('networks/vgg16-397923af.pth'))
features = self.fe.features
self.to_feat = nn.Sequential()
for i in range(15):
self.to_feat.add_module(str(i), features[i])
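        # indices 0-14 of torchvision's VGG-16 features span conv1_1 up to
        # conv3_3 and include two max-pools, so the extracted feature map is
        # at 1/4 resolution with 256 channels.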
if fixed_param:
for p in self.to_feat.parameters():
p.requires_grad = False
def forward(self, x):
feature = self.to_feat(x)
# feature = F.interpolate(feature, scale_factor=0.5, mode='bilinear', align_corners=True)
return feature
class VGG_Bn_Feature(nn.Module):
def __init__(self):
super(VGG_Bn_Feature, self).__init__()
features = models.vgg16_bn(pretrained=True).cuda().eval().features
self.to_feat = nn.Sequential()
# for i in range(8):
# self.to_feat.add_module(str(i), features[i])
for i in range(15):
self.to_feat.add_module(str(i), features[i])
for p in self.to_feat.parameters():
p.requires_grad = False
def forward(self, x):
feature = self.to_feat(x)
# feature = F.interpolate(feature, scale_factor=0.5, mode='bilinear', align_corners=True)
return feature
class Res18(nn.Module):
def __init__(self):
super(Res18, self).__init__()
self.fe = ResNet(BasicBlock_Res, [2, 2, 2, 2])
# self.fe = ResNet(Bottleneck, [3, 4, 6, 3])
for p in self.fe.parameters():
p.requires_grad = False
self.fe.load_state_dict(
torch.load('networks/resnet18-5c106cde.pth'))
def forward(self, x):
self.fe.eval()
with torch.no_grad():
feature = self.fe(x)
return feature
class Res50(nn.Module):
def __init__(self):
super(Res50, self).__init__()
self.fe = ResNet(Bottleneck, [3, 4, 6, 3])
for p in self.fe.parameters():
p.requires_grad = False
# self.fe.load_state_dict(
# torch.load('networks/resnet50-19c8e357.pth'))
self.fe.load_state_dict(
torch.load('networks/DenseCL_R50_imagenet.pth'))
def forward(self, x):
self.fe.eval()
with torch.no_grad():
feature = self.fe(x)
return feature
if __name__ == '__main__':
os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = "2"
from collections import OrderedDict
ckpt = torch.load('selfTrainVGG_withDA.pth')
new_dict = OrderedDict()
for k, v in ckpt.items():
new_k = k.replace('module.', '')
new_dict[new_k] = v
torch.save(new_dict, 'selfTrainVGG_withDA.pth') | 3,225 | 24.401575 | 97 | py |
Graft-PSMNet | Graft-PSMNet-main/dataloader/ETH3D_loader.py | import os
from PIL import Image
from dataloader import readpfm as rp
import dataloader.preprocess
import torch.utils.data as data
import torchvision.transforms as transforms
import numpy as np
import random
IMG_EXTENSIONS= [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP'
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
# filepath = '/media/data/dataset/ETH3D/'
def et_loader(filepath):
left_img = []
right_img = []
disp_gt = []
occ_mask = []
img_path = os.path.join(filepath, 'two_view_training')
gt_path = os.path.join(filepath, 'two_view_training_gt')
for c in os.listdir(img_path):
img_cpath = os.path.join(img_path, c)
gt_cpath = os.path.join(gt_path, c)
left_img.append(os.path.join(img_cpath, 'im0.png'))
right_img.append(os.path.join(img_cpath, 'im1.png'))
disp_gt.append(os.path.join(gt_cpath, 'disp0GT.pfm'))
occ_mask.append(os.path.join(gt_cpath, 'mask0nocc.png'))
return left_img, right_img, disp_gt, occ_mask,
def img_loader(path):
return Image.open(path).convert('RGB')
def disparity_loader(path):
return rp.readPFM(path)
class myDataset(data.Dataset):
def __init__(self, left, right, disp_gt, occ_mask, training, imgloader=img_loader, dploader = disparity_loader):
self.left = left
self.right = right
self.disp_gt = disp_gt
self.occ_mask = occ_mask
self.imgloader = imgloader
self.dploader = dploader
self.training = training
        self.img_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
def __getitem__(self, index):
left = self.left[index]
right = self.right[index]
        disp_gt = self.disp_gt[index]
        occ_mask = self.occ_mask[index]
        left_img = self.imgloader(left)
        right_img = self.imgloader(right)
        dataL, _ = self.dploader(disp_gt)
        dataL = np.ascontiguousarray(dataL, dtype=np.float32)
        # ETH3D has no right-view ground truth: the png non-occlusion mask is
        # loaded with PIL (not the pfm loader) and returned in the dataR slot
        dataR = np.ascontiguousarray(Image.open(occ_mask), dtype=np.float32)
if self.training:
w, h = left_img.size
tw, th = 512, 256
x1 = random.randint(0, w - tw)
y1 = random.randint(0, h - th)
left_img = left_img.crop((x1, y1, x1+tw, y1+th))
right_img = right_img.crop((x1, y1, x1+tw, y1+th))
dataL = dataL[y1:y1+th, x1:x1+tw]
dataR = dataR[y1:y1+th, x1:x1+tw]
            left_img = self.img_transform(left_img)
            right_img = self.img_transform(right_img)
return left_img, right_img, dataL, dataR
else:
w, h = left_img.size
left_img = left_img.crop((w-960, h-544, w, h))
right_img = right_img.crop((w-960, h-544, w, h))
            left_img = self.img_transform(left_img)
            right_img = self.img_transform(right_img)
dataL = Image.fromarray(dataL).crop((w-960, h-544, w, h))
dataL = np.ascontiguousarray(dataL)
dataR = Image.fromarray(dataR).crop((w-960, h-544, w, h))
dataR = np.ascontiguousarray(dataR)
return left_img, right_img, dataL, dataR
def __len__(self):
return len(self.left)
| 3,410 | 28.405172 | 116 | py |
Graft-PSMNet | Graft-PSMNet-main/dataloader/KITTIloader.py | import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import os
from PIL import Image
import random
import numpy as np
IMG_EXTENSIONS= [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP'
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def kt_loader(filepath):
left_path = os.path.join(filepath, 'image_2')
right_path = os.path.join(filepath, 'image_3')
displ_path = os.path.join(filepath, 'disp_occ_0')
# total_name = sorted([name for name in os.listdir(left_path) if name.find('_10') > -1])
total_name = [name for name in os.listdir(left_path) if name.find('_10') > -1]
train_name = total_name[:160]
val_name = total_name[160:]
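    # KITTI 2015 ships 200 training pairs: the first 160 are used for training
    # and the remaining 40 for validation. os.listdir order is not guaranteed,
    # so the sorted() variant commented out above makes the split reproducible.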
train_left = []
train_right = []
train_displ = []
for name in train_name:
train_left.append(os.path.join(left_path, name))
train_right.append(os.path.join(right_path, name))
train_displ.append(os.path.join(displ_path, name))
val_left = []
val_right = []
val_displ = []
for name in val_name:
val_left.append(os.path.join(left_path, name))
val_right.append(os.path.join(right_path, name))
val_displ.append(os.path.join(displ_path, name))
return train_left, train_right, train_displ, val_left, val_right, val_displ
def kt2012_loader(filepath):
left_path = os.path.join(filepath, 'colored_0')
right_path = os.path.join(filepath, 'colored_1')
displ_path = os.path.join(filepath, 'disp_occ')
total_name = sorted([name for name in os.listdir(left_path) if name.find('_10') > -1])
train_name = total_name[:160]
val_name = total_name[160:]
train_left = []
train_right = []
train_displ = []
for name in train_name:
train_left.append(os.path.join(left_path, name))
train_right.append(os.path.join(right_path, name))
train_displ.append(os.path.join(displ_path, name))
val_left = []
val_right = []
val_displ = []
for name in val_name:
val_left.append(os.path.join(left_path, name))
val_right.append(os.path.join(right_path, name))
val_displ.append(os.path.join(displ_path, name))
return train_left, train_right, train_displ, val_left, val_right, val_displ
def img_loader(path):
return Image.open(path).convert('RGB')
def disparity_loader(path):
return Image.open(path)
class myDataset(data.Dataset):
def __init__(self, left, right, left_disp, training, imgloader=img_loader, disploader=disparity_loader):
self.left = left
self.right = right
self.left_disp = left_disp
self.training = training
self.imgloader = imgloader
self.disploader = disploader
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
def __getitem__(self, index):
left = self.left[index]
right = self.right[index]
left_disp = self.left_disp[index]
limg = self.imgloader(left)
rimg = self.imgloader(right)
ldisp = self.disploader(left_disp)
# W, H = limg.size
# limg = limg.resize((960, 288))
# rimg = rimg.resize((960, 288))
# ldisp = ldisp.resize((960, 288), Image.NEAREST)
if self.training:
w, h = limg.size
tw, th = 512, 256
x1 = random.randint(0, w - tw)
y1 = random.randint(0, h - th)
limg = limg.crop((x1, y1, x1 + tw, y1 + th))
rimg = rimg.crop((x1, y1, x1 + tw, y1 + th))
ldisp = np.ascontiguousarray(ldisp, dtype=np.float32) / 256
ldisp = ldisp[y1:y1 + th, x1:x1 + tw]
limg = self.transform(limg)
rimg = self.transform(rimg)
else:
w, h = limg.size
limg = limg.crop((w-1232, h-368, w, h))
rimg = rimg.crop((w-1232, h-368, w, h))
ldisp = ldisp.crop((w-1232, h-368, w, h))
ldisp = np.ascontiguousarray(ldisp, dtype=np.float32)/256
limg = self.transform(limg)
rimg = self.transform(rimg)
# ldisp = ldisp * (960/W)
return limg, rimg, ldisp, ldisp
def __len__(self):
return len(self.left)
| 4,371 | 28.540541 | 108 | py |
Graft-PSMNet | Graft-PSMNet-main/dataloader/KITTI2012loader.py | import torch.utils.data as data
import torchvision.transforms as transforms
import os
from PIL import Image
import random
import numpy as np
IMG_EXTENSIONS= [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP'
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def kt2012_loader(filepath):
left_path = os.path.join(filepath, 'colored_0')
right_path = os.path.join(filepath, 'colored_1')
displ_path = os.path.join(filepath, 'disp_occ')
total_name = [name for name in os.listdir(left_path) if name.find('_10') > -1]
train_name = total_name[:160]
val_name = total_name[160:]
train_left = []
train_right = []
train_displ = []
for name in train_name:
train_left.append(os.path.join(left_path, name))
train_right.append(os.path.join(right_path, name))
train_displ.append(os.path.join(displ_path, name))
val_left = []
val_right = []
val_displ = []
for name in val_name:
val_left.append(os.path.join(left_path, name))
val_right.append(os.path.join(right_path, name))
val_displ.append(os.path.join(displ_path, name))
return train_left, train_right, train_displ, val_left, val_right, val_displ
def img_loader(path):
return Image.open(path).convert('RGB')
def disparity_loader(path):
return Image.open(path)
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
class myDataset(data.Dataset):
def __init__(self, left, right, left_disp, training, imgloader=img_loader, disploader=disparity_loader):
self.left = left
self.right = right
self.left_disp = left_disp
self.imgloader = imgloader
self.disploader = disploader
self.training = training
def __getitem__(self, index):
left = self.left[index]
right = self.right[index]
left_disp = self.left_disp[index]
limg = self.imgloader(left)
rimg = self.imgloader(right)
ldisp = self.disploader(left_disp)
if self.training:
w, h = limg.size
tw, th = 512, 256
x1 = random.randint(0, w - tw)
y1 = random.randint(0, h - th)
limg = limg.crop((x1, y1, x1 + tw, y1 + th))
rimg = rimg.crop((x1, y1, x1 + tw, y1 + th))
ldisp = np.ascontiguousarray(ldisp, dtype=np.float32)/256
ldisp = ldisp[y1:y1 + th, x1:x1 + tw]
limg = transform(limg)
rimg = transform(rimg)
return limg, rimg, ldisp
else:
w, h = limg.size
limg = limg.crop((w-1232, h-368, w, h))
rimg = rimg.crop((w-1232, h-368, w, h))
ldisp = ldisp.crop((w-1232, h-368, w, h))
ldisp = np.ascontiguousarray(ldisp, dtype=np.float32)/256
limg = transform(limg)
rimg = transform(rimg)
return limg, rimg, ldisp
def __len__(self):
return len(self.left) | 3,118 | 26.848214 | 108 | py |
Graft-PSMNet | Graft-PSMNet-main/dataloader/sceneflow_loader.py | import os
from PIL import Image
from dataloader import readpfm as rp
import dataloader.preprocess
import torch.utils.data as data
import torchvision.transforms as transforms
import numpy as np
import random
IMG_EXTENSIONS= [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP'
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
# filepath = '/media/data/LiuBiyang/SceneFlow/'
def sf_loader(filepath):
classes = [d for d in os.listdir(filepath) if os.path.isdir(os.path.join(filepath, d))]
image = [img for img in classes if img.find('frames_cleanpass') > -1]
disparity = [disp for disp in classes if disp.find('disparity') > -1]
all_left_img = []
all_right_img = []
all_left_disp = []
all_right_disp = []
test_left_img = []
test_right_img = []
test_left_disp = []
test_right_disp = []
monkaa_img = filepath + [x for x in image if 'monkaa' in x][0]
monkaa_disp = filepath + [x for x in disparity if 'monkaa' in x][0]
monkaa_dir = os.listdir(monkaa_img)
for dd in monkaa_dir:
left_path = monkaa_img + '/' + dd + '/left/'
right_path = monkaa_img + '/' + dd + '/right/'
disp_path = monkaa_disp + '/' + dd + '/left/'
rdisp_path = monkaa_disp + '/' + dd + '/right/'
left_imgs = os.listdir(left_path)
for img in left_imgs:
img_path = os.path.join(left_path, img)
if is_image_file(img_path):
all_left_img.append(img_path)
all_right_img.append(os.path.join(right_path, img))
all_left_disp.append(disp_path + img.split(".")[0] + '.pfm')
all_right_disp.append(rdisp_path + img.split(".")[0] + '.pfm')
flying_img = filepath + [x for x in image if 'flying' in x][0]
flying_disp = filepath + [x for x in disparity if 'flying' in x][0]
fimg_train = flying_img + '/TRAIN/'
fimg_test = flying_img + '/TEST/'
fdisp_train = flying_disp + '/TRAIN/'
fdisp_test = flying_disp + '/TEST/'
fsubdir = ['A', 'B', 'C']
for dd in fsubdir:
imgs_path = fimg_train + dd + '/'
disps_path = fdisp_train + dd + '/'
imgs = os.listdir(imgs_path)
for cc in imgs:
left_path = imgs_path + cc + '/left/'
right_path = imgs_path + cc + '/right/'
disp_path = disps_path + cc + '/left/'
rdisp_path = disps_path + cc + '/right/'
left_imgs = os.listdir(left_path)
for img in left_imgs:
img_path = os.path.join(left_path, img)
if is_image_file(img_path):
all_left_img.append(img_path)
all_right_img.append(os.path.join(right_path, img))
all_left_disp.append(disp_path + img.split(".")[0] + '.pfm')
all_right_disp.append(rdisp_path + img.split(".")[0] + '.pfm')
for dd in fsubdir:
imgs_path = fimg_test + dd + '/'
disps_path = fdisp_test + dd + '/'
imgs = os.listdir(imgs_path)
for cc in imgs:
left_path = imgs_path + cc + '/left/'
right_path = imgs_path + cc + '/right/'
disp_path = disps_path + cc + '/left/'
rdisp_path = disps_path + cc + '/right/'
left_imgs = os.listdir(left_path)
for img in left_imgs:
img_path = os.path.join(left_path, img)
if is_image_file(img_path):
test_left_img.append(img_path)
test_right_img.append(os.path.join(right_path, img))
test_left_disp.append(disp_path + img.split(".")[0] + '.pfm')
test_right_disp.append(rdisp_path + img.split(".")[0] + '.pfm')
driving_img = filepath + [x for x in image if 'driving' in x][0]
driving_disp = filepath + [x for x in disparity if 'driving' in x][0]
dsubdir1 = ['15mm_focallength', '35mm_focallength']
dsubdir2 = ['scene_backwards', 'scene_forwards']
dsubdir3 = ['fast', 'slow']
for d in dsubdir1:
img_path1 = driving_img + '/' + d + '/'
disp_path1 = driving_disp + '/' + d + '/'
for dd in dsubdir2:
img_path2 = img_path1 + dd + '/'
disp_path2 = disp_path1 + dd + '/'
for ddd in dsubdir3:
img_path3 = img_path2 + ddd + '/'
disp_path3 = disp_path2 + ddd + '/'
left_path = img_path3 + 'left/'
right_path = img_path3 + 'right/'
disp_path = disp_path3 + 'left/'
rdisp_path = disp_path3 + 'right/'
left_imgs = os.listdir(left_path)
for img in left_imgs:
img_path = os.path.join(left_path, img)
if is_image_file(img_path):
all_left_img.append(img_path)
all_right_img.append(os.path.join(right_path, img))
all_left_disp.append(disp_path + img.split(".")[0] + '.pfm')
all_right_disp.append(rdisp_path + img.split(".")[0] + '.pfm')
return all_left_img, all_right_img, all_left_disp, all_right_disp, \
test_left_img, test_right_img, test_left_disp, test_right_disp
def img_loader(path):
return Image.open(path).convert('RGB')
def disparity_loader(path):
return rp.readPFM(path)
def random_transform(left_img, right_img):
if np.random.rand(1) <= 0.2:
left_img = transforms.Grayscale(num_output_channels=3)(left_img)
right_img = transforms.Grayscale(num_output_channels=3)(right_img)
else:
left_img = transforms.ColorJitter(brightness=0.5, contrast=0.5, hue=0.1)(left_img)
right_img = transforms.ColorJitter(brightness=0.5, contrast=0.5, hue=0.1)(right_img)
return left_img, right_img
class myDataset(data.Dataset):
def __init__(self, left, right, left_disp, right_disp, training, imgloader=img_loader, dploader = disparity_loader,
color_transform = False):
self.left = left
self.right = right
self.disp_L = left_disp
self.disp_R = right_disp
self.imgloader = imgloader
self.dploader = dploader
self.training = training
        self.img_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
self.color_transform = color_transform
def __getitem__(self, index):
left = self.left[index]
right = self.right[index]
disp_L = self.disp_L[index]
disp_R = self.disp_R[index]
left_img = self.imgloader(left)
right_img = self.imgloader(right)
dataL, _ = self.dploader(disp_L)
dataL = np.ascontiguousarray(dataL, dtype=np.float32)
dataR, _ = self.dploader(disp_R)
dataR = np.ascontiguousarray(dataR, dtype=np.float32)
if self.training:
w, h = left_img.size
tw, th = 512, 256
x1 = random.randint(0, w - tw)
y1 = random.randint(0, h - th)
left_img = left_img.crop((x1, y1, x1+tw, y1+th))
right_img = right_img.crop((x1, y1, x1+tw, y1+th))
dataL = dataL[y1:y1+th, x1:x1+tw]
dataR = dataR[y1:y1+th, x1:x1+tw]
if self.color_transform:
left_img, right_img = random_transform(left_img, right_img)
            left_img = self.img_transform(left_img)
            right_img = self.img_transform(right_img)
return left_img, right_img, dataL, dataR
else:
w, h = left_img.size
left_img = left_img.crop((w-960, h-544, w, h))
right_img = right_img.crop((w-960, h-544, w, h))
            left_img = self.img_transform(left_img)
            right_img = self.img_transform(right_img)
dataL = Image.fromarray(dataL).crop((w-960, h-544, w, h))
dataL = np.ascontiguousarray(dataL)
dataR = Image.fromarray(dataR).crop((w-960, h-544, w, h))
dataR = np.ascontiguousarray(dataR)
return left_img, right_img, dataL, dataR
def __len__(self):
return len(self.left)
| 8,270 | 36.425339 | 119 | py |
Graft-PSMNet | Graft-PSMNet-main/dataloader/middlebury_loader.py | import os
from PIL import Image
from dataloader import readpfm as rp
import torch.utils.data as data
import torchvision.transforms as transforms
import numpy as np
import random
def mb_loader(filepath, res):
train_path = os.path.join(filepath, 'training' + res)
test_path = os.path.join(filepath, 'test' + res)
gt_path = train_path.replace('training' + res, 'Eval3_GT/training' + res)
train_left = []
train_right = []
train_gt = []
for c in os.listdir(train_path):
train_left.append(os.path.join(train_path, c, 'im0.png'))
train_right.append(os.path.join(train_path, c, 'im1.png'))
train_gt.append(os.path.join(gt_path, c, 'disp0GT.pfm'))
test_left = []
test_right = []
for c in os.listdir(test_path):
test_left.append(os.path.join(test_path, c, 'im0.png'))
test_right.append(os.path.join(test_path, c, 'im1.png'))
train_left = sorted(train_left)
train_right = sorted(train_right)
train_gt = sorted(train_gt)
test_left = sorted(test_left)
test_right = sorted(test_right)
return train_left, train_right, train_gt, test_left, test_right
def img_loader(path):
return Image.open(path).convert('RGB')
def disparity_loader(path):
return rp.readPFM(path)
class myDataset(data.Dataset):
def __init__(self, left, right, left_disp, training, imgloader=img_loader, dploader = disparity_loader):
self.left = left
self.right = right
self.disp_L = left_disp
self.imgloader = imgloader
self.dploader = dploader
self.training = training
        self.img_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
def __getitem__(self, index):
left = self.left[index]
right = self.right[index]
disp_L = self.disp_L[index]
left_img = self.imgloader(left)
right_img = self.imgloader(right)
dataL, scaleL = self.dploader(disp_L)
dataL = Image.fromarray(np.ascontiguousarray(dataL, dtype=np.float32))
if self.training:
w, h = left_img.size
# random resize
            # PIL's resize expects integer sizes, so draw a scalar scale and round
            s = float(np.random.uniform(0.95, 1.05))
            rw, rh = int(np.round(w * s)), int(np.round(h * s))
left_img = left_img.resize((rw, rh), Image.NEAREST)
right_img = right_img.resize((rw, rh), Image.NEAREST)
dataL = dataL.resize((rw, rh), Image.NEAREST)
dataL = Image.fromarray(np.array(dataL) * s)
# random horizontal flip
p = np.random.rand(1)
if p >= 0.5:
left_img = horizontal_flip(left_img)
right_img = horizontal_flip(right_img)
dataL = horizontal_flip(dataL)
w, h = left_img.size
tw, th = 320, 240
x1 = random.randint(0, w - tw)
y1 = random.randint(0, h - th)
left_img = left_img.crop((x1, y1, x1+tw, y1+th))
right_img = right_img.crop((x1, y1, x1+tw, y1+th))
dataL = dataL.crop((x1, y1, x1+tw, y1+th))
            left_img = self.img_transform(left_img)
            right_img = self.img_transform(right_img)
dataL = np.array(dataL)
return left_img, right_img, dataL
else:
w, h = left_img.size
left_img = left_img.resize((w // 32 * 32, h // 32 * 32))
right_img = right_img.resize((w // 32 * 32, h // 32 * 32))
            left_img = self.img_transform(left_img)
            right_img = self.img_transform(right_img)
dataL = np.array(dataL)
return left_img, right_img, dataL
def __len__(self):
return len(self.left)
def horizontal_flip(img):
img_np = np.array(img)
img_np = np.flip(img_np, axis=1)
img = Image.fromarray(img_np)
return img
if __name__ == '__main__':
train_left, train_right, train_gt, _, _ = mb_loader('/media/data/dataset/MiddEval3-data-Q/', res='Q')
H, W = 0, 0
for l in train_right:
left_img = Image.open(l).convert('RGB')
h, w = left_img.size
H += h
W += w
print(H / 15, W / 15) | 4,193 | 30.298507 | 108 | py |
Graft-PSMNet | Graft-PSMNet-main/dataloader/vKITTI_loader.py | import torch.utils.data as data
import torchvision.transforms as transforms
import os
from PIL import Image
import random
import numpy as np
def vkt_loader(filepath):
all_limg = []
all_rimg = []
all_disp = []
img_path = os.path.join(filepath, 'vkitti_2.0.3_rgb')
depth_path = os.path.join(filepath, 'vkitti_2.0.3_depth')
for scene in os.listdir(img_path):
img_scenes_path = os.path.join(img_path, scene, 'clone/frames/rgb')
depth_scenes_path = os.path.join(depth_path, scene, 'clone/frames/depth')
for name in os.listdir(os.path.join(img_scenes_path, 'Camera_0')):
all_limg.append(os.path.join(img_scenes_path, 'Camera_0', name))
all_rimg.append(os.path.join(img_scenes_path, 'Camera_1', name))
all_disp.append(os.path.join(depth_scenes_path, 'Camera_0',
name.replace('jpg', 'png').replace('rgb', 'depth')))
total_num = len(all_limg)
train_length = int(total_num * 0.75)
train_limg = all_limg[:train_length]
train_rimg = all_rimg[:train_length]
train_disp = all_disp[:train_length]
val_limg = all_limg[train_length:]
val_rimg = all_rimg[train_length:]
val_disp = all_disp[train_length:]
return train_limg, train_rimg, train_disp, val_limg, val_rimg, val_disp
def img_loader(path):
return Image.open(path).convert('RGB')
def disparity_loader(path):
return Image.open(path)
class vkDataset(data.Dataset):
def __init__(self, left, right, left_disp, training, imgloader=img_loader, disploader=disparity_loader):
self.left = left
self.right = right
self.left_disp = left_disp
self.imgloader = imgloader
self.disploader = disploader
self.training = training
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
def __getitem__(self, index):
left = self.left[index]
right = self.right[index]
left_disp = self.left_disp[index]
limg = self.imgloader(left)
rimg = self.imgloader(right)
ldisp = self.disploader(left_disp)
if self.training:
w, h = limg.size
tw, th = 512, 256
x1 = random.randint(0, w - tw)
y1 = random.randint(0, h - th)
limg = limg.crop((x1, y1, x1 + tw, y1 + th))
rimg = rimg.crop((x1, y1, x1 + tw, y1 + th))
limg = self.transform(limg)
rimg = self.transform(rimg)
baseline, fx, fy = 0.532725, 725.0087, 725.0087
camera_params = {'baseline': baseline,
'fx': fx,
'fy': fy}
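            # vKITTI 2 depth PNGs encode depth in centimetres (hence the /100.
            # conversion to metres below); the pinhole model then gives
            # disparity = baseline * f / depth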
ldepth = np.ascontiguousarray(ldisp, dtype=np.float32) / 100.
ldisp = baseline * fy / ldepth
ldisp = ldisp[y1:y1 + th, x1:x1 + tw]
return limg, rimg, ldisp, ldisp
else:
w, h = limg.size
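            # fixed bottom-right crop to a uniform evaluation size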
limg = limg.crop((w-1232, h-368, w, h))
rimg = rimg.crop((w-1232, h-368, w, h))
ldisp = ldisp.crop((w-1232, h-368, w, h))
limg = self.transform(limg)
rimg = self.transform(rimg)
baseline, fx, fy = 0.532725, 725.0087, 725.0087
ldepth = np.ascontiguousarray(ldisp, dtype=np.float32) / 100.
ldisp = baseline * fy / ldepth
return limg, rimg, ldisp, ldisp
def __len__(self):
return len(self.left)
if __name__ == '__main__':
path = '/media/data2/Dataset/vKITTI2/'
a, b, c, d, e, f = vkt_loader(path)
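    # a possible DataLoader hook-up (illustrative sketch; batch size is arbitrary):
    # loader = data.DataLoader(vkDataset(a, b, c, training=True), batch_size=4, shuffle=True)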
print(len(a)) | 3,671 | 29.6 | 108 | py |
contextualLoss | contextualLoss-master/CX/CX_distance.py | # import tensorflow as tf
import torch
import numpy as np
class TensorAxis:
N = 0
H = 1
W = 2
C = 3
class CSFlow:
def __init__(self, sigma=float(0.1), b=float(1.0)):
self.b = b
self.sigma = sigma
def __calculate_CS(self, scaled_distances, axis_for_normalization=TensorAxis.C):
self.scaled_distances = scaled_distances
self.cs_weights_before_normalization = torch.exp((self.b - scaled_distances) / self.sigma)
# self.cs_weights_before_normalization = 1 / (1 + scaled_distances)
# self.cs_NHWC = CSFlow.sum_normalize(self.cs_weights_before_normalization, axis_for_normalization)
self.cs_NHWC = self.cs_weights_before_normalization
# def reversed_direction_CS(self):
# cs_flow_opposite = CSFlow(self.sigma, self.b)
# cs_flow_opposite.raw_distances = self.raw_distances
# work_axis = [TensorAxis.H, TensorAxis.W]
# relative_dist = cs_flow_opposite.calc_relative_distances(axis=work_axis)
# cs_flow_opposite.__calculate_CS(relative_dist, work_axis)
# return cs_flow_opposite
# --
@staticmethod
def create_using_L2(I_features, T_features, sigma=float(0.5), b=float(1.0)):
cs_flow = CSFlow(sigma, b)
sT = T_features.shape
sI = I_features.shape
Ivecs = torch.reshape(I_features, (sI[0], -1, sI[3]))
Tvecs = torch.reshape(T_features, (sI[0], -1, sT[3]))
r_Ts = torch.sum(Tvecs * Tvecs, 2)
r_Is = torch.sum(Ivecs * Ivecs, 2)
raw_distances_list = []
for i in range(sT[0]):
Ivec, Tvec, r_T, r_I = Ivecs[i], Tvecs[i], r_Ts[i], r_Is[i]
A = Tvec @ torch.transpose(Ivec, 0, 1) # (matrix multiplication)
cs_flow.A = A
# A = tf.matmul(Tvec, tf.transpose(Ivec))
r_T = torch.reshape(r_T, [-1, 1]) # turn to column vector
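                # squared L2 distance via the expansion ||t - i||^2 = ||t||^2 - 2*t.i + ||i||^2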
dist = r_T - 2 * A + r_I
dist = torch.reshape(torch.transpose(dist, 0, 1), shape=(1, sI[1], sI[2], dist.shape[0]))
# protecting against numerical problems, dist should be positive
dist = torch.clamp(dist, min=float(0.0))
# dist = tf.sqrt(dist)
raw_distances_list += [dist]
cs_flow.raw_distances = torch.cat(raw_distances_list)
relative_dist = cs_flow.calc_relative_distances()
cs_flow.__calculate_CS(relative_dist)
return cs_flow
# --
@staticmethod
def create_using_L1(I_features, T_features, sigma=float(0.5), b=float(1.0)):
cs_flow = CSFlow(sigma, b)
sT = T_features.shape
sI = I_features.shape
Ivecs = torch.reshape(I_features, (sI[0], -1, sI[3]))
Tvecs = torch.reshape(T_features, (sI[0], -1, sT[3]))
raw_distances_list = []
for i in range(sT[0]):
Ivec, Tvec = Ivecs[i], Tvecs[i]
            # L1 distance: sum of absolute per-channel differences, arranged as
            # (T locations, I locations) to match the L2 variant above
            dist = torch.sum(torch.abs(Tvec.unsqueeze(1) - Ivec.unsqueeze(0)), dim=2)
dist = torch.reshape(torch.transpose(dist, 0, 1), shape=(1, sI[1], sI[2], dist.shape[0]))
# protecting against numerical problems, dist should be positive
dist = torch.clamp(dist, min=float(0.0))
# dist = tf.sqrt(dist)
raw_distances_list += [dist]
cs_flow.raw_distances = torch.cat(raw_distances_list)
relative_dist = cs_flow.calc_relative_distances()
cs_flow.__calculate_CS(relative_dist)
return cs_flow
# --
@staticmethod
def create_using_dotP(I_features, T_features, sigma=float(0.5), b=float(1.0)):
cs_flow = CSFlow(sigma, b)
# prepare feature before calculating cosine distance
T_features, I_features = cs_flow.center_by_T(T_features, I_features)
T_features = CSFlow.l2_normalize_channelwise(T_features)
I_features = CSFlow.l2_normalize_channelwise(I_features)
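        # after centering and channel-wise L2 normalisation, the dot products
        # computed below equal cosine similarities between feature vectors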
        # work separately for each example along the batch dim
cosine_dist_l = []
N = T_features.size()[0]
for i in range(N):
            T_features_i = T_features[i, :, :, :].unsqueeze_(0)  # HWC --> 1HWC
            I_features_i = I_features[i, :, :, :].unsqueeze_(0).permute((0, 3, 1, 2))  # 1HWC --> 1CHW
patches_PC11_i = cs_flow.patch_decomposition(T_features_i) # 1HWC --> PC11, with P=H*W
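            # a 1x1 convolution whose filters are the T feature vectors computes
            # the dot product of every I location with every T location at once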
cosine_dist_i = torch.nn.functional.conv2d(I_features_i, patches_PC11_i)
            cosine_dist_l.append(cosine_dist_i.permute((0, 2, 3, 1)))  # back to 1HWC
cs_flow.cosine_dist = torch.cat(cosine_dist_l, dim=0)
        cs_flow.raw_distances = (1 - cs_flow.cosine_dist) / 2  # map cosine similarity in [-1, 1] to a distance in [0, 1]
relative_dist = cs_flow.calc_relative_distances()
cs_flow.__calculate_CS(relative_dist)
return cs_flow
def calc_relative_distances(self, axis=TensorAxis.C):
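        # normalise each distance by the minimum distance to any candidate match
        # (plus epsilon), so similarity is judged relative to the best match
        # rather than on an absolute scale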
epsilon = 1e-5
div = torch.min(self.raw_distances, dim=axis, keepdim=True)[0]
relative_dist = self.raw_distances / (div + epsilon)
return relative_dist
@staticmethod
def sum_normalize(cs, axis=TensorAxis.C):
reduce_sum = torch.sum(cs, dim=axis, keepdim=True)
cs_normalize = torch.div(cs, reduce_sum)
return cs_normalize
    def center_by_T(self, T_features, I_features):
        # assuming both inputs are of the same size
        # calculate stats over [batch, height, width], expecting a 1x1x1xC tensor
        axes = (0, 1, 2)
        self.meanT = T_features.mean(dim=axes, keepdim=True)
        self.varT = T_features.var(dim=axes, keepdim=True)
self.T_features_centered = T_features - self.meanT
self.I_features_centered = I_features - self.meanT
return self.T_features_centered, self.I_features_centered
@staticmethod
def l2_normalize_channelwise(features):
norms = features.norm(p=2, dim=TensorAxis.C, keepdim=True)
features = features.div(norms)
return features
def patch_decomposition(self, T_features):
# 1HWC --> 11PC --> PC11, with P=H*W
(N, H, W, C) = T_features.shape
P = H * W
patches_PC11 = T_features.reshape(shape=(1, 1, P, C)).permute(dims=(2, 3, 0, 1))
return patches_PC11
@staticmethod
def pdist2(x, keepdim=False):
sx = x.shape
x = x.reshape(shape=(sx[0], sx[1] * sx[2], sx[3]))
differences = x.unsqueeze(2) - x.unsqueeze(1)
distances = torch.sum(differences**2, -1)
if keepdim:
distances = distances.reshape(shape=(sx[0], sx[1], sx[2], sx[3]))
return distances
@staticmethod
def calcR_static(sT, order='C', deformation_sigma=0.05):
        # order can be C or F (MATLAB order)
pixel_count = sT[0] * sT[1]
rangeRows = range(0, sT[1])
rangeCols = range(0, sT[0])
Js, Is = np.meshgrid(rangeRows, rangeCols)
row_diff_from_first_row = Is
col_diff_from_first_col = Js
row_diff_from_first_row_3d_repeat = np.repeat(row_diff_from_first_row[:, :, np.newaxis], pixel_count, axis=2)
col_diff_from_first_col_3d_repeat = np.repeat(col_diff_from_first_col[:, :, np.newaxis], pixel_count, axis=2)
rowDiffs = -row_diff_from_first_row_3d_repeat + row_diff_from_first_row.flatten(order).reshape(1, 1, -1)
colDiffs = -col_diff_from_first_col_3d_repeat + col_diff_from_first_col.flatten(order).reshape(1, 1, -1)
R = rowDiffs ** 2 + colDiffs ** 2
R = R.astype(np.float32)
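    # Gaussian falloff over the squared pixel distance, so spatially distant
    # matches are down-weighted when the deformation prior is enabled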
R = np.exp(-(R) / (2 * deformation_sigma ** 2))
return R
# --------------------------------------------------
# CX loss
# --------------------------------------------------
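# For each target feature, the best contextual similarity found anywhere in
# the input image is taken; the loss is -log of the mean of these maxima.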
def CX_loss(T_features, I_features, deformation=False, dis=False):
    # T_features = tf.convert_to_tensor(T_features, dtype=tf.float32)
    # I_features = tf.convert_to_tensor(I_features, dtype=tf.float32)
    # T_features = normalize_tensor(T_features)
    # I_features = normalize_tensor(I_features)

    # Since this is a port of a TensorFlow implementation, all tensors are
    # permuted to the TF convention (NHWC) rather than PyTorch's NCHW.
def from_pt2tf(Tpt):
Ttf = Tpt.permute(0, 2, 3, 1)
return Ttf
# N x C x H x W --> N x H x W x C
T_features_tf = from_pt2tf(T_features)
I_features_tf = from_pt2tf(I_features)
    # cs_flow = CSFlow.create_using_dotP(I_features_tf, T_features_tf, sigma=1.0)
    cs_flow = CSFlow.create_using_L2(I_features_tf, T_features_tf, sigma=1.0)
    cs = cs_flow.cs_NHWC
if deformation:
deforma_sigma = 0.001
        sT = T_features_tf.shape[1:3]  # (H, W)
R = CSFlow.calcR_static(sT, deformation_sigma=deforma_sigma)
cs *= torch.Tensor(R).unsqueeze(dim=0).cuda()
if dis:
CS = []
k_max_NC = torch.max(torch.max(cs, dim=1)[1], dim=1)[1]
indices = k_max_NC.cpu()
N, C = indices.shape
for i in range(N):
CS.append((C - len(torch.unique(indices[i, :]))) / C)
score = torch.FloatTensor(CS)
else:
# reduce_max X and Y dims
# cs = CSFlow.pdist2(cs,keepdim=True)
k_max_NC = torch.max(torch.max(cs, dim=1)[0], dim=1)[0]
# reduce mean over C dim
CS = torch.mean(k_max_NC, dim=1)
# score = 1/CS
# score = torch.exp(-CS*10)
score = -torch.log(CS)
# reduce mean over N dim
# CX_loss = torch.mean(CX_loss)
return score
def symetric_CX_loss(T_features, I_features):
score = (CX_loss(T_features, I_features) + CX_loss(I_features, T_features)) / 2
return score
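# A minimal smoke test on random features (an illustrative sketch, not part
# of the original module; shapes are arbitrary). CX_loss expects NCHW feature
# maps, e.g. VGG activations of the target (T) and generated (I) images.
if __name__ == '__main__':
    T = torch.rand(2, 64, 24, 24)
    I = torch.rand(2, 64, 24, 24)
    print(CX_loss(T, I))            # per-example score, shape (2,)
    print(symetric_CX_loss(T, I))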
| 9,761 | 38.362903 | 117 | py |
fastai | fastai-master/setup.py | from pkg_resources import parse_version
from configparser import ConfigParser
import setuptools,re,sys
assert parse_version(setuptools.__version__)>=parse_version('36.2')
# note: all settings are in settings.ini; edit there, not here
config = ConfigParser(delimiters=['='])
config.read('settings.ini')
cfg = config['DEFAULT']
cfg_keys = 'version description keywords author author_email'.split()
expected = cfg_keys + "lib_name user branch license status min_python audience language".split()
for o in expected: assert o in cfg, "missing expected setting: {}".format(o)
setup_cfg = {o:cfg[o] for o in cfg_keys}
if len(sys.argv)>1 and sys.argv[1]=='version':
print(setup_cfg['version'])
exit()
licenses = {
'apache2': ('Apache Software License 2.0','OSI Approved :: Apache Software License'),
}
statuses = [ '1 - Planning', '2 - Pre-Alpha', '3 - Alpha',
'4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive' ]
py_versions = '2.0 2.1 2.2 2.3 2.4 2.5 2.6 2.7 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 3.10'.split()
min_python = cfg['min_python']
lic = licenses[cfg['license']]
requirements = ['pip', 'packaging']
if cfg.get('requirements'): requirements += cfg.get('requirements','').split()
if cfg.get('pip_requirements'): requirements += cfg.get('pip_requirements','').split()
dev_requirements = (cfg.get('dev_requirements') or '').split()
long_description = open('README.md', encoding='utf8').read()
# 
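# (relative image links don't render off-GitHub, e.g. on PyPI, so rewrite them
# to absolute raw.githubusercontent.com URLs for the configured user/branch)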
for ext in ['png', 'svg']:
long_description = re.sub(r'!\['+ext+'\]\((.*)\)', '+'/'+cfg['branch']+'/\\1)', long_description)
long_description = re.sub(r'src=\"(.*)\.'+ext+'\"', 'src=\"https://raw.githubusercontent.com/{}/{}'.format(cfg['user'],cfg['lib_name'])+'/'+cfg['branch']+'/\\1.'+ext+'\"', long_description)
setuptools.setup(
name = 'fastai',
license = lic[0],
classifiers = [
'Development Status :: ' + statuses[int(cfg['status'])],
'Intended Audience :: ' + cfg['audience'].title(),
'License :: ' + lic[1],
'Natural Language :: ' + cfg['language'].title(),
] + ['Programming Language :: Python :: '+o for o in py_versions[py_versions.index(min_python):]],
url = cfg['git_url'],
packages = setuptools.find_packages(),
include_package_data = True,
install_requires = requirements,
extras_require={ 'dev': dev_requirements },
python_requires = '>=' + cfg['min_python'],
long_description = long_description,
long_description_content_type = 'text/markdown',
zip_safe = False,
entry_points = {
'console_scripts': cfg.get('console_scripts','').split(),
'nbdev': [f'{cfg.get("lib_path")}={cfg.get("lib_path")}._modidx:d']
},
**setup_cfg)
| 2,822 | 43.109375 | 193 | py |