repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
NCLS-Corpora | NCLS-Corpora-master/code/beaver-2task+/beaver/data/field.py | # -*- coding: utf-8 -*-
from typing import List
import torch
EOS_TOKEN = "<eos>"
BOS_TOKEN = "<bos>"
UNK_TOKEN = "<unk>"
PAD_TOKEN = "<pad>"
class Field(object):
    """Converts between token sequences and padded id tensors for one data side.

    Depending on the constructor flags, sequences are wrapped with BOS/EOS
    markers, padded to a common length, and mapped to vocabulary ids.
    """

    def __init__(self, bos: bool, eos: bool, pad: bool, unk: bool):
        # Each special token is either enabled (set to its literal) or None.
        self.bos_token = BOS_TOKEN if bos else None
        self.eos_token = EOS_TOKEN if eos else None
        self.unk_token = UNK_TOKEN if unk else None
        self.pad_token = PAD_TOKEN if pad else None
        self.vocab = None  # populated later by load_vocab

    def load_vocab(self, words: List[str], specials: List[str]):
        """Build the vocabulary from plain words plus special tokens."""
        self.vocab = Vocab(words, specials)

    def process(self, batch, device):
        """Pad a batch of token lists, encode them and return a LongTensor.

        Every sequence is wrapped with the enabled BOS/EOS tokens and padded
        up to the length of the longest raw sequence in the batch.
        """
        max_len = max(len(x) for x in batch)
        padded = []
        for x in batch:
            bos = [self.bos_token] if self.bos_token else []
            eos = [self.eos_token] if self.eos_token else []
            pad = [self.pad_token] * (max_len - len(x))
            padded.append(bos + x + eos + pad)
        # NOTE: the original version also collected a per-example length list
        # here, but it was never returned or used; the dead code was removed.
        padded = torch.tensor([self.encode(ex) for ex in padded])
        return padded.long().to(device)

    def encode(self, tokens):
        """Map tokens to vocabulary ids, falling back to the UNK id."""
        ids = []
        for tok in tokens:
            if tok in self.vocab.stoi:
                ids.append(self.vocab.stoi[tok])
            else:
                # Only touch unk_id lazily: fields without an UNK token
                # never reach this branch for in-vocabulary input.
                ids.append(self.unk_id)
        return ids

    def decode(self, ids):
        """Map ids back to a string: drop BOS, stop at EOS, undo BPE ("@@")."""
        tokens = []
        for tok in ids:
            tok = self.vocab.itos[tok]
            if tok == self.eos_token:
                break
            if tok == self.bos_token:
                continue
            tokens.append(tok)
        return " ".join(tokens).replace("@@ ", "").replace("@@", "")

    @property
    def special(self):
        # Order matters: it defines the id layout of the special tokens.
        return [tok for tok in [self.unk_token, self.pad_token, self.bos_token, self.eos_token] if tok is not None]

    @property
    def pad_id(self):
        return self.vocab.stoi[self.pad_token]

    @property
    def eos_id(self):
        return self.vocab.stoi[self.eos_token]

    @property
    def bos_id(self):
        return self.vocab.stoi[self.bos_token]

    @property
    def unk_id(self):
        return self.vocab.stoi[self.unk_token]
class Vocab(object):
    """Token/index lookup built from specials followed by regular words."""

    def __init__(self, words: List[str], specials: List[str]):
        # Specials occupy the lowest ids so their positions are stable.
        self.itos = list(specials) + list(words)
        self.stoi = dict(zip(self.itos, range(len(self.itos))))

    def __len__(self):
        return len(self.itos)
| 2,418 | 25.582418 | 115 | py |
NCLS-Corpora | NCLS-Corpora-master/code/beaver-2task+/beaver/data/dataset.py | # -*- coding: utf-8 -*-
import random
from collections import namedtuple
from typing import Dict
import torch
from beaver.data.field import Field
# Lightweight record types: a processed minibatch and one raw sentence pair.
Batch = namedtuple("Batch", "src tgt batch_size")
Example = namedtuple("Example", "src tgt")
class TranslationDataset(object):
    """Parallel corpus that yields padded src/tgt batches on a device.

    Examples are length-sorted once at load time so minibatches group
    sentences of similar length (less padding); in training mode the batch
    order is reshuffled indefinitely, otherwise one pass is made.
    """
    def __init__(self,
                 src_path: str,
                 tgt_path: str,
                 batch_size: int,
                 device: torch.device,
                 train: bool,
                 fields: Dict[str, Field]):
        self.batch_size = batch_size
        self.train = train
        self.device = device
        self.fields = fields
        # Sort by source length first, then target length.
        self.sort_key = lambda ex: (len(ex.src), len(ex.tgt))
        examples = []
        for src_line, tgt_line in zip(read_file(src_path), read_file(tgt_path)):
            examples.append(Example(src_line, tgt_line))
        # `seed` records the permutation applied by sorting, so the caller
        # can map sorted positions back to original corpus order.
        examples, self.seed = self.sort(examples)
        self.num_examples = len(examples)
        self.batches = list(batch(examples, self.batch_size))
    def __iter__(self):
        while True:
            if self.train:
                random.shuffle(self.batches)
            for minibatch in self.batches:
                # Encode and pad each side with its own Field.
                src = self.fields["src"].process([x.src for x in minibatch], self.device)
                tgt = self.fields["tgt"].process([x.tgt for x in minibatch], self.device)
                yield Batch(src=src, tgt=tgt, batch_size=len(minibatch))
            # Evaluation mode iterates over the corpus exactly once.
            if not self.train:
                break
    def sort(self, examples):
        """Return (length-sorted examples, index permutation producing them)."""
        seed = sorted(range(len(examples)), key=lambda idx: self.sort_key(examples[idx]))
        return sorted(examples, key=self.sort_key), seed
def read_file(path):
    """Lazily read *path* (UTF-8) and yield one whitespace-token list per line."""
    with open(path, encoding="utf-8") as handle:
        yield from (raw.strip().split() for raw in handle)
def batch(data, batch_size):
    """Group examples into minibatches capped by padded tensor size.

    The cap is max_sequence_length * num_examples <= batch_size, i.e. the
    number of cells the padded minibatch tensor would occupy.  When adding
    an example overflows the cap, the batch collected so far is emitted and
    the example starts a new one.

    Fix: the original could yield an EMPTY minibatch when a single example
    alone exceeded batch_size (fresh minibatch of one overflows and
    `minibatch[:-1]` is `[]`); empty batches would crash downstream
    padding (`max()` on an empty sequence), so they are now skipped and
    the oversized example is kept as its own batch.
    """
    minibatch, cur_len = [], 0
    for ex in data:
        minibatch.append(ex)
        cur_len = max(cur_len, len(ex.src), len(ex.tgt))
        # Only flush when there is at least one *complete* earlier example.
        if cur_len * len(minibatch) > batch_size and len(minibatch) > 1:
            yield minibatch[:-1]
            minibatch, cur_len = [ex], max(len(ex.src), len(ex.tgt))
    if minibatch:
        yield minibatch
| 2,164 | 29.069444 | 89 | py |
NCLS-Corpora | NCLS-Corpora-master/code/beaver-2task+/beaver/infer/beam.py | # -*- coding: utf-8 -*-
import torch
class Beam(object):
    """Beam-search state for a single source sentence.

    Keeps `beam_size` partial hypotheses ("rays") with cumulative log-scores,
    collects finished hypotheses (those that emitted EOS) and ranks them with
    a GNMT-style length penalty.
    """
    def __init__(self, beam_size, pad, bos, eos, device, lp):
        self.size = beam_size
        self.alpha = lp  # length-penalty exponent
        # All rays start at -1e20 except ray 0, so the first expansion is
        # effectively greedy from a single BOS hypothesis.
        self.scores = torch.full([beam_size], -1e20).float().to(device)
        self.scores[0] = 0.
        self.hypotheses = torch.full([1, beam_size], fill_value=pad).long().to(device)
        self.hypotheses[0][0] = bos
        self.eos = eos
        self.finished = []  # list of (score, token tensor without BOS) pairs
    @property
    def current_state(self):
        # Last emitted token of every ray: the decoder input for the next step.
        return self.hypotheses[-1]
    def advance(self, scores, origin, tokens):
        """Extend rays: reorder histories by `origin` and append `tokens`.

        Rays that just emitted EOS are moved to `finished` and their live
        score is reset to -1e20 so they are never expanded again.
        """
        self.scores = scores
        self.hypotheses = torch.index_select(self.hypotheses, 1, origin)
        self.hypotheses = torch.cat([self.hypotheses, tokens.unsqueeze(0)])
        for idx, tok in enumerate(self.hypotheses[-1]):
            if tok == self.eos:
                # Slice from 1 to drop the leading BOS token.
                self.finished.append((self.scores[idx].clone(), self.hypotheses[1:, idx]))
                self.scores[idx] = -1e20
    @property
    def done(self):
        # Search ends when no live ray can still beat the best finished
        # hypothesis under the length penalty.
        max_score = max([self.length_penalty(score, self.hypotheses.size(0)) for score in self.scores])
        max_finish = max([self.length_penalty(t[0], t[1].size(0)) for t in self.finished]) if self.finished else -1e20
        return bool(max_score < max_finish)
    @property
    def best_hypothesis(self):
        finished = sorted(self.finished, key=lambda t: self.length_penalty(t[0], t[1].size(0)), reverse=True)
        if not finished:
            # Nothing finished yet: fall back to the current top ray (minus BOS).
            return self.hypotheses[1:, 0]
        return finished[0][1]
    def length_penalty(self, score, length):
        # GNMT-style penalty: score * 6^alpha / (5 + length)^alpha.
        return score * (6 ** self.alpha) / ((5 + length) ** self.alpha)
| 1,652 | 32.06 | 118 | py |
NCLS-Corpora | NCLS-Corpora-master/code/beaver-2task+/beaver/infer/translator.py | # -*- coding: utf-8 -*-
import torch
from beaver.infer.beam import Beam
def beam_search(opt, model, src, fields, flag):
    """Batched beam search; `flag` selects the task1 vs task2 decoder/generator.

    Returns one best hypothesis (token id tensor, BOS stripped) per source
    sentence in `src`.
    """
    batch_size = src.size(0)
    beam_size = opt.beam_size
    device = src.device
    encoder = model.encoder
    if flag:
        decoder = model.task1_decoder
        generator = model.task1_generator
        tgt_field = fields["task1_tgt"]
        num_words = model.task1_generator.vocab_size
    else:
        decoder = model.task2_decoder
        generator = model.task2_generator
        tgt_field = fields["task2_tgt"]
        num_words = model.task2_generator.vocab_size
    beams = [Beam(opt.beam_size, tgt_field.pad_id, tgt_field.bos_id, tgt_field.eos_id,
                  device, opt.length_penalty) for _ in range(batch_size)]
    # Encode once; each source row is replicated beam_size times so the
    # flattened batch axis is (batch_size * beam_size).
    src = src.repeat(1, beam_size).view(batch_size*beam_size, -1)
    src_pad = src.eq(fields["src"].pad_id)
    src_out = encoder(src, src_pad)
    # Offset of each sentence's first ray inside the flattened beam axis.
    beam_expander = (torch.arange(batch_size) * beam_size).view(-1, 1).to(device)
    previous = None  # decoder self-attention cache for incremental decoding
    for i in range(opt.max_length):
        if all((b.done for b in beams)):
            break
        # [batch_size x beam_size, 1]
        current_token = torch.cat([b.current_state for b in beams]).unsqueeze(-1)
        tgt_pad = current_token.eq(tgt_field.pad_id)
        out, previous = decoder(current_token, src_out, src_pad, tgt_pad, previous, i)
        previous_score = torch.stack([b.scores for b in beams]).unsqueeze(-1)
        out = generator(out).view(batch_size, beam_size, -1)
        # Forbid EOS until the minimum output length is reached.
        if i < opt.min_length:
            out[:, :, tgt_field.eos_id] = -1e15
        # find topk candidates
        scores, indexes = (out + previous_score).view(batch_size, -1).topk(beam_size)
        # find origins and token: each flat index encodes (ray, word).
        origins = (indexes.view(-1) // num_words).view(batch_size, beam_size)
        tokens = (indexes.view(-1) % num_words).view(batch_size, beam_size)
        for j, b in enumerate(beams):
            b.advance(scores[j], origins[j], tokens[j])
        # Reorder the decoder cache to follow the surviving rays.
        origins = (origins + beam_expander).view(-1)
        previous = torch.index_select(previous, 0, origins)
    return [b.best_hypothesis for b in beams]
| 2,156 | 32.184615 | 86 | py |
NCLS-Corpora | NCLS-Corpora-master/code/beaver-2task+/beaver/model/embeddings.py | # -*- coding: utf-8 -*-
import math
import torch
import torch.nn as nn
def positional_encoding(dim, max_len=5000):
    """Sinusoidal position table of shape (max_len, dim) (Vaswani et al.)."""
    steps = torch.arange(0, max_len).unsqueeze(1).float()
    # Frequencies decay geometrically from 1 down to 1/10000.
    inv_freq = torch.exp(torch.arange(0, dim, 2, dtype=torch.float) * (-math.log(10000.0) / dim))
    angles = steps * inv_freq
    table = torch.zeros(max_len, dim)
    table[:, 0::2] = torch.sin(angles)  # even columns
    table[:, 1::2] = torch.cos(angles)  # odd columns
    return table
class Embedding(nn.Module):
    """Token embedding scaled by sqrt(dim) with additive sinusoidal positions."""

    def __init__(self, embedding_dim, vocab_size, padding_idx, dropout):
        super(Embedding, self).__init__()
        self.word_padding_idx = padding_idx
        self.embedding_dim = embedding_dim
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=padding_idx)
        # The positional table is fixed; a buffer moves with .to()/state_dict.
        self.register_buffer('pe', positional_encoding(embedding_dim))
        self.dropout = nn.Dropout(p=dropout)
        self.reset_parameters()

    def reset_parameters(self):
        # Scaled-normal init, matching the sqrt(dim) scaling in forward.
        nn.init.normal_(self.embedding.weight, mean=0.0, std=self.embedding_dim ** -0.5)

    @property
    def padding_idx(self):
        return self.word_padding_idx

    def forward(self, x, timestep=0):
        """Embed ids, scale, add positions starting at `timestep`, apply dropout."""
        scaled = self.embedding(x) * math.sqrt(self.embedding_dim)
        return self.dropout(scaled + self.pe[timestep:timestep + x.size(1)])
| 1,313 | 31.04878 | 110 | py |
NCLS-Corpora | NCLS-Corpora-master/code/beaver-2task+/beaver/model/transformer.py | # -*- coding: utf-8 -*-
import math
import torch
import torch.nn as nn
class FeedForward(nn.Module):
    """Position-wise two-layer MLP: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, hidden_size, inner_size, dropout):
        super(FeedForward, self).__init__()
        self.linear_in = nn.Linear(hidden_size, inner_size, bias=False)
        self.linear_out = nn.Linear(inner_size, hidden_size, bias=False)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        self.reset_parameters()

    def reset_parameters(self):
        for layer in (self.linear_in, self.linear_out):
            nn.init.xavier_uniform_(layer.weight)

    def forward(self, x):
        hidden = self.dropout(self.relu(self.linear_in(x)))
        return self.linear_out(hidden)
class EncoderLayer(nn.Module):
    """Pre-norm Transformer encoder layer: self-attention then feed-forward."""

    def __init__(self, hidden_size, dropout, head_count, ff_size):
        super(EncoderLayer, self).__init__()
        self.self_attn = MultiHeadedAttention(head_count, hidden_size, dropout)
        self.feed_forward = FeedForward(hidden_size, ff_size, dropout)
        self.dropout = nn.ModuleList([nn.Dropout(dropout) for _ in range(2)])
        self.norm = nn.ModuleList([nn.LayerNorm(hidden_size) for _ in range(2)])

    def forward(self, x, mask):
        # Residual connection around the pre-normed self-attention.
        attn_out = self.self_attn(self.norm[0](x), mask=mask)
        x = x + self.dropout[0](attn_out)
        # Residual connection around the pre-normed feed-forward.
        ff_out = self.feed_forward(self.norm[1](x))
        return x + self.dropout[1](ff_out)
class Encoder(nn.Module):
    """Stack of EncoderLayers over embedded source tokens, with a final norm."""

    def __init__(self, num_layers, num_heads, hidden_size, dropout, ff_size, embedding):
        super(Encoder, self).__init__()
        self.num_layers = num_layers
        self.embedding = embedding
        self.layers = nn.ModuleList([EncoderLayer(hidden_size, dropout, num_heads, ff_size) for _ in range(num_layers)])
        self.norm = nn.LayerNorm(hidden_size)

    def forward(self, src, src_pad):
        # Broadcast the key-side padding mask over the query dimension.
        src_mask = src_pad.unsqueeze(1)
        output = self.embedding(src)
        for layer in self.layers:
            output = layer(output, src_mask)
        return self.norm(output)
class DecoderLayer(nn.Module):
    """Pre-norm decoder layer: masked self-attn, encoder attn, feed-forward.

    `previous` carries the self-attention inputs of earlier timesteps so the
    layer can run one token at a time during incremental decoding; the
    concatenated inputs are returned for the caller to cache.
    """

    def __init__(self, hidden_size, dropout, head_count, ff_size):
        super(DecoderLayer, self).__init__()
        self.self_attn = MultiHeadedAttention(head_count, hidden_size, dropout)
        self.src_attn = MultiHeadedAttention(head_count, hidden_size, dropout)
        self.feed_forward = FeedForward(hidden_size, ff_size, dropout)
        self.norm = nn.ModuleList([nn.LayerNorm(hidden_size, eps=1e-6) for _ in range(3)])
        self.dropout = nn.ModuleList([nn.Dropout(dropout) for _ in range(3)])

    def forward(self, x, enc_out, src_mask, tgt_mask, previous=None):
        all_input = x if previous is None else torch.cat((previous, x), dim=1)
        # Masked self-attention over the full (cached + current) history.
        self_out = self.self_attn(self.norm[0](x), self.norm[0](all_input), mask=tgt_mask)
        x = x + self.dropout[0](self_out)
        # Attention over the encoder output.
        ctx_out = self.src_attn(self.norm[1](x), enc_out, mask=src_mask)
        x = x + self.dropout[1](ctx_out)
        # Position-wise feed-forward.
        ff_out = self.feed_forward(self.norm[2](x))
        return x + self.dropout[2](ff_out), all_input
class Decoder(nn.Module):
    """Stack of DecoderLayers with causal masking and an incremental cache."""
    def __init__(self, num_layers, num_heads, hidden_size, dropout, ff_size, embedding):
        self.num_layers = num_layers
        super(Decoder, self).__init__()
        self.embedding = embedding
        self.layers = nn.ModuleList([DecoderLayer(hidden_size, dropout, num_heads, ff_size) for _ in range(num_layers)])
        # Causal-mask template (1 strictly above the diagonal), sliced per
        # sequence length; supports sequences up to 1000 tokens.
        self.register_buffer("upper_triangle", torch.triu(torch.ones(1000, 1000), diagonal=1).byte())
        self.register_buffer("zero_mask", torch.zeros(1).byte())
        self.norm = nn.LayerNorm(hidden_size, eps=1e-6)
    def forward(self, tgt, enc_out, src_pad, tgt_pad, previous=None, timestep=0):
        """Decode `tgt`; returns (normed output, stacked per-layer cache).

        `previous` is the cache returned by an earlier call (one slice per
        layer along dim 1); when given, `tgt` is a single new step and
        masking is disabled because only valid past positions are cached.
        """
        output = self.embedding(tgt, timestep)
        tgt_len = tgt.size(1)
        src_mask = src_pad.unsqueeze(1)
        tgt_mask = tgt_pad.unsqueeze(1)
        upper_triangle = self.upper_triangle[:tgt_len, :tgt_len]
        # tgt mask: 0 if not upper and not pad
        tgt_mask = torch.gt(tgt_mask + upper_triangle, 0)
        saved_inputs = []
        for i in range(self.num_layers):
            prev_layer = None if previous is None else previous[:, i]
            # Incremental decoding: nothing needs masking, every cached
            # position is a legitimate past token.
            tgt_mask = tgt_mask if previous is None else self.zero_mask
            output, all_input = self.layers[i](output, enc_out, src_mask, tgt_mask, prev_layer)
            saved_inputs.append(all_input)
        return self.norm(output), torch.stack(saved_inputs, dim=1)
class MultiHeadedAttention(nn.Module):
    """Scaled dot-product attention with `head_count` parallel heads."""

    def __init__(self, head_count, model_dim, dropout):
        super(MultiHeadedAttention, self).__init__()
        self.dim_per_head = model_dim // head_count
        self.head_count = head_count
        self.linear_q = nn.Linear(model_dim, model_dim, bias=False)
        self.linear_k = nn.Linear(model_dim, model_dim, bias=False)
        self.linear_v = nn.Linear(model_dim, model_dim, bias=False)
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)
        self.final_linear = nn.Linear(model_dim, model_dim)
        self.reset_parameters()

    def reset_parameters(self):
        for layer in (self.linear_q, self.linear_k, self.linear_v, self.final_linear):
            nn.init.xavier_uniform_(layer.weight)

    def forward(self, query, memory=None, mask=None):
        """Attend from `query` to `memory` (self-attention when memory is None).

        `mask` has shape B x L_q x L_k with nonzero entries marking blocked
        positions; callers always pass one.
        """
        if memory is None:
            memory = query

        def to_heads(t):
            # B x L x D => B x h x L x d
            return t.view(t.size(0), -1, self.head_count, self.dim_per_head).transpose(1, 2)

        def from_heads(t):
            # B x h x L x d => B x L x D
            return t.transpose(1, 2).contiguous().view(t.size(0), -1, self.head_count * self.dim_per_head)

        # Project and split heads; scale queries by 1/sqrt(d).
        q = to_heads(self.linear_q(query)) / math.sqrt(self.dim_per_head)
        k = to_heads(self.linear_k(memory))
        v = to_heads(self.linear_v(memory))
        # Raw attention scores, then mask out blocked positions.
        scores = torch.matmul(q, k.transpose(2, 3))
        scores.masked_fill_(mask.unsqueeze(1).expand_as(scores), -1e18)
        # Attention dropout, then weighted sum of values.
        attn = self.dropout(self.softmax(scores))
        return self.final_linear(from_heads(torch.matmul(attn, v)))
| 6,591 | 35.622222 | 120 | py |
NCLS-Corpora | NCLS-Corpora-master/code/beaver-2task+/beaver/model/nmt_model.py | # -*- coding: utf-8 -*-
from typing import Dict
import torch
import torch.nn as nn
from beaver.model.embeddings import Embedding
from beaver.model.transformer import Decoder, Encoder
class Generator(nn.Module):
    """Projects decoder states to per-token log-probabilities."""

    def __init__(self, hidden_size: int, tgt_vocab_size: int):
        super(Generator, self).__init__()
        self.vocab_size = tgt_vocab_size
        self.linear_hidden = nn.Linear(hidden_size, tgt_vocab_size)
        self.lsm = nn.LogSoftmax(dim=-1)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.linear_hidden.weight)

    def forward(self, dec_out):
        # Linear projection followed by a log-softmax over the vocabulary.
        return self.lsm(self.linear_hidden(dec_out))
class NMTModel(nn.Module):
    """Two-task NMT model: one shared encoder, a decoder + generator per task."""
    def __init__(self, encoder: Encoder,
                 task1_decoder: Decoder,
                 task2_decoder: Decoder,
                 task1_generator: Generator,
                 task2_generator: Generator):
        super(NMTModel, self).__init__()
        self.encoder = encoder
        self.task1_decoder = task1_decoder
        self.task2_decoder = task2_decoder
        self.task1_generator = task1_generator
        self.task2_generator = task2_generator
    def forward(self, source, target, flag):
        """Teacher-forced pass; `flag` True selects task1, False selects task2.

        Returns log-probabilities over the selected task's target vocabulary.
        """
        target = target[:, :-1]  # shift left
        source_pad = source.eq(self.encoder.embedding.word_padding_idx)
        target_pad = target.eq(self.task1_decoder.embedding.word_padding_idx)
        enc_out = self.encoder(source, source_pad)
        if flag:  # task1
            decoder_outputs, _ = self.task1_decoder(target, enc_out, source_pad, target_pad)
            return self.task1_generator(decoder_outputs)
        else:  # task2
            decoder_outputs, _ = self.task2_decoder(target, enc_out, source_pad, target_pad)
            return self.task2_generator(decoder_outputs)
    @classmethod
    def load_model(cls, model_opt,
                   pad_ids: Dict[str, int],
                   vocab_sizes: Dict[str, int],
                   checkpoint=None):
        """Construct the model from options, optionally restoring a checkpoint.

        `checkpoint` may be a state dict; when None and `model_opt.train_from`
        is set, the checkpoint is loaded from disk instead.
        """
        source_embedding = Embedding(embedding_dim=model_opt.hidden_size,
                                     dropout=model_opt.dropout,
                                     padding_idx=pad_ids["src"],
                                     vocab_size=vocab_sizes["src"])
        target_embedding_task2 = Embedding(embedding_dim=model_opt.hidden_size,
                                           dropout=model_opt.dropout,
                                           padding_idx=pad_ids["task2_tgt"],
                                           vocab_size=vocab_sizes["task2_tgt"])
        if model_opt.mono:
            # Monolingual summarization: task1 shares the source-side embedding.
            target_embedding_task1 = source_embedding
        else:
            target_embedding_task1 = target_embedding_task2
        encoder = Encoder(model_opt.layers,
                          model_opt.heads,
                          model_opt.hidden_size,
                          model_opt.dropout,
                          model_opt.ff_size,
                          source_embedding)
        task1_decoder = Decoder(model_opt.layers,
                                model_opt.heads,
                                model_opt.hidden_size,
                                model_opt.dropout,
                                model_opt.ff_size,
                                target_embedding_task1)
        task2_decoder = Decoder(model_opt.layers,
                                model_opt.heads,
                                model_opt.hidden_size,
                                model_opt.dropout,
                                model_opt.ff_size,
                                target_embedding_task2)
        task1_generator = Generator(model_opt.hidden_size, vocab_sizes["task1_tgt"])
        task2_generator = Generator(model_opt.hidden_size, vocab_sizes["task2_tgt"])
        model = cls(encoder, task1_decoder, task2_decoder, task1_generator, task2_generator)
        if checkpoint is None and model_opt.train_from:
            # Load on CPU regardless of where the checkpoint was saved.
            checkpoint = torch.load(model_opt.train_from, map_location=lambda storage, loc: storage)
            model.load_state_dict(checkpoint["model"])
        elif checkpoint is not None:
            model.load_state_dict(checkpoint)
        return model
| 4,315 | 38.962963 | 100 | py |
rpn_bo | rpn_bo-main/Code and results/brusselator_pde_MLP.py | import os
# Keep JAX from pre-allocating the whole GPU memory pool up front.
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']='false'
#
from jax import vmap, random, jit
from jax import numpy as np
import numpy as onp
from rpn_bo_utilities import uniform_prior
from rpn_bo_models import EnsembleRegression
from rpn_bo_dataloaders import BootstrapLoader
from rpn_bo_acquisitions import MCAcquisition
onp.random.seed(1234)
# Helper functions
# Standardization helpers, vmapped over the ensemble axis (first dimension).
normalize = vmap(lambda x, mu, std: (x-mu)/std, in_axes=(0,0,0))
denormalize = vmap(lambda x, mu, std: x*std + mu, in_axes=(0,0,0))
# vectorial input space dimension and its search space
dim = 4
lb = np.array([0.1, 0.1, 0.01, 0.01])
ub = np.array([5.0, 5.0, 5.0, 5.0])
p_x = uniform_prior(lb, ub)
bounds = (lb, ub)
# vectorial output space dimension
N_y = 64
dim_y = 2*N_y**2
# function mapping the vectorial input x to the vectorial output consisting of the solution to the 2D Brusselator PDE evaluated at N_yxN_y grid points
def f(x):
    """Solve the 2D Brusselator PDE for parameters x = (a, b, d0, d1).

    Returns the flattened (2*N_y*N_y,) solution fields (u, v) at t=20.
    NaN entries (diverged simulations) are replaced by large random values
    so the optimizer strongly penalizes unstable parameter regions.
    """
    # py-pde is imported lazily so the module loads without it installed.
    from pde import PDE, FieldCollection, ScalarField, UnitGrid
    a = x[0]
    b = x[1]
    d0 = x[2]
    d1 = x[3]
    eq = PDE(
        {
            "u": f"{d0} * laplace(u) + {a} - ({b} + 1) * u + u**2 * v",
            "v": f"{d1} * laplace(v) + {b} * u - u**2 * v",
        }
    )
    # initialize state
    grid = UnitGrid([N_y, N_y])
    u = ScalarField(grid, a, label="Field $u$")
    v = b / a + 0.1 * ScalarField.random_normal(grid, label="Field $v$", seed=10)
    state = FieldCollection([u, v])
    sol = eq.solve(state, t_range=20, dt=1e-3)
    sol_tensor = []
    sol_tensor.append(sol[0].data)
    sol_tensor.append(sol[1].data)
    sol_tensor = onp.array(sol_tensor)
    # Soft penalty: swap NaNs for large-magnitude noise of the same shape.
    ss = sol_tensor[onp.isnan(sol_tensor)]
    sol_tensor[onp.isnan(sol_tensor)] = 1e5 * onp.random.randn(*ss.shape)
    return sol_tensor.flatten()
#### General simulation params ####
N = 5
prev = 0 # previous independent random runs
nTrSet = 10-prev # total independent random runs to perform
#### RPN-BO hyperparameters ####
num_restarts_acq = 500
nIter = 20
q1 = 2
nIter_q1 = nIter//q1
ensemble_size = 128
batch_size = N
fraction = 0.8
layers = [dim, 64, 64, dim_y]
nIter_RPN = 10000
options = {'criterion': 'EI', # LCB EI
           'kappa': 2.0,
           'weights': None} # exact gmm None
train_key = random.PRNGKey(0)
case = 'results/brusselator_pde_MLP'
# prediction function mapping vectorial output to scalar objective value
def output(y):
    """Scalar objective: variance of the boundary-weighted (u, v) fields."""
    fields = y.reshape((2, N_y, N_y))
    # Interior points get weight 0.1; the outermost two rows/columns get 1.0.
    w = onp.ones((2, N_y, N_y)) / 10
    w[:, [0, 1, -2, -1], :] = 1.0
    w[:, :, [0, 1, -2, -1]] = 1.0
    return np.var(w * fields)
# Main RPN-BO loop: one outer iteration per independent random restart.
for j in range(nTrSet):
    print('Train Set:',j+1)
    # Initial training data
    X = np.load(case+'/X_'+str(j+prev)+'.npy')
    y = np.load(case+'/y_'+str(j+prev)+'.npy')
    X_loc = X
    y_loc = y
    batch_size_loc = batch_size
    # list to contain BO results
    opt = []
    yo_loc = vmap(output)(y_loc)
    opt.append( np.min(yo_loc) )
    print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X_loc.shape[0], y_loc.shape[0], opt[0], opt[-1]))
    for it in range(nIter_q1):
        # Create data set
        train_key = random.split(train_key, 2)[0]
        dataset = BootstrapLoader(X_loc, y_loc, batch_size_loc, ensemble_size, fraction, 1, rng_key=train_key)
        (mu_X, sigma_X), (mu_y, sigma_y) = dataset.norm_const
        # Initialize model
        train_key = random.split(train_key, 2)[0]
        model = EnsembleRegression(layers, ensemble_size, train_key)
        # Train model
        model.train(dataset, nIter=nIter_RPN)
        # Ensemble predictive posterior of the scalar objective; closes over
        # the freshly trained model and normalization constants.
        @jit
        def predict(x):
            # accepts and returns un-normalized data
            x = np.tile(x[np.newaxis,:,:], (ensemble_size, 1, 1))
            x = normalize(x, mu_X, sigma_X)
            params = vmap(model.get_params)(model.opt_state)
            params_prior = vmap(model.get_params)(model.prior_opt_state)
            opt_params = (params, params_prior)
            samples = model.posterior(opt_params, x)
            samples = denormalize(samples, mu_y, sigma_y)
            samples = samples.reshape((samples.shape[0],samples.shape[1],2,N_y,N_y))
            # Same boundary weighting as output() above.
            weighting = onp.ones((2,N_y,N_y))/10
            weighting[:, [0, 1, -2, -1], :] = 1.0
            weighting[:, :, [0, 1, -2, -1]] = 1.0
            weighted_samples = weighting * samples
            return np.var(weighted_samples, axis=(-3,-2,-1))[:,:,None]
        # Fit GMM if needed for weighted acquisition functions
        weights_fn = lambda x: np.ones(x.shape[0],)
        kappa = options['kappa']
        args = (kappa,)
        acq_model = MCAcquisition(predict,
                                  bounds,
                                  *args,
                                  acq_fn = options['criterion'],
                                  output_weights=weights_fn)
        # Optimize acquisition with L-BFGS to inquire new point(s)
        new_X = acq_model.next_best_point(q = q1, num_restarts = num_restarts_acq)
        new_X = new_X.reshape(q1,dim)
        # Obtain the new data
        new_y = []
        for i in range(new_X.shape[0]):
            new_y.append(f(new_X[i,:]))
        new_y = onp.array(new_y)
        # Augment training data
        X_loc = np.concatenate([X_loc, new_X], axis = 0) # augment the vectorial input dataset during the BO process
        y_loc = np.concatenate([y_loc, new_y], axis = 0) # augment the vectorial output dataset during the BO process
        yo_loc = vmap(output)(y_loc)
        opt.append( np.min(yo_loc) ) # augment the objective values of the constructed dataset during the BO process
        batch_size_loc += q1
        print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X_loc.shape[0], y_loc.shape[0], opt[0], opt[-1]))
    np.save(case+'/opt_'+str(j+prev)+'.npy',onp.array(opt)) # save the constructed objective tensor by RPN-BO
| 6,056 | 32.65 | 150 | py |
rpn_bo | rpn_bo-main/Code and results/environmental_model_function_DON.py | import os
# Keep JAX from pre-allocating the whole GPU memory pool up front.
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']='false'
from jax import vmap, random, jit
from jax import numpy as np
import numpy as onp
from rpn_bo_utilities import uniform_prior
from rpn_bo_models import ParallelDeepOnet
from rpn_bo_dataloaders import DataGenerator_batch
from rpn_bo_acquisitions import MCAcquisition
onp.random.seed(1234)
# Helper functions
# Standardization helpers, vmapped over the ensemble axis (first dimension).
normalize = vmap(lambda x, mu, std: (x-mu)/std, in_axes=(0,0,0))
denormalize = vmap(lambda x, mu, std: x*std + mu, in_axes=(0,0,0))
# vectorial input space dimension and its search space
dim = 4
lb = np.array([7.0, 0.02, 0.01, 30.01])
ub = np.array([13.0, 0.12, 3.0, 30.295])
# Ground-truth parameters used to generate the target observations.
true_x = np.array([10.0, 0.07, 1.505, 30.1525])
p_x = uniform_prior(lb, ub)
bounds = (lb, ub)
# pollutant concentration function
def c(s,t,M,D,L,tau):
    """Pollutant concentration at location s and time t (two-spill model).

    First spill of mass M at s=0, t=0; second spill at s=L, t=tau.  The
    second diffusion term only contributes once t > tau.
    """
    first = M/np.sqrt(4*np.pi*D*t)*np.exp(-s**2/4/D/t)
    second = M/np.sqrt(4*np.pi*D*(t-tau))*np.exp(-(s-L)**2/4/D/(t-tau))
    # np.where evaluates both branches; `second` is NaN for t <= tau but
    # is discarded there.
    return np.where(t>tau, first+second, first)
# Physical measurement grid: 3 locations x 4 observation times.
s1 = np.array([0.0, 1.0, 2.5])
t1 = np.array([15.0, 30.0, 45.0, 60.0])
ST = np.meshgrid(s1, t1)
# STo has shape (3, 4, 2): (location, time) pairs for every grid point.
STo = np.array(ST).T
# function mapping the vectorial input x to the vectorial output consisting of the concentration evaluation at 3x4 grid points
def f(x):
    """Concentration field for parameters x = (M, D, L, tau) on the 3x4 grid."""
    rows = []
    for i in range(STo.shape[0]):
        # One row per measurement location, one column per time point.
        row = [c(STo[i, j, 0], STo[i, j, 1], x[0], x[1], x[2], x[3])
               for j in range(STo.shape[1])]
        rows.append(np.array(row))
    return np.array(rows)
#### General simulation params ####
N = 5
prev = 0 # previous independent random runs
nTrSet = 10-prev # total independent random runs to perform
#### DeepONet functional evaluation points ####
m = 4
P1 = 4
P2 = 3
Ms = 2.5
Mt = 60.0
soln_dim = 1
# NOTE: s1/t1 are rebound here to their [0, 1]-normalized versions; the
# unnormalized grid defined earlier is only used inside f via STo.
s1 = np.array([0.0, 1.0, 2.5])/Ms
t1 = np.array([15.0, 30.0, 45.0, 60.0])/Mt
Tm, Xm = np.meshgrid(t1, s1)
y_test_sample = np.hstack([Tm.flatten()[:,None], Xm.flatten()[:,None]])
#### RPN-BO hyperparameters ####
num_restarts_acq = 500
nIter = 30
q1 = 1
nIter_q1 = nIter//q1
N_ensemble = 128
fraction = 0.8
branch_layers = [m, 64, 64]
trunk_layers = [2, 64, 64]
nIter_RPN = 5000
options = {'criterion': 'TS', # LCB EI TS
           'kappa': 2.0,
           'weights': None} # exact gmm None
train_key = random.PRNGKey(0)
key_TS = random.PRNGKey(123)
case = 'results/environmental_model_function_DON'
# Target observations generated from the ground-truth parameters.
true_y = f(true_x)
true_y = np.expand_dims(true_y, axis = 2)
# Main RPN-BO loop: one outer iteration per independent random restart.
for j in range(nTrSet):
    print('Train Set:',j+1)
    # Initial training data
    X = np.load(case+'/X_'+str(j+prev)+'.npy')
    y = np.load(case+'/y_'+str(j+prev)+'.npy')
    X = np.array(X)
    y = np.array(y)
    y = y.reshape(y.shape[0],P2,P1)
    y = np.expand_dims(y, axis = 3)
    X_loc = X
    y_loc = y
    batch_size_loc = 12 # max value is P1*P2
    # list to contain BO results
    opt = []
    yo_loc = np.sum((y_loc-true_y)**2, axis = (1,2))
    opt.append( np.min(yo_loc) )
    for it in range(nIter_q1):
        # Per-iteration normalization constants from the current dataset.
        sigma_X = X_loc.std(0)
        mu_X = X_loc.mean(0)
        sigma_y = y_loc.std(0)
        mu_y = y_loc.mean(0)
        # Create data set
        usol_train = (y_loc-mu_y)/sigma_y
        u0_train = (X_loc-mu_X)/sigma_X
        batch_size_all_loc = int(fraction*12*X_loc.shape[0])
        dataset = DataGenerator_batch(usol_train, u0_train, s1, t1, P1, P2, batch_size_loc, batch_size_all_loc, N_ensemble)
        # Initialize model
        model = ParallelDeepOnet(branch_layers, trunk_layers, N_ensemble, soln_dim)
        # Train model
        model.train(dataset, nIter=nIter_RPN)
        # Ensemble posterior of the sum-of-squares misfit to true_y.
        @jit
        def predict(x):
            x = (x-mu_X)/sigma_X
            u_test_sample = np.tile(x, (P1*P2, 1))
            samples = model.predict_s(u_test_sample, y_test_sample) # N_ensemble x P1*P2 x soln_dim
            samples = samples.reshape((samples.shape[0],P1,P2,samples.shape[-1])) # N_ensemble x P1 x P2 x soln_dim
            samples = np.transpose(samples, (0, 2, 1, 3)) # N_ensemble x P2 x P1 x soln_dim
            samples = sigma_y*samples+mu_y
            samples = np.sum((samples-true_y)**2, axis = (1,2))[:,:,None]
            return samples
        # Fit GMM if needed for weighted acquisition functions
        weights_fn = lambda x: np.ones(x.shape[0],)
        if options['criterion']=='TS':
            key_TS = random.split(key_TS, 2)[0]
            args = (key_TS,)
        else:
            kappa = options['kappa']
            args = (kappa,)
        acq_model = MCAcquisition(predict,
                                  bounds,
                                  *args,
                                  acq_fn = options['criterion'],
                                  output_weights=weights_fn)
        # Optimize acquisition with L-BFGS to inquire new point(s)
        new_X = acq_model.next_best_point(q = q1, num_restarts = num_restarts_acq)
        new_X = new_X.reshape(q1,dim)
        # Obtain the new vectorial output
        new_y = vmap(f)(new_X)
        new_y = np.expand_dims(new_y, axis = 3)
        # Augment training data
        X_loc = np.concatenate([X_loc, new_X], axis = 0) # augment the vectorial input dataset during the BO process
        y_loc = np.concatenate([y_loc, new_y], axis = 0) # augment the vectorial output dataset during the BO process
        # NOTE(review): this sums over axis 1 only, whereas the initial
        # objective above uses axis=(1,2) — looks inconsistent; confirm
        # which reduction is intended before reusing these results.
        yo_loc = np.sum((y_loc-true_y)**2, axis = 1)
        opt.append( np.min(yo_loc) ) # augment the objective values of the constructed dataset during the BO process
        print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X_loc.shape[0], y_loc.shape[0], opt[0], opt[-1]))
    np.save(case+'/opt_'+str(j+prev)+'.npy',np.array(opt)) # save the constructed objective tensor by RPN-BO
| 5,739 | 32.764706 | 126 | py |
rpn_bo | rpn_bo-main/Code and results/brusselator_pde_DON.py | import os
# Keep JAX from pre-allocating the whole GPU memory pool up front.
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']='false'
from jax import vmap, random, jit
from jax import numpy as np
from pyDOE import lhs
import numpy as onp
from rpn_bo_utilities import uniform_prior, output_weights
from rpn_bo_models import ParallelDeepOnet
from rpn_bo_dataloaders import DataGenerator_batch
from rpn_bo_acquisitions import MCAcquisition
# vectorial input space dimension and its search space
dim = 4
lb = np.array([0.1, 0.1, 0.01, 0.01])
ub = np.array([5.0, 5.0, 5.0, 5.0])
p_x = uniform_prior(lb, ub)
# vectorial output space dimension and DeepONet functional evaluation points
N_y = 64
output_dim = (N_y, N_y, 2)
soln_dim = 2
P1 = output_dim[0]
P2 = output_dim[1]
# Normalized (s, t) coordinates of the trunk-net evaluation grid.
arr_s = np.linspace(0, 1, P1)
arr_t = np.linspace(0, 1, P2)
s_grid, t_grid = np.meshgrid(arr_s, arr_t)
y_grid = np.concatenate([s_grid[:, :, None], t_grid[:, :, None]], axis=-1).reshape((-1, 2))
mu_grid = y_grid.mean(0)
sigma_grid = y_grid.std(0)
y_grid = (y_grid - mu_grid) / sigma_grid
# function mapping the vectorial input x to the vectorial output consisting of the solution to the 2D Brusselator PDE evaluated at N_yxN_y grid points
def f(x):
    """Solve the 2D Brusselator PDE for parameters x = (a, b, d0, d1).

    Returns the (N_y, N_y, 2) solution fields (u, v) at t=20, channels last.
    NaN entries (diverged simulations) are replaced by large random values
    so the optimizer strongly penalizes unstable parameter regions.
    """
    # py-pde is imported lazily so the module loads without it installed.
    from pde import PDE, FieldCollection, ScalarField, UnitGrid
    a = x[0]
    b = x[1]
    d0 = x[2]
    d1 = x[3]
    eq = PDE(
        {
            "u": f"{d0} * laplace(u) + {a} - ({b} + 1) * u + u**2 * v",
            "v": f"{d1} * laplace(v) + {b} * u - u**2 * v",
        }
    )
    # initialize state
    grid = UnitGrid([N_y, N_y])
    u = ScalarField(grid, a, label="Field $u$")
    v = b / a + 0.1 * ScalarField.random_normal(grid, label="Field $v$", seed=10)
    state = FieldCollection([u, v])
    sol = eq.solve(state, t_range=20, dt=1e-3)
    sol_tensor = []
    sol_tensor.append(sol[0].data)
    sol_tensor.append(sol[1].data)
    sol_tensor = onp.array(sol_tensor)
    # Soft penalty: swap NaNs for large-magnitude noise of the same shape.
    ss = sol_tensor[onp.isnan(sol_tensor)]
    sol_tensor[onp.isnan(sol_tensor)] = 1e5 * onp.random.randn(*ss.shape)
    return np.transpose(np.array(sol_tensor),(1,2,0))
#### General simulation params ####
N = 5
prev = 0
nTrSet = 30-prev
#### RPN-BO hyperparameters ####
nIter = 30
N_ensemble = 16
batch_size = P1 * P2
batch_size_all = P1 * P2 * N
branch_layers = [dim, 64, 64]
trunk_layers = [2, 64, 64]
nIter_RPN = 1000
acq_fct = 'LCB' # 'LCB', 'TS', 'LW_LCB'
case = 'results/brusselator_pde_DON'
# prediction function mapping vectorial output to scalar objective value
def output(new_y):
    """Scalar objective: variance of the boundary-weighted (u, v) fields."""
    fields = np.transpose(new_y, (2, 0, 1))  # (N_y, N_y, 2) -> (2, N_y, N_y)
    # Interior points get weight 0.1; the outermost two rows/columns get 1.0.
    w = onp.ones((2, 64, 64)) / 10
    w[:, [0, 1, -2, -1], :] = 1.0
    w[:, :, [0, 1, -2, -1]] = 1.0
    return np.var(w * fields, axis=(-3, -2, -1))
# Outer loop: independent random BO runs; inner loop: BO iterations.
for j in range(nTrSet):
    # Initial training data
    onp.random.seed(j)
    X = lb + (ub - lb) * lhs(dim, N)
    y = np.array([f(x) for x in X])
    opt = []
    opt.append(np.min(np.array([output(yi) for yi in y])))
    # NOTE(review): five key streams are split here but only keys_ts and
    # keys_loader are consumed below — confirm the others are intentional.
    keys, keys_ts, keys_trans, keys_noise, keys_loader = random.split(random.PRNGKey(j), nIter * N).reshape((N, nIter, -1))
    print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X.shape[0], y.shape[0], opt[0], opt[-1]))
    for it in range(nIter):
        # Create data set
        # Standardize inputs/outputs with the statistics of the current dataset.
        mu_X = X.mean(0)
        sigma_X = X.std(0)
        mu_y = y.mean(0)
        sigma_y = y.std(0)
        u0_train = (X - mu_X) / sigma_X
        usol_train = (y - mu_y) / sigma_y
        dataset = DataGenerator_batch(usol_train, u0_train, arr_s, arr_t, P1=P1, P2=P2, batch_size=batch_size, batch_size_all=batch_size_all, N_ensemble=N_ensemble, y=y_grid, rng_key=keys_loader[it])
        # Initialize model
        model = ParallelDeepOnet(branch_layers, trunk_layers, N_ensemble, soln_dim)
        # Train model
        model.train(dataset, nIter=nIter_RPN)
        # Ensemble prediction of the scalar objective (weighted variance of
        # the predicted field) for a batch of candidate inputs x.
        @jit
        def predict(x):
            x = (x - mu_X) / sigma_X
            u_test_sample = vmap(lambda x: np.tile(x, (P1 * P2, 1)))(x)
            samples = model.predict_s(u_test_sample.reshape((-1, dim)), np.tile(y_grid, (x.shape[0], 1)))
            samples = samples.reshape((-1, P1, P2, samples.shape[-1]))
            samples = vmap(lambda s: s * sigma_y + mu_y)(samples)
            samples = samples.reshape((N_ensemble, x.shape[0], P1, P2, samples.shape[-1]))
            samples = np.transpose(samples, (0, 1, 4, 2, 3))
            # Down-weight interior grid points by 10x, keep the boundary at 1.
            weighting = onp.ones((2, 64, 64)) / 10
            weighting[:, [0, 1, -2, -1], :] = 1.0
            weighting[:, :, [0, 1, -2, -1]] = 1.0
            weighted_samples = weighting * samples
            return np.var(weighted_samples, axis=(-3, -2, -1))[:, :, None]
        kappa = 2
        weights_fn = lambda x: np.ones(x.shape[0])
        if acq_fct == 'TS':
            args = (keys_ts[it], )
            num_restarts = 100
            acq_fn = 'TS'
        elif acq_fct == 'LCB':
            weights_fn = lambda x: np.ones(x.shape[0],)
            args = (kappa, )
            num_restarts = 100
            acq_fn = 'LCB'
        elif acq_fct == 'LW_LCB':
            # Likelihood-weighted LCB: weights come from a GMM fit to the
            # mean ensemble prediction over prior samples.
            predict_fn = lambda x: np.mean(predict(x), axis=0)
            num_samples = 100
            weights_fn = output_weights(predict_fn, uniform_prior(lb, ub).pdf, (lb, ub), method='gmm', num_samples=num_samples, num_comp=5)
            args = (kappa, )
            num_restarts = 100
            acq_fn = 'LCB'
        acq_model = MCAcquisition(predict, (lb, ub), *args, acq_fn=acq_fn, output_weights=weights_fn)
        # Optimize acquisition with L-BFGS to inquire new point(s)
        new_X = acq_model.next_best_point(q=1, num_restarts=num_restarts, seed_id=100 * j + it)
        # Obtain the new data
        new_y = f(new_X)
        # Augment training data
        X = np.concatenate([X, new_X[None, :]]) # augment the vectorial input dataset during the BO process
        y = np.concatenate([y, new_y[None, :, :, :]]) # augment the vectorial output dataset during the BO process
        opt.append(np.minimum(opt[-1],output(new_y))) # augment the objective values of the constructed dataset during the BO process
        print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X.shape[0], y.shape[0], opt[0], opt[-1]))
        del model, dataset
    np.save(case+'/opt_'+str(j+prev)+'.npy',onp.array(opt)) # save the constructed objective tensor by RPN-BO
| 6,387 | 35.090395 | 199 | py |
rpn_bo | rpn_bo-main/Code and results/environmental_model_function_MLP.py | import os
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']='false'
from jax import vmap, random, jit
from jax import numpy as np
import numpy as onp
from rpn_bo_utilities import uniform_prior
from rpn_bo_models import EnsembleRegression
from rpn_bo_dataloaders import BootstrapLoader
from rpn_bo_acquisitions import MCAcquisition
onp.random.seed(1234)
# Helper functions
# Per-ensemble-member standardization/unstandardization (vmapped over axis 0).
normalize = vmap(lambda x, mu, std: (x-mu)/std, in_axes=(0,0,0))
denormalize = vmap(lambda x, mu, std: x*std + mu, in_axes=(0,0,0))
# vectorial input space dimension and its search space
# x = (M, D, L, tau): spill mass, diffusion rate, second-spill location and time
# (see how f below passes x[0..3] into the concentration function c).
dim = 4
lb = np.array([7.0, 0.02, 0.01, 30.01])
ub = np.array([13.0, 0.12, 3.0, 30.295])
true_x = np.array([10.0, 0.07, 1.505, 30.1525])  # parameters generating the target response true_y
p_x = uniform_prior(lb, ub)
bounds = (lb, ub)
# vectorial output space dimension
dim_y = 12  # concentration measured on a 3 (space) x 4 (time) grid
# pollutant concentration function
def c(s, t, M, D, L, tau):
    """Pollutant concentration at location s and time t.

    Superposition of two instantaneous spills of mass M diffusing with
    coefficient D: one at (location 0, time 0) and one at (location L,
    time tau).  The second term only contributes once t > tau.
    """
    spill1 = M / np.sqrt(4 * np.pi * D * t) * np.exp(-s ** 2 / (4 * D * t))
    dt2 = t - tau
    spill2 = M / np.sqrt(4 * np.pi * D * dt2) * np.exp(-(s - L) ** 2 / (4 * D * dt2))
    return np.where(t > tau, spill1 + spill2, spill1)
s1 = np.array([0.0, 1.0, 2.5])
t1 = np.array([15.0, 30.0, 45.0, 60.0])
# All 12 (s, t) measurement pairs, one per row.
ST = np.array(np.meshgrid(s1, t1)).T.reshape(-1, 2)
# function mapping the vectorial input x to the vectorial output consisting of the concentration evaluation at 3x4 grid points
def f(x):
    """Evaluate c at every (s, t) grid pair for x = (M, D, L, tau)."""
    return np.array([c(s_i, t_i, x[0], x[1], x[2], x[3]) for s_i, t_i in ST])
#### General simulation params ####
N = 5  # number of initial design points per run
prev = 0 # previous independent random runs
nTrSet = 10-prev # total independent random runs to perform
#### RPN-BO hyperparameters ####
num_restarts_acq = 500  # L-BFGS restarts when maximizing the acquisition
nIter = 30  # BO iterations per run
q1 = 1  # points acquired per BO iteration
nIter_q1 = nIter//q1
ensemble_size = 128  # number of randomized-prior networks
batch_size = N
fraction = 0.8  # bootstrap fraction used by BootstrapLoader
layers = [dim, 64, 64, 64, 64, dim_y]  # MLP surrogate architecture
nIter_RPN = 5000  # surrogate training iterations per BO step
options = {'criterion': 'LCB', # LCB EI TS
           'kappa': 2.0,
           'weights': None} # exact gmm None
train_key = random.PRNGKey(0)
key_TS = random.PRNGKey(123)
case = 'results/environmental_model_function_MLP'  # results directory
true_y = f(true_x)  # target response; the objective below is ||y - true_y||^2
# Outer loop: independent random BO runs; inner loop: BO iterations.
for j in range(nTrSet):
    print('Train Set:',j+1)
    # Initial training data
    X = np.load(case+'/X_'+str(j+prev)+'.npy')
    y = np.load(case+'/y_'+str(j+prev)+'.npy')
    X_loc = X
    y_loc = y
    batch_size_loc = batch_size
    # list to contain BO results
    opt = []
    # Scalar objective: squared distance to the ground-truth response.
    yo_loc = np.sum((y_loc-true_y)**2, axis = 1)
    opt.append( np.min(yo_loc) )
    print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X_loc.shape[0], y_loc.shape[0], opt[0], opt[-1]))
    for it in range(nIter_q1):
        # Create data set
        train_key = random.split(train_key, 2)[0]
        dataset = BootstrapLoader(X_loc, y_loc, batch_size_loc, ensemble_size, fraction, 1, rng_key=train_key)
        (mu_X, sigma_X), (mu_y, sigma_y) = dataset.norm_const
        # Initialize model
        train_key = random.split(train_key, 2)[0]
        model = EnsembleRegression(layers, ensemble_size, train_key)
        # Train model
        model.train(dataset, nIter=nIter_RPN)
        # Ensemble prediction of the scalar objective for candidate inputs.
        @jit
        def predict(x):
            # accepts and returns un-normalized data
            x = np.tile(x[np.newaxis,:,:], (ensemble_size, 1, 1))
            x = normalize(x, mu_X, sigma_X)
            params = vmap(model.get_params)(model.opt_state)
            params_prior = vmap(model.get_params)(model.prior_opt_state)
            opt_params = (params, params_prior)
            samples = model.posterior(opt_params, x)
            samples = denormalize(samples, mu_y, sigma_y)
            samples = np.sum((samples-true_y)**2, axis = 2)[:,:,None]
            return samples
        # Fit GMM if needed for weighted acquisition functions
        weights_fn = lambda x: np.ones(x.shape[0],)
        if options['criterion']=='TS':
            key_TS = random.split(key_TS, 2)[0]
            args = (key_TS,)
        else:
            kappa = options['kappa']
            args = (kappa,)
        acq_model = MCAcquisition(predict,
                                  bounds,
                                  *args,
                                  acq_fn = options['criterion'],
                                  output_weights=weights_fn)
        # Optimize acquisition with L-BFGS to inquire new point(s)
        new_X = acq_model.next_best_point(q = q1, num_restarts = num_restarts_acq)
        new_X = new_X.reshape(q1,dim)
        # Obtain the new vectorial output
        new_y = vmap(f)(new_X)
        # Augment training data
        X_loc = np.concatenate([X_loc, new_X], axis = 0) # augment the vectorial input dataset during the BO process
        y_loc = np.concatenate([y_loc, new_y], axis = 0) # augment the vectorial output dataset during the BO process
        yo_loc = np.sum((y_loc-true_y)**2, axis = 1)
        opt.append( np.min(yo_loc) ) # augment the objective values of the constructed dataset during the BO process
        batch_size_loc += q1
        print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X_loc.shape[0], y_loc.shape[0], opt[0], opt[-1]))
    np.save(case+'/opt_'+str(j+prev)+'.npy',np.array(opt)) # save the constructed objective tensor by RPN-BO
| 5,266 | 33.424837 | 126 | py |
rpn_bo | rpn_bo-main/Code and results/rpn_bo_architectures.py | from jax import numpy as np
from jax import random
def MLP(layers, activation=np.tanh):
    """Build a fully-connected network as an (init, apply) pair.

    `layers` lists the width of every layer (input first, output last);
    `activation` is applied between layers but not after the last one.
    `init(rng_key)` returns a list of (W, b) tuples with Glorot-scaled
    weights and zero biases; `apply(params, inputs)` is the forward pass.
    """
    def _init_layer(key, fan_in, fan_out):
        # Glorot/Xavier scaling; only the first subkey seeds the weights.
        subkey, _ = random.split(key)
        stddev = 1. / np.sqrt((fan_in + fan_out) / 2.)
        return stddev * random.normal(subkey, (fan_in, fan_out)), np.zeros(fan_out)

    def init(rng_key):
        # The first subkey is discarded; one subkey per layer remains.
        _, *layer_keys = random.split(rng_key, len(layers))
        return [_init_layer(k, d_in, d_out)
                for k, d_in, d_out in zip(layer_keys, layers[:-1], layers[1:])]

    def apply(params, inputs):
        *hidden, (W_out, b_out) = params
        for W, b in hidden:
            inputs = activation(np.dot(inputs, W) + b)
        return np.dot(inputs, W_out) + b_out

    return init, apply
| 783 | 33.086957 | 69 | py |
rpn_bo | rpn_bo-main/Code and results/optical_interferometer_MLP_step_0.py | from jax import numpy as np
from jax.scipy.special import logsumexp
from jax import vmap
N_y = 64  # each frame is an N_y x N_y image
xx, yy = np.meshgrid(np.arange(N_y) / N_y, np.arange(N_y) / N_y)

# Prediction function mapping a flattened 16-frame stack to the scalar
# objective: the negated soft (log-sum-exp) visibility ratio of the
# Gaussian-weighted per-frame intensities, so that lower is better.
def output(y):
    frames = y.reshape((16, N_y, N_y))
    gauss = np.exp(-((xx - 0.5) ** 2 + (yy - 0.5) ** 2) / 0.95 ** 2)
    per_frame = np.sum(gauss * frames, axis=(-1, -2))
    soft_max = logsumexp(per_frame, -1)
    soft_min = -logsumexp(-per_frame, -1)
    return -(soft_max - soft_min) / (soft_max + soft_min)
case_l = ['results/optical_interferometer_MLP']
# create new files for vectorial inputs and outputs and best objective values which will be augmented by newly acquired points during BO process
for case in case_l:
    for prev in range(5):  # one pass per independent random run
        X = np.load(case+'/X_'+str(prev)+'.npy')
        y = np.load(case+'/y_'+str(prev)+'.npy')
        # Objective value of every initial design point (vmapped over rows).
        yo = vmap(output)(y)
        np.save(case+'/opt_'+str(prev)+'.npy',np.array(np.min(yo))[None])
        np.save(case+'/X_loc_'+str(prev)+'.npy',X)
        np.save(case+'/y_loc_'+str(prev)+'.npy',y)
| 1,179 | 37.064516 | 144 | py |
rpn_bo | rpn_bo-main/Code and results/rpn_bo_acquisitions.py | from jax import numpy as np
from jax import jit, vjp, random
from jax.scipy.special import expit as sigmoid
import numpy as onp
from functools import partial
from pyDOE import lhs
from tqdm import trange
from rpn_bo_optimizers import minimize_lbfgs
class MCAcquisition:
    """Monte-Carlo acquisition functions evaluated on ensemble samples.

    `posterior(x)` must return an (ensemble_size, q, 1 + n_constraints)
    array of functional samples for a (q, d) candidate batch; column 0
    holds the objective and any remaining columns hold constraints.
    All criteria are oriented so that smaller values are better, to match
    the L-BFGS minimizer used in `next_best_point`.
    """
    def __init__(self, posterior, bounds, *args,
                 acq_fn = 'LCB', output_weights=lambda x: np.ones(x.shape[0])):
        self.posterior = posterior
        self.bounds = bounds # domain bounds
        self.args = args # arguments required by different acquisition functions
        self.acq_fn = acq_fn # a string indicating the chosen acquisition function
        self.weights = output_weights # a callable function returning the likelihood weighted weights
    def evaluate(self, x):
        """Return the scalar acquisition value for a (q, d) batch x."""
        # Inputs are (q x d), use vmap to vectorize across a batch
        # samples[:,:,0] corresponds to the objective function
        # samples[:,:,1:] corresponds to the constraints
        # samples[:,:,i] are (ensemble_size x q)
        q = x.shape[0]
        # Common acquisition functions
        if self.acq_fn == 'EI':
            # Expected improvement over the incumbent best (args[0]).
            best = self.args[0]
            samples = self.posterior(x)[:,:,0]
            reparam = np.maximum(best-samples, 0)
            EI = np.mean(np.max(reparam, axis=-1))
            return -EI
        elif self.acq_fn == 'LCB':
            # (Weighted) lower confidence bound; args[0] is the kappa weight.
            kappa = self.args[0]
            samples = self.posterior(x)[:,:,0]
            mu = np.mean(samples, axis=0, keepdims=True)
            weights = self.weights(x).reshape(1,q)
            reparam = mu - np.sqrt(0.5*np.pi*kappa) * weights * np.abs(samples - mu)
            LCB = np.mean(np.min(reparam, axis=-1))
            return LCB
        elif self.acq_fn == 'TS':
            # Thompson sampling: score one randomly chosen ensemble member.
            rng_key = self.args[0]
            samples = self.posterior(x)[:,:,0]
            idx = random.randint(rng_key, (1,), minval=0, maxval=samples.shape[0])
            reparam = samples[idx,:].reshape(1,q)
            TS = np.mean(np.min(reparam, axis=-1))
            return TS
        elif self.acq_fn == 'US':
            # Uncertainty sampling: maximize the (weighted) predictive spread.
            samples = self.posterior(x)[:,:,0]
            mu = np.mean(samples, axis=0, keepdims=True)
            weights = self.weights(x).reshape(1,q)
            reparam = np.sqrt(0.5*np.pi) * weights * np.abs(samples - mu)
            US = np.mean(np.max(reparam, axis=-1))
            return -US
        elif self.acq_fn == 'CLSF':
            # Predictive spread scaled by 1/|mean|^(1/kappa), maximized on a
            # log scale (args[0] is kappa).
            kappa = self.args[0]
            samples = self.posterior(x)[:,:,0]
            mu = np.mean(samples, axis=0, keepdims=True)
            weights = self.weights(x).reshape(1,q)
            reparam = np.abs(np.sqrt(0.5*np.pi) / (np.abs(mu)**(1.0/kappa) + 1e-8) * weights * np.abs(samples - mu))
            CLSF = np.mean(np.max(reparam, axis=-1))
            return -np.log(CLSF)
        # Constrained acquisition functions
        elif self.acq_fn == 'EIC':
            # EI multiplied by a smooth probability-of-feasibility factor.
            best = self.args[0]
            samples = self.posterior(x)
            # Objective
            objective = samples[:,:,0]
            reparam = np.maximum(best-objective, 0)
            EI = np.mean(np.max(reparam, axis=-1))
            # Constraints
            constraints = samples[:,:,1:]
            indicator = sigmoid(constraints/1e-6) # a smooth indicator function
            feasible = np.prod(np.mean(np.max(indicator, axis=1), axis=0))
            return -EI*feasible
        elif self.acq_fn == 'LCBC':
            # LCB shifted by a threshold (args[1]) times the feasibility factor.
            kappa = self.args[0]
            threshold = self.args[1]
            samples = self.posterior(x)
            # Objective
            objective = samples[:,:,0]
            mu = np.mean(objective, axis=0, keepdims=True)
            weights = self.weights(x).reshape(1,q)
            reparam = mu - threshold - np.sqrt(0.5*np.pi*kappa) * weights * np.abs(objective - mu)
            LCB = np.mean(np.min(reparam, axis=-1))
            # Constraints
            constraints = samples[:,:,1:] # (ensemble_size x q)
            indicator = sigmoid(constraints/1e-6) # a smooth indicator function
            feasible = np.prod(np.mean(np.max(indicator, axis=1), axis=0))
            return LCB*feasible
        # That's all for now..
        else:
            raise NotImplementedError
    @partial(jit, static_argnums=(0,))
    def acq_value_and_grad(self, inputs):
        """Value and reverse-mode gradient of `evaluate` at `inputs`."""
        primals, f_vjp = vjp(self.evaluate, inputs)
        grads = f_vjp(np.ones_like(primals))[0]
        return primals, grads
    # optimization is performed in the normalized input space
    def next_best_point(self, q = 1, num_restarts = 10, seed_id=0, maxfun=15000):
        """Minimize the acquisition with multi-start L-BFGS-B.

        A (q*d,)-flattened candidate batch is optimized from
        `num_restarts` latin-hypercube starting points (`seed_id` fixes
        them for reproducibility); the best local optimum is returned.
        """
        lb, ub = self.bounds
        dim = lb.shape[0]
        # Define objective that returns float64 NumPy arrays
        def objective(x):
            x = x.reshape(q, dim)
            value, grads = self.acq_value_and_grad(x)
            out = (onp.array(value, dtype=onp.float64),
                   onp.array(grads.flatten(), dtype=onp.float64))
            return out
        # Optimize with random restarts
        loc, acq = [], []
        onp.random.seed(seed_id)
        init = lb + (ub-lb)*lhs(dim, q*num_restarts)
        x0 = init.reshape(num_restarts, q, dim)
        dom_bounds = tuple(map(tuple, np.tile(np.vstack((lb, ub)).T,(q,1))))
        for i in trange(num_restarts):
            pos, val = minimize_lbfgs(objective, x0[i,:,:].flatten(), bnds = dom_bounds, maxfun=maxfun)
            loc.append(pos)
            acq.append(val)
        loc = np.vstack(loc)
        acq = np.vstack(acq)
        idx_best = np.argmin(acq)
        x_new = loc[idx_best,:]
        return x_new
| 5,600 | 42.418605 | 116 | py |
rpn_bo | rpn_bo-main/Code and results/rpn_bo_models.py | from jax import numpy as np
from jax import grad, vmap, random, jit
from jax.example_libraries import optimizers
from jax.nn import relu, gelu
from functools import partial
from tqdm import trange
import itertools
from rpn_bo_architectures import MLP
class EnsembleRegression:
    """Ensemble of randomized-prior MLP regressors (RPN surrogate).

    Every ensemble member is the sum of a trainable MLP and a frozen,
    randomly initialized "prior" MLP of the same architecture; all
    members are initialized, trained and evaluated in parallel with
    `vmap`.  Training minimizes a plain MSE per member, so the spread
    across members provides the uncertainty used by the acquisition.
    """
    def __init__(self, layers, ensemble_size, rng_key = random.PRNGKey(0), activation=np.tanh):
        """layers: list of MLP widths; ensemble_size: number of members."""
        # Network initialization and evaluation functions
        self.init, self.apply = MLP(layers, activation)
        self.init_prior, self.apply_prior = MLP(layers, activation)
        # Random keys: one independent subkey stream per purpose.
        k1, k2, k3 = random.split(rng_key, 3)
        keys_1 = random.split(k1, ensemble_size)
        keys_2 = random.split(k2, ensemble_size)
        # Bug fix: this previously split k2 again, which duplicated keys_2
        # and left k3 unused; the third key stream must come from k3.
        keys_3 = random.split(k3, ensemble_size)
        # Initialize trainable and (frozen) prior parameters per member.
        params = vmap(self.init)(keys_1)
        params_prior = vmap(self.init_prior)(keys_2)
        # Use optimizers to set optimizer initialization and update functions
        lr = optimizers.exponential_decay(1e-3, decay_steps=1000, decay_rate=0.999)
        self.opt_init, \
        self.opt_update, \
        self.get_params = optimizers.adam(lr)
        self.opt_state = vmap(self.opt_init)(params)
        self.prior_opt_state = vmap(self.opt_init)(params_prior)
        # NOTE(review): key_opt_state is threaded through step() but never
        # read there — kept for interface compatibility.
        self.key_opt_state = vmap(self.opt_init)(keys_3)
        # Logger
        self.itercount = itertools.count()
        self.loss_log = []
    # Define the forward pass
    def net_forward(self, params, params_prior, inputs):
        """Trainable network plus frozen random prior (RPN prediction)."""
        Y_pred = self.apply(params, inputs) + self.apply_prior(params_prior, inputs)
        return Y_pred
    def loss(self, params, params_prior, batch):
        """Mean-squared error of the RPN prediction over one batch."""
        inputs, targets = batch
        # Compute forward pass
        outputs = vmap(self.net_forward, (None, None, 0))(params, params_prior, inputs)
        # Compute loss
        loss = np.mean((targets - outputs)**2)
        return loss
    # Define the update step
    def step(self, i, opt_state, prior_opt_state, key_opt_state, batch):
        """One Adam update of the trainable parameters (priors stay frozen)."""
        params = self.get_params(opt_state)
        params_prior = self.get_params(prior_opt_state)
        g = grad(self.loss)(params, params_prior, batch)
        return self.opt_update(i, g, opt_state)
    def monitor_loss(self, opt_state, prior_opt_state, batch):
        """Current loss value for logging (no parameter update)."""
        params = self.get_params(opt_state)
        params_prior = self.get_params(prior_opt_state)
        loss_value = self.loss(params, params_prior, batch)
        return loss_value
    # Optimize parameters in a loop
    def train(self, dataset, nIter = 1000):
        """Train all ensemble members in parallel for nIter steps."""
        data = iter(dataset)
        pbar = trange(nIter)
        # Define vectorized SGD step across the entire ensemble
        v_step = jit(vmap(self.step, in_axes = (None, 0, 0, 0, 0)))
        v_monitor_loss = jit(vmap(self.monitor_loss, in_axes = (0, 0, 0)))
        # Main training loop
        for it in pbar:
            batch = next(data)
            self.opt_state = v_step(it, self.opt_state, self.prior_opt_state, self.key_opt_state, batch)
            # Logger
            if it % 100 == 0:
                loss_value = v_monitor_loss(self.opt_state, self.prior_opt_state, batch)
                self.loss_log.append(loss_value)
                pbar.set_postfix({'Max loss': loss_value.max()})
    # Evaluates predictions at test points
    @partial(jit, static_argnums=(0,))
    def posterior(self, params, inputs):
        """Per-member predictions; `params` is a (trainable, prior) pair."""
        params, params_prior = params
        samples = vmap(self.net_forward, (0, 0, 0))(params, params_prior, inputs)
        return samples
class ParallelDeepOnet:
    """Ensemble of randomized-prior DeepONets trained in parallel.

    Each member is a DeepONet (branch net on the input vector u, trunk
    net on 2-D grid coordinates y) plus a frozen random "prior" network
    of the same architecture; branch/trunk products are mixed into `dim`
    output channels through the linear map W.
    NOTE(review): all instances use fixed PRNG seeds (1234/123/4321/321),
    so repeated constructions initialize identically — confirm intended.
    """
    def __init__(self, branch_layers, trunk_layers, N_ensemble, dim):
        self.dim = dim
        # Network initialization and evaluation functions
        self.branch_init, self.branch_apply = MLP(branch_layers, activation=relu) # jelu
        self.branch_init_prior, self.branch_apply_prior = MLP(branch_layers, activation=relu)
        self.trunk_init, self.trunk_apply = MLP(trunk_layers, activation=relu)
        self.trunk_init_prior, self.trunk_apply_prior = MLP(trunk_layers, activation=relu)
        # Initialize
        v_branch_params = vmap(self.branch_init)(random.split(random.PRNGKey(1234), N_ensemble))
        v_branch_params_prior = vmap(self.branch_init_prior)(random.split(random.PRNGKey(123), N_ensemble))
        v_trunk_params = vmap(self.trunk_init)(random.split(random.PRNGKey(4321), N_ensemble))
        v_trunk_params_prior = vmap(self.trunk_init_prior)(random.split(random.PRNGKey(321), N_ensemble))
        # If you want to initialize the weight W with 0.1 for all elements
        W = 0.1*np.ones((N_ensemble, branch_layers[-1], self.dim))
        # If you want to initialize the weight W with Xavier initialization (This is helpful to check if the method work)
        # Because if the value of different output dimension are same, using the above W will result in same predictions.
        # glorot_stddev = 1. / np.sqrt((branch_layers[-1] + self.dim) / 2.)
        # W = glorot_stddev*random.normal(random.PRNGKey(123), (N_ensemble, branch_layers[-1], self.dim))
        v_params = (v_branch_params, v_trunk_params, W)
        v_params_prior = (v_branch_params_prior, v_trunk_params_prior)
        # Use optimizers to set optimizer initialization and update functions
        lr = optimizers.exponential_decay(1e-3,decay_steps=1000,decay_rate=0.999)
        # lr = 1e-4
        self.opt_init, \
        self.opt_update, \
        self.get_params = optimizers.adam(lr)
        self.v_opt_state = vmap(self.opt_init)(v_params)
        self.v_prior_opt_state = vmap(self.opt_init)(v_params_prior)
        # Logger
        self.itercount = itertools.count()
        self.loss_log = []
    # Define the operator net
    def operator_net(self, params, params_prior, u, y):
        """DeepONet forward pass for one (u, y) pair of one member."""
        branch_params, trunk_params, W = params
        branch_params_prior, trunk_params_prior = params_prior
        B = self.branch_apply(branch_params, u) + self.branch_apply_prior(branch_params_prior, u)
        T = self.trunk_apply(trunk_params, y) + self.trunk_apply_prior(trunk_params_prior, y)
        #outputs = np.sum(B * T)
        outputs = np.dot(B * T, W)
        return outputs
    @partial(jit, static_argnums=(0,))
    def loss(self, params, params_prior, batch):
        """Weighted MSE over one batch (w are per-sample weights)."""
        # Fetch data
        # inputs: (u, y), shape = (N, m), (N,1)
        # outputs: s, shape = (N,1)
        inputs, outputs = batch
        u, y = inputs
        s, w = outputs
        # Compute forward pass
        pred = vmap(self.operator_net, (None, None, 0, 0))(params, params_prior, u, y)
        # Compute loss
        loss = np.mean(1./w**2 * (s - pred)**2)
        return loss
    # Define a compiled update step
    # @partial(jit, static_argnums=(0,))
    def step(self, i, opt_state, prior_opt_state, batch):
        """One Adam update of the trainable parameters (prior stays frozen)."""
        params = self.get_params(opt_state)
        params_prior = self.get_params(prior_opt_state)
        g = grad(self.loss, argnums = 0)(params, params_prior, batch)
        return self.opt_update(i, g, opt_state)
    # Optimize parameters in a loop
    def train(self, dataset, nIter = 10000):
        """Train all ensemble members in parallel for nIter steps."""
        data = iter(dataset)
        pbar = trange(nIter)
        # Define v_step that vectorize the step operation
        self.v_step = jit(vmap(self.step, in_axes = [None, 0, 0, 0]))
        # Main training loop
        for it in pbar:
            batch = next(data)
            self.v_opt_state = self.v_step(it, self.v_opt_state, self.v_prior_opt_state, batch)
            # Logger
            if it % 200 == 0:
                params = vmap(self.get_params)(self.v_opt_state)
                params_prior = vmap(self.get_params)(self.v_prior_opt_state)
                branch_params_prior, trunk_params_prior = params_prior
                loss_value = vmap(self.loss, (0, 0, 0))(params, params_prior, batch)
                self.loss_log.append(loss_value)
                pbar.set_postfix({'Max loss': loss_value.max()})
    def operator_net_pred_single(self, params, params_prior, U_star, Y_star):
        """Predictions of a single member over a batch of (u, y) pairs."""
        s_pred_single = vmap(self.operator_net, (None, None, 0, 0))(params, params_prior, U_star, Y_star)
        return s_pred_single
    # Evaluates predictions at test points
    @partial(jit, static_argnums=(0,))
    def predict_s(self, U_star, Y_star):
        """Predictions of every ensemble member over a batch of (u, y) pairs."""
        params = vmap(self.get_params)(self.v_opt_state)
        params_prior = vmap(self.get_params)(self.v_prior_opt_state)
        s_pred = vmap(self.operator_net_pred_single, (0, 0, None,None))(params, params_prior, U_star, Y_star)
        return s_pred
| 8,651 | 41.411765 | 121 | py |
rpn_bo | rpn_bo-main/Code and results/optical_interferometer_DON_step_0.py | from jax import numpy as np
from jax.scipy.special import logsumexp
output_dim = (64, 64, 16)  # 16 frames of 64-by-64 images
P1 = output_dim[0]
P2 = output_dim[1]
xx, yy = np.meshgrid(np.arange(P1) / P1, np.arange(P2) / P2)

# Prediction function mapping the (16, P1, P2) frame stack to the scalar
# objective: the negated soft (log-sum-exp) visibility ratio of the
# Gaussian-weighted per-frame intensities, so that lower is better.
def output(new_y):
    gauss = np.exp(-((xx - 0.5) ** 2 + (yy - 0.5) ** 2) / 0.95 ** 2)
    per_frame = np.sum(gauss * new_y, axis=(-1, -2))
    soft_max = logsumexp(per_frame, -1)
    soft_min = -logsumexp(-per_frame, -1)
    return -(soft_max - soft_min) / (soft_max + soft_min)
case_l = ['results/optical_interferometer_DON']
# create new files for vectorial inputs and outputs and best objective values which will be augmented by newly acquired points during BO process
for case in case_l:
    for prev in range(5):  # one pass per independent random run
        X = np.load(case+'/X_'+str(prev)+'.npy')
        y = np.load(case+'/y_'+str(prev)+'.npy')
        # Outputs are stored flattened; recover (N, 16, P1, P2) to score them.
        y = y.reshape((y.shape[0],output_dim[2],P1,P2))
        errs = [output(yi) for yi in y]
        np.save(case+'/opt_'+str(prev)+'.npy',np.array(np.min(np.array(errs)))[None])
        # Store outputs channel-last, (N, P1, P2, 16), for the later BO steps.
        y = np.transpose(y, (0, 2, 3, 1))
        np.save(case+'/X_loc_'+str(prev)+'.npy',X)
        np.save(case+'/y_loc_'+str(prev)+'.npy',y)
| 1,269 | 34.277778 | 144 | py |
rpn_bo | rpn_bo-main/Code and results/optical_interferometer_MLP_all_steps.py | import os
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']='false'
from jax import vmap, random, jit
from jax import numpy as np
from jax.scipy.special import logsumexp
from jax.nn import relu
from gym_interf import InterfEnv
import numpy as onp
from rpn_bo_utilities import uniform_prior
from rpn_bo_models import EnsembleRegression
from rpn_bo_dataloaders import BootstrapLoader
from rpn_bo_acquisitions import MCAcquisition
onp.random.seed(1234)
# Helper functions
# Per-ensemble-member standardization/unstandardization (vmapped over axis 0).
normalize = vmap(lambda x, mu, std: (x-mu)/std, in_axes=(0,0,0))
denormalize = vmap(lambda x, mu, std: x*std + mu, in_axes=(0,0,0))
# vectorial input space dimension and its search space
# The first 4 entries of x are the action passed to InterfEnv.step (see f).
dim = 4
lb = -np.ones((dim,))
ub = np.ones((dim,))
p_x = uniform_prior(lb, ub)
bounds = (lb, ub)
# vectorial output space dimension
N_y = 64  # each of the 16 frames is an N_y x N_y image
xx, yy = np.meshgrid( np.arange(N_y) / N_y, np.arange(N_y) / N_y )
dim_y = 16*N_y**2  # flattened output dimension
# function mapping the vectorial input x to the vectorial output consisting of the 16 images
def f(x):
    # A fresh environment is created per call, so evaluations are independent.
    gym = InterfEnv()
    gym.reset(actions=(1e-4, 1e-4, 1e-4, 1e-4))
    action = x[:4]
    state = gym.step(action)
    # state[0] is assumed to hold the stacked camera frames — confirm
    # against gym_interf.InterfEnv.step; flattened to a 1-D vector.
    return state[0].flatten()
#### General simulation params ####
N = 15  # number of initial design points per run
prev = 0 # previous independent random runs
nTrSet = 10-prev # total independent random runs to perform
#### RPN-BO hyperparameters ####
num_restarts_acq = 500  # L-BFGS restarts for the acquisition search
nIter = 100 - N  # BO iterations: total budget of 100 evaluations per run
q1 = 1  # points acquired per BO iteration
nIter_q1 = nIter//q1
ensemble_size = 32  # number of randomized-prior networks
batch_size = N
fraction = 0.8  # bootstrap fraction for BootstrapLoader
layers = [dim, 64, 64, 16*N_y**2]  # MLP mapping 4 actions to all 16 frames
nIter_RPN = 5000  # surrogate training iterations per BO step
options = {'criterion': 'LCB', # 'TS' 'LCB',
           'kappa': 2.0,
           'weights': None, # exact gmm None
           }
train_key = random.PRNGKey(0)
key_TS = random.PRNGKey(123)
case = 'results/optical_interferometer_MLP'  # results directory
# Prediction function mapping the flattened 16-frame stack to the scalar
# objective: the negated soft (log-sum-exp) visibility ratio of the
# Gaussian-weighted per-frame intensities, so that lower is better.
def output(y):
    frames = y.reshape((16, N_y, N_y))
    gauss = np.exp(-((xx - 0.5) ** 2 + (yy - 0.5) ** 2) / 0.95 ** 2)
    per_frame = np.sum(gauss * frames, axis=(-1, -2))
    soft_max = logsumexp(per_frame, -1)
    soft_min = -logsumexp(-per_frame, -1)
    return -(soft_max - soft_min) / (soft_max + soft_min)
# Outer loop: independent random BO runs; inner loop: BO iterations.
for j in range(nTrSet):
    print('Train Set:',j+1)
    # Initial training data
    X = np.load(case+'/X_'+str(j+prev)+'.npy')
    y = np.load(case+'/y_'+str(j+prev)+'.npy')
    X_loc = X
    y_loc = y
    batch_size_loc = batch_size
    # array to contain BO results
    yo_loc = vmap(output)(y)
    opt = np.array(np.min(yo_loc))[None]
    print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X_loc.shape[0], y_loc.shape[0], opt[0], opt[-1]))
    for it in range(nIter_q1):
        # Create data set
        train_key = random.split(train_key, 2)[0]
        dataset = BootstrapLoader(X_loc, y_loc, batch_size_loc, ensemble_size, fraction, 0, rng_key=train_key)
        (mu_X, sigma_X), (mu_y, sigma_y) = dataset.norm_const
        # Initialize model
        train_key = random.split(train_key, 2)[0]
        model = EnsembleRegression(layers, ensemble_size, train_key, relu)
        # Train model
        model.train(dataset, nIter=nIter_RPN)
        # Ensemble prediction of (minus) the exponentiated visibility objective.
        @jit
        def predict(x):
            # accepts and returns un-normalized data
            x = np.tile(x[np.newaxis,:,:], (ensemble_size, 1, 1))
            x = normalize(x, mu_X, sigma_X)
            params = vmap(model.get_params)(model.opt_state)
            params_prior = vmap(model.get_params)(model.prior_opt_state)
            opt_params = (params, params_prior)
            samples = model.posterior(opt_params, x)
            samples = denormalize(samples, mu_y, sigma_y)
            samples = samples.reshape((samples.shape[0],samples.shape[1],16,N_y,N_y))
            intens = np.exp(-((xx - 0.5) ** 2 + (yy - 0.5) ** 2) / (0.95) ** 2) * samples
            ivec = np.sum(intens, axis = (-1, -2))
            smax = logsumexp(ivec, -1)
            smin = -logsumexp(-ivec, -1)
            v = np.exp( (smax - smin) / (smax + smin) )
            return -v[:,:,None]
        # Fit GMM if needed for weighted acquisition functions
        weights_fn = lambda x: np.ones(x.shape[0],)
        if options['criterion']=='TS':
            key_TS = random.split(key_TS, 2)[0]
            args = (key_TS,)
        else:
            kappa = options['kappa']
            args = (kappa,)
        acq_model = MCAcquisition(predict,
                                  bounds,
                                  *args,
                                  acq_fn = options['criterion'],
                                  output_weights=weights_fn)
        # Optimize acquisition with L-BFGS to inquire new point(s)
        new_X = acq_model.next_best_point(q = q1, num_restarts = num_restarts_acq)
        new_X = new_X.reshape(q1,dim)
        # Obtain the new vectorial output
        new_y = []
        for i in range(new_X.shape[0]):
            new_y.append(f(new_X[i,:]))
        new_y = np.array(new_y)
        # Augment training data
        X_loc = np.concatenate([X_loc, new_X], axis = 0) # augment the vectorial input dataset during the BO process
        y_loc = np.concatenate([y_loc, new_y], axis = 0) # augment the vectorial output dataset during the BO process
        opt = np.concatenate( ( opt, np.minimum(opt[-1],output(new_y[0,:]))[None] ) , axis=0 ) # augment the objective values of the constructed dataset during the BO process
        # Save augmented datasets and objective values
        np.save(case+'/X_loc_'+str(j+prev)+'.npy', X_loc) # save the constructed vectorial input dataset by RPN-BO
        np.save(case+'/y_loc_'+str(j+prev)+'.npy', y_loc) # save the constructed vectorial output dataset by RPN-BO
        np.save(case+'/opt_'+str(j+prev)+'.npy',opt) # save the constructed objective tensor by RPN-BO
        batch_size_loc += q1
        print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(j+prev), X_loc.shape[0], y_loc.shape[0], opt[0], opt[-1]))
| 5,958 | 35.335366 | 174 | py |
rpn_bo | rpn_bo-main/Code and results/optical_interferometer_DON_step_1.py | import os
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']='false'
from jax import vmap, random, jit
from jax import numpy as np
from jax.scipy.special import logsumexp
from rpn_bo_models import ParallelDeepOnet
from rpn_bo_dataloaders import DataGenerator_batch
from rpn_bo_acquisitions import MCAcquisition
from rpn_bo_utilities import uniform_prior
# vectorial input space dimension and its search space
dim = 4
lb = -np.ones((dim,))
ub = np.ones((dim,))
p_x = uniform_prior(lb, ub)
# vectorial output space dimension
output_dim = (64, 64, 16)  # 16 frames of 64x64 pixels
soln_dim = output_dim[2]  # number of DeepONet output channels (frames)
P1 = output_dim[0]
P2 = output_dim[1]
xx, yy = np.meshgrid( np.arange(P1) / P1, np.arange(P2) / P2 )
# initial training space
case = 'results/optical_interferometer_DON'
prev = 0  # index of the independent run being resumed
X = np.load(case+'/X_loc_'+str(prev)+'.npy')
y = np.load(case+'/y_loc_'+str(prev)+'.npy')
opt = np.load(case+'/opt_'+str(prev)+'.npy')
N = X.shape[0]  # number of evaluations collected so far
#### RPN-BO hyperparameters ####
num_restarts_acq = 500
q1 = 1
N_ensemble = 16
fraction = 1
branch_layers = [dim, 32, 32]
trunk_layers = [2, 32, 32]
nIter_RPN = 5000
acq_fct = 'EI'
batch_size = P1 * P2
batch_size_all = int(fraction * P1 * P2 * N)
#### DeepONet functional evaluation points ####
# Normalized (s, t) coordinates of every pixel, standardized for the trunk net.
arr_s = np.linspace(0, 1, P1)
arr_t = np.linspace(0, 1, P2)
s_grid, t_grid = np.meshgrid(arr_s, arr_t)
y_grid = np.concatenate([s_grid[:, :, None], t_grid[:, :, None]], axis=-1).reshape((-1, 2))
mu_grid = y_grid.mean(0)
sigma_grid = y_grid.std(0)
y_grid = (y_grid - mu_grid) / sigma_grid
train_key = random.PRNGKey(0)
key_TS = random.PRNGKey(123)
print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(prev), X.shape[0], y.shape[0], opt[0], np.min(opt)))
# Change random seed for different optimization iterations and different random independent runs
train_key = random.split(train_key, 2)[0]
key_TS = random.split(key_TS, 2)[0]
# NOTE(review): these loops fast-forward the key streams to the position an
# uninterrupted run would have reached (85 steps per completed run, one per
# completed iteration), so resuming reproduces the same key sequence.
for i in range(prev):
    for i in range(85):
        train_key = random.split(train_key, 2)[0]
        key_TS = random.split(key_TS, 2)[0]
for i in range(N-15):
    train_key = random.split(train_key, 2)[0]
    key_TS = random.split(key_TS, 2)[0]
# Create data set
# Inputs already lie in [-1, 1], so identity normalization is used for X;
# outputs are scaled by their global maximum magnitude.
mu_X = np.zeros(X.shape[1],)
sigma_X = np.ones(X.shape[1],)
mu_y = np.zeros((y.shape[1],y.shape[2],y.shape[3]))
sigma_y = np.max(np.abs(y)) * np.ones((y.shape[1],y.shape[2],y.shape[3]))
u0_train = (X - mu_X) / sigma_X
usol_train = (y - mu_y) / sigma_y
dataset = DataGenerator_batch(usol_train, u0_train, arr_s, arr_t, P1=P1, P2=P2, batch_size=batch_size, batch_size_all=batch_size_all, N_ensemble=N_ensemble, rng_key=train_key, y=y_grid)
# Initialize model
train_key = random.split(train_key, 2)[0]
model = ParallelDeepOnet(branch_layers, trunk_layers, N_ensemble, soln_dim)
# Train model
model.train(dataset, nIter=nIter_RPN)
# Ensemble prediction of (minus) the exponentiated visibility objective.
@jit
def predict(x):
    x = (x - mu_X) / sigma_X
    u_test_sample = vmap(lambda x: np.tile(x, (P1 * P2, 1)))(x)
    samples = model.predict_s(u_test_sample.reshape((-1, dim)), np.tile(y_grid, (x.shape[0], 1)))
    samples = samples.reshape((-1, P1, P2, samples.shape[-1]))
    samples = vmap(lambda s: s * sigma_y + mu_y)(samples)
    samples = samples.reshape((N_ensemble, x.shape[0], P1, P2, samples.shape[-1]))
    samples = np.transpose(samples, (0, 1, 4, 2, 3))
    intens = np.exp(-((xx - 0.5) ** 2 + (yy - 0.5) ** 2) / (0.95) ** 2) * samples
    ivec = np.sum(intens, axis = (-1, -2))
    smax = logsumexp(ivec, -1)
    smin = -logsumexp(-ivec, -1)
    v = np.exp( (smax - smin) / (smax + smin) )
    return -v[:, :, None]
kappa = 2
weights_fn = lambda x: np.ones(x.shape[0])
if acq_fct == 'EI':
    # NOTE(review): EI uses opt[-1] (latest best) as the incumbent.
    args = (opt[-1], )
elif acq_fct == 'TS':
    key_TS = random.split(key_TS, 2)[0]
    args = (key_TS, )
elif acq_fct == 'LCB':
    weights_fn = lambda x: np.ones(x.shape[0],)
    args = (kappa,)
acq_model = MCAcquisition(predict,
                          (lb, ub),
                          *args,
                          acq_fn=acq_fct,
                          output_weights=weights_fn)
# Optimize acquisition with L-BFGS to inquire new point(s)
new_X = acq_model.next_best_point(q=q1, num_restarts=num_restarts_acq, seed_id=85*prev + (N-15))
X = np.concatenate([X, new_X[None, :]]) # augment the vectorial input dataset during the BO process
np.save(case+'/X_loc_'+str(prev)+'.npy',X) # save the constructed vectorial input dataset by RPN-BO
| 4,309 | 33.206349 | 185 | py |
rpn_bo | rpn_bo-main/Code and results/optical_interferometer_MLP_step_2.py | from jax import numpy as np
from jax.scipy.special import logsumexp
from gym_interf import InterfEnv
# function mapping the vectorial input x to the vectorial output consisting of the 16 images
def f(x):
    # A fresh environment is created per call, so evaluations are independent.
    gym = InterfEnv()
    gym.reset(actions=(1e-4, 1e-4, 1e-4, 1e-4))
    action = x[:4]
    state = gym.step(action)
    # state[0] is assumed to hold the stacked camera frames — confirm
    # against gym_interf.InterfEnv.step; flattened to a 1-D vector.
    return state[0].flatten()
N_y = 64  # each frame is an N_y x N_y image
xx, yy = np.meshgrid(np.arange(N_y) / N_y, np.arange(N_y) / N_y)

# Prediction function mapping the flattened 16-frame stack to the scalar
# objective: the negated soft (log-sum-exp) visibility ratio of the
# Gaussian-weighted per-frame intensities, so that lower is better.
def output(y):
    frames = y.reshape((16, N_y, N_y))
    gauss = np.exp(-((xx - 0.5) ** 2 + (yy - 0.5) ** 2) / 0.95 ** 2)
    per_frame = np.sum(gauss * frames, axis=(-1, -2))
    soft_max = logsumexp(per_frame, -1)
    soft_min = -logsumexp(-per_frame, -1)
    return -(soft_max - soft_min) / (soft_max + soft_min)
case = 'results/optical_interferometer_MLP'
prev = 0 # change from 0 to 4 to consider a different random and independent run
X = np.load(case+'/X_loc_'+str(prev)+'.npy') # load vectorial inputs for the constructed dataset so far during the BO process
y = np.load(case+'/y_loc_'+str(prev)+'.npy') # load vectorial outputs for the constructed dataset so far during the BO process
new_y = f(X[-1,:]) # compute the vectorial output (the 16 images) of the newly acquired point
y = np.concatenate([y, new_y[None,:]], axis = 0) # augment the vectorial output dataset during the BO process
np.save(case+'/y_loc_'+str(prev)+'.npy',y) # save the constructed vectorial output dataset by RPN-BO
opt = np.load(case+'/opt_'+str(prev)+'.npy') # load best objective for the constructed dataset so far during the BO process
opt = np.concatenate( ( opt, np.minimum(opt[-1],output(new_y))[None] ) , axis=0 ) # running best: append min(previous best, new objective)
np.save(case+'/opt_'+str(prev)+'.npy',opt) # save the constructed objective tensor by RPN-BO
print('new_X: ', X[-1,:], 'new obj:', output(new_y), 'opt obj: ',np.min(np.array(opt))) # output the newly acquired point, its corresponding objective value, and the best objective value so far in the BO process
| 2,088 | 51.225 | 211 | py |
rpn_bo | rpn_bo-main/Code and results/optical_interferometer_DON_step_2.py | from jax import numpy as np
from jax.scipy.special import logsumexp
from gym_interf import InterfEnv
output_dim = (64, 64, 16) # 16 frames of 64 by 64 images
soln_dim = output_dim[2]  # number of camera frames (output channels)
P1 = output_dim[0]        # image height in pixels
P2 = output_dim[1]        # image width in pixels

# function mapping the vectorial input x to the vectorial output consisting of the 16 images
def f(x):
    """Run one interferometer step for mirror actions x[:4]; return the raw frames."""
    env = InterfEnv()
    env.reset(actions=(1e-4, 1e-4, 1e-4, 1e-4))
    step_result = env.step(x[:4])
    return step_result[0]

# normalized pixel-center grid shared by the objective below
xx, yy = np.meshgrid(np.arange(P1) / P1, np.arange(P2) / P2)

# prediction function mapping vectorial output to scalar obective value
def output(new_y):
    """Map a (16, P1, P2) frame stack to the negated fringe visibility."""
    gauss = np.exp(-((xx - 0.5) ** 2 + (yy - 0.5) ** 2) / 0.95 ** 2)
    ivec = (gauss * new_y).sum(axis=(-1, -2))
    smax = logsumexp(ivec, -1)    # smooth maximum over frames
    smin = -logsumexp(-ivec, -1)  # smooth minimum over frames
    return (smin - smax) / (smax + smin)
# Resume a DeepONet RPN-BO run: append the newly evaluated frames and the
# running-best objective to the saved dataset for run `prev`.
case = 'results/optical_interferometer_DON'
prev = 0 # change from 0 to 4 to consider different random and independent run
X = np.load(case+'/X_loc_'+str(prev)+'.npy') # load vectorial inputs for the constructed dataset so far during the BO process
y = np.load(case+'/y_loc_'+str(prev)+'.npy') # load vectorial outputs for the constructed dataset so far during the BO process
new_y = f(X[-1,:]) # compute the vectorial output (the 16 images) of the newly acquired point
y = np.concatenate([ y, np.transpose(new_y[None, :, :, :], (0, 2, 3, 1)) ]) # append channels-last, i.e. (1, P1, P2, 16), matching output_dim
np.save(case+'/y_loc_'+str(prev)+'.npy',y) # save the constructed vectorial output dataset by RPN-BO
opt = np.load(case+'/opt_'+str(prev)+'.npy') # load best objective for the constructed dataset so far during the BO process
opt = np.concatenate( ( opt, np.minimum(opt[-1],output(new_y))[None] ) , axis=0 ) # running best (minimum) objective after the new evaluation
np.save(case+'/opt_'+str(prev)+'.npy',opt) # save the constructed objective tensor by RPN-BO
print('new_X: ', X[-1,:], 'new obj:', output(new_y), 'min obj: ',np.min(np.array(opt)))
| 1,953 | 45.52381 | 136 | py |
rpn_bo | rpn_bo-main/Code and results/optical_interferometer_MLP_step_1.py | import os
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE']='false'
from jax import vmap, random, jit
from jax import numpy as np
from jax.scipy.special import logsumexp
from jax.nn import relu
import numpy as onp
from rpn_bo_models import EnsembleRegression
from rpn_bo_dataloaders import BootstrapLoader
from rpn_bo_acquisitions import MCAcquisition
from rpn_bo_utilities import uniform_prior
onp.random.seed(1234)

# Helper functions
# Per-ensemble-member standardization helpers (mapped over the leading axis).
normalize = vmap(lambda x, mu, std: (x-mu)/std, in_axes=(0,0,0))
denormalize = vmap(lambda x, mu, std: x*std + mu, in_axes=(0,0,0))

# vectorial input space dimension and its search space
dim = 4
lb = -np.ones((dim,))
ub = np.ones((dim,))
p_x = uniform_prior(lb, ub)
bounds = (lb, ub)

# vectorial output space dimension
N_y = 64
xx, yy = np.meshgrid( np.arange(N_y) / N_y, np.arange(N_y) / N_y )
dim_y = 16*N_y**2  # 16 flattened N_y x N_y frames

# initial training space
case = 'results/optical_interferometer_MLP'
prev = 0
X = np.load(case+'/X_loc_'+str(prev)+'.npy')
y = np.load(case+'/y_loc_'+str(prev)+'.npy')
opt = np.load(case+'/opt_'+str(prev)+'.npy')
N = X.shape[0]

#### RPN-BO hyperparameters ####
num_restarts_acq = 500
q1 = 1
ensemble_size = 32
batch_size = N
fraction = 1
layers = [dim, 64, 64, dim_y]
nIter_RPN = 5000
options = {'criterion': 'LCB', # 'TS' 'LCB', EI
           'kappa': 2.0,
           'weights': None, # exact gmm None
           }
train_key = random.PRNGKey(0)
key_TS = random.PRNGKey(123)
print('Run %s, Nx %s, Ny %s, init %s, best %s' % (str(prev), X.shape[0], y.shape[0], opt[0], np.min(opt)))

# Change random seed for different optimization iterations and different random independent runs
train_key = random.split(train_key, 2)[0]
key_TS = random.split(key_TS, 2)[0]
# Fast-forward the PRNG streams past completed work so a resumed run
# reproduces the original key sequence.
# NOTE(review): nesting reconstructed from context -- 85 iterations per
# completed run, then N-15 completed iterations of the current run (matches
# seed_id = 85*prev + (N-15) below); confirm against the original repository.
for i in range(prev):
    for i in range(85):
        train_key = random.split(train_key, 2)[0]
        key_TS = random.split(key_TS, 2)[0]
for i in range(N-15):
    train_key = random.split(train_key, 2)[0]
    key_TS = random.split(key_TS, 2)[0]
# Create data set
# Positional args: (X, y, batch_size, ensemble_size, fraction, is_Gauss=0);
# is_Gauss=0 selects max-abs target scaling in BootstrapLoader.
dataset = BootstrapLoader(X, y, batch_size, ensemble_size, fraction, 0, rng_key=train_key)
(mu_X, sigma_X), (mu_y, sigma_y) = dataset.norm_const

# Initialize model
train_key = random.split(train_key, 2)[0]
model = EnsembleRegression(layers, ensemble_size, train_key, relu)

# Train model
model.train(dataset, nIter=nIter_RPN)
# prediction function using trained RPN
# mapping vectorial input to scalar objective value
@jit
def predict(x):
    # accepts and returns un-normalized data
    # Broadcast the (q, dim) candidate batch across all ensemble members.
    x = np.tile(x[np.newaxis,:,:], (ensemble_size, 1, 1))
    x = normalize(x, mu_X, sigma_X)
    # Gather trained and prior parameters of every ensemble member.
    params = vmap(model.get_params)(model.opt_state)
    params_prior = vmap(model.get_params)(model.prior_opt_state)
    opt_params = (params, params_prior)
    samples = model.posterior(opt_params, x)
    samples = denormalize(samples, mu_y, sigma_y)
    # Reinterpret the flat network outputs as 16 frames of N_y x N_y pixels.
    samples = samples.reshape((samples.shape[0],samples.shape[1],16,N_y,N_y))
    # Gaussian-windowed intensity per frame, then smooth min/max over frames.
    intens = np.exp(-((xx - 0.5) ** 2 + (yy - 0.5) ** 2) / (0.95) ** 2) * samples
    ivec = np.sum(intens, axis = (-1, -2))
    smax = logsumexp(ivec, -1)
    smin = -logsumexp(-ivec, -1)
    # exp of the fringe visibility, negated so that minimizing this value
    # maximizes visibility (monotone transform of the step-2 objective).
    v = np.exp( (smax - smin) / (smax + smin) )
    return -v[:,:,None]
# Fit GMM if needed for weighted acquisition functions
weights_fn = lambda x: np.ones(x.shape[0],)   # uniform weights ('weights': None above)
if options['criterion']=='TS':
    args = (key_TS,)          # Thompson sampling consumes a PRNG key
else:
    kappa = options['kappa']  # LCB/EI take the exploration parameter
    args = (kappa,)
acq_model = MCAcquisition(predict,
                          bounds,
                          *args,
                          acq_fn = options['criterion'],
                          output_weights=weights_fn)

# Optimize acquisition with L-BFGS to inquire new point(s)
# seed_id encodes the global iteration index (85 iterations per completed run).
new_X = acq_model.next_best_point(q = q1, num_restarts = num_restarts_acq, seed_id = 85*prev + (N-15))
new_X = new_X.reshape(q1,dim)
X = np.concatenate([X, new_X], axis = 0) # augment the vectorial input dataset during the BO process
np.save(case+'/X_loc_'+str(prev)+'.npy',X) # save the constructed vectorial input dataset by RPN-BO
rpn_bo | rpn_bo-main/Code and results/rpn_bo_utilities.py | from jax import numpy as np
from jax import jit, vmap, random
from jax.scipy.stats import multivariate_normal, uniform
import numpy as onp
from scipy.stats import gaussian_kde
from sklearn import mixture
from pyDOE import lhs
from KDEpy import FFTKDE
def fit_kde(predict_fn, prior_pdf, bounds, num_samples=10000, bw=None):
    """Fit a 1-D kernel density estimate of the model-output distribution.

    Latin-hypercube samples the input box, pushes the samples through
    `predict_fn`, weights them by the input prior, and fits an FFT-based KDE
    to the resulting scalar outputs.

    Parameters
    ----------
    predict_fn : callable mapping an (N, dim) input array to scalar outputs
    prior_pdf : callable returning the prior density of each input sample
    bounds : (lb, ub) pair of (dim,) arrays defining the input box
    num_samples : number of LHS samples used to populate the KDE
    bw : KDE bandwidth; estimated with scipy's gaussian_kde when None

    Returns
    -------
    (kde_pdf_x, kde_pdf_y) : evaluation grid and density from FFTKDE
    """
    onp.random.seed(1)  # fixed seed so the KDE is reproducible across calls
    lb, ub = bounds
    dim = lb.shape[0]
    X = lb + (ub-lb)*lhs(dim, num_samples)
    y = predict_fn(X)
    weights = prior_pdf(X)
    y, weights = onp.array(y), onp.array(weights)
    y = y.flatten()
    if bw is None:
        try:
            sc = gaussian_kde(y, weights=weights)
            bw = onp.sqrt(sc.covariance).flatten()[0]
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt). Degenerate samples (e.g. a
            # singular covariance) fall back to a unit bandwidth.
            bw = 1.0
    if bw < 1e-8:
        # Guard against a numerically-zero bandwidth, which breaks FFTKDE.
        bw = 1.0
    kde_pdf_x, kde_pdf_y = FFTKDE(bw=bw).fit(y, weights).evaluate()
    return kde_pdf_x, kde_pdf_y
def fit_gmm(predict_fn, prior_pdf, bounds, num_samples, num_comp):
    """Fit a GMM to the output-weighted input density w(x) ~ p(x) / p(y(x)).

    Samples the input box, weights each sample by the ratio of the input
    prior to the KDE-estimated output density, resamples according to those
    weights, and fits a `num_comp`-component Gaussian mixture to the
    resampled (unit-box scaled) inputs.

    Returns (weights, means, covariances) of the fitted mixture.
    """
    onp.random.seed(0)
    lb, ub = bounds
    dim = lb.shape[0]
    # Evaluate input prior
    X = lb + (ub-lb)*lhs(dim, num_samples)
    p_x = prior_pdf(X)[:,None]
    # Interpolate output KDE
    y = predict_fn(X)
    kde_pdf_x, kde_pdf_y = fit_kde(predict_fn, prior_pdf, bounds)
    # Clip to non-negative and add a floor so the ratio below never divides by ~0.
    p_y = np.clip(np.interp(y, kde_pdf_x, kde_pdf_y), a_min=0.0) + 1e-8
    # Weights
    weights = p_x/p_y
    # Rescale weights as probability distribution
    weights = onp.array(weights, dtype = onp.float64)
    weights = weights / onp.sum(weights)
    # Scale inputs to [0, 1]^D
    lb, ub = bounds
    X = (X - lb) / (ub - lb)
    # Sample from analytical w
    indices = np.arange(num_samples)
    idx = onp.random.choice(indices, num_samples, p=weights.flatten())
    X_train = X[idx]
    # fit GMM
    clf = mixture.GaussianMixture(n_components=num_comp,
                                  covariance_type='full')
    clf.fit(onp.array(X_train, dtype=np.float64))
    out = (np.array(clf.weights_),
           np.array(clf.means_),
           np.array(clf.covariances_))
    return out
def output_weights(predict_fn, prior_pdf, bounds, method='exact', num_samples=10000, num_comp=2):
    """Build a jitted function x -> w(x) of output-informed acquisition weights.

    method='exact' evaluates the normalized likelihood ratio p(x)/p(y(x))
    directly via a KDE of the outputs, method='gmm' uses a Gaussian-mixture
    surrogate of the ratio (expects box-normalized inputs), and
    method='None' returns uniform unit weights.
    """
    # Compute exact likelihood ratio
    if method == 'exact':
        onp.random.seed(0)
        lb, ub = bounds
        dim = lb.shape[0]
        X = lb + (ub-lb)*lhs(dim, num_samples)
        kde_pdf_x, kde_pdf_y = fit_kde(predict_fn, prior_pdf, bounds)
        p_x = lambda x: prior_pdf(x)[:,None]
        p_y = lambda x: np.clip(np.interp(predict_fn(x), kde_pdf_x, kde_pdf_y), a_min=0.0) + 1e-8
        ratio = lambda x: p_x(x)/p_y(x)
        # Monte-Carlo estimate of the normalizing constant over the box.
        volume = np.prod(ub-lb)
        norm_const = np.mean(ratio(X))*volume
        def compute_w(x):
            w = ratio(x)/norm_const
            return w.flatten()
    # GMM approximation
    elif method == 'gmm':
        gmm_vars = fit_gmm(predict_fn, prior_pdf, bounds, num_samples, num_comp)
        def compute_w(x):
            # expects normalized inputs
            weights, means, covs = gmm_vars
            lb, ub = bounds
            x = (x - lb) / (ub - lb)
            gmm_mode = lambda w, mu, cov: w*multivariate_normal.pdf(x, mu, cov)
            w = np.sum(vmap(gmm_mode)(weights, means, covs), axis = 0)
            return w/np.prod(ub-lb)
    elif method == 'None':
        compute_w = lambda x: np.ones(x.shape[0])
    else:
        raise NotImplementedError
    return jit(compute_w)
# Helper functions for computing output-weighted acquisitions
class uniform_prior:
    """Uniform prior over the axis-aligned box [lb, ub]."""

    def __init__(self, lb, ub):
        self.lb = lb
        self.ub = ub
        self.dim = lb.shape[0]

    def sample(self, rng_key, N):
        """Draw N points uniformly from the box; returns an (N, dim) array."""
        span = self.ub - self.lb
        unit = random.uniform(rng_key, (N, self.dim))
        return self.lb + span * unit

    def pdf(self, x):
        """Sum of the per-dimension marginal uniform densities at x.

        Note this sums (rather than multiplies) the marginals; inside the
        box it is constant, hence proportional to the joint uniform density.
        """
        marginals = uniform.pdf(x, self.lb, self.ub - self.lb)
        return np.sum(marginals, axis=-1)
class gaussian_prior:
    """Multivariate Gaussian prior N(mu, cov)."""

    def __init__(self, mu, cov):
        self.mu = mu
        self.cov = cov
        self.dim = mu.shape[0]

    def sample(self, rng_key, N):
        """Draw N samples from N(mu, cov); returns an (N, dim) array."""
        batch_shape = (N,)
        return random.multivariate_normal(rng_key, self.mu, self.cov, batch_shape)

    def pdf(self, x):
        """Evaluate the Gaussian density at x."""
        return multivariate_normal.pdf(x, self.mu, self.cov)
| 4,097 | 34.327586 | 97 | py |
# Select which benchmark's results to plot.
problem = 'comp_blades_shape' # choose from 'environment' 'brusselator' 'optical_interferometer' 'comp_blades_shape'
from matplotlib import pyplot as plt
plt.close('all')
# Bold LaTeX styling shared by all figures below.
plt.rcParams.update(plt.rcParamsDefault)
plt.rcParams.update({'font.weight': 'bold',
                     'font.size': 28,
                     'lines.linewidth': 1.5,
                     'axes.labelsize': 36,
                     'axes.titlesize': 36,
                     'xtick.labelsize': 28,
                     'ytick.labelsize': 28,
                     'legend.fontsize': 36,
                     'axes.linewidth': 4,
                     "pgf.texsystem": "pdflatex", # change this if using xetex or lautex
                     "text.usetex": True, # use LaTeX to write all text
                     })
# NOTE(review): newer matplotlib expects a plain string for this rcParam, not
# a list -- confirm against the pinned matplotlib version.
plt.rcParams['text.latex.preamble'] = [r'\usepackage{sfmath} \boldmath']
import torch
import numpy as np
# Baseline (HOGP) results that get overlaid on the RPN-BO curves.
dat_file = np.load("./HOGP_results.npz", allow_pickle = True)
list(dat_file.keys()) # no-op inspection leftover
dat = dat_file["obj"].reshape(-1)[0]
dat.keys() # no-op inspection leftover
from scipy import stats
################################################
############### environment ####################
################################################
if problem == 'environment':
    # Experiment layout: N initial designs, nIter BO steps of batch q1,
    # replicated over nTrSet independent runs.
    N = 5
    nIter = 30
    q1 = 1
    nTrSet = 10
    dispersion_scale = 0.2  # fraction of the MAD shown as the shaded band
    # Result directories for each surrogate/acquisition combination.
    case = 'results/environmental_model_function_MLP_LCB'
    case2 = 'results/environmental_model_function_MLP_EI'
    case3 = 'results/environmental_model_function_MLP_TS'
    case4 = 'results/environmental_model_function_DON_LCB'
    case5 = 'results/environmental_model_function_DON_EI'
    case6 = 'results/environmental_model_function_DON_TS'
    # Collect the best-objective trace of every independent run.
    opt_q1_RPN = []
    opt_q1_RPN2 = []
    opt_q1_RPN3 = []
    opt_q1_RPN4 = []
    opt_q1_RPN5 = []
    opt_q1_RPN6 = []
    for j in range(nTrSet):
        opt = np.load(case+'/opt_'+str(j)+'.npy')
        opt_q1_RPN.append(np.array(opt))
        opt2 = np.load(case2+'/opt_'+str(j)+'.npy')
        opt_q1_RPN2.append(np.array(opt2))
        opt3 = np.load(case3+'/opt_'+str(j)+'.npy')
        opt_q1_RPN3.append(np.array(opt3))
        opt4 = np.load(case4+'/opt_'+str(j)+'.npy')
        opt_q1_RPN4.append(np.array(opt4))
        opt5 = np.load(case5+'/opt_'+str(j)+'.npy')
        opt_q1_RPN5.append(np.array(opt5))
        opt6 = np.load(case6+'/opt_'+str(j)+'.npy')
        opt_q1_RPN6.append(np.array(opt6))
    opt_q1_RPN = np.array(opt_q1_RPN)
    opt_q1_RPN2 = np.array(opt_q1_RPN2)
    opt_q1_RPN3 = np.array(opt_q1_RPN3)
    opt_q1_RPN4 = np.array(opt_q1_RPN4)
    opt_q1_RPN5 = np.array(opt_q1_RPN5)
    opt_q1_RPN6 = np.array(opt_q1_RPN6)
    # Median and median-absolute-deviation across independent runs.
    m_q1_RPN, std_q1_RPN = np.median(opt_q1_RPN, axis = 0), stats.median_abs_deviation(opt_q1_RPN, axis = 0)
    m_q1_RPN2, std_q1_RPN2 = np.median(opt_q1_RPN2, axis = 0), stats.median_abs_deviation(opt_q1_RPN2, axis = 0)
    m_q1_RPN3, std_q1_RPN3 = np.median(opt_q1_RPN3, axis = 0), stats.median_abs_deviation(opt_q1_RPN3, axis = 0)
    m_q1_RPN4, std_q1_RPN4 = np.median(opt_q1_RPN4, axis = 0), stats.median_abs_deviation(opt_q1_RPN4, axis = 0)
    m_q1_RPN5, std_q1_RPN5 = np.median(opt_q1_RPN5, axis = 0), stats.median_abs_deviation(opt_q1_RPN5, axis = 0)
    m_q1_RPN6, std_q1_RPN6 = np.median(opt_q1_RPN6, axis = 0), stats.median_abs_deviation(opt_q1_RPN6, axis = 0)
    # Log-scale band edges; lower edge clipped at 0 before the log.
    lower_q1_RPN = np.log10(np.clip(m_q1_RPN - dispersion_scale*std_q1_RPN, a_min=0., a_max = np.inf) + 1e-10)
    upper_q1_RPN = np.log10(m_q1_RPN + dispersion_scale*std_q1_RPN + 1e-10)
    lower_q1_RPN2 = np.log10(np.clip(m_q1_RPN2 - dispersion_scale*std_q1_RPN2, a_min=0., a_max = np.inf) + 1e-10)
    upper_q1_RPN2 = np.log10(m_q1_RPN2 + dispersion_scale*std_q1_RPN2 + 1e-10)
    lower_q1_RPN3 = np.log10(np.clip(m_q1_RPN3 - dispersion_scale*std_q1_RPN3, a_min=0., a_max = np.inf) + 1e-10)
    upper_q1_RPN3 = np.log10(m_q1_RPN3 + dispersion_scale*std_q1_RPN3 + 1e-10)
    lower_q1_RPN4 = np.log10(np.clip(m_q1_RPN4 - dispersion_scale*std_q1_RPN4, a_min=0., a_max = np.inf) + 1e-10)
    upper_q1_RPN4 = np.log10(m_q1_RPN4 + dispersion_scale*std_q1_RPN4 + 1e-10)
    lower_q1_RPN5 = np.log10(np.clip(m_q1_RPN5 - dispersion_scale*std_q1_RPN5, a_min=0., a_max = np.inf) + 1e-10)
    upper_q1_RPN5 = np.log10(m_q1_RPN5 + dispersion_scale*std_q1_RPN5 + 1e-10)
    lower_q1_RPN6 = np.log10(np.clip(m_q1_RPN6 - dispersion_scale*std_q1_RPN6, a_min=0., a_max = np.inf) + 1e-10)
    upper_q1_RPN6 = np.log10(m_q1_RPN6 + dispersion_scale*std_q1_RPN6 + 1e-10)
    fig = plt.figure(figsize=(21, 9))
    ax = plt.subplot(111)
    ax.plot(N+q1*np.arange(nIter+1), np.log10(m_q1_RPN)[:nIter+1], color='black', label = r'\textbf{RPN - MLP - LCB}')
    ax.fill_between(N+q1*np.arange(nIter+1), lower_q1_RPN[:nIter+1], upper_q1_RPN[:nIter+1], facecolor='black', alpha=0.3)
    ax.plot(N+q1*np.arange(nIter+1), np.log10(m_q1_RPN2)[:nIter+1], color='blue', label = r'\textbf{RPN - MLP - EI}')
    ax.fill_between(N+q1*np.arange(nIter+1), lower_q1_RPN2[:nIter+1], upper_q1_RPN2[:nIter+1], facecolor='blue', alpha=0.3)
    ax.plot(N+q1*np.arange(nIter+1), np.log10(m_q1_RPN3)[:nIter+1], color='limegreen', label = r'\textbf{RPN - MLP - TS}')
    ax.fill_between(N+q1*np.arange(nIter+1), lower_q1_RPN3[:nIter+1], upper_q1_RPN3[:nIter+1], facecolor='limegreen', alpha=0.3)
    ax.plot(N+q1*np.arange(nIter+1), np.log10(m_q1_RPN4), '-.', color='black', label = r'\textbf{RPN - DON - LCB}')
    # Fix: this band previously used facecolor='red', mismatching the black
    # RPN-DON-LCB line above and the black band used for the same series in
    # the brusselator plot; every other series' band matches its line color.
    ax.fill_between(N+q1*np.arange(nIter+1), lower_q1_RPN4, upper_q1_RPN4, facecolor='black', alpha=0.3)
    ax.plot(N+q1*np.arange(nIter+1), np.log10(m_q1_RPN5), '-.', color='blue', label = r'\textbf{RPN - DON - EI}')
    ax.fill_between(N+q1*np.arange(nIter+1), lower_q1_RPN5, upper_q1_RPN5, facecolor='blue', alpha=0.3)
    ax.plot(N+q1*np.arange(nIter+1), np.log10(m_q1_RPN6), '-.', color='limegreen', label = r'\textbf{RPN - DON - TS}')
    ax.fill_between(N+q1*np.arange(nIter+1), lower_q1_RPN6, upper_q1_RPN6, facecolor='limegreen', alpha=0.3)
    plt.xticks(np.arange(N, N+nIter+1, N))
    plt.xlim([5,35])
    ax.grid(color='Grey', linestyle='-', linewidth=0.5)
    # Overlay the HOGP baselines loaded from HOGP_results.npz.
    sample_means = dat["env_means"]
    sample_stds = dat["env_stds"]
    keys = dat["env_keys"]
    key_dict = {"rnd": r'\textbf{Random}', "rnd_cf": r'\textbf{Random-CF}', "ei": r'\textbf{EI}', "ei_cf": r'\textbf{EI-CF}', \
                "ei_hogp_cf": r'\textbf{EI-HOGP-CF}', "ei_hogp_cf_smooth": r'\textbf{EI-HOGP-CF + GP}'}
    steps = torch.linspace(5, 35, 30)
    for i, key in enumerate(keys):
        ax.fill_between(steps,
                        sample_means[i] - sample_stds[i] / 20**0.5,
                        sample_means[i] + sample_stds[i] / 20**0.5,
                        alpha = 0.1)
        ax.plot(steps, sample_means[i], '--', linewidth=3, label = key_dict[key])
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    ax.legend(loc='center left', frameon=False, fontsize = 36, bbox_to_anchor=(0.98, 0.5))
    plt.xlabel(r'\textbf{Function Evaluations}')
    plt.ylabel(r'\textbf{Log10(Regret)}')
    plt.savefig('figures/environmental_model_function.png',dpi=300,bbox_inches='tight')
################################################
################ brusselator ###################
################################################
if problem == 'brusselator':
    # Experiment layout: N initial designs; MLP runs use nIter evaluations
    # at batch sizes q1 and q2, DON runs use nIter_q1_DON iterations.
    N = 5
    nIter = 20
    q1 = 1
    q2 = 2
    nIter_q1 = nIter//q1
    nIter_q2 = nIter//q2
    nIter_q1_DON = 30
    nTrSet = 10
    nTrSet_DON = 30
    dispersion_scale = 0.2  # fraction of the MAD shown as the shaded band
    # Result directories for each surrogate/acquisition combination.
    case = 'results/brusselator_pde_MLP_LCB'
    case_EI = 'results/brusselator_pde_MLP_EI'
    case_q2 = 'results/brusselator_pde_MLP_EI_q_2'
    case_LCB_q2 = 'results/brusselator_pde_MLP_LCB_q_2'
    case2 = 'results/brusselator_pde_DON_TS'
    case3 = 'results/brusselator_pde_DON_LW_LCB'
    case4 = 'results/brusselator_pde_DON_LCB'
    opt_q1_RPN = []
    opt_q1_RPN_EI = []
    opt_q2_RPN = []
    opt_LCB_q2_RPN = []
    opt_q1_RPN2 = []
    opt_q1_RPN3 = []
    opt_q1_RPN4 = []
    # Load the MLP-surrogate traces.
    for j in range(nTrSet):
        opt = np.load(case+'/opt_'+str(j)+'.npy')
        opt_q1_RPN.append(np.array(opt))
        opt = np.load(case_q2+'/opt_'+str(j)+'.npy')
        opt_q2_RPN.append(np.array(opt))
        opt = np.load(case_EI+'/opt_'+str(j)+'.npy')
        opt_q1_RPN_EI.append(np.array(opt))
        opt = np.load(case_LCB_q2+'/opt_'+str(j)+'.npy')
        opt_LCB_q2_RPN.append(np.array(opt))
    opt_q1_RPN = np.array(opt_q1_RPN)
    opt_q2_RPN = np.array(opt_q2_RPN)
    opt_q1_RPN_EI = np.array(opt_q1_RPN_EI)
    opt_LCB_q2_RPN = np.array(opt_LCB_q2_RPN)
    # Load the DeepONet-surrogate traces.
    # NOTE(review): this loop runs over nTrSet (10) runs although nTrSet_DON = 30
    # is defined above -- confirm which run count the DON results actually have.
    for j in range(nTrSet):
        opt = np.load(case2+'/opt_'+str(j)+'.npy')
        opt_q1_RPN2.append(np.array(opt))
        opt = np.load(case3+'/opt_'+str(j)+'.npy')
        opt_q1_RPN3.append(np.array(opt))
        opt = np.load(case4+'/opt_'+str(j)+'.npy')
        opt_q1_RPN4.append(np.array(opt))
    # Median and median-absolute-deviation across independent runs.
    m_q1_RPN, std_q1_RPN = np.median(opt_q1_RPN, axis = 0), stats.median_abs_deviation(opt_q1_RPN, axis = 0)
    m_q1_RPN2, std_q1_RPN2 = np.median(opt_q1_RPN2, axis = 0), stats.median_abs_deviation(opt_q1_RPN2, axis = 0)
    m_q1_RPN3, std_q1_RPN3 = np.median(opt_q1_RPN3, axis = 0), stats.median_abs_deviation(opt_q1_RPN3, axis = 0)
    m_q1_RPN4, std_q1_RPN4 = np.median(opt_q1_RPN4, axis = 0), stats.median_abs_deviation(opt_q1_RPN4, axis = 0)
    m_q2_RPN, std_q2_RPN = np.median(opt_q2_RPN, axis = 0), stats.median_abs_deviation(opt_q2_RPN, axis = 0)
    m_q1_RPN_EI, std_q1_RPN_EI = np.median(opt_q1_RPN_EI, axis = 0), stats.median_abs_deviation(opt_q1_RPN_EI, axis = 0)
    m_LCB_q2_RPN, std_LCB_q2_RPN = np.median(opt_LCB_q2_RPN, axis = 0), stats.median_abs_deviation(opt_LCB_q2_RPN, axis = 0)
    # Log-scale band edges; lower edge clipped at 0 before the log.
    lower_q1_RPN = np.log10(np.clip(m_q1_RPN - dispersion_scale*std_q1_RPN, a_min=0., a_max = np.inf) + 1e-8)
    upper_q1_RPN = np.log10(m_q1_RPN + dispersion_scale*std_q1_RPN + 1e-8)
    lower_q1_RPN2 = np.log10(np.clip(m_q1_RPN2 - dispersion_scale*std_q1_RPN2, a_min=0., a_max = np.inf) + 1e-10)
    upper_q1_RPN2 = np.log10(m_q1_RPN2 + dispersion_scale*std_q1_RPN2 + 1e-10)
    lower_q1_RPN3 = np.log10(np.clip(m_q1_RPN3 - dispersion_scale*std_q1_RPN3, a_min=0., a_max = np.inf) + 1e-10)
    upper_q1_RPN3 = np.log10(m_q1_RPN3 + dispersion_scale*std_q1_RPN3 + 1e-10)
    lower_q1_RPN4 = np.log10(np.clip(m_q1_RPN4 - dispersion_scale*std_q1_RPN4, a_min=0., a_max = np.inf) + 1e-10)
    upper_q1_RPN4 = np.log10(m_q1_RPN4 + dispersion_scale*std_q1_RPN4 + 1e-10)
    lower_q2_RPN = np.log10(np.clip(m_q2_RPN - dispersion_scale*std_q2_RPN, a_min=0., a_max = np.inf) + 1e-8)
    upper_q2_RPN = np.log10(m_q2_RPN + dispersion_scale*std_q2_RPN + 1e-8)
    lower_q1_RPN_EI = np.log10(np.clip(m_q1_RPN_EI - dispersion_scale*std_q1_RPN_EI, a_min=0., a_max = np.inf) + 1e-8)
    upper_q1_RPN_EI = np.log10(m_q1_RPN_EI + dispersion_scale*std_q1_RPN_EI + 1e-8)
    lower_LCB_q2_RPN = np.log10(np.clip(m_LCB_q2_RPN - dispersion_scale*std_LCB_q2_RPN, a_min=0., a_max = np.inf) + 1e-8)
    upper_LCB_q2_RPN = np.log10(m_LCB_q2_RPN + dispersion_scale*std_LCB_q2_RPN + 1e-8)
    fig = plt.figure(figsize=(21, 9))
    ax = plt.subplot(111)
    ax.plot(N+q1*np.arange(nIter_q1+1), np.log10(m_q1_RPN), color='black', label = r'\textbf{RPN - MLP - LCB}')
    ax.fill_between(N+q1*np.arange(nIter_q1+1), lower_q1_RPN, upper_q1_RPN, facecolor='black', alpha=0.3)
    ax.plot(N+q2*np.arange(nIter_q2+1), np.log10(m_LCB_q2_RPN), color='slategrey', label = r'\textbf{RPN - MLP - LCB, q=2}')
    ax.fill_between(N+q2*np.arange(nIter_q2+1), lower_LCB_q2_RPN, upper_LCB_q2_RPN, facecolor='slategrey', alpha=0.3)
    ax.plot(N+q1*np.arange(nIter_q1+1), np.log10(m_q1_RPN_EI), color='blue', label = r'\textbf{RPN - MLP - EI}')
    ax.fill_between(N+q1*np.arange(nIter_q1+1), lower_q1_RPN_EI, upper_q1_RPN_EI, facecolor='blue', alpha=0.3)
    ax.plot(N+q2*np.arange(nIter_q2+1), np.log10(m_q2_RPN), color='lightskyblue', label = r'\textbf{RPN - MLP - EI, q=2}')
    ax.fill_between(N+q2*np.arange(nIter_q2+1), lower_q2_RPN, upper_q2_RPN, facecolor='lightskyblue', alpha=0.3)
    ax.plot(N+q1*np.arange(nIter_q1_DON+1), np.log10(m_q1_RPN4), '-.', color='black', label = r'\textbf{RPN - DON - LCB}')
    ax.fill_between(N+q1*np.arange(nIter_q1_DON+1), lower_q1_RPN4, upper_q1_RPN4, facecolor='black', alpha=0.3)
    ax.plot(N+q1*np.arange(nIter_q1_DON+1), np.log10(m_q1_RPN3), '-.', color='hotpink', label = r'\textbf{RPN - DON - LCB-LW}')
    ax.fill_between(N+q1*np.arange(nIter_q1_DON+1), lower_q1_RPN3, upper_q1_RPN3, facecolor='hotpink', alpha=0.3)
    ax.plot(N+q1*np.arange(nIter_q1_DON+1), np.log10(m_q1_RPN2), '-.', color='limegreen', label = r'\textbf{RPN - DON - TS}')
    ax.fill_between(N+q1*np.arange(nIter_q1_DON+1), lower_q1_RPN2, upper_q1_RPN2, facecolor='limegreen', alpha=0.3)
    plt.xticks(np.arange(N, N+nIter_q1_DON+1, N))
    plt.xlim([5,35])
    ax.grid(color='Grey', linestyle='-', linewidth=0.5)
    # Overlay the HOGP baselines loaded from HOGP_results.npz.
    sample_means = dat["pde_means"]
    sample_stds = dat["pde_stds"]
    keys = dat["pde_keys"]
    key_dict = {"rnd": r'\textbf{Random}', "rnd_cf": r'\textbf{Random-CF}', "ei": r'\textbf{EI}', "ei_cf": r'\textbf{EI-CF}', \
                "ei_hogp_cf": r'\textbf{EI-HOGP-CF}', "ei_hogp_cf_smooth": r'\textbf{EI-HOGP-CF + GP}'}
    steps = torch.linspace(5, 35, 30)
    for i, key in enumerate(keys):
        ax.fill_between(steps,
                        sample_means[i] - sample_stds[i] / 20**0.5,
                        sample_means[i] + sample_stds[i] / 20**0.5,
                        alpha = 0.1)
        ax.plot(steps, sample_means[i], '--', linewidth=3, label = key_dict[key])
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    ax.legend(loc='center left', frameon=False, fontsize = 36, bbox_to_anchor=(0.98, 0.5))
    plt.xlabel(r'\textbf{Function Evaluations}')
    plt.ylabel(r'\textbf{Log10(Variance)}')
    plt.savefig('figures/brusselator_pde.png', dpi=300,bbox_inches='tight')
################################################
############ optical_interferometer ############
################################################
if problem == 'optical_interferometer':
    # Experiment layout: N initial designs, nIter BO steps of batch q1,
    # replicated over nTrSet independent runs.
    N = 15
    nIter = 85
    q1 = 1
    nIter_q1 = nIter//q1
    nTrSet = 5
    dispersion_scale = 0.2  # fraction of the MAD shown as the shaded band
    case = 'results/optical_interferometer_MLP_LCB'
    case2 = 'results/optical_interferometer_MLP_EI'
    case3 = 'results/optical_interferometer_MLP_TS'
    case4 = 'results/optical_interferometer_DON_EI'
    opt_q1_RPN = []
    opt_q1_RPN2 = []
    opt_q1_RPN3 = []
    opt_q1_RPN4 = []
    # Traces are stored negated on disk; flip the sign so larger is better
    # (matches the 'Visibility (V)' y-axis below).
    for j in range(nTrSet):
        opt = -np.load(case+'/'+'opt_'+str(j)+'.npy')[:nIter_q1+1]
        opt2 = -np.load(case2+'/'+'opt_'+str(j)+'.npy')[:nIter_q1+1]
        opt3 = -np.load(case3+'/'+'opt_'+str(j)+'.npy')[:nIter_q1+1]
        opt4 = -np.load(case4+'/'+'opt_'+str(j)+'.npy')[:nIter_q1+1]
        opt_q1_RPN.append(np.array(opt))
        opt_q1_RPN2.append(np.array(opt2))
        opt_q1_RPN3.append(np.array(opt3))
        opt_q1_RPN4.append(np.array(opt4))
    opt_q1_RPN = np.array(opt_q1_RPN)
    opt_q1_RPN2 = np.array(opt_q1_RPN2)
    opt_q1_RPN3 = np.array(opt_q1_RPN3)
    opt_q1_RPN4 = np.array(opt_q1_RPN4)
    # Median and median-absolute-deviation across independent runs.
    m_q1_RPN, std_q1_RPN = np.median(opt_q1_RPN, axis = 0), stats.median_abs_deviation(opt_q1_RPN, axis = 0)
    m_q1_RPN2, std_q1_RPN2 = np.median(opt_q1_RPN2, axis = 0), stats.median_abs_deviation(opt_q1_RPN2, axis = 0)
    m_q1_RPN3, std_q1_RPN3 = np.median(opt_q1_RPN3, axis = 0), stats.median_abs_deviation(opt_q1_RPN3, axis = 0)
    m_q1_RPN4, std_q1_RPN4 = np.median(opt_q1_RPN4, axis = 0), stats.median_abs_deviation(opt_q1_RPN4, axis = 0)
    # Linear-scale band edges; lower edge clipped at 0.
    lower_q1_RPN = np.clip(m_q1_RPN - dispersion_scale*std_q1_RPN, a_min=0., a_max = np.inf) + 1e-8
    upper_q1_RPN = m_q1_RPN + dispersion_scale*std_q1_RPN + 1e-8
    lower_q1_RPN2 = np.clip(m_q1_RPN2 - dispersion_scale*std_q1_RPN2, a_min=0., a_max = np.inf) + 1e-8
    upper_q1_RPN2 = m_q1_RPN2 + dispersion_scale*std_q1_RPN2 + 1e-8
    lower_q1_RPN3 = np.clip(m_q1_RPN3 - dispersion_scale*std_q1_RPN3, a_min=0., a_max = np.inf) + 1e-8
    upper_q1_RPN3 = m_q1_RPN3 + dispersion_scale*std_q1_RPN3 + 1e-8
    lower_q1_RPN4 = np.clip(m_q1_RPN4 - dispersion_scale*std_q1_RPN4, a_min=0., a_max = np.inf) + 1e-8
    upper_q1_RPN4 = m_q1_RPN4 + dispersion_scale*std_q1_RPN4 + 1e-8
    fig = plt.figure(figsize=(21, 9))
    ax = plt.subplot(111)
    ax.plot(N+q1*np.arange(nIter_q1+1), m_q1_RPN, color='black', label = r'\textbf{RPN - MLP - LCB}')
    ax.fill_between(N+q1*np.arange(nIter_q1+1), lower_q1_RPN, upper_q1_RPN, facecolor='black', alpha=0.3)
    ax.plot(N+q1*np.arange(nIter_q1+1), m_q1_RPN2, color='blue', label = r'\textbf{RPN - MLP - EI}')
    ax.fill_between(N+q1*np.arange(nIter_q1+1), lower_q1_RPN2, upper_q1_RPN2, facecolor='blue', alpha=0.3)
    ax.plot(N+q1*np.arange(nIter_q1+1), m_q1_RPN3, color='limegreen', label = r'\textbf{RPN - MLP - TS}')
    ax.fill_between(N+q1*np.arange(nIter_q1+1), lower_q1_RPN3, upper_q1_RPN3, facecolor='limegreen', alpha=0.3)
    ax.plot(N+q1*np.arange(nIter_q1+1), m_q1_RPN4, '-.', color='blue', label = r'\textbf{RPN - DON - EI}')
    ax.fill_between(N+q1*np.arange(nIter_q1+1), lower_q1_RPN4, upper_q1_RPN4, facecolor='blue', alpha=0.3)
    ax.grid(color='Grey', linestyle='-', linewidth=0.5)
    plt.xlim([15,100])
    # Overlay the HOGP baselines loaded from HOGP_results.npz.
    sample_means = dat["optics_means"]
    sample_stds = dat["optics_stds"]
    keys = dat["optics_keys"]
    key_dict = {"rnd": r'\textbf{Random}', "rnd_cf": r'\textbf{Random-CF}', "ei": r'\textbf{EI}', "ei_cf": r'\textbf{EI-CF}', \
                "ei_hogp_cf": r'\textbf{EI-HOGP-CF}', "ei_hogp_cf_smooth": r'\textbf{EI-HOGP-CF + GP}'}
    steps = torch.linspace(0, 100, 100)
    for i, key in enumerate(keys):
        plt.fill_between(steps,
                         sample_means[i] - sample_stds[i] / 45**0.5,
                         sample_means[i] + sample_stds[i] / 45**0.5,
                         alpha = 0.1)
        plt.plot(steps, sample_means[i], '--', linewidth=3, label = key_dict[key])
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    ax.legend(loc='center left', frameon=False, fontsize = 36, bbox_to_anchor=(0.98, 0.5))
    plt.xlabel(r'\textbf{Function Evaluations}')
    plt.ylabel(r'\textbf{Visibility (V)}')
    plt.savefig('figures/optical_interferometer.png', dpi=300,bbox_inches='tight')
################################################
############### comp_blades_shape ###############
################################################
if problem == 'comp_blades_shape':
    dispersion_scale = 0.2  # fraction of the std shown as the shaded band
    # Load mean/std optimization traces vs. accumulated evaluation cost for
    # multi-fidelity GP, single-fidelity GP and multi-fidelity RPN.
    x_MFGP = np.load('results/compressor_blades_shape_MLP/x_MFGP.npy')
    mean_MFGP = np.load('results/compressor_blades_shape_MLP/mean_MFGP.npy')
    std_MFGP = np.load('results/compressor_blades_shape_MLP/std_MFGP.npy')
    x_SFGP = np.load('results/compressor_blades_shape_MLP/x_SFGP.npy')
    mean_SFGP = np.load('results/compressor_blades_shape_MLP/mean_SFGP.npy')
    std_SFGP = np.load('results/compressor_blades_shape_MLP/std_SFGP.npy')
    x_MFRPN = np.load('results/compressor_blades_shape_MLP/x_MFRPN.npy')
    mean_MFRPN = np.load('results/compressor_blades_shape_MLP/mean_MFRPN.npy')
    std_MFRPN = np.load('results/compressor_blades_shape_MLP/std_MFRPN.npy')
    lower_SFGP = mean_SFGP - dispersion_scale*std_SFGP
    upper_SFGP = mean_SFGP + dispersion_scale*std_SFGP
    lower_MFGP = mean_MFGP - dispersion_scale*std_MFGP
    upper_MFGP = mean_MFGP + dispersion_scale*std_MFGP
    lower_MFRPN = mean_MFRPN - dispersion_scale*std_MFRPN
    upper_MFRPN = mean_MFRPN + dispersion_scale*std_MFRPN
    plt.figure(figsize = (16, 9), facecolor = "w")
    plt.plot(x_MFRPN,mean_MFRPN, linewidth=3, color='black', label = r'\textbf{MF - RPN - LCBC}')
    plt.fill_between(x_MFRPN, lower_MFRPN, upper_MFRPN, facecolor='black', alpha=0.3)
    plt.plot(x_SFGP,mean_SFGP, '--', linewidth=3, color='magenta', label = r'\textbf{SF - GP - LCBC}')
    plt.fill_between(x_SFGP, lower_SFGP, upper_SFGP, facecolor='magenta', alpha=0.3)
    plt.plot(x_MFGP,mean_MFGP, '--', linewidth=3, color='orange', label = r'\textbf{MF - GP - LCBC}')
    plt.fill_between(x_MFGP, lower_MFGP, upper_MFGP, facecolor='orange', alpha=0.3)
    plt.xlim([0,200])
    plt.grid(color='Grey', linestyle='-', linewidth=0.5)
    plt.legend(fontsize = 28)
    plt.xlabel(r'\textbf{Cost (unit of CFD HF evaluations) for aqcuired points}')
    plt.ylabel(r'\textbf{Optimization objective}')
    # Fix: save before showing. The original called savefig after a blocking
    # plt.show(); on GUI backends the figure can be torn down when the window
    # closes, producing an empty saved image. (The other branches in this
    # script never call show at all.)
    plt.savefig('figures/comp_blades_shape.png',dpi=300,bbox_inches='tight')
    plt.show()
| 20,736 | 52.862338 | 128 | py |
rpn_bo | rpn_bo-main/Code and results/rpn_bo_dataloaders.py | from jax import vmap, random, jit
from jax import numpy as np
from functools import partial
from torch.utils import data
class BootstrapLoader(data.Dataset):
    """Bootstrapped mini-batch loader for deep-ensemble training.

    Draws `ensemble_size` bootstrap replicates of the data set (each of size
    `fraction * N`, sampled without replacement) and serves one normalized
    mini-batch per replicate on every `__getitem__` call.
    """
    def __init__(self, X, y, batch_size=128, ensemble_size=32, fraction=0.5, is_Gauss=1, LF_pred=None, rng_key=random.PRNGKey(1234)):
        'Initialization'
        # N: total number of examples; each replicate keeps fraction*N of them.
        self.N = X.shape[0]
        self.batch_size = batch_size
        self.ensemble_size = ensemble_size
        self.bootstrap_size = int(self.N*fraction)
        # is_Gauss == 1: standardize inputs and targets per replicate;
        # otherwise leave inputs untouched and scale targets by max |y|
        # (see normalization_constants).
        self.is_Gauss = is_Gauss
        self.key = rng_key
        # Create the bootstrapped partitions
        keys = random.split(rng_key, ensemble_size)
        if LF_pred is None:
            self.X, self.y = vmap(self.__bootstrap, (None,None,0))(X, y, keys)
        else:
            # Multi-fidelity variant: append low-fidelity predictions to the inputs.
            self.X, self.y = vmap(self.__bootstrapMF, (None,None,0,0))(X, y, LF_pred, keys)
        # Each bootstrapped data-set has its own normalization constants
        self.norm_const = vmap(self.normalization_constants, in_axes=(0,0))(self.X, self.y)

    @partial(jit, static_argnums=(0,))
    def normalization_constants(self, X, y):
        """Return ((mu_X, sigma_X), (mu_y, sigma_y)) for one bootstrap replicate."""
        if self.is_Gauss == 1:
            mu_X, sigma_X = X.mean(0), X.std(0)
            mu_y, sigma_y = y.mean(0), y.std(0)
        else:
            # Identity transform for inputs; scale targets into [-1, 1] by max |y|.
            mu_X, sigma_X = np.zeros(X.shape[1],), np.ones(X.shape[1],)
            mu_y = np.zeros(y.shape[1],)
            sigma_y = np.max(np.abs(y)) * np.ones(y.shape[1],)
        return (mu_X, sigma_X), (mu_y, sigma_y)

    @partial(jit, static_argnums=(0,))
    def __bootstrap(self, X, y, key):
        # Subsample (without replacement) one bootstrap replicate.
        idx = random.choice(key, self.N, (self.bootstrap_size,), replace=False)
        inputs = X[idx,:]
        targets = y[idx,:]
        return inputs, targets

    @partial(jit, static_argnums=(0,))
    def __bootstrapMF(self, X, y, yLH, key):
        # Multi-fidelity replicate: inputs are [X, low-fidelity prediction].
        idx = random.choice(key, self.N, (self.bootstrap_size,), replace=False)
        inputs = np.concatenate([X[idx,:], yLH[idx,:]], axis=1)
        targets = y[idx,:]
        return inputs, targets

    @partial(jit, static_argnums=(0,))
    def __data_generation(self, key, X, y, norm_const):
        'Generates data containing batch_size samples'
        (mu_X, sigma_X), (mu_y, sigma_y) = norm_const
        idx = random.choice(key, self.N, (self.batch_size,), replace=False)
        X = X[idx,:]
        y = y[idx,:]
        X = (X - mu_X)/sigma_X
        y = (y - mu_y)/sigma_y
        return X, y

    def __getitem__(self, index):
        'Generate one batch of data'
        # `index` is ignored: the loader is stateful and advances its PRNG
        # key on every call instead.
        self.key, subkey = random.split(self.key)
        keys = random.split(self.key, self.ensemble_size)
        inputs, targets = vmap(self.__data_generation, (0,0,0,0))(keys,
                                                                  self.X,
                                                                  self.y,
                                                                  self.norm_const)
        return inputs, targets
class DataGenerator_batch(data.Dataset):
    """Mini-batch generator for DeepONet-style ensemble training.

    Each item is an ensemble of batches of (input-function, query-point)
    pairs together with the corresponding solution values and
    per-realization normalization weights.
    """
    def __init__(self, usol, u0_train, s1, t1, P1 = 100, P2 = 100,
                 batch_size=64, batch_size_all=512, N_ensemble = 10, rng_key=random.PRNGKey(1234), y=None):
        'Initialization'
        self.usol = usol              # solution snapshots, reshaped below to (realizations, P1*P2, dim)
        self.u0_train = u0_train      # one input function per realization
        self.N_train_realizations = usol.shape[0]
        self.P1 = P1
        self.P2 = P2
        self.dim = usol.shape[-1]     # number of output channels
        u_samples_reshape = usol.reshape(self.N_train_realizations, P1*P2, self.dim) # realizations x (mxp) x dim
        # Per-realization, per-channel sup-norms, used as loss weights.
        self.norms = vmap(np.linalg.norm, (0, None, None))(u_samples_reshape, np.inf, 0) # realizations x dim
        T, X = np.meshgrid(t1, s1)
        # Fix: use `is None`. `y == None` performs an elementwise array
        # comparison whose truth value is ambiguous, so passing a custom
        # query grid raised instead of being used.
        if y is None:
            self.y = np.hstack([T.flatten()[:,None], X.flatten()[:,None]])
        else:
            self.y = y
        self.batch_size = batch_size          # query points drawn per realization
        self.batch_size_all = batch_size_all  # total pairs served per ensemble member
        self.N_ensemble = N_ensemble
        self.key = rng_key

    def __getitem__(self, index):
        'Generate one batch of data'
        # `index` is ignored; the generator advances its PRNG key statefully.
        self.key, subkey = random.split(self.key)
        v_subkey = random.split(subkey, self.N_train_realizations)
        u_temp, y_temp, s_temp, w_temp = self.__get_realizations(v_subkey)
        self.key, subkey = random.split(self.key)
        v_subkey = random.split(subkey, self.N_ensemble)
        inputs, outputs = vmap(self.__data_generation, (0, None, None, None, None))(v_subkey, u_temp, y_temp, s_temp, w_temp)
        return inputs, outputs

    @partial(jit, static_argnums=(0,))
    def __data_generation(self, key, u_temp, y_temp, s_temp, w_temp):
        'Generates data containing batch_size samples'
        idx = random.choice(key, self.N_train_realizations * self.batch_size, (self.batch_size_all,), replace=False)
        u = u_temp[idx,:]
        y = y_temp[idx,:]
        s = s_temp[idx,:]
        w = w_temp[idx,:]
        # Construct batch
        inputs = (u, y)
        outputs = (s, w)
        return inputs, outputs

    @partial(jit, static_argnums=(0,))
    def __get_realizations(self, key):
        # Stack `batch_size` sampled query points from every training realization.
        idx_train = np.arange(self.N_train_realizations)
        u_temp, y_temp, s_temp, w_temp = vmap(self.__generate_one_realization_data, (0, 0, None, None, None))(key, idx_train, self.usol, self.u0_train, self.norms)
        u_temp = np.float32(u_temp.reshape(self.N_train_realizations * self.batch_size,-1))
        y_temp = np.float32(y_temp.reshape(self.N_train_realizations * self.batch_size,-1))
        s_temp = np.float32(s_temp.reshape(self.N_train_realizations * self.batch_size,-1))
        w_temp = np.float32(w_temp.reshape(self.N_train_realizations * self.batch_size,-1))
        return u_temp, y_temp, s_temp, w_temp

    def __generate_one_realization_data(self, key, idx, usol, u0_train, norms):
        # Sample `batch_size` query locations for realization `idx`.
        u = usol[idx]
        u0 = u0_train[idx]
        ww = norms[idx]
        s = np.swapaxes(u, 0, 1)
        s = s.reshape(self.P1*self.P2, self.dim)
        u = np.tile(u0, (self.batch_size, 1))
        w = np.tile(ww, (self.batch_size, 1)) # for dim > 1, otherwise, w = np.tile(ww, (self.batch_size))
        idx_keep = random.choice(key, s.shape[0], (self.batch_size,), replace=False)
        return u, self.y[idx_keep,:], s[idx_keep], w
| 6,289 | 42.680556 | 163 | py |
mcfit | mcfit-master/mcfit/mcfit.py | import math
import cmath
import warnings
import numpy
try:
import jax
jax.config.update("jax_enable_x64", True)
except ModuleNotFoundError as e:
JAXNotFoundError = e
class mcfit(object):
    r"""Compute integral transforms as a multiplicative convolution.

    The generic form is

    .. math:: G(y) = \int_0^\infty F(x) K(xy) \frac{dx}x

    Here :math:`F(x)` is the input function, :math:`G(y)` is the output
    function, and :math:`K(xy)` is the integral kernel.
    One is free to scale all three functions by a power law

    .. math:: g(y) = \int_0^\infty f(x) k(xy) \frac{dx}x

    in which :math:`f(x) = x^{-q} F(x)`, :math:`g(y) = y^q G(y)`, and
    :math:`k(t) = t^q K(t)`.
    The tilt parameter :math:`q` shifts power of :math:`x` between the input
    function and the kernel.

    Parameters
    ----------
    x : (Nin,) array_like
        log-spaced input argument
    MK : callable
        Mellin transform of the kernel

        .. math:: U_K(z) \equiv \int_0^\infty t^{z-1} K(t) dt

    q : float
        power-law tilt, can be used to balance :math:`f` at large and small
        :math:`x`. Avoid the singularities in `MK`
    N : int or complex, optional
        size of convolution, if complex then replaced by the smallest power of
        2 that is at least `N.imag` times the size of `x`; the input function
        is padded symmetrically to this size before convolution (see the
        `extrap` argument for available options); `N=len(x)` turns off the
        padding
    lowring : bool, optional
        if True and `N` is even, set `y` according to the low-ringing
        condition, otherwise see `xy`
    xy : float, optional
        reciprocal product :math:`x_{min} y_{max} = x_{max} y_{min}` to be used
        when `lowring` is False or `N` is odd.
        `xy = x[0] * y_max = x[1] * y[-1] = ... = x[i] * y[-i] = ... = x[-1] * y[1] = x_max * y[0]`.
        Note that :math:`x_{max}` is not included in `x` but bigger than
        `x.max()` by one log interval due to the discretization of the periodic
        approximant, and likewise for :math:`y_{max}`
    backend : str in {'numpy', 'jax'}, optional
        Which backend to use.

    Attributes
    ----------
    Nin : int
        input size, and that of the output if not `keeppads`
    N : int
        convolution size, and that of the output if `keeppads`
    x : (Nin,) ndarray
        input argument
    y : (Nin,) ndarray
        output argument
    _x_ : (N,) ndarray
        padded `x`
    _y_ : (N,) ndarray
        padded `y`
    xy : float
        reciprocal product
    prefac : array_like
        a function of `x` (excluding the tilt factor :math:`x^{-q}`) to
        convert an integral to the normal form
    postfac : array_like
        a function of `y` (excluding the tilt factor :math:`y^{-q}`) to
        convert an integral to the normal form
    xfac : (Nin,) ndarray
        a function of `x` (including the tilt factor :math:`x^{-q}`) to
        multiply before the convolution
    yfac : (Nin,) ndarray
        a function of `y` (including the tilt factor :math:`y^{-q}`) to
        multiply after the convolution
    _xfac_ : (N,) ndarray
        padded `_xfac_`
    _yfac_ : (N,) ndarray
        padded `_yfac_`

    Methods
    -------
    __call__
    matrix

    Examples
    --------
    >>> x = numpy.logspace(-3, 3, num=60, endpoint=False)
    >>> A = 1 / (1 + x*x)**1.5
    >>> H = mcfit.mcfit(x, mcfit.kernels.Mellin_BesselJ(0), q=1, lowring=True)
    >>> y, B = H(x**2 * A, extrap=True)
    >>> numpy.allclose(B, numpy.exp(-y))

    More conveniently, use the Hankel transform subclass

    >>> y, B = mcfit.transforms.Hankel(x, lowring=True)(A, extrap=True)

    Notes
    -----
    Caveats about q

    References
    ----------
    .. [1] J. D. Talman. Numerical Fourier and Bessel Transforms in Logarithmic Variables.
           Journal of Computational Physics, 29:35-48, October 1978.
    .. [2] A. J. S. Hamilton. Uncorrelated modes of the non-linear power spectrum.
           MNRAS, 312:257-284, February 2000.
    """

    def __init__(self, x, MK, q, N=2j, lowring=False, xy=1, backend='numpy'):
        # select the array module once; everything below goes through self.np
        if backend == 'numpy':
            self.np = numpy
            #self.jit = lambda fun: fun # TODO maybe use Numba?
        elif backend == 'jax':
            try:
                self.np = jax.numpy
                #self.jit = jax.jit # TODO maybe leave it to the user? jax.jit for CPU too
            except NameError:
                # jax import failed at module load; re-raise the saved error
                raise JAXNotFoundError
        else:
            raise ValueError(f"backend {backend} not supported")
        #self.__call__ = self.jit(self.__call__)
        #self.matrix = self.jit(self.matrix)
        self.x = self.np.asarray(x)
        self.Nin = len(x)
        self.MK = MK
        self.q = q
        self.N = N
        self.lowring = lowring
        self.xy = xy
        self._setup()
        # setting the factors via the property setters also builds xfac/yfac
        self.prefac = 1
        self.postfac = 1

    @property
    def prefac(self):
        return self._prefac

    @prefac.setter
    def prefac(self, value):
        # keep xfac (and its padded copy) in sync with the prefactor
        self._prefac = value
        self.xfac = self._prefac * self.x**(-self.q)
        self._xfac_ = self._pad(self.xfac, 0, True, False)

    @property
    def postfac(self):
        return self._postfac

    @postfac.setter
    def postfac(self, value):
        # keep yfac (and its padded copy) in sync with the postfactor
        self._postfac = value
        self.yfac = self._postfac * self.y**(-self.q)
        self._yfac_ = self._pad(self.yfac, 0, True, True)

    def _setup(self):
        """Validate spacing, fix N, choose xy/y, and precompute the kernel coefficients u_m."""
        if self.Nin < 2:
            raise ValueError(f"input size {self.Nin} must not be smaller than 2")
        # log-spacing of the input grid; only the first few points are checked
        Delta = math.log(self.x[-1] / self.x[0]) / (self.Nin - 1)
        x_head = self.x[:8]
        if not self.np.allclose(self.np.log(x_head[1:] / x_head[:-1]), Delta,
                                rtol=1e-3):
            warnings.warn("input must be log-spaced")
        # complex N means "pad to the next power of 2 at least N.imag times Nin"
        if isinstance(self.N, complex):
            folds = math.ceil(math.log2(self.Nin * self.N.imag))
            self.N = 2**folds
        if self.N < self.Nin:
            raise ValueError(f"convolution size {self.N} must not be smaller than "
                             f"the input size {self.Nin}")
        if self.lowring and self.N % 2 == 0:
            # low-ringing condition: pick xy so the Nyquist mode of MK is real
            lnxy = Delta / math.pi * cmath.phase(self.MK(self.q + 1j * math.pi / Delta))
            self.xy = math.exp(lnxy)
        else:
            lnxy = math.log(self.xy)
        # output grid is the reciprocal of the input grid scaled by xy
        self.y = math.exp(lnxy - Delta) / self.x[::-1]
        self._x_ = self._pad(self.x, 0, True, False)
        self._y_ = self._pad(self.y, 0, True, True)
        # Mellin-space kernel coefficients u_m, with the phase for the xy offset
        m = numpy.arange(0, self.N//2 + 1)
        self._u = self.MK(self.q + 2j * math.pi / self.N / Delta * m)
        self._u *= numpy.exp(-2j * math.pi * lnxy / self.N / Delta * m)
        self._u = self.np.asarray(self._u, dtype=(self.x[0] + 0j).dtype)
        # following is unnecessary because hfft ignores the imag at Nyquist anyway
        #if not self.lowring and self.N % 2 == 0:
        #    self._u[self.N//2] = self._u[self.N//2].real

    def __call__(self, F, axis=-1, extrap=False, keeppads=False, convonly=False):
        """Evaluate the integral.

        Parameters
        ----------
        F : (..., Nin, ...) or (..., N, ...) array_like
            input function; to be padded according to `extrap` in size from
            `Nin` to `N`, but not if already of size `N`
        axis : int, optional
            axis along which to integrate
        extrap : {bool, 'const'} or 2-tuple, optional
            Method to extrapolate `F`.
            For a 2-tuple, the two elements are for the left and right pads,
            whereas a single value applies to both ends.
            Options are:
            * True: power-law extrapolation using the end segment
            * False: zero padding
            * 'const': constant padding with the end point value
        keeppads : bool, optional
            whether to keep the padding in the output
        convonly : bool, optional
            whether to skip the scaling by `_xfac_` and `_yfac_`, useful for
            evaluating integral with multiple kernels

        Returns
        -------
        y : (Nin,) or (N,) ndarray
            log-spaced output argument
        G : (..., Nin, ...) or (..., N, ...) ndarray
            output function
        """
        F = self.np.asarray(F)
        # reshape helper so 1-D factors broadcast along `axis`
        to_axis = [1] * F.ndim
        to_axis[axis] = -1
        f = self._pad(F, axis, extrap, False)
        if not convonly:
            f = self._xfac_.reshape(to_axis) * f
        # convolution
        f = self.np.fft.rfft(f, axis=axis)  # f(x_n) -> f_m
        g = f * self._u.reshape(to_axis)  # f_m -> g_m
        g = self.np.fft.hfft(g, n=self.N, axis=axis) / self.N  # g_m -> g(y_n)
        if not keeppads:
            G = self._unpad(g, axis, True)
            if not convonly:
                G = self.yfac.reshape(to_axis) * G
            return self.y, G
        else:
            _G_ = g
            if not convonly:
                _G_ = self._yfac_.reshape(to_axis) * _G_
            return self._y_, _G_

    def inv(self):
        """Invert the transform.

        After calling this method, calling the instance will do the inverse
        transform. Calling this twice return the instance to the original
        transform.
        """
        # swap roles of x and y and invert all multiplicative factors
        self.x, self.y = self.y, self.x
        self._x_, self._y_ = self._y_, self._x_
        self.xfac, self.yfac = 1 / self.yfac, 1 / self.xfac
        self._xfac_, self._yfac_ = 1 / self._yfac_, 1 / self._xfac_
        self._u = 1 / self._u.conj()

    def matrix(self, full=False, keeppads=True):
        """Return matrix form of the integral transform.

        Parameters
        ----------
        full : bool, optional
            when False return two vector factors and convolution matrix
            separately, otherwise return full transformation matrix
        keeppads : bool, optional
            whether to keep the padding in the output

        Returns
        -------
        If full is False, output separately
        a : (1, N) or (1, Nin) ndarray
            "After" factor, `_yfac_` or `yfac`
        b : (N,) or (Nin,) ndarray
            "Before" factor, `_xfac_` or `xfac`
        C : (N, N) or (Nin, Nin) ndarray
            Convolution matrix, circulant

        Otherwise, output the full matrix, combining `a`, `b`, and `C`
        M : (N, N) or (Nin, Nin) ndarray
            Full transformation matrix, `M = a * C * b`

        Notes
        -----
        `M`, `a`, `b`, and `C` are padded by default.
        This is not meant for evaluation with matrix multiplication but in case
        one is interested in the tranformation itself.
        When `N` is even and `lowring` is False, :math:`C C^{-1}` and :math:`M
        M^{-1}` can deviate from the identity matrix because the imaginary part
        of the Nyquist modes are dropped.
        The convolution matrix is a circulant matrix, with its first row and
        first column being the Fourier transform of :math:`u_m`.
        Indeed :math:`u_m` are the eigenvalues of the convolution matrix, that
        are diagonalized by the DFT matrix.
        Thus :math:`1/u_m` are the eigenvalues of the inverse convolution
        matrix.
        """
        v = self.np.fft.hfft(self._u, n=self.N) / self.N
        # circulant index trick: row i, column j -> v[(i - j) mod N]
        idx = sum(self.np.ogrid[0:self.N, -self.N:0])
        C = v[idx]  # follow scipy.linalg.{circulant,toeplitz,hankel}
        if keeppads:
            a = self._yfac_.copy()
            b = self._xfac_.copy()
        else:
            a = self.yfac.copy()
            b = self.xfac.copy()
            C = self._unpad(C, 0, True)
            C = self._unpad(C, 1, False)
        a = a.reshape(-1, 1)
        if not full:
            return a, b, C
        else:
            return a * C * b

    def _pad(self, a, axis, extrap, out):
        """Add padding to an array.

        Parameters
        ----------
        a : (..., Nin, ...) or (..., N, ...) ndarray
            array to be padded, but not if already of size `N`
        axis : int
            axis along which to pad
        extrap : {bool, 'const'} or 2-tuple
            Method to extrapolate `a`.
            For a 2-tuple, the two elements are for the left and right pads,
            whereas a single value applies to both ends.
            Options are:
            * True: power-law extrapolation using the end segment
            * False: zero padding
            * 'const': constant padding with the end point value
        out : bool
            pad the output if True, otherwise the input; the two cases have
            their left and right pad sizes reversed
        """
        if a.shape[axis] == self.N:
            return a
        elif a.shape[axis] != self.Nin:
            raise ValueError("array size must be that of the input or the convolution")
        axis %= a.ndim  # to fix the indexing below with axis+1
        to_axis = [1] * a.ndim
        to_axis[axis] = -1
        Npad = self.N - self.Nin
        if out:
            _Npad, Npad_ = Npad - Npad//2, Npad//2
        else:
            _Npad, Npad_ = Npad//2, Npad - Npad//2
        # a single extrap value applies to both ends; a 2-tuple splits them
        try:
            _extrap, extrap_ = extrap
        except (TypeError, ValueError):
            _extrap = extrap_ = extrap
        # left pad
        if isinstance(_extrap, bool):
            if _extrap:
                # power-law continuation from the first two points
                end = self.np.take(a, self.np.array([0]), axis=axis)
                ratio = self.np.take(a, self.np.array([1]), axis=axis) / end
                exp = self.np.arange(-_Npad, 0).reshape(to_axis)
                _a = end * ratio ** exp
            else:
                _a = self.np.zeros(a.shape[:axis] + (_Npad,) + a.shape[axis+1:])
        elif _extrap == 'const':
            end = self.np.take(a, self.np.array([0]), axis=axis)
            _a = self.np.repeat(end, _Npad, axis=axis)
        else:
            raise ValueError(f"left extrap {_extrap} not supported")
        # right pad
        if isinstance(extrap_, bool):
            if extrap_:
                # power-law continuation from the last two points
                end = self.np.take(a, self.np.array([-1]), axis=axis)
                ratio = end / self.np.take(a, self.np.array([-2]), axis=axis)
                exp = self.np.arange(1, Npad_ + 1).reshape(to_axis)
                a_ = end * ratio ** exp
            else:
                a_ = self.np.zeros(a.shape[:axis] + (Npad_,) + a.shape[axis+1:])
        elif extrap_ == 'const':
            end = self.np.take(a, self.np.array([-1]), axis=axis)
            a_ = self.np.repeat(end, Npad_, axis=axis)
        else:
            raise ValueError(f"right extrap {extrap_} not supported")
        return self.np.concatenate((_a, a, a_), axis=axis)

    def _unpad(self, a, axis, out):
        """Undo padding in an array.

        Parameters
        ----------
        a : (..., N, ...) or (..., Nin, ...) ndarray
            array to be unpadded, but not if already of size `Nin`
        axis : int
            axis along which to unpad
        out : bool
            unpad the output if True, otherwise the input; the two cases have
            their left and right pad sizes reversed
        """
        if a.shape[axis] == self.Nin:
            return a
        elif a.shape[axis] != self.N:
            raise ValueError("array size must be that of the input or the convolution")
        Npad = self.N - self.Nin
        if out:
            _Npad, Npad_ = Npad - Npad//2, Npad//2
        else:
            _Npad, Npad_ = Npad//2, Npad - Npad//2
        return self.np.take(a, self.np.arange(_Npad, self.N - Npad_), axis=axis)
| 15,495 | 34.541284 | 100 | py |
FCtL | FCtL-main/train_deep_globe.py | #!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import torch
import torch.nn as nn
from torchvision import transforms
from tqdm import tqdm
from dataset.deep_globe import DeepGlobe, classToRGB, is_image_file
from utils.loss import FocalLoss
from utils.lr_scheduler import LR_Scheduler
from tensorboardX import SummaryWriter
from helper import create_model_load_weights, get_optimizer, Trainer, Evaluator, collate, collate_test
from option import Options
args = Options().parse()

# dataset 1 keeps the Options defaults; dataset 2 is a 2-class variant with
# its own data/model/log directories
dataset = args.dataset
if dataset == 1:
    pass
elif dataset == 2:
    args.n_class = 2
    args.data_path = "./data_1/"
    args.model_path = "./saved_models_1/"
    args.log_path = "./runs_1/"
n_class = args.n_class  # 2
print("n_class:", n_class)

torch.backends.cudnn.deterministic = True

data_path = args.data_path  # data
model_path = args.model_path  # saved_models
log_path = args.log_path  # log
if not os.path.isdir(model_path): os.mkdir(model_path)
if not os.path.isdir(log_path): os.mkdir(log_path)
print("data_path:", data_path, "model_path:", model_path, "log_path", log_path)
task_name = args.task_name
print("task_name:", task_name)
mode = args.mode
train = args.train
val = args.val
print("mode:", mode, "train:", train, "val:", val)

###################################
print("preparing datasets and dataloaders......")
batch_size = args.batch_size
num_worker = 0
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# train / official test / cross-validation splits, filtered to image files
ids_train = [image_name for image_name in os.listdir(os.path.join(data_path, "train", "Sat")) if is_image_file(image_name)]
ids_test = [image_name for image_name in os.listdir(os.path.join(data_path, "offical_crossvali", "Sat")) if is_image_file(image_name)]
ids_val = [image_name for image_name in os.listdir(os.path.join(data_path, "crossvali", "Sat")) if is_image_file(image_name)]

# custom collate keeps PIL images as Python lists (variable sizes)
dataset_train = DeepGlobe(dataset, os.path.join(data_path, "train"), ids_train, label=True, transform=True)
dataloader_train = torch.utils.data.DataLoader(dataset=dataset_train, batch_size=batch_size, num_workers=num_worker, collate_fn=collate, shuffle=True, pin_memory=True)
dataset_test = DeepGlobe(dataset, os.path.join(data_path, "offical_crossvali"), ids_test, label=False)
dataloader_test = torch.utils.data.DataLoader(dataset=dataset_test, batch_size=batch_size, num_workers=num_worker, collate_fn=collate_test, shuffle=False, pin_memory=True)
dataset_val = DeepGlobe(dataset, os.path.join(data_path, "crossvali"), ids_val, label=True)
dataloader_val = torch.utils.data.DataLoader(dataset=dataset_val, batch_size=batch_size, num_workers=num_worker, collate_fn=collate, shuffle=False, pin_memory=True)

print('train_len:', len(ids_train))
print('test_len:', len(ids_test))
print('val_len:', len(ids_val))

##### sizes are (w, h) ##############################
# make sure margin / 32 is over 1.5 AND size_g is divisible by 4
size_p = (args.size_p, args.size_p)  # cropped local patch size 508
size_g = (args.size_g, args.size_g)  # resize global image size 508
sub_batch_size = args.sub_batch_size  # batch size for train local patches 6

###################################
print("creating models......")
pre_path = os.path.join(model_path, args.pre_path)
glo_path_10 = os.path.join(model_path, args.glo_path_10)
glo_path_15 = os.path.join(model_path, args.glo_path_15)
print("pre_path:", pre_path, "medium_path:", glo_path_10, "large_path:", glo_path_15)
# main local model plus two frozen context branches (medium / large crops)
model, global_fixed_medium, global_fixed_large = create_model_load_weights(n_class, pre_path, glo_path_10, glo_path_15, mode)

###################################
num_epochs = args.num_epochs  # 50
lens = args.lens
start = args.start
learning_rate = args.lr  # 5e-05
context10 = args.context10
context15 = args.context15
optimizer = get_optimizer(model, learning_rate)
scheduler = LR_Scheduler('poly', learning_rate, num_epochs, len(dataloader_train))
##################################

criterion1 = FocalLoss(gamma=3)
criterion = lambda x, y: criterion1(x, y)

if val:
    writer = SummaryWriter(log_dir=log_path + task_name)
    f_log = open(log_path + task_name + ".log", 'w')

trainer = Trainer(criterion, optimizer, n_class, size_p, size_g, sub_batch_size, mode, dataset, context10, context15)
evaluator = Evaluator(n_class, size_p, size_g, sub_batch_size, mode, train, dataset, context10, context15)

best_pred = 0.0
print("start training......")
for epoch in range(start, start + lens):
    if not train:
        break
    trainer.set_train(model)
    optimizer.zero_grad()
    tbar = tqdm(dataloader_train); train_loss = 0
    for i_batch, sample_batched in enumerate(tbar):
        scheduler(optimizer, i_batch, epoch, best_pred)  # update lr
        loss = trainer.train(sample_batched, model, global_fixed_medium, global_fixed_large)
        train_loss += loss.item()
        score_train = trainer.get_scores()
        tbar.set_description('epoch:%d Train loss: %.3f; mIoU: %.3f' % (epoch, train_loss / (i_batch + 1),
                             np.mean(np.nan_to_num(score_train["iou"][1:]))))
        writer.add_scalar('train_loss', loss, epoch * len(dataloader_train) + i_batch)
        writer.add_scalar('train_miou', np.mean(np.nan_to_num(score_train["iou"][1:])), epoch * len(dataloader_train) + i_batch)
    score_train = trainer.get_scores()
    trainer.reset_metrics()
    # torch.cuda.empty_cache()

    # validate (and checkpoint) every 5 epochs
    if (epoch + 1) % 5 == 0:
        with torch.no_grad():
            print("evaling...")
            model.eval()
            tbar = tqdm(dataloader_val)
            for i_batch, sample_batched in enumerate(tbar):
                predictions = evaluator.eval_test(sample_batched, model, global_fixed_medium, global_fixed_large)
                score_val = evaluator.get_scores()
                # use [1:] since class0 is not considered in deep_globe metric
                tbar.set_description('mIoU: %.3f' % (np.mean(np.nan_to_num(score_val["iou"])[1:])))
                images = sample_batched['image']
                labels = sample_batched['label']  # PIL images
                # log one rotating sample image/mask/prediction per epoch
                if i_batch * batch_size + len(images) > (epoch % len(dataloader_val)) and i_batch * batch_size <= (epoch % len(dataloader_val)):
                    writer.add_image('image', transforms.ToTensor()(images[(epoch % len(dataloader_val)) - i_batch * batch_size]), epoch)
                    writer.add_image('mask', classToRGB(dataset, np.array(labels[(epoch % len(dataloader_val)) - i_batch * batch_size])), epoch)
                    writer.add_image('prediction', classToRGB(dataset, predictions[(epoch % len(dataloader_val)) - i_batch * batch_size]), epoch)
        # NOTE(review): checkpoint/log placement reconstructed from flattened
        # indentation — assumed to run once per validation epoch; confirm
        torch.save(model.state_dict(), model_path + task_name + ".epoch" + str(epoch) + ".pth")
        score_val = evaluator.get_scores()
        evaluator.reset_metrics()
        if np.mean(np.nan_to_num(score_val["iou"][1:])) > best_pred: best_pred = np.mean(np.nan_to_num(score_val["iou"][1:]))
        log = ""
        log = log + 'epoch [{}/{}] IoU: train = {:.4f}, val = {:.4f}'.format(epoch+1, num_epochs, np.mean(np.nan_to_num(score_train["iou"][1:])), np.mean(np.nan_to_num(score_val["iou"][1:]))) + "\n"
        log = log + "train: " + str(score_train["iou"]) + "\n"
        log = log + "val:" + str(score_val["iou"]) + "\n"
        log += "================================\n"
        print(log)
        f_log.write(log)
        f_log.flush()
        writer.add_scalars('IoU', {'train iou': np.mean(np.nan_to_num(score_train["iou"][1:])), 'validation iou': np.mean(np.nan_to_num(score_val["iou"][1:]))}, epoch)

if val: f_log.close()

# pure inference: dump RGB prediction masks for the official test split
if not train:
    with torch.no_grad():
        print("testing...")
        model.eval()
        tbar = tqdm(dataloader_test)
        for i_batch, sample_batched in enumerate(tbar):
            predictions = evaluator.eval_test(sample_batched, model, global_fixed_medium, global_fixed_large)
            images = sample_batched['image']
            if not os.path.isdir("./prediction/"): os.mkdir("./prediction/")
            for i in range(len(images)):
                transforms.functional.to_pil_image(classToRGB(dataset, predictions[i])).save("./prediction/" + sample_batched['id'][i] + "_mask.png")
| 8,219 | 46.514451 | 202 | py |
FCtL | FCtL-main/helper.py | #!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, division, print_function
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from utils.metrics import ConfusionMatrix
from PIL import Image, ImageOps
from models.fcn import FCN8, MiniFCN8
# torch.cuda.synchronize()
# torch.backends.cudnn.benchmark = True
# reproducible cuDNN kernels (at some speed cost)
torch.backends.cudnn.deterministic = True

# shared PIL -> float tensor transform used by images_transform below
transformer = transforms.Compose([
    transforms.ToTensor(),
])
def resize(images, shape, label=False):
    """Resize a list of PIL images to `shape` ((w, h)).

    Label maps use nearest-neighbour so class ids are never interpolated;
    ordinary images use bilinear. Returns a new list.
    """
    interp = Image.NEAREST if label else Image.BILINEAR
    return [img.resize(shape, interp) for img in images]
def _mask_transform(mask):
target = np.array(mask).astype('int32')
return target
def masks_transform(masks, numpy=False):
    """Stack a list of PIL masks into one int array.

    Returns a numpy int32 array when `numpy` is True, otherwise a long
    CUDA tensor.
    """
    stacked = np.array([_mask_transform(m) for m in masks])
    if numpy:
        return stacked
    return torch.from_numpy(stacked).long().cuda()
def images_transform(images):
    """Convert a list of PIL images into one stacked CUDA tensor (b, c, h, w)."""
    tensors = [transformer(img) for img in images]
    return torch.stack(tensors, dim=0).cuda()
def get_patch_info(shape, p_size):
    '''
    shape: origin image size, (x, y)
    p_size: patch size (square)
    return: n_x, n_y, step_x, step_y

    Chooses, per axis, the number of patches needed to cover the image with
    at least 50px overlap between neighbours, and the fractional stride.
    '''
    def _axis(extent):
        # minimum patch count that covers `extent`
        n = 1
        while extent > n * p_size:
            n += 1
        if n == 1:
            # image fits in a single patch: no stride needed.
            # (The original code divided by n - 1 == 0 here and crashed
            # whenever an image dimension equalled p_size.)
            return 1, 0.0
        # grow n until neighbouring patches overlap by at least 50px
        while p_size - 1.0 * (extent - p_size) / (n - 1) < 50:
            n += 1
        return n, (extent - p_size) * 1.0 / (n - 1)

    n, step_x = _axis(shape[0])
    m, step_y = _axis(shape[1])
    return n, m, step_x, step_y
def global2patch(images, p_size):
    '''
    image/label => patches
    p_size: patch size
    return: list of PIL patch images; coordinates: images->patches; ratios: (h, w)

    Also returns, per image, a `template` count map (how many patches cover
    each pixel, for later overlap normalization) and the original (h, w) size.
    '''
    patches = []; coordinates = []; templates = []; sizes = []; ratios = [(0, 0)] * len(images); patch_ones = np.ones(p_size)
    for i in range(len(images)):
        w, h = images[i].size
        size = (h, w)  # PIL reports (w, h); everything below works in (h, w)
        sizes.append(size)
        ratios[i] = (float(p_size[0]) / size[0], float(p_size[1]) / size[1])
        template = np.zeros(size)
        n_x, n_y, step_x, step_y = get_patch_info(size, p_size[0])
        # pre-fill with placeholders, then overwrite each slot below
        patches.append([images[i]] * (n_x * n_y))
        coordinates.append([(0, 0)] * (n_x * n_y))
        for x in range(n_x):
            # last row/column is anchored to the image edge
            if x < n_x - 1: top = int(np.round(x * step_x))
            else: top = size[0] - p_size[0]
            for y in range(n_y):
                if y < n_y - 1: left = int(np.round(y * step_y))
                else: left = size[1] - p_size[1]
                template[top:top+p_size[0], left:left+p_size[1]] += patch_ones
                # coordinates stored as fractions of the image size
                coordinates[i][x * n_y + y] = (1.0 * top / size[0], 1.0 * left / size[1])
                patches[i][x * n_y + y] = transforms.functional.crop(images[i], top, left, p_size[0], p_size[1])  # 508 508
        templates.append(Variable(torch.Tensor(template).expand(1, 1, -1, -1)).cuda())
    return patches, coordinates, templates, sizes, ratios
def global2bigpatch(images, p_size, mul=2):
    """Crop context patches covering `mul` times the side of each local patch.

    The image is black-padded by ``sz = p_size * (mul - 1) / 2`` on every
    side so that a ``p_size * mul`` crop anchored at each local patch's
    top-left corner is centred on that patch and always stays in bounds.
    Each context crop is resized back down to `p_size` (bilinear).

    Returns the list-of-lists of context patches, aligned index-for-index
    with the output of `global2patch`.
    """
    # Generalized pad width: reproduces the original hard-coded table
    # (mul 1.5 -> p/4, 2 -> p/2, 2.5 -> 3p/4, 3 -> p, 4 -> 3p/2) and also
    # works for any other mul, instead of leaving `sz` undefined (NameError).
    sz = int(p_size[0] * (mul - 1) / 2)
    patches = []
    for i in range(len(images)):
        w, h = images[i].size
        big = ImageOps.expand(images[i], (sz, sz, sz, sz), fill='black')
        size = (h, w)
        n_x, n_y, step_x, step_y = get_patch_info(size, p_size[0])
        patches.append([big] * (n_x * n_y))
        for x in range(n_x):
            # last row/column is anchored to the image edge, as in global2patch
            if x < n_x - 1: top = int(np.round(x * step_x))
            else: top = size[0] - p_size[0]
            for y in range(n_y):
                if y < n_y - 1: left = int(np.round(y * step_y))
                else: left = size[1] - p_size[1]
                patches[i][x * n_y + y] = transforms.functional.crop(
                    big, top, left, int(p_size[0] * mul), int(p_size[1] * mul)
                ).resize(p_size, Image.BILINEAR)
    return patches
def patch2global(patches, n_class, sizes, coordinates, p_size):
    '''
    predicted patches (after classify layer) => predictions
    return: list of np.array

    Patch scores are summed back onto a zero canvas per image; overlapping
    regions simply accumulate.
    '''
    predictions = [np.zeros((n_class, height, width)) for (height, width) in sizes]
    for i, (height, width) in enumerate(sizes):
        for j, (top_frac, left_frac) in enumerate(coordinates[i]):
            # fractional coordinates -> pixel offsets
            top = int(np.round(top_frac * height))
            left = int(np.round(left_frac * width))
            predictions[i][:, top:top + p_size[0], left:left + p_size[1]] += patches[i][j]
    return predictions
def collate(batch):
    """Collate for labelled samples: keep PIL images/labels as plain lists (w, h)."""
    return {
        'image': [item['image'] for item in batch],
        'label': [item['label'] for item in batch],
        'id': [item['id'] for item in batch],
    }
def collate_test(batch):
    """Collate for unlabelled test samples: images stay plain lists (w, h)."""
    return {
        'image': [item['image'] for item in batch],
        'id': [item['id'] for item in batch],
    }
def create_model_load_weights(n_class, pre_path="", glo_path_10="", glo_path_15="", mode=1):
    """Build the main FCN8 model and, depending on `mode`, frozen context branches.

    mode 1: local model only; mode 2: + medium-context branch (10);
    mode 3: + medium and large-context branches (10 and 15).
    Paths equal to the bare model dir ('./saved_models_1/') mean "no weights".
    All models are wrapped in DataParallel and moved to CUDA.
    """
    model = FCN8(n_class, mode)
    model = nn.DataParallel(model)
    model = model.cuda()
    if pre_path != './saved_models_1/':
        print('prepareing model...')
        # load fixed basic global branch
        partial = torch.load(pre_path)
        state = model.state_dict()
        # 1. filter out unnecessary keys
        pretrained_dict = {k: v for k, v in partial.items() if k in state}
        # 2. overwrite entries in the existing state dict
        state.update(pretrained_dict)
        # 3. load the new state dict
        model.load_state_dict(state)
    global_fixed_10 = None
    if mode == 2 or mode == 3:
        print('prepareing global_10 model...')
        global_fixed_10 = MiniFCN8(n_class)
        global_fixed_10 = nn.DataParallel(global_fixed_10)
        global_fixed_10 = global_fixed_10.cuda()
        if glo_path_10 != './saved_models_1/':
            partial = torch.load(glo_path_10)
            state = global_fixed_10.state_dict()
            pretrained_dict = {k: v for k, v in partial.items() if k in state}
            state.update(pretrained_dict)
            global_fixed_10.load_state_dict(state)
        # context branches are frozen feature extractors
        global_fixed_10.eval()
    global_fixed_15 = None
    if mode == 3:
        print('prepareing global_15 model...')
        global_fixed_15 = MiniFCN8(n_class)
        global_fixed_15 = nn.DataParallel(global_fixed_15)
        global_fixed_15 = global_fixed_15.cuda()
        if glo_path_15 != './saved_models_1/':
            partial = torch.load(glo_path_15)
            state = global_fixed_15.state_dict()
            pretrained_dict = {k: v for k, v in partial.items() if k in state}
            state.update(pretrained_dict)
            global_fixed_15.load_state_dict(state)
        global_fixed_15.eval()
    return model, global_fixed_10, global_fixed_15
def get_optimizer(model, learning_rate=2e-5):
    """Adam over the DataParallel-wrapped module's parameters (wd 5e-4)."""
    param_groups = [
        {'params': model.module.parameters(), 'lr': learning_rate},
    ]
    return torch.optim.Adam(param_groups, weight_decay=5e-4)
class Trainer(object):
    """Patch-wise trainer: crops each image into local (and context) patches,
    accumulates gradients over sub-batches, and tracks a confusion matrix."""

    def __init__(self, criterion, optimizer, n_class, size_p, size_g, sub_batch_size=6, mode=1, dataset=1, context10=2, context15=3):
        # criterion: loss fn(logits, labels); size_p: local patch size (w, h)
        # mode 1/2/3 selects how many frozen context branches feed the model
        # context10/context15: context-crop multipliers for the two branches
        self.criterion = criterion
        self.optimizer = optimizer
        self.metrics = ConfusionMatrix(n_class)
        self.n_class = n_class
        self.size_p = size_p
        self.size_g = size_g
        self.sub_batch_size = sub_batch_size
        self.mode = mode
        self.context10 = context10
        self.context15 = context15

    def set_train(self, model):
        model.module.train()

    def get_scores(self):
        score = self.metrics.get_scores()
        return score

    def reset_metrics(self):
        self.metrics.reset()

    def train(self, sample, model, global_fixed_10, global_fixed_15):
        """One optimizer step over all patches of one batch; returns the last sub-batch loss."""
        images, labels = sample['image'], sample['label']  # PIL images
        labels_npy = masks_transform(labels, numpy=True)  # label of origin size in numpy
        patches, coordinates, templates, sizes, ratios = global2patch(images, self.size_p)
        label_patches, _, _, _, _ = global2patch(labels, self.size_p)
        predicted_patches = [np.zeros((len(coordinates[i]), self.n_class, self.size_p[0], self.size_p[1])) for i in range(len(images))]
        ################## 1 2 3
        if self.mode != 1:
            big_patches_10 = global2bigpatch(images, self.size_p, self.context10)
            if self.mode == 3:
                big_patches_15 = global2bigpatch(images, self.size_p, self.context15)
        pool5_10, pool5_15 = None, None
        # training with patches ###########################################
        for i in range(len(images)):
            j = 0
            while j < len(coordinates[i]):
                patches_var = images_transform(patches[i][j:j+self.sub_batch_size])  # b, c, h, w
                label_patches_var = masks_transform(label_patches[i][j:j+self.sub_batch_size])
                big_patches_10_var = None
                if self.mode != 1:
                    big_patches_10_var = images_transform(big_patches_10[i][j:j+self.sub_batch_size])
                    if self.mode == 3:
                        # frozen context branches: features only, no gradients
                        with torch.no_grad():
                            pool5_10 = global_fixed_10.forward(big_patches_10_var)
                            big_patches_15_var = images_transform(big_patches_15[i][j:j+self.sub_batch_size])
                            pool5_15 = global_fixed_15.forward(big_patches_15_var)
                if self.mode == 1 or self.mode == 2:
                    output_patches = model.forward(patches_var, y=big_patches_10_var)
                else:
                    output_patches = model.forward(patches_var, pool5_10, pool5_15)
                loss = self.criterion(output_patches, label_patches_var)
                # gradients accumulate across all sub-batches before the step below
                loss.backward()
                # patch predictions
                predicted_patches[i][j:j+output_patches.size()[0]] = F.interpolate(output_patches, size=self.size_p, mode='nearest').data.cpu().numpy()
                j += self.sub_batch_size
        self.optimizer.step()
        self.optimizer.zero_grad()
        ####################################################################################
        scores = np.array(patch2global(predicted_patches, self.n_class, sizes, coordinates, self.size_p))  # merge softmax scores from patches (overlaps)
        predictions = scores.argmax(1)  # b, h, w
        self.metrics.update(labels_npy, predictions)
        return loss
class Evaluator(object):
    """Patch-wise evaluator for the segmentation model.

    When ``val`` is False, test-time augmentation is enabled: predictions are
    accumulated over a horizontal flip and all four 90-degree rotations.
    Predictions are made on local patches (optionally with medium/large
    context crops, depending on ``mode``) and merged back to full images.
    """
    def __init__(self, n_class, size_p, size_g, sub_batch_size=6, mode=1, val=True, dataset=1, context10=2, context15=3):
        # NOTE(review): `dataset` is accepted but never used in this class --
        # presumably kept for signature symmetry with the trainer; confirm.
        self.metrics = ConfusionMatrix(n_class)
        self.n_class = n_class
        self.size_p = size_p    # local patch size (h, w)
        self.size_g = size_g    # global image size (stored; unused here)
        self.sub_batch_size = sub_batch_size
        self.mode = mode        # 1: local only, 2: +medium context, 3: +medium & large context
        self.val = val
        self.context10 = context10  # scale factor of the medium context crop
        self.context15 = context15  # scale factor of the large context crop
        if not val:
            # Test mode: use flip + 4-way rotation test-time augmentation.
            self.flip_range = [False, True]
            self.rotate_range = [0, 1, 2, 3]
        else:
            # Validation mode: single pass, no augmentation.
            self.flip_range = [False]
            self.rotate_range = [0]
    def get_scores(self):
        """Return the metrics accumulated so far (confusion-matrix scores)."""
        score = self.metrics.get_scores()
        return score
    def reset_metrics(self):
        """Clear the accumulated confusion matrix."""
        self.metrics.reset()
    def eval_test(self, sample, model, global_fixed_10, global_fixed_15):
        """Evaluate one batch; returns per-image argmax prediction maps.

        sample: dict with 'image' (list of PIL images) and, when ``val``,
        'label'. ``global_fixed_10``/``global_fixed_15`` are the frozen
        context encoders used only in mode 3.
        """
        with torch.no_grad():
            images = sample['image']
            if self.val:
                labels = sample['label'] # PIL images
                labels_npy = masks_transform(labels, numpy=True)
            # Copy so the in-place rotations below don't touch the originals.
            images = [ image.copy() for image in images ]
            # Per-image score accumulator over all TTA passes (1, C, H, W).
            scores = [ np.zeros((1, self.n_class, images[i].size[1], images[i].size[0])) for i in range(len(images)) ]
            for flip in self.flip_range:
                if flip:
                    # we already rotated images for 270'
                    for b in range(len(images)):
                        images[b] = transforms.functional.rotate(images[b], 90) # rotate back!
                        images[b] = transforms.functional.hflip(images[b])
                for angle in self.rotate_range:
                    if angle > 0:
                        for b in range(len(images)):
                            images[b] = transforms.functional.rotate(images[b], 90)
                    # prepare global images onto cuda
                    patches, coordinates, templates, sizes, ratios = global2patch(images, self.size_p)
                    predicted_patches = [ np.zeros((len(coordinates[i]), self.n_class, self.size_p[0], self.size_p[1])) for i in range(len(images)) ]
                    if self.mode == 2 or self.mode == 3:
                        big_patches_10 = global2bigpatch(images, self.size_p, self.context10)
                    if self.mode == 3:
                        big_patches_15 = global2bigpatch(images, self.size_p, self.context15)
                    # eval with patches ###########################################
                    for i in range(len(images)):
                        j = 0
                        # Run the patches of image i in sub-batches.
                        while j < len(coordinates[i]):
                            patches_var = images_transform(patches[i][j : j+self.sub_batch_size]) # b, c, h, w
                            big_patches_10_var = None
                            if self.mode == 2 or self.mode == 3:
                                big_patches_10_var = images_transform(big_patches_10[i][j : j+self.sub_batch_size])
                            if self.mode == 1 or self.mode == 2:
                                output_patches = model.forward(patches_var, y=big_patches_10_var)
                            else: ##3
                                # Mode 3: encode both context scales with the frozen encoders.
                                pool5_10 = global_fixed_10.forward(big_patches_10_var)
                                big_patches_15_var = images_transform(big_patches_15[i][j : j+self.sub_batch_size])
                                pool5_15 = global_fixed_15.forward(big_patches_15_var)
                                output_patches = model.forward(patches_var, pool5_10, pool5_15)
                            # patch predictions
                            predicted_patches[i][j:j+output_patches.size()[0]] += F.interpolate(output_patches, size=self.size_p, mode='nearest').data.cpu().numpy()
                            j += patches_var.size()[0]
                    # NOTE(review): the two `scores[i] += ...` lines below sit
                    # OUTSIDE the `for i` loop and therefore only accumulate for
                    # the last image index -- looks like an indentation bug
                    # (compare the trainer, which merges all images); confirm.
                    if flip:
                        scores[i] += np.flip(np.rot90(np.array(patch2global(predicted_patches[i:i+1], self.n_class, sizes[i:i+1], coordinates[i:i+1], self.size_p)), k=angle, axes=(3, 2)), axis=3) # merge softmax scores from patches (overlaps)
                    else:
                        scores[i] += np.rot90(np.array(patch2global(predicted_patches[i:i+1], self.n_class, sizes[i:i+1], coordinates[i:i+1], self.size_p)), k=angle, axes=(3, 2)) # merge softmax scores from patches (overlaps)
            ###############################################################
            # patch predictions ###########################
            predictions = [ score.argmax(1)[0] for score in scores ]
            if self.val:
                self.metrics.update(labels_npy, predictions)
            ###################################################
            return predictions
| 16,004 | 42.140162 | 246 | py |
FCtL | FCtL-main/option.py | import os
import argparse
import torch
class Options():
    """Command-line options for training/evaluating the segmentation model."""
    def __init__(self):
        parser = argparse.ArgumentParser(description='PyTorch Segmentation')
        # model and dataset
        parser.add_argument('--n_class', type=int, default=7, help='segmentation classes')
        parser.add_argument('--data_path', type=str, help='path to dataset where images store')
        parser.add_argument('--model_path', type=str, help='path to store trained model files, no need to include task specific name')
        parser.add_argument('--log_path', type=str, help='path to store tensorboard log files, no need to include task specific name')
        parser.add_argument('--task_name', type=str, help='task name for naming saved model files and log files')
        parser.add_argument('--mode', type=int, default=1, choices=[1, 2, 3], help='mode for training procedure. 1.fcn 2.fcn+1 3.fcn+2')
        parser.add_argument('--dataset', type=int, default=2, choices=[1, 2], help='dataset for training procedure. 1.deep 2.IA')
        parser.add_argument('--train', action='store_true', default=False, help='train')
        parser.add_argument('--val', action='store_true', default=False, help='val')
        # Context scale factors for the medium (x2) and large (x3) crops.
        parser.add_argument('--context10', type=int, default=2, help='context10')
        parser.add_argument('--context15', type=int, default=3, help='context15')
        # Checkpoint paths: main model plus the two frozen context encoders.
        parser.add_argument('--pre_path', type=str, default="", help='name for pre model path')
        parser.add_argument('--glo_path_10', type=str, default="", help='name for medium model path')
        parser.add_argument('--glo_path_15', type=str, default="", help='name for large model path')
        parser.add_argument('--batch_size', type=int, default=6, help='batch size for origin global image (without downsampling)')
        parser.add_argument('--sub_batch_size', type=int, default=6, help='batch size for using local image patches')
        parser.add_argument('--size_p', type=int, default=508, help='size (in pixel) for cropped local image')
        parser.add_argument('--size_g', type=int, default=508, help='size (in pixel) for resized global image')
        # the parser
        self.parser = parser
    def parse(self):
        """Parse sys.argv and attach fixed (non-CLI) training hyper-parameters."""
        args = self.parser.parse_args()
        # Hard-coded schedule: 100 epochs, LR decay window [start, start+lens).
        args.num_epochs = 100
        args.start = 50
        args.lens = 50
        args.lr = 5e-5
        return args
| 2,367 | 61.315789 | 136 | py |
FCtL | FCtL-main/dataset/deep_globe.py | import os
import torch.utils.data as data
import numpy as np
from PIL import Image, ImageFile
import random
from torchvision.transforms import ToTensor
from torchvision import transforms
import cv2
# Let PIL load images whose file data is truncated instead of raising IOError.
ImageFile.LOAD_TRUNCATED_IMAGES = True
def is_image_file(filename):
    """Return True if *filename* has a recognised raster-image extension.

    Fix: the original matched the bare suffix ``"tif"`` (no dot), which also
    matched unrelated names such as ``"motif"``; a proper ``".tif"`` extension
    is now required, which still accepts the dataset's ``*_sat.tif`` files.
    """
    return filename.endswith((".png", ".jpg", ".jpeg", ".tif"))
def classToRGB(dataset, label):
    """Convert a (H, W) integer label mask to an RGB image tensor.

    For dataset != 1, label value 1 is painted white and 0 black; the result
    is converted with torchvision's ``ToTensor``.  For dataset == 1 an
    all-zero (black) image of the same size is returned.
    """
    height, width = label.shape[0], label.shape[1]
    colmap = np.zeros(shape=(height, width, 3)).astype(np.float32)
    if dataset != 1:
        # Paint each class value with its RGB colour.
        for value, color in ((1, [255, 255, 255]), (0, [0, 0, 0])):
            rows, cols = np.where(label == value)
            colmap[rows.tolist(), cols.tolist(), :] = color
    to_tensor = ToTensor()
    return to_tensor(colmap)
def class_to_target(inputs, numClass):
    """One-hot encode a batch of integer label maps.

    inputs: (B, H, W) array of class ids in ``[0, numClass)``.
    Returns a float32 array of shape (B, numClass, H, W).
    """
    batch, height, width = inputs.shape[0], inputs.shape[1], inputs.shape[2]
    target = np.zeros(shape=(batch, height, width, numClass), dtype=np.float32)
    for cls in range(numClass):
        # One-hot row for this class, written at every matching pixel.
        onehot = np.zeros(shape=numClass, dtype=np.float32)
        onehot[cls] = 1
        b_idx, h_idx, w_idx = np.where(inputs == cls)
        target[b_idx.tolist(), h_idx.tolist(), w_idx.tolist(), :] = onehot
    # Move the class axis in front of the spatial axes: (B, C, H, W).
    return target.transpose(0, 3, 1, 2)
def label_bluring(inputs):
    """Apply a 7x7 Gaussian blur to every per-class map of a batch.

    inputs: (B, C, H, W) array of one-hot / probability maps.
    Returns a float64 array of the same shape, each (H, W) slice blurred.

    Fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24; it is
    replaced with ``np.float64`` (the alias it always resolved to), so the
    function keeps working on modern NumPy with identical results.
    """
    batchSize, numClass, height, width = inputs.shape
    outputs = np.ones((batchSize, numClass, height, width), dtype=np.float64)
    for batchCnt in range(batchSize):
        for index in range(numClass):
            outputs[batchCnt, index, ...] = cv2.GaussianBlur(inputs[batchCnt, index, ...].astype(np.float64), (7, 7), 0)
    return outputs
class DeepGlobe(data.Dataset):
    """Input/label image dataset for DeepGlobe-style satellite tiles.

    Each sample is a dict with keys ``id``, ``image`` (PIL) and, when
    ``label`` is True, ``label`` (PIL mask).  With ``transform`` enabled,
    image and mask are jointly flipped/rotated for augmentation.
    """
    def __init__(self, dataset, root, ids, label=False, transform=False):
        super(DeepGlobe, self).__init__()
        """
        Args:
            fileDir(string): directory with all the input images.
            transform(callable, optional): Optional transform to be applied on a sample
        """
        self.dataset = dataset      # 1: deep-globe branch (labels not loaded), else: IA-style masks
        self.root = root            # dataset root containing Sat/ and Label/
        self.label = label          # whether to load ground-truth masks
        self.transform = transform  # whether to apply joint augmentation
        self.ids = ids              # list of image file names under Sat/
        # Defined but not applied in __getitem__ -- presumably for external use.
        self.color_jitter = transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.04)
    def __getitem__(self, index):
        sample = {}
        # Strip the trailing "_sat.tif" (8 chars) to get the tile id.
        sample['id'] = self.ids[index][:-8]
        image = Image.open(os.path.join(self.root, "Sat/" + self.ids[index])) # w, h
        sample['image'] = image
        if self.label:
            if self.dataset == 1 :
                pass
            else:
                # Mask lives next to the image with a "_mask.png" suffix.
                label = Image.open(os.path.join(self.root, 'Label/' + self.ids[index].replace('_sat.tif', '_mask.png')))
                sample['label'] = label
        if self.transform and self.label:
            image, label = self._transform(image, label)
            sample['image'] = image
            sample['label'] = label
        return sample
    def _transform(self, image, label):
        """Jointly augment image and mask: random h-flip and 90/180/270 rotation."""
        if np.random.random() > 0.5:
            image = transforms.functional.hflip(image)
            label = transforms.functional.hflip(label)
        if np.random.random() > 0.5:
            degree = random.choice([90, 180, 270])
            image = transforms.functional.rotate(image, degree)
            label = transforms.functional.rotate(label, degree)
        return image, label
    def __len__(self):
        return len(self.ids)
| 3,527 | 32.6 | 120 | py |
FCtL | FCtL-main/models/base_model.py | import logging
import torch.nn as nn
import numpy as np
class BaseModel(nn.Module):
    """Common base class for models: provides a logger and parameter counting."""
    def __init__(self):
        super(BaseModel, self).__init__()
        # Per-subclass logger, named after the concrete model class.
        self.logger = logging.getLogger(self.__class__.__name__)
    def forward(self):
        # Subclasses must implement their own forward pass.
        raise NotImplementedError
    def summary(self):
        """Count trainable parameters, log the total and return it.

        Fix: the original computed the count and silently discarded it; it is
        now logged and returned so callers can actually use the value.
        """
        model_parameters = filter(lambda p: p.requires_grad, self.parameters())
        nbr_params = int(sum(np.prod(p.size()) for p in model_parameters))
        self.logger.info('Trainable parameters: %d', nbr_params)
        return nbr_params
    def __str__(self):
        # Delegate to nn.Module's layer-tree representation (the original also
        # computed an unused parameter count here; that dead code is removed).
        return super(BaseModel, self).__str__()
        #return summary(self, input_shape=(2, 3, 224, 224))
| 734 | 32.409091 | 79 | py |
FCtL | FCtL-main/models/fcn.py | from .base_model import BaseModel
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
from .helpers import get_upsampling_weight
import torch
from itertools import chain
from .FCtL import FCtL
class MiniFCN8(BaseModel):
    """VGG16 backbone truncated at pool5, used as a fixed context encoder.

    Only the pool5 feature map of the (context) input patch is returned;
    there is no decoder/upsampling head.
    """
    def __init__(self, num_classes, pretrained=True):
        # `num_classes` is accepted for interface parity with FCN8 but unused here.
        super(MiniFCN8, self).__init__()
        self.vgg = models.vgg16(pretrained)
        self.features = list(self.vgg.features.children())
        # VGG's FC layers are kept as a plain list; they are never used here.
        self.classifier = list(self.vgg.classifier.children())
        # Pad the input to enable small inputs and allow matching feature maps
        self.features[0].padding = (100, 100)
        # Enable ceil mode in max pooling to avoid size mismatches when upsampling
        for layer in self.features:
            if 'MaxPool' in layer.__class__.__name__:
                layer.ceil_mode = True
        # Split the VGG feature stack into its pool3 / pool4 / pool5 stages.
        self.big_pool3 = nn.Sequential(*self.features[:17])
        self.big_pool4 = nn.Sequential(*self.features[17:24])
        self.big_pool5 = nn.Sequential(*self.features[24:])
    def forward(self, x):
        """Run the truncated VGG and return only the final pool5 feature map."""
        pool3_2 = self.big_pool3(x)
        pool4_2 = self.big_pool4(pool3_2)
        pool5_2 = self.big_pool5(pool4_2)
        return pool5_2
class FCN8(BaseModel):
    """FCN-8s segmentation head on a VGG16 backbone, with optional
    feature-context attention.

    mode 1: plain FCN-8s on the local patch.
    mode 2: adds a second VGG branch for a medium-context crop (``y``) whose
            pool5 is fused into the local pool5 via ``FCtL`` attention.
    mode 3: fuses externally computed medium/large context features
            (``pool5_10``/``pool5_15``) via ``FCtL``.
    """
    def __init__(self, num_classes, mode=1, pretrained=True, freeze_bn=False, freeze_backbone=False):
        super(FCN8, self).__init__()
        self.mode = mode
        self.vgg = models.vgg16(pretrained)
        self.features = list(self.vgg.features.children())
        self.classifier = list(self.vgg.classifier.children())
        # Pad the input to enable small inputs and allow matching feature maps
        self.features[0].padding = (100, 100)
        # Enable ceil mode in max pooling to avoid size mismatches when upsampling
        for layer in self.features:
            if 'MaxPool' in layer.__class__.__name__:
                layer.ceil_mode = True
        # Extract pool3, pool4 and pool5 from the VGG net
        self.pool3 = nn.Sequential(*self.features[:17])
        self.pool4 = nn.Sequential(*self.features[17:24])
        self.pool5 = nn.Sequential(*self.features[24:])
        if self.mode == 2:
            # Separate (weight-sharing via the same module list) branch for the
            # medium-context crop; note these reuse the SAME layer objects as
            # pool3/4/5 above since they wrap the same self.features entries.
            self.big_pool3 = nn.Sequential(*self.features[:17])
            self.big_pool4 = nn.Sequential(*self.features[17:24])
            self.big_pool5 = nn.Sequential(*self.features[24:])
        if self.mode == 2 or self.mode == 3:
            # Context-attention fusion over 512-channel pool5 features.
            self.big_attention = FCtL(512, 512)
        # Adjust the depth of pool3 and pool4 to num_classes
        self.adj_pool3 = nn.Conv2d(256, num_classes, kernel_size=1)
        self.adj_pool4 = nn.Conv2d(512, num_classes, kernel_size=1)
        # Replace the FC layer of VGG with conv layers
        conv6 = nn.Conv2d(512, 4096, kernel_size=7)
        conv7 = nn.Conv2d(4096, 4096, kernel_size=1)
        output = nn.Conv2d(4096, num_classes, kernel_size=1)
        # Copy the weights from VGG's FC pretrained layers
        conv6.weight.data.copy_(self.classifier[0].weight.data.view(
            conv6.weight.data.size()))
        conv6.bias.data.copy_(self.classifier[0].bias.data)
        conv7.weight.data.copy_(self.classifier[3].weight.data.view(
            conv7.weight.data.size()))
        conv7.bias.data.copy_(self.classifier[3].bias.data)
        # Get the outputs
        self.output = nn.Sequential(conv6, nn.ReLU(inplace=True), nn.Dropout(),
                                    conv7, nn.ReLU(inplace=True), nn.Dropout(),
                                    output)
        # We'll need three upsampling layers, upsampling (x2 +2) the ouputs
        # upsampling (x2 +2) addition of pool4 and upsampled output
        # upsampling (x8 +8) the final value (pool3 + added output and pool4)
        self.up_output = nn.ConvTranspose2d(num_classes, num_classes,
                                            kernel_size=4, stride=2, bias=False)
        self.up_pool4_out = nn.ConvTranspose2d(num_classes, num_classes,
                                               kernel_size=4, stride=2, bias=False)
        self.up_final = nn.ConvTranspose2d(num_classes, num_classes,
                                           kernel_size=16, stride=8, bias=False)
        # We'll use guassian kernels for the upsampling weights
        self.up_output.weight.data.copy_(
            get_upsampling_weight(num_classes, num_classes, 4))
        self.up_pool4_out.weight.data.copy_(
            get_upsampling_weight(num_classes, num_classes, 4))
        self.up_final.weight.data.copy_(
            get_upsampling_weight(num_classes, num_classes, 16))
        # We'll freeze the wights, this is a fixed upsampling and not deconv
        for m in self.modules():
            if isinstance(m, nn.ConvTranspose2d):
                m.weight.requires_grad = False
        if freeze_bn: self.freeze_bn()
        if freeze_backbone:
            # NOTE(review): `set_trainable` is not imported in this module
            # (only get_upsampling_weight is imported from .helpers), so
            # freeze_backbone=True would raise NameError -- confirm and import.
            set_trainable([self.pool3, self.pool4, self.pool5], False)
    def forward(self, x, pool5_10=None, pool5_15=None, y=None):
        """Segment local patch ``x``; context inputs depend on self.mode.

        mode 2 expects the medium-context crop in ``y``; mode 3 expects
        precomputed context features in ``pool5_10``/``pool5_15``.
        Returns per-class logits at the input's spatial resolution.
        """
        imh_H, img_W = x.size()[2], x.size()[3]
        # Forward the image
        pool3 = self.pool3(x)
        pool4 = self.pool4(pool3)
        pool5 = self.pool5(pool4)
        if self.mode == 2:
            # Encode the medium-context crop with the (shared) VGG stages.
            pool3_10 = self.big_pool3(y)
            pool4_10 = self.big_pool4(pool3_10)
            pool5_10 = self.big_pool5(pool4_10)
        if self.mode == 2 or self.mode == 3:
            # Fuse local pool5 with the context feature map(s) via attention.
            pool5 = self.big_attention(pool5, pool5_10, pool5_15)
        output = self.output(pool5)
        # Get the outputs and upsmaple them
        up_output = self.up_output(output) #7*36*36
        # Adjust pool4 and add the uped-outputs to pool4
        # (0.01 scaling matches the original FCN-8s "at-once" training trick)
        adjstd_pool4 = self.adj_pool4(0.01 * pool4)
        add_out_pool4 = self.up_pool4_out(adjstd_pool4[:, :, 5: (5 + up_output.size()[2]),
                                                 5: (5 + up_output.size()[3])]
                                          + up_output)
        # Adjust pool3 and add it to the uped last addition
        adjstd_pool3 = self.adj_pool3(0.0001 * pool3)
        final_value = self.up_final(adjstd_pool3[:, :, 9: (9 + add_out_pool4.size()[2]), 9: (9 + add_out_pool4.size()[3])]
                                    + add_out_pool4)
        # Remove the corresponding padded regions to the input img size
        final_value = final_value[:, :, 31: (31 + imh_H), 31: (31 + img_W)].contiguous()
        return final_value
    def get_backbone_params(self):
        """Parameters of the VGG encoder stages plus the conv6/conv7 head."""
        return chain(self.pool3.parameters(), self.pool4.parameters(), self.pool5.parameters(), self.output.parameters())
    def get_decoder_params(self):
        """Parameters of the skip-adjustment and (frozen) upsampling layers."""
        return chain(self.up_output.parameters(), self.adj_pool4.parameters(), self.up_pool4_out.parameters(),
                     self.adj_pool3.parameters(), self.up_final.parameters())
    def freeze_bn(self):
        """Put every BatchNorm2d into eval mode (frozen running stats)."""
        for module in self.modules():
            if isinstance(module, nn.BatchNorm2d): module.eval()
| 6,910 | 43.301282 | 122 | py |
FCtL | FCtL-main/models/FCtL.py | import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import init
import math
class _FCtL(nn.Module):
    """Feature-context transfer layer.

    Fuses a local feature map ``x`` with one (``y``) or two (``y`` and ``z``)
    context feature maps via query/key/value cross-attention.  With both
    contexts present, the three attention outputs (x->y, x->z, x->x) are
    combined with per-pixel weights produced by a small gating network
    (in_*/trans/out_* convolutions + softmax over the three streams).
    NOTE: the gating path hard-codes 512 channels, so inplanes must be 512
    when ``z`` is used.
    """
    def __init__(self, inplanes, planes, lr_mult, weight_init_scale):
        conv_nd = nn.Conv2d
        bn_nd = nn.BatchNorm2d
        super(_FCtL, self).__init__()
        # Value projections for the three attention streams (y, z, x).
        self.conv_value = conv_nd(inplanes, inplanes, kernel_size=1, bias=False)
        self.conv_value_1 = conv_nd(inplanes, inplanes, kernel_size=1, bias=False)
        self.conv_value_2 = conv_nd(inplanes, inplanes, kernel_size=1, bias=False)
        # Unused placeholder -- kept for state-dict/backward compatibility.
        self.conv_out = None
        # Query/key projections: queries always come from x, keys from y/z/x.
        self.conv_query = conv_nd(inplanes, planes, kernel_size=1)
        self.conv_key = conv_nd(inplanes, planes, kernel_size=1)
        self.conv_query_1 = conv_nd(inplanes, planes, kernel_size=1)
        self.conv_key_1 = conv_nd(inplanes, planes, kernel_size=1)
        self.conv_query_2 = conv_nd(inplanes, planes, kernel_size=1)
        self.conv_key_2 = conv_nd(inplanes, planes, kernel_size=1)
        # Gating network that weighs the three attention outputs per pixel.
        self.in_1 = conv_nd(512, 512, kernel_size=1)
        self.in_2 = conv_nd(512, 512, kernel_size=1)
        self.in_3 = conv_nd(512, 512, kernel_size=1)
        self.trans = conv_nd(512*3, 512*3, kernel_size=1)
        self.out_1 = conv_nd(512, 512, kernel_size=1)
        self.out_2 = conv_nd(512, 512, kernel_size=1)
        self.out_3 = conv_nd(512, 512, kernel_size=1)
        self.softmax = nn.Softmax(dim=2)
        # Softmax over the stacked stream axis (dim 0 of the stacked gates).
        self.softmax_H = nn.Softmax(dim=0)
        # Learnable per-stream scales, initialised to zero so the layer starts
        # as an identity (residual-only) mapping.
        self.gamma = nn.Parameter(torch.zeros(1))
        self.gamma_1 = nn.Parameter(torch.zeros(1))
        self.gamma_2 = nn.Parameter(torch.zeros(1))
        self.weight_init_scale = weight_init_scale
        self.reset_parameters()
        self.reset_lr_mult(lr_mult)
        self.reset_weight_and_weight_decay()
    def reset_parameters(self):
        """Initialise all conv layers with N(0, 0.01) weights and zero bias."""
        for m in self.modules():
            if isinstance(m, nn.Conv3d) or isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
                init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    init.zeros_(m.bias)
                m.inited = True
    def reset_lr_mult(self, lr_mult):
        """Tag every submodule with a learning-rate multiplier (read by the optimiser setup)."""
        if lr_mult is not None:
            for m in self.modules():
                m.lr_mult = lr_mult
        else:
            print('not change lr_mult')
    def reset_weight_and_weight_decay(self):
        """Re-init the primary query/key convs (scaled) and zero their weight decay."""
        init.normal_(self.conv_query.weight, 0, 0.01*self.weight_init_scale)
        init.normal_(self.conv_key.weight, 0, 0.01*self.weight_init_scale)
        self.conv_query.weight.wd=0.0
        self.conv_query.bias.wd=0.0
        self.conv_key.weight.wd=0.0
        self.conv_key.bias.wd=0.0
    def forward(self, x, y=None, z=None):
        """Fuse x with context y (and optionally z); returns a map shaped like x."""
        residual = x
        # Values flattened to (B, C, H*W) for batched attention matmuls.
        value = self.conv_value(y)
        value = value.view(value.size(0), value.size(1), -1)
        out_sim = None
        if z is not None:
            value_1 = self.conv_value_1(z)
            value_1 = value_1.view(value_1.size(0), value_1.size(1), -1)
            out_sim_1 = None
            value_2 = self.conv_value_2(x)
            value_2 = value_2.view(value_2.size(0), value_2.size(1), -1)
            out_sim_2 = None
        query = self.conv_query(x)
        key = self.conv_key(y)
        query = query.view(query.size(0), query.size(1), -1)
        key = key.view(key.size(0), key.size(1), -1)
        if z is not None:
            query_1 = self.conv_query_1(x)
            key_1 = self.conv_key_1(z)
            query_1 = query_1.view(query_1.size(0), query_1.size(1), -1)
            key_1 = key_1.view(key_1.size(0), key_1.size(1), -1)
            query_2 = self.conv_query_2(x)
            key_2 = self.conv_key_2(x)
            query_2 = query_2.view(query_2.size(0), query_2.size(1), -1)
            key_2 = key_2.view(key_2.size(0), key_2.size(1), -1)
        # Stream 0: attention of x-queries over y-keys/values.
        sim_map = torch.bmm(query.transpose(1, 2), key)
        sim_map = self.softmax(sim_map)
        out_sim = torch.bmm(sim_map, value.transpose(1, 2))
        out_sim = out_sim.transpose(1, 2)
        out_sim = out_sim.view(out_sim.size(0), out_sim.size(1), *x.size()[2:])
        out_sim = self.gamma * out_sim
        if z is not None:
            # Stream 1: attention of x-queries over z-keys/values.
            sim_map_1 = torch.bmm(query_1.transpose(1, 2), key_1)
            sim_map_1 = self.softmax(sim_map_1)
            out_sim_1 = torch.bmm(sim_map_1, value_1.transpose(1, 2))
            out_sim_1 = out_sim_1.transpose(1, 2)
            out_sim_1 = out_sim_1.view(out_sim_1.size(0), out_sim_1.size(1), *x.size()[2:])
            out_sim_1 = self.gamma_1 * out_sim_1
            # Stream 2: self-attention of x over itself.
            sim_map_2 = torch.bmm(query_2.transpose(1, 2), key_2)
            sim_map_2 = self.softmax(sim_map_2)
            out_sim_2 = torch.bmm(sim_map_2, value_2.transpose(1, 2))
            out_sim_2 = out_sim_2.transpose(1, 2)
            out_sim_2 = out_sim_2.view(out_sim_2.size(0), out_sim_2.size(1), *x.size()[2:])
            out_sim_2 = self.gamma_2 * out_sim_2
        if z is not None:
            # Gating: mix the three streams, then softmax across streams so the
            # per-pixel weights H_all[0..2] sum to one.
            H_1 = self.in_1(out_sim)
            H_2 = self.in_2(out_sim_1)
            H_3 = self.in_3(out_sim_2)
            H_cat = torch.cat((H_1, H_2, H_3), 1)
            H_tra = self.trans(H_cat)
            H_spl = torch.split(H_tra, 512, dim=1)
            H_4 = torch.sigmoid(self.out_1(H_spl[0]))
            H_5 = torch.sigmoid(self.out_2(H_spl[1]))
            H_6 = torch.sigmoid(self.out_3(H_spl[2]))
            H_st = torch.stack((H_4, H_5, H_6), 0)
            H_all = self.softmax_H(H_st)
        if z is not None:
            out = residual + H_all[0] * out_sim + H_all[1] * out_sim_1 + H_all[2] * out_sim_2
        else:
            # Single-context case: plain residual fusion with the y-stream.
            out = residual + out_sim
        return out
class FCtL(_FCtL):
    """Public wrapper around :class:`_FCtL` providing default hyper-parameters."""
    def __init__(self, inplanes, planes, lr_mult=None, weight_init_scale=1.0):
        super(FCtL, self).__init__(
            inplanes=inplanes,
            planes=planes,
            lr_mult=lr_mult,
            weight_init_scale=weight_init_scale,
        )
| 5,866 | 39.462069 | 122 | py |
FCtL | FCtL-main/models/helpers.py | import os
import torch
import torch.nn as nn
import numpy as np
import math
import PIL
def dir_exists(path):
    """Ensure directory *path* exists, creating parents as needed.

    Fix: the original check-then-create pair could raise FileExistsError when
    another process created the directory between the two calls;
    ``exist_ok=True`` makes the operation atomic and idempotent.
    """
    os.makedirs(path, exist_ok=True)
def initialize_weights(*models):
    """Initialise conv/batch-norm/linear layers of the given models in place.

    Conv2d: Kaiming-normal weights (ReLU gain).  BatchNorm2d: weight 1,
    bias 1e-4.  Linear: N(0, 0.0001) weights, zero bias.
    """
    for net in models:
        for module in net.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight.data, nonlinearity='relu')
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1.)
                module.bias.data.fill_(1e-4)
            elif isinstance(module, nn.Linear):
                module.weight.data.normal_(0.0, 0.0001)
                module.bias.data.zero_()
def get_upsampling_weight(in_channels, out_channels, kernel_size):
    """Build a 2-D bilinear-interpolation kernel for ConvTranspose2d upsampling.

    Returns a float tensor of shape (in_channels, out_channels, k, k) whose
    diagonal (i, i) slices hold the bilinear filter and whose off-diagonal
    slices are zero.
    """
    factor = (kernel_size + 1) // 2
    # Kernel centre: integer for odd sizes, half-pixel for even sizes.
    center = factor - 1 if kernel_size % 2 == 1 else factor - 0.5
    grid_y, grid_x = np.ogrid[:kernel_size, :kernel_size]
    bilinear = (1 - abs(grid_y - center) / factor) * (1 - abs(grid_x - center) / factor)
    weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float64)
    weight[list(range(in_channels)), list(range(out_channels)), :, :] = bilinear
    return torch.from_numpy(weight).float()
def colorize_mask(mask, palette):
    """Return *mask* (uint8-convertible array) as a paletted PIL image.

    *palette* is a flat list of RGB values; it is padded with zeros up to the
    768 entries PIL expects.  Fix: the original appended the padding directly
    to the caller's list, mutating it (and growing it) on every call — the
    padding is now applied to a copy.
    """
    padded_palette = list(palette) + [0] * (256 * 3 - len(palette))
    new_mask = PIL.Image.fromarray(mask.astype(np.uint8)).convert('P')
    new_mask.putpalette(padded_palette)
    return new_mask
def set_trainable_attr(m, b):
    """Set module *m*'s trainable flag and toggle requires_grad on all its parameters."""
    m.trainable = b
    for param in m.parameters():
        param.requires_grad = b
def apply_leaf(m, f):
    """Apply *f* to *m* (if it is a Module) and recursively to its children.

    *m* may also be a list/tuple of modules, in which case *f* is applied to
    each element's subtree but not to the container itself.
    """
    children = m if isinstance(m, (list, tuple)) else list(m.children())
    if isinstance(m, nn.Module):
        f(m)
    for child in children:
        apply_leaf(child, f)
def set_trainable(l, b):
    """Recursively set the trainable flag / requires_grad of *l* and its submodules to *b*."""
    apply_leaf(l, lambda module: set_trainable_attr(module, b))
FCtL | FCtL-main/utils/loss.py | import torch.nn as nn
import torch.nn.functional as F
import torch
def one_hot(index, classes):
    """One-hot encode a flat tensor of class indices.

    index: 1-D LongTensor of class ids (ignored entries already filtered out
    by the caller).  Returns a float tensor of shape (len(index), classes).

    Fix: the mask is allocated on ``index.device`` instead of unconditionally
    on CUDA, so the function also works with CPU tensors (the original would
    hit a device mismatch there); behaviour on CUDA inputs is unchanged.
    """
    # index is flatten (during ignore) ##################
    size = index.size()[:1] + (classes,)
    view = index.size()[:1] + (1,)
    #####################################################
    mask = torch.zeros(size, device=index.device)
    index = index.view(view)
    ones = 1.
    return mask.scatter_(1, index, ones)
class FocalLoss(nn.Module):
    """Focal loss for dense classification (Lin et al., gamma-modulated CE).

    With gamma=0 this reduces to plain cross-entropy on the softmax
    probabilities.  Entries whose target equals ``ignore`` are dropped
    before the loss is computed.
    """
    def __init__(self, gamma=0, eps=1e-7, size_average=True, one_hot=True, ignore=None):
        super(FocalLoss, self).__init__()
        self.gamma = gamma              # focusing parameter; 0 = cross-entropy
        self.eps = eps                  # clamp bound to keep log() finite
        self.size_average = size_average  # mean over pixels if True, else sum
        self.one_hot = one_hot          # whether targets are class ids needing one-hot
        self.ignore = ignore            # target value to exclude from the loss
    def forward(self, input, target):
        '''
        input: (B, C, H, W) raw logits; target: (B, H, W) integer class map.
        only support ignore at 0
        '''
        B, C, H, W = input.size()
        # Flatten spatial dims so each pixel becomes one classification sample.
        input = input.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C
        target = target.view(-1) #96774
        if self.ignore is not None:
            valid = (target != self.ignore)
            input = input[valid]
            target = target[valid]
        if self.one_hot: target = one_hot(target, input.size(1))
        probs = F.softmax(input, dim=1)
        # Probability assigned to the true class of each pixel.
        probs = (probs * target).sum(1)
        probs = probs.clamp(self.eps, 1. - self.eps)
        log_p = probs.log()
        # Focal modulation: down-weight well-classified pixels by (1-p)^gamma.
        batch_loss = -(torch.pow((1 - probs), self.gamma)) * log_p
        if self.size_average:
            loss = batch_loss.mean()
        else:
            loss = batch_loss.sum()
        return loss
| 1,585 | 27.321429 | 90 | py |
inFairness | inFairness-main/setup.py | from setuptools import setup, find_packages
# Use the README verbatim as the PyPI long description.
with open("README.md", "r") as f:
    long_description = f.read()
setup(
    name="inFairness",
    # Top-level package plus every subpackage discovered under ./inFairness.
    packages=[
        "inFairness",
        *["inFairness." + p for p in find_packages(where="./inFairness")],
    ],
    package_dir={"": ".",},
    # Runtime dependencies (minimum tested versions).
    install_requires=[
        "numpy>=1.21.6",
        "pandas>=1.3.5",
        "POT>=0.8.0",
        "scikit-learn>=0.24.2",
        "scipy>=1.5.4",
        "torch>=1.13.0"
    ],
    description="inFairness is a Python package to train and audit individually fair PyTorch models",
    long_description=long_description,
    long_description_content_type="text/markdown",
    version="0.2.3",
    url="https://github.com/IBM/inFairness",
    author="IBM Research",
    author_email="mayank.agarwal@ibm.com, aldo.pareja@ibm.com, onkarbhardwaj@ibm.com, mikhail.yurochkin@ibm.com",
    keywords=[
        "individual fairness",
        "ai fairness",
        "trustworthy ai",
        "machine learning",
    ],
    python_requires=">=3.7",
)
| 1,031 | 27.666667 | 113 | py |
inFairness | inFairness-main/examples/postprocess-sentiment-analysis/data.py | import torch
import re
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
# Embed fonts as TrueType (type 42) in PDF/PS exports so text remains editable.
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# Default seaborn styling for every plot produced by this module.
sns.set_context(rc={'figure.figsize': (9, 9)}, font_scale=2.)
# Word-like token matcher used to tokenise raw sentences.
TOKEN_RE = re.compile(r"\w.*?\b")
def load_embeddings(filename):
    """
    Load a DataFrame from the generalized text format used by word2vec, GloVe,
    fastText, and ConceptNet Numberbatch: one word per line followed by its
    vector components.  A leading header line with exactly two fields (the
    matrix dimensions) is skipped when present.
    """
    labels, rows = [], []
    with open(filename, encoding='utf-8') as infile:
        for line in infile:
            items = line.rstrip().split(' ')
            if len(items) == 2:
                # Header row giving the matrix shape -- skip it.
                continue
            labels.append(items[0])
            rows.append(np.array([float(x) for x in items[1:]], 'f'))
    return pd.DataFrame(np.vstack(rows), index=labels, dtype='f')
def load_lexicon(filepath):
    """
    Load a file from Bing Liu's sentiment lexicon (Latin-1 encoded English
    words, one per line).  Comment lines starting with ';' and blank lines
    are skipped.
    """
    lexicon = []
    with open(filepath, encoding='latin-1') as infile:
        for raw_line in infile:
            word = raw_line.rstrip()
            if word and not word.startswith(';'):
                lexicon.append(word)
    return lexicon
def load_data(data_path, embeddings_path, state=0):
    """Build the sentiment train/test split from lexicon words + embeddings.

    Positive/negative lexicon words are looked up in the embedding table;
    labels are 1 (positive) / 0 (negative).  Returns the embeddings frame,
    train/test feature matrices, labels, and the corresponding word lists.
    ``state`` seeds the train/test split for reproducibility.
    """
    pos_words = load_lexicon(data_path + '/positive-words.txt')
    neg_words = load_lexicon(data_path + '/negative-words.txt')
    embeddings = load_embeddings(embeddings_path)
    # filter words that do not appear in the embedding index
    pos_words = [word for word in pos_words if word in embeddings.index]
    neg_words = [word for word in neg_words if word in embeddings.index]
    pos_vectors = embeddings.loc[pos_words].dropna()
    neg_vectors = embeddings.loc[neg_words].dropna()
    vectors = pd.concat([pos_vectors, neg_vectors])
    # Raw targets use +1/-1; remapped to 1/0 below for the classifier.
    targets = np.array([1 for entry in pos_vectors.index] + [-1 for entry in neg_vectors.index])
    labels = list(pos_vectors.index) + list(neg_vectors.index)
    train_vectors, test_vectors, train_targets, test_targets, train_vocab, test_vocab = \
        train_test_split(vectors, targets, labels, test_size=0.1, random_state=state)
    ## Data
    X_train = train_vectors.values
    X_test = test_vectors.values
    y_train = train_targets
    # Relabel the negative class from -1 to 0 (binary {0, 1} labels).
    y_train[y_train == -1] = 0
    y_test = test_targets
    y_test[y_test == -1] = 0
    return embeddings, X_train, X_test, y_train, y_test, train_vocab, test_vocab
def load_test_names(embeddings):
    """Build the name-bias evaluation set from the Caliskan et al. name lists.

    Returns a DataFrame with columns (name, race, gender) — restricted to
    names present in *embeddings* — and the matching embedding matrix.
    """
    NAMES_BY_ETHNICITY = {
        # The first two lists are from the Caliskan et al. appendix describing the
        # Word Embedding Association Test.
        'White': [
            'Adam', 'Chip', 'Harry', 'Josh', 'Roger', 'Alan', 'Frank', 'Ian', 'Justin',
            'Ryan', 'Andrew', 'Fred', 'Jack', 'Matthew', 'Stephen', 'Brad', 'Greg', 'Jed',
            'Paul', 'Todd', 'Brandon', 'Hank', 'Jonathan', 'Peter', 'Wilbur', 'Amanda',
            'Courtney', 'Heather', 'Melanie', 'Sara', 'Amber', 'Crystal', 'Katie',
            'Meredith', 'Shannon', 'Betsy', 'Donna', 'Kristin', 'Nancy', 'Stephanie',
            'Bobbie-Sue', 'Ellen', 'Lauren', 'Peggy', 'Sue-Ellen', 'Colleen', 'Emily',
            'Megan', 'Rachel', 'Wendy'
        ],
        'Black': [
            'Alonzo', 'Jamel', 'Lerone', 'Percell', 'Theo', 'Alphonse', 'Jerome',
            'Leroy', 'Rasaan', 'Torrance', 'Darnell', 'Lamar', 'Lionel', 'Rashaun',
            'Tyree', 'Deion', 'Lamont', 'Malik', 'Terrence', 'Tyrone', 'Everol',
            'Lavon', 'Marcellus', 'Terryl', 'Wardell', 'Aiesha', 'Lashelle', 'Nichelle',
            'Shereen', 'Temeka', 'Ebony', 'Latisha', 'Shaniqua', 'Tameisha', 'Teretha',
            'Jasmine', 'Latonya', 'Shanise', 'Tanisha', 'Tia', 'Lakisha', 'Latoya',
            'Sharise', 'Tashika', 'Yolanda', 'Lashandra', 'Malika', 'Shavonn',
            'Tawanda', 'Yvette'
        ]
    }
    # Lower-case and keep only names that exist in the embedding vocabulary.
    NAMES_BY_ETHNICITY['White'] = [n.lower() for n in NAMES_BY_ETHNICITY['White'] if n.lower() in embeddings.index]
    NAMES_BY_ETHNICITY['Black'] = [n.lower() for n in NAMES_BY_ETHNICITY['Black'] if n.lower() in embeddings.index]
    # Each list is ordered male-first; the first female name marks the split.
    white_female_start = NAMES_BY_ETHNICITY['White'].index('amanda')
    black_female_start = NAMES_BY_ETHNICITY['Black'].index('aiesha')
    test_gender = white_female_start*['Male'] + (len(NAMES_BY_ETHNICITY['White']) - white_female_start)*['Female']
    test_gender += black_female_start*['Male'] + (len(NAMES_BY_ETHNICITY['Black']) - black_female_start)*['Female']
    test_df = pd.DataFrame({'name':NAMES_BY_ETHNICITY['White'] + NAMES_BY_ETHNICITY['Black'],
                            'race':len(NAMES_BY_ETHNICITY['White'])*['White'] + len(NAMES_BY_ETHNICITY['Black'])*['Black'],
                            'gender':test_gender})
    test_names_embed = embeddings.loc[test_df['name']].values
    return test_df, test_names_embed
def load_nyc_names(names_path, embeddings):
    """Load NYC baby names, keep Black/White entries with embeddings.

    Reads the CSV at *names_path*, normalises the 'Ethnicity' column to
    title case, restricts to Black/White rows, lower-cases the first names
    and returns the embedding matrix of names found in *embeddings*.
    """
    names_df = pd.read_csv(names_path)
    ethnicity_fixed = []
    # NOTE(review): this if-chain (no elif/else) assumes every Ethnicity value
    # starts with one of the four prefixes; any other value would make
    # `ethnicity_fixed` shorter than the frame and the assignment below fail.
    # Confirm against the actual CSV.
    for n in names_df['Ethnicity']:
        if n.startswith('BLACK'):
            ethnicity_fixed.append('Black')
        if n.startswith('WHITE'):
            ethnicity_fixed.append('White')
        if n.startswith('ASIAN'):
            ethnicity_fixed.append('Asian')
        if n.startswith('HISPANIC'):
            ethnicity_fixed.append('Hispanic')
    names_df['Ethnicity'] = ethnicity_fixed
    # Keep only the two groups used in the bias analysis.
    names_df = names_df[np.logical_or(names_df['Ethnicity']=='Black', names_df['Ethnicity']=='White')]
    names_df['Child\'s First Name'] = [n.lower() for n in names_df['Child\'s First Name']]
    names_from_df = names_df['Child\'s First Name'].values.tolist()
    # Drop names without an embedding vector.
    idx_keep = []
    for i, n in enumerate(names_from_df):
        if n in embeddings.index:
            idx_keep.append(i)
    names_df = names_df.iloc[idx_keep]
    names_from_df = names_df['Child\'s First Name'].values.tolist()
    names_embed = embeddings.loc[names_from_df].values
    return names_embed
def print_summary(test_df, method_name, test_accuracy):
    """Print accuracy and group sentiment gaps for a method; show box plots.

    Expects *test_df* to carry a ``<method_name>_logits`` column plus
    ``race`` and ``gender`` columns; reports the mean sentiment per group
    and the absolute between-group differences.
    """
    print(method_name + ' test accuracy %f' % test_accuracy)
    # Mean sentiment per race and the Black/White gap.
    mean_sentiments_race = []
    for r in ['Black', 'White']:
        mean_sent = test_df[method_name + '_logits'][test_df['race']==r].mean()
        mean_sentiments_race.append(mean_sent)
        print(method_name + ' %s mean sentiment is %f' %(r, mean_sent))
    print(method_name + ' race mean sentiment difference is %f\n' % np.abs(mean_sentiments_race[0] - mean_sentiments_race[1]))
    # Mean sentiment per gender and the Female/Male gap.
    mean_sentiments_gender = []
    for g in ['Female', 'Male']:
        mean_sent = test_df[method_name + '_logits'][test_df['gender']==g].mean()
        mean_sentiments_gender.append(mean_sent)
        print(method_name + ' %s mean sentiment is %f' %(g, mean_sent))
    print(method_name + ' gender mean sentiment difference is %f\n' % np.abs(mean_sentiments_gender[0] - mean_sentiments_gender[1]))
    # Side-by-side box plots of the sentiment distribution per group.
    fig, axs = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(6*2, 6))
    sns.boxplot(x='race', y=method_name + '_logits', data=test_df, ax=axs[0]).set_title(method_name, fontsize=20)
    sns.boxplot(x='gender', y=method_name + '_logits', data=test_df, ax=axs[1]).set_title(method_name, fontsize=20)
    axs[0].set_ylim([-0.1, 1.1])
    axs[0].set_xlabel('Race', size=18)
    axs[0].set_ylabel('Sentiment', size=18, labelpad=-5)
    axs[1].set_ylim([-0.1, 1.1])
    axs[1].set_xlabel('Gender', size=18)
    axs[1].set_ylabel('Sentiment', size=18, labelpad=-5)
    plt.tick_params(axis='both', which='major', labelsize=16)
    plt.show()
    return
def embed_sentence(text, embedding):
    """Average the embedding vectors of the word tokens in *text*.

    Tokens are case-folded; each must exist in *embedding*'s index (a
    pandas DataFrame of word vectors), otherwise a KeyError is raised.
    Returns a (1, dim) float tensor.
    """
    token_pattern = re.compile(r"\w.*?\b")
    tokens = [tok.casefold() for tok in token_pattern.findall(text)]
    with torch.no_grad():
        vectors = [
            torch.Tensor(embedding.loc[tok].dropna()).view(1, -1)
            for tok in tokens
        ]
        return torch.cat(vectors, dim=0).mean(dim=0, keepdim=True)
def text_to_sentiment(text, network, embedding):
    """Score *text*: tokenize, average word embeddings, run *network*,
    and return softmax class probabilities as a 1-D numpy array.

    By the conventions of this module's callers, index 0 is presumably the
    negative class and index 1 the positive class.
    """
    token_pattern = re.compile(r"\w.*?\b")
    tokens = [tok.casefold() for tok in token_pattern.findall(text)]
    with torch.no_grad():
        vectors = [
            torch.Tensor(embedding.loc[tok].dropna()).view(1, -1)
            for tok in tokens
        ]
        sentence_embedding = torch.cat(vectors, dim=0).mean(dim=0, keepdim=True)
        logits = network(sentence_embedding)
        probabilities = torch.nn.functional.softmax(logits.mean(dim=0, keepdim=True), dim=-1)
    return probabilities.data.numpy()[0]
def format_sentiment_score(score):
    """Render a two-class score pair [p_negative, p_positive] as a label.

    Fix: the "Negative" branch previously printed the *positive* probability
    (``score[1]``) while labelling the result Negative; it now reports the
    winning class's own probability.  Ties fall through to "Neutral".
    """
    if score[0] > score[1]:
        return 'Negative with score ' + '{:.2f}%'.format(score[0]*100)
    elif score[1] > score[0]:
        return 'Positive with score ' + '{:.2f}%'.format(score[1]*100)
    return 'Neutral with score ' + '{:.2f}%'.format(score[1]*100)
def get_positive_negative_sents(template=None):
    """Build two lists of template sentences, one per sentiment polarity.

    Each sentence is *template* followed by one adjective from a fixed
    positive or negative word list. The default template is
    "This movie is ". Returns (positive_sentences, negative_sentences).
    """
    if template is None:
        template = "This movie is "

    pos_words = "remarkable magnificent wondrous amazing astounding incredible stunning astonishing awe-inspiring breathtaking grand majestic spectacular splendid stupendous tremendous wonderful extraordinary impressive jaw-dropping marvellousUK mind-blowing overwhelming staggering striking beautiful brilliant eye-opening eye-popping fabulous glorious humbling imposing intense marvelousUS mind-boggling phenomenal startling stupefying amazeballs confounding dramatic miraculous monumental moving out of this world portentous prodigious sublime unbelievable something else surprising awful sensational fantastic fab great terrific unreal utmost exceptional unusual preternatural stellar heavy outstanding bad fantabulous flabbergasting exciting fascinating out-of-this-world embarrassing state-of-the-art mortifying superb shaming discomfiting awe-striking sobering dazzling super chastening uncommon inspiring inspirational notable noteworthy overcoming thrilling all that and a bag of chips stirring formidable magical excellent enthralling fantastical theatrical exhilarating superior gee-whizz royal dynamite fat large smashing considerable radical titantic surpassing belief too much first-rate heart-stopping first-class".split(" ")
    neg_words = "ordinary boring mediocre unremarkable lackluster mundane plain unimpressive uninteresting vapid average drab dreary dull insipid mind-numbing monotonous run-of-the-mill standard tame trite trivial unamazing unexceptional unexciting uninspired uninspiring vanilla aweless common commonplace customary flat humdrum lifeless normal prosaic routine simple stale typical unmemorable unnoteworthy usual wearisome everyday indifferent pedestrian undistinguished regular traditional familiar conventional household insignificant unpretentious generic characterless bland stereotypical uneventful unstimulating discreet inconspicuous habitual minor predictable quotidian wonted workaday unimportant inferior modest fixed general stock mainstream fair nondescript humble stereotyped cut-and-dry cut-and-dried not special banal day-to-day garden variety OK tedious unmoving tiresome staid quiet discouraging depressing upsetting".split(" ")

    def _fill(words):
        # Prefix every word with the sentence template.
        return [template + word for word in words]

    return _fill(pos_words), _fill(neg_words)
| 11,968 | 44.858238 | 1,229 | py |
inFairness | inFairness-main/examples/fair-ranking-synthetic-data/trainer.py |
class Trainer(object):
    """Main trainer class that orchestrates the entire learning routine
    Use this class to start training a model using individual fairness routines

    Args:
        dataloader (torch.util.data.DataLoader): training data loader
        model (inFairness.fairalgo): Individual fairness algorithm
        optimizer (torch.optim): Model optimizer
        max_iterations (int): Number of training steps
        print_loss_period (int): if non-zero, print the loss every this
            many steps
    """

    def __init__(self, dataloader, model, optimizer, max_iterations, print_loss_period=0):
        self.dataloader = dataloader
        self.model = model
        self.optimizer = optimizer
        self.max_iterations = max_iterations
        self._dataloader_iter = iter(self.dataloader)
        self.print_loss_period = print_loss_period
        # Bug fix: run_step() reads self.step_count (for loss printing),
        # but it was only ever assigned inside train(). Initialize it so
        # run_step() can also be called on its own.
        self.step_count = 0

    def run_step(self):
        """Fetch the next batch and run one optimization step."""
        try:
            data = next(self._dataloader_iter)
        except StopIteration:
            # Loader exhausted: restart it (new epoch) and retry.
            self._dataloader_iter = iter(self.dataloader)
            data = next(self._dataloader_iter)

        if isinstance(data, list) or isinstance(data, tuple):
            model_output = self.model(*data)
        elif isinstance(data, dict):
            model_output = self.model(**data)
        else:
            raise AttributeError(
                "Data format not recognized. Only `list`, `tuple`, and `dict` are recognized."
            )

        if self.print_loss_period:
            if self.step_count % self.print_loss_period == 0:
                print(f'loss {self.step_count}', model_output.loss)

        self.optimizer.zero_grad()
        model_output.loss.backward()
        self.optimizer.step()

    def train(self):
        """Train the model for `max_iterations` steps."""
        self.model.train(True)
        for self.step_count in range(self.max_iterations):
            self.run_step()
| 1,760 | 31.018182 | 94 | py |
inFairness | inFairness-main/examples/adult-income-prediction/data.py | import os
import requests
import pandas as pd
import numpy as np
import torch
from sklearn.preprocessing import StandardScaler
from sklearn.utils.random import sample_without_replacement
def _download_data_(rootdir=None):
    """Download the UCI Adult train/test files into *rootdir*.

    *rootdir* defaults to './dataset' and is created if missing.
    Returns a dict mapping split name ('train'/'test') to the local
    csv path it was written to.
    """
    URLS = {
        'train': 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',
        'test': 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test'
    }

    if rootdir is None:
        rootdir = "./dataset"
    os.makedirs(rootdir, exist_ok=True)

    paths = {}
    for split, url in URLS.items():
        target = os.path.join(rootdir, f'{split}.csv')
        response = requests.get(url)
        with open(target, 'w') as outfile:
            outfile.write(response.content.decode('utf-8'))
        paths[split] = target
    return paths
def _read_data_(fpath, train_or_test):
    """Read one split of the raw UCI Adult csv into a DataFrame.

    Args:
        fpath: path to the raw csv file
        train_or_test: 'train' or 'test'. The test file carries an extra
            leading junk row and a trailing '.' on its labels.

    Returns:
        pandas.DataFrame with the 'annual-income' column binarized
        (0 for '<=50K', 1 for '>50K').

    Raises:
        ValueError: if *train_or_test* is neither 'train' nor 'test'.
    """
    names = [
        'age', 'workclass', 'fnlwgt', 'education',
        'education-num', 'marital-status', 'occupation',
        'relationship', 'race', 'sex', 'capital-gain',
        'capital-loss', 'hours-per-week', 'native-country',
        'annual-income'
    ]

    if train_or_test == 'train':
        data = pd.read_csv(
            fpath, sep=',', header=None, names=names,
            na_values=['?'], skipinitialspace=True
        )
    elif train_or_test == 'test':
        # skiprows=1 drops the junk header line unique to the test file.
        data = pd.read_csv(
            fpath, sep=',', header=None, names=names,
            na_values=['?'], skiprows=1, skipinitialspace=True
        )
    else:
        # Bug fix: an unknown split previously fell through to an
        # UnboundLocalError on `data`; fail with a clear message instead.
        raise ValueError(
            f"train_or_test must be 'train' or 'test', got {train_or_test!r}"
        )

    # Test-split labels look like '<=50K.'; strip the dot, then binarize.
    data['annual-income'] = data['annual-income'].str.rstrip('.')
    data['annual-income'] = data['annual-income'].replace({'<=50K': 0, '>50K': 1})
    return data
def load_data(rootdir=None):
    """Download, clean, split and standardize the UCI Adult dataset.

    Returns ((train_X_df, train_Y_df), (test_X_df, test_Y_df)) where the
    X frames hold one-hot encoded features with standardized continuous
    columns and the Y series holds the binarized income label.
    """
    # download data from UCI repository
    dirpaths = _download_data_(rootdir=rootdir)
    train_data = _read_data_(dirpaths['train'], 'train')
    test_data = _read_data_(dirpaths['test'], 'test')
    # Pool both files so get_dummies sees every category once.
    data = pd.concat([train_data, test_data], ignore_index=True)
    # remove rows with NaNs
    data.dropna(inplace=True)
    categorical_vars = [
        'workclass', 'marital-status', 'occupation',
        'relationship', 'race', 'sex', 'native-country'
    ]
    data = pd.get_dummies(data, columns=categorical_vars)
    # Drop race/sex/native-country one-hot columns (presumably treated as
    # protected/unused attributes — confirm with the experiment design)
    # plus the fnlwgt weight and redundant education string.
    cols_to_drop = [
        'race_Amer-Indian-Eskimo', 'race_Asian-Pac-Islander', 'race_Black',
        'race_Other', 'sex_Female', 'native-country_Cambodia', 'native-country_Canada',
        'native-country_China', 'native-country_Columbia', 'native-country_Cuba',
        'native-country_Dominican-Republic', 'native-country_Ecuador',
        'native-country_El-Salvador', 'native-country_England', 'native-country_France',
        'native-country_Germany', 'native-country_Greece', 'native-country_Guatemala',
        'native-country_Haiti', 'native-country_Holand-Netherlands', 'native-country_Honduras',
        'native-country_Hong', 'native-country_Hungary', 'native-country_India', 'native-country_Iran',
        'native-country_Ireland', 'native-country_Italy', 'native-country_Jamaica', 'native-country_Japan',
        'native-country_Laos', 'native-country_Mexico', 'native-country_Nicaragua',
        'native-country_Outlying-US(Guam-USVI-etc)', 'native-country_Peru', 'native-country_Philippines',
        'native-country_Poland', 'native-country_Portugal', 'native-country_Puerto-Rico', 'native-country_Scotland',
        'native-country_South', 'native-country_Taiwan', 'native-country_Thailand', 'native-country_Trinadad&Tobago',
        'native-country_United-States', 'native-country_Vietnam', 'native-country_Yugoslavia',
        'fnlwgt', 'education'
    ]
    data.drop(cols_to_drop, axis=1, inplace=True)
    # Split into train/test splits
    # NOTE(review): the original UCI train/test file boundary is discarded;
    # a fresh, seeded 80/20 split of the pooled data is taken instead.
    train_data = data.sample(frac=0.8, random_state=123)
    test_data = data.drop(train_data.index).reset_index(drop=True)
    train_data = train_data.reset_index(drop=True)
    # Standardize continuous columns (scaler fit on train split only).
    continuous_vars = [
        'age', 'education-num', 'capital-gain',
        'capital-loss', 'hours-per-week'
    ]
    scaler = StandardScaler().fit(train_data[continuous_vars])
    train_data[continuous_vars] = scaler.transform(train_data[continuous_vars])
    test_data[continuous_vars] = scaler.transform(test_data[continuous_vars])
    train_data = get_input_output_df(train_data)
    test_data = get_input_output_df(test_data)
    return train_data, test_data
def get_input_output_df(data):
    """Split *data* into a sorted-column feature frame and the label series.

    Returns (df_X, df_Y): df_X holds every column except 'annual-income'
    in sorted order; df_Y is the 'annual-income' label column.
    """
    cols = sorted(data.columns)
    output_col = 'annual-income'
    # Bug fix: compare with != rather than `not in` — the original
    # substring test would also drop any column whose name is a
    # substring of 'annual-income' (e.g. a column named 'income').
    input_cols = [col for col in cols if col != output_col]
    df_X = data[input_cols]
    df_Y = data[output_col]
    return df_X, df_Y
def convert_df_to_tensor(data_X_df, data_Y_df):
    """Convert feature/label dataframes into torch tensors.

    Features are cast to float32; labels keep the dtype torch infers
    from the underlying numpy values.
    """
    features = torch.tensor(data_X_df.values).float()
    labels = torch.tensor(data_Y_df.values)
    return features, labels
def generate_pairs(len1, len2, n_pairs=100):
    """
    vanilla sampler of random pairs (might sample same pair up to permutation)
    n_pairs <= len1*len2 must hold for sampling without replacement
    """
    flat_idx = sample_without_replacement(len1 * len2, n_pairs)
    # Convert flat indices into a 2 x n_pairs array of (row, col) pairs.
    pair_idx = np.unravel_index(flat_idx, (len1, len2))
    return np.vstack(pair_idx)
def create_data_pairs(X_train, Y_train, Y_gender_train, n_comparable=10000, n_incomparable=10000):
    """Build comparable / incomparable example pairs for metric learning.

    Comparable pairs share the same (binary) income label but differ in
    the binary gender attribute; incomparable pairs have different income
    labels. Returns (X1, X2, Y_pairs) torch tensors where Y_pairs[i] is
    1 when (X1[i], X2[i]) is a comparable pair and 0 otherwise.

    NOTE(review): assumes n_comparable is even — K=2 classes each
    contribute n_comparable // 2 pairs, so an odd value would leave the
    Y_pairs[:n_comparable] = 1 labeling off by one.
    """
    y_gender_train_np = Y_gender_train.detach().numpy()
    y_train_np = Y_train.detach().numpy()
    X_train_np = X_train.detach().numpy()
    # Create comparable pairs
    comparable_X1 = None
    comparable_X2 = None
    K = 2  # number of income classes
    for i in range(K):
        # Rows with gender 0 (resp. gender 1) AND income label i: the
        # boolean sum equals 2 exactly when both conditions hold.
        c0_idx = np.where((1*(y_gender_train_np==0) + (y_train_np==i))==2)[0]
        c1_idx = np.where((1*(y_gender_train_np==1) + (y_train_np==i))==2)[0]
        pairs_idx = generate_pairs(len(c0_idx), len(c1_idx), n_pairs=n_comparable // K)
        if comparable_X1 is None:
            comparable_X1 = X_train_np[c0_idx[pairs_idx[0]]]
            comparable_X2 = X_train_np[c1_idx[pairs_idx[1]]]
        else:
            comparable_X1 = np.vstack((comparable_X1, X_train_np[c0_idx[pairs_idx[0]]]))
            comparable_X2 = np.vstack((comparable_X2, X_train_np[c1_idx[pairs_idx[1]]]))
    # Create incomparable pairs
    c0_idx = np.where(y_train_np==0)[0]
    c1_idx = np.where(y_train_np==1)[0]
    pairs_idx = generate_pairs(len(c0_idx), len(c1_idx), n_pairs=n_incomparable)
    incomparable_X1 = X_train_np[c0_idx[pairs_idx[0]]]
    incomparable_X2 = X_train_np[c1_idx[pairs_idx[1]]]
    # Join the two sets (comparable and incomparable) to create X and Y
    X1 = np.vstack((comparable_X1, incomparable_X1))
    X2 = np.vstack((comparable_X2, incomparable_X2))
    Y_pairs = np.zeros(n_comparable + n_incomparable)
    Y_pairs[:n_comparable] = 1
    X1 = torch.from_numpy(X1)
    X2 = torch.from_numpy(X2)
    Y_pairs = torch.from_numpy(Y_pairs)
    return X1, X2, Y_pairs
| 6,759 | 33.666667 | 118 | py |
inFairness | inFairness-main/examples/adult-income-prediction/metrics.py | import torch
import numpy as np
from sklearn.metrics import confusion_matrix
def accuracy(model, test_dl, device):
    """Compute plain classification accuracy of *model* over *test_dl*.

    Predictions are the argmax over dim 1 of the model output; returns
    correct / total as a tensor scalar.
    """
    model.eval()
    n_correct, n_total = 0, 0
    for batch_x, batch_y in test_dl:
        batch_x, batch_y = batch_x.to(device), batch_y.to(device)
        logits = model(batch_x)
        _, predicted = torch.max(logits, dim=1)
        n_total += batch_y.shape[0]
        n_correct += torch.sum(predicted == batch_y)
    return n_correct / float(n_total)
def balanced_accuracy(model, test_dl, device):
    """Compute balanced accuracy, 0.5 * (TPR + TNR), for a binary classifier.

    Predictions are the argmax over the last dim of the model output;
    gold and predicted labels are accumulated across all batches before
    a single confusion matrix is computed.
    """
    model.eval()
    Y_gold, Y_predicted = [], []
    for x, y in test_dl:
        x, y = x.to(device), y.to(device)
        y_pred = model(x)
        _, y_pred = torch.max(y_pred, dim=-1)
        # Bug fix: squeeze() collapses a batch of size 1 to a 0-d tensor,
        # whose .tolist() is a bare scalar that list.extend() rejects.
        # view(-1) always yields a 1-d tensor.
        y_pred = y_pred.view(-1).detach().cpu().tolist()
        Y_predicted.extend(y_pred)
        Y_gold.extend(y.detach().cpu().tolist())
    conf_matrix = confusion_matrix(Y_gold, Y_predicted)
    true_neg = conf_matrix[0][0]
    false_neg = conf_matrix[1][0]
    true_pos = conf_matrix[1][1]
    false_pos = conf_matrix[0][1]
    TPR = true_pos / float(true_pos + false_neg)
    TNR = true_neg / float(true_neg + false_pos)
    acc = 0.5 * (TPR + TNR)
    return acc
def spouse_consistency(model, test_dl, test_dl_flipped, device):
    """Fraction of examples whose prediction is unchanged under flipping.

    *test_dl* and *test_dl_flipped* must iterate the same examples in the
    same order, differing only in the flipped attribute(s). Returns the
    mean agreement between the two prediction sequences.
    """
    model.eval()

    def _predict_all(dataloader):
        # Collect argmax predictions for every batch in the loader.
        preds = []
        for x, _ in dataloader:
            x = x.to(device)
            logits = model(x)
            _, pred = torch.max(logits, dim=-1)
            # Bug fix: squeeze() collapses a batch of size 1 to a 0-d
            # tensor whose .tolist() is a bare scalar, which
            # list.extend() rejects; view(-1) always keeps a 1-d tensor.
            preds.extend(pred.view(-1).detach().cpu().tolist())
        return np.array(preds)

    predictions_original = _predict_all(test_dl)
    predictions_flipped = _predict_all(test_dl_flipped)
    score = np.mean(predictions_original == predictions_flipped)
    return score
| 1,950 | 24.012821 | 64 | py |
inFairness | inFairness-main/examples/sentiment-analysis/data.py | import torch
import re
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
sns.set_context(rc={'figure.figsize': (9, 9)}, font_scale=2.)
TOKEN_RE = re.compile(r"\w.*?\b")
def load_embeddings(filename):
    """
    Load a DataFrame from the generalized text format used by word2vec, GloVe,
    fastText, and ConceptNet Numberbatch. The main point where they differ is
    whether there is an initial line with the dimensions of the matrix.
    """
    labels, rows = [], []
    with open(filename, encoding='utf-8') as infile:
        for line in infile:
            fields = line.rstrip().split(' ')
            # A two-item line is the optional "rows cols" header; skip it.
            if len(fields) == 2:
                continue
            labels.append(fields[0])
            rows.append(np.array([float(v) for v in fields[1:]], 'f'))
    return pd.DataFrame(np.vstack(rows), index=labels, dtype='f')
def load_lexicon(filepath):
    """
    load a file from Bing Liu's sentiment lexicon containing
    English words in Latin-1 encoding
    One file contains a list of positive words, and the other
    contains a list of negative words. The files contain comment
    lines starting with ';' and blank lines, which should be skipped
    """
    with open(filepath, encoding='latin-1') as infile:
        stripped = (line.rstrip() for line in infile)
        return [entry for entry in stripped if entry and not entry.startswith(';')]
def load_data(data_path, embeddings_path, state=0):
    """Load the sentiment lexicon plus embeddings and build a split.

    Returns (embeddings, X_train, X_test, y_train, y_test, train_vocab,
    test_vocab) where labels are 1 for positive words and 0 for negative
    words, and the 10% test split is seeded by *state*.
    """
    pos_words = load_lexicon(data_path + '/positive-words.txt')
    neg_words = load_lexicon(data_path + '/negative-words.txt')
    embeddings = load_embeddings(embeddings_path)

    # Keep only lexicon entries that actually have an embedding vector.
    pos_words = [w for w in pos_words if w in embeddings.index]
    neg_words = [w for w in neg_words if w in embeddings.index]

    pos_vectors = embeddings.loc[pos_words].dropna()
    neg_vectors = embeddings.loc[neg_words].dropna()

    vectors = pd.concat([pos_vectors, neg_vectors])
    targets = np.array([1] * len(pos_vectors.index) + [-1] * len(neg_vectors.index))
    labels = list(pos_vectors.index) + list(neg_vectors.index)

    (train_vectors, test_vectors,
     train_targets, test_targets,
     train_vocab, test_vocab) = train_test_split(
        vectors, targets, labels, test_size=0.1, random_state=state)

    ## Data: remap labels from {-1, 1} to {0, 1}.
    X_train = train_vectors.values
    X_test = test_vectors.values
    y_train = train_targets
    y_train[y_train == -1] = 0
    y_test = test_targets
    y_test[y_test == -1] = 0

    return embeddings, X_train, X_test, y_train, y_test, train_vocab, test_vocab
def load_test_names(embeddings):
    """Build the Caliskan et al. WEAT name test set.

    Returns (test_df, test_names_embed): a DataFrame with columns
    'name', 'race' and 'gender', and the corresponding embedding matrix.
    Only names present (lowercased) in *embeddings*' index are kept.
    """
    NAMES_BY_ETHNICITY = {
        # The first two lists are from the Caliskan et al. appendix describing the
        # Word Embedding Association Test.
        'White': [
            'Adam', 'Chip', 'Harry', 'Josh', 'Roger', 'Alan', 'Frank', 'Ian', 'Justin',
            'Ryan', 'Andrew', 'Fred', 'Jack', 'Matthew', 'Stephen', 'Brad', 'Greg', 'Jed',
            'Paul', 'Todd', 'Brandon', 'Hank', 'Jonathan', 'Peter', 'Wilbur', 'Amanda',
            'Courtney', 'Heather', 'Melanie', 'Sara', 'Amber', 'Crystal', 'Katie',
            'Meredith', 'Shannon', 'Betsy', 'Donna', 'Kristin', 'Nancy', 'Stephanie',
            'Bobbie-Sue', 'Ellen', 'Lauren', 'Peggy', 'Sue-Ellen', 'Colleen', 'Emily',
            'Megan', 'Rachel', 'Wendy'
        ],
        'Black': [
            'Alonzo', 'Jamel', 'Lerone', 'Percell', 'Theo', 'Alphonse', 'Jerome',
            'Leroy', 'Rasaan', 'Torrance', 'Darnell', 'Lamar', 'Lionel', 'Rashaun',
            'Tyree', 'Deion', 'Lamont', 'Malik', 'Terrence', 'Tyrone', 'Everol',
            'Lavon', 'Marcellus', 'Terryl', 'Wardell', 'Aiesha', 'Lashelle', 'Nichelle',
            'Shereen', 'Temeka', 'Ebony', 'Latisha', 'Shaniqua', 'Tameisha', 'Teretha',
            'Jasmine', 'Latonya', 'Shanise', 'Tanisha', 'Tia', 'Lakisha', 'Latoya',
            'Sharise', 'Tashika', 'Yolanda', 'Lashandra', 'Malika', 'Shavonn',
            'Tawanda', 'Yvette'
        ]
    }
    # Lowercase and keep only names that have an embedding vector.
    NAMES_BY_ETHNICITY['White'] = [n.lower() for n in NAMES_BY_ETHNICITY['White'] if n.lower() in embeddings.index]
    NAMES_BY_ETHNICITY['Black'] = [n.lower() for n in NAMES_BY_ETHNICITY['Black'] if n.lower() in embeddings.index]
    # The lists are ordered male-names-first; the index of the first
    # female name ('amanda' / 'aiesha') marks the gender boundary.
    white_female_start = NAMES_BY_ETHNICITY['White'].index('amanda')
    black_female_start = NAMES_BY_ETHNICITY['Black'].index('aiesha')
    test_gender = white_female_start*['Male'] + (len(NAMES_BY_ETHNICITY['White']) - white_female_start)*['Female']
    test_gender += black_female_start*['Male'] + (len(NAMES_BY_ETHNICITY['Black']) - black_female_start)*['Female']
    test_df = pd.DataFrame({'name':NAMES_BY_ETHNICITY['White'] + NAMES_BY_ETHNICITY['Black'],
                            'race':len(NAMES_BY_ETHNICITY['White'])*['White'] + len(NAMES_BY_ETHNICITY['Black'])*['Black'],
                            'gender':test_gender})
    test_names_embed = embeddings.loc[test_df['name']].values
    return test_df, test_names_embed
def load_nyc_names(names_path, embeddings):
    """Load NYC baby names, keep Black/White rows, return their embeddings.

    *names_path* is a csv with 'Ethnicity' and "Child's First Name"
    columns. Names are lowercased and only those present in
    *embeddings*' index are kept; returns the embedding matrix.
    """
    names_df = pd.read_csv(names_path)

    # Collapse the verbose ethnicity labels to four canonical values.
    ethnicity_fixed = []
    for label in names_df['Ethnicity']:
        if label.startswith('BLACK'):
            ethnicity_fixed.append('Black')
        if label.startswith('WHITE'):
            ethnicity_fixed.append('White')
        if label.startswith('ASIAN'):
            ethnicity_fixed.append('Asian')
        if label.startswith('HISPANIC'):
            ethnicity_fixed.append('Hispanic')
    names_df['Ethnicity'] = ethnicity_fixed

    keep_mask = np.logical_or(names_df['Ethnicity'] == 'Black',
                              names_df['Ethnicity'] == 'White')
    names_df = names_df[keep_mask]

    names_df['Child\'s First Name'] = [n.lower() for n in names_df['Child\'s First Name']]

    # Drop rows whose (lowercased) name has no embedding vector.
    names_from_df = names_df['Child\'s First Name'].values.tolist()
    idx_keep = [i for i, name in enumerate(names_from_df) if name in embeddings.index]
    names_df = names_df.iloc[idx_keep]

    names_from_df = names_df['Child\'s First Name'].values.tolist()
    return embeddings.loc[names_from_df].values
def print_summary(test_df, method_name, test_accuracy):
    """Print per-race / per-gender mean sentiment and draw box plots.

    Expects *test_df* to contain a '<method_name>_logits' column plus
    'race' (Black/White) and 'gender' (Female/Male) columns. Prints the
    group means and their absolute differences, then shows two
    side-by-side seaborn box plots.
    """
    print(method_name + ' test accuracy %f' % test_accuracy)
    mean_sentiments_race = []
    for r in ['Black', 'White']:
        mean_sent = test_df[method_name + '_logits'][test_df['race']==r].mean()
        mean_sentiments_race.append(mean_sent)
        print(method_name + ' %s mean sentiment is %f' %(r, mean_sent))
    print(method_name + ' race mean sentiment difference is %f\n' % np.abs(mean_sentiments_race[0] - mean_sentiments_race[1]))
    mean_sentiments_gender = []
    for g in ['Female', 'Male']:
        mean_sent = test_df[method_name + '_logits'][test_df['gender']==g].mean()
        mean_sentiments_gender.append(mean_sent)
        print(method_name + ' %s mean sentiment is %f' %(g, mean_sent))
    print(method_name + ' gender mean sentiment difference is %f\n' % np.abs(mean_sentiments_gender[0] - mean_sentiments_gender[1]))
    # Box plots of the sentiment distribution, split by race and gender.
    fig, axs = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(6*2, 6))
    sns.boxplot(x='race', y=method_name + '_logits', data=test_df, ax=axs[0]).set_title(method_name, fontsize=20)
    sns.boxplot(x='gender', y=method_name + '_logits', data=test_df, ax=axs[1]).set_title(method_name, fontsize=20)
    axs[0].set_ylim([-0.1, 1.1])
    axs[0].set_xlabel('Race', size=18)
    axs[0].set_ylabel('Sentiment', size=18, labelpad=-5)
    axs[1].set_ylim([-0.1, 1.1])
    axs[1].set_xlabel('Gender', size=18)
    axs[1].set_ylabel('Sentiment', size=18, labelpad=-5)
    plt.tick_params(axis='both', which='major', labelsize=16)
    plt.show()
    return
def text_to_sentiment(text, network, embedding, device):
    """Tokenize *text*, average its word embeddings on *device*, and
    return the network's softmax class probabilities as a numpy array.
    """
    words = [w.casefold() for w in TOKEN_RE.findall(text)]
    with torch.no_grad():
        # Look every token up in the embedding table and average rows.
        vectors = [
            torch.Tensor(embedding.loc[w].dropna()).view(1, -1) for w in words
        ]
        mean_vec = torch.cat(vectors, dim=0).mean(dim=0, keepdim=True).to(device)
        logits = network(mean_vec)
        probas = torch.nn.functional.softmax(logits.mean(dim=0, keepdim=True), dim=-1)
    return probas.data.detach().cpu().numpy()[0]
def format_sentiment_score(score):
    """Format a two-class sentiment score as a human-readable string.

    *score* is a (negative, positive) probability pair. The reported
    percentage is the probability of the winning class; an exact tie is
    reported as neutral with the positive probability.
    """
    if score[0] > score[1]:
        # Bug fix: report the negative class's own probability; the
        # original printed score[1] (the positive probability) here.
        # Also drops the extraction residue fused onto the final line.
        return 'Negative with score ' + '{:.2f}%'.format(score[0]*100)
    elif score[1] > score[0]:
        return 'Positive with score ' + '{:.2f}%'.format(score[1]*100)
    return 'Neutral with score ' + '{:.2f}%'.format(score[1]*100)
inFairness | inFairness-main/examples/synthetic-data/trainer.py |
class Trainer(object):
    """Main trainer class that orchestrates the entire learning routine
    Use this class to start training a model using individual fairness routines

    Args:
        dataloader (torch.util.data.DataLoader): training data loader
        model (inFairness.fairalgo): Individual fairness algorithm
        optimizer (torch.optim): Model optimizer
        max_iterations (int): Number of training steps
    """

    def __init__(self, dataloader, model, optimizer, max_iterations):
        self.dataloader = dataloader
        self.model = model
        self.optimizer = optimizer
        self.max_iterations = max_iterations
        self._dataloader_iter = iter(self.dataloader)

    def run_step(self):
        """Fetch the next batch and run one optimization step."""
        try:
            data = next(self._dataloader_iter)
        except StopIteration:
            # Loader exhausted: restart it (new epoch) and retry.
            self._dataloader_iter = iter(self.dataloader)
            data = next(self._dataloader_iter)

        if isinstance(data, list) or isinstance(data, tuple):
            model_output = self.model(*data)
        elif isinstance(data, dict):
            model_output = self.model(**data)
        else:
            raise AttributeError(
                "Data format not recognized. Only `list`, `tuple`, and `dict` are recognized."
            )

        self.optimizer.zero_grad()
        model_output.loss.backward()
        self.optimizer.step()

    def train(self):
        """Train the model for `max_iterations` steps."""
        self.model.train(True)
        for step_count in range(self.max_iterations):
            # Fix: the final line previously had extraction residue
            # ("| 1516 | ...") fused onto it, which is a syntax error.
            self.run_step()
inFairness | inFairness-main/tests/postprocessing/test_data_ds.py | import pytest
import torch
import numpy as np
from inFairness.distances import EuclideanDistance
from inFairness.postprocessing.data_ds import PostProcessingDataStore
def test_add_data():
    # Repeatedly adding batches must grow n_samples linearly and keep the
    # cached pairwise-distance matrix square at (n_samples, n_samples).
    ntries = 10
    B, D = 10, 50
    distance_x = EuclideanDistance()
    data_ds = PostProcessingDataStore(distance_x)
    counter = 0
    for _ in range(ntries):
        X = torch.rand(size=(B, D))
        Y = torch.rand(size=(B,))
        counter += B
        data_ds.add_datapoints(X, Y)
        assert data_ds.n_samples == counter
        assert np.array_equal(
            list(data_ds.distance_matrix.shape),
            [counter, counter]
        )
def test_reset_data():
    # reset() must drop all cached data and the distance matrix.
    B, D = 10, 50
    distance_x = EuclideanDistance()
    data_ds = PostProcessingDataStore(distance_x)
    X = torch.rand(size=(B, D))
    Y = torch.rand(size=(B,))
    data_ds.add_datapoints(X, Y)
    assert data_ds.n_samples == B
    assert np.array_equal(list(data_ds.distance_matrix.shape), [B, B])
    data_ds.reset()
    assert data_ds.n_samples == 0
    assert data_ds.distance_matrix is None
    assert data_ds.data_X is None
    assert data_ds.data_Y is None
| 1,153 | 22.08 | 70 | py |
inFairness | inFairness-main/tests/postprocessing/test_glif.py | import pytest
import torch
import torch.nn.functional as F
import numpy as np
from inFairness.distances import EuclideanDistance
from inFairness.postprocessing import GraphLaplacianIF
def test_postprocess_incorrectargs():
    # postprocess() must reject bad invocations with an AssertionError:
    # a missing method name, and (presumably — confirm against the
    # GraphLaplacianIF implementation) coordinate-descent without its
    # required extra kwargs (batchsize/epochs).
    params = (1.0, 1.0, 100.0, True)   # (lambda_param, scale, threshold, normalize)
    dist_x = EuclideanDistance()
    pp = GraphLaplacianIF(dist_x, True)
    with pytest.raises(AssertionError):
        pp.postprocess(None, *params)
    with pytest.raises(AssertionError):
        pp.postprocess("coordinate-descent", *params)
@pytest.mark.parametrize(
    "lambda_param,scale,threshold,normalize,dim,output_probas",
    [
        (1.0, 1.0, 100.0, True, 2, True),
        (1.0, 1.0, 100.0, False, 2, True),
        (1.0, 1.0, 100.0, True, 10, True),
        (1.0, 1.0, 100.0, False, 10, True),
        (1.0, 1.0, 100.0, True, 2, False),
        (1.0, 1.0, 100.0, False, 2, False),
        (1.0, 1.0, 100.0, True, 10, False),
        (1.0, 1.0, 100.0, False, 10, False),
    ],
)
def test_postprocess_exact(lambda_param, scale, threshold, normalize, dim, output_probas):
    # The exact solver and the coordinate-descent solver must agree:
    # same output shape and objective values equal to within 1e-3.
    B, E = 50, 100
    X = torch.rand(size=(B, E))
    Y = torch.rand(size=(B, dim))
    if output_probas:
        # Turn raw scores into a probability simplex when required.
        Y = F.softmax(Y, dim=-1)
    dist_x = EuclideanDistance()
    pp = GraphLaplacianIF(dist_x, is_output_probas=output_probas)
    pp.add_datapoints(X, Y)
    exact_solution = pp.postprocess("exact", lambda_param, scale, threshold, normalize)
    assert np.array_equal(list(Y.shape), list(exact_solution.y_solution.shape))
    coo_solution = pp.postprocess(
        "coordinate-descent", lambda_param, scale,
        threshold, normalize, batchsize=16, epochs=50
    )
    assert np.array_equal(list(Y.shape), list(coo_solution.y_solution.shape))
    exact_obj = exact_solution.objective
    coo_obj = coo_solution.objective
    for key in ['y_dist', 'L_objective', 'overall_objective']:
        assert abs(exact_obj[key] - coo_obj[key]) < 1e-3
| 1,926 | 29.109375 | 90 | py |
inFairness | inFairness-main/tests/distances/test_common_distances.py | import pytest
import math
import torch
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from inFairness import distances
def test_euclidean_distance():
    """Pairwise Euclidean distance: sqrt(2) for opposite corners of the
    unit square, exactly 0 for identical rows."""
    metric = distances.EuclideanDistance()
    lhs = torch.FloatTensor([[0.0, 0.0], [1.0, 1.0]])
    rhs = torch.FloatTensor([[1.0, 1.0], [1.0, 1.0]])
    expected = torch.FloatTensor([[math.sqrt(2)], [0.0]])
    assert torch.all(metric(lhs, rhs) == expected)
def test_protected_euclidean_distance():
    # With dimension 1 masked out, the distance must depend only on
    # dimensions 0 and 2, whatever the protected dimension's value is.
    protected_attrs = [1]  # Make the second dimension protected attribute
    num_attrs = 3
    dist = distances.ProtectedEuclideanDistance()
    dist.fit(protected_attrs, num_attrs)
    X = torch.FloatTensor(
        [
            [0.0, 1.0, 0.0],
            [0.0, 2.0, 0.0],
            [0.0, 0.0, 0.0],
            [1.0, 0.0, 1.0],
            [1.0, 1.0, 1.0],
            [1.0, 2.0, 1.0],
        ]
    )
    Y = torch.FloatTensor(
        [
            [1.0, 0.0, 1.0],
            [1.0, 1.0, 1.0],
            [1.0, 2.0, 1.0],
            [1.0, 2.0, 1.0],
            [1.0, 0.0, 1.0],
            [1.0, 1.0, 1.0],
        ]
    )
    # First three pairs differ by (1, *, 1) -> sqrt(2); the last three
    # differ only in the protected dimension -> 0.
    res = torch.FloatTensor(
        [[math.sqrt(2)], [math.sqrt(2)], [math.sqrt(2)], [0.0], [0.0], [0.0]]
    )
    assert torch.all(dist(X, Y) == res), f"{dist(X, Y)} :: {res}"
@pytest.mark.parametrize(
    "itemwise_dist",
    [(False), (True)],
)
def test_svd_sensitive_subspace_distance(itemwise_dist):
    # Shape and gradient-flow checks for the SVD subspace metric in both
    # modes: itemwise (row-to-row) and full pairwise distance matrix.
    n_samples = 10
    n_features = 50
    n_components = 10
    X_train = torch.rand((100, n_features))
    n_samples = 10
    X1 = torch.rand((n_samples, n_features)).requires_grad_()
    X2 = torch.rand((n_samples, n_features)).requires_grad_()
    metric = distances.SVDSensitiveSubspaceDistance()
    metric.fit(X_train, n_components)
    dist = metric(X1, X2, itemwise_dist)
    if itemwise_dist:
        assert list(dist.shape) == [n_samples, 1]
        assert dist.requires_grad == True
    else:
        assert list(dist.shape) == [1, n_samples, n_samples]
        assert dist.requires_grad == True
    if itemwise_dist:
        # Distance of a batch to itself must be exactly zero.
        dist = metric(X1, X1, itemwise_dist)
        assert torch.all(dist == 0)
        assert dist.requires_grad == True
@pytest.mark.parametrize(
    "itemwise_dist",
    [(False), (True)],
)
def test_svd_sensitive_subspace_distance_multiple_similar_data(itemwise_dist):
    # Same checks as above, but fit() receives a *list* of training
    # matrices instead of a single tensor.
    n_samples = 10
    n_features = 50
    n_components = 10
    X_train = [torch.rand((100, n_features)) for _ in range(10)]
    n_samples = 10
    X1 = torch.rand((n_samples, n_features)).requires_grad_()
    X2 = torch.rand((n_samples, n_features)).requires_grad_()
    metric = distances.SVDSensitiveSubspaceDistance()
    metric.fit(X_train, n_components)
    dist = metric(X1, X2, itemwise_dist)
    if itemwise_dist:
        assert list(dist.shape) == [n_samples, 1]
        assert dist.requires_grad == True
    else:
        assert list(dist.shape) == [1, n_samples, n_samples]
        assert dist.requires_grad == True
    if itemwise_dist:
        # Distance of a batch to itself must be exactly zero.
        dist = metric(X1, X1, itemwise_dist)
        assert torch.all(dist == 0)
        assert dist.requires_grad == True
def test_svd_sensitive_subspace_distance_raises_error():
    """fit() must reject a None training set with a TypeError."""
    metric = distances.SVDSensitiveSubspaceDistance()
    with pytest.raises(TypeError):
        metric.fit(None, 10)
@pytest.mark.parametrize(
    "itemwise_dist",
    [(False), (True)],
)
def test_explore_sensitive_subspace_distance(itemwise_dist):
    # EXPLORE metric fitted from labeled pairs (Y in {0, 1}); checks
    # output shapes, gradient flow, and exact-zero self-distance.
    n_features = 50
    n_samples = 100
    X1 = torch.rand((n_samples, n_features)).requires_grad_()
    X2 = torch.rand((n_samples, n_features)).requires_grad_()
    Y = torch.randint(low=0, high=2, size=(n_samples,))
    metric = distances.EXPLOREDistance()
    metric.fit(X1, X2, Y, iters=100, batchsize=8)
    dist = metric(X1, X2, itemwise_dist)
    if itemwise_dist:
        assert list(dist.shape) == [n_samples, 1]
        assert dist.requires_grad == True
    else:
        assert list(dist.shape) == [1, n_samples, n_samples]
        assert dist.requires_grad == True
    if itemwise_dist:
        # Distance of a batch to itself must be exactly zero.
        dist = metric(X1, X1, itemwise_dist)
        assert torch.all(dist == 0)
        assert dist.requires_grad == True
def test_squared_euclidean_distance():
    """d(x1, x2) must equal ||x1 - x2||^2 (here 2^2 + 2^2 = 8) and the
    self-distance must be zero."""
    metric = distances.SquaredEuclideanDistance()
    metric.fit(num_dims=2)

    scaled_ones = 2 * torch.ones(2)
    origin = torch.zeros(2)

    assert metric(scaled_ones, origin, True).item() == 8
    assert metric(scaled_ones, scaled_ones, True) == 0
def test_logistic_reg_distance_protected_idx():
    # Column 3 is a binary protected attribute deliberately correlated
    # with column 0 (it is added onto it below), so the fitted logistic
    # regression should weight column 0 most heavily.
    X_train = torch.rand(size=(100, 3))
    mean = X_train.mean(dim=0, keepdim=True)
    std = X_train.std(dim=0, keepdim=True)
    X_train = (X_train - mean) / std
    protected_attr = torch.randint(low=0, high=2, size=(100, 1))
    X_train[:, 0:1] += protected_attr
    X_train = torch.hstack((X_train, protected_attr))
    dist = distances.LogisticRegSensitiveSubspace()
    dist.fit(X_train, protected_idxs=[3])
    assert dist.basis_vectors_.shape == (4, 2)
    # The injected correlation should make column 0's weight dominate.
    assert dist.basis_vectors_[0, 0] > dist.basis_vectors_[1, 0]
    assert len(dist.logistic_regression_models) == 1
    for model in dist.logistic_regression_models:
        assert isinstance(model, LogisticRegression)
def test_logistic_reg_distance_no_protected_idx():
    # Protected attributes supplied directly as a tensor rather than as
    # column indices: expect one fitted LR model per protected column.
    X_train = torch.rand(size=(100, 5))
    protected_attr = torch.randint(low=0, high=2, size=(100, 2)).long()
    dist = distances.LogisticRegSensitiveSubspace()
    dist.fit(X_train, data_SensitiveAttrs=protected_attr)
    assert dist.basis_vectors_.shape == (5, 2)
    assert len(dist.logistic_regression_models) == 2
    for model in dist.logistic_regression_models:
        assert isinstance(model, LogisticRegression)
def test_logistic_reg_distance_raises_error():
    # Supplying both an attribute tensor and column indices is ambiguous
    # and must be rejected.
    X_train = torch.rand(size=(100, 5))
    protected_attr = torch.randint(low=0, high=2, size=(100, 2)).long()
    dist = distances.LogisticRegSensitiveSubspace()
    with pytest.raises(AssertionError):
        dist.fit(X_train, data_SensitiveAttrs=protected_attr, protected_idxs=[1, 2])
    # Non-binary "protected" attributes must also be rejected.
    protected_attr = torch.randint(low=0, high=6, size=(100, 2)).long()
    dist = distances.LogisticRegSensitiveSubspace()
    with pytest.raises(AssertionError):
        dist.fit(X_train, protected_attr)
def test_wasserstein_distance():
    """
    uses a SquaredEuclidean special case of a Mahalanobis distance to reduce the set difference between
    2 batches of elements.
    """
    squared_euclidean = distances.SquaredEuclideanDistance()
    squared_euclidean.fit(num_dims=2)
    sigma = squared_euclidean.sigma
    wasserstein_dist = distances.WassersteinDistance()
    wasserstein_dist.fit(sigma)
    # x2 starts as all-ones and is optimized to minimize its transport
    # cost to the fixed random sets in x1.
    x1 = torch.randn(3, 10, 2)
    x2 = torch.nn.Parameter(torch.ones_like(x1))
    optimizer = torch.optim.Adam([x2], lr=0.01)
    for i in range(1000):
        optimizer.zero_grad()
        loss = wasserstein_dist(x1, x2).sum()
        loss.backward()
        optimizer.step()
    """
    if two sets are close in the euclidean space, the sum of the elements in the two sets must add to a similar
    value
    """
    assert (torch.abs(x1.sum(dim=1).sum(dim=1) - x2.sum(dim=1).sum(dim=1)) < 3.0).all()
| 7,261 | 26.507576 | 112 | py |
inFairness | inFairness-main/tests/distances/test_distance_state.py | import pytest
import torch
from inFairness import distances
def test_mahalanobis_dist_state_buffer_set():
    # fit() must register sigma in the state dict, and a second fit()
    # must overwrite the stored buffer with the new matrix.
    dist = distances.MahalanobisDistances()
    sigma = torch.rand(size=(10, 10))
    dist.fit(sigma)
    state_dict = dist.state_dict()
    assert "sigma" in state_dict
    assert torch.all(state_dict["sigma"] == sigma)
    sigma = torch.rand(size=(10, 10))
    dist.fit(sigma)
    state_dict = dist.state_dict()
    assert "sigma" in state_dict
    assert torch.all(state_dict["sigma"] == sigma)
def test_mahalanobis_dist_state_update():
    # A fresh instance loaded from a saved state dict must reproduce the
    # fitted sigma exactly.
    dist = distances.MahalanobisDistances()
    sigma = torch.rand(size=(10, 10))
    dist.fit(sigma)
    state_dict = dist.state_dict()
    assert "sigma" in state_dict
    assert torch.all(state_dict["sigma"] == sigma)
    dist1 = distances.MahalanobisDistances()
    dist1.load_state_dict(state_dict)
    state_dict1 = dist1.state_dict()
    assert "sigma" in state_dict1
    assert torch.all(state_dict1["sigma"] == sigma)
def test_squared_euclidean_dist_state():
    """After fit(num_dims=5), the buffered sigma must be the 5x5 identity."""
    metric = distances.SquaredEuclideanDistance()
    metric.fit(num_dims=5)
    state = metric.state_dict()
    assert "sigma" in state
    assert torch.all(torch.eye(5) == state["sigma"])
def test_protected_euclidean_dist_state():
    # The state dict must expose `protected_vector`: ones everywhere
    # except zeros at the protected indices.
    protected_attrs = [1]
    num_attrs = 3
    dist = distances.ProtectedEuclideanDistance()
    dist.fit(protected_attrs, num_attrs)
    protected_vec = torch.ones(num_attrs)
    protected_vec[protected_attrs] = 0.0
    state_dict = dist.state_dict()
    assert "protected_vector" in state_dict
    assert torch.all(protected_vec == state_dict["protected_vector"])
def test_svd_distance_state():
    """SVD subspace metric state survives a save/load round-trip."""
    n_features, n_components = 50, 10
    train_data = torch.rand((100, n_features))

    metric = distances.SVDSensitiveSubspaceDistance()
    metric.fit(train_data, n_components)

    state = metric.state_dict()
    assert "sigma" in state
    sigma = state["sigma"]
    assert sigma.shape == (n_features, n_features)

    restored = distances.SVDSensitiveSubspaceDistance()
    restored.load_state_dict(state)
    assert torch.all(restored.state_dict()["sigma"] == sigma)
def test_explore_distance_state():
    """EXPLORE metric state survives a save/load round-trip."""
    n_features, n_samples = 50, 100

    X1 = torch.rand((n_samples, n_features)).requires_grad_()
    X2 = torch.rand((n_samples, n_features)).requires_grad_()
    Y = torch.randint(low=0, high=2, size=(n_samples,))

    metric = distances.EXPLOREDistance()
    metric.fit(X1, X2, Y, iters=100, batchsize=8)

    state = metric.state_dict()
    assert "sigma" in state
    sigma = state["sigma"]
    assert sigma.shape == (n_features, n_features)

    restored = distances.EXPLOREDistance()
    restored.load_state_dict(state)
    assert torch.all(restored.state_dict()["sigma"] == sigma)
def test_logreg_distance_state():
    """LogisticRegSensitiveSubspace state survives a save/load round-trip."""
    n_samples, n_features = 100, 3

    # Standardize random features, then inject a binary protected attribute:
    # it leaks into feature 0 and is appended as the 4th column.
    X_train = torch.rand(size=(n_samples, n_features))
    mean = X_train.mean(dim=0, keepdim=True)
    std = X_train.std(dim=0, keepdim=True)
    X_train = (X_train - mean) / std
    protected_attr = torch.randint(low=0, high=2, size=(n_samples, 1))
    X_train[:, 0:1] += protected_attr
    X_train = torch.hstack((X_train, protected_attr))

    metric = distances.LogisticRegSensitiveSubspace()
    metric.fit(X_train, protected_idxs=[3])

    state = metric.state_dict()
    assert "sigma" in state
    sigma = state["sigma"]
    assert sigma.shape == (n_features + 1, n_features + 1)

    # BUG FIX: the state must be reloaded into the same metric class that
    # produced it (was `distances.EXPLOREDistance()`, a copy-paste slip
    # from the test above, which left the round-trip untested).
    metric_new = distances.LogisticRegSensitiveSubspace()
    metric_new.load_state_dict(state)
    new_state = metric_new.state_dict()
    assert torch.all(new_state["sigma"] == sigma)
def test_wasserstein_dist_state():
    """Wasserstein distance stores the underlying sigma and reloads it."""
    # Use a fitted squared-Euclidean metric as the source of sigma.
    base = distances.SquaredEuclideanDistance()
    base.fit(num_dims=2)
    sigma = base.sigma

    wdist = distances.WassersteinDistance()
    wdist.fit(sigma)

    state = wdist.state_dict()
    assert "sigma" in state
    assert torch.all(state["sigma"] == sigma)

    restored = distances.WassersteinDistance()
    restored.load_state_dict(state)
    assert torch.all(restored.state_dict()["sigma"] == sigma)
inFairness | inFairness-main/tests/utils/test_normalized_discounted_cumulative_gain.py | import torch
import inFairness.utils.ndcg as ndcg
def test_normalized_discounted_cumulative_gain():
    """NDCG is 1 for perfectly ordered relevances and matches the known
    reference values otherwise (scalar and vectorized variants)."""
    x = torch.tensor([10, 8.0, 1.0])
    assert ndcg.normalized_discounted_cumulative_gain(x) == 1.0

    # BUG FIX: the original one-sided checks (`value - c < eps`) passed for
    # any value below c + eps (e.g. 0.0); use the absolute difference so
    # the test actually pins the expected NDCG.
    x = torch.tensor([1., 2, 3])
    assert abs(ndcg.normalized_discounted_cumulative_gain(x) - 0.7397) < 0.01

    batch_x = torch.arange(8, dtype=torch.float).reshape(2, 4)
    assert (torch.abs(ndcg.vect_normalized_discounted_cumulative_gain(batch_x) - 0.6447) < 1e-2).all()

    batch_x, _ = torch.sort(batch_x, descending=True, dim=1)
    assert (torch.abs(ndcg.vect_normalized_discounted_cumulative_gain(batch_x) - 1.) < 1e-2).all()
| 597 | 32.222222 | 89 | py |
inFairness | inFairness-main/tests/utils/test_plackett_luce.py | import torch
from torch.nn.parameter import Parameter
from functorch import vmap
from inFairness.utils import plackett_luce
from inFairness.utils.plackett_luce import PlackettLuce
from inFairness.utils.ndcg import vect_normalized_discounted_cumulative_gain as v_ndcg
# Batched helpers: gather per-sample index permutations from a shared tensor,
# and compute NDCG independently for each row of a batch.
vect_gather = vmap(torch.gather, in_dims=(None,None, 0))
batched_v_ndcg = vmap(v_ndcg, in_dims=(0))
def test_batch_plackett_luce():
    """
    Optimize Plackett-Luce logits to match two ideal relevance orderings.

    Normalized discounted cumulative gain evaluates how well the underlying
    Plackett-Luce distribution approximates the ideal relevance; after
    optimization, the parameterized dummy_logits should assign the highest
    value to the most relevant item in each query.
    """
    # Two queries with opposite ideal orderings: increasing and decreasing.
    relevances1 = torch.arange(3,dtype=torch.float)
    relevances2 = torch.arange(2,-1,-1, dtype=torch.float)
    relevances = torch.stack([relevances1, relevances2])
    montecarlo_samples = 100
    dummy_logits = Parameter(torch.randn(2,3))
    plackett_luce = PlackettLuce(dummy_logits)
    optimizer = torch.optim.Adam([dummy_logits],lr=0.01)
    for _ in range(1000):
        optimizer.zero_grad()
        # Score-function (REINFORCE-style) gradient: sampled rankings'
        # NDCG weights the log-probabilities of those rankings.
        sampled_indices = plackett_luce.sample((montecarlo_samples,))
        log_probs = plackett_luce.log_prob(sampled_indices)
        pred_relevances = vect_gather(relevances,1,sampled_indices)
        utility = -batched_v_ndcg(pred_relevances)*log_probs
        utility.mean().backward()
        optimizer.step()
    # The dummy logits should be increasing for the increasing relevances
    # and decreasing for the others.
    dummy_increasing, dummy_decreasing = dummy_logits[0], dummy_logits[1]
    assert all([(dummy_increasing[i] <= dummy_increasing[i+1]).item() for i in range(2)])
    assert all([(dummy_decreasing[i] >= dummy_decreasing[i+1]).item() for i in range(2)])
| 1,752 | 34.77551 | 100 | py |
inFairness | inFairness-main/tests/auditor/test_senstir_auditor.py | import pytest
import torch
from mock import patch
from inFairness.auditor import SenSTIRAuditor
from inFairness.distances import (
SensitiveSubspaceDistance,
SquaredEuclideanDistance,
)
def mock_torch_rand_like(*size):
    """Deterministic stand-in for ``torch.rand_like``: returns all ones."""
    ones = torch.ones_like(*size)
    return ones
@patch("torch.rand_like", mock_torch_rand_like)
def test_sestirauditor_generate_worst_case_examples():
    """Worst-case queries differ along the sensitive dimension only.

    With a metric that is insensitive to the first feature, the auditor's
    adversarial queries should stay close in that feature and move far in
    the second (sensitive) one.
    """
    batch_size = 2
    query_size = 10
    feature_size = 2
    num_steps = 1000
    lr = 0.005
    max_noise = 0.5
    min_noise = -0.5
    lambda_param = torch.tensor(3000.0)

    # let's create a Sensitive subspace distance in the input space
    distance_x = SensitiveSubspaceDistance()
    # we use the second dimension in the basis vector because the projection complement will give us the first
    basis_vectors_ = torch.tensor([[0], [1.]])
    distance_x.fit(basis_vectors_)

    # distance between sets of items
    distance_y = SquaredEuclideanDistance()
    distance_y.fit(num_dims=query_size)

    auditor = SenSTIRAuditor(
        distance_x, distance_y, num_steps, lr, max_noise, min_noise
    )

    # let's create a dummy network equally sensitive in both dimensions
    network = torch.nn.Linear(feature_size, 1, bias=None)
    network.weight.data = torch.ones((1, feature_size))

    # now some dummy batch of queries
    Q = torch.randn(batch_size, query_size, feature_size)

    Q_worst = auditor.generate_worst_case_examples(
        network, Q, lambda_param, torch.optim.Adam
    )

    # since the first dimension is sensitive, the examples should differ quite a bit in the second dimension while being similar in the first
    first_dim_Q = Q[:, :, 0]
    second_dim_Q = Q[:, :, 1]
    first_dim_Q_worst = Q_worst[:, :, 0]
    second_dim_Q_worst = Q_worst[:, :, 1]

    # if two sets differ, their values should add to a high value
    assert (torch.abs(second_dim_Q.sum(1) - second_dim_Q_worst.sum(1)) > 10.0).all()

    # if two sets are close, their sum should add to a similar value
    assert (torch.abs(first_dim_Q.sum(1) - first_dim_Q_worst.sum(1)) < 1.0).all()
| 2,070 | 30.378788 | 141 | py |
inFairness | inFairness-main/tests/auditor/test_sensei_auditor.py | import pytest
import numpy as np
from mock import patch
import torch
from torch.nn import functional as F
from inFairness.auditor import SenSeIAuditor
def mock_adam_optim(
    params, lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False
):
    """Adam-compatible factory that actually builds plain SGD (same lr)."""
    sgd = torch.optim.SGD(params, lr=lr)
    return sgd
def my_dist(s, t):
    """Squared L2 norm of ``s - t`` taken along dim 0."""
    diff = s - t
    return torch.norm(diff, dim=0) ** 2
class MockPerceptron(torch.nn.Module):
    """Minimal bias-free linear network (float64) used as a test model."""

    def __init__(self, xdim, ydim):
        super().__init__()
        # float64 weights so the model accepts the float64 test tensors
        self.fc = torch.nn.Linear(xdim, ydim, dtype=float, bias=False)

    def forward(self, x):
        return self.fc(x)
def mock_torch_rand(*size):
    """Deterministic stand-in for ``torch.rand``: returns all ones."""
    ones = torch.ones(*size)
    return ones
def test_sensei_init():
    """SenSeIAuditor stores the constructor arguments it is given."""
    # Cleanup: the original assigned `n_fair_steps`/`fair_lr` twice (the
    # first values were dead) and built an unused network/lambda tensor.
    distance_x = my_dist
    distance_y = my_dist
    n_fair_steps = 100
    fair_lr = 100

    sensei = SenSeIAuditor(
        distance_x=distance_x, distance_y=distance_y, num_steps=n_fair_steps, lr=fair_lr
    )

    assert sensei.num_steps == n_fair_steps
    assert sensei.lr == fair_lr
@patch("torch.optim.Adam", mock_adam_optim)
@patch("torch.rand", mock_torch_rand)
def test_sensrauditor_generate_worse_case_examples():
    """Worst-case examples from SenSeIAuditor keep the input's shape.

    NOTE(review): despite the "sensrauditor" name, this test exercises
    SenSeIAuditor (this is the SenSeI test module) — consider renaming.
    Adam and torch.rand are patched so the single audit step is
    deterministic. `y` is built but unused by the call under test.
    """
    minibatch_size = 2
    xdim = 3
    ydim = 1
    n_fair_steps = 1
    fair_lr = 1.0
    max_noise = 0.2
    min_noise = 0.0

    x = torch.from_numpy(np.ones([minibatch_size, xdim]))
    y = torch.from_numpy(np.zeros([minibatch_size, ydim]))
    network = MockPerceptron(xdim, ydim)
    lamb = torch.tensor(1.0)
    distance_x = my_dist
    distance_y = my_dist

    # All weights fixed to 1 so the network output is fully determined.
    for param in network.parameters():
        param.data.fill_(float(1.0))

    se_auditor = SenSeIAuditor(
        distance_x=distance_x,
        distance_y=distance_y,
        num_steps=n_fair_steps,
        lr=fair_lr,
        max_noise=max_noise,
        min_noise=min_noise,
    )

    output = se_auditor.generate_worst_case_examples(
        network=network, x=x, lambda_param=lamb
    )

    assert np.array_equal(list(output.size()), list(x.size()))
@pytest.mark.parametrize(
    "audit_threshold,lambda_param,confidence,optimizer",
    [
        (None, None, 0.95, None),
        (None, None, 0.95, torch.optim.Adam),
        (1.25, None, 0.95, None),
        (1.25, 0.25, 0.85, torch.optim.Adam),
    ],
)
def test_sensei_auditing(audit_threshold, lambda_param, confidence, optimizer):
    """SenSeIAuditor.audit returns a complete response object.

    The four parameter combinations exercise auditing with and without an
    audit threshold and with a default vs. explicit optimizer. When no
    threshold is given, the threshold-dependent fields must be None.
    """
    xdim = 50
    ydim = 1
    B = 100

    network = MockPerceptron(xdim, ydim)
    loss_fn = F.mse_loss
    distance_x = my_dist
    distance_y = my_dist
    n_fair_steps = 10
    fair_lr = 0.01

    auditor = SenSeIAuditor(
        distance_x=distance_x, distance_y=distance_y, num_steps=n_fair_steps, lr=fair_lr
    )

    X_audit = torch.rand(size=(B, xdim), dtype=torch.float64)
    Y_audit = torch.rand(size=(B, ydim), dtype=torch.float64)

    response = auditor.audit(
        network,
        X_audit,
        Y_audit,
        loss_fn,
        audit_threshold,
        lambda_param,
        confidence,
        optimizer,
    )

    # Loss-ratio statistics are always populated.
    assert response.lossratio_mean is not None and isinstance(
        response.lossratio_mean, float
    )
    assert response.lossratio_std is not None and isinstance(
        response.lossratio_std, float
    )
    assert response.lower_bound is not None and isinstance(response.lower_bound, float)

    # Fairness verdict fields are only filled when a threshold was supplied.
    if audit_threshold is None:
        assert response.threshold is None
        assert response.pval is None
        assert response.confidence is None
        assert response.is_model_fair is None
    else:
        assert response.threshold is not None and isinstance(response.threshold, float)
        assert response.pval is not None and isinstance(response.pval, float)
        assert response.confidence == confidence
        assert response.is_model_fair is not None and isinstance(
            response.is_model_fair, bool
        )
| 3,926 | 24.5 | 88 | py |
inFairness | inFairness-main/tests/auditor/test_auditor.py | from re import X
import pytest
import numpy as np
from inFairness.auditor import Auditor
from mock import patch
import torch
from torch.nn import functional as F
def mock_adam_optim(
    params, lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False
):
    """Adam-compatible factory that actually builds plain SGD (same lr)."""
    return torch.optim.SGD(params, lr=lr)
def my_dist(s, t):
    """Squared L2 norm of ``s - t`` taken along dim 0."""
    return torch.norm(s - t, dim=0).pow(2)
class MockPerceptron(torch.nn.Module):
    """Minimal bias-free linear network (float64) used as a test model."""

    def __init__(self, xdim, ydim):
        super().__init__()
        # float64 weights so the model accepts the float64 test tensors
        self.fc = torch.nn.Linear(xdim, ydim, dtype=float, bias=False)

    def forward(self, x):
        return self.fc(x)
def mock_torch_rand(*size):
    """Deterministic stand-in for ``torch.rand``: returns all ones."""
    return torch.ones(*size)
def test_auditor_loss_ratio():
    """compute_loss_ratio returns one ratio per audited sample, shape (B, 1)."""
    xdim, ydim, B = 50, 1, 100

    network = MockPerceptron(xdim, ydim)
    loss_fn = F.l1_loss
    auditor = Auditor()

    X_audit = torch.rand(size=(B, xdim), dtype=torch.float64)
    X_worst = torch.rand(size=(B, xdim), dtype=torch.float64)
    Y_audit = torch.rand(size=(B, ydim), dtype=torch.float64)

    loss_ratio = auditor.compute_loss_ratio(X_audit, X_worst, Y_audit, network, loss_fn)
    assert np.array_equal(loss_ratio.shape, [B, 1])
| 1,177 | 21.226415 | 88 | py |
inFairness | inFairness-main/tests/auditor/test_sensr_auditor.py | import pytest
import numpy as np
from inFairness.auditor import SenSRAuditor
from mock import patch
import torch
from torch.nn import functional as F
def mock_adam_optim(
    params, lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False
):
    """Adam-compatible factory that actually builds plain SGD (same lr)."""
    optimizer = torch.optim.SGD(params, lr=lr)
    return optimizer
def my_dist(s, t):
    """Squared L2 norm of ``s - t`` taken along dim 0."""
    delta = s - t
    return torch.norm(delta, dim=0).pow(2)
class MockPerceptron(torch.nn.Module):
    """Minimal bias-free linear network (float64) used as a test model."""

    def __init__(self, xdim, ydim):
        super().__init__()
        # float64 weights so the model accepts the float64 test tensors
        self.fc = torch.nn.Linear(xdim, ydim, dtype=float, bias=False)

    def forward(self, x):
        return self.fc(x)
def mock_torch_rand(*size):
    """Deterministic stand-in for ``torch.rand``: returns all ones."""
    result = torch.ones(*size)
    return result
def test_sensrauditor_init():
    """SenSRAuditor stores the constructor arguments it is given."""
    # Cleanup: removed unused locals (`xdim`, `ydim`, `network`, `lamb`)
    # that the original built but never passed to the auditor.
    n_fair_steps = 100
    fair_lr = 100
    loss_fn = F.mse_loss
    distance_x = my_dist

    sensr = SenSRAuditor(
        loss_fn=loss_fn, distance_x=distance_x, num_steps=n_fair_steps, lr=fair_lr
    )

    assert sensr.num_steps == n_fair_steps
    assert sensr.lr == fair_lr
@patch("torch.optim.Adam", mock_adam_optim)
@patch("torch.rand", mock_torch_rand)
def test_sensrauditor_generate_worse_case_examples():
    """Worst-case examples from SenSRAuditor keep the input's shape.

    Adam and torch.rand are patched so the single audit step is
    deterministic.
    """
    minibatch_size = 2
    xdim = 3
    ydim = 1
    n_fair_steps = 1
    fair_lr = 1.0
    max_noise = 0.2
    min_noise = 0.0

    x = torch.from_numpy(np.ones([minibatch_size, xdim]))
    y = torch.from_numpy(np.zeros([minibatch_size, ydim]))
    network = MockPerceptron(xdim, ydim)
    loss_fn = F.mse_loss
    lamb = torch.tensor(1.0)
    distance_x = my_dist

    # All weights fixed to 1 so the network output is fully determined.
    for param in network.parameters():
        param.data.fill_(float(1.0))

    sr_auditor = SenSRAuditor(
        loss_fn=loss_fn,
        distance_x=distance_x,
        num_steps=n_fair_steps,
        lr=fair_lr,
        max_noise=max_noise,
        min_noise=min_noise,
    )

    output = sr_auditor.generate_worst_case_examples(
        network=network, x=x, y=y, lambda_param=lamb
    )

    assert np.array_equal(list(output.size()), list(x.size()))
@pytest.mark.parametrize(
    "audit_threshold,lambda_param,confidence,optimizer",
    [
        (None, None, 0.95, None),
        (None, None, 0.95, torch.optim.Adam),
        (1.25, None, 0.95, None),
        (1.25, 0.25, 0.85, torch.optim.Adam),
    ],
)
def test_sensr_auditing(audit_threshold, lambda_param, confidence, optimizer):
    """SenSRAuditor.audit returns a complete response object.

    Mirrors test_sensei_auditing: with and without a threshold, default vs.
    explicit optimizer. Threshold-dependent fields must be None when no
    threshold is supplied.
    """
    xdim = 50
    ydim = 1
    B = 100

    network = MockPerceptron(xdim, ydim)
    loss_fn = F.mse_loss
    distance_x = my_dist
    n_fair_steps = 10
    fair_lr = 0.01

    auditor = SenSRAuditor(
        loss_fn=loss_fn, distance_x=distance_x, num_steps=n_fair_steps, lr=fair_lr
    )

    X_audit = torch.rand(size=(B, xdim), dtype=torch.float64)
    Y_audit = torch.rand(size=(B, ydim), dtype=torch.float64)

    response = auditor.audit(
        network, X_audit, Y_audit, audit_threshold, lambda_param, confidence, optimizer
    )

    # Loss-ratio statistics are always populated.
    assert response.lossratio_mean is not None and isinstance(
        response.lossratio_mean, float
    )
    assert response.lossratio_std is not None and isinstance(
        response.lossratio_std, float
    )
    assert response.lower_bound is not None and isinstance(response.lower_bound, float)

    # Fairness verdict fields are only filled when a threshold was supplied.
    if audit_threshold is None:
        assert response.threshold is None
        assert response.pval is None
        assert response.confidence is None
        assert response.is_model_fair is None
    else:
        assert response.threshold is not None and isinstance(response.threshold, float)
        assert response.pval is not None and isinstance(response.pval, float)
        assert response.confidence == confidence
        assert response.is_model_fair is not None and isinstance(
            response.is_model_fair, bool
        )
| 3,772 | 25.384615 | 87 | py |
inFairness | inFairness-main/tests/fairalgo/test_sensei.py | import pytest
import numpy as np
from inFairness.auditor import SenSeIAuditor
from inFairness.fairalgo import SenSeI
from mock import patch
import torch
from torch.nn import functional as F
def mock_generate_worst_case_examples(cls, network, x, lambda_param):
    """Patched auditor hook: a tensor of -1s shaped like the input."""
    return -torch.ones_like(x)
def mock_dist(s, t):
    """Squared L2 norm of ``s - t`` taken along dim 0."""
    gap = s - t
    return torch.norm(gap, dim=0) ** 2
class MockPerceptron(torch.nn.Module):
    """Minimal bias-free linear network (float64) used as a test model."""

    def __init__(self, xdim, ydim):
        super().__init__()
        # float64 weights so the model accepts the float64 test tensors
        self.fc = torch.nn.Linear(xdim, ydim, dtype=float, bias=False)

    def forward(self, x):
        return self.fc(x)
@patch(
    "inFairness.auditor.SenSeIAuditor.generate_worst_case_examples",
    mock_generate_worst_case_examples,
)
def test_sensei_forward_train():
    """SenSeI forward pass produces the expected deterministic loss.

    The auditor's worst-case generator is patched to return all -1s, and
    the network weights are fixed to 1, so both the prediction (mean 3.0)
    and the fair loss (mean 81.0) are fully determined.
    """
    minibatch_size = 2
    xdim = 3
    ydim = 1
    n_fair_steps = 1
    lr = 1.0
    max_noise = 0.2
    min_noise = 0.0

    x = torch.from_numpy(np.ones([minibatch_size, xdim]))
    y = torch.from_numpy(np.zeros([minibatch_size, ydim]))
    network = MockPerceptron(xdim, ydim)
    loss_fn = F.mse_loss
    rho = 1.0
    eps = 1.0
    distance_x = mock_dist
    distance_y = mock_dist

    # All weights fixed to 1: y_pred = sum of the three 1-valued features.
    for param in network.parameters():
        param.data.fill_(float(1.0))

    sensei = SenSeI(
        network, distance_x, distance_y, loss_fn, rho, eps, n_fair_steps, lr
    )
    response = sensei.forward(x, y)

    assert torch.abs(torch.mean(response.loss) - torch.tensor(81.0)) < 0.000001
    assert torch.abs(torch.mean(response.y_pred) - torch.tensor(3.0)) < 0.000001
    assert isinstance(sensei.auditor, SenSeIAuditor)
| 1,593 | 24.709677 | 80 | py |
inFairness | inFairness-main/tests/fairalgo/test_senstir.py | import torch
from inFairness.distances import (
SensitiveSubspaceDistance,
SquaredEuclideanDistance,
)
from inFairness.fairalgo import SenSTIR
def generate_test_data(num_batches, queries_per_batch, items_per_query):
    """Synthetic ranking data: random 2-feature items whose relevance is the
    feature sum (computed before masking); the second feature is then zeroed
    for a random ~20% of items."""
    n_features = 2
    items = torch.rand(num_batches, queries_per_batch, items_per_query, n_features)
    # Relevance is fixed from the unmasked features.
    rels = items.sum(dim=3)
    # Keep feature 0 everywhere; keep feature 1 with probability 0.8.
    keep_first = torch.ones(num_batches, queries_per_batch, items_per_query, 1)
    keep_second = keep_first.clone().bernoulli_(0.8)
    items *= torch.cat([keep_first, keep_second], dim=3)
    return items, rels
def test_senstir():
    """Training SenSTIR shifts the model's weight toward the non-sensitive
    feature: after 200 steps the first/second weight ratio exceeds 3."""
    num_steps = 200
    queries_per_batch = 10
    items_per_query = 5
    feature_size = 2

    # dummy synthetic data
    item_data, relevances = generate_test_data(
        num_steps, queries_per_batch, items_per_query
    )

    # let's create a Sensitive subspace distance in the input space
    distance_x = SensitiveSubspaceDistance()
    # we use the second dimension in the basis vector because the projection complement will give us the first
    basis_vectors_ = torch.tensor([[0], [1.]])
    distance_x.fit(basis_vectors_)

    distance_y = SquaredEuclideanDistance()
    distance_y.fit(num_dims=items_per_query)

    # dummy network equally sensitive in both dimensions
    # (small random jitter breaks the exact symmetry of the init)
    network = torch.nn.Linear(feature_size, 1, bias=None)
    network.weight.data = (
        torch.ones((1, feature_size)) + torch.rand((1, feature_size)) * 0.01
    )

    fair_algo = SenSTIR(
        network,
        distance_x,
        distance_y,
        rho=0.1,
        eps=0.001,
        auditor_nsteps=10,
        auditor_lr=0.05,
        monte_carlo_samples_ndcg=60,
    )
    fair_algo.train()

    optimizer = torch.optim.Adam(fair_algo.parameters(), lr=0.01)

    # One pre-generated batch per optimization step.
    for i in range(num_steps):
        optimizer.zero_grad()
        loss = fair_algo(item_data[i], relevances[i]).loss
        loss.backward()
        optimizer.step()

    weights = network.weight.data.squeeze()
    # the ratio of the first component of this vector should be greater than 3
    # so that the response of the network should be majorly on the first dimension
    assert weights[0] / weights[1] > 3.0
| 2,236 | 28.826667 | 110 | py |
inFairness | inFairness-main/tests/fairalgo/test_sensr.py | import pytest
import numpy as np
from inFairness.auditor import SenSRAuditor
from inFairness.fairalgo import SenSR
from mock import patch
import torch
from torch.nn import functional as F
def mock_generate_worst_case_examples(cls, network, x, y, lambda_param):
    """Patched auditor hook: a tensor of -1s shaped like the input."""
    return -torch.ones_like(x)
def mock_dist(s, t):
    """Squared L2 norm of ``s - t`` taken along dim 0."""
    return (s - t).norm(dim=0).pow(2)
class MockPerceptron(torch.nn.Module):
    """Minimal bias-free linear network (float64) used as a test model."""

    def __init__(self, xdim, ydim):
        super().__init__()
        # float64 weights so the model accepts the float64 test tensors
        self.fc = torch.nn.Linear(xdim, ydim, dtype=float, bias=False)

    def forward(self, x):
        return self.fc(x)
@patch(
    "inFairness.auditor.SenSRAuditor.generate_worst_case_examples",
    mock_generate_worst_case_examples,
)
def test_sensr_forward_train():
    """SenSR forward pass produces the expected deterministic loss.

    The auditor's worst-case generator is patched to return all -1s, and
    the network weights are fixed to 1, so both the prediction (mean 3.0)
    and the loss (mean 9.0) are fully determined.
    """
    minibatch_size = 2
    xdim = 3
    ydim = 1
    n_fair_steps = 1
    lr_lamb = 1.0
    lr_param = 1.0
    max_noise = 0.2
    min_noise = 0.0

    x = torch.from_numpy(np.ones([minibatch_size, xdim]))
    y = torch.from_numpy(np.zeros([minibatch_size, ydim]))
    network = MockPerceptron(xdim, ydim)
    loss_fn = F.mse_loss
    eps = 1.0
    distance_x = mock_dist

    # All weights fixed to 1: y_pred = sum of the three 1-valued features.
    for param in network.parameters():
        param.data.fill_(float(1.0))

    sensr = SenSR(
        network, distance_x, loss_fn, eps, lr_lamb, lr_param, n_fair_steps, lr_lamb
    )
    response = sensr.forward(x, y)

    assert torch.abs(torch.mean(response.loss) - torch.tensor(9.0)) < 0.000001
    assert torch.abs(torch.mean(response.y_pred) - torch.tensor(3.0)) < 0.000001
    assert isinstance(sensr.auditor, SenSRAuditor)
| 1,576 | 24.852459 | 83 | py |
inFairness | inFairness-main/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the repository root importable so autodoc can find the package.
package_path = os.path.abspath('../..')
infairness_path = os.path.join(package_path, 'inFairness')
sys.path.insert(0, package_path)

# -- Project information -----------------------------------------------------

project = 'inFairness'
copyright = '2022, IBM Research'
author = 'IBM Research'

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.githubpages',
    'sphinx.ext.mathjax',
    'sphinxcontrib.apidoc',
    'sphinx_design',
    'myst_parser',
    'sphinx.ext.napoleon',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
    'sphinxcontrib.gtagjs'
]

# API doc configuration: generate API pages from the inFairness package.
apidoc_module_dir = infairness_path
# apidoc_output_dir = ''
apidoc_excluded_paths = []
apidoc_separate_modules = True

# Google analytics tracking
gtagjs_ids = [
    'G-3QDFV4L7YB',
]

# Napolean docstring configuration (the package uses NumPy-style docstrings)
napoleon_numpy_docstring = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# Markdown and ReST parser extension configuration
source_suffix = {
    '.rst': 'restructuredtext',
    '.txt': 'markdown',
    '.md': 'markdown',
}

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'furo'

html_theme_options = {
    "sidebar_hide_name": True,
    # Site-wide banner advertising the hosted demo.
    "announcement": ('<p><small>Check out our demonstration exploring '
                     'individual and group fairness of three BERT-based '
                     'toxic text classification models '
                     '<a class="sd-sphinx-override sd-badge sd-text-wrap '
                     'sd-bg-dark sd-bg-text-dark reference external" target="_blank"'
                     'href="https://fairbert.vizhub.ai">'
                     f'<span>Demonstration</span></a></small></p>'),
    "dark_css_variables": {
        "color-announcement-background": "#935610",
        "color-announcement-text": "#FFFFFF",
    },
    "light_css_variables": {
        "color-announcement-background": "#935610",
        "color-announcement-text": "#FFFFFF",
    },
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_logo = "_static/imgs/infairness-logo.png"
html_title = project

# Cross-link numpy and torch objects referenced in the docstrings.
intersphinx_mapping = {
    'numpy': ('http://docs.scipy.org/doc/numpy/', None),
    'torch': ('https://pytorch.org/docs/1.9.0/', None)
}

myst_enable_extensions = [
    "amsmath",
    "colon_fence",
    "deflist",
    "dollarmath",
    "fieldlist",
    "html_admonition",
    "html_image",
    "replacements",
    "smartquotes",
    "strikethrough",
    "substitution",
    "tasklist",
]
| 3,873 | 27.910448 | 79 | py |
inFairness | inFairness-main/inFairness/postprocessing/datainterfaces.py | from typing import Dict
import torch
from dataclasses import dataclass
@dataclass
class PostProcessingObjectiveResponse:
    """Class to store the result from a post-processing algorithm"""

    # Post-processed output values produced by the algorithm
    y_solution: torch.Tensor = None
    # Mapping of objective-component names to their computed values
    objective: Dict = None
| 256 | 20.416667 | 68 | py |
inFairness | inFairness-main/inFairness/postprocessing/data_ds.py | import torch
from inFairness.postprocessing.distance_ds import DistanceStructure
class PostProcessingDataStore(object):
    """Data structure holding the accumulated (X, Y) data for post-processing.

    Parameters
    -------------
    distance_x: inFairness.distances.Distance
        Distance metric in the input space
    """

    def __init__(self, distance_x):
        self.data_X = None
        self.data_Y = None
        self.n_samples = 0
        self.distance_ds = DistanceStructure(distance_x)

    @property
    def distance_matrix(self):
        """Distances between N data points. Shape: (N, N)"""
        return self.distance_ds.distance_matrix

    def add_datapoints_X(self, X: torch.Tensor):
        """Append new input datapoints along the batch dimension.

        Parameters
        ------------
        X: torch.Tensor
            New input data points; must match previously stored data on
            every dimension except the first (batch) dimension.
        """
        self.data_X = X if self.data_X is None else torch.cat([self.data_X, X], dim=0)

    def add_datapoints_Y(self, Y: torch.Tensor):
        """Append new output datapoints along the batch dimension.

        Parameters
        ------------
        Y: torch.Tensor
            New output data points; must match previously stored data on
            every dimension except the first (batch) dimension.
        """
        self.data_Y = Y if self.data_Y is None else torch.cat([self.data_Y, Y], dim=0)

    def add_datapoints(self, X: torch.Tensor, Y: torch.Tensor):
        """Append (X, Y), update the sample count, and rebuild distances.

        Parameters
        ------------
        X: torch.Tensor
            New input data points (batch dimension first).
        Y: torch.Tensor
            New output data points (batch dimension first).
        """
        self.add_datapoints_X(X)
        self.add_datapoints_Y(Y)
        self.n_samples += X.shape[0]
        # Distances are recomputed over the full accumulated input set.
        self.distance_ds.build_distance_matrix(self.data_X)

    def reset(self):
        """Drop all stored datapoints and return to the initial state."""
        self.data_X = None
        self.data_Y = None
        self.n_samples = 0
        self.distance_ds.reset()
| 2,843 | 30.955056 | 80 | py |
inFairness | inFairness-main/inFairness/postprocessing/glif.py | import torch
import numpy as np
from inFairness.utils.postprocessing import (
build_graph_from_dists,
get_laplacian,
laplacian_solve,
)
from inFairness.postprocessing.base_postprocessing import BasePostProcessing
from inFairness.postprocessing.datainterfaces import PostProcessingObjectiveResponse
class GraphLaplacianIF(BasePostProcessing):
"""Implements the Graph Laplacian Individual Fairness Post-Processing method.
Proposed in `Post-processing for Individual Fairness <https://arxiv.org/abs/2110.13796>`_
Parameters
------------
distance_x: inFairness.distances.Distance
Distance metric in the input space
is_output_probas: bool
True if the `data_Y` (model output) are probabilities implying that
this is a classification setting, and False if the `data_Y` are
in euclidean space implying that this is a regression setting.
"""
    def __init__(self, distance_x, is_output_probas):
        """See the class docstring for parameter descriptions."""
        super().__init__(distance_x, is_output_probas=is_output_probas)
        # Keys accepted by the `method` argument of `postprocess`.
        self._METHOD_COORDINATE_KEY = "coordinate-descent"
        self._METHOD_EXACT_KEY = "exact"
    def __exact_pp__(self, lambda_param, scale, threshold, normalize):
        """Implements Exact version of post processing.

        Builds the similarity graph over stored inputs, solves the
        Laplacian-regularized system for the connected nodes, and returns
        the post-processed outputs together with the objective values.
        """
        y_hat = self.__get_yhat__()
        # `idxs` are the data points that ended up connected in the graph;
        # only those entries are post-processed.
        W, idxs = build_graph_from_dists(
            self.distance_matrix, scale, threshold, normalize
        )
        data_y_idxs = y_hat[idxs]
        L = get_laplacian(W, normalize)
        # Symmetrize the normalized Laplacian so the solver sees a
        # symmetric system.
        if normalize:
            L = (L + L.T) / 2
        # Exact solve of the regularized system (see utils.postprocessing.
        # laplacian_solve; presumably the (I + lambda*L) linear system).
        y = laplacian_solve(L, data_y_idxs, lambda_param)
        data_y_new = torch.clone(y_hat)
        data_y_new[idxs] = y
        objective = self.get_objective(
            data_y_new, lambda_param, scale, threshold, normalize, W, idxs, L
        )
        return data_y_new, objective
    def __coordinate_update__(
        self,
        yhat_batch,
        W_batch,
        y,
        batchidx,
        lambda_param,
        D_inv_batch=None,
        diag_W_batch=None,
        D_batch=None,
    ):
        """Closed-form coordinate-descent update for one batch of nodes.

        Updates the solution entries for the nodes in `batchidx` while
        holding the rest of `y` fixed. The unnormalized-Laplacian update is
        used when `D_inv_batch` is None; otherwise the normalized variant.
        """
        W_xy = W_batch.unsqueeze(-1) * y.unsqueeze(0)
        """
        Shapes:
            W_batch: (bsz, nsamples)
            y: (nsamples, ncls - 1)
            W_xy: (bsz, nsamples, ncls-1)
            W_xy_corr: (bsz, ncls-1)
            numerator: (bsz, ncls-1)
            denominator: (bsz, 1)
        """
        if D_inv_batch is None:
            # Each batch node's self-contribution, excluded from the
            # neighbor-weighted sums in both numerator and denominator.
            W_xy_corr = torch.diagonal(W_xy[:, batchidx], offset=0, dim1=0, dim2=1).T
            numerator = yhat_batch + lambda_param * (W_xy.sum(dim=1) - W_xy_corr)
            denominator = 1 + lambda_param * (
                W_batch.sum(dim=1, keepdim=True) - diag_W_batch.view(-1, 1)
            )
            y_new = numerator / denominator
        else:
            # Normalized variant: neighbor contributions are weighted by the
            # inverse-degree terms before excluding the self-term.
            W_xy = W_xy * D_inv_batch.unsqueeze(-1)
            W_xy_corr = torch.diagonal(W_xy[:, batchidx], offset=0, dim1=0, dim2=1).T

            numerator = yhat_batch + (lambda_param * (W_xy.sum(dim=1) - W_xy_corr)) / 2
            denominator = (
                1
                + lambda_param
                - lambda_param * diag_W_batch.view(-1, 1) / D_batch.view(-1, 1)
            )
            y_new = numerator / denominator

        return y_new
    def __coordinate_pp__(
        self, lambda_param, scale, threshold, normalize, batchsize, epochs
    ):
        """Implements coordinate descent for large-scale data.

        Runs `epochs` passes over randomly permuted minibatches of size
        `batchsize`, applying the closed-form coordinate update to each.

        NOTE(review): indexing below assumes `idxs` covers all `n_samples`
        (i.e. every stored point is connected in the graph); otherwise
        `y_copy`/`W`/`pp_sol[idxs]` shapes would misalign — TODO confirm.
        """
        y_hat = self.__get_yhat__()
        y_copy = y_hat.clone()
        n_samples = self.datastore.n_samples

        W, idxs = build_graph_from_dists(
            self.distance_matrix, scale, threshold, normalize
        )
        data_y_idxs = y_hat[idxs]
        W_diag = torch.diag(W)

        if normalize:
            # Node degrees and the pairwise inverse-degree term used by the
            # normalized coordinate update.
            D = W.sum(dim=1)
            D_inv = 1 / D.reshape(1, -1) + 1 / D.reshape(-1, 1)

        for epoch_idx in range(epochs):
            # Fresh random visiting order each epoch.
            epoch_idxs_random = np.random.permutation(n_samples)
            curridx = 0

            while curridx < n_samples:
                batchidxs = epoch_idxs_random[curridx : curridx + batchsize]

                if normalize:
                    y_copy[batchidxs] = self.__coordinate_update__(
                        data_y_idxs[batchidxs],
                        W[batchidxs],
                        y_copy,
                        batchidxs,
                        lambda_param=lambda_param,
                        D_inv_batch=D_inv[batchidxs],
                        diag_W_batch=W_diag[batchidxs],
                        D_batch=D[batchidxs],
                    )
                else:
                    y_copy[batchidxs] = self.__coordinate_update__(
                        data_y_idxs[batchidxs],
                        W[batchidxs],
                        y_copy,
                        batchidxs,
                        lambda_param=lambda_param,
                        diag_W_batch=W_diag[batchidxs],
                    )

                curridx += batchsize

        pp_sol = y_hat.clone()
        pp_sol[idxs] = y_copy
        objective = self.get_objective(
            pp_sol, lambda_param, scale, threshold, normalize, W, idxs
        )
        return pp_sol, objective
    def get_objective(
        self,
        y_solution,
        lambda_param: float,
        scale: float,
        threshold: float,
        normalize: bool = False,
        W_graph=None,
        idxs=None,
        L=None,
    ):
        """Compute the objective values for the individual fairness as follows:

        .. math:: \\widehat{\\mathbf{f}} = \\arg \\min_{\\mathbf{f}} \\ \\|\\mathbf{f} - \\hat{\\mathbf{y}}\\|_2^2 + \\lambda \\ \\mathbf{f}^{\\top}\\mathbb{L_n} \\mathbf{f}

        Refer equation 3.1 in the paper

        Parameters
        ------------
        y_solution: torch.Tensor
            Post-processed solution values of shape (N, C)
        lambda_param: float
            Weight for the Laplacian Regularizer
        scale: float
            Parameter used to scale the computed distances.
            Refer equation 2.2 in the proposing paper.
        threshold: float
            Parameter used to construct the Graph from distances
            Distances below provided threshold are considered to be
            connected edges, while beyond the threshold are considered to
            be disconnected. Refer equation 2.2 in the proposing paper.
        normalize: bool
            Whether to normalize the computed Laplacian or not
        W_graph: torch.Tensor
            Adjacency matrix of shape (N, N)
        idxs: torch.Tensor
            Indices of data points which are included in the adjacency matrix
        L: torch.Tensor
            Laplacian of the adjacency matrix

        Returns
        ---------
        objective: dict
            Objective components: "y_dist" (fidelity term),
            "L_objective" (Laplacian regularizer), and
            "overall_objective" (their sum), each as a Python float.
            Refer equation 3.1 in the paper for the definitions.
        """
        # Rebuild the graph / Laplacian only when not supplied by the caller.
        if W_graph is None or idxs is None:
            W_graph, idxs = build_graph_from_dists(
                self.distance_matrix, scale, threshold, normalize
            )
        if L is None:
            L = get_laplacian(W_graph, normalize)

        y_hat = self.__get_yhat__()
        # Fidelity term: ||f - y_hat||^2 over all samples.
        y_dist = ((y_hat - y_solution) ** 2).sum()
        # Smoothness term: lambda * f^T L f restricted to graph nodes.
        L_obj = lambda_param * (y_solution[idxs] * (L @ y_solution[idxs])).sum()
        overall_objective = y_dist + L_obj

        result = {
            "y_dist": y_dist.item(),
            "L_objective": L_obj.item(),
            "overall_objective": overall_objective.item(),
        }
        return result
def postprocess(
self,
method: str,
lambda_param: float,
scale: float, # 0.001
threshold: float, # median of all distances if None
normalize: bool = False,
batchsize: int = None,
epochs: int = None,
):
"""Implements the Graph Laplacian Individual Fairness Post-processing algorithm
Parameters
-------------
method: str
GLIF method type. Possible values are:
(a) `coordinate-descent` method which is more suitable for
large-scale data and post-processes by batching data into minibatches
(see section 3.2.2 of the paper), or
(b) `exact` method which gets the exact solution but is not appropriate
for large-scale data (refer equation 3.3 in the paper).
lambda_param: float
Weight for the Laplacian Regularizer
scale: float
Parameter used to scale the computed distances.
Refer equation 2.2 in the proposing paper.
threshold: float
Parameter used to construct the Graph from distances
Distances below provided threshold are considered to be
connected edges, while beyond the threshold are considered to
be disconnected. Refer equation 2.2 in the proposing paper.
normalize: bool
Whether to normalize the computed Laplacian or not
batchsize: int
Batch size. *Required when method=`coordinate-descent`*
epochs: int
Number of coordinate descent epochs.
*Required when method=`coordinate-descent`*
Returns
-----------
solution: PostProcessingObjectiveResponse
post-processed solution containing two parts:
(a) Post-processed output probabilities of shape (N, C)
where N is the number of data samples, and C is the
number of output classes
(b) Objective values. Refer equation 3.1 in the paper
for an explanation of the various parts
"""
assert method in [
self._METHOD_COORDINATE_KEY,
self._METHOD_EXACT_KEY,
], f"`method` should be either `coordinate-descent` or `exact`. Value provided: {method}"
if method == self._METHOD_COORDINATE_KEY:
assert (
batchsize is not None and epochs is not None
), f"batchsize and epochs parameter is required but None provided"
if method == self._METHOD_EXACT_KEY:
data_y_new, objective = self.__exact_pp__(
lambda_param, scale, threshold, normalize
)
elif method == self._METHOD_COORDINATE_KEY:
data_y_new, objective = self.__coordinate_pp__(
lambda_param, scale, threshold, normalize, batchsize, epochs
)
if self.is_output_probas:
pp_sol = torch.exp(data_y_new) / (
1 + torch.exp(data_y_new).sum(axis=1, keepdim=True)
)
y_solution = torch.hstack((pp_sol, 1 - pp_sol.sum(axis=1, keepdim=True)))
else:
y_solution = data_y_new
result = PostProcessingObjectiveResponse(
y_solution=y_solution, objective=objective
)
return result
| 11,440 | 34.977987 | 174 | py |
inFairness | inFairness-main/inFairness/postprocessing/base_postprocessing.py | import torch
from typing import Tuple
from inFairness.postprocessing.data_ds import PostProcessingDataStore
class BasePostProcessing(object):
    """Base class for Post-Processing methods
    Parameters
    -------------
    distance_x: inFairness.distances.Distance
        Distance matrix in the input space
    is_output_probas: bool
        True if the `data_Y` (model output) are probabilities implying that
        this is a classification setting, and False if the `data_Y` are
        in euclidean space implying that this is a regression setting.
    """
    def __init__(self, distance_x, is_output_probas):
        self.distance_x = distance_x
        self.is_output_probas = is_output_probas
        # Datastore accumulates (X, Y) pairs and maintains the pairwise
        # input-space distance matrix as points are added.
        self.datastore = PostProcessingDataStore(distance_x)
    @property
    def data(self):
        """Input and Output data used for post-processing
        Returns
        --------
        data: Tuple(torch.Tensor, torch.Tensor)
            A tuple of (X, Y) data points
        """
        return (self.datastore.data_X, self.datastore.data_Y)
    @property
    def distance_matrix(self):
        """Distance matrix
        Returns
        --------
        distance_matrix: torch.Tensor
            Matrix of distances of shape (N, N) where
            N is the number of data samples
        """
        return self.datastore.distance_matrix
    def add_datapoints(self, X: torch.Tensor, y: torch.Tensor):
        """Add datapoints to the post-processing method
        Parameters
        -----------
        X: torch.Tensor
            New input datapoints
        y: torch.Tensor
            New output datapoints
        """
        self.datastore.add_datapoints(X, y)
    def reset_datapoints(self):
        """Reset datapoints store back to its initial state"""
        self.datastore.reset()
    def postprocess(self, *args, **kwargs):
        # Subclasses implement the concrete post-processing algorithm.
        raise NotImplementedError("postprocess method not implemented by class")
    def __get_yhat__(self):
        """Map the stored outputs into the space post-processing operates in.

        For classification (``is_output_probas=True``), class probabilities
        are converted to log-ratios against the last class (a
        multinomial-logit transform), yielding shape (N, C-1). For
        regression, the outputs are returned unchanged.
        """
        _, data_y = self.data
        if self.is_output_probas:
            # log p_c - log p_last: logits relative to the final class
            y_hat = torch.log(data_y[:, :-1]) - torch.log(data_y[:, -1]).view(-1, 1)
            return y_hat
        else:
            return data_y
| 2,253 | 28.272727 | 84 | py |
inFairness | inFairness-main/inFairness/postprocessing/distance_ds.py | import torch
class DistanceStructure(object):
    """Data structure to store and track the distance matrix between data points
    Parameters
    -------------
    distance_x: inFairness.distances.Distance
        Distance metric in the input space
    """
    def __init__(self, distance_x):
        self.distance_x = distance_x
        # Lazily built (N, N) tensor of pairwise distances;
        # None until `build_distance_matrix` is first called.
        self.distance_matrix = None
    def reset(self):
        """Reset the state of the data structure back to its initial state"""
        self.distance_matrix = None
    def build_distance_matrix(self, data_X):
        """Build the distance matrix between input data samples `data_X`
        Parameters
        -------------
        data_X: torch.Tensor
            Data points between which the distance matrix is to be computed
        """
        # Number of samples already covered by the existing matrix;
        # distances among those points are reused, not recomputed.
        nsamples_old = (
            0 if self.distance_matrix is None else self.distance_matrix.shape[0]
        )
        nsamples_total = data_X.shape[0]
        device = data_X.device
        distance_matrix_new = torch.zeros(
            size=(nsamples_total, nsamples_total), device=device
        )
        if self.distance_matrix is not None:
            # Copy the previously computed (old x old) block into place
            distance_matrix_new[:nsamples_old, :nsamples_old] = self.distance_matrix
        # Distances from each *new* sample to every sample (old and new).
        # NOTE(review): `.squeeze()` drops all singleton dims; with exactly
        # one new sample this yields a 1-D vector and the assignments below
        # rely on broadcasting — confirm against callers.
        dist = (
            self.distance_x(
                data_X[nsamples_old:nsamples_total], data_X, itemwise_dist=False
            )
            .detach()
            .squeeze()
        )
        # Fill the new rows, and the new columns by symmetry
        distance_matrix_new[nsamples_old:, :] = dist
        distance_matrix_new[:, nsamples_old:] = dist.T
        self.distance_matrix = distance_matrix_new.clone()
| 1,604 | 28.722222 | 84 | py |
inFairness | inFairness-main/inFairness/distances/wasserstein_distance.py | import torch
from ot import emd2
from inFairness.distances import MahalanobisDistances
class WassersteinDistance(MahalanobisDistances):
    """Batched Wasserstein distance between pairs of item sets.

    Operates on tensors of shapes (B, N, D) and (B, M, D), where B is the
    batch size, D the feature size, and N / M the number of items per batch
    element. The underlying Mahalanobis distance supplies the pairwise
    transport costs, and the optimal transport problem is solved per batch
    element with uniform marginals (see equation 2.5 of the reference).

    References
    ----------
    `Amanda Bower, Hamid Eftekhari, Mikhail Yurochkin, Yuekai Sun:
    Individually Fair Rankings. ICLR 2021`
    """

    def __init__(self):
        super().__init__()

    def forward(self, X1: torch.Tensor, X2: torch.Tensor):
        """Compute the batched Wasserstein distance implied by the
        underlying Mahalanobis cost function.

        Parameters
        --------------
            X1: torch.Tensor
                Data sample of shape (B, N, D)
            X2: torch.Tensor
                Data sample of shape (B, M, D)

        Returns
        --------
            dist: torch.Tensor
                Wasserstein distance of shape (B) between batch samples
                in X1 and X2
        """
        # Pairwise Mahalanobis costs between items: shape (B, N, M)
        cost_matrices = super().forward(X1, X2, itemwise_dist=False)

        # Uniform marginals over the items of each set
        n_items_1 = X1.shape[1]
        n_items_2 = X2.shape[1]
        marginal_1 = torch.ones(n_items_1) / n_items_1
        marginal_2 = torch.ones(n_items_2) / n_items_2

        # Solve one optimal-transport problem per batch element
        per_batch = [
            emd2(marginal_1, marginal_2, batch_cost) for batch_cost in cost_matrices
        ]
        return torch.stack(per_batch)
| 1,924 | 34.648148 | 136 | py |
inFairness | inFairness-main/inFairness/distances/explore_distance.py | import numpy as np
import torch
from scipy.stats import logistic
from inFairness.utils import datautils
from inFairness.distances.mahalanobis_distance import MahalanobisDistances
class EXPLOREDistance(MahalanobisDistances):
    """Implements the Embedded Xenial Pairs Logistic Regression metric
    (EXPLORE) defined in Section 2.2 of Two Simple Ways to Learn Individual
    Fairness Metrics from Data.
    EXPLORE defines the distance in the input space to be of the form:
    .. math:: d_x(x_1, x_2) := \langle \phi(x_1) - \phi(x_2), \Sigma (\phi(x_1) - \phi(x_2)) \\rangle
    where :math:`\phi(x)` is an embedding map and :math:`\Sigma` is a semi-positive
    definite matrix.
    The metric expects the data to be in the form of triplets
    :math:`\{(x_{i_1}, x_{i_2}, y_i)\}_{i=1}^{n}` where :math:`y_i \in \{0, 1\}`
    indicates whether the human considers :math:`x_{i_1}` and :math:`x_{i_2}`
    comparable (:math:`y_i = 1` indicates comparable) or not.
    References
    -----------
    `Mukherjee, Debarghya, Mikhail Yurochkin, Moulinath Banerjee, and Yuekai Sun.
    "Two simple ways to learn individual fairness metrics from data." In
    International Conference on Machine Learning, pp. 7097-7107. PMLR, 2020.`
    """
    def __init__(self):
        super().__init__()
    def fit(self, X1, X2, Y, iters, batchsize, autoinfer_device=True):
        """Fit EXPLORE distance metric
        Parameters
        -----------
        X1: torch.Tensor
            first set of input samples
        X2: torch.Tensor
            second set of input samples
        Y: torch.Tensor
            :math:`y_i` vector containing 1 if corresponding elements from
            X1 and X2 are comparable, and 0 if not
        iters: int
            number of iterations of SGD to compute the :math:`\Sigma` matrix
        batchsize: int
            batch size of each iteration
        autoinfer_device: bool
            Should the distance metric be automatically moved to an appropriate
            device (CPU / GPU) or not? If set to True, it moves the metric
            to the same device `X1` is on. If set to False, keeps the metric
            on CPU.
        """
        assert (
            X1.shape[0] == X2.shape[0] == Y.shape[0]
        ), "Number of elements in X1, X2, and Y do not match"
        # The metric depends only on pair differences, so train on X1 - X2
        X = datautils.convert_tensor_to_numpy(X1 - X2)
        Y = datautils.convert_tensor_to_numpy(Y)
        sigma = self.compute_sigma(X, Y, iters, batchsize)
        super().fit(sigma)
        if autoinfer_device:
            # Keep the fitted metric on the same device as the input data
            device = datautils.get_device(X1)
            super().to(device)
    def __grad_likelihood__(self, X, Y, sigma):
        """Computes the gradient of the likelihood function using sigmoidal link"""
        # diag_i = x_i^T Sigma x_i, clipped away from zero for stability
        diag = np.einsum("ij,ij->i", np.matmul(X, sigma), X)
        diag = np.maximum(diag, 1e-10)
        prVec = logistic.cdf(diag)
        sclVec = 2.0 / (np.exp(diag) - 1)
        # Positive (comparable) pairs push the distance down; negative
        # pairs push it up, rescaled by sclVec
        vec = (Y * prVec) - ((1 - Y) * prVec * sclVec)
        grad = np.matmul(X.T * vec, X) / X.shape[0]
        return grad
    def __projPSD__(self, sigma):
        """Computes the projection onto the PSD cone"""
        try:
            # If sigma is already PSD, Cholesky succeeds and sigma is kept
            # (re-symmetrized via L L^T)
            L = np.linalg.cholesky(sigma)
            sigma_hat = np.dot(L, L.T)
        except np.linalg.LinAlgError:
            # Otherwise rebuild from the (sufficiently) non-negative
            # eigenpairs only
            d, V = np.linalg.eigh(sigma)
            sigma_hat = np.dot(
                V[:, d >= 1e-8], d[d >= 1e-8].reshape(-1, 1) * V[:, d >= 1e-8].T
            )
        return sigma_hat
    def compute_sigma(self, X, Y, iters, batchsize):
        """Estimate :math:`\Sigma` by projected stochastic gradient descent
        on the pair-comparability likelihood. Note: uses np.random without
        seeding, so results vary across calls.
        """
        N = X.shape[0]
        P = X.shape[1]
        # Random symmetric PSD initialization, scaled to unit Frobenius norm
        sigma_t = np.random.normal(0, 1, P**2).reshape(P, P)
        sigma_t = np.matmul(sigma_t, sigma_t.T)
        sigma_t = sigma_t / np.linalg.norm(sigma_t)
        curriter = 0
        while curriter < iters:
            batch_idxs = np.random.choice(N, size=batchsize, replace=False)
            X_batch = X[batch_idxs]
            Y_batch = Y[batch_idxs]
            grad_t = self.__grad_likelihood__(X_batch, Y_batch, sigma_t)
            # Step size decays every 100 iterations: 1 / (1 + iter // 100)
            t = 1.0 / (1 + curriter // 100)
            # Gradient step followed by projection back onto the PSD cone
            sigma_t = self.__projPSD__(sigma_t - t * grad_t)
            curriter += 1
        sigma = torch.FloatTensor(sigma_t).detach()
        return sigma
| 4,287 | 35.338983 | 101 | py |
inFairness | inFairness-main/inFairness/distances/euclidean_dists.py | import torch
from inFairness.distances.distance import Distance
class EuclideanDistance(Distance):
    """Plain (unweighted) Euclidean distance between data points."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y, itemwise_dist=True):
        """Compute Euclidean distances between batches `x` and `y`.

        When `itemwise_dist` is True, the i-th row of `x` is paired with
        the i-th row of `y`, producing a (B, 1) column of distances.
        Otherwise the full pairwise distance matrix is returned.
        """
        if not itemwise_dist:
            return torch.cdist(x, y)
        paired = torch.cdist(x.unsqueeze(1), y.unsqueeze(1))
        return paired.reshape(-1, 1)
class ProtectedEuclideanDistance(Distance):
    """Euclidean distance that ignores a designated set of protected
    attributes: the protected feature columns are zeroed out before the
    distance is measured, so two points differing only in protected
    attributes are at distance zero.
    """

    def __init__(self):
        super().__init__()
        self._protected_attributes = None
        self._num_attributes = None
        # Registered as a buffer so it participates in state_dict()/device moves
        self.register_buffer("protected_vector", torch.Tensor())

    def to(self, device):
        """Moves distance metric to a particular device

        Parameters
        ------------
            device: torch.device
        """
        assert (
            self.protected_vector is not None and len(self.protected_vector.size()) != 0
        ), "Please fit the metric before moving parameters to device"
        self.device = device
        self.protected_vector = self.protected_vector.to(self.device)

    def fit(self, protected_attributes, num_attributes):
        """Fit Protected Euclidean Distance metric

        Parameters
        ------------
            protected_attributes: Iterable[int]
                Indices of attributes considered protected; these are
                masked out of the distance computation.
            num_attributes: int
                Total number of attributes in the data points.
        """
        self._protected_attributes = protected_attributes
        self._num_attributes = num_attributes
        # Mask vector: 1.0 for free attributes, 0.0 for protected ones
        mask = torch.ones(num_attributes)
        mask[protected_attributes] = 0.0
        self.protected_vector = mask

    def forward(self, x, y, itemwise_dist=True):
        """
        :param x, y: a B x D matrices
        :return: B x 1 matrix with the protected distance computed between x and y
        """
        masked_x = (x * self.protected_vector).unsqueeze(1)
        masked_y = (y * self.protected_vector).unsqueeze(1)
        if not itemwise_dist:
            return torch.cdist(masked_x, masked_y)
        return torch.cdist(masked_x, masked_y).reshape(-1, 1)
| 2,264 | 30.458333 | 88 | py |
inFairness | inFairness-main/inFairness/distances/logistic_sensitive_subspace.py | from typing import Iterable
import numpy as np
import torch
from sklearn.linear_model import LogisticRegression
from inFairness.distances import SensitiveSubspaceDistance
from inFairness.utils import datautils, validationutils
class LogisticRegSensitiveSubspace(SensitiveSubspaceDistance):
    """Implements the Softmax Regression model based fair metric as defined in Appendix B.1
    of "Training individually fair ML models with sensitive subspace robustness" paper.
    This metric assumes that the sensitive attributes are discrete and observed for a small subset
    of training data. Assuming data of the form :math:`(X_i, K_i, Y_i)` where :math:`K_i` is the
    sensitive attribute of the i-th subject, the model fits a softmax regression model to the data as:
    .. math:: \mathbb{P}(K_i = l\\mid X_i) = \\frac{\exp(a_l^TX_i+b_l)}{\\sum_{l=1}^k \\exp(a_l^TX_i+b_l)},\\ l=1,\\ldots,k
    Using the span of the matrix :math:`A=[a_1, \cdots, a_k]`, the fair metric is trained as:
    .. math:: d_x(x_1,x_2)^2 = (x_1 - x_2)^T(I - P_{\\text{ran}(A)})(x_1 - x_2)
    References
    -------------
    `Yurochkin, Mikhail, Amanda Bower, and Yuekai Sun. "Training individually fair
    ML models with sensitive subspace robustness." arXiv preprint arXiv:1907.00020 (2019).`
    """
    def __init__(self):
        super().__init__()
        # Fitted basis vectors of the sensitive subspace, shape (d, k)
        self.basis_vectors_ = None
        # One LogisticRegression model per sensitive attribute
        self._logreg_models = None
    @property
    def logistic_regression_models(self):
        """Logistic Regression models trained by the metric to predict each sensitive attribute
        given inputs. The property is a list of logistic regression models each corresponding to
        :math:`\mathbb{P}(K_i = l\\mid X_i)`. This property can be used to measure the performance
        of the logistic regression models.
        """
        return self._logreg_models
    def fit(
        self,
        data_X: torch.Tensor,
        data_SensitiveAttrs: torch.Tensor = None,
        protected_idxs: Iterable[int] = None,
        keep_protected_idxs: bool = True,
        autoinfer_device: bool = True,
    ):
        """Fit Logistic Regression Sensitive Subspace distance metric
        Parameters
        --------------
        data_X: torch.Tensor
            Input data corresponding to either :math:`X_i` or :math:`(X_i, K_i)` in the equation above.
            If the variable corresponds to :math:`X_i`, then the `y_train` parameter should be specified.
            If the variable corresponds to :math:`(X_i, K_i)` then the `protected_idxs` parameter
            should be specified to indicate the sensitive attributes.
        data_SensitiveAttrs: torch.Tensor
            Represents the sensitive attributes ( :math:`K_i` ) and is used when the `X_train` parameter
            represents :math:`X_i` from the equation above. **Note**: This parameter is mutually exclusive
            with the `protected_idxs` parameter. Specififying both the `data_SensitiveAttrs` and `protected_idxs`
            parameters will raise an error
        protected_idxs: Iterable[int]
            If the `X_train` parameter above represents :math:`(X_i, K_i)`, then this parameter is used
            to provide the indices of sensitive attributes in `X_train`. **Note**: This parameter is mutually exclusive
            with the `protected_idxs` parameter. Specififying both the `data_SensitiveAttrs` and `protected_idxs`
            parameters will raise an error
        keep_protected_indices: bool
            True, if while training the model, protected attributes will be part of the training data
            Set to False, if for training the model, protected attributes will be excluded
            Default = True
        autoinfer_device: bool
            Should the distance metric be automatically moved to an appropriate
            device (CPU / GPU) or not? If set to True, it moves the metric
            to the same device `X_train` is on. If set to False, keeps the metric
            on CPU.
        """
        # Exactly one of `data_SensitiveAttrs` / `protected_idxs` must be given
        if data_SensitiveAttrs is not None and protected_idxs is None:
            basis_vectors_ = self.compute_basis_vectors_data(
                X_train=data_X, y_train=data_SensitiveAttrs
            )
        elif data_SensitiveAttrs is None and protected_idxs is not None:
            basis_vectors_ = self.compute_basis_vectors_protected_idxs(
                data_X,
                protected_idxs=protected_idxs,
                keep_protected_idxs=keep_protected_idxs,
            )
        else:
            raise AssertionError(
                "Parameters `y_train` and `protected_idxs` are exclusive. Either of these two parameters should be None, and cannot be set to non-None values simultaneously."
            )
        super().fit(basis_vectors_)
        self.basis_vectors_ = basis_vectors_
        if autoinfer_device:
            device = datautils.get_device(data_X)
            super().to(device)
    def compute_basis_vectors_protected_idxs(
        self, data, protected_idxs, keep_protected_idxs=True
    ):
        """Build sensitive-subspace basis vectors when the sensitive
        attributes are columns of `data` identified by `protected_idxs`.
        """
        dtype = data.dtype
        data = datautils.convert_tensor_to_numpy(data)
        basis_vectors_ = []
        num_attr = data.shape[1]
        # Get input data excluding the protected attributes
        protected_idxs = sorted(protected_idxs)
        free_idxs = [idx for idx in range(num_attr) if idx not in protected_idxs]
        X_train = data[:, free_idxs]
        Y_train = data[:, protected_idxs]
        self.__assert_sensitiveattrs_binary__(Y_train)
        # One L1-regularized logistic regression per protected attribute,
        # predicting that attribute from the free (unprotected) features
        self._logreg_models = [
            LogisticRegression(solver="liblinear", penalty="l1")
            .fit(X_train, Y_train[:, idx])
            for idx in range(len(protected_idxs))
        ]
        coefs = np.array(
            [
                self._logreg_models[idx].coef_.squeeze()
                for idx in range(len(protected_idxs))
            ]
        )  # ( |protected_idxs|, |free_idxs| )
        if keep_protected_idxs:
            # To keep protected indices, we add two basis vectors
            # First, with logistic regression coefficients with 0 in
            # protected indices. Second, with one-hot vectors with 1 in
            # protected indices.
            basis_vectors_ = np.empty(shape=(2 * len(protected_idxs), num_attr))
            for i, protected_idx in enumerate(protected_idxs):
                protected_basis_vector = np.zeros(shape=(num_attr))
                protected_basis_vector[protected_idx] = 1.0
                unprotected_basis_vector = np.zeros(shape=(num_attr))
                # Scatter the regression coefficients back into the
                # positions of the free attributes
                np.put_along_axis(
                    unprotected_basis_vector, np.array(free_idxs), coefs[i], axis=0
                )
                basis_vectors_[2 * i] = unprotected_basis_vector
                basis_vectors_[2 * i + 1] = protected_basis_vector
        else:
            # Protected indices are to be discarded. Therefore, we can
            # simply return back the logistic regression coefficients
            basis_vectors_ = coefs
        # Transpose to shape (num_attr, n_basis) as expected by the parent
        basis_vectors_ = torch.tensor(basis_vectors_, dtype=dtype).T
        basis_vectors_ = basis_vectors_.detach()
        return basis_vectors_
    def compute_basis_vectors_data(self, X_train, y_train):
        """Build sensitive-subspace basis vectors when the sensitive
        attributes `y_train` are provided separately from the inputs.
        """
        dtype = X_train.dtype
        X_train = datautils.convert_tensor_to_numpy(X_train)
        y_train = datautils.convert_tensor_to_numpy(y_train)
        self.__assert_sensitiveattrs_binary__(y_train)
        basis_vectors_ = []
        outdim = y_train.shape[-1]
        # One L1-regularized logistic regression per sensitive attribute
        self._logreg_models = [
            LogisticRegression(solver="liblinear", penalty="l1")
            .fit(X_train, y_train[:, idx])
            for idx in range(outdim)
        ]
        basis_vectors_ = np.array(
            [
                self._logreg_models[idx].coef_.squeeze()
                for idx in range(outdim)
            ]
        )
        # Transpose to shape (num_attr, n_basis) as expected by the parent
        basis_vectors_ = torch.tensor(basis_vectors_, dtype=dtype).T
        basis_vectors_ = basis_vectors_.detach()
        return basis_vectors_
    def __assert_sensitiveattrs_binary__(self, sensitive_attrs):
        assert validationutils.is_tensor_binary(
            sensitive_attrs
        ), "Sensitive attributes are required to be binary to learn the metric. Please binarize these attributes before fitting the metric."
| 8,395 | 39.757282 | 174 | py |
inFairness | inFairness-main/inFairness/distances/mahalanobis_distance.py | import torch
import numpy as np
from functorch import vmap
from inFairness.distances.distance import Distance
class MahalanobisDistances(Distance):
    """Base class implementing the Generalized Mahalanobis Distances
    Mahalanobis distance between two points X1 and X2 is computed as:
    .. math:: \\text{dist}(X_1, X_2) = (X_1 - X_2) \\Sigma (X_1 - X_2)^{T}
    """
    def __init__(self):
        super().__init__()
        self.device = torch.device("cpu")
        # Vectorized (vmap) pairwise-distance function; built lazily on
        # the first pairwise call.
        self._vectorized_dist = None
        # `sigma` is registered as a buffer so it participates in
        # state_dict() and device moves like regular module state.
        self.register_buffer("sigma", torch.Tensor())
    def to(self, device):
        """Moves distance metric to a particular device
        Parameters
        ------------
        device: torch.device
        """
        assert (
            self.sigma is not None and len(self.sigma.size()) != 0
        ), "Please fit the metric before moving parameters to device"
        self.device = device
        self.sigma = self.sigma.to(self.device)
    def fit(self, sigma):
        """Fit Mahalanobis Distance metric
        Parameters
        ------------
        sigma: torch.Tensor
            Covariance matrix
        """
        self.sigma = sigma
    @staticmethod
    def __compute_dist__(X1, X2, sigma):
        """Computes the distance between two data samples x1 and x2
        Parameters
        -----------
        X1: torch.Tensor
            Data sample of shape (n_features) or (N, n_features)
        X2: torch.Tensor
            Data sample of shape (n_features) or (N, n_features)
        Returns:
            dist: torch.Tensor
                Distance between points x1 and x2. Shape: (N)
        """
        # unsqueeze batch dimension if a vector is passed
        if len(X1.shape) == 1:
            X1 = X1.unsqueeze(0)
        if len(X2.shape) == 1:
            X2 = X2.unsqueeze(0)
        # (x1 - x2) Sigma (x1 - x2)^T, evaluated row-wise
        X_diff = X1 - X2
        dist = torch.sum((X_diff @ sigma) * X_diff, dim=-1, keepdim=True)
        return dist
    def __init_vectorized_dist__(self):
        """Initializes a vectorized version of the distance computation"""
        if self._vectorized_dist is None:
            # Three nested vmaps: innermost maps over samples of X2,
            # middle over samples of X1, outermost over the batch dim —
            # yielding all pairwise distances per batch element.
            self._vectorized_dist = vmap(
                vmap(
                    vmap(self.__compute_dist__, in_dims=(None, 0, None)),
                    in_dims=(0, None, None),
                ),
                in_dims=(0, 0, None),
            )
    def forward(self, X1, X2, itemwise_dist=True):
        """Computes the distance between data samples X1 and X2
        Parameters
        -----------
        X1: torch.Tensor
            Data samples from batch 1 of shape (n_samples_1, n_features)
        X2: torch.Tensor
            Data samples from batch 2 of shape (n_samples_2, n_features)
        itemwise_dist: bool, default: True
            Compute the distance in an itemwise manner or pairwise manner.
            In the itemwise fashion (`itemwise_dist=False`), distance is
            computed between the ith data sample in X1 to the ith data sample
            in X2. Thus, the two data samples X1 and X2 should be of the same shape
            In the pairwise fashion (`itemwise_dist=False`), distance is
            computed between all the samples in X1 and all the samples in X2.
            In this case, the two data samples X1 and X2 can be of different shapes.
        Returns
        ----------
        dist: torch.Tensor
            Distance between samples of batch 1 and batch 2.
            If `itemwise_dist=True`, item-wise distance is returned of
            shape (n_samples, 1)
            If `itemwise_dist=False`, pair-wise distance is returned of
            shape (n_samples_1, n_samples_2)
        """
        if itemwise_dist:
            np.testing.assert_array_equal(
                X1.shape,
                X2.shape,
                err_msg="X1 and X2 should be of the same shape for itemwise distance computation",
            )
            dist = self.__compute_dist__(X1, X2, self.sigma)
        else:
            self.__init_vectorized_dist__()
            # Promote 2-D inputs to 3-D by adding a singleton batch dim
            X1 = X1.unsqueeze(0) if len(X1.shape) == 2 else X1  # (B, N, D)
            X2 = X2.unsqueeze(0) if len(X2.shape) == 2 else X2  # (B, M, D)
            nsamples_x1 = X1.shape[1]
            nsamples_x2 = X2.shape[1]
            dist_shape = (-1, nsamples_x1, nsamples_x2)
            dist = self._vectorized_dist(X1, X2, self.sigma).view(dist_shape)
        return dist
class SquaredEuclideanDistance(MahalanobisDistances):
    """Squared Euclidean distance, expressed as the special case of the
    Mahalanobis distance with an identity covariance:

    .. math:: \\Sigma = I_{num\\_dims}
    """

    def __init__(self):
        super().__init__()
        self.num_dims_ = None

    def fit(self, num_dims: int):
        """Fit Square Euclidean Distance metric

        Parameters
        -----------------
            num_dims: int
                the number of dimensions of the space in which the Squared
                Euclidean distance will be used.
        """
        self.num_dims_ = num_dims
        identity = torch.eye(num_dims).detach()
        super().fit(identity)
| 5,180 | 30.785276 | 107 | py |
inFairness | inFairness-main/inFairness/distances/sensitive_subspace_dist.py | import numpy as np
import torch
from sklearn.decomposition import TruncatedSVD
from typing import List
from inFairness.distances.mahalanobis_distance import MahalanobisDistances
from inFairness.utils import datautils
class SensitiveSubspaceDistance(MahalanobisDistances):
    """Sensitive Subspace metric base class. Given the basis vectors A of a
    sensitive subspace, it builds the projection onto the orthogonal
    complement of that subspace and uses it as the Mahalanobis matrix:

    .. math:: P^{'} = I - (A (A^{T} A)^{-1} A^{T})
    """

    def __init__(self):
        super().__init__()

    def fit(self, basis_vectors):
        """Fit Sensitive Subspace Distance metric

        Parameters
        --------------
            basis_vectors: torch.Tensor
                Basis vectors of the sensitive subspace
        """
        super().fit(self.compute_projection_complement(basis_vectors))

    def compute_projection_complement(self, basis_vectors):
        """Compute the projection onto the orthogonal complement of the
        space spanned by `basis_vectors`:

        .. math:: P^{'} = I - (A (A^{T} A)^{-1} A^{T})

        Parameters
        -------------
            basis_vectors: torch.Tensor
                Basis vectors of the sensitive subspace, of shape (d, k)
                where d is the data feature dimension and k the number of
                protected dimensions

        Returns
        ----------
            projection complement: torch.Tensor
                Projection complement of shape (d, d)
        """
        # Orthogonal projection onto span(A): A (A^T A)^{-1} A^T
        gram_inverse = torch.linalg.inv(basis_vectors.T @ basis_vectors)
        orthogonal_projection = basis_vectors @ gram_inverse @ basis_vectors.T
        # Complement: I - Proj
        identity = torch.eye(orthogonal_projection.shape[0])
        return (identity - orthogonal_projection).detach()
class SVDSensitiveSubspaceDistance(SensitiveSubspaceDistance):
    """Sensitive Subspace metric that uses SVD to find the basis vectors of
    the sensitive subspace. The metric learns a subspace from a set of
    user-curated comparable data samples.
    Proposed in Section B.2 of Training individually fair ML models
    with sensitive subspace robustness
    References
    -------------
    `Yurochkin, Mikhail, Amanda Bower, and Yuekai Sun. "Training individually fair
    ML models with sensitive subspace robustness." arXiv preprint arXiv:1907.00020 (2019).`
    """
    def __init__(self):
        super().__init__()
        # Number of latent dimensions retained by the truncated SVD
        self.n_components_ = None
    def fit(self, X_train, n_components, autoinfer_device=True):
        """Fit SVD Sensitive Subspace distance metric parameters
        Parameters
        -------------
        X_train: torch.Tensor | List[torch.Tensor]
            Training data containing comparable data samples.
            If only one set of comparable data samples is provided, the input
            should be a torch.Tensor of shape :math:`(N, D)`. For multiple sets
            of comparable data samples a list of shape
            :math:`[ (N_1, D), \\cdots, (N_x, D)]` can be provided.
        n_components: int
            Desired number of latent variable dimensions
        autoinfer_device: bool
            Should the distance metric be automatically moved to an appropriate
            device (CPU / GPU) or not? If set to True, it moves the metric
            to the same device `X_train` is on. If set to False, keeps the metric
            on CPU.
        """
        self.n_components_ = n_components
        basis_vectors = self.compute_basis_vectors(X_train, n_components)
        super().fit(basis_vectors)
        if autoinfer_device:
            device = datautils.get_device(X_train)
            super().to(device)
    def __process_input_data__(self, X_train):
        """Process metric training data to convert from tensor to numpy and
        remove the mean and concatenate if multiple sets of training data
        is provided
        """
        if isinstance(X_train, torch.Tensor) or isinstance(X_train, np.ndarray):
            X_train = datautils.convert_tensor_to_numpy(X_train)
            return X_train
        if isinstance(X_train, list):
            X_train = [datautils.convert_tensor_to_numpy(X) for X in X_train]
            # Subtract mean and concatenate all sets of features
            X_norm = np.vstack([X - np.mean(X, axis=0) for X in X_train])
            return X_norm
        raise TypeError(
            "Provided data `X_train` should either be Tensor, np.ndarray or a list of these."
        )
    def compute_basis_vectors(self, X_train, n_components):
        """Compute basis vectors using SVD"""
        X_train = self.__process_input_data__(X_train)
        tSVD = TruncatedSVD(n_components=n_components)
        tSVD.fit(X_train)
        # The top right-singular vectors span the sensitive subspace
        basis_vectors_ = tSVD.components_.T  # Shape: (n_features, n_components)
        basis_vectors_ = torch.Tensor(basis_vectors_)
        return basis_vectors_
| 5,490 | 35.852349 | 95 | py |
inFairness | inFairness-main/inFairness/distances/distance.py | from abc import ABCMeta, abstractmethod
from torch import nn
class Distance(nn.Module, metaclass=ABCMeta):
    """
    Abstract base class for model distances
    """

    def __init__(self):
        super().__init__()

    def fit(self, **kwargs):
        """Fit metric parameters, if the metric is learnable.

        The base implementation is a no-op; learnable subclasses override
        this method with their own fitting logic.
        """
        pass

    def load_state_dict(self, state_dict, strict=True):
        """Restore metric state from `state_dict`.

        Keys not present among the registered buffers raise an
        AssertionError when `strict` is True; otherwise every entry is
        assigned as an attribute on the metric.
        """
        known_buffers = {name for name, _ in self.named_buffers()}
        for key, val in state_dict.items():
            if strict and key not in known_buffers:
                raise AssertionError(
                    f"{key} not found in metric state and strict parameter is set to True. Either set strict parameter to False or remove extra entries from the state dictionary."
                )
            setattr(self, key, val)

    @abstractmethod
    def forward(self, x, y):
        """
        Subclasses must override this method to compute particular distances

        Returns:
            Tensor: distance between two inputs
        """
| 1,196 | 28.925 | 179 | py |
inFairness | inFairness-main/inFairness/utils/datautils.py | from typing import Iterable
import torch
import numpy as np
from itertools import product
def generate_data_pairs(n_pairs, datasamples_1, datasamples_2=None, comparator=None):
    """Utility function to generate (in)comparable data pairs given data samples. Use case includes
    creating a dataset of comparable and incomparable data for the EXPLORE distance metric which
    learns from such data samples.

    Parameters
    ------------
        n_pairs: int
            Number of pairs to construct
        datasamples_1: numpy.ndarray
            Array of data samples of shape (N_1, *)
        datasamples_2: numpy.ndarray
            (Optional) array of data samples of shape (N_2, *).
            If datasamples_2 is provided, then data pairs are constructed between
            datasamples_1 and datasamples_2.
            If datasamples_2 is not provided, then data pairs are constructed within
            datasamples_1
        comparator: function
            A lambda function that given two samples returns True if they should
            be paired, and False if not.
            If `comparator` is not defined, then random data samples are paired together.
            Example: `comparator = lambda x, y: (x == y)`

    Returns
    ----------
        idxs: numpy.ndarray
            A (n_pairs, 2) shaped array with indices of data sample pairs
    """
    if datasamples_2 is None:
        datasamples_2 = datasamples_1

    nsamples_1 = datasamples_1.shape[0]
    nsamples_2 = datasamples_2.shape[0]

    if comparator is None:
        # No constraint: sample uniformly from the full index grid
        ntotal = nsamples_1 * nsamples_2
        assert (
            n_pairs <= ntotal
        ), f"Number of desired data pairs {n_pairs} is greater than possible combinations {ntotal}"
        idxs = np.random.choice(ntotal, n_pairs, replace=False)
        idxs1, idxs2 = np.unravel_index(idxs, shape=(nsamples_1, nsamples_2))
        idxs = np.stack((idxs1, idxs2), axis=-1)
    else:
        # Enumerate only the index pairs the comparator accepts,
        # then sample among them
        all_idxs = [
            (idx1, idx2)
            for idx1, idx2 in product(range(nsamples_1), range(nsamples_2))
            if comparator(datasamples_1[idx1], datasamples_2[idx2])
        ]
        # BUG FIX: the second fragment previously lacked the f-prefix, so
        # the message contained the literal text "{len(all_idxs)}".
        assert n_pairs <= len(all_idxs), (
            f"Number of desired data pairs {n_pairs} is greater than possible "
            + f"combinations {len(all_idxs)}"
        )
        idx_positions = np.random.choice(len(all_idxs), n_pairs, replace=False)
        idxs = np.array([all_idxs[x] for x in idx_positions])

    return idxs
def convert_tensor_to_numpy(tensor):
    """Convert a PyTorch tensor into a numpy array.

    Non-tensor inputs are passed through unchanged, so the function is safe to
    call on values that may or may not be tensors.

    Parameters
    -----------
        tensor: torch.Tensor
            Tensor to be converted to numpy array

    Returns
    ----------
        array_np: numpy.ndarray
            Numpy array of the provided tensor (or the untouched input when it
            is not a tensor)
    """

    if not torch.is_tensor(tensor):
        return tensor
    # Detach from the autograd graph and move to host memory before converting
    return tensor.detach().cpu().numpy()
def include_exclude_terms(
    data_terms: Iterable[str], include: Iterable[str] = (), exclude: Iterable[str] = ()
):
    """
    given a set of data terms, return a resulting set depending on specified included and excluded terms.

    Parameters
    -----------
        data_terms: string iterable
            set of terms to be filtered
        include: string iterable
            set of terms to be included, if not specified all data_terms are included
        exclude: string iterable
            set of terms to be excluded from data_terms

    Returns
    ----------
        terms: list of strings
            resulting terms in alphabetical order.
    """
    # An explicit include list replaces data_terms entirely
    terms = set(include) if include else set(data_terms)
    if exclude:
        terms -= set(exclude)
    # sorted() accepts any iterable directly; no intermediate list needed
    return sorted(terms)
def get_device(obj):
    """Returns a device (cpu/cuda) based on the type of the reference object

    Parameters
    -------------
        obj: torch.Tensor or list
            Reference object. If it is a tensor, its device is returned.
            If it is a non-empty list whose first element is a tensor, that
            tensor's device is returned. In all other cases the CPU device
            is returned.

    Returns
    ----------
        device: torch.device
            Device inferred from the reference object
    """
    # If reference object is a tensor, use its device
    if torch.is_tensor(obj):
        return obj.device
    # If reference object is a list, check if first element is a tensor
    # and if it is a tensor, use its device.
    # BUGFIX: guard against an empty list before indexing obj[0]
    if isinstance(obj, list) and len(obj) > 0 and torch.is_tensor(obj[0]):
        return obj[0].device
    return torch.device("cpu")
| 4,492 | 31.092857 | 105 | py |
inFairness | inFairness-main/inFairness/utils/ndcg.py | import torch
from functorch import vmap
def discounted_cumulative_gain(relevances):
    """Compute the discounted cumulative gain (DCG) of a relevance vector.

    Implements DCG = sum_i 2^rel_i / log2(i + 2), i.e. the gain of the item at
    (0-based) position i is 2^rel_i discounted by log2 of its rank.

    Parameters
    ------------
        relevances: torch.Tensor
            1-D tensor of relevance scores in ranked order

    Returns
    ----------
        dcg: torch.Tensor
            Scalar tensor containing the DCG value
    """
    # BUGFIX: build the discount factors on the same device as `relevances`.
    # Previously `torch.tensor([2.0])` and `torch.arange(...)` were always
    # allocated on CPU, which fails for CUDA inputs.
    numerator = torch.pow(2.0, relevances)
    denominator = torch.log2(
        torch.arange(len(relevances), dtype=torch.float, device=relevances.device) + 2
    )
    return (numerator / denominator).sum()
def normalized_discounted_cumulative_gain(relevances):
    """Compute the normalized discounted cumulative gain (NDCG) of a relevance vector.

    The NDCG is the DCG of `relevances` divided by the ideal DCG, i.e. the DCG
    obtained when the same relevances are sorted in descending order. Refer
    (Wikipedia - Discounted Cumulative Gain)[https://en.wikipedia.org/wiki/Discounted_cumulative_gain]
    for more information.

    Parameters
    ---------
        relevances: torch.Tensor
            vector of dimension N where each element is the relevance of some objects in a particular order

    Returns
    -------
        normalized_discounted_cumulative_gain: torch.Tensor
            scalar value corresponding to the normalized discounted cumulative gain
    """
    actual_gain = discounted_cumulative_gain(relevances)
    ideal_order, _ = torch.sort(relevances, descending=True)
    ideal_gain = discounted_cumulative_gain(ideal_order)
    return actual_gain / ideal_gain
"""
vectorizes :func: `normalized_discounted_cumulative_gain` to work on a batch of vectors of relevances
given in a tensor of dimensions B,N. The output would be the NDCG on the last dimmension. And it's batched
version would return B samples.
"""
vect_normalized_discounted_cumulative_gain = vmap(
normalized_discounted_cumulative_gain, in_dims=(0)
)
"""
Adds a further outer dimension to the vectorized normalized discounted cumulative gain so it works
on monte carlo samples of rankings (e.g. samples of a plackett-luce distribution).
This function would take a tensor of size S,B,N and return a tensor of size S,B with the
ndcg of each vector.
"""
monte_carlo_vect_ndcg = vmap(vect_normalized_discounted_cumulative_gain, in_dims=(0,))
| 1,822 | 36.204082 | 108 | py |
inFairness | inFairness-main/inFairness/utils/plackett_luce.py | """
This file implements Plackett-Luce distribution and is taken from the
following source:
Source: Github PyTorch PR#50362 - Add Plackett-Luce Distribution
URL: https://github.com/pytorch/pytorch/pull/50362/
Author: Jeremy Salwen (https://github.com/jeremysalwen)
"""
from typing import Optional
import torch
from torch.distributions import Distribution, constraints
class PlackettLuce(Distribution):
    """
    Creates a Plackett-Luce distribution over permutations, parameterized by :attr: `logits`.
    The Plackett-Luce distribution defines a probability distribution over permutations by assigning a score `a_i` to
    each element, and repeatedly choosing the next element by sampling from the remaining elements with a probability
    proportional to their score.
    If :attr:`logits` is 1-dimensional with length-`K`, each element is the log-score of the object at that index.
    If :attr:`logits` is N-dimensional, the first N-1 dimensions are treated as a batch of log-score vectors.
    This distribution supports batched operations with permutations of different sizes, by using the :attr:
    `permutation_sizes` attribute to specify the permutation size of each score vector in the batch. If the
    permutation_size is `N` for a given index of the batch, the first `N` entries of the resulting sample will be a
    permutation of the number `1` through `N`, while the remainder have unspecified values.
    Example::
        >>> m = PlackettLuce(torch.tensor([[0, 1, -1], [0, 1, 2]]), torch.tensor([3, 2], dtype=torch.int64))
        >>> m.sample()
        tensor([[ 1, 0, 2],
                [ 0, 1, 2]])
    Args:
        logits (Tensor): The log of the Plackett-Luce distribution scores `a_i`.
        permutation_sizes (Tensor): Optional sizes of the permutations sampled by the distribution. Should match the
            shape of the logits, with the last dimension stripped.
    """

    arg_constraints = {"logits": constraints.real}
    # Samples contain item indices; padded (invalid) slots are filled with -1
    support = constraints.integer_interval(-1, torch.iinfo(torch.int64).max)

    def __init__(
        self,
        logits: torch.Tensor,
        permutation_sizes: Optional[torch.Tensor] = None,
        validate_args=None,
    ):
        batch_shape = logits.shape[:-1]
        max_size = logits.shape[-1]
        if permutation_sizes is None:
            # Default: every batch entry is a full permutation over all max_size items
            permutation_sizes = torch.full(
                batch_shape, max_size, dtype=torch.int64, device=logits.device
            )
        else:
            permutation_sizes = permutation_sizes.expand(batch_shape)
        if validate_args:
            if (logits < -1e30).any():
                # Values near -1e35 are reserved internally as "effectively -inf" fills
                raise ValueError(
                    "Plackett-Luce implementation cannot handle logits less than -1e30"
                )
        self.logits = logits
        self.permutation_sizes = permutation_sizes
        # Mask is true for invalid indices
        # (scatter a 1 at index `permutation_size` and take a cumulative sum,
        # so all positions at index >= permutation_size become True)
        self.mask: torch.Tensor = (
            torch.zeros(*batch_shape, max_size + 1, device=logits.device)
            .scatter(-1, permutation_sizes.unsqueeze(-1), 1)[..., :-1]
            .cumsum(dim=-1)
            .bool()
        )
        event_shape = torch.Size((max_size,))
        super(PlackettLuce, self).__init__(
            batch_shape, event_shape, validate_args=validate_args
        )

    def sample(self, sample_shape=torch.Size()):
        """Draw permutations via the Gumbel trick: add Gumbel noise
        (-log(-log(U))) to the logits and argsort descending. Invalid
        (padded) positions are filled with -1."""
        with torch.no_grad():
            expanded = self.logits.expand(*sample_shape, *[-1] * len(self.logits.shape))
            gumbel_noise = -torch.log(-torch.log(torch.rand_like(expanded)))
            scores = torch.where(self.mask, -1e35, expanded + gumbel_noise)
            sorted_scores, indices = torch.sort(scores, dim=-1, descending=True)
            return indices.masked_fill(self.mask, -1).detach()

    def log_prob(self, value: torch.Tensor):
        """Log-probability of a (batch of) permutation(s) under the distribution."""
        if self._validate_args:
            self._validate_sample(value)
        return _plackett_luce_log_prob(
            self.logits, self.permutation_sizes, self.mask, value
        )

    def expand(self, batch_shape, _instance=None):
        """Return a new PlackettLuce instance with parameters expanded to `batch_shape`."""
        new = self._get_checked_instance(PlackettLuce, _instance)
        batch_shape = torch.Size(batch_shape)
        logits_shape = batch_shape + (self.logits.shape[-1],)
        new.logits = self.logits.expand(logits_shape)
        new.mask = self.mask.expand(logits_shape)
        new.permutation_sizes = self.permutation_sizes.expand(batch_shape)
        super(PlackettLuce, new).__init__(
            batch_shape, self.event_shape, validate_args=False
        )
        new._validate_args = self._validate_args
        return new

    def _validate_sample(self, value: torch.Tensor):
        """Check that `value` is a valid (possibly padded) batch of permutations."""
        super()._validate_sample(value)
        max_int64 = torch.iinfo(torch.int64).max
        # Fill padded slots with max_int64 so that, after sorting, the valid
        # slots must contain exactly 0..K-1 for the equality below to hold
        if (
            value.masked_fill(self.mask, max_int64).sort(-1).values
            != torch.arange(0, value.shape[-1], dtype=torch.int64).masked_fill(
                self.mask, max_int64
            )
        ).any():
            raise ValueError("Not a valid permutation or batch of permutations.")
@torch.jit.script_if_tracing
def _plackett_luce_log_prob(logits, permutation_sizes, mask, value):
    """Compute Plackett-Luce log-probabilities for a batch of permutations.

    Iterates over ranking positions: at each step it accumulates the
    log-softmax probability of the item chosen at that position, then removes
    that item from contention by setting its logit to -1e35 for later steps.
    """
    value = value.masked_fill(mask, 0)
    logits = logits.masked_fill(mask, -1e35).expand(value.shape)
    log_probs = torch.zeros(value.shape[:-1], device=value.device)
    for i in range(int(permutation_sizes.max())):
        # Positions beyond a batch entry's permutation size contribute 0
        log_probs += torch.where(
            mask[..., i],
            0.0,
            logits.log_softmax(dim=-1).gather(-1, value[..., i : i + 1]).squeeze(-1),
        )
        # Exclude the item selected at position i from subsequent choices
        logits = logits.scatter(-1, value[..., i : i + 1], -1e35)
    return log_probs
| 5,646 | 39.335714 | 117 | py |
inFairness | inFairness-main/inFairness/utils/params.py | import torch.nn
def freeze_network(network: torch.nn.Module):
    """Disable gradient computation for every parameter of `network`.

    :param network: torch.nn.Module
    :type network: torch.nn.Module
    """
    for param in network.parameters():
        param.requires_grad_(False)
def unfreeze_network(network: torch.nn.Module):
    """Re-enable gradient computation for every parameter of `network`.

    :param network: torch.nn.Module
    :type network: torch.nn.Module
    """
    for param in network.parameters():
        param.requires_grad_(True)
| 475 | 22.8 | 47 | py |
inFairness | inFairness-main/inFairness/utils/postprocessing.py | import torch
def build_graph_from_dists(
    dists: torch.Tensor,
    scale: float = None,
    threshold: float = None,
    normalize: bool = False,
):
    """Construct a graph adjacency matrix `W` from pairwise distances.

    Edge weights are exponentially decayed distances, and an edge is kept only
    when the square root of the distance is below `threshold`. Nodes left with
    no positive affinity to any other node are dropped from the matrix.

    Parameters
    -------------
        dists: torch.Tensor
            Distance matrix between data points. Shape: (N, N)
        scale: float
            Parameter used to scale the computed distances
        threshold: float
            Distances whose square root is below the threshold are connected;
            those beyond it are disconnected
        normalize: bool
            Whether to symmetrically normalize the adjacency matrix or not

    Returns
    ----------
        W: torch.Tensor
            Adjacency matrix restricted to data points connected to at least
            one other data point
        idxs_in: torch.Tensor
            Indices of data points which are included in the adjacency matrix
    """
    if scale is None:
        scale = 1.0
    if threshold is None:
        threshold = 1e10

    # Exponentially decayed affinity, zeroed wherever sqrt(dist) >= threshold
    affinity = torch.exp(-(dists * scale)) * (torch.sqrt(dists) < threshold)

    # Keep only nodes with some positive affinity to another node
    idxs_in = torch.where(affinity.sum(dim=1) > 0.0)[0]
    W = affinity[idxs_in][:, idxs_in]

    if normalize:
        deg_inv_sqrt = 1.0 / torch.sqrt(W.sum(dim=1))
        # Scale columns and rows by 1/sqrt(degree)
        W = W * deg_inv_sqrt * deg_inv_sqrt.view(-1, 1)

    return W, idxs_in
def get_laplacian(W: torch.Tensor, normalize: bool = False):
    """Compute the graph Laplacian of the adjacency matrix `W`.

    Parameters
    -------------
        W: torch.Tensor
            Adjacency matrix of shape (N, N)
        normalize: bool
            Whether to row-normalize the Laplacian by node degree

    Returns
    -------------
        Laplacian: torch.Tensor
            (Optionally normalized) Laplacian of the adjacency matrix
    """
    degree = W.sum(dim=1)
    laplacian = torch.diag(degree) - W
    if normalize:
        # Divide each row by the corresponding node degree
        laplacian = laplacian / degree.view(-1, 1)
    return laplacian
def laplacian_solve(L: torch.Tensor, y_hat: torch.Tensor, lambda_param: float = None):
    """Solves a system of linear equations to get the post-processed output.
    The system of linear equation it solves is:
    :math:`\hat{{f}} = {(I + \lambda * L)}^{-1} \hat{y}`

    Parameters
    ------------
        L: torch.Tensor
            Laplacian matrix
        y_hat: torch.Tensor
            Model predicted output class probabilities
        lambda_param: float
            Weight for the laplacian regularizer

    Returns
    ----------
        y: torch.Tensor
            Post-processed solution according to the equation above
    """
    if lambda_param is None:
        lambda_param = 1.0
    n = L.shape[0]
    # BUGFIX: build the identity on the same device/dtype as L.
    # A bare torch.eye(n) is always CPU float32, which breaks for CUDA
    # or double-precision Laplacians.
    identity = torch.eye(n, device=L.device, dtype=L.dtype)
    y = torch.linalg.solve(lambda_param * L + identity, y_hat)
    return y
| 2,900 | 27.441176 | 86 | py |
inFairness | inFairness-main/inFairness/auditor/sensr_auditor.py | import torch
from torch.nn import Parameter
from inFairness.auditor import Auditor
from inFairness.utils.params import freeze_network, unfreeze_network
from inFairness.utils.datautils import get_device
class SenSRAuditor(Auditor):
    """SenSR Auditor implements the functionality to generate worst-case examples
    by solving the following optimization equation:
    .. math:: x_{t_b}^* \gets arg\max_{x \in X} l((x,y_{t_b}),h) - \lambda d_x^2(x_{t_b},x)
    Proposed in `Training individually fair ML models with sensitive subspace robustness <https://arxiv.org/abs/1907.00020>`_
    Parameters
    --------------
        loss_fn: torch.nn.Module
            Loss function
        distance_x: inFairness.distances.Distance
            Distance metric in the input space
        num_steps: int
            Number of update steps should the auditor perform to find worst-case examples
        lr: float
            Learning rate
    """

    def __init__(
        self, loss_fn, distance_x, num_steps, lr, max_noise=0.1, min_noise=-0.1
    ):
        self.loss_fn = loss_fn
        self.distance_x = distance_x
        self.num_steps = num_steps
        self.lr = lr
        # [min_noise, max_noise) is the range of the uniform noise used to
        # initialize the adversarial perturbation delta
        self.max_noise = max_noise
        self.min_noise = min_noise
        super().__init__()

    def generate_worst_case_examples(self, network, x, y, lambda_param, optimizer=None):
        """Generate worst case example given the input data sample batch `x`
        Parameters
        ------------
            network: torch.nn.Module
                PyTorch network model
            x: torch.Tensor
                Batch of input datapoints
            y: torch.Tensor
                Batch of output datapoints
            lambda_param: float
                Lambda weighting parameter as defined in the equation above
            optimizer: torch.optim.Optimizer, optional
                PyTorch Optimizer object
        Returns
        ---------
            X_worst: torch.Tensor
                Worst case examples for the provided input datapoints
        """
        assert optimizer is None or issubclass(optimizer, torch.optim.Optimizer), (
            "`optimizer` object should either be None or be a PyTorch optimizer "
            + "and an instance of the `torch.optim.Optimizer` class"
        )
        # Keep network weights fixed while optimizing the input perturbation
        freeze_network(network)
        lambda_param = lambda_param.detach()
        # Perturbation initialized uniformly in [min_noise, max_noise)
        delta = Parameter(
            torch.rand_like(x) * (self.max_noise - self.min_noise) + self.min_noise
        )
        if optimizer is None:
            optimizer = torch.optim.Adam([delta], lr=self.lr)
        else:
            optimizer = optimizer([delta], lr=self.lr)
        for _ in range(self.num_steps):
            optimizer.zero_grad()
            x_worst = x + delta
            input_dist = self.distance_x(x, x_worst)
            out_x_worst = network(x_worst)
            out_dist = self.loss_fn(out_x_worst, y)
            # Gradient ascent on (loss - lambda * input distance): maximize the
            # loss while penalizing distance from the original input
            audit_loss = -(out_dist - lambda_param * input_dist)
            audit_loss.mean().backward()
            optimizer.step()
        # Restore gradient computation for the network parameters
        unfreeze_network(network)
        return (x + delta).detach()

    def audit(
        self,
        network,
        X_audit,
        Y_audit,
        audit_threshold=None,
        lambda_param=None,
        confidence=0.95,
        optimizer=None,
    ):
        """Audit a model for individual fairness
        Parameters
        ------------
            network: torch.nn.Module
                PyTorch network model
            X_audit: torch.Tensor
                Auditing data samples. Shape: (B, *)
            Y_audit: torch.Tensor
                Auditing data samples. Shape: (B)
            loss_fn: torch.nn.Module
                Loss function
            audit_threshold: float, optional
                Auditing threshold to consider a model individually fair or not
                If `audit_threshold` is specified, the `audit` procedure determines
                if the model is individually fair or not.
                If `audit_threshold` is not specified, the `audit` procedure simply
                returns the mean and lower bound of loss ratio, leaving the determination
                of models' fairness to the user.
                Default=None
            lambda_param: float
                Lambda weighting parameter as defined in the equation above
            confidence: float, optional
                Confidence value. Default = 0.95
            optimizer: torch.optim.Optimizer, optional
                PyTorch Optimizer object. Default: torch.optim.SGD
        Returns
        ------------
            audit_response: inFairness.auditor.datainterface.AuditorResponse
                Audit response containing test statistics
        """
        assert optimizer is None or issubclass(optimizer, torch.optim.Optimizer), (
            "`optimizer` object should either be None or be a PyTorch optimizer "
            + "and an instance of the `torch.optim.Optimizer` class"
        )
        device = get_device(X_audit)
        # Default lambda weight is 1.0; plain floats are promoted to tensors
        if lambda_param is None:
            lambda_param = torch.tensor(1.0, device=device)
        if isinstance(lambda_param, float):
            lambda_param = torch.tensor(lambda_param, device=device)
        if optimizer is None:
            optimizer = torch.optim.SGD
        X_worst = self.generate_worst_case_examples(
            network=network,
            x=X_audit,
            y=Y_audit,
            lambda_param=lambda_param,
            optimizer=optimizer,
        )
        # Per-sample ratio of worst-case loss to clean loss
        loss_ratio = self.compute_loss_ratio(
            X_audit=X_audit,
            X_worst=X_worst,
            Y_audit=Y_audit,
            network=network,
            loss_fn=self.loss_fn,
        )
        audit_response = self.compute_audit_result(
            loss_ratio, audit_threshold, confidence
        )
        return audit_response
| 5,871 | 32.175141 | 125 | py |
inFairness | inFairness-main/inFairness/auditor/sensei_auditor.py | import torch
from torch.nn import Parameter
from inFairness.auditor.auditor import Auditor
from inFairness.utils.params import freeze_network, unfreeze_network
from inFairness.utils.datautils import get_device
class SenSeIAuditor(Auditor):
    """SenSeI Auditor implements the functionality to generate worst-case examples
    by solving the following optimization equation:
    .. math:: x_{t_b}' \gets arg\max_{x' \in X}\{d_{Y}(h_{\\theta_t}(X_{t_b}),h_{\\theta_t}(x')) - \lambda_t d_{X}(X_{t_b},x')\}
    Proposed in `SenSeI: Sensitive Set Invariance for Enforcing Individual Fairness <https://arxiv.org/abs/2006.14168>`_
    Parameters
    --------------
        distance_x: inFairness.distances.Distance
            Distance metric in the input space
        distance_y: inFairness.distances.Distance
            Distance metric in the output space
        num_steps: int
            Number of update steps should the auditor perform to find worst-case examples
        lr: float
            Learning rate
    """

    def __init__(
        self, distance_x, distance_y, num_steps, lr, max_noise=0.1, min_noise=-0.1
    ):
        self.distance_x = distance_x
        self.distance_y = distance_y
        self.num_steps = num_steps
        self.lr = lr
        # [min_noise, max_noise) is the range of the uniform noise used to
        # initialize the adversarial perturbation delta
        self.max_noise = max_noise
        self.min_noise = min_noise
        super().__init__()

    def generate_worst_case_examples(self, network, x, lambda_param, optimizer=None):
        """Generate worst case example given the input data sample batch `x`
        Parameters
        ------------
            network: torch.nn.Module
                PyTorch network model
            x: torch.Tensor
                Batch of input datapoints
            lambda_param: float
                Lambda weighting parameter as defined in the equation above
            optimizer: torch.optim.Optimizer, optional
                PyTorch Optimizer object. Default: torch.optim.Adam
        Returns
        ---------
            X_worst: torch.Tensor
                Worst case examples for the provided input datapoints
        """
        assert optimizer is None or issubclass(optimizer, torch.optim.Optimizer), (
            "`optimizer` object should either be None or be a PyTorch optimizer "
            + "and an instance of the `torch.optim.Optimizer` class"
        )
        # Keep network weights fixed while optimizing the input perturbation
        freeze_network(network)
        lambda_param = lambda_param.detach()
        # Perturbation initialized uniformly in [min_noise, max_noise)
        delta = Parameter(
            torch.rand_like(x) * (self.max_noise - self.min_noise) + self.min_noise
        )
        if optimizer is None:
            optimizer = torch.optim.Adam([delta], lr=self.lr)
        else:
            optimizer = optimizer([delta], lr=self.lr)
        for _ in range(self.num_steps):
            optimizer.zero_grad()
            x_worst = x + delta
            input_dist = self.distance_x(x, x_worst)
            out_x = network(x)
            out_x_worst = network(x_worst)
            out_dist = self.distance_y(out_x, out_x_worst)
            # Gradient ascent on (output distance - lambda * input distance):
            # maximize output change while staying close in the input metric
            audit_loss = -(out_dist - lambda_param * input_dist)
            audit_loss.sum().backward()
            optimizer.step()
        # Restore gradient computation for the network parameters
        unfreeze_network(network)
        return (x + delta).detach()

    def audit(
        self,
        network,
        X_audit,
        Y_audit,
        loss_fn,
        audit_threshold=None,
        lambda_param=None,
        confidence=0.95,
        optimizer=None,
    ):
        """Audit a model for individual fairness
        Parameters
        ------------
            network: torch.nn.Module
                PyTorch network model
            X_audit: torch.Tensor
                Auditing data samples. Shape: (B, *)
            Y_audit: torch.Tensor
                Auditing data samples. Shape: (B)
            loss_fn: torch.nn.Module
                Loss function
            audit_threshold: float, optional
                Auditing threshold to consider a model individually fair or not
                If `audit_threshold` is specified, the `audit` procedure determines
                if the model is individually fair or not.
                If `audit_threshold` is not specified, the `audit` procedure simply
                returns the mean and lower bound of loss ratio, leaving the determination
                of models' fairness to the user.
                Default=None
            lambda_param: float
                Lambda weighting parameter as defined in the equation above
            confidence: float, optional
                Confidence value. Default = 0.95
            optimizer: torch.optim.Optimizer, optional
                PyTorch Optimizer object. Default: torch.optim.SGD
        Returns
        ------------
            audit_response: inFairness.auditor.datainterface.AuditorResponse
                Audit response containing test statistics
        """
        assert optimizer is None or issubclass(optimizer, torch.optim.Optimizer), (
            "`optimizer` object should either be None or be a PyTorch optimizer "
            + "and an instance of the `torch.optim.Optimizer` class"
        )
        device = get_device(X_audit)
        # Default lambda weight is 1.0; plain floats are promoted to tensors
        if lambda_param is None:
            lambda_param = torch.tensor(1.0, device=device)
        elif isinstance(lambda_param, float):
            lambda_param = torch.tensor(lambda_param, device=device)
        if optimizer is None:
            optimizer = torch.optim.SGD
        X_worst = self.generate_worst_case_examples(
            network=network, x=X_audit, lambda_param=lambda_param, optimizer=optimizer
        )
        # Per-sample ratio of worst-case loss to clean loss
        loss_ratio = self.compute_loss_ratio(
            X_audit=X_audit,
            X_worst=X_worst,
            Y_audit=Y_audit,
            network=network,
            loss_fn=loss_fn,
        )
        audit_response = self.compute_audit_result(
            loss_ratio, audit_threshold, confidence
        )
        return audit_response
| 5,904 | 33.331395 | 128 | py |
inFairness | inFairness-main/inFairness/auditor/datainterface.py | import torch
from dataclasses import dataclass
@dataclass
class AuditorResponse:
    """Class to store a result from the auditor"""

    # Mean of the per-sample loss ratios (worst-case loss / clean loss)
    lossratio_mean: float = None
    # Standard deviation of the loss ratios
    lossratio_std: float = None
    # Lower bound of the confidence interval on the mean loss ratio
    lower_bound: float = None
    # User-specified acceptance threshold, if one was provided
    threshold: float = None
    # p-value of the test of the mean loss ratio against the threshold
    pval: float = None
    # Confidence level used for the statistics (e.g. 0.95)
    confidence: float = None
    # Fairness verdict; only populated when a threshold is provided
    is_model_fair: bool = None
| 342 | 19.176471 | 50 | py |
inFairness | inFairness-main/inFairness/auditor/senstir_auditor.py | import torch
from torch.nn.parameter import Parameter
from inFairness.distances import (
WassersteinDistance,
MahalanobisDistances,
)
from inFairness.auditor import Auditor
from inFairness.utils.params import freeze_network, unfreeze_network
class SenSTIRAuditor(Auditor):
    """SenSTIR Auditor generates worst-case examples by solving the
    following optimization problem:
    .. math:: q^{'} \gets arg\max_{q^{'}}\{||h_{\\theta_t}(q),h_{\\theta_t}(q^{'})||_{2}^{2} - \lambda_t d_{Q}(q,q^{'})\}
    At a high level, it will find :math:`q^{'}` such that it maximizes the score difference, while keeping
    a fair set distance `distance_q` with the original query `q` small.
    Proposed in `Individually Fair Rankings <https://arxiv.org/abs/2103.11023>`_
    Parameters
    -----------
        distance_x: inFairness.distances.Distance
            Distance metric in the input space. Should be an instance of
            :class:`~inFairness.distances.MahalanobisDistance`
        distance_y: inFairness.distances.Distance
            Distance metric in the output space. Should be an instance of
            :class:`~inFairness.distances.MahalanobisDistance`
        num_steps: int
            number of optimization steps taken to produce the worst examples.
        lr: float
            learning rate of the optimization
        max_noise: float
            range of a uniform distribution determining the initial noise added to q to form q'
        min_noise: float
            range of a uniform distribution determining the initial noise added to q to form q'
    """

    def __init__(
        self,
        distance_x: MahalanobisDistances,
        distance_y: MahalanobisDistances,
        num_steps: int,
        lr: float,
        max_noise: float = 0.1,
        min_noise: float = -0.1,
    ):
        self.distance_x = distance_x
        self.distance_y = distance_y
        self.num_steps = num_steps
        self.lr = lr
        self.max_noise = max_noise
        self.min_noise = min_noise
        # Query-level (set) distance derived from the input metric
        self.distance_q = self.__init_query_distance__()

    def __init_query_distance__(self):
        """Initialize Wasserstein distance metric from provided input distance metric"""
        # Reuse the Mahalanobis covariance (sigma) of the input metric as the
        # ground metric of the Wasserstein distance between queries
        sigma_ = self.distance_x.sigma
        distance_q = WassersteinDistance()
        distance_q.fit(sigma=sigma_)
        return distance_q

    def generate_worst_case_examples(self, network, Q, lambda_param, optimizer=None):
        """Generate worst case examples given the input sample batch of queries Q (dimensions batch_size,num_items,num_features)
        Parameters
        -----------
            network: torch.nn.Module
                PyTorch network model that outputs scores per item
            Q: torch.Tensor
                tensor with dimensions batch_size, num_items, num_features containing the batch of queries for ranking
            lambda_param: torch.float
                Lambda weighting parameter as defined above
            optimizer: torch.optim.Optimizer, optional
                Pytorch Optimizer object
        Returns
        ---------
            q_worst: torch.Tensor
                worst case queries for the provided input queries `Q`
        """
        assert optimizer is None or issubclass(optimizer, torch.optim.Optimizer)
        batch_size, num_items, _ = Q.shape
        # Keep network weights fixed while optimizing the query perturbation
        freeze_network(network)
        lambda_param = lambda_param.detach()
        # Perturbation initialized uniformly in [min_noise, max_noise)
        delta = Parameter(
            torch.rand_like(Q) * (self.max_noise - self.min_noise) + self.min_noise
        )
        if optimizer is None:
            optimizer = torch.optim.Adam([delta], lr=self.lr)
        else:
            optimizer = optimizer([delta], lr=self.lr)
        for _ in range(self.num_steps):
            optimizer.zero_grad()
            Q_worst = Q + delta
            input_dist = self.distance_q(Q, Q_worst)  # this is of size B
            out_Q = network(Q).reshape(
                batch_size, num_items
            )  # shape B,N,1 scores --> B,N
            out_Q_worst = network(Q_worst).reshape(batch_size, num_items)
            out_dist = self.distance_y(out_Q, out_Q_worst)
            out_dist = out_dist.reshape(
                -1
            )  # distance_y outputs B,1 whereas input_dist is B.
            # Gradient ascent on (score distance - lambda * query distance)
            loss = (-(out_dist - lambda_param * input_dist)).sum()
            loss.backward()
            optimizer.step()
        # Restore gradient computation for the network parameters
        unfreeze_network(network)
        return (Q + delta).detach()
| 4,386 | 34.096 | 128 | py |
inFairness | inFairness-main/inFairness/auditor/auditor.py | import torch
import numpy as np
from abc import ABCMeta
from scipy.stats import norm
from inFairness.utils.datautils import convert_tensor_to_numpy
from inFairness.auditor.datainterface import AuditorResponse
class Auditor(metaclass=ABCMeta):
    """
    Abstract class for model auditors, e.g. Sensei or Sensr
    """

    def __init__(self):
        super(Auditor, self).__init__()

    def generate_worst_case_examples(self, *args, **kwargs):
        """Generates worst-case example for the input data sample batch"""
        raise NotImplementedError(
            "Method `generate_worst_case_examples` not implemented."
        )

    def compute_loss_ratio(self, X_audit, X_worst, Y_audit, network, loss_fn):
        """Compute the loss ratio of samples computed by solving gradient flow attack
        to original audit samples
        Parameters
        -------------
            X_audit: torch.Tensor
                Auditing samples. Shape (n_samples, n_features)
            X_worst: torch.Tensor
                Worst-case counterparts of the auditing samples.
                Shape (n_samples, n_features)
            Y_audit: torch.Tensor
                Labels of auditing samples. Shape: (n_samples)
            network: torch.nn.Module
                PyTorch network model mapping inputs to predictions
            loss_fn: function
                Loss function accepting (predictions, targets, reduction=...)
        Returns
        ---------
            loss_ratios: numpy.ndarray
                Ratio of loss for samples computed using gradient
                flow attack to original audit samples
        """
        # Evaluate without building an autograd graph; per-sample losses
        # are required, hence reduction="none"
        with torch.no_grad():
            Y_pred_worst = network(X_worst)
            Y_pred_original = network(X_audit)
            loss_vals_adversarial = loss_fn(Y_pred_worst, Y_audit, reduction="none")
            loss_vals_original = loss_fn(Y_pred_original, Y_audit, reduction="none")
        loss_vals_adversarial = convert_tensor_to_numpy(loss_vals_adversarial)
        loss_vals_original = convert_tensor_to_numpy(loss_vals_original)
        loss_ratio = np.divide(loss_vals_adversarial, loss_vals_original)
        return loss_ratio

    def compute_audit_result(self, loss_ratios, threshold=None, confidence=0.95):
        """Computes auditing statistics given loss ratios and user-specified
        acceptance threshold
        Parameters
        -------------
            loss_ratios: numpy.ndarray
                List of loss ratios between worst-case and normal data samples
            threshold: float. optional
                User-specified acceptance threshold value
                If a value is not specified, the procedure simply returns the mean
                and lower bound of loss ratio, leaving the detemination of models'
                fairness to the user.
                If a value is specified, the procedure also determines if the model
                is individually fair or not.
            confidence: float, optional
                Confidence value. Default = 0.95
        Returns
        ----------
            audit_result: AuditorResponse
                Data interface with auditing results and statistics
        """
        # Drop non-finite ratios (presumably from near-zero clean losses —
        # TODO confirm) before computing statistics
        loss_ratios = loss_ratios[np.isfinite(loss_ratios)]
        lossratio_mean = np.mean(loss_ratios)
        lossratio_std = np.std(loss_ratios)
        N = loss_ratios.shape[0]
        # One-sided z value for the requested confidence level
        z = norm.ppf(confidence)
        # Lower confidence bound on the mean loss ratio
        lower_bound = lossratio_mean - z * lossratio_std / np.sqrt(N)
        if threshold is None:
            response = AuditorResponse(
                lossratio_mean=lossratio_mean,
                lossratio_std=lossratio_std,
                lower_bound=lower_bound,
            )
        else:
            # One-sided test of the mean loss ratio against the threshold:
            # fail to reject fairness when pval >= (1 - confidence)
            tval = (lossratio_mean - threshold) / lossratio_std
            tval *= np.sqrt(N)
            pval = 1 - norm.cdf(tval)
            is_model_fair = False if pval < (1 - confidence) else True
            response = AuditorResponse(
                lossratio_mean=lossratio_mean,
                lossratio_std=lossratio_std,
                lower_bound=lower_bound,
                threshold=threshold,
                pval=pval,
                confidence=confidence,
                is_model_fair=is_model_fair,
            )
        return response

    def audit(self, *args, **kwargs):
        """Audit model for individual fairness"""
        raise NotImplementedError("Method not implemented")
| 4,199 | 34.294118 | 85 | py |
inFairness | inFairness-main/inFairness/fairalgo/datainterfaces.py | import torch
from dataclasses import dataclass
@dataclass
class FairModelResponse:
    """Class to store a result from the fairmodel algorithm"""

    # Training loss of the fair algorithm (populated during training forward passes)
    loss: torch.Tensor = None
    # Model predictions for the input batch
    y_pred: torch.Tensor = None
| 212 | 16.75 | 62 | py |
inFairness | inFairness-main/inFairness/fairalgo/sensei.py | import torch
from torch import nn
from inFairness.auditor import SenSeIAuditor
from inFairness.fairalgo.datainterfaces import FairModelResponse
from inFairness.utils import datautils
class SenSeI(nn.Module):
    """Implements the Sensitive Set Invariance (SenSeI) algorithm.
    Proposed in `SenSeI: Sensitive Set Invariance for Enforcing Individual Fairness <https://arxiv.org/abs/2006.14168>`_
    Parameters
    ------------
        network: torch.nn.Module
            Network architecture
        distance_x: inFairness.distances.Distance
            Distance metric in the input space
        distance_y: inFairness.distances.Distance
            Distance metric in the output space
        loss_fn: torch.nn.Module
            Loss function
        rho: float
            :math:`\\rho` parameter in the SenSeI algorithm
        eps: float
            :math:`\epsilon` parameter in the SenSeI algorithm
        auditor_nsteps: int
            Number of update steps for the auditor to find worst-case examples
        auditor_lr: float
            Learning rate for the auditor
    """

    def __init__(
        self,
        network,
        distance_x,
        distance_y,
        loss_fn,
        rho,
        eps,
        auditor_nsteps,
        auditor_lr,
    ):
        super().__init__()
        self.distance_x = distance_x
        self.distance_y = distance_y
        self.network = network
        self.loss_fn = loss_fn
        # Dual variable of the distance constraint; lazily initialized on the
        # first training forward pass
        self.lamb = None
        self.rho = rho
        self.eps = eps
        self.auditor_nsteps = auditor_nsteps
        self.auditor_lr = auditor_lr
        self.auditor = self.__init_auditor__()

    def __init_auditor__(self):
        """Construct the SenSeI auditor from this module's distances and settings."""
        auditor = SenSeIAuditor(
            distance_x=self.distance_x,
            distance_y=self.distance_y,
            num_steps=self.auditor_nsteps,
            lr=self.auditor_lr,
        )
        return auditor

    def forward_train(self, X, Y):
        """Forward method during the training phase"""
        device = datautils.get_device(X)
        minlambda = torch.tensor(1e-5, device=device)
        if self.lamb is None:
            self.lamb = torch.tensor(1.0, device=device)
        if type(self.eps) is float:
            self.eps = torch.tensor(self.eps, device=device)
        Y_pred = self.network(X)
        # Worst-case counterparts of the inputs under the fair input metric
        X_worst = self.auditor.generate_worst_case_examples(
            self.network, X, lambda_param=self.lamb
        )
        dist_x = self.distance_x(X, X_worst)
        mean_dist_x = dist_x.mean()
        # Adaptive step size for the dual update of lambda
        lr_factor = torch.maximum(mean_dist_x, self.eps) / torch.minimum(mean_dist_x, self.eps)
        # Dual ascent on lambda, floored at minlambda
        self.lamb = torch.max(
            torch.stack(
                [minlambda, self.lamb + lr_factor * (mean_dist_x - self.eps)]
            )
        )
        Y_pred_worst = self.network(X_worst)
        # Task loss plus invariance penalty between predictions on clean
        # and worst-case inputs
        fair_loss = torch.mean(
            self.loss_fn(Y_pred, Y) + self.rho * self.distance_y(Y_pred, Y_pred_worst)
        )
        response = FairModelResponse(loss=fair_loss, y_pred=Y_pred)
        return response

    def forward_test(self, X):
        """Forward method during the test phase"""
        Y_pred = self.network(X)
        response = FairModelResponse(y_pred=Y_pred)
        return response

    def forward(self, X, Y=None, *args, **kwargs):
        """Defines the computation performed at every call.
        Parameters
        ------------
            X: torch.Tensor
                Input data
            Y: torch.Tensor
                Expected output data
        Returns
        ----------
            output: torch.Tensor
                Model output
        """
        if self.training:
            return self.forward_train(X, Y)
        else:
            return self.forward_test(X)
| 3,743 | 27.580153 | 120 | py |
inFairness | inFairness-main/inFairness/fairalgo/senstir.py | import torch
from torch import nn
from functorch import vmap
from inFairness.auditor import SenSTIRAuditor
from inFairness.distances.mahalanobis_distance import MahalanobisDistances
from inFairness.fairalgo.datainterfaces import FairModelResponse
from inFairness.utils import datautils
from inFairness.utils.plackett_luce import PlackettLuce
from inFairness.utils.ndcg import monte_carlo_vect_ndcg
class SenSTIR(nn.Module):
    """Implements the Sensitive Set Invariance for Ranking (SenSTIR) algorithm.

    Proposed in `Individually Fair Ranking <https://arxiv.org/abs/2103.11023>`_

    Parameters
    ------------
        network: torch.nn.Module
            Network architecture
        distance_x: inFairness.distances.Distance
            Distance metric in the input space
        distance_y: inFairness.distances.Distance
            Distance metric in the output space
        rho: float
            :math:`\\rho` parameter in the SenSTIR algorithm (see Algorithm 1)
        eps: float
            :math:`\\epsilon` parameter in the SenSTIR algorithm (see Algorithm 1)
        auditor_nsteps: int
            Number of update steps for the auditor to find worst-case examples
        auditor_lr: float
            Learning rate for the auditor
        monte_carlo_samples_ndcg: int
            Number of monte carlo samples required to estimate the gradient of the
            empirical version of expectation defined in equation SenSTIR in the reference
    """
    def __init__(
        self,
        network: torch.nn.Module,
        distance_x: MahalanobisDistances,
        distance_y: MahalanobisDistances,
        rho: float,
        eps: float,
        auditor_nsteps: int,
        auditor_lr: float,
        monte_carlo_samples_ndcg: int,
    ):
        super().__init__()
        self.network = network
        self.distance_x = distance_x
        self.distance_y = distance_y
        self.rho = rho
        self.eps = eps
        self.auditor_nsteps = auditor_nsteps
        self.auditor_lr = auditor_lr
        self.monte_carlo_samples_ndcg = monte_carlo_samples_ndcg
        # Dual variable (lambda); lazily initialized on the first training
        # step so it lives on the same device as the data.
        self.lamb = None
        self.auditor, self.distance_q = self.__init_auditor__()
        # Batched gather used to reorder each query's relevances by a
        # sampled ranking (vmapped over the leading sample dimension).
        self._vect_gather = vmap(torch.gather, (None, None, 0))
    def __init_auditor__(self):
        # Builds the SenSTIR auditor that searches for worst-case queries
        # and exposes the query-space distance it optimizes over.
        auditor = SenSTIRAuditor(
            self.distance_x,
            self.distance_y,
            self.auditor_nsteps,
            self.auditor_lr,
        )
        distance_q = auditor.distance_q
        return auditor, distance_q
    def forward_train(self, Q, relevances):
        """Training pass: audit worst-case queries, update the dual
        variable, and return the fair ranking loss.

        :param Q: (batch, num_items, num_features) query tensor
        :param relevances: (batch, num_items) true relevances
        :return: FairModelResponse with ``loss`` and ``y_pred`` (scores)
        """
        batch_size, num_items, num_features = Q.shape
        device = datautils.get_device(Q)
        min_lambda = torch.tensor(1e-5, device=device)
        if self.lamb is None:
            self.lamb = torch.tensor(1.0, device=device)
        # eps may be passed as a python float; promote it to a tensor once
        if type(self.eps) is float:
            self.eps = torch.tensor(self.eps, device=device)
        if self.rho > 0.0:
            Q_worst = self.auditor.generate_worst_case_examples(
                self.network, Q, self.lamb
            )
            mean_dist_q = self.distance_q(Q, Q_worst).mean()
            # lr_factor = torch.maximum(mean_dist_q, self.eps) / torch.minimum(
            #     mean_dist_q, self.eps
            # )
            # Fixed step size for the dual ascent on lambda (the adaptive
            # rule above was disabled by the authors).
            lr_factor = 0.5 * self.rho
            # Projected dual ascent: keep lambda above a small floor.
            self.lamb = torch.maximum(
                min_lambda, self.lamb + lr_factor * (mean_dist_q - self.eps)
            )
            scores = self.network(Q).reshape(batch_size, num_items)  # (B,N,1) --> B,N
            scores_worst = self.network(Q_worst).reshape(batch_size, num_items)
        else:
            # rho == 0 disables the fairness term; scores_worst is a dummy.
            scores = self.network(Q).reshape(batch_size, num_items)  # (B,N,1) --> B,N
            scores_worst = torch.ones_like(scores)
        # Negative expected-NDCG surrogate plus the fairness penalty.
        fair_loss = torch.mean(
            -self.__expected_ndcg__(self.monte_carlo_samples_ndcg, scores, relevances)
            + self.rho * self.distance_y(scores, scores_worst)
        )
        response = FairModelResponse(loss=fair_loss, y_pred=scores)
        return response
    def forward_test(self, Q):
        """Forward method during the test phase"""
        scores = self.network(Q).reshape(Q.shape[:2])  # B,N,1 -> B,N
        response = FairModelResponse(y_pred=scores)
        return response
    def forward(self, Q, relevances, **kwargs):
        """Defines the computation performed at every call.

        Parameters
        ------------
            Q: torch.Tensor
                Batch of queries, shape (batch, num_items, num_features)
            relevances: torch.Tensor
                True relevances, shape (batch, num_items); used in training mode

        Returns
        ----------
            output: FairModelResponse
                Model output
        """
        if self.training:
            return self.forward_train(Q, relevances)
        else:
            return self.forward_test(Q)
    def __expected_ndcg__(self, montecarlo_samples, scores, relevances):
        """
        uses monte carlo samples to estimate the expected normalized discounted cumulative reward
        by using REINFORCE. See section 2 of the reference below.

        Parameters
        -------------
        scores: torch.Tensor of dimension B,N
            predicted scores for the objects in a batch of queries
        relevances: torch.Tensor of dimension B,N
            corresponding true relevances of such objects

        Returns
        ------------
        expected_ndcg: torch.Tensor of dimension B
            monte carlo approximation of the expected ndcg by sampling from a Plackett-Luce
            distribution parameterized by :param:`scores`
        """
        prob_dist = PlackettLuce(scores)
        mc_rankings = prob_dist.sample((montecarlo_samples,))
        mc_log_prob = prob_dist.log_prob(mc_rankings)
        # Reorder relevances according to each sampled ranking.
        mc_relevances = self._vect_gather(relevances, 1, mc_rankings)
        mc_ndcg = monte_carlo_vect_ndcg(mc_relevances)
        # REINFORCE (score-function) surrogate: its gradient matches the
        # gradient of the expected NDCG.
        expected_utility = (mc_ndcg * mc_log_prob).mean(dim=0)
        return expected_utility
| 5,938 | 33.132184 | 97 | py |
inFairness | inFairness-main/inFairness/fairalgo/sensr.py | import torch
from torch import nn
from inFairness.auditor import SenSRAuditor
from inFairness.fairalgo.datainterfaces import FairModelResponse
from inFairness.utils import datautils
class SenSR(nn.Module):
    """Implements the Sensitive Subspace Robustness (SenSR) algorithm.

    Proposed in `Training individually fair ML models with sensitive subspace robustness <https://arxiv.org/abs/1907.00020>`_

    Parameters
    ------------
        network: torch.nn.Module
            Network architecture
        distance_x: inFairness.distances.Distance
            Distance metric in the input space
        loss_fn: torch.nn.Module
            Loss function
        eps: float
            :math:`\\epsilon` parameter in the SenSR algorithm
        lr_lamb: float
            :math:`\\lambda` parameter in the SenSR algorithm
        lr_param: float
            :math:`\\alpha` parameter in the SenSR algorithm
        auditor_nsteps: int
            Number of update steps for the auditor to find worst-case examples
        auditor_lr: float
            Learning rate for the auditor
    """
    def __init__(
        self,
        network,
        distance_x,
        loss_fn,
        eps,
        lr_lamb,
        lr_param,
        auditor_nsteps,
        auditor_lr,
    ):
        super().__init__()
        self.distance_x = distance_x
        self.network = network
        self.loss_fn = loss_fn
        # Dual variable; lazily created on the first training step so it is
        # placed on the same device as the data.
        self.lambda_param = None
        self.eps = eps
        self.lr_lamb = lr_lamb
        self.lr_param = lr_param
        self.auditor_nsteps = auditor_nsteps
        self.auditor_lr = auditor_lr
        self.auditor = self.__init_auditor__()
    def __init_auditor__(self):
        # Builds the SenSR auditor that searches for worst-case input
        # perturbations under distance_x.
        auditor = SenSRAuditor(
            loss_fn=self.loss_fn,
            distance_x=self.distance_x,
            num_steps=self.auditor_nsteps,
            lr=self.auditor_lr,
        )
        return auditor
    def forward_train(self, X, Y):
        """Forward method during the training phase"""
        device = datautils.get_device(X)
        if self.lambda_param is None:
            self.lambda_param = torch.tensor(1.0, device=device)
        Y_pred = self.network(X)
        # Worst-case examples found by the auditor around X.
        X_worst = self.auditor.generate_worst_case_examples(
            self.network, X, Y, lambda_param=self.lambda_param
        )
        # Projected (non-negative) dual ascent step on lambda.
        self.lambda_param = torch.max(
            torch.stack(
                [
                    torch.tensor(0.0, device=device),
                    self.lambda_param
                    - self.lr_lamb * (self.eps - self.distance_x(X, X_worst).mean()),
                ]
            )
        )
        Y_pred_worst = self.network(X_worst)
        # Robust loss evaluated at the worst-case inputs, scaled by lr_param.
        fair_loss = torch.mean(self.lr_param * self.loss_fn(Y_pred_worst, Y))
        response = FairModelResponse(loss=fair_loss, y_pred=Y_pred)
        return response
    def forward_test(self, X):
        """Forward method during the test phase"""
        Y_pred = self.network(X)
        response = FairModelResponse(y_pred=Y_pred)
        return response
    def forward(self, X, Y=None, *args, **kwargs):
        """Defines the computation performed at every call.

        Parameters
        ------------
            X: torch.Tensor
                Input data
            Y: torch.Tensor
                Expected output data (required in training mode)

        Returns
        ----------
            output: FairModelResponse
                Model output
        """
        if self.training:
            return self.forward_train(X, Y)
        else:
            return self.forward_test(X)
| 3,534 | 27.055556 | 125 | py |
kernel-prediction-networks-PyTorch | kernel-prediction-networks-PyTorch-master/KPN.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torchsummary import summary
import torchvision.models as models
# Basic KPN building block
class Basic(nn.Module):
    """Basic KPN building block: three 3x3 conv+ReLU layers, optionally
    followed by channel-attention and spatial-attention gates."""
    def __init__(self, in_ch, out_ch, g=16, channel_att=False, spatial_att=False):
        super(Basic, self).__init__()
        self.channel_att = channel_att
        self.spatial_att = spatial_att
        # Triple conv stack: in_ch -> out_ch -> out_ch -> out_ch.
        channel_plan = [(in_ch, out_ch), (out_ch, out_ch), (out_ch, out_ch)]
        layers = []
        for c_in, c_out in channel_plan:
            layers.append(nn.Conv2d(in_channels=c_in, out_channels=c_out,
                                    kernel_size=3, stride=1, padding=1))
            layers.append(nn.ReLU())
        self.conv1 = nn.Sequential(*layers)
        if channel_att:
            # Squeeze-and-excite style gate over (avg ++ max) pooled features.
            self.att_c = nn.Sequential(
                nn.Conv2d(2 * out_ch, out_ch // g, 1, 1, 0),
                nn.ReLU(),
                nn.Conv2d(out_ch // g, out_ch, 1, 1, 0),
                nn.Sigmoid()
            )
        if spatial_att:
            # Single-channel gate computed from per-pixel mean/max statistics.
            self.att_s = nn.Sequential(
                nn.Conv2d(in_channels=2, out_channels=1, kernel_size=7, stride=1, padding=3),
                nn.Sigmoid()
            )
    def forward(self, data):
        """Apply the conv stack, then any enabled attention gates.

        :param data: input feature map
        :return: tensor with ``out_ch`` channels, same spatial size
        """
        features = self.conv1(data)
        if self.channel_att:
            pooled = torch.cat(
                [F.adaptive_avg_pool2d(features, (1, 1)),
                 F.adaptive_max_pool2d(features, (1, 1))],
                dim=1,
            )
            features = features * self.att_c(pooled)
        if self.spatial_att:
            stats = torch.cat(
                [torch.mean(features, dim=1, keepdim=True),
                 torch.max(features, dim=1, keepdim=True)[0]],
                dim=1,
            )
            features = features * self.att_s(stats)
        return features
class KPN(nn.Module):
    """U-Net style encoder-decoder that predicts per-pixel, per-frame
    denoising kernels for a burst and applies them via :class:`KernelConv`.
    """
    def __init__(self, color=True, burst_length=8, blind_est=False, kernel_size=[5], sep_conv=False,
                 channel_att=False, spatial_att=False, upMode='bilinear', core_bias=False):
        super(KPN, self).__init__()
        self.upMode = upMode
        self.burst_length = burst_length
        self.core_bias = core_bias
        self.color_channel = 3 if color else 1
        # Input channels: the burst itself, plus one extra noise-estimate
        # frame unless blind estimation is used.
        in_channel = (3 if color else 1) * (burst_length if blind_est else burst_length+1)
        # Output channels: per pixel/frame, 2*K kernel values per size when
        # separable, otherwise K^2 values per size.
        out_channel = (3 if color else 1) * (2 * sum(kernel_size) if sep_conv else np.sum(np.array(kernel_size) ** 2)) * burst_length
        if core_bias:
            out_channel += (3 if color else 1) * burst_length
        # Layer definitions
        # Encoder (layers 2-5): average pooling followed by a triple-conv block
        self.conv1 = Basic(in_channel, 64, channel_att=False, spatial_att=False)
        self.conv2 = Basic(64, 128, channel_att=False, spatial_att=False)
        self.conv3 = Basic(128, 256, channel_att=False, spatial_att=False)
        self.conv4 = Basic(256, 512, channel_att=False, spatial_att=False)
        self.conv5 = Basic(512, 512, channel_att=False, spatial_att=False)
        # Decoder (layers 6-8): upsample first, then convolve (with skip connections)
        self.conv6 = Basic(512+512, 512, channel_att=channel_att, spatial_att=spatial_att)
        self.conv7 = Basic(256+512, 256, channel_att=channel_att, spatial_att=spatial_att)
        self.conv8 = Basic(256+128, out_channel, channel_att=channel_att, spatial_att=spatial_att)
        self.outc = nn.Conv2d(out_channel, out_channel, 1, 1, 0)
        self.kernel_pred = KernelConv(kernel_size, sep_conv, self.core_bias)
        self.apply(self._init_weights)
    @staticmethod
    def _init_weights(m):
        # Xavier-normal init for conv/linear weights, zero biases.
        if isinstance(m, nn.Conv2d):
            nn.init.xavier_normal_(m.weight.data)
            nn.init.constant_(m.bias.data, 0.0)
        elif isinstance(m, nn.Linear):
            nn.init.xavier_normal_(m.weight.data)
            nn.init.constant_(m.bias.data, 0.0)
    # Forward pass
    def forward(self, data_with_est, data, white_level=1.0):
        """
        forward and obtain pred image directly
        :param data_with_est: if not blind estimation, it is same as data
        :param data: burst frames the predicted kernels are applied to
        :return: pred_img_i and img_pred
        """
        conv1 = self.conv1(data_with_est)
        conv2 = self.conv2(F.avg_pool2d(conv1, kernel_size=2, stride=2))
        conv3 = self.conv3(F.avg_pool2d(conv2, kernel_size=2, stride=2))
        conv4 = self.conv4(F.avg_pool2d(conv3, kernel_size=2, stride=2))
        conv5 = self.conv5(F.avg_pool2d(conv4, kernel_size=2, stride=2))
        # Decoder: upsample and concatenate the matching skip connection.
        conv6 = self.conv6(torch.cat([conv4, F.interpolate(conv5, scale_factor=2, mode=self.upMode)], dim=1))
        conv7 = self.conv7(torch.cat([conv3, F.interpolate(conv6, scale_factor=2, mode=self.upMode)], dim=1))
        conv8 = self.conv8(torch.cat([conv2, F.interpolate(conv7, scale_factor=2, mode=self.upMode)], dim=1))
        # return channel K*K*N
        core = self.outc(F.interpolate(conv8, scale_factor=2, mode=self.upMode))
        return self.kernel_pred(data, core, white_level)
class KernelConv(nn.Module):
"""
the class of computing prediction
"""
def __init__(self, kernel_size=[5], sep_conv=False, core_bias=False):
super(KernelConv, self).__init__()
self.kernel_size = sorted(kernel_size)
self.sep_conv = sep_conv
self.core_bias = core_bias
def _sep_conv_core(self, core, batch_size, N, color, height, width):
"""
convert the sep_conv core to conv2d core
2p --> p^2
:param core: shape: batch*(N*2*K)*height*width
:return:
"""
kernel_total = sum(self.kernel_size)
core = core.view(batch_size, N, -1, color, height, width)
if not self.core_bias:
core_1, core_2 = torch.split(core, kernel_total, dim=2)
else:
core_1, core_2, core_3 = torch.split(core, kernel_total, dim=2)
# output core
core_out = {}
cur = 0
for K in self.kernel_size:
t1 = core_1[:, :, cur:cur + K, ...].view(batch_size, N, K, 1, 3, height, width)
t2 = core_2[:, :, cur:cur + K, ...].view(batch_size, N, 1, K, 3, height, width)
core_out[K] = torch.einsum('ijklno,ijlmno->ijkmno', [t1, t2]).view(batch_size, N, K * K, color, height, width)
cur += K
# it is a dict
return core_out, None if not self.core_bias else core_3.squeeze()
def _convert_dict(self, core, batch_size, N, color, height, width):
"""
make sure the core to be a dict, generally, only one kind of kernel size is suitable for the func.
:param core: shape: batch_size*(N*K*K)*height*width
:return: core_out, a dict
"""
core_out = {}
core = core.view(batch_size, N, -1, color, height, width)
core_out[self.kernel_size[0]] = core[:, :, 0:self.kernel_size[0]**2, ...]
bias = None if not self.core_bias else core[:, :, -1, ...]
return core_out, bias
def forward(self, frames, core, white_level=1.0):
"""
compute the pred image according to core and frames
:param frames: [batch_size, N, 3, height, width]
:param core: [batch_size, N, dict(kernel), 3, height, width]
:return:
"""
if len(frames.size()) == 5:
batch_size, N, color, height, width = frames.size()
else:
batch_size, N, height, width = frames.size()
color = 1
frames = frames.view(batch_size, N, color, height, width)
if self.sep_conv:
core, bias = self._sep_conv_core(core, batch_size, N, color, height, width)
else:
core, bias = self._convert_dict(core, batch_size, N, color, height, width)
img_stack = []
pred_img = []
kernel = self.kernel_size[::-1]
for index, K in enumerate(kernel):
if not img_stack:
frame_pad = F.pad(frames, [K // 2, K // 2, K // 2, K // 2])
for i in range(K):
for j in range(K):
img_stack.append(frame_pad[..., i:i + height, j:j + width])
img_stack = torch.stack(img_stack, dim=2)
else:
k_diff = (kernel[index - 1] - kernel[index]) // 2
img_stack = img_stack[:, :, k_diff:-k_diff, ...]
# print('img_stack:', img_stack.size())
pred_img.append(torch.sum(
core[K].mul(img_stack), dim=2, keepdim=False
))
pred_img = torch.stack(pred_img, dim=0)
# print('pred_stack:', pred_img.size())
pred_img_i = torch.mean(pred_img, dim=0, keepdim=False).squeeze()
# if bias is permitted
if self.core_bias:
if bias is None:
raise ValueError('The bias should not be None.')
pred_img_i += bias
# print('white_level', white_level.size())
pred_img_i = pred_img_i / white_level
pred_img = torch.mean(pred_img_i, dim=1, keepdim=False)
# print('pred_img:', pred_img.size())
# print('pred_img_i:', pred_img_i.size())
return pred_img_i, pred_img
class LossFunc(nn.Module):
    """
    Combined KPN training loss: a weighted basic loss on the fused
    prediction plus a weighted annealed per-frame loss.
    """
    def __init__(self, coeff_basic=1.0, coeff_anneal=1.0, gradient_L1=True, alpha=0.9998, beta=100):
        super(LossFunc, self).__init__()
        self.coeff_basic = coeff_basic
        self.coeff_anneal = coeff_anneal
        self.loss_basic = LossBasic(gradient_L1)
        self.loss_anneal = LossAnneal(alpha, beta)
    def forward(self, pred_img_i, pred_img, ground_truth, global_step):
        """
        forward function of loss_func
        :param pred_img_i: per-frame predictions, shape [batch, N, 3, height, width]
        :param pred_img: fused prediction, shape [batch, 3, height, width]
        :param ground_truth: shape [batch, 3, height, width]
        :param global_step: int
        :return: (weighted basic loss, weighted annealed loss)
        """
        basic = self.loss_basic(pred_img, ground_truth)
        anneal = self.loss_anneal(global_step, pred_img_i, ground_truth)
        return self.coeff_basic * basic, self.coeff_anneal * anneal
class LossBasic(nn.Module):
    """
    Basic reconstruction loss: MSE on pixel values plus L1 on image gradients.
    """
    def __init__(self, gradient_L1=True):
        super(LossBasic, self).__init__()
        self.l1_loss = nn.L1Loss()
        self.l2_loss = nn.MSELoss()
        self.gradient = TensorGradient(gradient_L1)
    def forward(self, pred, ground_truth):
        """Return MSE(pred, gt) + L1(grad(pred), grad(gt))."""
        pixel_term = self.l2_loss(pred, ground_truth)
        grad_term = self.l1_loss(self.gradient(pred), self.gradient(ground_truth))
        return pixel_term + grad_term
class LossAnneal(nn.Module):
    """
    Annealed per-frame loss: averages the basic loss over every burst frame
    and decays its weight as beta * alpha ** global_step.
    """
    def __init__(self, alpha=0.9998, beta=100):
        super(LossAnneal, self).__init__()
        self.global_step = 0
        self.loss_func = LossBasic(gradient_L1=True)
        self.alpha = alpha
        self.beta = beta
    def forward(self, global_step, pred_i, ground_truth):
        """
        :param global_step: int
        :param pred_i: [batch_size, N, 3, height, width]
        :param ground_truth: [batch_size, 3, height, width]
        :return: annealed scalar loss
        """
        num_frames = pred_i.size(1)
        total = 0
        for frame_idx in range(num_frames):
            total += self.loss_func(pred_i[:, frame_idx, ...], ground_truth)
        # Exponentially decaying weight on the per-frame term.
        return self.beta * self.alpha ** global_step * (total / num_frames)
class TensorGradient(nn.Module):
    """
    Computes an image-gradient magnitude map via forward differences along
    the last two dimensions (L1 or L2 combination of the two components).
    """
    def __init__(self, L1=True):
        super(TensorGradient, self).__init__()
        self.L1 = L1
    def forward(self, img):
        """Return the per-pixel gradient magnitude of `img`."""
        rows, cols = img.size(-2), img.size(-1)
        # Forward differences realized by shifting the image via padding.
        dx = (F.pad(img, [1, 0, 0, 0]) - F.pad(img, [0, 1, 0, 0]))[..., 0:rows, 0:cols]
        dy = (F.pad(img, [0, 0, 1, 0]) - F.pad(img, [0, 0, 0, 1]))[..., 0:rows, 0:cols]
        if self.L1:
            return dx.abs() + dy.abs()
        return torch.sqrt(dx * dx + dy * dy)
if __name__ == '__main__':
    # Smoke test printing a model summary on GPU.
    # Fix: the old positional call KPN(6, 5*5*6, True, True) mis-bound the
    # arguments (color=6, burst_length=150, blind_est=True, kernel_size=True).
    # A 6-channel (6, 224, 224) summary input corresponds to a grayscale,
    # blind-estimation model over a burst of 6 frames.
    kpn = KPN(color=False, burst_length=6, blind_est=True, kernel_size=[5]).cuda()
    print(summary(kpn, (6, 224, 224), batch_size=4))
| 12,210 | 39.30033 | 150 | py |
kernel-prediction-networks-PyTorch | kernel-prediction-networks-PyTorch-master/train_eval_syn.py | import torch
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.nn as nn
from torch.utils.data import DataLoader
import numpy as np
import argparse
import os, sys, time, shutil
from data_provider import OnTheFlyDataset, _configspec_path
from kpn_data_provider import TrainDataSet, UndosRGBGamma, sRGBGamma
from KPN import KPN, LossFunc
from utils.training_util import MovingAverage, save_checkpoint, load_checkpoint, read_config
from utils.training_util import calculate_psnr, calculate_ssim
from tensorboardX import SummaryWriter
from PIL import Image
from torchvision.transforms import transforms
def train(config, num_workers, num_threads, cuda, restart_train, mGPU):
    """Train the KPN denoiser on synthetic noisy bursts.

    :param config: parsed config with 'training' and 'architecture' sections
    :param num_workers: DataLoader worker count
    :param num_threads: intended torch thread count (currently unused)
    :param cuda: move model and data to GPU when True
    :param restart_train: start from scratch instead of resuming 'best' checkpoint
    :param mGPU: wrap the model in nn.DataParallel
    """
    # torch.set_num_threads(num_threads)
    train_config = config['training']
    arch_config = config['architecture']
    batch_size = train_config['batch_size']
    lr = train_config['learning_rate']
    weight_decay = train_config['weight_decay']
    decay_step = train_config['decay_steps']
    lr_decay = train_config['lr_decay']
    n_epoch = train_config['num_epochs']
    use_cache = train_config['use_cache']
    print('Configs:', config)
    # checkpoint path
    checkpoint_dir = train_config['checkpoint_dir']
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    # logs path (old logs are wiped; SummaryWriter recreates the directory)
    logs_dir = train_config['logs_dir']
    if not os.path.exists(logs_dir):
        os.makedirs(logs_dir)
    shutil.rmtree(logs_dir)
    log_writer = SummaryWriter(logs_dir)
    # dataset and dataloader
    data_set = TrainDataSet(
        train_config['dataset_configs'],
        img_format='.bmp',
        degamma=True,
        color=False,
        blind=arch_config['blind_est']
    )
    data_loader = DataLoader(
        data_set,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers
    )
    dataset_config = read_config(train_config['dataset_configs'], _configspec_path())['dataset_configs']
    # model here
    model = KPN(
        color=False,
        burst_length=dataset_config['burst_length'],
        blind_est=arch_config['blind_est'],
        kernel_size=list(map(int, arch_config['kernel_size'].split())),
        sep_conv=arch_config['sep_conv'],
        channel_att=arch_config['channel_att'],
        spatial_att=arch_config['spatial_att'],
        upMode=arch_config['upMode'],
        core_bias=arch_config['core_bias']
    )
    if cuda:
        model = model.cuda()
    if mGPU:
        model = nn.DataParallel(model)
    model.train()
    # loss function here
    loss_func = LossFunc(
        coeff_basic=1.0,
        coeff_anneal=1.0,
        gradient_L1=True,
        alpha=arch_config['alpha'],
        beta=arch_config['beta']
    )
    # Optimizer here
    if train_config['optimizer'] == 'adam':
        optimizer = optim.Adam(
            model.parameters(),
            lr=lr
        )
    elif train_config['optimizer'] == 'sgd':
        optimizer = optim.SGD(
            model.parameters(),
            lr=lr,
            momentum=0.9,
            weight_decay=weight_decay
        )
    else:
        raise ValueError("Optimizer must be 'sgd' or 'adam', but received {}.".format(train_config['optimizer']))
    optimizer.zero_grad()
    # learning rate scheduler here
    scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=lr_decay)
    average_loss = MovingAverage(train_config['save_freq'])
    if not restart_train:
        try:
            checkpoint = load_checkpoint(checkpoint_dir, 'best')
            start_epoch = checkpoint['epoch']
            global_step = checkpoint['global_iter']
            best_loss = checkpoint['best_loss']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            scheduler.load_state_dict(checkpoint['lr_scheduler'])
            print('=> loaded checkpoint (epoch {}, global_step {})'.format(start_epoch, global_step))
        # Fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # any ordinary failure to resume still falls back to a fresh start.
        except Exception:
            start_epoch = 0
            global_step = 0
            best_loss = np.inf
            print('=> no checkpoint file to be loaded.')
    else:
        start_epoch = 0
        global_step = 0
        best_loss = np.inf
        if os.path.exists(checkpoint_dir):
            pass
            # files = os.listdir(checkpoint_dir)
            # for f in files:
            #     os.remove(os.path.join(checkpoint_dir, f))
        else:
            os.mkdir(checkpoint_dir)
    print('=> training')
    burst_length = dataset_config['burst_length']
    data_length = burst_length if arch_config['blind_est'] else burst_length+1
    patch_size = dataset_config['patch_size']
    for epoch in range(start_epoch, n_epoch):
        epoch_start_time = time.time()
        # decay the learning rate, clamped to a floor of 5e-6
        lr_cur = [param['lr'] for param in optimizer.param_groups]
        if lr_cur[0] > 5e-6:
            scheduler.step()
        else:
            for param in optimizer.param_groups:
                param['lr'] = 5e-6
        print('='*20, 'lr={}'.format([param['lr'] for param in optimizer.param_groups]), '='*20)
        t1 = time.time()
        for step, (burst_noise, gt, white_level) in enumerate(data_loader):
            if cuda:
                burst_noise = burst_noise.cuda()
                gt = gt.cuda()
            # print('white_level', white_level, white_level.size())
            # Predict per-frame and fused denoised images.
            pred_i, pred = model(burst_noise, burst_noise[:, 0:burst_length, ...], white_level)
            # Losses are computed in gamma-encoded (sRGB) space.
            loss_basic, loss_anneal = loss_func(sRGBGamma(pred_i), sRGBGamma(pred), sRGBGamma(gt), global_step)
            loss = loss_basic + loss_anneal
            # backward
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # update the average loss
            average_loss.update(loss)
            # calculate PSNR
            psnr = calculate_psnr(pred.unsqueeze(1), gt.unsqueeze(1))
            ssim = calculate_ssim(pred.unsqueeze(1), gt.unsqueeze(1))
            # add scalars to tensorboardX
            log_writer.add_scalar('loss_basic', loss_basic, global_step)
            log_writer.add_scalar('loss_anneal', loss_anneal, global_step)
            log_writer.add_scalar('loss_total', loss, global_step)
            log_writer.add_scalar('psnr', psnr, global_step)
            log_writer.add_scalar('ssim', ssim, global_step)
            # print
            print('{:-4d}\t| epoch {:2d}\t| step {:4d}\t| loss_basic: {:.4f}\t| loss_anneal: {:.4f}\t|'
                  ' loss: {:.4f}\t| PSNR: {:.2f}dB\t| SSIM: {:.4f}\t| time:{:.2f} seconds.'
                  .format(global_step, epoch, step, loss_basic, loss_anneal, loss, psnr, ssim, time.time()-t1))
            t1 = time.time()
            # global_step
            global_step += 1
            if global_step % train_config['save_freq'] == 0:
                if average_loss.get_value() < best_loss:
                    is_best = True
                    best_loss = average_loss.get_value()
                else:
                    is_best = False
                save_dict = {
                    'epoch': epoch,
                    'global_iter': global_step,
                    'state_dict': model.state_dict(),
                    'best_loss': best_loss,
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': scheduler.state_dict()
                }
                save_checkpoint(
                    save_dict, is_best, checkpoint_dir, global_step, max_keep=train_config['ckpt_to_keep']
                )
        print('Epoch {} is finished, time elapsed {:.2f} seconds.'.format(epoch, time.time()-epoch_start_time))
def eval(config, args):
    """Evaluate a trained KPN model on the first 100 validation bursts,
    saving noisy/predicted/ground-truth images and printing PSNR/SSIM.

    NOTE(review): the name shadows the `eval` builtin; kept for caller
    compatibility. Also, a missing checkpoint dir only prints a warning
    here — load_checkpoint below will then fail.
    """
    train_config = config['training']
    arch_config = config['architecture']
    use_cache = train_config['use_cache']
    print('Eval Process......')
    checkpoint_dir = train_config['checkpoint_dir']
    if not os.path.exists(checkpoint_dir) or len(os.listdir(checkpoint_dir)) == 0:
        print('There is no any checkpoint file in path:{}'.format(checkpoint_dir))
    # the path for saving eval images (cleared on every run)
    eval_dir = train_config['eval_dir']
    if not os.path.exists(eval_dir):
        os.mkdir(eval_dir)
    files = os.listdir(eval_dir)
    for f in files:
        os.remove(os.path.join(eval_dir, f))
    # dataset and dataloader (validation split: train=False)
    data_set = TrainDataSet(
        train_config['dataset_configs'],
        img_format='.bmp',
        degamma=True,
        color=False,
        blind=arch_config['blind_est'],
        train=False
    )
    data_loader = DataLoader(
        data_set,
        batch_size=1,
        shuffle=False,
        num_workers=args.num_workers
    )
    dataset_config = read_config(train_config['dataset_configs'], _configspec_path())['dataset_configs']
    # model here (must match the architecture used at training time)
    model = KPN(
        color=False,
        burst_length=dataset_config['burst_length'],
        blind_est=arch_config['blind_est'],
        kernel_size=list(map(int, arch_config['kernel_size'].split())),
        sep_conv=arch_config['sep_conv'],
        channel_att=arch_config['channel_att'],
        spatial_att=arch_config['spatial_att'],
        upMode=arch_config['upMode'],
        core_bias=arch_config['core_bias']
    )
    if args.cuda:
        model = model.cuda()
    if args.mGPU:
        model = nn.DataParallel(model)
    # load trained model
    ckpt = load_checkpoint(checkpoint_dir, args.checkpoint)
    model.load_state_dict(ckpt['state_dict'])
    print('The model has been loaded from epoch {}, n_iter {}.'.format(ckpt['epoch'], ckpt['global_iter']))
    # switch the eval mode
    model.eval()
    # data_loader = iter(data_loader)
    burst_length = dataset_config['burst_length']
    data_length = burst_length if arch_config['blind_est'] else burst_length + 1
    patch_size = dataset_config['patch_size']
    trans = transforms.ToPILImage()
    with torch.no_grad():
        psnr = 0.0
        ssim = 0.0
        for i, (burst_noise, gt, white_level) in enumerate(data_loader):
            # evaluate only the first 100 bursts
            if i < 100:
                # data = next(data_loader)
                if args.cuda:
                    burst_noise = burst_noise.cuda()
                    gt = gt.cuda()
                    white_level = white_level.cuda()
                pred_i, pred = model(burst_noise, burst_noise[:, 0:burst_length, ...], white_level)
                # metrics and saved images are in gamma-encoded (sRGB) space
                pred_i = sRGBGamma(pred_i)
                pred = sRGBGamma(pred)
                gt = sRGBGamma(gt)
                burst_noise = sRGBGamma(burst_noise / white_level)
                psnr_t = calculate_psnr(pred.unsqueeze(1), gt.unsqueeze(1))
                ssim_t = calculate_ssim(pred.unsqueeze(1), gt.unsqueeze(1))
                psnr_noisy = calculate_psnr(burst_noise[:, 0, ...].unsqueeze(1), gt.unsqueeze(1))
                psnr += psnr_t
                ssim += ssim_t
                pred = torch.clamp(pred, 0.0, 1.0)
                if args.cuda:
                    pred = pred.cpu()
                    gt = gt.cpu()
                    burst_noise = burst_noise.cpu()
                trans(burst_noise[0, 0, ...].squeeze()).save(os.path.join(eval_dir, '{}_noisy_{:.2f}dB.png'.format(i, psnr_noisy)), quality=100)
                trans(pred.squeeze()).save(os.path.join(eval_dir, '{}_pred_{:.2f}dB.png'.format(i, psnr_t)), quality=100)
                trans(gt.squeeze()).save(os.path.join(eval_dir, '{}_gt.png'.format(i)), quality=100)
                print('{}-th image is OK, with PSNR: {:.2f}dB, SSIM: {:.4f}'.format(i, psnr_t, ssim_t))
            else:
                break
        print('All images are OK, average PSNR: {:.2f}dB, SSIM: {:.4f}'.format(psnr/100, ssim/100))
if __name__ == '__main__':
    # Command-line interface: parse args, read the config, then dispatch to
    # either evaluation or training.
    parser = argparse.ArgumentParser(description='parameters for training')
    parser.add_argument('--config_file', dest='config_file', default='kpn_specs/kpn_config.conf', help='path to config file')
    parser.add_argument('--config_spec', dest='config_spec', default='kpn_specs/configspec.conf', help='path to config spec file')
    parser.add_argument('--restart', action='store_true', help='Whether to remove all old files and restart the training process')
    parser.add_argument('--num_workers', '-nw', default=4, type=int, help='number of workers in data loader')
    parser.add_argument('--num_threads', '-nt', default=8, type=int, help='number of threads in data loader')
    parser.add_argument('--cuda', '-c', action='store_true', help='whether to train on the GPU')
    parser.add_argument('--mGPU', '-m', action='store_true', help='whether to train on multiple GPUs')
    parser.add_argument('--eval', action='store_true', help='whether to work on the evaluation mode')
    parser.add_argument('--checkpoint', '-ckpt', dest='checkpoint', type=str, default='best',
                        help='the checkpoint to eval')
    args = parser.parse_args()
    # read and validate the config against its spec
    config = read_config(args.config_file, args.config_spec)
    if args.eval:
        eval(config, args)
    else:
        train(config, args.num_workers, args.num_threads, args.cuda, args.restart, args.mGPU)
| 13,114 | 37.014493 | 144 | py |
kernel-prediction-networks-PyTorch | kernel-prediction-networks-PyTorch-master/kpn_data_provider.py | import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import os
from PIL import Image
import numpy as np
from skimage.color import rgb2xyz
import inspect
from utils.training_util import read_config
from data_generation.data_utils import *
import torch.nn.functional as F
def sRGBGamma(tensor):
    """Apply an (approximate) linear -> sRGB gamma curve element-wise.

    Values at or below the threshold use the linear segment (x * 12.92);
    larger values use the power segment. The 0.001 offset inside the power
    is kept from the original implementation ("to avoid funny thing at 0").

    :param tensor: linear-intensity tensor, expected in [0, 1]
    :return: gamma-encoded tensor of the same shape
    """
    threshold = 0.0031308
    a = 0.055
    mult = 12.92
    gamma = 2.4
    res = torch.zeros_like(tensor)
    mask = tensor > threshold
    res[mask] = (1 + a) * torch.pow(tensor[mask] + 0.001, 1.0 / gamma) - a
    # Fix: use boolean inversion. `1 - mask` on a bool tensor raises a
    # RuntimeError on modern PyTorch (subtraction is undefined for bools).
    res[~mask] = tensor[~mask] * mult
    return res
def UndosRGBGamma(tensor):
    """Approximately undo the sRGB gamma curve element-wise.

    NOTE(review): this is not the exact inverse of :func:`sRGBGamma` —
    the power branch computes (x + a)**gamma / (1 + a) rather than
    ((x + a) / (1 + a))**gamma; kept as-is for training consistency.

    :param tensor: gamma-encoded tensor, expected in [0, 1]
    :return: approximately linear-intensity tensor of the same shape
    """
    threshold = 0.0031308
    a = 0.055
    mult = 12.92
    gamma = 2.4
    res = torch.zeros_like(tensor)
    mask = tensor > threshold
    # Fix: use boolean inversion. `1 - mask` on a bool tensor raises a
    # RuntimeError on modern PyTorch (subtraction is undefined for bools).
    res[~mask] = tensor[~mask] / mult
    res[mask] = torch.pow(tensor[mask] + a, gamma) / (1 + a)
    return res
class Random_Horizontal_Flip(object):
    """Randomly flip a tensor along its last (width) dimension."""
    def __init__(self, p=0.5):
        # probability of performing the flip
        self.p = p
    def __call__(self, tensor):
        do_flip = np.random.rand() < self.p
        return torch.flip(tensor, dims=[-1]) if do_flip else tensor
class Random_Vertical_Flip(object):
    """Randomly flip a tensor along its second-to-last (height) dimension."""
    def __init__(self, p=0.5):
        # probability of performing the flip
        self.p = p
    def __call__(self, tensor):
        do_flip = np.random.rand() < self.p
        return torch.flip(tensor, dims=[-2]) if do_flip else tensor
class TrainDataSet(torch.utils.data.Dataset):
    def __init__(self, config_file,
                 config_spec=None, img_format='.bmp', degamma=True, color=True, blind=False, train=True):
        """Burst-synthesis dataset: crops jittered patches from still images
        to simulate a noisy burst.

        :param config_file: path to the dataset config file
        :param config_spec: path to the configspec (defaults to the bundled one)
        :param img_format: file extension filter for source images
        :param degamma: undo gamma correction (work in linear intensities)
        :param color: process color (RGB) instead of grayscale
        :param blind: blind estimation (noise std not fed to the network)
        :param train: apply random-flip augmentation when True
        """
        super(TrainDataSet, self).__init__()
        if config_spec is None:
            config_spec = self._configspec_path()
        config = read_config(config_file, config_spec)
        self.dataset_config = config['dataset_configs']
        self.dataset_dir = self.dataset_config['dataset_dir']
        self.images = list(filter(lambda x: True if img_format in x else False, os.listdir(self.dataset_dir)))
        self.burst_size = self.dataset_config['burst_length']
        self.patch_size = self.dataset_config['patch_size']
        self.upscale = self.dataset_config['down_sample']
        self.big_jitter = self.dataset_config['big_jitter']
        self.small_jitter = self.dataset_config['small_jitter']
        # maximum jitter offset in the image before downsampling
        self.jitter_upscale = self.big_jitter * self.upscale
        # patch size in the pre-downsampling image (patch plus jitter margin)
        self.size_upscale = self.patch_size * self.upscale + 2 * self.jitter_upscale
        # delta between big and small jitter, at pre-downsampling scale
        self.delta_upscale = (self.big_jitter - self.small_jitter) * self.upscale
        # patch size mapped back to the original image
        self.patch_size_upscale = self.patch_size * self.upscale
        # whether to undo the gamma curve
        self.degamma = degamma
        # whether to process color images
        self.color = color
        # blind estimation: the estimated noise variance is NOT used as a
        # network input
        self.blind = blind
        self.train = train
        self.vertical_flip = Random_Vertical_Flip(p=0.5)
        self.horizontal_flip = Random_Horizontal_Flip(p=0.5)
@staticmethod
def _configspec_path():
current_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe()))
)
return os.path.join(current_dir,
'dataset_specs/data_configspec.conf')
@staticmethod
def crop_random(tensor, patch_size):
return random_crop(tensor, 1, patch_size)[0]
# get一个item 根据index检索
def __getitem__(self, index):
# print(index)
image = Image.open(os.path.join(self.dataset_dir, self.images[index])).convert('RGB')
# 先转换为Tensor进行degamma
image = transforms.ToTensor()(image)
# if self.degamma:
# image = UndosRGBGamma(tensor=image)
image_crop = self.crop_random(image, self.size_upscale)
# 3*H*W 对应于较小jitter下
image_crop_small = image_crop[:, self.delta_upscale:-self.delta_upscale,
self.delta_upscale:-self.delta_upscale]
# 进一步进行random_crop所需的transform
# burst中的第一个不做偏移 后期作为target
# output shape: N*3*H*W
img_burst = []
for i in range(self.burst_size):
if i == 0:
img_burst.append(
image_crop[:, self.jitter_upscale:-self.jitter_upscale, self.jitter_upscale:-self.jitter_upscale]
)
else:
if np.random.binomial(1, min(1.0, np.random.poisson(lam=1.5) / self.burst_size)) == 0:
img_burst.append(
self.crop_random(
image_crop_small, self.patch_size_upscale
)
)
else: #big
img_burst.append(
self.crop_random(image_crop, self.patch_size_upscale)
)
image_burst = torch.stack(img_burst, dim=0)
image_burst = F.adaptive_avg_pool2d(image_burst, (self.patch_size, self.patch_size))
# label为patch中burst的第一个
if not self.color:
image_burst = 0.2989*image_burst[:, 0, ...] + 0.5870 * image_burst[:, 1, ...] + 0.1140*image_burst[:, 2, ...]
image_burst = torch.clamp(image_burst, 0.0, 1.0)
if self.degamma:
UndosRGBGamma(image_burst)
if self.train:
# data augment
image_burst = self.horizontal_flip(image_burst)
image_burst = self.vertical_flip(image_burst)
gt = image_burst[0, ...]
# 以上得到的patch size为burst*(3)*size*size
"""
数据加噪声等一系列处理 全部基于rgb图像做
"""
# 要产生[log10(0.1), log10(1.0)]之间的均匀分布随机数 也就是[0,1加负号即可]
# 产生pred之后 再除以white_level恢复原来的亮度
# batch中的每一个burst 产生一个white_level
white_level = torch.from_numpy(np.power(10, -np.random.rand(1, 1, 1))).type_as(image_burst)
# 论文中对图像亮度赋值进行线性缩放[0.1, 1]
image_burst = white_level * image_burst
# gray image
if not self.color:
# 生成随机的read和shot噪声方差
sigma_read = torch.from_numpy(
np.power(10, np.random.uniform(-3.0, -1.5, (1, 1, 1)))).type_as(image_burst)
sigma_shot = torch.from_numpy(
np.power(10, np.random.uniform(-4.0, -2.0, (1, 1, 1)))).type_as(image_burst)
# sigma_read = torch.from_numpy(2*np.power(10, np.array([[[-2.0]]]))).type_as(image_burst)
# sigma_shot = torch.from_numpy(6.4 * np.power(10, np.array([[[-3.0]]]))).type_as(image_burst)
# 产生噪声 依据论文中公式产生
sigma_read_com = sigma_read.expand_as(image_burst)
sigma_shot_com = sigma_shot.expand_as(image_burst)
# generate noise
burst_noise = torch.normal(image_burst, torch.sqrt(sigma_read_com**2 + image_burst * sigma_shot_com)).type_as(image_burst)
# burst_noise 恢复到[0,1] 截去外面的值
burst_noise = torch.clamp(burst_noise, 0.0, 1.0)
# 非盲估计 就要估计噪声的方差
if not self.blind:
# 接下来就是根据两个sigma 将估计的噪声标准差也作为输入 用burst中的第一个进行估计
# estimation shape: H*W
sigma_read_est = sigma_read.view(1, 1).expand_as(gt)
sigma_shot_est = sigma_shot.view(1, 1).expand_as(gt)
sigma_estimate = torch.sqrt(sigma_read_est ** 2 + sigma_shot_est.mul(
torch.max(torch.stack([burst_noise[0, ...], torch.zeros_like(burst_noise[0, ...])], dim=0), dim=0)[0]))
# 把噪声的估计和burst图像连接在一起
burst_noise = torch.cat([burst_noise, sigma_estimate.unsqueeze(0)], dim=0)
# 按照文章中的 ref Image作为target进行了训练 输出结果和ref很相似 没能起到太大的去噪作用
# return patches_with_noise, patches_with_noise[:, 0, ...], white_level
# 不含噪声的ref作为target进行测试
return burst_noise, gt, white_level
# color image
else:
# 生成随机的read和shot噪声方差
sigma_read = torch.from_numpy(
np.power(10, np.random.uniform(-3.0, -1.5, (1, 1, 1, 1)))).type_as(image_burst)
sigma_shot = torch.from_numpy(
np.power(10, np.random.uniform(-4.0, -2.0, (1, 1, 1, 1)))).type_as(image_burst)
# 产生噪声 依据论文中公式产生
sigma_read_com = sigma_read.expand_as(image_burst)
sigma_shot_com = sigma_shot.expand_as(image_burst)
# generate noise
burst_noise = torch.normal(image_burst,
torch.sqrt(sigma_read_com ** 2 + image_burst * sigma_shot_com)).type_as(image_burst)
# burst_noise 恢复到[0,1] 截去外面的值
burst_noise = torch.clamp(burst_noise, 0.0, 1.0)
# 非盲估计 就要估计噪声的方差
if not self.blind:
# 接下来就是根据两个sigma 将估计的噪声标准差也作为输入 用burst中的第一个进行估计
# estimation shape: H*W
sigma_read_est = sigma_read.view(1, 1, 1).expand_as(gt)
sigma_shot_est = sigma_shot.view(1, 1, 1).expand_as(gt)
sigma_estimate = torch.sqrt(sigma_read_est ** 2 + sigma_shot_est.mul(
torch.max(torch.stack([burst_noise[0, ...], torch.zeros_like(burst_noise[0, ...])], dim=0), dim=0)[0]))
# 把噪声的估计和burst图像连接在一起
burst_noise = torch.cat([burst_noise, sigma_estimate.unsqueeze(0)], dim=0)
white_level = white_level.unsqueeze(0)
return burst_noise, gt, white_level
def __len__(self):
return len(self.images)
if __name__ == '__main__':
    # Ad-hoc smoke test: the commented-out section exercised the full
    # TrainDataSet/DataLoader path; the live code only sanity-checks the
    # horizontal-flip transform on a small random tensor.
    # path = 'F:/BinZhang/Codes/deep-burst-denoising/data/train'
    # dataset = TrainDataSet(path, '.jpg', 8, 128, 4, 16, 2, color=False)
    # dataloader = DataLoader(dataset,
    #                         batch_size=4,
    #                         shuffle=True,
    #                         num_workers=4)
    # dataloader = iter(dataloader)
    # a, b, c = next(dataloader)
    # print(a.size(), b.size(), c.size())
    hf = Random_Horizontal_Flip(0.5)
    a = torch.randint(0, 10, (2, 2))
    print(a, hf(a))
| 10,342 | 37.737828 | 134 | py |
kernel-prediction-networks-PyTorch | kernel-prediction-networks-PyTorch-master/data_provider.py | import glob
import inspect
import os
import zlib
from time import time
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.data as data
from PIL import Image
from torch import FloatTensor
from data_generation.pipeline import ImageDegradationPipeline
from utils.image_utils import bayer_crop_tensor
from utils.training_util import read_config
DEBUG_TIME = False
def _configspec_path():
current_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe()))
)
return os.path.join(current_dir,
'dataset_specs/data_configspec.conf')
class OnTheFlyDataset(data.Dataset):
    """Dataset that synthesizes degraded/clean burst pairs on the fly from
    linear image patches, using a randomized degradation pipeline."""

    def __init__(self,
                 config_file,
                 config_spec=None,
                 blind=False,
                 cropping="random",
                 cache_dir=None,
                 use_cache=False,
                 dataset_name="synthetic"):
        """ Dataset for generating degraded images on the fly.
        Args:
            config_file: path to data config file.
            config_spec: path to data config spec file (defaults to the
                bundled dataset_specs/data_configspec.conf).
            blind: if True, no noise-estimate channel is appended to the burst.
            cropping: cropping mode ["random", "center"].
            cache_dir: directory used to cache generated sample tensors.
            use_cache: whether to read/write generated samples in cache_dir.
            dataset_name: tag used to build the cached dataset name.
        """
        super().__init__()
        if config_spec is None:
            config_spec = _configspec_path()
        config = read_config(config_file, config_spec)
        # self.config_file = config_file
        # dictionary of dataset configs
        self.dataset_configs = config['dataset_configs']
        # directory to load linear patches
        patch_dir = self.dataset_configs['dataset_dir']
        # dictionary of boolean flags controlling how pipelines are created
        # (see data_configspec for detail).
        self.pipeline_configs = config['pipeline_configs']
        # dictionary of ranges of params (see data_configspec for detail).
        self.pipeline_param_ranges = config['pipeline_param_ranges']
        file_list = glob.glob(os.path.join(patch_dir,
                                           '*.pth'))
        file_list = [os.path.basename(f) for f in file_list]
        file_list = [os.path.splitext(f)[0] for f in file_list]
        # deterministic shuffle: order files by a checksum of their name
        self.file_list = sorted(file_list, key=lambda x: zlib.adler32(x.encode('utf-8')))
        # print(self.file_list)
        # self.pipeline_param_ranges = pipeline_param_ranges
        # self.pipeline_configs = pipeline_configs
        # print('Data Pipeline Configs: ', self.pipeline_configs)
        # print('Data Pipeline Param Ranges: ', self.pipeline_param_ranges)
        # some variables about the setting of dataset
        self.data_root = patch_dir
        self.im_size = self.dataset_configs['patch_size']  # the size after down-sample
        extra_for_bayer = 2  # extra size used for the random choice for bayer pattern
        self.big_jitter = self.dataset_configs['big_jitter']
        self.small_jitter = self.dataset_configs['small_jitter']
        self.down_sample = self.dataset_configs['down_sample']
        # image size corresponding to original image (include big jitter)
        self.im_size_upscale = (self.im_size + 2 * self.big_jitter + extra_for_bayer) * self.down_sample
        # from big jitter image to real image with extra pixels to random choose the bayer pattern
        self.big_restore_upscale = self.big_jitter * self.down_sample
        # the shift pixels of small jitter within upscale
        self.small_restore_upscale = self.small_jitter * self.down_sample
        # from big jitter images to small jitter images
        self.big2small_upscale = (self.big_jitter - self.small_jitter) * self.down_sample
        # ref-frame crop size before down-sampling (no jitter margin)
        self.im_size_extra = (self.im_size + extra_for_bayer) * self.down_sample
        # blind estimate?
        self.blind = blind
        # others
        self.cropping = cropping
        self.use_cache = use_cache
        self.cache_dir = cache_dir
        sz = "{}x{}".format(self.im_size, self.im_size) \
            if self.im_size is not None else "None"
        self.dataset_name = "_".join([dataset_name, sz])
        # add the codes by Bin Zhang
        self.burst_length = self.dataset_configs['burst_length']

    def _get_filename(self, idx):
        """Return the cache file path for sample `idx`, creating the folder."""
        # folder = os.path.join(self.cache_dir, self.dataset_name)
        folder = self.cache_dir
        if not os.path.exists(folder):
            os.makedirs(folder)
        # filename = os.path.join(folder, self.dataset_name + "_{:06d}.pth".format(idx))
        filename = os.path.join(folder, "{:06d}.pth".format(idx))
        return filename

    def _save_tensor(self, tensor_dicts, idx):
        """Best-effort save of a sample dict to the cache (failure only warns)."""
        filename = self._get_filename(idx)
        try:
            torch.save(tensor_dicts, filename)
        except OSError as e:
            print("Warning write failed.")
            print(e)

    def _load_tensor(self, idx):
        """Load the cached sample dict for `idx` (raises if not cached)."""
        filename = self._get_filename(idx)
        return torch.load(filename)

    def _random_log_uniform(self, a, b):
        """Sample log-uniformly from [a, b] (plain uniform in legacy mode).

        NOTE(review): relies on self.legacy_uniform, which is only assigned
        inside _randomize_parameter -- call order matters.
        """
        if self.legacy_uniform:
            return np.random.uniform(a, b)
        val = np.random.uniform(np.log(a), np.log(b))
        return np.exp(val)

    def _randomize_parameter(self):
        """Draw one random value for every degradation parameter, within the
        configured ranges, and return them as a dict keyed for _create_pipeline."""
        if "use_log_uniform" in self.pipeline_configs:
            self.legacy_uniform = not self.pipeline_configs["use_log_uniform"]
        else:
            self.legacy_uniform = True
        exp_adjustment = np.random.uniform(self.pipeline_param_ranges["min_exposure_adjustment"],
                                           self.pipeline_param_ranges["max_exposure_adjustment"])
        poisson_k = self._random_log_uniform(self.pipeline_param_ranges["min_poisson_noise"],
                                             self.pipeline_param_ranges["max_poisson_noise"])
        read_noise_sigma = self._random_log_uniform(self.pipeline_param_ranges["min_gaussian_noise"],
                                                    self.pipeline_param_ranges["max_gaussian_noise"])
        chromatic_aberration = np.random.uniform(self.pipeline_param_ranges["min_chromatic_aberration"],
                                                 self.pipeline_param_ranges["max_chromatic_aberration"])
        motionblur_segment = np.random.randint(self.pipeline_param_ranges["min_motionblur_segment"],
                                               self.pipeline_param_ranges["max_motionblur_segment"])
        # one (amount, direction) pair per motion-blur segment
        motion_blur = []
        motion_blur_dir = []
        for i in range(motionblur_segment):
            motion_blur.append(np.random.uniform(self.pipeline_param_ranges["min_motion_blur"],
                                                 self.pipeline_param_ranges["max_motion_blur"])
                               )
            motion_blur_dir.append(np.random.uniform(0.0, 360.0))
        jpeg_quality = np.random.randint(self.pipeline_param_ranges["min_jpeg_quality"],
                                         self.pipeline_param_ranges["max_jpeg_quality"])
        denoise_sigma_s = self._random_log_uniform(self.pipeline_param_ranges["min_denoise_sigma_s"],
                                                   self.pipeline_param_ranges["max_denoise_sigma_s"])
        denoise_sigma_r = self._random_log_uniform(self.pipeline_param_ranges["min_denoise_sigma_r"],
                                                   self.pipeline_param_ranges["max_denoise_sigma_r"])
        denoise_color_sigma_ratio = self._random_log_uniform(
            self.pipeline_param_ranges["min_denoise_color_sigma_ratio"],
            self.pipeline_param_ranges["max_denoise_color_sigma_ratio"])
        denoise_color_range_ratio = self._random_log_uniform(
            self.pipeline_param_ranges["min_denoise_color_range_ratio"],
            self.pipeline_param_ranges["max_denoise_color_range_ratio"])
        unsharp_amount = np.random.uniform(self.pipeline_param_ranges["min_unsharp_amount"],
                                           self.pipeline_param_ranges["max_unsharp_amount"])
        denoise_median_sz = np.random.randint(self.pipeline_param_ranges["min_denoise_median_sz"],
                                              self.pipeline_param_ranges["max_denoise_median_sz"])
        quantize_bits = np.random.randint(self.pipeline_param_ranges["min_quantize_bits"],
                                          self.pipeline_param_ranges["max_quantize_bits"])
        wavelet_sigma = np.random.uniform(self.pipeline_param_ranges["min_wavelet_sigma"],
                                          self.pipeline_param_ranges["max_wavelet_sigma"])
        motionblur_th = np.random.uniform(self.pipeline_param_ranges["min_motionblur_th"],
                                          self.pipeline_param_ranges["max_motionblur_th"])
        motionblur_boost = self._random_log_uniform(self.pipeline_param_ranges["min_motionblur_boost"],
                                                    self.pipeline_param_ranges["max_motionblur_boost"])
        return dict(
            exp_adjustment=exp_adjustment,
            poisson_k=poisson_k,
            read_noise_sigma=read_noise_sigma,
            chromatic_aberration=chromatic_aberration,
            motion_blur=motion_blur,
            motion_blur_dir=motion_blur_dir,
            jpeg_quality=jpeg_quality,
            denoise_sigma_s=denoise_sigma_s,
            denoise_sigma_r=denoise_sigma_r,
            denoise_color_sigma_ratio=denoise_color_sigma_ratio,
            denoise_color_range_ratio=denoise_color_range_ratio,
            unsharp_amount=unsharp_amount,
            denoise_median=denoise_median_sz,
            quantize_bits=quantize_bits,
            wavelet_sigma=wavelet_sigma,
            motionblur_th=motionblur_th,
            motionblur_boost=motionblur_boost,
        )

    @staticmethod
    def _create_pipeline(exp_adjustment,
                         poisson_k,
                         read_noise_sigma,
                         chromatic_aberration,
                         motion_blur_dir,
                         jpeg_quality,
                         denoise_sigma_s,
                         denoise_sigma_r,
                         denoise_color_sigma_ratio,
                         unsharp_amount,
                         denoise_color_only,
                         demosaick,
                         denoise,
                         jpeg_compression,
                         use_motion_blur,
                         use_chromatic_aberration,
                         use_unsharp_mask,
                         exposure_correction,
                         quantize,
                         quantize_bits=8,
                         denoise_guide_transform=None,
                         denoise_n_iter=1,
                         demosaick_use_median=False,
                         demosaick_n_iter=0,
                         use_median_denoise=False,
                         median_before_bilateral=False,
                         denoise_median=None,
                         denoise_median_ratio=1.0,
                         denoise_median_n_iter=1,
                         demosaicked_input=True,
                         log_blackpts=0.004,
                         bilateral_class="DenoisingSKImageBilateralNonDifferentiable",
                         demosaick_class="AHDDemosaickingNonDifferentiable",
                         demosaick_ahd_delta=2.0,
                         demosaick_ahd_sobel_sz=3,
                         demosaick_ahd_avg_sz=3,
                         use_wavelet=False,
                         wavelet_family="db2",
                         wavelet_sigma=None,
                         wavelet_th_method="BayesShrink",
                         wavelet_levels=None,
                         motion_blur=None,
                         motionblur_th=None,
                         motionblur_boost=None,
                         motionblur_segment=1,
                         debug=False,
                         bayer_crop_phase=None,
                         saturation=None,
                         use_autolevel=False,
                         autolevel_max=1.5,
                         autolevel_blk=1,
                         autolevel_wht=99,
                         denoise_color_range_ratio=1,
                         wavelet_last=False,
                         wavelet_threshold=None,
                         wavelet_filter_chrom=True,
                         post_tonemap_class=None,
                         post_tonemap_amount=None,
                         pre_tonemap_class=None,
                         pre_tonemap_amount=None,
                         post_tonemap_class2=None,
                         post_tonemap_amount2=None,
                         repair_hotdead_pixel=False,
                         hot_px_th=0.2,
                         white_balance=False,
                         white_balance_temp=6504,
                         white_balance_tint=0,
                         use_tone_curve3zones=False,
                         tone_curve_highlight=0.0,
                         tone_curve_midtone=0.0,
                         tone_curve_shadow=0.0,
                         tone_curve_midshadow=None,
                         tone_curve_midhighlight=None,
                         unsharp_radius=4.0,
                         unsharp_threshold=3.0,
                         **kwargs):
        """Assemble the (degrade, target) ImageDegradationPipeline pair from
        the flags/params above.  Stage order inside configs_degrade is
        significant; extra **kwargs are accepted and ignored so the config
        dict can be splatted in wholesale.  Returns (degrade_pipeline,
        target_pipeline) where target_pipeline is None when
        exposure_correction is enabled."""
        # Define image degradation pipeline
        # add motion blur and chromatic aberration
        configs_degrade = []
        # Random threshold
        if demosaicked_input:
            # These are features that only make sense to simulate in
            # demosaicked input.
            if use_motion_blur:
                configs_degrade += [
                    ('MotionBlur', {'amt': motion_blur,
                                    'direction': motion_blur_dir,
                                    'kernel_sz': None,
                                    'dynrange_th': motionblur_th,
                                    'dynrange_boost': motionblur_boost,
                                    }
                     )
                ]
            if use_chromatic_aberration:
                configs_degrade += [
                    ('ChromaticAberration', {'scaling': chromatic_aberration}),
                ]
        configs_degrade.append(('ExposureAdjustment', {'nstops': exp_adjustment}))
        if demosaicked_input:
            if demosaick:
                configs_degrade += [
                    ('BayerMosaicking', {}),
                ]
                mosaick_pattern = 'bayer'
            else:
                mosaick_pattern = None
        else:
            mosaick_pattern = 'bayer'
        # Add artificial noise.
        configs_degrade += [
            ('PoissonNoise', {'sigma': poisson_k, 'mosaick_pattern': mosaick_pattern}),
            ('GaussianNoise', {'sigma': read_noise_sigma, 'mosaick_pattern': mosaick_pattern}),
        ]
        if quantize:
            configs_degrade += [
                ('PixelClip', {}),
                ('Quantize', {'nbits': quantize_bits}),
            ]
        if repair_hotdead_pixel:
            configs_degrade += [
                ("RepairHotDeadPixel", {"threshold": hot_px_th}),
            ]
        if demosaick:
            configs_degrade += [
                (demosaick_class, {'use_median_filter': demosaick_use_median,
                                   'n_iter': demosaick_n_iter,
                                   'delta': demosaick_ahd_delta,
                                   'sobel_sz': demosaick_ahd_sobel_sz,
                                   'avg_sz': demosaick_ahd_avg_sz,
                                   }),
                ('PixelClip', {}),
            ]
        if white_balance:
            configs_degrade += [
                ('WhiteBalanceTemperature', {"new_temp": white_balance_temp,
                                             "new_tint": white_balance_tint,
                                             }),
            ]
        if pre_tonemap_class is not None:
            # tonemap classes take either a 'gamma' or an 'amount' kwarg
            kw = "gamma" if "Gamma" in pre_tonemap_class else "amount"
            configs_degrade += [
                (pre_tonemap_class, {kw: pre_tonemap_amount})
            ]
        if use_autolevel:
            configs_degrade.append(('AutoLevelNonDifferentiable', {'max_mult': autolevel_max,
                                                                   'blkpt': autolevel_blk,
                                                                   'whtpt': autolevel_wht,
                                                                   }))
        denoise_list = []
        if denoise:
            denoise_list.append([
                ('PixelClip', {}),
                (bilateral_class, {'sigma_s': denoise_sigma_s,
                                   'sigma_r': denoise_sigma_r,
                                   'color_sigma_ratio': denoise_color_sigma_ratio,
                                   'color_range_ratio': denoise_color_range_ratio,
                                   'filter_lum': not denoise_color_only,
                                   'n_iter': denoise_n_iter,
                                   'guide_transform': denoise_guide_transform,
                                   '_bp': log_blackpts,
                                   }),
                ('PixelClip', {}),
            ])
        if use_median_denoise:
            # TODO: Fix this.
            # Special value because our config can't specify list of list
            if denoise_median == -1:
                denoise_median = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
            if debug:
                print("Denoising with Median Filter")
            denoise_list.append([
                ('DenoisingMedianNonDifferentiable', {'neighbor_sz': denoise_median,
                                                      'color_sigma_ratio': denoise_median_ratio,
                                                      'n_iter': denoise_median_n_iter,
                                                      }),
            ])
        if median_before_bilateral:
            denoise_list = denoise_list[::-1]
        if use_wavelet:
            # always do wavelet first.
            # NOTE(review): the wavelet stage reuses the bilateral kwarg names
            # (sigma_s/sigma_r/...) for unrelated wavelet parameters --
            # presumably intentional, verify against the stage implementation.
            wavelet_config = [
                ('PixelClip', {}),
                ("DenoisingWaveletNonDifferentiable", {'sigma_s': wavelet_th_method,
                                                       'sigma_r': wavelet_sigma,
                                                       'color_sigma_ratio': wavelet_family,
                                                       'filter_lum': True,
                                                       'n_iter': wavelet_levels,
                                                       'guide_transform': denoise_guide_transform,
                                                       '_bp': wavelet_threshold,
                                                       'filter_chrom': wavelet_filter_chrom,
                                                       }),
                ('PixelClip', {}),
            ]
            if wavelet_last:
                denoise_list.append(wavelet_config)
            else:
                denoise_list.insert(0, wavelet_config)
        for i in range(len(denoise_list)):
            configs_degrade += denoise_list[i]
        if post_tonemap_class is not None:
            kw = "gamma" if "Gamma" in post_tonemap_class else "amount"
            configs_degrade += [
                (post_tonemap_class, {kw: post_tonemap_amount})
            ]
        if post_tonemap_class2 is not None:
            kw = "gamma" if "Gamma" in post_tonemap_class2 else "amount"
            configs_degrade += [
                (post_tonemap_class2, {kw: post_tonemap_amount2})
            ]
        if use_tone_curve3zones:
            ctrl_val = [t for t in [tone_curve_shadow,
                                    tone_curve_midshadow,
                                    tone_curve_midtone,
                                    tone_curve_midhighlight,
                                    tone_curve_highlight] if t is not None]
            configs_degrade += [
                ('ToneCurveNZones', {'ctrl_val': ctrl_val,
                                     }),
                ('PixelClip', {}),
            ]
        if use_unsharp_mask:
            configs_degrade += [
                ('Unsharpen', {'amount': unsharp_amount,
                               'radius': unsharp_radius,
                               'threshold': unsharp_threshold}),
                ('PixelClip', {}),
            ]
        if saturation is not None:
            configs_degrade.append(('Saturation', {'value': saturation}))
        # things that happens after camera apply denoising, etc.
        if jpeg_compression:
            configs_degrade += [
                ('sRGBGamma', {}),
                ('Quantize', {'nbits': 8}),
                ('PixelClip', {}),
                ('JPEGCompression', {"quality": jpeg_quality}),
                ('PixelClip', {}),
                ('UndosRGBGamma', {}),
                ('PixelClip', {}),
            ]
        else:
            if quantize:
                configs_degrade += [
                    ('Quantize', {'nbits': 8}),
                    ('PixelClip', {}),
                ]
        if exposure_correction:
            # Finally do exposure correction of weird jpeg-compressed image to get crappy images.
            configs_degrade.append(('ExposureAdjustment', {'nstops': -exp_adjustment}))
            target_pipeline = None
        else:
            configs_target = [
                ('ExposureAdjustment', {'nstops': exp_adjustment}),
                ('PixelClip', {}),
            ]
            target_pipeline = ImageDegradationPipeline(configs_target)
        configs_degrade.append(('PixelClip', {}))
        if debug:
            print('Final config:')
            print('\n'.join([str(c) for c in configs_degrade]))
        degrade_pipeline = ImageDegradationPipeline(configs_degrade)
        return degrade_pipeline, target_pipeline

    def __getitem__(self, index):
        """Return a dict with 'degraded_img' (burst [+ noise channel]),
        'original_img' (target) and 'vis_exposure' for sample `index`."""
        if self.use_cache:
            try:
                data = self._load_tensor(index)
                return data
            except:
                # deliberate best-effort: any cache-load failure falls
                # through to regenerating the sample from scratch
                pass
        t0 = time()
        # original image
        target_path = os.path.join(self.data_root,
                                   self.file_list[index] + '.pth')
        # img = np.load(target_path).astype('float32')
        img = (np.array(Image.open(target_path)) / 255.0).astype(np.float32)
        # degradation pipeline, only one needing for N frame
        t1_load = time()
        degrade_param = self._randomize_parameter()
        degrade_pipeline, target_pipeline = self._create_pipeline(**{**self.pipeline_configs,
                                                                     **degrade_param})
        t2_create_pipeline = time()
        # Actually process image.
        img = FloatTensor(img).permute(2, 0, 1)
        # Crop first so that we don't waste computation on the whole image.
        # image with big jitter on original image
        img_big_jitter = bayer_crop_tensor(
            img, self.im_size_upscale, self.im_size_upscale, self.cropping
        )
        if len(img_big_jitter.size()) == 3:
            img_big_jitter = img_big_jitter.unsqueeze(0)
        # get N frames with big or small jitters
        burst_jitter = []
        for i in range(self.burst_length):
            # this is the ref. frame without shift
            if i == 0:
                burst_jitter.append(
                    F.interpolate(
                        img_big_jitter[:, :, self.big_restore_upscale:-self.big_restore_upscale,
                        self.big_restore_upscale:-self.big_restore_upscale],
                        scale_factor=1 / self.down_sample
                    )
                )
            else:
                # whether to flip the coin
                big_jitter = np.random.binomial(1, np.random.poisson(lam=1.5) / self.burst_length)
                if big_jitter:
                    burst_jitter.append(
                        F.interpolate(
                            bayer_crop_tensor(
                                img_big_jitter,
                                self.im_size_extra,
                                self.im_size_extra,
                                self.cropping
                            ),
                            scale_factor=1 / self.down_sample
                        )
                    )
                else:
                    img_small_jitter = img_big_jitter[:, :, self.big2small_upscale:-self.big2small_upscale,
                                       self.big2small_upscale:-self.big2small_upscale]
                    burst_jitter.append(
                        F.interpolate(
                            bayer_crop_tensor(
                                img_small_jitter,
                                self.im_size_extra,
                                self.im_size_extra,
                                self.cropping
                            ),
                            scale_factor=1 / self.down_sample
                        )
                    )
        burst_jitter = torch.cat(burst_jitter, dim=0)
        degraded = torch.zeros_like(burst_jitter)
        for i in range(self.burst_length):
            degraded[i, ...] = degrade_pipeline(burst_jitter[i, ...])
        # degraded = degrade_pipeline(target)
        target = burst_jitter[0, ...]
        # if not blind estimation, compute the estimated noise
        if not self.blind:
            read_sigma, poisson_k = degrade_param['read_noise_sigma'], degrade_param['poisson_k']
            noise = torch.sqrt(
                read_sigma ** 2 + poisson_k ** 2 * degraded[0, ...]
            ).unsqueeze(0)
            degraded = torch.cat([degraded, noise], dim=0)
        # If not exposure correction, also apply exposure adjustment to the image.
        if not self.pipeline_configs["exposure_correction"]:
            target = target_pipeline(target).squeeze()
        t3_degrade = time()
        exp_adjustment = degrade_param['exp_adjustment']
        # Bayer phase selection
        target = target.unsqueeze(0)
        im = torch.cat([degraded, target], 0)
        if self.pipeline_configs["bayer_crop_phase"] is None:
            # There are 4 phases of Bayer mosaick.
            phase = np.random.choice(4)
        else:
            phase = self.pipeline_configs["bayer_crop_phase"]
        x = phase % 2
        y = (phase // 2) % 2
        im = im[:, :, y:(y + self.im_size), x:(x + self.im_size)]
        degraded, target = torch.split(im, self.burst_length if self.blind else self.burst_length + 1, dim=0)
        t4_bayerphase = time()
        t5_resize = time()
        vis_exposure = 0 if self.pipeline_configs["exposure_correction"] else -exp_adjustment
        t6_bayermask = time()
        if DEBUG_TIME:
            # report per-stage timing breakdown
            print("--------------------------------------------")
            t_total = (t6_bayermask - t0) / 100.0
            t_load = t1_load - t0
            t_create_pipeline = t2_create_pipeline - t1_load
            t_process = t3_degrade - t2_create_pipeline
            t_bayercrop = t4_bayerphase - t3_degrade
            t_resize = t5_resize - t4_bayerphase
            t_bayermask = t6_bayermask - t5_resize
            print("load: {} ({}%)".format(t_load, t_load / t_total))
            print("create_pipeline: {} ({}%)".format(t_create_pipeline, t_create_pipeline / t_total))
            print("process: {} ({}%)".format(t_process, t_process / t_total))
            print("bayercrop: {} ({}%)".format(t_bayercrop, t_bayercrop / t_total))
            print("resize: {} ({}%)".format(t_resize, t_resize / t_total))
            print("bayermask: {} ({}%)".format(t_bayermask, t_bayermask / t_total))
            print("--------------------------------------------")
        data = {'degraded_img': degraded,
                'original_img': target.squeeze(),
                'vis_exposure': FloatTensor([vis_exposure]),
                }
        if self.use_cache:
            # TODO: Start a new thread to save.
            self._save_tensor(data, index)
        return data

    def __len__(self):
        """Number of patch files found in the dataset directory."""
        return len(self.file_list)
class sampler(torch.utils.data.Sampler):
    """Random index sampler that pads the permutation so the number of
    yielded indices is a multiple of ``num_samples``.

    When ``len(data_source)`` is not divisible by ``num_samples``, the
    permutation is padded with a prefix of a second, independent
    permutation so every batch can be full.

    Args:
        data_source: dataset (only its length is used).
        num_samples: granularity the yielded index count is rounded up to.
    """

    def __init__(self, data_source, num_samples):
        self.num_samples = num_samples
        self.total_num = len(data_source)

    def __len__(self):
        # Fix: Sampler previously had no __len__, so DataLoader could not
        # report its length.  Must match the count yielded by __iter__:
        # total_num rounded up to a multiple of num_samples.
        if self.total_num % self.num_samples != 0:
            return (self.total_num // self.num_samples + 1) * self.num_samples
        return self.total_num

    def __iter__(self):
        perm = torch.randperm(self.total_num).tolist()
        if self.total_num % self.num_samples != 0:
            # Pad with the head of a fresh permutation up to the next
            # multiple of num_samples (same behavior as before).
            pad = (self.total_num // self.num_samples + 1) * self.num_samples - self.total_num
            perm = perm + torch.randperm(self.total_num).tolist()[0:pad]
        return iter(perm)
if __name__ == '__main__':
    # The commented-out section below was a cache-priming loop that ran
    # OnTheFlyDataset over several epochs with use_cache=True.
    # import argparse
    # from torch.utils.data import DataLoader
    #
    # parser = argparse.ArgumentParser(description='parameters for training')
    # parser.add_argument('--config_file', dest='config_file', default='kpn_specs/kpn_config.conf',
    #                     help='path to config file')
    # parser.add_argument('--config_spec', dest='config_spec', default='kpn_specs/configspec.conf',
    #                     help='path to config spec file')
    # parser.add_argument('--restart', action='store_true',
    #                     help='Whether to remove all old files and restart the training process')
    # parser.add_argument('--num_workers', '-nw', default=4, type=int, help='number of workers in data loader')
    # parser.add_argument('--num_threads', '-nt', default=8, type=int, help='number of threads in data loader')
    # parser.add_argument('--cuda', '-c', action='store_true', help='whether to train on the GPU')
    # parser.add_argument('--mGPU', '-m', action='store_true', help='whether to train on multiple GPUs')
    # args = parser.parse_args()
    #
    # print(args)
    #
    # config = read_config(args.config_file, args.config_spec)
    # train_config = config["training"]
    #
    #
    # i = 0
    # while i < 15:
    #     train_data = OnTheFlyDataset(train_config["dataset_configs"],
    #                                  use_cache=True,
    #                                  cache_dir='/home/bingo/burst-denoise/dataset/synthetic',
    #                                  blind=False,
    #                                  dataset_name='{:02d}'.format(i))
    #     train_loader = DataLoader(train_data, batch_size=1, shuffle=True, num_workers=args.num_workers)
    #     for index, data in enumerate(train_loader):
    #         print('epoch {}, step {} is ok'.format(i, index))
    #     i += 1
    # One-off maintenance: renumber cached .pth files sequentially.
    # NOTE(review): destructive rename over a hardcoded path -- run with care.
    files = os.listdir('/home/bingo/burst-denoise/dataset/synthetic')
    files.sort()
    for index, f in enumerate(files):
        os.rename(os.path.join('/home/bingo/burst-denoise/dataset/synthetic', f),
                  os.path.join('/home/bingo/burst-denoise/dataset/synthetic', '{:06d}.pth'.format(index)))
| 31,377 | 45.076358 | 193 | py |
kernel-prediction-networks-PyTorch | kernel-prediction-networks-PyTorch-master/data_generation/generate_dataset.py | import tifffile
import skimage
import numpy as np
import os
import argparse
import glob
import json
from tqdm import tqdm
from sklearn.feature_extraction.image import extract_patches_2d
import torch
from torch.autograd import Variable
from torch import FloatTensor
from data_generation.pipeline import ImageDegradationPipeline
from data_generation.constants import XYZ2sRGB, ProPhotoRGB2XYZ
def numpy2tensor(arr):
    """Convert an HxW or HxWxC array in [0, 255] to a 1xCxHxW float
    tensor scaled to [0, 1]."""
    if arr.ndim < 3:
        # promote grayscale HxW to HxWx1 so the permute below works
        arr = arr[..., None]
    chw = FloatTensor(arr).permute(2, 0, 1)
    return chw.unsqueeze(0).float() / 255.0
def tensor2numpy(t, idx=None):
    """Convert one image of an NxCxHxW tensor (index `idx`, default the
    first) to an HxWxC numpy array, clamped to [0, 1] and squeezed."""
    which = 0 if idx is None else idx
    img = torch.clamp(t, 0, 1)[which, ...]
    return img.permute(1, 2, 0).cpu().squeeze().numpy()
# Command-line patch-generation script: degrades MIT-5k style 16-bit TIFFs,
# extracts aligned (degraded, target) patch pairs and writes them into
# train/test folder trees until --total_patch patches exist.
parser = argparse.ArgumentParser(description='')
parser.add_argument('--im_folder', required=True, help='path to input images')
parser.add_argument('--out_dir', required=True, help='path to place output')
parser.add_argument('--total_patch', type=int, required=True, help='total number of patches to generate')
parser.add_argument('--patch_per_image', type=int, default=5, help='Number of patch to generate from a single degradation of an image')
parser.add_argument('--patch_sz', type=int, default=256, help='Patch size (square patch for now)')
parser.add_argument('--fraction_train', type=float, default=0.8, help='Fraction of images to use as training')
parser.add_argument('--input_ext', default='tif', help='path to place output')
parser.add_argument('--max_exposure', type=float, default=0.0, help='maximum exposure adjustment in stops')
parser.add_argument('--min_exposure', type=float, default=0.0, help='minimum exposure adjustment in stops')
parser.add_argument('--max_gaussian_noise', type=float, default=0.0, help='maximum gaussian noise std (on range 0 - 1)')
parser.add_argument('--min_gaussian_noise', type=float, default=0.0, help='minimum gaussian noise std (on range 0 - 1)')
parser.add_argument('--max_poisson_noise', type=float, default=0.0, help='maximum poisson noise mult (See image_processing.PoissonNoise for detail)')
parser.add_argument('--min_poisson_noise', type=float, default=0.0, help='minimum poisson noise mult (See image_processing.PoissonNoise for detail)')
parser.add_argument('--skip_degraded', action="store_true", help='Whether to skip degraded images.')
parser.add_argument('--dwn_factor', type=float, default=4, help='Factor to downsample.')
args = parser.parse_args()
im_names = glob.glob(os.path.join(args.im_folder, '*.' + args.input_ext))
im_names = sorted([os.path.basename(i) for i in im_names])
# Create output folder
os.makedirs(args.out_dir, exist_ok=True)
train_dir = os.path.join(args.out_dir, 'train')
test_dir = os.path.join(args.out_dir, 'test')
os.makedirs(train_dir, exist_ok=True)
os.makedirs(test_dir, exist_ok=True)
# Each split gets images/target, images/degraded and meta subfolders.
for base_dir in [train_dir, test_dir]:
    target_dir = os.path.join(base_dir, 'images', 'target')
    degraded_dir = os.path.join(base_dir, 'images', 'degraded')
    meta_dir = os.path.join(base_dir, 'meta')
    os.makedirs(target_dir, exist_ok=True)
    os.makedirs(degraded_dir, exist_ok=True)
    os.makedirs(meta_dir, exist_ok=True)
n_count = 0
img_idx = 0
progress_bar = tqdm(total=args.total_patch)
# Cycle through the input images (wrapping around) until enough patches exist.
while n_count < args.total_patch:
    # First fraction_train of the (sorted) images feed the train split.
    if img_idx < args.fraction_train * len(im_names):
        base_dir = train_dir
    else:
        base_dir = test_dir
    target_dir = os.path.join(base_dir, 'images', 'target')
    degraded_dir = os.path.join(base_dir, 'images', 'degraded')
    meta_dir = os.path.join(base_dir, 'meta')
    name = im_names[img_idx]
    path = os.path.join(args.im_folder, name)
    # We know 5k dataset is 16-bits.
    raw_im = tifffile.imread(path).astype('float32') / 65536.0
    raw_im = FloatTensor(raw_im).permute(2, 0, 1).unsqueeze(0)
    # Define pipeline: draw per-image random degradation parameters.
    poisson_k = np.random.uniform(args.min_poisson_noise, args.max_poisson_noise)
    read_noise_sigma = np.random.uniform(args.min_gaussian_noise, args.max_gaussian_noise)
    dwn_factor = args.dwn_factor
    exp_adjustment = np.random.uniform(args.min_exposure, args.max_exposure)
    # ProPhoto RGB (gamma) -> linear sRGB
    configs_prepreprocess = [
        ('UndoProPhotoRGBGamma', {}),
        # Convert to sRGB
        ('ColorSpaceConversionMatrix', {'matrix': torch.matmul(XYZ2sRGB, ProPhotoRGB2XYZ)}),
    ]
    configs_preprocess = [
        # Blur and downsample to reduce noise
        ('GaussianBlur', {'sigma_x': dwn_factor}),
        ('PytorchResizing', {'resizing_factor': 1.0/dwn_factor, 'mode': 'nearest'})
    ]
    # Exposure shift + Bayer mosaick + noise, then undo exposure and demosaick.
    configs_degrade = [
        ('ExposureAdjustment', {'nstops': exp_adjustment}),
        # ('MotionBlur', {'amt': [3, 2], 'direction': [0, 45,]}),
        ('BayerMosaicking', {}),
        # Add artificial noise.
        ('PoissonNoise',{'sigma': FloatTensor([poisson_k] * 3), 'mosaick_pattern': 'bayer'}),
        ('GaussianNoise',{'sigma': FloatTensor([read_noise_sigma] * 3), 'mosaick_pattern': 'bayer'}),
        ('PixelClip', {}),
        ('ExposureAdjustment', {'nstops': -exp_adjustment}),
        ('PixelClip', {}),
        ('NaiveDemosaicking', {}),
        ('PixelClip', {}),
    ]
    # Light denoise + sRGB gamma for the stored degraded image.
    configs_denoise = [
        ('DenoisingBilateral',{'sigma_s': 1.0, 'sigma_r': 0.1}),
        ('PixelClip', {}),
        ('sRGBGamma', {}),
    ]
    pipeline_prepreprocess = ImageDegradationPipeline(configs_prepreprocess)
    pipeline_preprocess = ImageDegradationPipeline(configs_preprocess)
    pipeline_degrade = ImageDegradationPipeline(configs_degrade)
    pipeline_denoise = ImageDegradationPipeline(configs_denoise)
    demosaicked = pipeline_prepreprocess(raw_im)
    preprocessed = pipeline_preprocess(demosaicked)
    degraded = pipeline_degrade(preprocessed)
    denoised = pipeline_denoise(degraded)
    denoised_numpy = tensor2numpy(denoised)
    preprocessed_numpy = tensor2numpy(preprocessed)
    # Stack degraded and clean along channels so one crop aligns both.
    stacked = np.concatenate((denoised_numpy, preprocessed_numpy), axis=-1)
    patches = extract_patches_2d(stacked,
                                 (args.patch_sz, args.patch_sz),
                                 args.patch_per_image)
    degraded_patches, target_patches = np.split(patches, 2, axis=-1)
    target_patches = np.split(target_patches, target_patches.shape[0])
    degraded_patches = np.split(degraded_patches, degraded_patches.shape[0])
    # Per-image metadata recorded next to the patches.
    meta = dict(orig=name,
                poisson_k=poisson_k,
                read_noise_sigma=read_noise_sigma,
                exp_adjustment=exp_adjustment,
                dwn_factor=dwn_factor)
    n_patches = len(degraded_patches)
    for i in range(n_patches):
        patch_idx = n_count + i + 1
        degraded = np.clip(degraded_patches[i] * 255.0, 0, 255).astype('uint8')
        if not args.skip_degraded:
            skimage.io.imsave(os.path.join(degraded_dir,
                                           "{:06d}.png".format(patch_idx)
                                           ),
                              np.squeeze(degraded))
        np.save(os.path.join(target_dir,
                             "{:06d}.npy".format(patch_idx)
                             ),
                np.squeeze(target_patches[i]))
        with open(os.path.join(meta_dir,
                               '{:06d}.json'.format(patch_idx)),
                  'w') as f:
            json.dump(meta, f)
    n_count += n_patches
    img_idx = (img_idx + 1) % len(im_names)
    progress_bar.update(n_patches)
progress_bar.close()
| 7,397 | 41.034091 | 149 | py |
kernel-prediction-networks-PyTorch | kernel-prediction-networks-PyTorch-master/data_generation/constants.py | import math
import torch
from torch import FloatTensor
# XYZ (D65) -> linear sRGB conversion matrix.
XYZ2sRGB = FloatTensor([[ 3.2406, -1.5372, -0.4986],
                        [-0.9689, 1.8758, 0.0415],
                        [ 0.0557, -0.2040, 1.0570]])
# http://brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
ProPhotoRGB2XYZ = FloatTensor([[0.7976749, 0.1351917, 0.0313534],
                               [0.2880402, 0.7118741, 0.0000857],
                               [0.0000000, 0.0000000, 0.8252100]])
# BT.601 luma with chroma scaled to [-1, 1]:
# U = (B - Y) / 0.886, V = (R - Y) / 0.701.
RGB2YUV = FloatTensor([[0.29900, 0.5870, 0.1140],
                       [-.33750, -.6625, 1.0000],
                       [1.00000, -.8374, -.1626]])
# Inverse of RGB2YUV above.
YUV2RGB = FloatTensor([[1.0, 0.0000, 0.7010],
                       [1.0, -.1721, -.3571],
                       [1.0, 0.8860, 0.0]])
xyz_color_matching = {
"lambda": FloatTensor([390,395,400,405,410,415,420,425,430,435,440,445,450,455,460,465,470,475,480,485,490,495,500,505,510,515,520,525,530,535,540,545,550,555,560,565,570,575,580,585,590,595,600,605,610,615,620,625,630,635,640,645,650,655,660,665,670,675,680,685,690,695,700,705,710,715,720,725,730,735,740,745,750,755,760,765,770,775,780,785,790,795,800,805,810,815,820,825,830]),
"xyz": FloatTensor([[0.003769647,0.009382967,0.02214302,0.04742986,0.08953803,0.1446214,0.2035729,0.2488523,0.2918246,0.3227087,0.3482554,0.3418483,0.3224637,0.2826646,0.2485254,0.2219781,0.1806905,0.129192,0.08182895,0.04600865,0.02083981,0.007097731,0.002461588,0.003649178,0.01556989,0.04315171,0.07962917,0.1268468,0.1818026,0.2405015,0.3098117,0.3804244,0.4494206,0.5280233,0.6133784,0.7016774,0.796775,0.8853376,0.9638388,1.051011,1.109767,1.14362,1.151033,1.134757,1.083928,1.007344,0.9142877,0.8135565,0.6924717,0.575541,0.4731224,0.3844986,0.2997374,0.2277792,0.1707914,0.1263808,0.09224597,0.0663996,0.04710606,0.03292138,0.02262306,0.01575417,0.01096778,0.00760875,0.005214608,0.003569452,0.002464821,0.001703876,0.001186238,0.000826954,0.00057583,0.00040583,0.000285658,0.000202185,0.000143827,0.000102469,7.34755E-05,5.25987E-05,3.80611E-05,2.75822E-05,2.00412E-05,1.45879E-05,1.06814E-05,7.85752E-06,5.76828E-06,4.25917E-06,3.16777E-06,2.35872E-06,1.76247E-06],
[0.000414616,0.001059646,0.002452194,0.004971717,0.00907986,0.01429377,0.02027369,0.02612106,0.03319038,0.0415794,0.05033657,0.05743393,0.06472352,0.07238339,0.08514816,0.1060145,0.1298957,0.1535066,0.1788048,0.2064828,0.237916,0.285068,0.3483536,0.4277595,0.5204972,0.6206256,0.718089,0.7946448,0.8575799,0.9071347,0.9544675,0.9814106,0.9890228,0.9994608,0.9967737,0.9902549,0.9732611,0.9424569,0.8963613,0.8587203,0.8115868,0.7544785,0.6918553,0.6270066,0.5583746,0.489595,0.4229897,0.3609245,0.2980865,0.2416902,0.1943124,0.1547397,0.119312,0.08979594,0.06671045,0.04899699,0.03559982,0.02554223,0.01807939,0.01261573,0.008661284,0.006027677,0.004195941,0.002910864,0.001995557,0.001367022,0.000944727,0.000653705,0.000455597,0.000317974,0.000221745,0.000156557,0.000110393,7.82744E-05,5.57886E-05,3.98188E-05,2.86018E-05,2.05126E-05,1.48724E-05,1.08E-05,7.86392E-06,5.73694E-06,4.2116E-06,3.10656E-06,2.28679E-06,1.69315E-06,1.26256E-06,9.42251E-07,7.05386E-07],
[0.0184726,0.04609784,0.109609,0.2369246,0.4508369,0.7378822,1.051821,1.305008,1.552826,1.74828,1.917479,1.918437,1.848545,1.664439,1.522157,1.42844,1.25061,0.9991789,0.7552379,0.5617313,0.4099313,0.3105939,0.2376753,0.1720018,0.1176796,0.08283548,0.05650407,0.03751912,0.02438164,0.01566174,0.00984647,0.006131421,0.003790291,0.002327186,0.001432128,0.000882253,0.000545242,0.000338674,0.000211777,0.000133503,8.49447E-05,5.46071E-05,3.54966E-05,2.33474E-05,1.55463E-05,1.04839E-05,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])
}
# default 50% quality
# (the standard JPEG luminance quantization table, ITU-T T.81 Annex K).
default_jpeg_quantization_matrix = \
    FloatTensor([[16, 11, 10, 16, 24, 40, 51, 61],
                 [12, 12, 14, 19, 26, 58, 60, 55],
                 [14, 13, 16, 24, 40, 57, 69, 56],
                 [14, 17, 22, 29, 51, 87, 80, 62],
                 [18, 22, 37, 56, 68, 109, 103, 77],
                 [24, 35, 55, 64, 81, 104, 113, 92],
                 [49, 64, 78, 87, 103, 121, 120, 101],
                 [72, 92, 95, 98, 112, 100, 103, 99]])
# Photoshop quantization.
# https://www.impulseadventure.com/photo/jpeg-quantization.html
photoshop_jpeg_quantization_lum = \
[
# Luminance Level 0
FloatTensor([
[32, 33, 51, 81, 66, 39, 34, 17],
[33, 36, 48, 47, 28, 23, 12, 12],
[51, 48, 47, 28, 23, 12, 12, 12],
[81, 47, 28, 23, 12, 12, 12, 12],
[66, 28, 23, 12, 12, 12, 12, 12],
[39, 23, 12, 12, 12, 12, 12, 12],
[34, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 1
FloatTensor([
[27, 26, 41, 65, 66, 39, 34, 17],
[26, 29, 38, 47, 28, 23, 12, 12],
[41, 38, 47, 28, 23, 12, 12, 12],
[65, 47, 28, 23, 12, 12, 12, 12],
[66, 28, 23, 12, 12, 12, 12, 12],
[39, 23, 12, 12, 12, 12, 12, 12],
[34, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 2
FloatTensor([
[20, 17, 26, 41, 51, 39, 34, 17],
[17, 18, 24, 39, 28, 23, 12, 12],
[26, 24, 32, 28, 23, 12, 12, 12],
[41, 39, 28, 23, 12, 12, 12, 12],
[51, 28, 23, 12, 12, 12, 12, 12],
[39, 23, 12, 12, 12, 12, 12, 12],
[34, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 3
FloatTensor([
[18, 14, 22, 35, 44, 39, 34, 17],
[14, 16, 21, 34, 28, 23, 12, 12],
[22, 21, 27, 28, 23, 12, 12, 12],
[35, 34, 28, 23, 12, 12, 12, 12],
[44, 28, 23, 12, 12, 12, 12, 12],
[39, 23, 12, 12, 12, 12, 12, 12],
[34, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 4
FloatTensor([
[16, 11, 17, 27, 34, 39, 34, 17],
[11, 12, 16, 26, 28, 23, 12, 12],
[17, 16, 21, 28, 23, 12, 12, 12],
[27, 26, 28, 23, 12, 12, 12, 12],
[34, 28, 23, 12, 12, 12, 12, 12],
[39, 23, 12, 12, 12, 12, 12, 12],
[34, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 5
FloatTensor([
[12, 8, 13, 21, 26, 32, 34, 17],
[8, 9, 12, 20, 27, 23, 12, 12],
[13, 12, 16, 26, 23, 12, 12, 12],
[21, 20, 26, 23, 12, 12, 12, 12],
[26, 27, 23, 12, 12, 12, 12, 12],
[32, 23, 12, 12, 12, 12, 12, 12],
[34, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 6
FloatTensor([
[8, 6, 9, 14, 17, 21, 28, 17],
[6, 6, 8, 13, 18, 23, 12, 12],
[9, 8, 11, 17, 23, 12, 12, 12],
[14, 13, 17, 23, 12, 12, 12, 12],
[17, 18, 23, 12, 12, 12, 12, 12],
[21, 23, 12, 12, 12, 12, 12, 12],
[28, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 7
FloatTensor([
[10, 7, 11, 18, 22, 27, 34, 17],
[7, 8, 10, 17, 23, 23, 12, 12],
[11, 10, 14, 22, 23, 12, 12, 12],
[18, 17, 22, 23, 12, 12, 12, 12],
[22, 23, 23, 12, 12, 12, 12, 12],
[27, 23, 12, 12, 12, 12, 12, 12],
[34, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 8
FloatTensor([
[6, 4, 7, 11, 14, 17, 22, 17],
[4, 5, 6, 10, 14, 19, 12, 12],
[7, 6, 8, 14, 19, 12, 12, 12],
[11, 10, 14, 19, 12, 12, 12, 12],
[14, 14, 19, 12, 12, 12, 12, 12],
[17, 19, 12, 12, 12, 12, 12, 12],
[22, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 9
FloatTensor([
[4, 3, 4, 7, 9, 11, 14, 17],
[3, 3, 4, 7, 9, 12, 12, 12],
[4, 4, 5, 9, 12, 12, 12, 12],
[7, 7, 9, 12, 12, 12, 12, 12],
[9, 9, 12, 12, 12, 12, 12, 12],
[11, 12, 12, 12, 12, 12, 12, 12],
[14, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 10
FloatTensor([
[2, 2, 3, 4, 5, 6, 8, 11],
[2, 2, 2, 4, 5, 7, 9, 11],
[3, 2, 3, 5, 7, 9, 11, 12],
[4, 4, 5, 7, 9, 11, 12, 12],
[5, 5, 7, 9, 11, 12, 12, 12],
[6, 7, 9, 11, 12, 12, 12, 12],
[8, 9, 11, 12, 12, 12, 12, 12],
[11, 11, 12, 12, 12, 12, 12, 12],
]),
# Luminance Level 11
FloatTensor([
[1, 1, 1, 2, 3, 3, 4, 5],
[1, 1, 1, 2, 3, 4, 4, 6],
[1, 1, 2, 3, 4, 4, 5, 7],
[2, 2, 3, 4, 4, 5, 7, 8],
[3, 3, 4, 4, 5, 7, 8, 8],
[3, 4, 4, 5, 7, 8, 8, 8],
[4, 4, 5, 7, 8, 8, 8, 8],
[5, 6, 7, 8, 8, 8, 8, 8],
]),
# Luminance Level 12
FloatTensor([
[1, 1, 1, 1, 1, 1, 1, 2],
[1, 1, 1, 1, 1, 1, 1, 2],
[1, 1, 1, 1, 1, 1, 2, 2],
[1, 1, 1, 1, 1, 2, 2, 3],
[1, 1, 1, 1, 2, 2, 3, 3],
[1, 1, 1, 2, 2, 3, 3, 3],
[1, 1, 2, 2, 3, 3, 3, 3],
[2, 2, 2, 3, 3, 3, 3, 3],
]),
]
photoshop_jpeg_quantization_chrom = \
[
# Chrominance Level 0
FloatTensor([
[34, 51, 52, 34, 20, 20, 17, 17],
[51, 38, 24, 14, 14, 12, 12, 12],
[52, 24, 14, 14, 12, 12, 12, 12],
[34, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 1
FloatTensor([
[29, 41, 52, 34, 20, 20, 17, 17],
[41, 38, 24, 14, 14, 12, 12, 12],
[52, 24, 14, 14, 12, 12, 12, 12],
[34, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 2
FloatTensor([
[21, 26, 33, 34, 20, 20, 17, 17],
[26, 29, 24, 14, 14, 12, 12, 12],
[33, 24, 14, 14, 12, 12, 12, 12],
[34, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 3
FloatTensor([
[20, 22, 29, 34, 20, 20, 17, 17],
[22, 25, 24, 14, 14, 12, 12, 12],
[29, 24, 14, 14, 12, 12, 12, 12],
[34, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 4
FloatTensor([
[17, 17, 22, 34, 20, 20, 17, 17],
[17, 19, 22, 14, 14, 12, 12, 12],
[22, 22, 14, 14, 12, 12, 12, 12],
[34, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 5
FloatTensor([
[13, 13, 17, 27, 20, 20, 17, 17],
[13, 14, 17, 14, 14, 12, 12, 12],
[17, 17, 14, 14, 12, 12, 12, 12],
[27, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 6
FloatTensor([
[9, 9, 11, 18, 20, 20, 17, 17],
[9, 10, 11, 14, 14, 12, 12, 12],
[11, 11, 14, 14, 12, 12, 12, 12],
[18, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 7
FloatTensor([
[11, 14, 31, 34, 20, 20, 17, 17],
[14, 19, 24, 14, 14, 12, 12, 12],
[31, 24, 14, 14, 12, 12, 12, 12],
[34, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 8
FloatTensor([
[7, 9, 19, 34, 20, 20, 17, 17],
[9, 12, 19, 14, 14, 12, 12, 12],
[19, 19, 14, 14, 12, 12, 12, 12],
[34, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 9
FloatTensor([
[4, 6, 12, 22, 20, 20, 17, 17],
[6, 8, 12, 14, 14, 12, 12, 12],
[12, 12, 14, 14, 12, 12, 12, 12],
[22, 14, 14, 12, 12, 12, 12, 12],
[20, 14, 12, 12, 12, 12, 12, 12],
[20, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
[17, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 10
FloatTensor([
[3, 3, 7, 13, 15, 15, 15, 15],
[3, 4, 7, 13, 14, 12, 12, 12],
[7, 7, 13, 14, 12, 12, 12, 12],
[13, 13, 14, 12, 12, 12, 12, 12],
[15, 14, 12, 12, 12, 12, 12, 12],
[15, 12, 12, 12, 12, 12, 12, 12],
[15, 12, 12, 12, 12, 12, 12, 12],
[15, 12, 12, 12, 12, 12, 12, 12],
]),
# Chrominance Level 11
FloatTensor([
[1, 2, 4, 7, 8, 8, 8, 8],
[2, 2, 4, 7, 8, 8, 8, 8],
[4, 4, 7, 8, 8, 8, 8, 8],
[7, 7, 8, 8, 8, 8, 8, 8],
[8, 8, 8, 8, 8, 8, 8, 8],
[8, 8, 8, 8, 8, 8, 8, 8],
[8, 8, 8, 8, 8, 8, 8, 8],
[8, 8, 8, 8, 8, 8, 8, 8],
]),
# Chrominance Level 12
FloatTensor([
[1, 1, 1, 2, 3, 3, 3, 3],
[1, 1, 1, 2, 3, 3, 3, 3],
[1, 1, 2, 3, 3, 3, 3, 3],
[2, 2, 3, 3, 3, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3],
]),
]
# 0-6 have subsampling, 7-12 don't.
# Indexed by Photoshop JPEG quality level (0-12); True means chroma is subsampled.
photoshop_chroma_subsampling = [True] * 7 + [False] * 6
# DCT Coefficients
# The inverse coefficient is the same.
def _DCT_coeff():
v = torch.arange(8).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand((8, 8, 8, 8)).float()
u = torch.arange(8).unsqueeze( 0).unsqueeze(-1).unsqueeze(-1).expand((8, 8, 8, 8)).float()
y = torch.arange(8).unsqueeze( 0).unsqueeze( 0).unsqueeze(-1).expand((8, 8, 8, 8)).float()
x = torch.arange(8).unsqueeze( 0).unsqueeze( 0).unsqueeze( 0).expand((8, 8, 8, 8)).float()
au = torch.ones((8, 8, 8, 8)).float()
av = torch.ones((8, 8, 8, 8)).float()
av[0, :, ...] = 0.707 # 1 / sqrt(2)
au[:, 0, ...] = 0.707 # 1 / sqrt(2)
coeff = au * av * torch.cos((2*x + 1)*u*math.pi/16.0) \
* torch.cos((2*y + 1)*v*math.pi/16.0)
return coeff * 0.25
DCT_coeff = _DCT_coeff()
| 16,240 | 44.113889 | 990 | py |
kernel-prediction-networks-PyTorch | kernel-prediction-networks-PyTorch-master/data_generation/data_utils.py | """ Utilities functions.
"""
import numbers
import numpy as np
import torch
from torch import FloatTensor
def random_crop(im, num_patches, w, h=None):
    """Extract `num_patches` random crops of size (h, w) from `im`.

    Crops are drawn independently (with replacement) and returned as views
    into `im`; no copy is made.

    Args:
        im: tensor whose last two dimensions are (height, width).
        num_patches: number of crops to draw.
        w: crop width in pixels.
        h: crop height in pixels; defaults to `w` (square crop).

    Returns:
        List of `num_patches` cropped tensors.

    Raises:
        RuntimeError: if the image is smaller than the requested crop size.
    """
    h = w if h is None else h
    max_x = im.size(-1) - w
    max_y = im.size(-2) - h
    if max_x < 0 or max_y < 0:
        # Fixed typo in the original message ("to small" -> "too small").
        raise RuntimeError("Image is too small {} for the desired size {}". \
                           format((im.size(-1), im.size(-2)), (w, h))
                           )
    idx_w = np.random.choice(max_x + 1, size=num_patches)
    idx_h = np.random.choice(max_y + 1, size=num_patches)
    return [im[...,
               idx_h[i]:(idx_h[i] + h),
               idx_w[i]:(idx_w[i] + w)]
            for i in range(num_patches)]
def expand_to_4d_channel(arr):
    """Expand a scalar or 1-D tensor to 4-D (batch x channel x H x W).

    A 1-D input is interpreted as the channel dimension; leading batch
    dimensions are prepended and trailing spatial singletons appended.

    Args:
        arr: tensor (0-D to 4-D) to be expanded.
    """
    expanded = arr
    # Prepend batch dims until at least (batch, channel).
    while expanded.dim() < 2:
        expanded = expanded.unsqueeze(0)
    # Append singleton spatial dims until 4-D.
    while expanded.dim() < 4:
        expanded = expanded.unsqueeze(-1)
    return expanded
def expand_to_4d_batch(arr):
    """Expand a scalar or 1-D tensor to 4-D, treating dim 0 as batch.

    Trailing singleton (channel and spatial) dimensions are appended
    until the tensor is 4-D.

    Args:
        arr: tensor (0-D to 4-D) to be expanded.
    """
    expanded = arr
    while expanded.dim() < 4:
        expanded = expanded.unsqueeze(-1)
    return expanded
def is_number(a):
    """Return True if `a` is a scalar number (an instance of numbers.Number)."""
    return isinstance(a, numbers.Number)
def python_to_tensor(a):
    """Wrap a plain Python number in a 1-element FloatTensor.

    Non-number arguments (e.g. tensors) are returned unchanged.
    """
    if not isinstance(a, numbers.Number):
        return a
    return FloatTensor([a])
def number_to_list(a):
    """Wrap a scalar number in a single-element list; pass anything else through."""
    return [a] if isinstance(a, numbers.Number) else a
def cuda_like(arr, src):
    """Move `arr` onto the GPU when `src` lives on the GPU, else onto the CPU."""
    return arr.cuda() if src.is_cuda else arr.cpu()
def mosaick_multiply(mult, im, mosaick_pattern):
    """Multiply an image by per-channel gains, aware of the mosaick layout.

    Args:
        mult: n-list/tensor of multipliers (n = image channels); an optional
            batch dimension may precede it.
        im: tensor of size n_batch x n_channel x height x width.
        mosaick_pattern: None for a plain per-channel multiply, or "bayer"
            for a single-channel GRGB Bayer image.

    Raises:
        ValueError: for an unsupported mosaick pattern.
    """
    if mosaick_pattern is None:
        return im * expand_to_4d_channel(mult)
    if mosaick_pattern != "bayer":
        raise ValueError("Mosaick pattern, {}, not supported." \
                         .format(mosaick_pattern))
    # Assume GRGB format.
    gains = expand_to_4d_channel(mult).expand(-1, 3, -1, -1)
    height, width = im.size(2), im.size(3)
    # Pixel coordinate grids, shaped 1 x 1 x H x W for broadcasting.
    col = torch.arange(width).unsqueeze(0).expand(height, -1)
    row = torch.arange(height).unsqueeze(-1).expand(-1, width)
    col = col.unsqueeze(0).unsqueeze(0)
    row = row.unsqueeze(0).unsqueeze(0)
    if im.is_cuda:
        col = col.cuda()
        row = row.cuda()
    col_odd = torch.fmod(col, 2)
    row_odd = torch.fmod(row, 2)
    # Green where row and column parity agree; red on even rows / odd
    # columns; blue on odd rows / even columns.
    green_mask = (col_odd == row_odd).float()
    red_mask = (col_odd * (1.0 - row_odd)).float()
    blue_mask = ((1.0 - col_odd) * row_odd).float()
    return im * gains[:, 0:1, ...] * red_mask + \
           im * gains[:, 1:2, ...] * green_mask + \
           im * gains[:, 2:3, ...] * blue_mask
| 3,336 | 25.696 | 76 | py |
kernel-prediction-networks-PyTorch | kernel-prediction-networks-PyTorch-master/data_generation/image_processing.py | import torch
import torch.nn as nn
from torch import FloatTensor, IntTensor
# For drawing motion blur kernel.
import numpy as np
import cv2
import scipy
import functools
import math
from .data_utils import mosaick_multiply, expand_to_4d_batch
from .data_utils import python_to_tensor, cuda_like, number_to_list, is_number
from .kernel import gausskern1d, gausskern2d
from .constants import xyz_color_matching, XYZ2sRGB
from .constants import RGB2YUV, YUV2RGB
from .constants import DCT_coeff
from .constants import photoshop_jpeg_quantization_lum
from .constants import photoshop_jpeg_quantization_chrom
from .constants import photoshop_chroma_subsampling
from .ahd_demosaicking import ahd_demosaicking
from utils.image_utils import check_nan_tensor
import skimage
from .denoise_wavelet import denoise_wavelet as sk_denoise_wavelet
try:
from halide.gradient_apps.gapps import functions as halide_funcs
HAS_HALIDE = True
except:
HAS_HALIDE = False
DEBUG = False
def _has_halide():
    """Return True if the optional Halide extension imported successfully."""
    return HAS_HALIDE
# TODO: Check if I need to set required_grad properly on all constant tensors.
class IdentityModule(nn.Module):
    """Dummy module for testing: returns an unchanged copy of its input."""
    def __init__(self):
        super().__init__()

    def forward(self, image):
        # BUG FIX: torch.Tensor has no `.copy()` method (that is the numpy
        # API), so the original `image.copy()` raised AttributeError.
        # `.clone()` returns an independent copy of the tensor.
        return image.clone()
# Halide-based implementations.
# Cellphone Image Processing
class DenoisingBilateral(nn.Module):
    """Gaussian bilateral-grid denoiser (Halide backend), applied in YUV space."""
    # TODO: support batch
    # TODO: support GPU.
    def __init__(self,
                 sigma_s,
                 sigma_r,
                 color_sigma_ratio=5,
                 filter_lum=True,
                 filter_chrom=True,
                 n_iter=1,
                 guide_transform=None,
                 _bp=0.004,
                 color_range_ratio=1):
        """ Apply Gaussian bilateral filter to denoise image.
        Args:
            sigma_s: stdev in spatial dimension.
            sigma_r: stdev in the range dimension.
            color_sigma_ratio: multiplier for spatial sigma for filtering
                chrominance.
            filter_lum: whether or not to filter luminance (useful if want to
                filter chrominance only).
            filter_chrom: same as filter_lum but for chrominance.
            n_iter: number of times to apply this filter.
            guide_transform: transformation to apply to the guide map. Must be
                'sqrt', 'log', None, or a number. If a number, this is use as
                the exponent to transform the guide according to power law.
            _bp: Black point for log transform. This is used to prevent taking
                log of zeros or negative numbers. Must be positive.
            color_range_ratio: multiplier for range sigma for filtering
                chrominance.
        """
        super().__init__()
        self.sigma_s = sigma_s
        self.sigma_r = sigma_r
        self.color_sigma_ratio = color_sigma_ratio
        self.color_range_ratio = color_range_ratio
        # Filtering is done in YUV so luminance and chrominance can use
        # different spatial/range sigmas.
        self.rgb2yuv = ColorSpaceConversionMatrix(RGB2YUV)
        self.yuv2rgb = ColorSpaceConversionMatrix(YUV2RGB)
        self.filter_lum = filter_lum
        self.filter_chrom = filter_chrom
        self.n_iter = n_iter
        self.guide_transform = guide_transform
        self._bp = _bp
        if self.guide_transform not in ['sqrt', 'log', None] and \
           not (is_number(self.guide_transform)):
            raise ValueError('Invalid guide transformation received: {}'.format(guide_transform))
        if self.guide_transform == 'sqrt':
            # sqrt is a power-law transform with exponent 0.5.
            self.guide_transform = 0.5
    def forward(self, image):
        """Denoise an RGB tensor; returns a tensor of the same shape."""
        if not _has_halide():
            raise RuntimeError("Need halide in order to run this")
        if DEBUG and check_nan_tensor(image):
            print("Denoising input has NAN!")
        # Precompute the 1-D Gaussian kernels used by the bilateral grid.
        self._filter_s = FloatTensor(gausskern1d(self.sigma_s))
        # NOTE(review): _filter_s_2 is computed and moved to the device but
        # never used in _forward -- possibly dead code; confirm.
        self._filter_s_2 = FloatTensor(gausskern1d(3.0 * self.sigma_s))
        self._filter_s_color = FloatTensor(gausskern1d(self.color_sigma_ratio * self.sigma_s))
        self._filter_r = FloatTensor(gausskern1d(self.sigma_r))
        self._filter_r_color = FloatTensor(gausskern1d(self.sigma_r * self.color_range_ratio))
        # NOTE(review): _filter_r_color is not moved with cuda_like below,
        # unlike the other kernels -- looks like an oversight on GPU inputs.
        self._filter_s = cuda_like(self._filter_s, image)
        self._filter_s_2 = cuda_like(self._filter_s_2, image)
        self._filter_s_color = cuda_like(self._filter_s_color, image)
        self._filter_r = cuda_like(self._filter_r, image)
        yuv = self.rgb2yuv(image)
        for i in range(self.n_iter):
            yuv = self._forward(yuv)
        output = self.yuv2rgb(yuv)
        if DEBUG and check_nan_tensor(output):
            print("Denoising output has NAN!")
        return output
    def _forward(self, yuv):
        """Run one bilateral pass on a YUV tensor; returns the filtered YUV."""
        lum = yuv[:, 0:1, ...]
        # The (optionally transformed) luminance is the bilateral-grid guide.
        guide = lum[:, 0, ...]
        if is_number(self.guide_transform):
            guide = self._gamma_compression(guide, self.guide_transform)
        elif self.guide_transform == 'log':
            guide = self._log_compression(guide, self._bp)
        guide = torch.clamp(guide, 0.0, 1.0)
        out_yuv = yuv.clone()
        if self.filter_lum:
            out_lum = halide_funcs.BilateralGrid.apply(lum,
                                                       guide,
                                                       self._filter_s,
                                                       self._filter_r)
            out_yuv[:, 0:1, ...] = out_lum
        if self.filter_chrom:
            # Chrominance is guided by the already-filtered luminance.
            out_yuv[:, 1:3, ...] = halide_funcs.BilateralGrid.apply(yuv[:, 1:3, ...],
                                                                    out_yuv[:, 0, ...],
                                                                    self._filter_s_color,
                                                                    self._filter_r_color)
        return out_yuv
    @staticmethod
    def _gamma_compression(lum, gamma):
        # Power-law compression of the guide (input clamped to >= 0 first).
        return torch.pow(torch.clamp(lum, 0), gamma)
    @staticmethod
    def _undo_gamma_compression(lum, gamma):
        # Inverse of _gamma_compression.
        return torch.pow(torch.clamp(lum, 0), 1.0 / gamma)
    @staticmethod
    def _log_compression(lum, bp):
        # Log-compress luminance, mapping [bp, 1] onto [0, 1]; values below
        # the black point bp are clamped first.
        # Just clamp
        log_bp = np.log(bp)
        lum = torch.log(torch.clamp(lum, bp))
        lum = torch.clamp((lum - log_bp) / (-log_bp), 0, 1)
        return lum
    @staticmethod
    def _undo_log_compression(lum, bp):
        # NOTE(review): the scale here uses log(1+bp) while _log_compression
        # normalizes by -log(bp), so this is not an exact inverse -- confirm
        # whether that is intended.
        # Add and rescale
        log_bp = np.log(bp)
        log_1_bp = np.log(1.0 + bp)
        lum = (lum * (log_1_bp - log_bp)) + log_bp
        lum = (torch.exp(lum) - bp)
        return lum
# Non-differentiable bilateral-filter implementation (does not use Halide).
class DenoisingSKImageBilateralNonDifferentiable(DenoisingBilateral):
    """Bilateral denoiser that runs cv2.bilateralFilter on the CPU.

    Same constructor and parameters as DenoisingBilateral, but the filtering
    itself is not differentiable.
    """
    def forward(self, image):
        """Denoise an RGB tensor; same contract as DenoisingBilateral.forward."""
        if DEBUG and check_nan_tensor(image):
            print("Denoising input has NAN!")
        yuv = self.rgb2yuv(image)
        for i in range(self.n_iter):
            yuv = self._forward(yuv)
        output = self.yuv2rgb(yuv)
        if DEBUG and check_nan_tensor(output):
            print("Denoising output has NAN!")
        return output
    def _forward(self, yuv):
        """Run one CPU bilateral pass on a YUV tensor."""
        lum = yuv[:, 0:1, ...]
        lum = torch.clamp(lum, 0, 1)
        out_yuv = yuv.clone()
        # This is use to convert sigma_r so that it is in the same range as
        # Halide's bilateral grid
        HALIDE_RANGE_GRID = 32.0
        skbilateral = skimage.restoration.denoise_bilateral
        if self.filter_lum:
            # skimage's bilateral filter uses the luminance as the guide.
            if is_number(self.guide_transform):
                lum = self._gamma_compression(lum, self.guide_transform)
            elif self.guide_transform == 'log':
                lum = self._log_compression(lum, self._bp)
            # Convert to HWC numpy layout for OpenCV.
            lum_ = lum.cpu().permute(0, 2, 3, 1).data.numpy().astype('float32')
            lum_ = lum_[:, :, :, 0]
            # Filter each image in the batch
            for i in range(lum_.shape[0]):
                # lum_[i, ...] = skbilateral(lum_[i, ...],
                #                            sigma_color=self.sigma_r / HALIDE_RANGE_GRID,
                #                            sigma_spatial=self.sigma_s,
                #                            multichannel=False,
                #                            mode="reflect")
                # Window sized to cover +-3 spatial sigmas (at least 5 px).
                win_sz = max(5, 2 * math.ceil(3 * self.sigma_s) + 1)
                lum_[i, ...] = cv2.bilateralFilter(lum_[i, ...],
                                                   d=win_sz,
                                                   sigmaColor=self.sigma_r / HALIDE_RANGE_GRID,
                                                   sigmaSpace=self.sigma_s,
                                                   borderType=cv2.BORDER_REFLECT)
            lum_ = FloatTensor(lum_).unsqueeze(-1).permute(0, 3, 1, 2)
            out_lum = cuda_like(lum_, lum)
            # Undo guide transformation
            if is_number(self.guide_transform):
                out_lum = self._undo_gamma_compression(out_lum, self.guide_transform)
            elif self.guide_transform == 'log':
                out_lum = self._undo_log_compression(out_lum, self._bp)
            out_lum = torch.clamp(out_lum, 0.0, 1.0)
            out_yuv[:, 0:1, ...] = out_lum
        # Filter chrominance.
        if self.filter_chrom:
            chrom = yuv[:, 1:3, ...]
            # Map chroma from [-1, 1] to [0, 1] for OpenCV.
            chrom = torch.clamp((chrom + 1) * 0.5, 0.0, 1.0)
            chrom_ = chrom.cpu().permute(0, 2, 3, 1).data.numpy().astype('float32')
            # Filter each image in the batch
            for i in range(chrom_.shape[0]):
                for j in range(2):
                    # chrom_[i, :, :, j] = skbilateral(chrom_[i, :, :, j],
                    #                                  sigma_color=self.sigma_r / HALIDE_RANGE_GRID * self.color_range_ratio,
                    #                                  sigma_spatial=(self.sigma_s * self.color_sigma_ratio),
                    #                                  multichannel=False,
                    #                                  mode="reflect")
                    win_sz = max(5, 2 * math.ceil(3 * self.sigma_s * self.color_sigma_ratio) + 1)
                    chrom_[i, :, :, j] = cv2.bilateralFilter(chrom_[i, :, :, j],
                                                             d=win_sz,
                                                             sigmaColor=self.sigma_r / HALIDE_RANGE_GRID * self.color_range_ratio,
                                                             sigmaSpace=self.sigma_s * self.color_sigma_ratio,
                                                             borderType=cv2.BORDER_REFLECT)
            # Convert back to PyTorch tensor.
            chrom_ = FloatTensor(chrom_).permute(0, 3, 1, 2)
            out_chrom = cuda_like(chrom_, chrom)
            # Map chroma back from [0, 1] to [-1, 1].
            out_chrom = 2.0 * out_chrom - 1.0
            out_yuv[:, 1:3, ...] = out_chrom
        return out_yuv
class DenoisingWaveletNonDifferentiable(DenoisingSKImageBilateralNonDifferentiable):
    """Wavelet denoiser (skimage backend) shoehorned into the bilateral API."""
    def __init__(self, **kwargs):
        """ HACK: this function repurpose input for bilateral filters for
        different things.
        sigma_s --> Thresholding method. Can be string of numerical flags.
        color_sigma_ratio --> String indicating wavelet family (see skimage's documentation for detail).
        n_iter --> levels of wavelets.
        _bp --> wavelet threshold.
        """
        super().__init__(**kwargs)
        # Numeric sigma_s selects a thresholding method; a string is used
        # as the method name directly.
        if is_number(self.sigma_s):
            self.method = "BayesShrink" if self.sigma_s < 1 else "VisuShrink"
        else:
            self.method = self.sigma_s
        if is_number(self.color_sigma_ratio):
            raise ValueError("Wavelet denoising uses color_sigma_ratio to be"
                             " string indicating wavelet family to use. "
                             "{} received.".format(self.color_sigma_ratio))
        self.wavelet_family = self.color_sigma_ratio
        self.wavelet_levels = self.n_iter
        # The wavelet transform is applied once; n_iter was only repurposed
        # to carry the decomposition depth.
        self.n_iter = 1
        self.wavelet_threshold = self._bp
    def _forward(self, yuv):
        """Run one wavelet-denoising pass on a YUV tensor."""
        lum = yuv[:, 0:1, ...]
        out_yuv = yuv.clone()
        # this is use to convert sigma_r so that it is in the same range as Halide's bilateral grid
        # HALIDE_RANGE_GRID = 32.0
        if self.filter_lum:
            if is_number(self.guide_transform):
                lum = self._gamma_compression(lum, self.guide_transform)
            elif self.guide_transform == 'log':
                lum = self._log_compression(lum, self._bp)
            # HWC float64 numpy layout for skimage.
            lum_ = lum.cpu().permute(0, 2, 3, 1).data.numpy().astype('float64')
            lum_ = lum_[:, :, :, 0]
            for i in range(lum_.shape[0]):
                lum_[i, ...] = sk_denoise_wavelet(lum_[i, ...],
                                                  sigma=self.sigma_r,
                                                  method=self.method,
                                                  wavelet=self.wavelet_family,
                                                  wavelet_levels=self.wavelet_levels,
                                                  threshold=self.wavelet_threshold,
                                                  mode="soft")
            lum_ = FloatTensor(lum_).unsqueeze(-1).permute(0, 3, 1, 2)
            out_lum = cuda_like(lum_, lum)
            if is_number(self.guide_transform):
                out_lum = self._undo_gamma_compression(out_lum, self.guide_transform)
            elif self.guide_transform == 'log':
                out_lum = self._undo_log_compression(out_lum, self._bp)
            out_lum = torch.clamp(out_lum, 0.0, 1.0)
            out_yuv[:, 0:1, ...] = out_lum
        if self.filter_chrom:
            chrom = yuv[:, 1:3, ...]
            # Map chroma from [-1, 1] to [0, 1] for skimage.
            chrom = torch.clamp((chrom + 1) * 0.5, 0.0, 1.0)
            chrom_ = chrom.cpu().permute(0, 2, 3, 1).data.numpy().astype('float64')
            for i in range(chrom_.shape[0]):
                # NOTE(review): sigma is omitted for chrominance (unlike the
                # luminance call above) -- presumably estimated internally;
                # confirm this is intended.
                chrom_[i, ...] = sk_denoise_wavelet(chrom_[i, ...],
                                                    method=self.method,
                                                    wavelet=self.wavelet_family,
                                                    wavelet_levels=self.wavelet_levels,
                                                    threshold=self.wavelet_threshold,
                                                    mode="soft")
            chrom_ = FloatTensor(chrom_).permute(0, 3, 1, 2)
            out_chrom = cuda_like(chrom_, chrom)
            # Map chroma back from [0, 1] to [-1, 1].
            out_chrom = 2.0 * out_chrom - 1.0
            out_yuv[:, 1:3, ...] = out_chrom
        return out_yuv
class DenoisingMedianNonDifferentiable(nn.Module):
    """Median-filter denoiser in YUV space (CPU, not differentiable)."""
    def __init__(self,
                 neighbor_sz,
                 color_sigma_ratio=5,
                 filter_lum=True,
                 filter_chrom=True,
                 n_iter=1):
        """ Apply Median Filtering

        Args:
            neighbor_sz: window size (number) or explicit footprint for the
                luminance median filter.
            color_sigma_ratio: multiplier on neighbor_sz for the chrominance
                filter; ignored when neighbor_sz is a footprint.
            filter_lum: whether to filter the luminance channel.
            filter_chrom: whether to filter the chrominance channels.
            n_iter: number of times to apply the filter.
        """
        super().__init__()
        self.rgb2yuv = ColorSpaceConversionMatrix(RGB2YUV)
        self.yuv2rgb = ColorSpaceConversionMatrix(YUV2RGB)
        self.filter_lum = filter_lum
        self.filter_chrom = filter_chrom
        self.n_iter = n_iter
        self.lum_median = MedianFilterNonDifferentiable(neighbor_sz)
        if is_number(neighbor_sz):
            # Chrominance gets a (typically larger) scaled window.
            self.chrom_median = MedianFilterNonDifferentiable(int(neighbor_sz * color_sigma_ratio))
        else:
            if DEBUG and color_sigma_ratio != 1:
                print("Warning: ignoring color_sigma_ratio because neighbor_sz is not a number.")
            self.chrom_median = self.lum_median
    def forward(self, image):
        """Denoise an RGB tensor; returns a tensor of the same shape."""
        if DEBUG and check_nan_tensor(image):
            print("Denoising input has NAN!")
        yuv = self.rgb2yuv(image)
        for i in range(self.n_iter):
            yuv = self._forward(yuv)
        output = self.yuv2rgb(yuv)
        if DEBUG and check_nan_tensor(output):
            print("Denoising output has NAN!")
        return output
    def _forward(self, yuv):
        """Run one median-filter pass on a YUV tensor."""
        lum = yuv[:, 0:1, ...]
        out_yuv = yuv.clone()
        if self.filter_lum:
            out_lum = self.lum_median(lum)
            out_yuv[:, 0:1, ...] = torch.clamp(out_lum, 0.0, 1.0)
        if self.filter_chrom:
            out_yuv[:, 1:3, ...] = self.chrom_median(yuv[:, 1:3, ...])
        return out_yuv
class PytorchResizing(nn.Module):
    """Resize images with nn.functional.interpolate.

    *** No pre-filtering is applied!
    """
    def __init__(self,
                 resizing_factor=None,
                 new_size=None,
                 mode='bilinear'):
        """Configure the resizer.

        Exactly one of `resizing_factor` or `new_size` must be given.

        Args:
            resizing_factor: scale factor applied to both spatial dims.
            new_size: target output size (height, width).
            mode: interpolation mode ("bilinear", "area", "nearest", ...);
                see nn.functional.interpolate for details.
        """
        super().__init__()
        has_factor = resizing_factor is not None
        has_size = new_size is not None
        if has_factor == has_size:
            raise ValueError("Must specified exactly one of new_size ({})"
                             " or resizing_factor ({}).".format(new_size,
                                                                resizing_factor)
                             )
        self.resizing_factor = resizing_factor
        self.new_size = new_size
        self.mode = mode
    def forward(self, image):
        """Return `image` resized per the configured size or scale factor."""
        return nn.functional.interpolate(image,
                                         size=self.new_size,
                                         scale_factor=self.resizing_factor,
                                         mode=self.mode)
class MedianFilterNonDifferentiable(nn.Module):
    """Channel-wise 2-D median filter (CPU via scipy; not differentiable).

    Args:
        filter_sz: either a number giving the square window side, or an
            array-like neighborhood footprint passed through to scipy.
    """
    def __init__(self, filter_sz):
        super().__init__()
        # Local import: the surrounding file only imports the project helper
        # `is_number`; `numbers` reproduces its semantics without it.
        import numbers
        if isinstance(filter_sz, numbers.Number):
            self.filter_sz = filter_sz
            self.footprint = None
        else:
            # An explicit footprint mask was given instead of a size.
            self.filter_sz = None
            self.footprint = filter_sz
    def forward(self, image):
        """Median-filter every (batch, channel) plane of a 4-D tensor.

        The input tensor is left unmodified; a new tensor is returned on
        the same device class as the input.
        """
        # Local import: the file only does `import scipy`, which does not
        # load the `ndimage` subpackage.
        import scipy.ndimage
        # BUG FIX: `.numpy()` shares memory with a CPU tensor, so without
        # `.copy()` the in-place filtering below mutated the caller's input.
        image_ = image.cpu().data.numpy().copy()
        for i in range(image_.shape[0]):
            for j in range(image_.shape[1]):
                # BUG FIX: `scipy.ndimage.filters` is deprecated and removed
                # in modern scipy; use the public `scipy.ndimage.median_filter`.
                image_[i, j, ...] = scipy.ndimage.median_filter(
                    image_[i, j, ...], size=self.filter_sz,
                    footprint=self.footprint)
        image_ = FloatTensor(image_)
        # Return on the same device class as the input.
        return image_.cuda() if image.is_cuda else image_.cpu()
class BicubicResizing(nn.Module):
    """Bicubic (Mitchell spline) resizing via Halide — currently disabled.

    *** No pre-filtering is applied!
    """
    def __init__(self,
                 resizing_factor=None,
                 new_size=None,
                 B=1.0, C=0.0):
        """Configure the resizer.

        Exactly one of `resizing_factor` or `new_size` must be given.

        Args:
            resizing_factor: scale factor for both spatial dimensions.
            new_size: target size (width, height).
            B, C: spline parameters (Mitchell, SIGGRAPH'88); the default
                (1, 0) is the cubic B-spline.
        """
        super().__init__()
        if (new_size is None) == (resizing_factor is None):
            raise ValueError("Must specified exactly one of new_size ({})"
                             " or resizing_factor ({}).".format(new_size,
                                                                resizing_factor)
                             )
        self.resizing_factor = resizing_factor
        self.new_size = new_size
        self.B, self.C = B, C
        # NOTE: the Halide backend still needs debugging, so construction
        # is disabled for now.
        raise NotImplementedError
    def forward(self, image):
        """Resize via the Halide bicubic kernel (unreachable until enabled)."""
        if self.resizing_factor is None:
            out_W, out_H = int(self.new_size[0]), int(self.new_size[1])
        else:
            out_W = int(self.resizing_factor * image.size(-1))
            out_H = int(self.resizing_factor * image.size(-2))
            if out_W < 1 or out_H < 1:
                raise ValueError("Image to small that new size is zeros "
                                 "(w, h = {}, {})".format(out_W, out_H))
        return halide_funcs.BicubicResizing.apply(image,
                                                  out_W, out_H,
                                                  self.B, self.C)
class Unsharpen(nn.Module):
    """Sharpen an image by unsharp masking.

    Batching is not supported because GaussianBlur does not support it.
    """
    def __init__(self, amount, radius, threshold, blur_filter_sz=None):
        """
        Args:
            amount: (float) strength of the sharpening.
            radius: (float) Gaussian blur radius for the mask, in pixels.
            threshold: (float) minimum brightness difference to act on,
                expressed on a 0-255 scale.
            blur_filter_sz: blur kernel size; defaults to twice the radius.
        """
        super().__init__()
        self.amount = amount
        self.radius = radius
        self.threshold = threshold
        # Default kernel size: twice the blur radius.
        self.filter_size = radius * 2 if blur_filter_sz is None else blur_filter_sz
        self.blur = GaussianBlur(self.radius,
                                 sz_x=self.filter_size,
                                 sz_y=self.filter_size)
    def forward(self, image):
        """Return image + amount * (thresholded high-frequency residual)."""
        residual = image - self.blur(image)
        # Zero out differences below the brightness threshold.
        keep = (torch.abs(residual) > (self.threshold / 255)).float()
        return image + residual * keep * self.amount
# Demosaicking
class NaiveDemosaicking(nn.Module):
    """Demosaick a Bayer image with the Halide naive demosaicker, optionally
    refining color differences with iterated median filters (as in the AHD
    paper referenced below).
    """
    # TODO: Support GPU. Having host_dirty() exception now.
    def __init__(self, use_median_filter=True, n_iter=3, **kwargs):
        """
        Args:
            use_median_filter: whether or not to apply median filter on chrominance/luminance
            n_iter: number of times to apply median filters.
        """
        super().__init__()
        if use_median_filter:
            # Same footprint as in the original AHD algorithm.
            RB_footprint = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
            G_footprint = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
            self.median_RB = MedianFilterNonDifferentiable(RB_footprint)
            self.median_G = MedianFilterNonDifferentiable(G_footprint)
        self.use_median_filter = use_median_filter
        self.n_iter = n_iter
        if _has_halide():
            self.demosaicker = halide_funcs.NaiveDemosaick.apply
    def forward(self, image):
        """Demosaick a 1-channel Bayer tensor into a 3-channel RGB tensor."""
        demosaicked = self.demosaicker(image)
        if self.use_median_filter:
            # Median refinement runs on the CPU.
            demosaicked_ = demosaicked.cpu()
            # repeat 3 times
            for i in range(self.n_iter):
                # follow AHD paper:
                # https://www.photoactivity.com/Pagine/Articoli/006NewDCRaw/hirakawa03adaptive.pdf
                R = demosaicked_[:, 0:1, ...].clone()
                G = demosaicked_[:, 1:2, ...].clone()
                B = demosaicked_[:, 2:3, ...].clone()
                # Median-filter the color-difference planes, not the colors
                # themselves, then reconstruct each channel.
                R = self.median_RB(R - G) + G
                B = self.median_RB(B - G) + G
                G = 0.5 * (self.median_G(G - R) + \
                           self.median_G(G - B) + \
                           R + B)
                demosaicked_[:, 0:1, ...] = R
                demosaicked_[:, 1:2, ...] = G
                demosaicked_[:, 2:3, ...] = B
            demosaicked = cuda_like(demosaicked_, demosaicked)
        return demosaicked
class AHDDemosaickingNonDifferentiable(NaiveDemosaicking):
    """AHD demosaicking via a NumPy implementation (CPU, non-differentiable)."""
    # TODO: Convert Numpy to Pytorch
    def __init__(self, use_median_filter=True, n_iter=3, delta=2, sobel_sz=3, avg_sz=3):
        super().__init__(use_median_filter, n_iter)
        # print("Using AHD Non-differentiable")
        def ahd_demosaicker(image):
            # Run the NumPy ahd_demosaicking per batch item, then move the
            # result back to the input's device.
            image_ = image.cpu().permute(0, 2, 3, 1).squeeze(-1).data.numpy()
            output = []
            for i in range(image_.shape[0]):
                output.append(FloatTensor(ahd_demosaicking(image_[i, ...], delta, sobel_sz, avg_sz)).unsqueeze(0))
            output = cuda_like(torch.cat(output, dim=0).permute(0, 3, 1, 2), image)
            return output
        self.demosaicker = ahd_demosaicker
class BayerMosaicking(nn.Module):
    """ Turn 3-channel image into GRGB Bayer.
    """
    def forward(self, image):
        # Tensors are batch x channels x height x width; build per-pixel
        # row/column parity grids to select the Bayer color at each site.
        h, w = image.size(2), image.size(3)
        cols = torch.arange(w).unsqueeze(0).expand(h, -1).unsqueeze(0).unsqueeze(0)
        rows = torch.arange(h).unsqueeze(-1).expand(-1, w).unsqueeze(0).unsqueeze(0)
        if image.is_cuda:
            cols = cols.cuda()
            rows = rows.cuda()
        col_odd = torch.fmod(cols, 2)
        row_odd = torch.fmod(rows, 2)
        # Green on sites where row/column parity agree; red on odd columns of
        # even rows; blue on even columns of odd rows.
        green_mask = (col_odd == row_odd).float()
        red_mask = (col_odd * (1.0 - row_odd)).float()
        blue_mask = ((1.0 - col_odd) * row_odd).float()
        return image[:, 0:1, :, :] * red_mask + \
               image[:, 1:2, :, :] * green_mask + \
               image[:, 2:3, :, :] * blue_mask
# Color
class WhiteBalance(nn.Module):
    def __init__(self, scaling, mosaick_pattern=None):
        """ Perform white balance with a scaling factor.
        Args:
            scaling: Tensor of size [channels] for scaling each channel
                of the image. Batch dimension is optional.
            mosaick_pattern: mosaick pattern of the input image.
        """
        super().__init__()
        self.scaling = scaling
        self.mosaick_pattern = mosaick_pattern
    def forward(self, image):
        # need to check the type.
        # Move the scaling tensor to the input's device before multiplying;
        # mosaick_multiply applies per-channel gains respecting the pattern.
        self.scaling = cuda_like(self.scaling, image)
        return mosaick_multiply(self.scaling,
                                image,
                                self.mosaick_pattern)
class WhiteBalanceTemperature(nn.Module):
    def __init__(self,
                 new_temp,
                 new_tint=0.0,
                 orig_temp=6504,
                 orig_tint=0.0,
                 mosaick_pattern=None):
        """ WhiteBalancing with temperature parameterization.
        Args:
            new_temp: temperature to correct to. Can be scalar or 1D Tensor.
            new_tint: tint to correct to. Can be scalar or 1D Tensor.
            orig_temp: original temperature (default to D65)
            orig_tint: original tint (default to D65)
            mosaick_pattern: whether if the input has Bayer pattern.
        """
        super().__init__()
        # Make sure any scalars are converted to FloatTensor properly.
        self.new_temp = python_to_tensor(new_temp)
        self.new_tint = python_to_tensor(new_tint)
        self.orig_temp = python_to_tensor(orig_temp)
        self.orig_tint = python_to_tensor(orig_tint)
        self.mosaick_pattern = mosaick_pattern
    @staticmethod
    def _planckian_locus(T, tint):
        """Calculate Planckian Locus and its derivative in CIExyY.
        Args:
            T: Correlated Color Temp (in K) (Scalar or 1D tensor)
            tint: offset along the direction perpendicular to the locus
                (Scalar or 1D tensor)
        Returns:
            The white point in CIEXYZ space as a tensor of shape [batch x 3]
        """
        # formula from wikipedia
        def _blackbody_spectrum(l, T):
            """ Blackbody radiation spectrum
            See https://en.wikipedia.org/wiki/Planckian_locus.
            Args:
                l: wavelength in nanometer.
                T: temperature in Kelvin.
            """
            # Second radiation constant (nm*K scale).
            c2 = 1.4387773683E7
            l = l.unsqueeze(0)
            lT = l * T.unsqueeze(-1)
            return 1.0 / (torch.pow(l, 5) * (torch.exp(c2 / lT) - 1))
        def _diff_blackbody_spectrum(l, T):
            """ Temperature-derivative for blackbody spectrum function. This
            is used for tint where we find the perpendicular direction to
            the Planckian locus.
            """
            c2 = 1.4387773683E7
            l = l.unsqueeze(0)
            T = T.unsqueeze(-1)
            lT = l * T
            exp = torch.exp(c2 / (lT))
            return c2 * exp / (torch.pow(l, 6) * torch.pow(T * (exp - 1), 2))
        # Convert Scalar T into a 1D tensor
        if len(T.size()) < 1:
            T = T.unsqueeze(0)
        # Shape [batch x wavelength]
        M = _blackbody_spectrum(xyz_color_matching['lambda'], T)
        M_ = _diff_blackbody_spectrum(xyz_color_matching['lambda'], T)
        # Integrate spectrum against the CIE color matching functions.
        X = torch.sum(M.unsqueeze(1) * xyz_color_matching['xyz'].unsqueeze(0),
                      dim=-1)
        X_ = torch.sum(M_.unsqueeze(1) * xyz_color_matching['xyz'].unsqueeze(0),
                       dim=-1)
        Y = X[:, 1:2]
        Y_ = X_[:, 1:2]
        # Derivative of the Y-normalized locus (quotient rule).
        X_ = (X_ / Y) - (X / (Y * Y) * Y_)
        # Swap X and Z so the tint direction is orthogonal to the locus.
        # BUG FIX: the previous tuple swap
        #     X_[:, 0], X_[:, 2] = X_[:, 2], X_[:, 0]
        # assigned through aliased views, so BOTH columns ended up holding
        # the old Z component. Advanced indexing returns a swapped copy.
        X_ = X_[:, [2, 1, 0]]
        X_[:, 1] = 0
        # keepdim=True so the [batch, 3] / [batch, 1] division broadcasts for
        # any batch size (without it, it only worked when batch was 1 or 3).
        X_ = X_ / torch.sqrt(torch.sum(X_ ** 2, dim=1, keepdim=True))
        # normalize Y to 1.
        X = X / X[:, 1:2] + tint.unsqueeze(-1) * X_
        return X
    def forward(self, image):
        X_orig = self._planckian_locus(self.orig_temp, self.orig_tint)
        X_new = self._planckian_locus(self.new_temp, self.new_tint)
        # The numerator is the original correction factor that makes D65
        # into [1, 1, 1] in sRGB. The XYZ2sRGB matrix encodes this, so as
        # a sanity check, XYZ2sRGB * X_D65 should equals 1.
        scaling = torch.matmul(XYZ2sRGB, X_new.t()) / \
                  torch.matmul(XYZ2sRGB, X_orig.t())
        # Transpose to [batch, 3]
        scaling = scaling.t()
        self._wb = WhiteBalance(scaling, self.mosaick_pattern)
        return self._wb(image)
class ColorSpaceConversionMatrix(nn.Module):
    def __init__(self, matrix):
        """ Linear color space conversion.
        Useful for converting between sRGB and YUV.
        Args:
            matrix: matrix to convert color space (should be 2-D Tensor).
                The conversion works as c_new = A * c_old, where c's are
                column vectors in each color space.
        """
        super().__init__()
        self.matrix = matrix
    def forward(self, image):
        self.matrix = cuda_like(self.matrix, image)
        # Apply the matrix along the channel dimension of the NCHW tensor.
        return torch.einsum('ij,kjlm->kilm',
                            (self.matrix,
                             image)
                            )
class Saturation(nn.Module):
    def __init__(self, value):
        """ Adjust Saturation in YUV space
        Args:
            value: multiplier to the chrominance.
        """
        super().__init__()
        self.value = value
        self.rgb2yuv = ColorSpaceConversionMatrix(RGB2YUV)
        self.yuv2rgb = ColorSpaceConversionMatrix(YUV2RGB)
    def forward(self, image):
        # Scale U/V on the converted copy (rgb2yuv returns a new tensor),
        # clamp chroma to [-1, 1], then convert back to RGB.
        image = self.rgb2yuv(image)
        image[:, 1:, ...] *= self.value
        image[:, 1:, ...] = torch.clamp(image[:, 1:, ...], -1.0, 1.0)
        image = self.yuv2rgb(image)
        return image
# Tone
class sRGBLikeGamma(nn.Module):
    def __init__(self, threshold, a, mult, gamma):
        """sRGB-like Gamma compression.
        Linear at low range then power gamma.
        Args:
            threshold: threshold under which the conversion becomes linear.
            a: constant factor to ensure continuity.
            mult: slope for the linear part.
            gamma: Gamma value.
        """
        super().__init__()
        self.threshold = threshold
        self.a = a
        self.mult = mult
        self.gamma = gamma
    def forward(self, image):
        above = (image > self.threshold).float()
        linear_part = image * self.mult
        # 0.001 is to avoid funny thing at 0.
        power_part = (1 + self.a) * torch.pow(image + 0.001, 1.0 / self.gamma) - self.a
        # Blend the two branches with the float mask.
        return above * power_part + (1 - above) * linear_part
class UndosRGBLikeGamma(nn.Module):
    """ Linear at low range then power gamma.
    This is inverse of sRGBLikeGamma. See sRGBLikeGamma for detail.
    """
    def __init__(self, threshold, a, mult, gamma):
        super().__init__()
        self.threshold = threshold
        self.a = a
        self.mult = mult
        self.gamma = gamma
    def forward(self, image):
        above = (image > self.threshold).float()
        linear_part = image / self.mult
        # NOTE(review): the standard inverse is ((x + a) / (1 + a)) ** gamma;
        # this uses (x + a) ** gamma / (1 + a) — kept as-is to preserve behavior.
        power_part = torch.pow(image + self.a, self.gamma) / (1 + self.a)
        return above * power_part + (1 - above) * linear_part
class sRGBGamma(sRGBLikeGamma):
    """Forward sRGB transfer function (linear -> gamma-encoded)."""
    # See https://en.wikipedia.org/wiki/SRGB#Specification_of_the_transformation
    def __init__(self):
        super().__init__(threshold=0.0031308,
                         a=0.055,
                         mult=12.92,
                         gamma=2.4)
class UndosRGBGamma(UndosRGBLikeGamma):
    """Inverse sRGB transfer function (gamma-encoded -> linear)."""
    # See https://en.wikipedia.org/wiki/SRGB#Specification_of_the_transformation
    def __init__(self):
        super().__init__(threshold=0.04045,
                         a=0.055,
                         mult=12.92,
                         gamma=2.4)
class ProPhotoRGBGamma(sRGBLikeGamma):
    """Forward ProPhoto (ROMM) RGB transfer function."""
    # See https://en.wikipedia.org/wiki/ProPhoto_RGB_color_space#Encoding_function
    def __init__(self):
        super().__init__(threshold=1.0 / 512.0,
                         a=0.0,
                         mult=16.0,
                         gamma=1.8)
class UndoProPhotoRGBGamma(UndosRGBLikeGamma):
    """Inverse ProPhoto (ROMM) RGB transfer function."""
    # See https://en.wikipedia.org/wiki/ProPhoto_RGB_color_space#Encoding_function
    def __init__(self):
        super().__init__(threshold=1.0 / 32.0,
                         a=0.0,
                         mult=16.0,
                         gamma=1.8)
class GammaCompression(nn.Module):
    def __init__(self, gamma):
        """ Pure power-law gamma compression.
        Args:
            gamma: exponent; scalar or 1D tensor (expanded to per-batch).
        """
        super().__init__()
        gamma = python_to_tensor(gamma)
        self.gamma = expand_to_4d_batch(gamma)
    def forward(self, image):
        self.gamma = cuda_like(self.gamma, image)
        # Small epsilon keeps pow well-behaved at exactly 0.
        return (image + 0.0001).pow(self.gamma)
class UndoGammaCompression(nn.Module):
    def __init__(self, gamma):
        """ Inverse of GammaCompression.
        Args:
            gamma: exponent used by the forward compression.
        """
        super().__init__()
        gamma = python_to_tensor(gamma)
        # Inverting a power law applies the reciprocal exponent.
        self._gamma = GammaCompression(1.0 / gamma)
    def forward(self, image):
        return self._gamma(image)
class Gray18Gamma(nn.Module):
    def __init__(self, gamma):
        """ Applying gamma while keeping 18% gray constant.
        Args:
            gamma: exponent; scalar or 1D tensor (expanded to per-batch).
        """
        super().__init__()
        gamma = python_to_tensor(gamma)
        self.gamma = expand_to_4d_batch(gamma)
    def forward(self, image):
        # mult x (0.18)^gamma = 0.18; 0.18 = 18% gray
        self.mult = FloatTensor([0.18]).pow(1.0 - self.gamma)
        self.gamma = cuda_like(self.gamma, image)
        self.mult = cuda_like(self.mult, image)
        # 0.001 epsilon avoids pow issues at exactly 0.
        return self.mult * torch.pow(image + 0.001, self.gamma)
class ToneCurve(nn.Module):
    def __init__(self, amount):
        """ Tone curve using cubic curve.
        The curve is assume to pass 0, 0.25-a, 0.5, 0.75+a, 1, where
        a is a parameter controlling the curve. For usability, the parameter
        amount of 0 and 1 is mapped to a of 0 and 0.2.
        """
        super().__init__()
        self.amount = amount
        self.rgb2yuv = ColorSpaceConversionMatrix(RGB2YUV)
        self.yuv2rgb = ColorSpaceConversionMatrix(YUV2RGB)
    def forward(self, image):
        a = self.amount * 0.2
        # Cubic f(y) = A*y^3 + B*y^2 + C*y; coefficients satisfy
        # f(0)=0, f(0.5)=0.5, f(1)=1 for any a.
        self._A = -64.0 * a / 3.0
        self._B = 32.0 * a
        self._C = 1.0 - 32.0 * a / 3.0
        yuv = self.rgb2yuv(image)
        y = yuv[:, 0, ...]
        y_sqr = y * y
        y_cub = y_sqr * y
        y = self._A * y_cub + self._B * y_sqr + self._C * y
        # Clone before the in-place channel write so the conversion result
        # itself is not mutated.
        yuv = yuv.clone()
        yuv[:, 0, ...] = y
        image = self.yuv2rgb(yuv)
        return image
class ToneCurveNZones(nn.Module):
    def __init__(self, ctrl_val):
        """ Tone curve using linear curve with N zone.
        Args:
            ctrl_val: list of values that specify control points. These
                are assumed to be equally spaced between 0 and 1.
        """
        super().__init__()
        self.ctrl_val = ctrl_val
        self.rgb2yuv = ColorSpaceConversionMatrix(RGB2YUV)
        self.yuv2rgb = ColorSpaceConversionMatrix(YUV2RGB)
    def forward(self, image):
        """Apply the piecewise-linear curve to the luma channel."""
        yuv = self.rgb2yuv(image)
        y = yuv[:, 0, ...]
        n_zones = len(self.ctrl_val) + 1
        val_scaling = 1.0 / n_zones
        # Input breakpoints are equally spaced; output breakpoints are the
        # control values offset around the corresponding input positions.
        in_val = torch.linspace(0, 1, n_zones + 1)
        out_val = [0] + [val_scaling * (i + 1 + self.ctrl_val[i]) for i in range(len(self.ctrl_val))] + [1]
        y_ = 0
        for i in range(len(in_val) - 1):
            # if statement for the boundary case, in case we have something negatives
            mask_lo = (y >= in_val[i]).float() if i > 0 else 1
            mask_hi = (y < in_val[i + 1]).float() if i < len(in_val) - 2 else 1
            mask = mask_lo * mask_hi
            slope = (out_val[i + 1] - out_val[i]) / (in_val[i + 1] - in_val[i])
            y_ += ((y - in_val[i]) * slope + out_val[i]) * mask
        yuv = yuv.clone()
        # BUG FIX: previously the untouched luma `y` was written back here,
        # discarding the remapped `y_` and making the module a no-op.
        yuv[:, 0, ...] = y_
        image = self.yuv2rgb(yuv)
        return image
class ToneCurveThreeZones(nn.Module):
    def __init__(self, highlight, midtone, shadow):
        """ Convenience wrapper around ToneCurveNZones with explicitly
        named shadow/midtone/highlight control points.
        """
        super().__init__()
        self.tc = ToneCurveNZones([shadow, midtone, highlight])
    def forward(self, image):
        return self.tc(image)
class Quantize(nn.Module):
    def __init__(self, nbits=8):
        """ Quantize image to number of bits.
        """
        super().__init__()
        self.nbits = nbits
    def forward(self, image):
        """Floor-quantize *image* onto 2**nbits uniform levels."""
        self.mult = FloatTensor([2]).pow(self.nbits)
        self.mult = cuda_like(self.mult, image)
        scaled = image * self.mult
        return scaled.floor() / self.mult
class ExposureAdjustment(nn.Module):
    def __init__(self, nstops):
        """ Exposure adjustment by the stops.
        Args:
            nstops: number of stops to adjust exposure. Can be scalar or
                1D Tensor.
        """
        super().__init__()
        nstops = python_to_tensor(nstops)
        self.nstops = expand_to_4d_batch(nstops)
    def forward(self, image):
        # One stop doubles (or halves) exposure: multiplier = 2**nstops.
        self._multiplier = FloatTensor([2]).pow(self.nstops)
        self._multiplier = cuda_like(self._multiplier, image)
        return self._multiplier * image
class AffineExposure(nn.Module):
    def __init__(self, mult, add):
        """ Exposure adjustment with affine transform.
        This calculate exposure according to mult*L + add, where L is
        the current pixel value.
        Args:
            mult: Multiplier. Can be scalar or 1D Tensor.
            add: Additive constant. Can be scalar or 1D Tensor.
        """
        super().__init__()
        mult = python_to_tensor(mult)
        add = python_to_tensor(add)
        self._mult = expand_to_4d_batch(mult)
        self._add = expand_to_4d_batch(add)
    def forward(self, image):
        # Move constants onto the input's device, then apply mult*L + add.
        self._mult = cuda_like(self._mult, image)
        self._add = cuda_like(self._add, image)
        return self._mult * image + self._add
class AutoLevelNonDifferentiable(nn.Module):
    def __init__(self, blkpt=1, whtpt=99, max_mult=1.5):
        """ AutoLevel
        Non-differentiable because it uses percentile function.
        Args:
            blkpt: percentile used as black point.
            whtpt: percentile used as white point.
            max_mult: max multiplication factor to avoid over brightening
                image.
        """
        super().__init__()
        self.blkpt = blkpt
        self.whtpt = whtpt
        self.max_mult = max_mult
        self.rgb2yuv = ColorSpaceConversionMatrix(RGB2YUV)
        self.yuv2rgb = ColorSpaceConversionMatrix(YUV2RGB)
    def forward(self, image):
        # Black/white points are computed from luma percentiles per batch item.
        yuv = self.rgb2yuv(image)
        y = yuv[:, 0, ...].cpu().numpy()
        y = np.reshape(y, (y.shape[0], -1))
        blkpt = np.percentile(y, self.blkpt, axis=1)
        whtpt = np.percentile(y, self.whtpt, axis=1)
        mult = 1.0 / (whtpt - blkpt)
        if self.max_mult is not None:
            # if self.max_mult == "auto":
            # HACK: so that we can control both flow without additional switch.
            # A negative max_mult selects the adaptive cap below.
            if self.max_mult < 0:
                mm = 4.0 * np.power(whtpt, -self.max_mult)
                mm = np.minimum(mm, 4.0)
                mm = np.maximum(mm, 1.0)
            else:
                mm = self.max_mult
            mult = np.minimum(mult, mm)
        mult = FloatTensor(mult).unsqueeze(-1).unsqueeze(-1)
        mult = cuda_like(mult, yuv)
        blkpt = FloatTensor(blkpt).unsqueeze(-1).unsqueeze(-1)
        blkpt = cuda_like(blkpt, yuv)
        # yuv[:, 0, ...] = (yuv[:, 0, ...] - blkpt) * mult
        # NOTE: the stretch is applied to the RGB image directly; the
        # luma-only path above is intentionally commented out.
        image = (image - blkpt) * mult
        image = torch.clamp(image, 0.0, 1.0)
        return image
# Noises
class NoiseModule(nn.Module):
    """Base class for noise modules"""
    def get_noise_image(self, image):
        """ Return additive noise to the image.
        This function should return noise image with the standard deviation
        and the mosaick pattern baked in.
        """
        raise RuntimeError("This is a base class for noise modules. "
                           "Use one of its subclasses instead.")
    def forward(self, image):
        # Additive noise model: output = input + noise.
        return image + self.get_noise_image(image)
class PoissonNoise(NoiseModule):
    def __init__(self, sigma, mosaick_pattern=None):
        """ Poisson noise
        Args:
            sigma: multiplier to the noise strength.
            mosaick_pattern: optional Bayer pattern for per-site scaling.
        """
        super().__init__()
        self.sigma = python_to_tensor(sigma)
        self.mosaick_pattern = mosaick_pattern
    def get_noise_image(self, image):
        # Gaussian approximation of shot noise: std scales with sqrt(signal);
        # clamp keeps sqrt defined for negative pixel values.
        noise_image = torch.randn_like(image)
        noise_image *= torch.sqrt(torch.clamp(image, min=0.0))
        self.sigma = cuda_like(self.sigma, image)
        return mosaick_multiply(self.sigma, noise_image, self.mosaick_pattern)
class GaussianNoise(NoiseModule):
    def __init__(self, sigma, mosaick_pattern=None):
        """ Gaussian noise
        Args:
            sigma: noise STD.
            mosaick_pattern: optional Bayer pattern for per-site scaling.
        """
        super().__init__()
        self.sigma = python_to_tensor(sigma)
        self.mosaick_pattern = mosaick_pattern
    def get_noise_image(self, image):
        # Signal-independent additive noise with std sigma.
        noise_image = torch.randn_like(image)
        self.sigma = cuda_like(self.sigma, image)
        return mosaick_multiply(self.sigma, noise_image, self.mosaick_pattern)
class GaussPoissonMixtureNoise(NoiseModule):
    def __init__(self, sigma_p, sigma_g, mosaick_pattern=None):
        """ Gaussian and poisson noise mixture.
        Args:
            sigma_p: poisson noise multiplication..
            sigma_g: noise gaussian STD.
        """
        super().__init__()
        self.mosaick_pattern = mosaick_pattern
        self.sigma_p = sigma_p
        self.sigma_g = sigma_g
        self._poisson = PoissonNoise(self.sigma_p, self.mosaick_pattern)
        self._gauss = GaussianNoise(self.sigma_g, self.mosaick_pattern)
    def get_noise_image(self, image):
        # Independent draws from both components are simply summed.
        return self._poisson.get_noise_image(image) + \
               self._gauss.get_noise_image(image)
# Other artifacts.
class JPEGCompression(nn.Module):
    """Differentiable JPEG-style compression: blockwise DCT, quantization of
    the coefficients, and inverse DCT, with optional chroma subsampling."""
    DCT_BLOCK_SIZE = 8
    # TODO: Support batch for different quality.
    def __init__(self, quality):
        """ JPEGCompression with integer quality.
        Args:
            quality: integer between 0 and 12 (highest quality).
                This selects quantization table to use. See constant.py
                for detail.
        """
        # Quality must be integer between 0 and 12.
        super().__init__()
        quality = int(quality)
        # Add batch and channel dimension
        self.DCT_coeff_block = DCT_coeff.clone().unsqueeze(0).unsqueeze(0)
        self.quantization_lum = photoshop_jpeg_quantization_lum[quality]
        self.quantization_chrom = photoshop_jpeg_quantization_chrom[quality]
        # Reshape the 8x8 tables to broadcast over (batch, ch, u, v, h, w).
        self.quantization_lum = self.quantization_lum \
                                    .unsqueeze(0).unsqueeze(0) \
                                    .unsqueeze(-1).unsqueeze(-1)
        self.quantization_chrom = self.quantization_chrom \
                                      .unsqueeze(0).unsqueeze(0) \
                                      .unsqueeze(-1).unsqueeze(-1)
        self.downsample_chrom = photoshop_chroma_subsampling[quality]
        self.rgb2yuv = ColorSpaceConversionMatrix(RGB2YUV)
        self.yuv2rgb = ColorSpaceConversionMatrix(YUV2RGB)
    @staticmethod
    def _tile_sum(arr, dct_block):
        """Do the cumulative sum in tiles over the last two dimensions.
        input should be shaped (batch, ch, blk_sz, blk_sz, im_h, im_w)
        output will be (batch, ch, blk_sz, blk_sz, n_blk_h, n_blk_w)
        """
        verbose = False
        dct_block_size = dct_block.size(-1)
        # allocating a temp array seems helpful, maybe because it doesn't
        # have to write to the original array, which would result in more
        # cache misses.
        res = torch.zeros((arr.size(0),
                           arr.size(1),
                           dct_block_size, dct_block_size,
                           int(arr.size(4) / dct_block_size),
                           arr.size(5)))
        # also multiply DCT coefficient here because actually repeating
        # in two dim and multiply is very slow.
        dct_block = dct_block.repeat(1, 1, 1, 1, 1, int(arr.size(5) / dct_block_size))
        # Sum in height and multiply.
        for i in range(dct_block_size):
            res += arr[..., i::dct_block_size, :] * dct_block[..., i:(i + 1), :]
        # Sum in width
        for i in range(dct_block_size - 1):
            res[..., :, (i + 1)::dct_block_size] += res[..., :, i::dct_block_size]
        # Slice the array
        # now DCT should have dimension (batch, ch, 8, 8, n_blk_h, n_blk_w)
        res = res[..., :, (dct_block_size - 1)::dct_block_size]
        return res
    @staticmethod
    def _tile_to_image(arr):
        """Takes arr of shape (batch, ch, blk_sz, blk_sz, n_blk_h, n_blk_w),
        and reshape it so that it is (batch, ch, im_h, im_w)
        """
        # For readability
        dct_block_size = JPEGCompression.DCT_BLOCK_SIZE
        n_blk_h = int(arr.size(-2))
        n_blk_w = int(arr.size(-1))
        # reshape it, assume reshape does it in C-order, last element changing fastest.
        # Rearrange it so that it is
        # (batch, ch, n_blk_h, v, n_blk_w, u)
        arr = arr.permute(0, 1, 4, 2, 5, 3)
        # dct is now (batch, ch, y, x, v, u)
        arr = arr.contiguous()
        arr = arr.view(arr.size(0),
                       arr.size(1),
                       n_blk_h * dct_block_size,
                       n_blk_w * dct_block_size)
        return arr
    def _compress(self, image, quantization_matrix):
        """DCT -> quantize -> inverse DCT for one channel group."""
        # convert to -128 - 127 range
        image = (image * 255.0) - 128.0
        # For readability
        dct_block_size = JPEGCompression.DCT_BLOCK_SIZE
        # pad image
        im_h = int(image.size(-2))
        im_w = int(image.size(-1))
        n_blk_h = int(np.ceil(im_h / dct_block_size))
        n_blk_w = int(np.ceil(im_w / dct_block_size))
        n_pad_h = n_blk_h * dct_block_size - image.size(-2)
        n_pad_w = n_blk_w * dct_block_size - image.size(-1)
        # pad image
        image = torch.nn.functional.pad(image, (0, n_pad_w, 0, n_pad_h))
        # Add u, v dimension
        image = image.unsqueeze(-3).unsqueeze(-3)
        # Compute DCT
        # Sum within each tile.
        dct = self._tile_sum(image, self.DCT_coeff_block)
        # Quantize
        dct = torch.round(dct / quantization_matrix) * quantization_matrix
        # reshape it so that this becomes a u-v image.
        dct = self._tile_to_image(dct).unsqueeze(-3).unsqueeze(-3)
        # DCT should be (batch, ch, 8, 8, im_h, im_w)
        # do the sum in u, v
        dct = self._tile_sum(dct, self.DCT_coeff_block.permute(0, 1, 4, 5, 2, 3))
        dct = self._tile_to_image(dct)
        # Undo padding.
        dct = dct[..., :im_h, :im_w]
        # convert back to 0-1 range
        dct = (dct + 128.0) / 255.0
        return dct
    def forward(self, image):
        self.quantization_lum = cuda_like(self.quantization_lum, image)
        self.DCT_coeff_block = cuda_like(self.DCT_coeff_block, image)
        image_yuv = self.rgb2yuv(image)
        image_y = image_yuv[:, 0:1, ...]
        image_uv = image_yuv[:, 1:, ...]
        # Compress luminance.
        image_y = self._compress(image_y, self.quantization_lum)
        # Compress the chrominance.
        if self.downsample_chrom:
            uv_size = image_uv.size()
            image_uv = nn.functional.interpolate(image_uv, scale_factor=0.5)
        image_uv = self._compress(image_uv, self.quantization_chrom)
        if self.downsample_chrom:
            image_uv = nn.functional.interpolate(image_uv, size=uv_size[-2:])
        image_yuv = torch.cat((image_y, image_uv), dim=1)
        image = self.yuv2rgb(image_yuv)
        return image
class ChromaticAberration(nn.Module):
    def __init__(self, scaling):
        """Chromatic Aberration
        Args:
            scaling: This class scales R and B channel with factor of scaling and 1/scaling
                respectively.
        """
        super().__init__()
        self.scaling = expand_to_4d_batch(python_to_tensor(scaling))
    @staticmethod
    def _scale(image, scaling):
        """Resample *image* with a pure scaling affine warp."""
        # create the affine matrix.
        theta = torch.zeros((image.size(0), 2, 3))
        # diagonal entry
        theta[:, 0, 0] = scaling
        theta[:, 1, 1] = scaling
        theta = cuda_like(theta, image)
        grid = nn.functional.affine_grid(theta, image.size())
        return nn.functional.grid_sample(image, grid, padding_mode="border")
    def forward(self, image):
        # R
        output_img = image.clone()
        output_img[:, 0:1, ...] = self._scale(image[:, 0:1, ...],
                                              self.scaling)
        # B
        output_img[:, 2:3, ...] = self._scale(image[:, 2:3, ...],
                                              1.0 / self.scaling)
        return output_img
class PixelClip(nn.Module):
    """ Module for clipping pixel value.
    """
    def forward(self, image):
        # Restrict every pixel to the displayable [0, 1] range.
        return image.clamp(0.0, 1.0)
class RepairHotDeadPixel(nn.Module):
    # Adapt from https://github.com/letmaik/rawpy/blob/291afa870727f759a7bb68d756e4603806a466a4/rawpy/enhance.py
    def __init__(self, threshold=0.2, median_class="MedianFilterNonDifferentiable"):
        """ Repair hot pixel with median filter.
        Args:
            threshold: Difference to be considered as hot/dead pixels.
            median_class: name of the median-filter class to instantiate.
        """
        super().__init__()
        median_classes = {"MedianFilterNonDifferentiable": MedianFilterNonDifferentiable,
                          }
        self.median = median_classes[median_class](3)
        self.threshold = threshold
    def _repair_one_channel(self, rawslice):
        # Pixels that deviate from their 3x3 median by more than the
        # threshold are replaced with the median value.
        med = self.median(rawslice.clone())
        # detect possible bad pixels
        candidates = torch.abs(rawslice - med) > self.threshold
        candidates = candidates.float()
        candidates = cuda_like(candidates, rawslice)
        return (1.0 - candidates) * rawslice + candidates * med
    def forward(self, image):
        # we have bayer
        if image.size(1) == 1:
            # we have 4 colors (two greens are always seen as two colors)
            for offset_y in [0, 1]:
                for offset_x in [0, 1]:
                    rawslice = image[..., offset_y::2, offset_x::2]
                    rawslice = self._repair_one_channel(rawslice)
                    image[..., offset_y::2, offset_x::2] = rawslice
        else:
            # do it per channel
            for i in range(image.size(1)):
                rawslice = image[:, i:(i + 1), ...]
                rawslice = self._repair_one_channel(rawslice)
                image[:, i:(i + 1), ...] = rawslice
        return image
class PerChannelBlur(nn.Module):
    def __init__(self, kern):
        """ Blur applied to each channel individually.
        Args:
            kern: 2D tensors representing the blur kernel.
        """
        super().__init__()
        self.kern = kern
    def forward(self, image):
        self.kern = FloatTensor(self.kern).unsqueeze(0).unsqueeze(0)
        self.kern = cuda_like(self.kern, image)
        n_channel = image.size(1)
        padding = []
        for i in range(2):
            # See https://stackoverflow.com/questions/51131821/even-sized-kernels-with-same-padding-in-tensorflow
            # Split total "same" padding as evenly as possible so even-sized
            # kernels are handled too.
            sz = self.kern.size(-1 - i)
            total_pad = int(sz - 1)
            p0 = int(total_pad / 2)
            p1 = total_pad - p0
            padding += [p0, p1]
        # Manually pad.
        image = nn.functional.pad(image, padding, mode='reflect')
        # groups=n_channel makes this a depthwise (per-channel) convolution.
        return nn.functional.conv2d(image,
                                    self.kern.expand(n_channel,
                                                     -1, -1, -1),
                                    groups=n_channel)
class SeparablePerChannelBlur(nn.Module):
    def __init__(self, kern_x, kern_y=None):
        """Same as PerChannelBlur, but separable kernel.
        This is much faster. Useful for when we have separable kernel such as
        Gaussian.
        Args:
            kern_x: 1D tensor representing x-direction kernel.
            kern_y: 1D tensor representing y-direction kernel. If None, use
                the same thing as kern_x.
        """
        super().__init__()
        if kern_y is None:
            kern_y = kern_x
        self.kern_x = kern_x
        self.kern_y = kern_y
    def forward(self, image):
        self.kern_x = FloatTensor(self.kern_x).unsqueeze(0).unsqueeze(0)
        self.kern_y = FloatTensor(self.kern_y).unsqueeze(0).unsqueeze(0)
        self.kern_x = cuda_like(self.kern_x, image)
        self.kern_y = cuda_like(self.kern_y, image)
        n_channel = image.size(1)
        padding = []
        kern_sz = (self.kern_y.size(-1), self.kern_x.size(-1))
        for i in range(len(kern_sz)):
            # See https://stackoverflow.com/questions/51131821/even-sized-kernels-with-same-padding-in-tensorflow
            # Split total "same" padding as evenly as possible per axis.
            sz = kern_sz[-1 - i]
            total_pad = int(sz - 1)
            p0 = int(total_pad / 2)
            p1 = total_pad - p0
            padding += [p0, p1]
        # Manually pad.
        image_sz = image.size()
        image = nn.functional.pad(image, padding, mode='reflect')
        # Flatten batch/channel so each 2D slice is convolved independently.
        image = image.contiguous().view(-1,
                                        image.size(-2),
                                        image.size(-1))
        # Do convolution in each direction
        # width, b, height
        image = image.permute(2, 0, 1)
        image = nn.functional.conv1d(image,
                                     self.kern_y.expand(image.size(1),
                                                        -1, -1),
                                     groups=image.size(1))
        # height, b, width
        image = image.permute(2, 1, 0)
        image = nn.functional.conv1d(image,
                                     self.kern_x.expand(image.size(1),
                                                        -1, -1),
                                     groups=image.size(1))
        # b, height, width
        image = image.permute(1, 0, 2)
        return image.view(image_sz)
class MotionBlur(PerChannelBlur):
    """Motion blur built by rasterizing a polyline trajectory into a kernel."""
    # TODO: Think about how to generate big blur without a giant kernel.
    # Seems like this might not be possible
    def __init__(self, amt, direction,
                 kernel_sz=10,
                 dynrange_th=None,
                 dynrange_boost=100
                 ):
        """Motion Blur
        Args:
            amt: (list or number) list of amount of motion blur in pixel.
            direction: (list or number) direction of motion in degrees.
            kernel_sz: max size of kernel for performance consideration.
            dynrange_th: threshold above which will get boosted to simulate
                overexposed pixels. (See Burst Image Deblurring Using
                Permutation Invariant Convolutional Neural Networks by Aittala
                et al. 2018).
            dynrange_boost: Multiplicative factor used to boost dynamic range.
        """
        # normalize input into a good format.
        amt = number_to_list(amt)
        direction = number_to_list(direction)
        assert len(amt) == len(direction)
        # Create the blur kernel.
        # Walk the trajectory segment by segment, tracking its bounding box.
        origin = np.array([0.0, 0.0]).astype('float')
        pts = [origin]
        min_x = max_x = min_y = max_y = 0.0
        for idx in range(len(amt)):
            d = direction[idx] * np.pi / 180.0
            vec = np.array((np.cos(d), np.sin(d))) * amt[idx]
            pt = pts[-1] + vec
            x, y = pt[0], pt[1]
            if x < min_x:
                min_x = x
            if x > max_x:
                max_x = x
            if y < min_y:
                min_y = y
            if y > max_y:
                max_y = y
            pts.append(pt)
        # OpenCV's fixed-point line drawing: coordinates are shifted left by
        # cv_bit_shift bits (see cv2.polylines' `shift` argument).
        cv_bit_shift = 8
        mult = np.power(2, cv_bit_shift)
        if kernel_sz is None:
            # figure out kernel_sz
            ksz_x = max(max_x - min_x + 2, 8)
            ksz_y = max(max_y - min_y + 2, 8)
        else:
            ksz_x = ksz_y = kernel_sz
        ksz_x = int(ksz_x)
        ksz_y = int(ksz_y)
        kern = np.zeros((ksz_y, ksz_x)).astype('uint8')
        pts = np.array(pts)
        pts[:, 0] -= min_x
        pts[:, 1] -= min_y
        pts *= mult
        # TODO: Remove cv2 dependencies and use skimage instead.
        # LINE_AA only works with uint8 kernel, but there is a bug that it
        # only draws the first segment in this mode
        cv2.polylines(kern, np.int32([pts]), isClosed=False,
                      color=1.0, lineType=8,
                      thickness=1, shift=cv_bit_shift)
        kern = kern.astype('float32')
        # Normalize so the blur preserves overall brightness.
        kern = kern / kern.sum()
        super().__init__(kern)
        self.dynrange_th = dynrange_th
        self.dynrange_boost = dynrange_boost
        if dynrange_th is not None:
            self.rgb2yuv = ColorSpaceConversionMatrix(RGB2YUV)
    def forward(self, image):
        if self.dynrange_th is not None:
            # Boost bright (near-overexposed) pixels before blurring so they
            # bleed realistically, then clamp afterwards.
            y = self.rgb2yuv(image)[:, 0:1, ...]
            mask = y > self.dynrange_th
            mask = cuda_like(mask.float(), image)
            image = image * (1.0 + mask * self.dynrange_boost)
        image = super().forward(image)
        if self.dynrange_th is not None:
            image = torch.clamp(image, 0.0, 1.0)
        return image
class GaussianBlur(SeparablePerChannelBlur):
    def __init__(self, sigma_x, sigma_y=None,
                 sz_x=None, sz_y=None):
        """Channel-wise Gaussian Blur.
        Args:
            sigma_x: stdev in x-direction.
            sigma_y: stdev in y-direction. (default: sigma_x)
            sz_x = kernel size in x (default: twice sigma_x)
            sz_y = kernel size in y (default: twice sigma_y)
        """
        if sigma_y is None:
            sigma_y = sigma_x
        if sz_x is None:
            sz_x = max(int(2.0 * sigma_x), 1)
        if sz_y is None:
            sz_y = max(int(2.0 * sigma_y), 1)
        # Kernels are rebuilt in forward(), so the parent starts empty.
        super().__init__(None, None)
        self.sz_x = sz_x
        self.sz_y = sz_y
        self.sigma_x = sigma_x
        self.sigma_y = sigma_y
    def forward(self, image):
        # Regenerate the separable 1D kernels from the current parameters.
        self.kern_x = gausskern1d(self.sigma_x, self.sz_x)
        self.kern_y = gausskern1d(self.sigma_y, self.sz_y)
        return super().forward(image)
class Rotation90Mult(nn.Module):
    def __init__(self, angle):
        """ Rotate image in multiples of 90.
        """
        super().__init__()
        self.angle = int(angle) % 360
        if self.angle not in [0, 90, 180, 270]:
            raise ValueError("Angle must be multiple of 90 degrees")
    def forward(self, image):
        if self.angle == 0:
            return image
        # torch.rot90 in the (H, W) plane reproduces the original
        # transpose/flip pairs for 90, 180 and 270 degrees.
        return torch.rot90(image, self.angle // 90, dims=[2, 3])
| 60,184 | 36.615625 | 130 | py |
kernel-prediction-networks-PyTorch | kernel-prediction-networks-PyTorch-master/data_generation/pipeline.py | import torch.nn as nn
import torch
from . import image_processing
class ImageDegradationPipeline(nn.Module):
    def __init__(self, configs):
        """ Image Degradation Pipeline.
        Args:
            configs: list of modules to be implemented and their parameters.
                The list should contain tuple of a form (str, dict),
                where str indicate module class name (see
                image_processing.py), and dict contain the key-value of
                the parameter of such module.
        """
        super().__init__()
        self.initialize_pipeline(configs)
    def initialize_pipeline(self, configs):
        """Instantiate each configured stage and chain them sequentially."""
        pipeline = []
        # initialize module.
        for c in configs:
            # Look the stage class up by name in image_processing and build
            # it with the supplied keyword arguments.
            class_ = getattr(image_processing, c[0])
            module = class_(**c[1])
            pipeline.append(module)
        self._pipeline = nn.Sequential(*pipeline)
        # self._pipeline = tuple(pipeline)
    def forward(self, image):
        # import torchvision.transforms as transforms
        # trans = transforms.ToPILImage()
        # for index, func in enumerate(self._pipeline):
        #     image = func(image)
        #     # save images
        #     # image_trans = trans((torch.clamp(image, 0.0, 1.0)).squeeze())
        #     # image_trans.save('./train_images/tmp_{:02d}.png'.format(index), quality=100)
        # return image
        return self._pipeline(image)
| 1,439 | 34.121951 | 92 | py |
kernel-prediction-networks-PyTorch | kernel-prediction-networks-PyTorch-master/data_generation/kernel.py | import torch
def gausskern1d(sig, sz=None):
    """ 1D Gaussian kernel.
    Args:
        sig: stdev of the kernel
        sz: kernel size (default: 2*int(sig) + 1, at least 3).
    Returns:
        1D tensor of length sz that sums to 1.
    """
    if sz is None:
        sz = int(2 * int(sig) + 1)
    sz = max(sz, 3)
    hi = float(int(sz / 2))
    lo = hi - sz + 1
    # Evaluate exp(-x^2 / 2) on a grid measured in units of sigma.
    grid = torch.linspace(lo, hi, int(sz)) / sig
    weights = torch.exp(-(grid * grid) / 2.0)
    return weights / weights.sum()
def gausskern2d(sz_x, sig_x, sz_y=None, sig_y=None):
    """Returns a 2D Gaussian kernel array.
    Modified from https://stackoverflow.com/questions/29731726/how-to-calculate-a-gaussian-kernel-matrix-efficiently-in-numpy
    Args:
        sz_{x,y}: kernel size.
        sig_{x,y}: stdev of kernel in each direction
    """
    if sz_y is None:
        sz_y = sz_x
    if sig_y is None:
        sig_y = sig_x
    # BUG FIX: gausskern1d's signature is (sig, sz); the arguments were
    # previously passed as (sz, sig), i.e. size was used as the stdev.
    kern1d_x = gausskern1d(sig_x, sz_x)
    kern1d_y = gausskern1d(sig_y, sz_y)
    kernel_raw = torch.einsum('i,j->ij', kern1d_x, kern1d_y)
    # This einsum is equivalent to outer product (no repeated indices).
    # For future reference
    # kernel_raw = np.sqrt(np.einsum('ij,k', kernel_raw, kern_r))
    kernel = kernel_raw/kernel_raw.sum()
    return kernel
| 1,285 | 26.956522 | 129 | py |
kernel-prediction-networks-PyTorch | kernel-prediction-networks-PyTorch-master/utils/image_utils.py | import numpy as np
import torch
def center_crop_tensor(tensor, w, h):
    """Crop the central h x w window from the last two dims of *tensor*."""
    full_w = tensor.size(-1)
    full_h = tensor.size(-2)
    if full_w < w or full_h < h:
        raise RuntimeError("Crop size is larger than image size.")
    top = int((full_h - h) / 2)
    left = int((full_w - w) / 2)
    return tensor[..., top:top + h, left:left + w]
def bayer_crop_tensor(tensor, w, h, mode="random"):
    """Crop that preserves Bayer phase"""
    full_w = tensor.size(-1)
    full_h = tensor.size(-2)
    if full_w < w or full_h < h:
        raise RuntimeError("Crop size ({}) is larger than image size ({})." \
                           .format((w, h), (full_w, full_h)))
    if mode == "random":
        top = np.random.choice(full_h + 1 - h)
        left = np.random.choice(full_w + 1 - w)
    elif mode == "center":
        top = int((full_h - h) / 2)
        left = int((full_w - w) / 2)
    else:
        raise ValueError("Bayer crop: unrecognized mode ({}). Must be 'random' or 'center'.".format(mode))
    # Snap offsets to even indices so the crop keeps the source Bayer phase.
    top = top - (top % 2)
    left = left - (left % 2)
    return tensor[..., top:top + h, left:left + w]
def random_crop_tensor(tensor, w, h):
    """Crop a uniformly random (h, w) window from the last two dims of *tensor*.

    Args:
        tensor: tensor whose trailing two dims are (height, width).
        w, h: crop width and height.

    Returns:
        View of *tensor* restricted to the sampled window.

    Raises:
        RuntimeError: if the crop is larger than the image.
    """
    tw = tensor.size(-1)
    th = tensor.size(-2)
    if tw < w or th < h:
        raise RuntimeError("Crop size is larger than image size.")
    # +1 so a crop exactly the image size is valid: np.random.randint(0)
    # would raise ValueError (the high bound is exclusive).
    h0 = np.random.randint(th - h + 1)
    w0 = np.random.randint(tw - w + 1)
    h1 = h0 + h
    w1 = w0 + w
    return tensor[..., h0:h1, w0:w1]
def check_nan_tensor(x):
    """Return a scalar bool tensor: does *x* contain any NaN entries?"""
    return torch.any(torch.isnan(x))
| 1,497 | 26.740741 | 106 | py |
kernel-prediction-networks-PyTorch | kernel-prediction-networks-PyTorch-master/utils/training_util.py | import numpy as np
import glob
import torch
import shutil
import os
import cv2
import numbers
import skimage
from collections import OrderedDict
from configobj import ConfigObj
from validate import Validator
from data_generation.pipeline import ImageDegradationPipeline
class MovingAverage(object):
    """Running mean over the last *n* values.

    Uses a bounded deque so eviction of the oldest value is O(1); the
    previous list-based version paid O(n) for `del cache[0]` on every
    update past the window size.
    """
    def __init__(self, n):
        # Local import keeps the module's import block untouched.
        from collections import deque
        self.n = n
        # maxlen=n drops the oldest entry automatically on append.
        self._cache = deque(maxlen=n)
        self.mean = 0

    def update(self, val):
        """Push *val* into the window and refresh the mean."""
        self._cache.append(val)
        self.mean = sum(self._cache) / len(self._cache)

    def get_value(self):
        """Return the current windowed mean (0 before any update)."""
        return self.mean
def save_checkpoint(state, is_best, checkpoint_dir, n_iter, max_keep=10):
    """Save *state* as '<n_iter:06d>.pth.tar' and prune old checkpoints.

    Args:
        state: picklable object (e.g. dict of tensors/metadata) to save.
        is_best: if True, also copy the file to 'model_best.pth.tar'.
        checkpoint_dir: directory holding the checkpoints.
        n_iter: global iteration number, used as the zero-padded file name.
        max_keep: number of numbered checkpoints to retain.
    """
    filename = os.path.join(checkpoint_dir, "{:06d}.pth.tar".format(n_iter))
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename,
                        os.path.join(checkpoint_dir,
                                     'model_best.pth.tar'))
    # Prune only the *numbered* checkpoints. Previously every file in the
    # directory (including 'model_best.pth.tar') counted toward max_keep,
    # which deleted one extra real checkpoint whenever a best copy existed.
    # Lexicographic sort matches numeric order because names are zero-padded.
    files = sorted(f for f in os.listdir(checkpoint_dir)
                   if f.split('.')[0].isdigit())
    rm_files = files[0:max(0, len(files) - max_keep)]
    for f in rm_files:
        os.remove(os.path.join(checkpoint_dir, f))
def _represent_int(s):
try:
int(s)
return True
except ValueError:
return False
def load_checkpoint(checkpoint_dir, best_or_latest='best'):
    """Load a checkpoint from *checkpoint_dir*.

    Args:
        checkpoint_dir: directory containing '*.pth.tar' checkpoints.
        best_or_latest: 'best' loads 'model_best.pth.tar'; a number loads
            that exact iteration; any other value loads the newest
            numbered checkpoint.

    Returns:
        Whatever torch.load() yields for the chosen file.

    Raises:
        ValueError: if a requested iteration number does not exist (the
            message lists the iterations that are available).
    """
    if best_or_latest == 'best':
        checkpoint_file = os.path.join(checkpoint_dir, 'model_best.pth.tar')
    elif isinstance(best_or_latest, numbers.Number):
        checkpoint_file = os.path.join(checkpoint_dir,
                                       '{:06d}.pth.tar'.format(best_or_latest))
        if not os.path.exists(checkpoint_file):
            # Requested iteration is missing: report what exists instead.
            files = glob.glob(os.path.join(checkpoint_dir, '*.pth.tar'))
            basenames = [os.path.basename(f).split('.')[0] for f in files]
            iters = sorted([int(b) for b in basenames if _represent_int(b)])
            raise ValueError('Available iterations are ({} requested): {}'.format(best_or_latest, iters))
    else:
        # Fall back to the latest numbered checkpoint.
        files = glob.glob(os.path.join(checkpoint_dir, '*.pth.tar'))
        basenames = [os.path.basename(f).split('.')[0] for f in files]
        iters = sorted([int(b) for b in basenames if _represent_int(b)])
        checkpoint_file = os.path.join(checkpoint_dir,
                                       '{:06d}.pth.tar'.format(iters[-1]))
    return torch.load(checkpoint_file)
def load_statedict_runtime(checkpoint_dir, best_or_latest='best'):
    """Load a state_dict usable for single-GPU/CPU inference.

    During training nn.DataParallel prefixes every parameter name with
    'module.'; that wrapper does not exist at test time, so the prefix is
    stripped here.

    Args:
        checkpoint_dir: directory containing the checkpoints.
        best_or_latest: forwarded to load_checkpoint().

    Returns:
        (state_dict, global_iter) with DataParallel-free parameter names.
    """
    ckpt = load_checkpoint(checkpoint_dir, best_or_latest)
    state_dict = ckpt['state_dict']
    global_iter = ckpt['global_iter']
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        # Strip 'module.' only when present; the previous unconditional
        # k[7:] corrupted names saved without DataParallel.
        name = k[7:] if k.startswith('module.') else k
        new_state_dict[name] = v
    return new_state_dict, global_iter
def prep_and_vis_flow(flow, flow_visualizer, max_flow=None):
    """Render the first flow field of a batch to a numpy array.

    *flow_visualizer* is an opaque callable; presumably it maps a single
    flow tensor (batch dim removed here) to an image tensor -- TODO
    confirm its exact contract.
    """
    flow = flow_visualizer(flow[0, :, :, :], max_flow=max_flow)
    flow = flow.cpu().data.numpy()
    return flow
def put_text_on_img(image, text, loc=(20, 100), color=(1, 0, 0)):
    """ Put text on flow
    Args:
        image: numpy array of dimension (3, h, w)
        text: text to put on.
        loc: bottom-left location of text in (x, y) from top-left of image.
        color: color of the text.
    Returns:
        image with text written on it.
    """
    # cv2 draws on HWC arrays in place, so move channels last and copy to
    # avoid mutating the caller's array.
    image = np.array(np.moveaxis(image, 0, -1)).copy()
    cv2.putText(image, text, loc, cv2.FONT_HERSHEY_SIMPLEX, 1, color)
    # Back to CHW for the rest of the pipeline.
    return np.moveaxis(image, -1, 0)
def read_config(config_file, config_spec):
    """Read *config_file* validated against the configspec *config_spec*.

    Validation coerces values to the types declared in the spec. Note the
    result of config.validate() is discarded, so spec violations do not
    raise here.
    """
    configspec = ConfigObj(config_spec, raise_errors=True)
    config = ConfigObj(config_file,
                       configspec=configspec,
                       raise_errors=True,
                       file_error=True)
    config.validate(Validator())
    return config
def torch2numpy(tensor, gamma=None):
    """Clamp an NCHW tensor to [0, 1], optionally gamma-encode, scale to
    [0, 255], and return it as an NHWC numpy array."""
    out = torch.clamp(tensor, 0.0, 1.0)
    if gamma is not None:
        out = torch.pow(out, gamma)
    out = out * 255.0
    return out.permute(0, 2, 3, 1).cpu().data.numpy()
def prep_for_vis(degraded_img, target_img, output_img, exposure=None):
    """Convert three NCHW tensors to gamma-encoded uint8 NHWC arrays.

    If *exposure* is given, all three images are exposure-adjusted by that
    many stops (with clipping) before encoding.
    """
    if exposure is not None:
        def adjust_exp(img, exp):
            # Clip -> adjust exposure -> clip, via the degradation pipeline.
            configs = [
                ('PixelClip', {}),
                ('ExposureAdjustment', {'nstops': exp}),
                ('PixelClip', {}),
            ]
            return ImageDegradationPipeline(configs)(img)
        degraded_img = adjust_exp(degraded_img, exposure)
        target_img = adjust_exp(target_img, exposure)
        output_img = adjust_exp(output_img, exposure)
    degraded_tf = torch2numpy(degraded_img, 1.0 / 2.2).astype('uint8')
    # Gamma encode output for illustration purpose
    target_tf = torch2numpy(target_img, 1.0 / 2.2).astype('uint8')
    output_tf = torch2numpy(output_img, 1.0 / 2.2).astype('uint8')
    return degraded_tf, target_tf, output_tf
def prep_for_vis_arr(img_arr, exposure=None):
    """Like prep_for_vis, but for an arbitrary list of NCHW tensors."""
    if exposure is not None:
        configs = [
            ('PixelClip', {}),
            ('ExposureAdjustment', {'nstops': exposure}),
            ('PixelClip', {}),
        ]
        exp_adj = ImageDegradationPipeline(configs)
        img_arr = [exp_adj(im) for im in img_arr]
    # Gamma-encode (1/2.2) and quantize to uint8 for display.
    img_arr = [torch2numpy(im, 1.0 / 2.2).astype('uint8') for im in img_arr]
    return img_arr
def create_vis_arr(img_arr, exposure=None):
    """Encode a list of images and tile them along the width axis."""
    img_arr = prep_for_vis_arr(img_arr, exposure)
    # Arrays are NHWC here, so axis=-2 concatenates horizontally.
    return np.concatenate(img_arr, axis=-2)
def create_vis(degraded_img, target_img, output_img, exposure=None):
    """Gamma-encode the three images and tile them horizontally into one
    uint8 visualization (degraded | target | output).

    Args:
        degraded_img, target_img, output_img: NCHW tensors in [0, 1].
        exposure: optional stop adjustment applied before encoding.
    """
    # Bug fix: *exposure* was accepted but never forwarded, so the
    # requested exposure adjustment was silently skipped.
    degraded_tf, target_tf, output_tf = prep_for_vis(degraded_img,
                                                     target_img,
                                                     output_img,
                                                     exposure)
    # Arrays are NHWC, so axis=-2 concatenates along width.
    img = np.concatenate((degraded_tf,
                          target_tf,
                          output_tf),
                         axis=-2)
    return img
def calculate_psnr(output_img, target_img):
    """Mean PSNR (dB) over the batch, computed on 0-255-range arrays.

    NOTE(review): skimage.measure.compare_psnr was removed in newer
    scikit-image releases (renamed skimage.metrics.peak_signal_noise_ratio),
    so this requires an old scikit-image version.
    """
    target_tf = torch2numpy(target_img)
    output_tf = torch2numpy(output_img)
    psnr = 0.0
    n = 0.0
    # Average per-image PSNR across the batch dimension.
    for im_idx in range(output_tf.shape[0]):
        psnr += skimage.measure.compare_psnr(target_tf[im_idx, ...],
                                             output_tf[im_idx, ...],
                                             data_range=255)
        n += 1.0
    return psnr / n
def calculate_ssim(output_img, target_img):
    """Mean SSIM over the batch, computed on 0-255-range arrays.

    NOTE(review): skimage.measure.compare_ssim was removed in newer
    scikit-image releases (renamed skimage.metrics.structural_similarity),
    so this requires an old scikit-image version.
    """
    target_tf = torch2numpy(target_img)
    output_tf = torch2numpy(output_img)
    ssim = 0.0
    n = 0.0
    # Average per-image multichannel SSIM across the batch dimension.
    for im_idx in range(output_tf.shape[0]):
        ssim += skimage.measure.compare_ssim(target_tf[im_idx, ...],
                                             output_tf[im_idx, ...],
                                             multichannel=True,
                                             data_range=255)
        n += 1.0
    return ssim / n
| 7,118 | 34.41791 | 105 | py |
prospector | prospector-master/doc/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# prospector documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 8 16:26:26 2018.
#
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
#'sphinx.napoleon',
'sphinx.ext.mathjax',
# 'sphinx.ext.doctest',
# 'sphinx.ext.viewcode',
# 'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'prospector'
copyright = '2014-2020, Benjamin Johnson and Contributors'
author = 'Benjamin Johnson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4'
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
autodoc_mock_imports = ["sedpy", "h5py"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_options = {"logo_only": True}
# html_theme_options = {}
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
# html_title = 'prospector v0.4'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/logo_name.png"
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.png"
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
html_last_updated_fmt = ''
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'prospectordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
#latex_documents = [
# (master_doc, 'prospector.tex', 'prospector Documentation',
# 'Benjamin Johnson', 'manual'),]
| 5,534 | 30.99422 | 80 | py |
Efficient-FedRec | Efficient-FedRec-main/src/main.py | import argparse
from pathlib import Path
from tqdm import tqdm
import random
import wandb
import numpy as np
import os
import pickle
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
from agg import Aggregator
from model import Model, TextEncoder, UserEncoder
from data import TrainDataset, NewsDataset, UserDataset
from metrics import evaluation_split
def parse_args():
    """Build the command-line options for federated news-recommendation runs.

    Returns:
        argparse.Namespace with data paths, model, and training options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--wandb_entity", type=str)
    parser.add_argument(
        "--mode", type=str, default="train", choices=["train", "test", "predict"]
    )
    parser.add_argument(
        "--data_path",
        type=str,
        default=os.getenv("AMLT_DATA_DIR", "../data"),
        help="path to downloaded raw adressa dataset",
    )
    parser.add_argument(
        "--out_path",
        type=str,
        default=os.getenv("AMLT_OUTPUT_DIR", "../output"),
        help="path to downloaded raw adressa dataset",
    )
    parser.add_argument(
        "--data",
        type=str,
        default="mind",
        choices=["mind", "adressa"],
        help="decide which dataset for preprocess",
    )
    parser.add_argument("--bert_type", type=str, default="bert-base-uncased")
    parser.add_argument(
        "--trainable_layers", type=int, nargs="+", default=[6, 7, 8, 9, 10, 11]
    )
    parser.add_argument("--user_lr", type=float, default=0.00005)
    parser.add_argument("--news_lr", type=float, default=0.00005)
    parser.add_argument("--user_num", type=int, default=50)
    # Bug fix: this is a history *length* used for list padding, e.g.
    # `[0] * (max_his_len - len(his))` in data.py; multiplying a list by a
    # float raises TypeError, so the option must parse as int (was float).
    parser.add_argument("--max_his_len", type=int, default=50)
    parser.add_argument(
        "--npratio",
        type=int,
        default=20,
        help="randomly sample neg_num negative impression for every positive behavior",
    )
    parser.add_argument("--max_train_steps", type=int, default=2000)
    parser.add_argument("--validation_steps", type=int, default=100)
    parser.add_argument("--name", type=str, default="efficient-fedrec")
    args = parser.parse_args()
    return args
def process_news_grad(candidate_info, his_info):
    """Accumulate per-news-id gradients from the candidate and history branches.

    Each info triple is (news_ids, news_vecs, news_grads). A news id that
    appears several times (in either branch) has its 400-d gradients summed.

    Returns:
        dict mapping news id -> summed 400-d gradient.
    """
    candidate_ids, _, candidate_grads = candidate_info
    his_ids, _, his_grads = his_info

    news_grad = {}

    def _accumulate(ids, grads):
        # Flatten to (num_news,) ids aligned with (num_news, 400) grads.
        for nid, grad in zip(ids.reshape(-1), grads.reshape(-1, 400)):
            if nid in news_grad:
                news_grad[nid] += grad
            else:
                news_grad[nid] = grad

    # Same accumulation order as before: history first, then candidates.
    _accumulate(his_ids, his_grads)
    _accumulate(candidate_ids, candidate_grads)
    return news_grad
def process_user_grad(model_param, sample_num, user_sample):
    """Scale each user-encoder gradient by this batch's share of the round.

    Args:
        model_param: iterable of (name, parameter) pairs with .grad set.
        sample_num: number of samples in the current batch.
        user_sample: total samples across the sampled users this round.

    Returns:
        dict mapping parameter name -> scaled gradient tensor.
    """
    scale = sample_num / user_sample
    return {name: param.grad * scale for name, param in model_param}
def collect_users_nids(train_sam, users, user_indices, nid2index):
    """Gather the news indices touched by *users*, plus the pad index 0.

    Args:
        train_sam: list of (uid, pos, negs, history, extra) samples.
        users: user ids sampled for this round.
        user_indices: user id -> list of sample indices in train_sam.
        nid2index: news id -> dense news index.

    Returns:
        (unique news indices as a list, total sample count for *users*).
    """
    touched = {0}
    user_sample = 0
    for user in users:
        sample_ids = user_indices[user]
        user_sample += len(sample_ids)
        for idx in sample_ids:
            _, pos, neg, his, _ = train_sam[idx]
            touched.update(nid2index[n] for n in set([pos] + neg + his))
    return list(touched), user_sample
def train_on_step(
    agg, model, args, user_indices, user_num, train_sam, nid2index, news_index, device
):
    """Run one federated round: sample clients, train locally, aggregate.

    Samples *user_num* users, refreshes the news vectors their samples
    need, computes gradients w.r.t. the user encoder and the news
    representations per batch, and hands both to the aggregator, which
    then applies the global update.

    Returns:
        Mean training loss over the round's batches.
    """
    # sample users
    # NOTE(review): random.sample on dict_keys raises TypeError on
    # Python >= 3.11; wrap in list() if upgrading.
    users = random.sample(user_indices.keys(), user_num)
    nids, user_sample = collect_users_nids(train_sam, users, user_indices, nid2index)
    agg.gen_news_vecs(nids)
    train_ds = TrainDataset(
        args, train_sam, users, user_indices, nid2index, agg, news_index
    )
    train_dl = DataLoader(train_ds, batch_size=16384, shuffle=True, num_workers=0)
    model.train()
    loss = 0
    for cnt, batch_sample in enumerate(train_dl):
        # Start each batch from the server's current user encoder; a fresh
        # SGD optimizer is created since only gradients (not optimizer
        # state) are communicated -- optimizer.step() is never called here.
        model.user_encoder.load_state_dict(agg.user_encoder.state_dict())
        optimizer = optim.SGD(model.parameters(), lr=args.user_lr)
        candidate_news, candidate_news_vecs, his, his_vecs, label = batch_sample
        candidate_news_vecs = candidate_news_vecs.to(device)
        his_vecs = his_vecs.to(device)
        sample_num = his_vecs.shape[0]
        label = label.to(device)
        # compute gradients for user model and news representations
        candidate_news_vecs.requires_grad = True
        his_vecs.requires_grad = True
        bz_loss, y_hat = model(candidate_news_vecs, his_vecs, label)
        loss += bz_loss.detach().cpu().numpy()
        optimizer.zero_grad()
        bz_loss.backward()
        # Scale gradients by this batch's share of the round's samples so
        # the aggregated update is a weighted average.
        candaidate_grad = candidate_news_vecs.grad.detach().cpu() * (
            sample_num / user_sample
        )
        candidate_vecs = candidate_news_vecs.detach().cpu().numpy()
        candidate_news = candidate_news.numpy()
        his_grad = his_vecs.grad.detach().cpu() * (sample_num / user_sample)
        his_vecs = his_vecs.detach().cpu().numpy()
        his = his.numpy()
        news_grad = process_news_grad(
            [candidate_news, candidate_vecs, candaidate_grad], [his, his_vecs, his_grad]
        )
        user_grad = process_user_grad(
            model.user_encoder.named_parameters(), sample_num, user_sample
        )
        agg.collect(news_grad, user_grad)
    loss = loss / (cnt + 1)
    # Apply the accumulated gradients on the server.
    agg.update()
    return loss
def validate(args, agg, valid_sam, nid2index, news_index, device):
    """Score the aggregator's current model on the validation impressions.

    Returns:
        (auc, mrr, ndcg@5, ndcg@10), each averaged over impressions.
    """
    # Refresh vectors for the full news inventory before scoring.
    agg.gen_news_vecs(list(range(len(news_index))))
    agg.user_encoder.eval()
    user_dataset = UserDataset(args, valid_sam, agg.news_vecs, nid2index)
    user_vecs = []
    user_dl = DataLoader(user_dataset, batch_size=4096, shuffle=False, num_workers=0)
    with torch.no_grad():
        for his in tqdm(user_dl):
            his = his.to(device)
            user_vec = agg.user_encoder(his).detach().cpu().numpy()
            user_vecs.append(user_vec)
    user_vecs = np.concatenate(user_vecs)
    val_scores = evaluation_split(agg.news_vecs, user_vecs, valid_sam, nid2index)
    val_auc, val_mrr, val_ndcg, val_ndcg10 = [
        np.mean(i) for i in list(zip(*val_scores))
    ]
    return val_auc, val_mrr, val_ndcg, val_ndcg10
def test(args, data_path, out_model_path, out_path, device):
    """Evaluate the saved best model on the test split; append metrics to log.txt.

    NOTE(review): unlike validate(), inference here is not wrapped in
    torch.no_grad(); it works but wastes memory on autograd buffers.
    """
    with open(data_path / "test_sam_uid.pkl", "rb") as f:
        test_sam = pickle.load(f)
    with open(data_path / "bert_test_nid2index.pkl", "rb") as f:
        test_nid2index = pickle.load(f)
    test_news_index = np.load(data_path / "bert_test_news_index.npy", allow_pickle=True)
    # Rebuild the encoders and load the best checkpoint.
    text_encoder = TextEncoder(bert_type=args.bert_type).to(device)
    user_encoder = UserEncoder().to(device)
    ckpt = torch.load(out_model_path / f"{args.name}-{args.data}.pkl")
    text_encoder.load_state_dict(ckpt["text_encoder"])
    user_encoder.load_state_dict(ckpt["user_encoder"])
    # Encode every test news article.
    test_news_dataset = NewsDataset(test_news_index)
    news_dl = DataLoader(
        test_news_dataset, batch_size=512, shuffle=False, num_workers=0
    )
    news_vecs = []
    text_encoder.eval()
    for news in tqdm(news_dl):
        news = news.to(device)
        news_vec = text_encoder(news).detach().cpu().numpy()
        news_vecs.append(news_vec)
    news_vecs = np.concatenate(news_vecs)
    # Encode each impression's click history into a user vector.
    user_dataset = UserDataset(args, test_sam, news_vecs, test_nid2index)
    user_vecs = []
    user_dl = DataLoader(user_dataset, batch_size=4096, shuffle=False, num_workers=0)
    user_encoder.eval()
    for his in tqdm(user_dl):
        his = his.to(device)
        user_vec = user_encoder(his).detach().cpu().numpy()
        user_vecs.append(user_vec)
    user_vecs = np.concatenate(user_vecs)
    test_scores = evaluation_split(news_vecs, user_vecs, test_sam, test_nid2index)
    test_auc, test_mrr, test_ndcg, test_ndcg10 = [
        np.mean(i) for i in list(zip(*test_scores))
    ]
    with open(out_path / f"log.txt", "a") as f:
        f.write(
            f"test auc: {test_auc:.4f}, mrr: {test_mrr:.4f}, ndcg5: {test_ndcg:.4f}, ndcg10: {test_ndcg10:.4f}\n"
        )
def predict(args, data_path, out_model_path, out_path, device):
    """Write MIND-format ranking predictions for the test split.

    Mirrors test() for loading and encoding, then ranks each impression's
    candidates by dot-product score and writes '<impr_id> [rank,...]'
    lines, sorted by numeric impression id, to prediction.txt.
    """
    with open(data_path / "test_sam_uid.pkl", "rb") as f:
        test_sam = pickle.load(f)
    with open(data_path / "bert_test_nid2index.pkl", "rb") as f:
        test_nid2index = pickle.load(f)
    test_news_index = np.load(data_path / "bert_test_news_index.npy", allow_pickle=True)
    # Rebuild the encoders and load the best checkpoint.
    text_encoder = TextEncoder(bert_type=args.bert_type).to(device)
    user_encoder = UserEncoder().to(device)
    ckpt = torch.load(out_model_path / f"{args.name}-{args.data}.pkl")
    text_encoder.load_state_dict(ckpt["text_encoder"])
    user_encoder.load_state_dict(ckpt["user_encoder"])
    # Encode every test news article.
    test_news_dataset = NewsDataset(test_news_index)
    news_dl = DataLoader(
        test_news_dataset, batch_size=512, shuffle=False, num_workers=0
    )
    news_vecs = []
    text_encoder.eval()
    for news in tqdm(news_dl):
        news = news.to(device)
        news_vec = text_encoder(news).detach().cpu().numpy()
        news_vecs.append(news_vec)
    news_vecs = np.concatenate(news_vecs)
    # Encode each impression's click history into a user vector.
    user_dataset = UserDataset(args, test_sam, news_vecs, test_nid2index)
    user_vecs = []
    user_dl = DataLoader(user_dataset, batch_size=4096, shuffle=False, num_workers=0)
    user_encoder.eval()
    for his in tqdm(user_dl):
        his = his.to(device)
        user_vec = user_encoder(his).detach().cpu().numpy()
        user_vecs.append(user_vec)
    user_vecs = np.concatenate(user_vecs)
    pred_lines = []
    for i in tqdm(range(len(test_sam))):
        impr_id, poss, negs, _, _ = test_sam[i]
        user_vec = user_vecs[i]
        news_ids = [test_nid2index[i] for i in poss + negs]
        news_vec = news_vecs[news_ids]
        # Dot-product score per candidate.
        y_score = np.multiply(news_vec, user_vec)
        y_score = np.sum(y_score, axis=1)
        # Rank 1 = highest score; ranks are in candidate (pos + neg) order.
        pred_rank = (np.argsort(np.argsort(y_score)[::-1]) + 1).tolist()
        pred_rank = '[' + ','.join([str(i) for i in pred_rank]) + ']'
        pred_lines.append((int(impr_id), ' '.join([impr_id, pred_rank])+ '\n'))
    # Sort the output lines by numeric impression id.
    pred_lines.sort(key=lambda x: x[0])
    pred_lines = [x[1] for x in pred_lines]
    with open(out_path / 'prediction.txt', 'w') as f:
        f.writelines(pred_lines)
if __name__ == "__main__":
    # Entry point: parse options, pin to GPU 0, then dispatch on --mode.
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)
    if args.mode == "train":
        wandb.init(
            project=f"{args.name}-{args.data}", config=args, entity=args.wandb_entity
        )
        data_path = Path(args.data_path) / args.data
        out_path = Path(args.out_path) / f"{args.name}-{args.data}"
        out_model_path = out_path / "model"
        out_model_path.mkdir(exist_ok=True, parents=True)
        # load preprocessed data
        with open(data_path / "bert_nid2index.pkl", "rb") as f:
            nid2index = pickle.load(f)
        news_index = np.load(data_path / "bert_news_index.npy", allow_pickle=True)
        with open(data_path / "train_sam_uid.pkl", "rb") as f:
            train_sam = pickle.load(f)
        with open(data_path / "valid_sam_uid.pkl", "rb") as f:
            valid_sam = pickle.load(f)
        with open(data_path / "user_indices.pkl", "rb") as f:
            user_indices = pickle.load(f)
        news_dataset = NewsDataset(news_index)
        agg = Aggregator(args, news_dataset, news_index, device)
        model = Model().to(device)
        best_auc = 0
        # Federated training loop: one aggregated round per step, periodic
        # validation, best-AUC checkpointing.
        for step in range(args.max_train_steps):
            loss = train_on_step(
                agg,
                model,
                args,
                user_indices,
                args.user_num,
                train_sam,
                nid2index,
                news_index,
                device,
            )
            wandb.log({"train loss": loss}, step=step + 1)
            if (step + 1) % args.validation_steps == 0:
                val_auc, val_mrr, val_ndcg, val_ndcg10 = validate(
                    args, agg, valid_sam, nid2index, news_index, device
                )
                wandb.log(
                    {
                        "valid auc": val_auc,
                        "valid mrr": val_mrr,
                        "valid ndcg@5": val_ndcg,
                        "valid ndcg@10": val_ndcg10,
                    },
                    step=step + 1,
                )
                with open(out_path / f"log.txt", "a") as f:
                    f.write(
                        f"[{step}] round auc: {val_auc:.4f}, mrr: {val_mrr:.4f}, ndcg5: {val_ndcg:.4f}, ndcg10: {val_ndcg10:.4f}\n"
                    )
                # Keep only the best-AUC encoder weights.
                if val_auc > best_auc:
                    best_auc = val_auc
                    wandb.run.summary["best_auc"] = best_auc
                    torch.save(
                        {
                            "text_encoder": agg.text_encoder.state_dict(),
                            "user_encoder": agg.user_encoder.state_dict(),
                        },
                        out_model_path / f"{args.name}-{args.data}.pkl",
                    )
                    with open(out_path / f"log.txt", "a") as f:
                        f.write(f"[{step}] round save model\n")
    elif args.mode == "test":
        data_path = Path(args.data_path) / args.data
        out_path = Path(args.out_path) / f"{args.name}-{args.data}"
        out_model_path = out_path / "model"
        test(args, data_path, out_model_path, out_path, device)
    elif args.mode == "predict":
        data_path = Path(args.data_path) / args.data
        out_path = Path(args.out_path) / f"{args.name}-{args.data}"
        out_model_path = out_path / "model"
        predict(args, data_path, out_model_path, out_path, device)
| 13,768 | 33.946701 | 131 | py |
Efficient-FedRec | Efficient-FedRec-main/src/agg.py | import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from model import TextEncoder, UserEncoder
import torch.optim as optim
from data import NewsPartDataset
class NewsUpdatorDataset(Dataset):
    """Pairs each selected news id's token matrix with its accumulated gradient."""

    def __init__(self, news_index, news_ids, news_grads):
        self.news_index = news_index
        self.news_grads = news_grads
        self.news_ids = news_ids

    def __len__(self):
        return len(self.news_ids)

    def __getitem__(self, idx):
        # Look up the tokens for the idx-th id; grads are aligned by position.
        return self.news_index[self.news_ids[idx]], self.news_grads[idx]
class Aggregator:
    def __init__(self, args, news_dataset, news_index, device):
        """Server-side aggregator holding the global text and user encoders.

        Freezes all of BERT, then re-enables the encoder layers listed in
        args.trainable_layers; -1 additionally unfreezes the embeddings.
        """
        self.device = device
        self.text_encoder = TextEncoder(bert_type=args.bert_type).to(device)
        self.user_encoder = UserEncoder().to(device)
        self.news_optimizer = optim.Adam(self.text_encoder.parameters(), lr=args.news_lr)
        self.user_optimizer = optim.Adam(self.user_encoder.parameters(), lr=args.user_lr)
        # Freeze everything first, then selectively unfreeze.
        for param in self.text_encoder.bert.parameters():
            param.requires_grad = False
        for index, layer in enumerate(self.text_encoder.bert.encoder.layer):
            if index in args.trainable_layers:
                for param in layer.parameters():
                    param.requires_grad = True
        if -1 in args.trainable_layers:
            for param in self.text_encoder.bert.embeddings.parameters():
                param.requires_grad = True
        self.news_dataset = news_dataset
        self.news_index = news_index
        # Update-time bookkeeping (see average_update_time).
        self.time = 0
        self.cnt = 0
        self._init_grad_param()
    def _init_grad_param(self):
        """Reset accumulated news gradients and zero both optimizers."""
        self.news_grads = {}
        self.user_optimizer.zero_grad()
        self.news_optimizer.zero_grad()
    def gen_news_vecs(self, nids):
        """Recompute 400-d vectors for news ids *nids*; other rows stay zero.

        Caches the (num_news, 400) float32 array in self.news_vecs and
        returns it; raises ValueError if the encoder produced NaNs.
        """
        self.text_encoder.eval()
        news_ds = NewsPartDataset(self.news_index, nids)
        news_dl = DataLoader(news_ds, batch_size=2048, shuffle=False, num_workers=0)
        news_vecs = np.zeros((len(self.news_index), 400), dtype='float32')
        with torch.no_grad():
            # Scatter each encoded batch back into its rows by id.
            for nids, news in news_dl:
                news = news.to(self.device)
                news_vec = self.text_encoder(news).detach().cpu().numpy()
                news_vecs[nids.numpy()] = news_vec
        if np.isnan(news_vecs).any():
            raise ValueError("news_vecs contains nan")
        self.news_vecs = news_vecs
        return news_vecs
    def get_news_vecs(self, idx):
        """Return cached news vectors at *idx* (int or array of indices)."""
        return self.news_vecs[idx]
    def update(self):
        """Apply the accumulated user/news gradients, then reset buffers."""
        self.update_user_grad()
        self.update_news_grad()
        self._init_grad_param()
        self.cnt += 1
    def average_update_time(self):
        # NOTE(review): self.time is initialized to 0 and never incremented
        # anywhere in this file, so this currently always returns 0 (and
        # divides by zero before the first update) -- confirm intent.
        return self.time / self.cnt
    def update_news_grad(self):
        """Backprop the collected per-news gradients through the text encoder.

        Feeds each stored gradient as the upstream grad of its news vector
        (a vector-Jacobian product), then takes one Adam step.
        """
        self.text_encoder.train()
        self.news_optimizer.zero_grad()
        news_ids, news_grads = [], []
        for nid in self.news_grads:
            news_ids.append(nid)
            news_grads.append(self.news_grads[nid])
        news_up_ds = NewsUpdatorDataset(self.news_index, news_ids, news_grads)
        news_up_dl = DataLoader(news_up_ds, batch_size=128, shuffle=False, num_workers=0)
        for news_index, news_grad in news_up_dl:
            news_index = news_index.to(self.device)
            news_grad = news_grad.to(self.device)
            news_vecs = self.text_encoder(news_index)
            # backward(grad) accumulates into the encoder parameters.
            news_vecs.backward(news_grad)
        self.news_optimizer.step()
        self.news_optimizer.zero_grad()
    def update_user_grad(self):
        """Take one Adam step using gradients accumulated by collect()."""
        self.user_optimizer.step()
        self.user_optimizer.zero_grad()
    def check_news_vec_same(self, nids, news_vecs):
        """Debug helper: assert cached vectors at *nids* equal *news_vecs*."""
        assert (self.get_news_vecs(nids) == news_vecs).all(), "News vecs are not the same"
def collect(self, news_grad, user_grad):
# update user model params
for name, param in self.user_encoder.named_parameters():
if param.grad is None:
param.grad = user_grad[name]
else:
param.grad += user_grad[name]
# update news model params
for nid in news_grad:
if nid in self.news_grads:
self.news_grads[nid] += news_grad[nid]
else:
self.news_grads[nid] = news_grad[nid] | 4,402 | 34.224 | 90 | py |
Efficient-FedRec | Efficient-FedRec-main/src/model.py | import torch
from torch import nn
import torch.nn.functional as F
from transformers import BertModel
import numpy as np
class ScaledDotProductAttention(nn.Module):
    """softmax(QK^T / sqrt(d_k)) V, with an optional multiplicative mask."""

    def __init__(self, d_k):
        super(ScaledDotProductAttention, self).__init__()
        self.d_k = d_k

    def forward(self, Q, K, V, attn_mask=None):
        logits = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(self.d_k)
        # Manual masked softmax: exponentiate, zero masked slots, normalize
        # (the epsilon guards fully-masked rows against division by zero).
        weights = torch.exp(logits)
        if attn_mask is not None:
            weights = weights * attn_mask
        attn = weights / (torch.sum(weights, dim=-1, keepdim=True) + 1e-8)
        context = torch.matmul(attn, V)
        return context, attn
class MultiHeadAttention(nn.Module):
    """Multi-head self-attention (no output projection).

    Maps (batch, seq, d_model) -> (batch, seq, n_heads * d_v).
    """
    def __init__(self, d_model, n_heads, d_k, d_v):
        super(MultiHeadAttention, self).__init__()
        self.d_model = d_model # 300
        self.n_heads = n_heads # 20
        self.d_k = d_k # 20
        self.d_v = d_v # 20
        self.W_Q = nn.Linear(d_model, d_k * n_heads) # 300, 400
        self.W_K = nn.Linear(d_model, d_k * n_heads) # 300, 400
        self.W_V = nn.Linear(d_model, d_v * n_heads) # 300, 400
        self._initialize_weights()
    def _initialize_weights(self):
        # Xavier init for the three projection matrices.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight, gain=1)
    def forward(self, Q, K, V, attn_mask=None):
        batch_size, seq_len, _ = Q.size()
        # Project and split into heads: (batch, heads, seq, d_k / d_v).
        q_s = self.W_Q(Q).view(batch_size, -1, self.n_heads, self.d_k).transpose(1,2)
        k_s = self.W_K(K).view(batch_size, -1, self.n_heads, self.d_k).transpose(1,2)
        v_s = self.W_V(V).view(batch_size, -1, self.n_heads, self.d_v).transpose(1,2)
        if attn_mask is not None:
            # Broadcast the (batch, seq) mask to (batch, heads, seq, seq).
            attn_mask = attn_mask.unsqueeze(1).expand(batch_size, seq_len, seq_len)
            attn_mask = attn_mask.unsqueeze(1).repeat(1, self.n_heads, 1, 1)
        context, attn = ScaledDotProductAttention(self.d_k)(q_s, k_s, v_s, attn_mask)
        # Merge heads back: (batch, seq, heads * d_v).
        context = context.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * self.d_v)
        return context
class AdditiveAttention(nn.Module):
    """Additive (Bahdanau-style) attention pooling over a sequence.

    Scores each position with fc2(tanh(fc1(x))), normalizes the scores
    (optionally masked), and returns the score-weighted sum of positions.
    """

    def __init__(self, d_h, hidden_size=200):
        super(AdditiveAttention, self).__init__()
        self.att_fc1 = nn.Linear(d_h, hidden_size)
        self.att_fc2 = nn.Linear(hidden_size, 1)

    def forward(self, x, attn_mask=None):
        batch_size = x.shape[0]
        # Per-position scores, exponentiated for the manual softmax.
        scores = torch.exp(self.att_fc2(torch.tanh(self.att_fc1(x))))
        if attn_mask is not None:
            scores = scores * attn_mask.unsqueeze(2)
        # Normalize over the sequence axis; epsilon guards all-masked rows.
        weights = scores / (torch.sum(scores, dim=1, keepdim=True) + 1e-8)
        pooled = torch.bmm(x.permute(0, 2, 1), weights)
        return torch.reshape(pooled, (batch_size, -1))  # (bz, d_h)
class TextEncoder(nn.Module):
    """News encoder: BERT token states -> additive attention pooling -> 400-d vector."""
    def __init__(self,
                 bert_type="bert-base-uncased",
                 word_embedding_dim=400,
                 dropout_rate=0.2,
                 enable_gpu=True):
        super(TextEncoder, self).__init__()
        # NOTE(review): the dropout_rate and enable_gpu parameters are
        # currently unused (0.2 is hardcoded below).
        self.dropout_rate = 0.2
        # Dropout inside BERT is disabled entirely.
        self.bert = BertModel.from_pretrained(bert_type,
                                              hidden_dropout_prob=0,
                                              attention_probs_dropout_prob=0)
        self.additive_attention = AdditiveAttention(self.bert.config.hidden_size,
                                                    self.bert.config.hidden_size//2)
        self.fc = nn.Linear(self.bert.config.hidden_size, word_embedding_dim)
    def forward(self, text):
        # text batch, 2, word
        # text[:, 0, :] are token ids; text[:, 1, :] is the attention mask.
        tokens = text[:,0,:]
        atts = text[:,1,:]
        text_vector = self.bert(tokens, attention_mask=atts)[0]
        # Pool token states, then project to the news embedding dim.
        text_vector = self.additive_attention(text_vector)
        text_vector = self.fc(text_vector)
        return text_vector
class UserEncoder(nn.Module):
    """User encoder: self-attention over clicked-news vectors, then additive pooling."""
    def __init__(self,
                 news_embedding_dim=400,
                 num_attention_heads=20,
                 query_vector_dim=200
                 ):
        super(UserEncoder, self).__init__()
        self.dropout_rate = 0.2
        self.multihead_attention = MultiHeadAttention(news_embedding_dim,
                                                      num_attention_heads, 20, 20)
        self.additive_attention = AdditiveAttention(news_embedding_dim,
                                                    query_vector_dim)
    def forward(self, clicked_news_vecs):
        # Dropout is only active in training mode.
        clicked_news_vecs = F.dropout(clicked_news_vecs, p=self.dropout_rate, training=self.training)
        multi_clicked_vectors = self.multihead_attention(
            clicked_news_vecs, clicked_news_vecs, clicked_news_vecs
        )
        # Pool the attended history into a single user vector.
        pos_user_vector = self.additive_attention(multi_clicked_vectors)
        user_vector = pos_user_vector
        return user_vector
class Model(nn.Module):
    def __init__(self):
        """Scoring model: user encoder + dot-product scoring with CE loss."""
        super(Model, self).__init__()
        self.user_encoder = UserEncoder()
        self.criterion = nn.CrossEntropyLoss()
def forward(self, candidate_vecs, clicked_news_vecs, targets, compute_loss=True):
user_vector = self.user_encoder(clicked_news_vecs)
score = torch.bmm(candidate_vecs, user_vector.unsqueeze(-1)).squeeze(dim=-1)
if compute_loss:
loss = self.criterion(score, targets)
return loss, score
else:
return score | 5,484 | 37.356643 | 101 | py |
Efficient-FedRec | Efficient-FedRec-main/src/data.py | import random
import numpy as np
from torch.utils.data import Dataset, DataLoader
def newsample(nnn, ratio):
    """Pick *ratio* negatives: a random subset when enough exist, otherwise
    the full list padded with '<unk>' tokens."""
    if len(nnn) < ratio:
        return nnn + ["<unk>"] * (ratio - len(nnn))
    return random.sample(nnn, ratio)
class TrainDataset(Dataset):
    """Training impressions for a sampled set of users.

    Each item is (candidate ids, candidate vecs, history ids, history vecs,
    label); index 0 of the candidates is the positive, so the label is
    always 0.
    """
    def __init__(self, args, samples, users, user_indices, nid2index, agg, news_index):
        self.news_index = news_index
        self.nid2index = nid2index
        self.agg = agg
        self.samples = []
        self.args = args
        # Keep only the samples belonging to the sampled users.
        for user in users:
            self.samples.extend([samples[i] for i in user_indices[user]])
    def __len__(self):
        return len(self.samples)
    def __getitem__(self, idx):
        # pos, neg, his, neg_his
        _, pos, neg, his, _ = self.samples[idx]
        neg = newsample(neg, self.args.npratio)
        candidate_news = np.array([self.nid2index[n] for n in [pos] + neg])
        candidate_news_vecs = self.agg.get_news_vecs(candidate_news)
        # Pad the click history with index 0 up to max_his_len.
        his = np.array([self.nid2index[n] for n in his] + [0] * (self.args.max_his_len - len(his)))
        his_vecs = self.agg.get_news_vecs(his)
        label = np.array(0)
        return candidate_news, candidate_news_vecs, his, his_vecs, label
class NewsDataset(Dataset):
    """Wraps the per-news token/mask array (one row per article) for batching."""
    def __init__(self, news_index):
        self.news_index = news_index
    def __len__(self):
        return len(self.news_index)
    def __getitem__(self, idx):
        return self.news_index[idx]
class NewsPartDataset(Dataset):
    """Restricts the news-feature table to a chosen subset of news ids."""

    def __init__(self, news_index, nids):
        self.news_index = news_index
        self.nids = nids

    def __len__(self):
        return len(self.nids)

    def __getitem__(self, idx):
        # Return the id alongside its features so callers can map results back.
        nid = self.nids[idx]
        return nid, self.news_index[nid]
class UserDataset(Dataset):
    """Yields the zero-padded clicked-news vector history for each sample."""

    def __init__(self, args, samples, news_vecs, nid2index):
        self.samples = samples
        self.args = args
        self.news_vecs = news_vecs
        self.nid2index = nid2index

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        # Only the click history (4th field) is needed for user encoding.
        _, _, _, his, _ = self.samples[idx]
        pad = [0] * (self.args.max_his_len - len(his))
        rows = [self.nid2index[n] for n in his] + pad
        # Fancy-index into the precomputed news-vector matrix.
        return self.news_vecs[rows]
class NewsUpdatorDataset(Dataset):
    """Pairs each news id's features with the gradient to apply to it."""

    def __init__(self, news_index, news_ids, news_grads):
        self.news_index = news_index
        self.news_grads = news_grads
        self.news_ids = news_ids

    def __len__(self):
        return len(self.news_ids)

    def __getitem__(self, idx):
        # news_grads is positional (parallel to news_ids); news_index is keyed by id.
        return self.news_index[self.news_ids[idx]], self.news_grads[idx]
# ---- Guava-disease-detection-main/optimization/conversion/torch_to_onnx.py ----
import torch
from torchvision.models import mobilenet_v2
# Export a torchvision MobileNetV2 to ONNX with a fixed input shape.
img_size = (640, 640)
batch_size = 1
onnx_model_path = 'model.onnx'

model = mobilenet_v2()
model.eval()  # inference mode: fixes dropout / batch-norm behaviour for tracing

# Random dummy input in the shape (batch, channels, H, W) the exporter will trace.
dummy_input = torch.rand((batch_size, 3, *img_size))
_ = model(dummy_input)  # sanity forward pass before export

torch.onnx.export(
    model,
    dummy_input,
    onnx_model_path,
    verbose=False,
    input_names=['input'],
    output_names=['output'],
    opset_version=12,
)
# ---- CICDFuzzBench-master/experiments/fuzz duration/data/VD_A.py ----
import itertools as it
from bisect import bisect_left
from typing import List
import numpy as np
import pandas as pd
import scipy.stats as ss
from pandas import Categorical
def VD_A(treatment: List[float], control: List[float]):
    """
    Computes Vargha and Delaney A index
    A. Vargha and H. D. Delaney.
    A critique and improvement of the CL common language
    effect size statistics of McGraw and Wong.
    Journal of Educational and Behavioral Statistics, 25(2):101-132, 2000
    The formula to compute A has been transformed to minimize accuracy errors
    See: http://mtorchiano.wordpress.com/2014/05/19/effect-size-of-r-precision/
    :param treatment: a numeric list
    :param control: another numeric list
    :returns the value estimate and the magnitude
    :raises ValueError: if the lists differ in length or are empty
    """
    m = len(treatment)
    n = len(control)
    if m != n:
        # Fixed message: it previously referenced nonexistent variables "d" and "f".
        raise ValueError("Data treatment and control must have the same length")
    if m == 0:
        # Guard: without this, the A formula below divides by zero.
        raise ValueError("Data treatment and control must be non-empty")
    # Rank all observations jointly; ties receive averaged ranks.
    r = ss.rankdata(treatment + control)
    r1 = sum(r[0:m])
    # Compute the measure
    # A = (r1/m - (m+1)/2)/n # formula (14) in Vargha and Delaney, 2000
    A = (2 * r1 - m * (m + 1)) / (
        2 * n * m
    )  # equivalent formula to avoid accuracy errors
    levels = [0.147, 0.33, 0.474]  # effect sizes from Hess and Kromrey, 2004
    magnitude = ["negligible", "small", "medium", "large"]
    scaled_A = (A - 0.5) * 2
    # bisect_left maps |scaled_A| into the right magnitude bucket.
    magnitude = magnitude[bisect_left(levels, abs(scaled_A))]
    estimate = A
    return estimate, magnitude
if __name__ == "__main__":
# Examples
# negligible
F = [
0.8236111111111111,
0.7966666666666666,
0.923611111111111,
0.8197222222222222,
0.7108333333333333,
]
G = [
0.8052777777777779,
0.8172222222222221,
0.8322222222222223,
0.783611111111111,
0.8141666666666666,
]
print(VD_A(G, F))
# small
A = [
0.478515625,
0.4638671875,
0.4638671875,
0.4697265625,
0.4638671875,
0.474609375,
0.4814453125,
0.4814453125,
0.4697265625,
0.4814453125,
0.474609375,
0.4833984375,
0.484375,
0.44921875,
0.474609375,
0.484375,
0.4814453125,
0.4638671875,
0.484375,
0.478515625,
0.478515625,
0.45703125,
0.484375,
0.419921875,
0.4833984375,
0.478515625,
0.4697265625,
0.484375,
0.478515625,
0.4638671875,
]
B = [
0.4814453125,
0.478515625,
0.44921875,
0.4814453125,
0.4638671875,
0.478515625,
0.474609375,
0.4638671875,
0.474609375,
0.44921875,
0.474609375,
0.478515625,
0.478515625,
0.474609375,
0.4697265625,
0.474609375,
0.45703125,
0.4697265625,
0.478515625,
0.4697265625,
0.4697265625,
0.484375,
0.45703125,
0.474609375,
0.474609375,
0.4638671875,
0.45703125,
0.474609375,
0.4638671875,
0.4306640625,
]
print(VD_A(A, B))
# medium
C = [
0.9108333333333334,
0.8755555555555556,
0.900277777777778,
0.9274999999999999,
0.8777777777777779,
]
E = [
0.8663888888888888,
0.8802777777777777,
0.7816666666666667,
0.8377777777777776,
0.9305555555555556,
]
print(VD_A(C, E))
# Large
D = [
0.7202777777777778,
0.77,
0.8544444444444445,
0.7947222222222222,
0.7577777777777778,
]
print(VD_A(C, D))
| 3,710 | 21.089286 | 79 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.