repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
reformer-pytorch | reformer-pytorch-master/reformer_pytorch/reversible.py | import torch
import torch.nn as nn
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
    """Wrap a module so a forward pass can be replayed with identical RNG.

    `record_rng` snapshots the CPU (and, when CUDA is initialized, the GPU)
    random state; a later call with `set_rng=True` restores that state inside
    a forked RNG scope before re-running the wrapped net, so stochastic layers
    (e.g. dropout) reproduce the exact same result.
    """

    def __init__(self, net):
        super().__init__()
        self.net = net
        self.cpu_state = None
        self.cuda_in_fwd = None
        self.gpu_devices = None
        self.gpu_states = None

    def record_rng(self, *args):
        """Snapshot RNG state relevant to *args* for later replay."""
        self.cpu_state = torch.get_rng_state()
        # NOTE(review): relies on the private flag torch.cuda._initialized
        if torch.cuda._initialized:
            self.cuda_in_fwd = True
            self.gpu_devices, self.gpu_states = get_device_states(*args)

    def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
        if record_rng:
            self.record_rng(*args)

        if not set_rng:
            return self.net(*args, **kwargs)

        # replay path: fork the RNG, restore the recorded states, run the net
        devices = self.gpu_devices if self.cuda_in_fwd else []
        with torch.random.fork_rng(devices=devices, enabled=True):
            torch.set_rng_state(self.cpu_state)
            if self.cuda_in_fwd:
                set_device_states(self.gpu_devices, self.gpu_states)
            return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
    """Reversible residual block: y1 = x1 + f(x2), y2 = x2 + g(y1).

    Because the transform is invertible, `backward_pass` reconstructs the
    inputs (x1, x2) from the outputs, so activations need not be stored.
    `send_signal` injects `_reverse`/`_depth` markers into the sub-module
    kwargs (used by LSHAttention's bucket cache).
    """

    def __init__(self, f, g, depth=None, send_signal = False):
        super().__init__()
        self.f = Deterministic(f)
        self.g = Deterministic(g)
        self.depth = depth
        self.send_signal = send_signal

    def forward(self, x, f_args = None, g_args = None):
        # FIX: the previous version used mutable `{}` defaults and then
        # mutated them (and any caller-supplied dict) when send_signal was
        # set; copy defensively instead.
        f_args = dict(f_args) if f_args is not None else {}
        g_args = dict(g_args) if g_args is not None else {}

        x1, x2 = torch.chunk(x, 2, dim=2)
        y1, y2 = None, None

        if self.send_signal:
            f_args['_reverse'] = g_args['_reverse'] = False
            f_args['_depth'] = g_args['_depth'] = self.depth

        # no_grad: gradients are produced by backward_pass, not autograd here
        with torch.no_grad():
            y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
            y2 = x2 + self.g(y1, record_rng=self.training, **g_args)

        return torch.cat([y1, y2], dim=2)

    def backward_pass(self, y, dy, f_args = None, g_args = None):
        """Reconstruct the block input and its gradient from (y, dy)."""
        f_args = dict(f_args) if f_args is not None else {}
        g_args = dict(g_args) if g_args is not None else {}

        y1, y2 = torch.chunk(y, 2, dim=2)
        del y

        dy1, dy2 = torch.chunk(dy, 2, dim=2)
        del dy

        if self.send_signal:
            f_args['_reverse'] = g_args['_reverse'] = True
            f_args['_depth'] = g_args['_depth'] = self.depth

        # re-run g with the recorded RNG to recover x2 and grads w.r.t. y1
        with torch.enable_grad():
            y1.requires_grad = True
            gy1 = self.g(y1, set_rng=True, **g_args)
            torch.autograd.backward(gy1, dy2)

        with torch.no_grad():
            x2 = y2 - gy1
            del y2, gy1

            dx1 = dy1 + y1.grad
            del dy1
            y1.grad = None

        # re-run f with the recorded RNG to recover x1 and grads w.r.t. x2
        with torch.enable_grad():
            x2.requires_grad = True
            fx2 = self.f(x2, set_rng=True, **f_args)
            torch.autograd.backward(fx2, dx1, retain_graph=True)

        with torch.no_grad():
            x1 = y1 - fx2
            del y1, fx2

            dx2 = dy2 + x2.grad
            del dy2
            x2.grad = None

            x = torch.cat([x1, x2.detach()], dim=2)
            dx = torch.cat([dx1, dx2], dim=2)

        return x, dx
class IrreversibleBlock(nn.Module):
    """Plain (non-reversible) counterpart of ReversibleBlock.

    Computes y1 = x1 + f(x2), y2 = x2 + g(y1) with ordinary autograd.
    """

    def __init__(self, f, g):
        super().__init__()
        self.f = f
        self.g = g

    def forward(self, x, f_args, g_args):
        half1, half2 = torch.chunk(x, 2, dim=2)
        out1 = half1 + self.f(half2, **f_args)
        out2 = half2 + self.g(out1, **g_args)
        return torch.cat([out1, out2], dim=2)
class _ReversibleFunction(Function):
    # Custom autograd Function: runs the reversible blocks without storing
    # intermediate activations; backward recomputes them blockwise via each
    # block's backward_pass.
    @staticmethod
    def forward(ctx, x, blocks, kwargs):
        ctx.kwargs = kwargs
        for block in blocks:
            x = block(x, **kwargs)
        # detach so the stashed output does not keep a forward graph alive;
        # stored on ctx directly (not save_for_backward) since it is only
        # used as a plain value during reconstruction
        ctx.y = x.detach()
        ctx.blocks = blocks
        return x

    @staticmethod
    def backward(ctx, dy):
        y = ctx.y
        kwargs = ctx.kwargs
        # walk blocks in reverse, reconstructing inputs and grads in lockstep
        for block in ctx.blocks[::-1]:
            y, dy = block.backward_pass(y, dy, **kwargs)
        # gradients for (x, blocks, kwargs): only x receives one
        return dy, None, None
class ReversibleSequence(nn.Module):
    """Stack of (f, g) block pairs, run reversibly for long sequences.

    Sequences longer than `reverse_thres` go through `_ReversibleFunction`
    (memory-efficient, activations recomputed in backward); shorter ones use
    the plain IrreversibleBlock path. `layer_dropout` randomly skips whole
    blocks during training.
    """

    def __init__(self, blocks, layer_dropout = 0., reverse_thres = 0, send_signal = False):
        super().__init__()
        self.layer_dropout = layer_dropout
        self.reverse_thres = reverse_thres

        self.blocks = nn.ModuleList([ReversibleBlock(f, g, depth, send_signal) for depth, (f, g) in enumerate(blocks)])
        self.irrev_blocks = nn.ModuleList([IrreversibleBlock(f=f, g=g) for f, g in blocks])

    def forward(self, x, arg_route = (True, False), **kwargs):
        # reversible only pays off past the threshold
        reverse = x.shape[1] > self.reverse_thres
        blocks = self.blocks if reverse else self.irrev_blocks

        if self.training and self.layer_dropout > 0:
            to_drop = torch.empty(len(blocks)).uniform_(0, 1) < self.layer_dropout
            # FIX: sample from the currently selected block list. The old code
            # always drew from self.blocks, so with layer dropout active the
            # irreversible path was silently replaced by reversible blocks —
            # whose standalone forward runs under no_grad, killing gradients.
            kept = [block for block, drop in zip(blocks, to_drop) if not drop]
            blocks = blocks[:1] if len(kept) == 0 else kept

        f_args, g_args = map(lambda route: kwargs if route else {}, arg_route)
        block_kwargs = {'f_args': f_args, 'g_args': g_args}

        if not reverse:
            for block in blocks:
                x = block(x, **block_kwargs)
            return x

        return _ReversibleFunction.apply(x, blocks, block_kwargs)
| 5,434 | 32.343558 | 120 | py |
reformer-pytorch | reformer-pytorch-master/reformer_pytorch/recorder.py | from torch import nn
from reformer_pytorch.reformer_pytorch import LSHAttention, LSHSelfAttention
from collections import defaultdict
class Recorder(nn.Module):
    """Transparent wrapper that captures attention maps and bucket ids.

    While `on`, each forward pass wires a callback into every
    LSHSelfAttention submodule (and flips `_return_attn` on LSHAttention),
    appending detached CPU copies of (attn, buckets) to `recordings`,
    keyed by forward-pass index.
    """

    def __init__(self, net):
        super().__init__()
        self.iter = 0
        self.recordings = defaultdict(list)
        self.net = net
        self.on = True
        self.ejected = False

    def eject(self):
        # permanently detach: drop data, remove hooks, hand the net back
        self.ejected = True
        self.clear()
        self.unwire()
        return self.net

    def _set_wiring(self, enabled):
        # toggle attention capture and the recording callback on submodules
        for module in self.net.modules():
            if isinstance(module, LSHAttention):
                module._return_attn = enabled
            if isinstance(module, LSHSelfAttention):
                module.callback = self.record if enabled else None

    def wire(self):
        self._set_wiring(True)

    def unwire(self):
        self._set_wiring(False)

    def turn_on(self):
        self.on = True

    def turn_off(self):
        self.on = False

    def clear(self):
        del self.recordings
        self.recordings = defaultdict(list)
        self.iter = 0

    def record(self, attn, buckets):
        if not self.on:
            return
        snapshot = {'attn': attn.detach().cpu(), 'buckets': buckets.detach().cpu()}
        self.recordings[self.iter].append(snapshot)

    def forward(self, x, **kwargs):
        assert not self.ejected, 'Recorder has already been ejected and disposed'
        if self.on:
            self.wire()

        out = self.net(x, **kwargs)

        self.iter += 1
        self.unwire()
        return out
| 1,662 | 26.716667 | 81 | py |
reformer-pytorch | reformer-pytorch-master/reformer_pytorch/autopadder.py | import math
import torch
from torch import nn
import torch.nn.functional as F
from reformer_pytorch.reformer_pytorch import Reformer, ReformerLM, LSHSelfAttention
def pad_to_multiple(tensor, seqlen, multiple, dim=-1):
    """Zero-pad *tensor* along *dim* so that *seqlen* becomes a multiple of *multiple*.

    Returns the tensor unchanged when seqlen already divides evenly.
    Only dim == -1 or dim == -2 produce a meaningful pad spec here.
    """
    leftover = seqlen % multiple
    if leftover == 0:
        return tensor
    pad_len = multiple - leftover
    # F.pad spec is (last-dim pairs first); skip inner dims when dim == -2
    inner = (0, 0) * (-1 - dim)
    return F.pad(tensor, (*inner, 0, pad_len), value=0)
class Autopadder(nn.Module):
    """Pads inputs (and their masks) to a multiple of 2x bucket size.

    Wraps an LSHSelfAttention / Reformer / ReformerLM and, whenever the
    effective key/value length exceeds `full_attn_thres` (so LSH attention
    will be used), pads the input and masks before calling the net, then
    slices the output back to the original sequence length.
    """

    def __init__(self, net):
        super().__init__()
        assert isinstance(net, (LSHSelfAttention, Reformer, ReformerLM)), 'only modules LSHSelfAttention, Reformer, ReformerLM accepted'
        self.net = net

        reformer = net.reformer if isinstance(net, ReformerLM) else net
        # ReformerLM takes token ids (pad last dim); others take embeddings
        self.pad_dim = -1 if isinstance(net, ReformerLM) else -2

        self.bucket_size = reformer.bucket_size
        self.num_mem_kv = reformer.num_mem_kv
        self.full_attn_thres = reformer.full_attn_thres

    def forward(self, x, **kwargs):
        b, t = x.shape[:2]
        m = self.num_mem_kv

        keys = kwargs.get('keys')
        input_mask = kwargs.get('input_mask')
        input_attn_mask = kwargs.get('input_attn_mask')

        k_len = 0 if keys is None else keys.shape[1]
        seqlen = t + m + k_len

        if seqlen > self.full_attn_thres:
            if input_mask is None:
                # build an all-True mask so padding can be marked False below
                input_mask = torch.full((b, t), True, device=x.device, dtype=torch.bool)

            x = pad_to_multiple(x, seqlen, self.bucket_size * 2, dim=self.pad_dim)

            if input_mask is not None:
                grow = x.shape[1] - input_mask.shape[1]
                kwargs.update(input_mask=F.pad(input_mask, (0, grow), value=False))

            if input_attn_mask is not None:
                grow = x.shape[1] - input_attn_mask.shape[1]
                kwargs.update(input_attn_mask=F.pad(input_attn_mask, (0, grow, 0, grow), value=False))

        out = self.net(x, **kwargs)
        return out[:, 0:t]
| 2,056 | 35.732143 | 136 | py |
reformer-pytorch | reformer-pytorch-master/reformer_pytorch/reformer_enc_dec.py | import re
from torch import nn
from reformer_pytorch.reformer_pytorch import ReformerLM
from reformer_pytorch.generative_tools import TrainingWrapper
ENC_PREFIX = 'enc_'
DEC_PREFIX = 'dec_'
def group_dict_by_key(cond, d):
    """Split *d* into (matching, non-matching) dicts by predicate on keys."""
    matching, rest = dict(), dict()
    for key, value in d.items():
        bucket = matching if cond(key) else rest
        bucket[key] = value
    return (matching, rest)
def string_begins_with(prefix, str):
    """Return True when *str* starts with the literal *prefix*.

    FIX: previously implemented as re.match(f'^{prefix}', str), which
    misbehaved for prefixes containing regex metacharacters (e.g. '.'
    matching any character). str.startswith compares literally.
    (Parameter name `str` kept, despite shadowing the builtin, for
    keyword-call compatibility.)
    """
    return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
    """Partition *d* into (keys-with-prefix, keys-without-prefix) dicts."""
    return group_dict_by_key(lambda key: string_begins_with(prefix, key), d)
def group_by_key_prefix_and_remove_prefix(prefix, d):
    """Like group_by_key_prefix, but strips *prefix* off the matching keys."""
    with_prefix, without = group_dict_by_key(lambda key: string_begins_with(prefix, key), d)
    stripped = {key[len(prefix):]: value for key, value in with_prefix.items()}
    return stripped, without
def extract_enc_dec_kwargs(kwargs):
    """Pull out 'enc_'/'dec_'-prefixed kwargs; return (enc, dec, remaining)."""
    enc_kwargs, remaining = group_by_key_prefix_and_remove_prefix(ENC_PREFIX, kwargs)
    dec_kwargs, remaining = group_by_key_prefix_and_remove_prefix(DEC_PREFIX, remaining)
    return enc_kwargs, dec_kwargs, remaining
def extract_and_set_enc_dec_kwargs(kwargs):
    """Split kwargs and reuse the encoder input mask as the decoder context mask."""
    enc_kwargs, dec_kwargs, remaining = extract_enc_dec_kwargs(kwargs)
    if 'input_mask' in enc_kwargs:
        # only a default — an explicit dec_context_mask wins
        dec_kwargs.setdefault('context_mask', enc_kwargs['input_mask'])
    return enc_kwargs, dec_kwargs, remaining
class ReformerEncDec(nn.Module):
    """Encoder-decoder pair of ReformerLMs sharing a model dimension.

    Constructor kwargs prefixed 'enc_'/'dec_' are routed to the respective
    ReformerLM. The encoder returns embeddings, which are fed to the decoder
    as `keys`; the decoder is forced causal.
    """

    def __init__(self, dim, ignore_index = 0, pad_value = 0, **kwargs):
        super().__init__()
        enc_kwargs, dec_kwargs, _ = extract_enc_dec_kwargs(kwargs)

        assert 'return_embedding' not in enc_kwargs, 'you cannot manually set the return embeddings flag for the encoder'
        assert 'dim' not in dec_kwargs and 'dim' not in enc_kwargs, 'you must set the dim for both encoder and decoder'

        # the single `dim` argument drives both halves
        enc_kwargs['dim'] = dec_kwargs['dim'] = dim
        enc_kwargs['return_embeddings'] = True
        dec_kwargs['causal'] = True

        enc_kwargs.setdefault('bucket_size', 64)
        dec_kwargs.setdefault('bucket_size', enc_kwargs['bucket_size'] * 2)

        enc = ReformerLM(**enc_kwargs)
        dec = ReformerLM(**dec_kwargs)

        self.enc = TrainingWrapper(enc, ignore_index = ignore_index, pad_value = pad_value)
        self.dec = TrainingWrapper(dec, ignore_index = ignore_index, pad_value = pad_value)

    def generate(self, seq_in, seq_out_start, seq_len, **kwargs):
        enc_kwargs, dec_kwargs, kwargs = extract_and_set_enc_dec_kwargs(kwargs)
        context = self.enc(seq_in, **enc_kwargs)
        return self.dec.generate(seq_out_start, seq_len, keys = context, **{**dec_kwargs, **kwargs})

    def forward(self, seq_in, seq_out, return_loss = False, **kwargs):
        enc_kwargs, dec_kwargs, kwargs = extract_and_set_enc_dec_kwargs(kwargs)
        context = self.enc(seq_in, **enc_kwargs)
        return self.dec(seq_out, return_loss = return_loss, keys = context, **dec_kwargs)
| 2,978 | 42.173913 | 121 | py |
reformer-pytorch | reformer-pytorch-master/reformer_pytorch/reformer_pytorch.py | import math
import torch
import torch.nn as nn
from torch.nn import Identity
import torch.nn.functional as F
from torch.autograd import Function
from functools import partial, reduce, wraps
from itertools import chain
from operator import mul
from local_attention import LocalAttention
from axial_positional_embedding import AxialPositionalEmbedding
from product_key_memory import PKM
from reformer_pytorch.reversible import ReversibleSequence
from einops import rearrange, repeat
#constants
TOKEN_SELF_ATTN_VALUE = -5e4 # carefully set for half precision to work
# helper fns
def exists(val):
    """Return True when *val* is not None."""
    return val is not None
def sort_key_val(t1, t2, dim=-1):
    """Sort *t1* along *dim* and reorder *t2* (broadcast to t1's shape) with it."""
    sorted_keys, order = t1.sort(dim=dim)
    sorted_vals = t2.expand_as(t1).gather(dim, order)
    return sorted_keys, sorted_vals
def batched_index_select(values, indices):
    """Gather rows of *values* (b, n, d) per-batch using *indices* (b, m)."""
    feat_dim = values.shape[-1]
    expanded = indices.unsqueeze(-1).expand(-1, -1, feat_dim)
    return values.gather(1, expanded)
def process_inputs_chunk(fn, chunks=1, dim=0):
    """Wrap *fn* so all tensor args/kwargs are chunked along *dim*, fn is
    applied per chunk, and the tuple outputs are concatenated back."""
    def inner_fn(*args, **kwargs):
        key_list = list(kwargs.keys())
        n_positional = len(args)
        pieces = [t.chunk(chunks, dim=dim) for t in (*args, *kwargs.values())]

        outputs = []
        for group in zip(*pieces):
            c_args = group[:n_positional]
            c_kwargs = dict(zip(key_list, group[n_positional:]))
            outputs.append(fn(*c_args, **c_kwargs))

        return tuple(torch.cat(parts, dim=dim) for parts in zip(*outputs))
    return inner_fn
def chunked_sum(tensor, chunks=1):
    """Sum over the last dim, processing the flattened rows in *chunks*
    pieces to bound peak memory."""
    *lead_shape, last = tensor.shape
    flat = tensor.reshape(-1, last)
    partial_sums = [piece.sum(dim=-1) for piece in flat.chunk(chunks, dim=0)]
    return torch.cat(partial_sums, dim=0).reshape(lead_shape)
def default(val, default_val):
    """Return *val* unless it is None, in which case return *default_val*."""
    if val is None:
        return default_val
    return val
def cast_tuple(x):
    """Wrap *x* in a 1-tuple unless it already is a tuple."""
    if isinstance(x, tuple):
        return x
    return (x,)
def max_neg_value(tensor):
    """Most negative finite value representable in *tensor*'s dtype."""
    finfo = torch.finfo(tensor.dtype)
    return -finfo.max
def cache_fn(f):
    """Memoize *f*'s first non-None result; later calls return it unchanged.

    Note: a None result is not cached, so f runs again (same as original).
    """
    result = None

    @wraps(f)
    def wrapper(*args, **kwargs):
        nonlocal result
        if result is None:
            result = f(*args, **kwargs)
        return result
    return wrapper
def cache_method_decorator(cache_attr, cache_namespace, reexecute = False):
    """Decorator factory: memoize a method's result in the dict attribute
    named *cache_attr*, keyed by f'{cache_namespace}:{key_namespace}'.

    Call-time flags on the wrapped method:
      fetch      -- read the cached value (optionally re-running fn for its
                    side effects when reexecute is True)
      set_cache  -- store the fresh result back into the cache attribute
      key_namespace -- extra key suffix (e.g. layer depth)
    """
    def inner_fn(fn):
        @wraps(fn)
        def wrapper(self, *args, key_namespace=None, fetch=False, set_cache=True, **kwargs):
            suffix = str(key_namespace if key_namespace is not None else '')
            cache = getattr(self, cache_attr)
            key = f'{cache_namespace}:{suffix}'

            if fetch:
                val = cache[key]
                if reexecute:
                    fn(self, *args, **kwargs)
            else:
                val = fn(self, *args, **kwargs)
                if set_cache:
                    # rebuild the dict rather than mutate in place
                    setattr(self, cache_attr, {**cache, key: val})
            return val
        return wrapper
    return inner_fn
def expand_dim(dim, k, t):
    """Insert a new axis at *dim* and expand it (without copying) to size *k*."""
    t = t.unsqueeze(dim)
    target = [-1] * t.dim()
    target[dim] = k
    return t.expand(*target)
def merge_dims(ind_from, ind_to, tensor):
    """Collapse dims ind_from..ind_to (inclusive) of *tensor* into one."""
    shape = list(tensor.shape)
    span = slice(ind_from, ind_to + 1)
    shape[span] = [reduce(mul, shape[span])]
    return tensor.reshape(*shape)
def split_at_index(dim, index, t):
    """Split *t* along *dim* into ([:index], [index:]) views."""
    lead = (slice(None),) * dim
    left = t[(*lead, slice(None, index))]
    right = t[(*lead, slice(index, None))]
    return left, right
# helper classes
class Always(nn.Module):
    """Module that ignores all inputs and returns the stored value."""

    def __init__(self, val):
        super().__init__()
        self.val = val

    def forward(self, *args, **kwargs):
        return self.val
class MatrixMultiply(nn.Module):
    """Multiply the input by a fixed tensor, optionally L2-normalized
    along the last dim and/or transposed first (used for weight tying)."""

    def __init__(self, tensor, transpose = False, normalize = False):
        super().__init__()
        self.tensor = tensor
        self.transpose = transpose
        self.normalize = normalize

    def forward(self, x):
        weight = self.tensor
        if self.normalize:
            weight = F.normalize(weight, dim=-1)
        if self.transpose:
            weight = weight.t()
        return x @ weight
class ReZero(nn.Module):
    """ReZero residual gate: scales fn's output by a learned scalar
    initialized at zero, so the branch starts as the identity-residual."""

    def __init__(self, fn):
        super().__init__()
        self.g = nn.Parameter(torch.zeros(1))
        self.fn = fn

    def forward(self, x, **kwargs):
        branch = self.fn(x, **kwargs)
        return branch * self.g
class ScaleNorm(nn.Module):
    """Scale normalization: divide by the L2 norm of the last dim
    (clamped by eps) and multiply by a single learned scalar.
    Note: `dim` is accepted for interface parity but unused."""

    def __init__(self, dim, eps=1e-5):
        super().__init__()
        self.g = nn.Parameter(torch.ones(1))
        self.eps = eps

    def forward(self, x):
        norm = torch.norm(x, dim=-1, keepdim=True).clamp(min=self.eps)
        return (x / norm) * self.g
class PreNorm(nn.Module):
    """Apply a normalization layer (built from norm_class(dim)) before fn."""

    def __init__(self, norm_class, dim, fn):
        super().__init__()
        self.norm = norm_class(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        normed = self.norm(x)
        return self.fn(normed, **kwargs)
class Chunk(nn.Module):
    """Apply fn to the input in `chunks` slices along `along_dim`
    (bounds peak memory), concatenating the results back."""

    def __init__(self, chunks, fn, along_dim = -1):
        super().__init__()
        self.dim = along_dim
        self.chunks = chunks
        self.fn = fn

    def forward(self, x, **kwargs):
        if self.chunks == 1:
            return self.fn(x, **kwargs)
        pieces = x.chunk(self.chunks, dim = self.dim)
        outputs = [self.fn(piece, **kwargs) for piece in pieces]
        return torch.cat(outputs, dim = self.dim)
# LSH attention as described in https://openreview.net/pdf?id=rkgNKkHtvB
# adapted from trax, stripped to what paper said needed to work
# namely that buckets need to be at least 64 with 8 rounds of hashing
# https://github.com/google/trax/blob/master/trax/layers/research/efficient_attention.py#L442
class LSHAttention(nn.Module):
    """Locality-sensitive-hashing attention (Reformer).

    Queries and keys share one tensor (qk). Vectors are bucketed via random
    rotations, sorted by bucket, and attention runs within fixed-size chunks
    (plus one chunk back). Results from `n_hashes` hashing rounds are combined
    with logsumexp weights. Returns (out, attn, buckets); attn is empty unless
    `return_attn` was set.
    """
    def __init__( self,
                  dropout = 0.,
                  bucket_size = 64,
                  n_hashes = 8,
                  causal = False,
                  allow_duplicate_attention = True,
                  attend_across_buckets = True,
                  rehash_each_round = True,
                  drop_for_hash_rate = 0.0,
                  random_rotations_per_head = False,
                  return_attn = False):
        super().__init__()
        if dropout >= 1.0:
            raise ValueError('Dropout rates must be lower than 1.')

        self.dropout = nn.Dropout(dropout)
        self.dropout_for_hash = nn.Dropout(drop_for_hash_rate)

        assert rehash_each_round or allow_duplicate_attention, (
            'The setting {allow_duplicate_attention=False, rehash_each_round=False}'
            ' is not implemented.')

        self.causal = causal
        self.bucket_size = bucket_size

        self.n_hashes = n_hashes

        self._allow_duplicate_attention = allow_duplicate_attention
        self._attend_across_buckets = attend_across_buckets
        self._rehash_each_round = rehash_each_round
        self._random_rotations_per_head = random_rotations_per_head

        # will expend extra computation to return attention matrix
        self._return_attn = return_attn

        # cache buckets for reversible network, reported by authors to make Reformer work at depth
        self._cache = {}

    @cache_method_decorator('_cache', 'buckets', reexecute=True)
    def hash_vectors(self, n_buckets, vecs):
        """Assign each vector in vecs (batch, seq, dim) to a bucket per hash
        round; returns bucket ids of shape (batch, n_hashes * seq) with
        per-round offsets already added."""
        batch_size = vecs.shape[0]
        device = vecs.device

        # See https://arxiv.org/pdf/1509.02897.pdf
        # We sample a different random rotation for each round of hashing to
        # decrease the probability of hash misses.
        assert n_buckets % 2 == 0

        rot_size = n_buckets

        rotations_shape = (
            batch_size if self._random_rotations_per_head else 1,
            vecs.shape[-1],
            self.n_hashes if self._rehash_each_round else 1,
            rot_size // 2)

        random_rotations = torch.randn(rotations_shape, dtype=vecs.dtype, device=device).expand(batch_size, -1, -1, -1)

        dropped_vecs = self.dropout_for_hash(vecs)
        rotated_vecs = torch.einsum('btf,bfhi->bhti', dropped_vecs, random_rotations)

        if self._rehash_each_round:
            # rotated_vectors size [batch,n_hash,seq_len,buckets]
            rotated_vecs = torch.cat([rotated_vecs, -rotated_vecs], dim=-1)
            buckets = torch.argmax(rotated_vecs, dim=-1)
        else:
            rotated_vecs = torch.cat([rotated_vecs, -rotated_vecs], dim=-1)
            # In this configuration, we map each item to the top self.n_hashes buckets
            rotated_vecs = torch.squeeze(rotated_vecs, 1)
            bucket_range = torch.arange(rotated_vecs.shape[-1], device=device)
            bucket_range = torch.reshape(bucket_range, (1, -1))
            bucket_range = bucket_range.expand_as(rotated_vecs)

            _, buckets = sort_key_val(rotated_vecs, bucket_range, dim=-1)
            # buckets size [batch size, seq_len, buckets]
            buckets = buckets[... , -self.n_hashes:].transpose(1, 2)

        # buckets is now (self.n_hashes, seq_len). Next we add offsets so that
        # bucket numbers from different hashing rounds don't overlap.
        offsets = torch.arange(self.n_hashes, device=device)
        offsets = torch.reshape(offsets * n_buckets, (1, -1, 1))
        buckets = torch.reshape(buckets + offsets, (batch_size, -1,))
        return buckets

    def forward(self, qk, v, query_len = None, input_mask = None, input_attn_mask = None, pos_emb = None, **kwargs):
        """LSH attention over shared qk (batch, seq, dim) and values v.

        `_reverse`/`_depth` kwargs (injected by the reversible wrapper) select
        fetching vs. storing the cached bucket assignment for this depth.
        """
        batch_size, seqlen, dim, device = *qk.shape, qk.device

        query_len = default(query_len, seqlen)
        is_reverse = kwargs.pop('_reverse', False)
        depth = kwargs.pop('_depth', None)

        assert seqlen % (self.bucket_size * 2) == 0, f'Sequence length ({seqlen}) needs to be divisible by target bucket size x 2 - {self.bucket_size * 2}'

        n_buckets = seqlen // self.bucket_size
        buckets = self.hash_vectors(n_buckets, qk, key_namespace=depth, fetch=is_reverse, set_cache=self.training)

        # We use the same vector as both a query and a key.
        assert int(buckets.shape[1]) == self.n_hashes * seqlen

        total_hashes = self.n_hashes

        ticker = torch.arange(total_hashes * seqlen, device=device).unsqueeze(0).expand_as(buckets)
        # encode (bucket, position) into one sortable integer per element
        buckets_and_t = seqlen * buckets + (ticker % seqlen)
        buckets_and_t = buckets_and_t.detach()

        # Hash-based sort ("s" at the start of variable names means "sorted")
        sbuckets_and_t, sticker = sort_key_val(buckets_and_t, ticker, dim=-1)
        _, undo_sort = sticker.sort(dim=-1)
        del ticker

        sbuckets_and_t = sbuckets_and_t.detach()
        sticker = sticker.detach()
        undo_sort = undo_sort.detach()

        if exists(pos_emb):
            qk = apply_rotary_pos_emb(qk, pos_emb)

        st = (sticker % seqlen)
        sqk = batched_index_select(qk, st)
        sv = batched_index_select(v, st)

        # Split off a "bin" axis so that attention only occurs within chunks.
        chunk_size = total_hashes * n_buckets
        bq_t = bkv_t = torch.reshape(st, (batch_size, chunk_size, -1))
        bqk = torch.reshape(sqk, (batch_size, chunk_size, -1, dim))
        bv = torch.reshape(sv, (batch_size, chunk_size, -1, dim))

        # Hashing operates on unit-length vectors. Unnormalized query vectors are
        # fine because they effectively provide a learnable temperature for the
        # attention softmax, but normalizing keys is needed so that similarity for
        # the purposes of attention correctly corresponds to hash locality.
        bq = bqk
        bk = F.normalize(bqk, p=2, dim=-1).type_as(bq)

        # Allow each chunk to attend within itself, and also one chunk back. Chunk
        # boundaries might occur in the middle of a sequence of items from the
        # same bucket, so this increases the chances of attending to relevant items.
        def look_one_back(x):
            x_extra = torch.cat([x[:, -1:, ...], x[:, :-1, ...]], dim=1)
            return torch.cat([x, x_extra], dim=2)

        bk = look_one_back(bk)
        bv = look_one_back(bv)
        bkv_t = look_one_back(bkv_t)

        # Dot-product attention.
        dots = torch.einsum('bhie,bhje->bhij', bq, bk) * (dim ** -0.5)
        masked_value = max_neg_value(dots)

        # Mask for post qk attention logits of the input sequence
        if input_attn_mask is not None:
            input_attn_mask = F.pad(input_attn_mask, (0, seqlen - input_attn_mask.shape[-1], 0, seqlen - input_attn_mask.shape[-2]), value=True)
            dot_attn_indices = ((bq_t * seqlen)[:, :, :, None] + bkv_t[:, :, None, :])
            input_attn_mask = input_attn_mask.reshape(batch_size, -1)
            dot_attn_indices = dot_attn_indices.reshape(batch_size, -1)
            mask = input_attn_mask.gather(1, dot_attn_indices).reshape_as(dots)
            dots.masked_fill_(~mask, masked_value)
            del mask

        # Input mask for padding in variable lengthed sequences
        if input_mask is not None:
            input_mask = F.pad(input_mask, (0, seqlen - input_mask.shape[1]), value=True)
            mq = input_mask.gather(1, st).reshape((batch_size, chunk_size, -1))
            mkv = look_one_back(mq)
            mask = mq[:, :, :, None] * mkv[:, :, None, :]
            dots.masked_fill_(~mask, masked_value)
            del mask

        # Causal masking
        if self.causal:
            mask = bq_t[:, :, :, None] < bkv_t[:, :, None, :]
            if seqlen > query_len:
                mask = mask & (bkv_t[:, :, None, :] < query_len)
            dots.masked_fill_(mask, masked_value)
            del mask

        # Mask out attention to self except when no other targets are available.
        self_mask = bq_t[:, :, :, None] == bkv_t[:, :, None, :]
        dots.masked_fill_(self_mask, TOKEN_SELF_ATTN_VALUE)
        del self_mask

        # Mask out attention to other hash buckets.
        if not self._attend_across_buckets:
            bq_buckets = bkv_buckets = torch.reshape(sbuckets_and_t // seqlen, (batch_size, chunk_size, -1))
            bkv_buckets = look_one_back(bkv_buckets)
            bucket_mask = bq_buckets[:, :, :, None] != bkv_buckets[:, :, None, :]
            dots.masked_fill_(bucket_mask, masked_value)
            del bucket_mask

        # Don't double-count query-key pairs across multiple rounds of hashing.
        # There are two possible strategies here. (1) The default is to count how
        # many times a query-key pair is repeated, and to lower its log-prob
        # correspondingly at each repetition. (2) When hard_k is set, the code
        # instead masks all but the first occurence of each query-key pair.
        if not self._allow_duplicate_attention:
            locs1 = undo_sort // bq_t.shape[-1]
            locs2 = (locs1 + 1) % chunk_size
            if not self._attend_across_buckets:
                locs1 = buckets * chunk_size + locs1
                locs2 = buckets * chunk_size + locs2
            locs = torch.cat([
                torch.reshape(locs1, (batch_size, total_hashes, seqlen)),
                torch.reshape(locs2, (batch_size, total_hashes, seqlen)),
            ], 1).permute((0, 2, 1))

            slocs = batched_index_select(locs, st)
            b_locs = torch.reshape(slocs, (batch_size, chunk_size, -1, 2 * total_hashes))

            b_locs1 = b_locs[:, :, :, None, :total_hashes]

            bq_locs = b_locs1.expand(b_locs.shape[:3] + (2, total_hashes))
            bq_locs = torch.reshape(bq_locs, b_locs.shape)
            bkv_locs = look_one_back(b_locs)

            dup_counts = (bq_locs[:, :, :, None, :] == bkv_locs[:, :, None, :, :])
            # for memory considerations, chunk summation of last dimension for counting duplicates
            dup_counts = chunked_sum(dup_counts, chunks=(total_hashes * batch_size))
            dup_counts = dup_counts.detach()
            assert dup_counts.shape == dots.shape
            dots = dots - torch.log(dup_counts + 1e-9)
            del dup_counts

        # Softmax.
        dots_logsumexp = torch.logsumexp(dots, dim=-1, keepdim=True)
        dots = torch.exp(dots - dots_logsumexp).type_as(dots)
        dropped_dots = self.dropout(dots)

        bo = torch.einsum('buij,buje->buie', dropped_dots, bv)
        so = torch.reshape(bo, (batch_size, -1, dim))
        slogits = torch.reshape(dots_logsumexp, (batch_size, -1,))

        # unsort logits
        o = batched_index_select(so, undo_sort)
        logits = slogits.gather(1, undo_sort)

        o = torch.reshape(o, (batch_size, total_hashes, seqlen, dim))
        logits = torch.reshape(logits, (batch_size, total_hashes, seqlen, 1))

        if query_len != seqlen:
            query_slice = (slice(None), slice(None), slice(0, query_len))
            o, logits = o[query_slice], logits[query_slice]

        # combine the hash rounds, weighted by each round's softmax normalizer
        probs = torch.exp(logits - torch.logsumexp(logits, dim=1, keepdim=True))
        out = torch.sum(o * probs, dim=1)

        attn = torch.empty(0, device=device)

        # return unsorted attention weights
        if self._return_attn:
            attn_unsort = ((bq_t * seqlen)[:, :, :, None] + bkv_t[:, :, None, :])
            attn_unsort = attn_unsort.view(batch_size * total_hashes, -1).long()
            unsorted_dots = torch.zeros(batch_size * total_hashes, seqlen * seqlen, device=device)
            unsorted_dots.scatter_add_(1, attn_unsort, dots.view_as(attn_unsort))
            del attn_unsort
            unsorted_dots = unsorted_dots.reshape(batch_size, total_hashes, seqlen, seqlen)
            attn = torch.sum(unsorted_dots[:, :, 0:query_len, :] * probs, dim=1)

        # return output, attention matrix, and bucket distribution
        return out, attn, buckets
# simple full attention
class FullQKAttention(nn.Module):
    """Full (quadratic) attention with shared queries/keys.

    Fallback used for short sequences; matches LSHAttention's interface and
    its shared-qk convention (keys are L2-normalized, tokens do not attend to
    themselves). Returns (out, attention, empty-tensor) so callers can treat
    both attention types uniformly.
    """
    def __init__(self, causal = False, dropout = 0.):
        super().__init__()
        self.causal = causal
        self.dropout = nn.Dropout(dropout)

    def forward(self, qk, v, query_len = None, input_mask = None, input_attn_mask = None, **kwargs):
        b, seq_len, dim = qk.shape
        query_len = default(query_len, seq_len)
        t = query_len

        q = qk[:, 0:query_len]
        qk = F.normalize(qk, 2, dim=-1).type_as(q)

        dot = torch.einsum('bie,bje->bij', q, qk) * (dim ** -0.5)

        # qk attention requires tokens not attend to self
        i = torch.arange(t)
        dot[:, i, i] = TOKEN_SELF_ATTN_VALUE
        masked_value = max_neg_value(dot)

        # Input mask for padding in variable lengthed sequences
        if input_mask is not None:
            mask = input_mask[:, 0:query_len, None] * input_mask[:, None, :]
            mask = F.pad(mask, (0, seq_len - mask.shape[-1]), value=True)
            dot.masked_fill_(~mask, masked_value)

        # Mask for post qk attention logits of the input sequence
        if input_attn_mask is not None:
            input_attn_mask = F.pad(input_attn_mask, (0, seq_len - input_attn_mask.shape[-1]), value=True)
            dot.masked_fill_(~input_attn_mask, masked_value)

        if self.causal:
            # mask strictly-upper-triangular entries (future positions)
            i, j = torch.triu_indices(t, t, 1)
            dot[:, i, j] = masked_value

        dot = dot.softmax(dim=-1)
        dot = self.dropout(dot)

        out = torch.einsum('bij,bje->bie', dot, v)

        return out, dot, torch.empty(0)
# Shared qk attention, using either full or LSH attention
class LSHSelfAttention(nn.Module):
    """Multi-head shared-QK self-attention.

    Routes heads to LSH attention (or full attention below `full_attn_thres`),
    with optionally some heads using windowed local attention. Supports
    persistent memory key/values (`num_mem_kv`), a single shared value head
    (`one_value_head`), and appended context `keys`.
    """
    def __init__(self, dim, heads = 8, bucket_size = 64, n_hashes = 8, causal = False, dim_head = None, attn_chunks = 1, random_rotations_per_head = False, attend_across_buckets = True, allow_duplicate_attention = True, num_mem_kv = 0, one_value_head = False, use_full_attn = False, full_attn_thres = None, return_attn = False, post_attn_dropout = 0., dropout = 0., n_local_attn_heads = 0, **kwargs):
        super().__init__()
        assert dim_head or (dim % heads) == 0, 'dimensions must be divisible by number of heads'
        assert n_local_attn_heads < heads, 'local attention heads must be less than number of heads'

        dim_head = default(dim_head, dim // heads)
        dim_heads = dim_head * heads

        self.dim = dim
        self.heads = heads
        self.dim_head = dim_head
        self.attn_chunks = default(attn_chunks, 1)

        # with one_value_head, all heads share a single value projection
        self.v_head_repeats = (heads if one_value_head else 1)
        v_dim = dim_heads // self.v_head_repeats

        self.toqk = nn.Linear(dim, dim_heads, bias = False)
        self.tov = nn.Linear(dim, v_dim, bias = False)
        self.to_out = nn.Linear(dim_heads, dim)

        self.bucket_size = bucket_size
        self.lsh_attn = LSHAttention(bucket_size=bucket_size, n_hashes=n_hashes, causal=causal, random_rotations_per_head=random_rotations_per_head, attend_across_buckets = attend_across_buckets,  allow_duplicate_attention = allow_duplicate_attention, return_attn = return_attn, dropout = dropout, **kwargs)
        self.full_attn = FullQKAttention(causal=causal, dropout=dropout)
        self.post_attn_dropout = nn.Dropout(post_attn_dropout)

        self.use_full_attn = use_full_attn
        self.full_attn_thres = default(full_attn_thres, bucket_size)

        self.num_mem_kv = num_mem_kv
        self.mem_kv = nn.Parameter(torch.randn(1, num_mem_kv, dim, requires_grad=True)) if num_mem_kv > 0 else None

        self.n_local_attn_heads = n_local_attn_heads
        self.local_attn = LocalAttention(window_size=bucket_size * 2, causal=causal, dropout=dropout, shared_qk=True, look_forward=(1 if not causal else 0))

        # optional hook (attn, buckets) used by Recorder
        self.callback = None

    def forward(self, x, keys = None, input_mask = None, input_attn_mask = None, context_mask = None, pos_emb = None, **kwargs):
        device, dtype = x.device, x.dtype
        b, t, e, h, dh, m, l_h = *x.shape, self.heads, self.dim_head, self.num_mem_kv, self.n_local_attn_heads

        mem_kv = default(self.mem_kv, torch.empty(b, 0, e, dtype=dtype, device=device))
        mem = mem_kv.expand(b, m, -1)

        keys = default(keys, torch.empty(b, 0, e, dtype=dtype, device=device))
        c = keys.shape[1]

        # total key/value length: tokens + memory kv + context keys
        kv_len = t + m + c
        use_full_attn = self.use_full_attn or kv_len <= self.full_attn_thres

        x = torch.cat((x, mem, keys), dim=1)
        qk = self.toqk(x)
        v = self.tov(x)
        v = v.repeat(1, 1, self.v_head_repeats)

        def merge_heads(v):
            return v.view(b, kv_len, h, -1).transpose(1, 2)

        def split_heads(v):
            return v.view(b, h, t, -1).transpose(1, 2).contiguous()

        merge_batch_and_heads = partial(merge_dims, 0, 1)

        qk, v = map(merge_heads, (qk, v))

        # first l_h heads go to local attention; the rest to LSH/full attention
        has_local = l_h > 0
        lsh_h = h - l_h

        split_index_fn = partial(split_at_index, 1, l_h)
        (lqk, qk), (lv, v) = map(split_index_fn, (qk, v))
        lqk, qk, lv, v = map(merge_batch_and_heads, (lqk, qk, lv, v))

        masks = {}
        if input_mask is not None or context_mask is not None:
            default_mask = torch.tensor([True], device=device)
            i_mask = default(input_mask, default_mask.expand(b, t))
            m_mask = default_mask.expand(b, m)
            c_mask = default(context_mask, default_mask.expand(b, c))
            mask = torch.cat((i_mask, m_mask, c_mask), dim=1)
            mask = merge_batch_and_heads(expand_dim(1, lsh_h, mask))
            masks['input_mask'] = mask

        if input_attn_mask is not None:
            input_attn_mask = merge_batch_and_heads(expand_dim(1, lsh_h, input_attn_mask))
            masks['input_attn_mask'] = input_attn_mask

        attn_fn = self.lsh_attn if not use_full_attn else self.full_attn
        partial_attn_fn = partial(attn_fn, query_len = t, pos_emb = pos_emb, **kwargs)
        attn_fn_in_chunks = process_inputs_chunk(partial_attn_fn, chunks = self.attn_chunks)

        out, attn, buckets = attn_fn_in_chunks(qk, v, **masks)

        if self.callback is not None:
            self.callback(attn.reshape(b, lsh_h, t, -1), buckets.reshape(b, lsh_h, -1))

        if has_local:
            lqk, lv = lqk[:, :t], lv[:, :t]
            local_out = self.local_attn(lqk, lqk, lv, input_mask=input_mask)
            local_out = local_out.reshape(b, l_h, t, -1)
            out = out.reshape(b, lsh_h, t, -1)
            out = torch.cat((local_out, out), dim=1)

        out = split_heads(out).view(b, t, -1)
        out = self.to_out(out)
        return self.post_attn_dropout(out)
# feed forward
class GELU_(nn.Module):
    """tanh-approximate GELU, for torch versions that lack nn.GELU."""

    def forward(self, x):
        inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
        return 0.5 * x * (1 + torch.tanh(inner))

# prefer the native implementation when the installed torch provides it
GELU = nn.GELU if hasattr(nn, 'GELU') else GELU_
class FeedForward(nn.Module):
    """Position-wise feed-forward block with optional GLU gating."""

    def __init__(self, dim, mult = 4, dropout = 0., activation = None, glu = False):
        super().__init__()
        activation = default(activation, GELU)

        self.glu = glu
        # GLU needs double hidden width: one half gates the activated half
        self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1))
        self.act = activation()
        self.dropout = nn.Dropout(dropout)
        self.w2 = nn.Linear(dim * mult, dim)

    def forward(self, x, **kwargs):
        if self.glu:
            hidden, gate = self.w1(x).chunk(2, dim=-1)
            hidden = self.act(hidden) * gate
        else:
            hidden = self.act(self.w1(x))
        hidden = self.dropout(hidden)
        return self.w2(hidden)
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
    """Learned absolute positional embedding; returns (seq, dim) for the
    input's sequence length (broadcast over batch by the caller)."""

    def __init__(self, dim, max_seq_len):
        super().__init__()
        self.emb = nn.Embedding(max_seq_len, dim)

    def forward(self, x):
        positions = torch.arange(x.shape[1], device=x.device)
        return self.emb(positions)
class FixedPositionalEmbedding(nn.Module):
    """Non-learned sinusoidal positional embedding (Vaswani et al. layout:
    all sines in the first half of the feature dim, cosines in the second).
    """

    def __init__(self, dim):
        super().__init__()
        inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
        # buffer (not a parameter): moves with the module but is never trained
        self.register_buffer('inv_freq', inv_freq)

    def forward(self, x, seq_dim = 1):
        positions = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq)
        angles = torch.outer(positions, self.inv_freq)
        encoding = torch.cat((angles.sin(), angles.cos()), dim=-1)
        return encoding[None, :, :].type_as(x)
# rotary positional embedding helpers
def rotate_every_two(x):
    """Rotate adjacent feature pairs by 90 degrees (rotary-embedding helper).

    Maps ``(x0, x1, x2, x3, ...)`` along the last dim to
    ``(-x1, x0, -x3, x2, ...)``. Implemented with plain tensor slicing
    instead of einops rearranges — same result, no extra dependency in
    the hot path.

    Args:
        x: tensor whose last dimension has even length.

    Returns:
        Tensor of the same shape with each pair rotated.
    """
    evens = x[..., 0::2]
    odds = x[..., 1::2]
    # interleave (-odd, even) back into pairs
    rotated = torch.stack((-odds, evens), dim=-1)
    return rotated.flatten(start_dim=-2)
def apply_rotary_pos_emb(qk, sinu_pos):
    """Apply rotary positional embeddings to the leading positions of ``qk``.

    ``sinu_pos`` has shape ``(1, n, 2 * d)`` with the ``n`` sine blocks first
    and cosine blocks second (as produced by FixedPositionalEmbedding).
    Only the first ``n`` sequence positions of ``qk`` are rotated; any
    remainder (e.g. memory key/values) is passed through untouched.

    Implemented with reshape/repeat_interleave instead of einops
    rearrange/repeat — identical semantics, one dependency less.
    """
    sinu_pos = sinu_pos.type(qk.dtype)
    num_pos = sinu_pos.shape[1]
    # split (1, n, 2*d) -> sin (n, d) and cos (n, d)
    sin, cos = sinu_pos.reshape(num_pos, 2, -1).unbind(dim=1)
    # duplicate each frequency so it lines up with the interleaved pairs
    sin = sin.repeat_interleave(2, dim=-1)
    cos = cos.repeat_interleave(2, dim=-1)
    seq_len = sin.shape[0]
    qk, qk_pass = qk[:, :seq_len], qk[:, seq_len:]
    qk = (qk * cos) + (rotate_every_two(qk) * sin)
    return torch.cat((qk, qk_pass), dim = 1)
# reformer lm
class Reformer(nn.Module):
    """Reversible stack of (LSH self-attention, feed-forward/PKM) block pairs.

    The input features are duplicated into the two streams required by the
    reversible residual network and averaged back together on output, so
    callers see plain ``dim``-sized features on both sides.
    """
    def __init__(self, dim, depth, heads = 8, dim_head = None, bucket_size = 64, n_hashes = 8, ff_chunks = 100, attn_chunks = None, causal = False, weight_tie = False, lsh_dropout = 0., ff_dropout = 0., ff_activation = None, ff_mult = 4, ff_glu = False, post_attn_dropout = 0., layer_dropout = 0., lsh_attend_across_buckets = True, lsh_allow_duplicate_attention = True, random_rotations_per_head = False, use_scale_norm = False, use_rezero = False, use_full_attn = False, full_attn_thres = 0, reverse_thres = 0, num_mem_kv = 0, one_value_head = False, n_local_attn_heads = 0, pkm_layers = tuple(), pkm_num_keys = 128):
        super().__init__()
        self.dim = dim
        self.depth = depth
        self.bucket_size = bucket_size
        self.num_mem_kv = num_mem_kv
        self.full_attn_thres = full_attn_thres
        # Factory lambdas for the sub-modules. With weight_tie they are
        # memoized through cache_fn, so every layer shares one instance.
        get_attn = lambda: LSHSelfAttention(dim, heads, bucket_size, n_hashes, causal = causal, dim_head = dim_head, dropout = lsh_dropout, post_attn_dropout = post_attn_dropout, attn_chunks = attn_chunks, allow_duplicate_attention = lsh_allow_duplicate_attention, attend_across_buckets = lsh_attend_across_buckets, random_rotations_per_head = random_rotations_per_head, num_mem_kv = num_mem_kv, use_full_attn = use_full_attn, full_attn_thres = full_attn_thres, one_value_head = one_value_head, n_local_attn_heads = n_local_attn_heads)
        get_ff = lambda: Chunk(ff_chunks, FeedForward(dim, dropout = ff_dropout, activation = ff_activation, mult = ff_mult, glu = ff_glu), along_dim = -2)
        get_pkm = lambda: PKM(dim, num_keys = pkm_num_keys)
        if weight_tie:
            get_attn, get_ff, get_pkm = map(cache_fn, (get_attn, get_ff, get_pkm))
        blocks = []
        norm_type = ScaleNorm if use_scale_norm else nn.LayerNorm
        residual_fn_wrapper = ReZero if use_rezero else partial(PreNorm, norm_type, dim)
        for ind in range(depth):
            layer_num = ind + 1  # pkm_layers is specified 1-indexed
            use_pkm = layer_num in cast_tuple(pkm_layers)
            parallel_net = None
            attn = get_attn()
            # a PKM memory layer replaces the feed-forward on requested layers
            if use_pkm:
                parallel_net = get_pkm()
            else:
                parallel_net = get_ff()
            # f (attention) and g (ff/pkm) form one reversible couple
            f = residual_fn_wrapper(attn)
            g = residual_fn_wrapper(parallel_net)
            blocks.append(nn.ModuleList([f, g]))
        # reverse_thres decides per-sequence-length whether to run reversibly
        self.layers = ReversibleSequence(nn.ModuleList(blocks), layer_dropout = layer_dropout, reverse_thres = reverse_thres, send_signal = True)
    def forward(self, x, **kwargs):
        """Run the reversible trunk; shape (b, n, dim) -> (b, n, dim)."""
        # duplicate the features to form the two reversible streams
        x = torch.cat([x, x], dim = -1)
        x = self.layers(x, **kwargs)
        # average the two streams back down to `dim`
        return torch.stack(x.chunk(2, dim=-1)).mean(dim=0)
class ReformerLM(nn.Module):
    """Token-level language model around the Reformer trunk.

    Adds token embeddings, one of several positional-encoding schemes, the
    reversible Reformer, a final LayerNorm and an (optionally weight-tied)
    projection back to the vocabulary.
    """
    def __init__(self, num_tokens, dim, depth, max_seq_len, heads = 8, dim_head = 64, bucket_size = 64, n_hashes = 4, ff_chunks = 100, attn_chunks = 1, causal = False, weight_tie = False, lsh_dropout = 0., ff_dropout = 0., ff_mult = 4, ff_activation = None, ff_glu = False, post_attn_dropout = 0., layer_dropout = 0., random_rotations_per_head = False, use_scale_norm = False, use_rezero = False, use_full_attn = False, full_attn_thres = 0, reverse_thres = 0, num_mem_kv = 0, one_value_head = False, emb_dim = None, return_embeddings = False, weight_tie_embedding = False, fixed_position_emb = False, absolute_position_emb = False, axial_position_emb = False, axial_position_shape = None, n_local_attn_heads = 0, pkm_layers = tuple(), pkm_num_keys = 128):
        super().__init__()
        emb_dim = default(emb_dim, dim)
        self.max_seq_len = max_seq_len
        self.token_emb = nn.Embedding(num_tokens, emb_dim)
        self.to_model_dim = Identity() if emb_dim == dim else nn.Linear(emb_dim, dim)
        # exactly one positional scheme becomes active below; the default
        # (no flag set) is per-layer rotary-style fixed embeddings
        self.pos_emb = Always(0)
        self.layer_pos_emb = Always(None)
        if axial_position_emb:
            axial_position_shape = default(axial_position_shape, (math.ceil(max_seq_len / bucket_size), bucket_size))
            self.pos_emb = AxialPositionalEmbedding(emb_dim, axial_position_shape)
        elif absolute_position_emb:
            self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len)
        elif fixed_position_emb:
            self.pos_emb = FixedPositionalEmbedding(emb_dim)
        else:
            self.layer_pos_emb = FixedPositionalEmbedding(dim_head)
        # NOTE(review): post_attn_dropout is hard-coded to 0. here, so the
        # constructor argument of the same name is silently ignored — confirm intent.
        self.reformer = Reformer(dim, depth, heads = heads, dim_head = dim_head, bucket_size = bucket_size, n_hashes = n_hashes, ff_chunks = ff_chunks, attn_chunks = attn_chunks, causal = causal, weight_tie = weight_tie, lsh_dropout = lsh_dropout, ff_mult = ff_mult, ff_activation = ff_activation, ff_glu = ff_glu, ff_dropout = ff_dropout, post_attn_dropout = 0., layer_dropout = layer_dropout, random_rotations_per_head = random_rotations_per_head, use_scale_norm = use_scale_norm, use_rezero = use_rezero, use_full_attn = use_full_attn, full_attn_thres = full_attn_thres, reverse_thres = reverse_thres, num_mem_kv = num_mem_kv, one_value_head = one_value_head, n_local_attn_heads = n_local_attn_heads, pkm_layers = pkm_layers, pkm_num_keys = pkm_num_keys)
        self.norm = nn.LayerNorm(dim)
        if return_embeddings:
            # expose raw hidden states instead of vocabulary logits
            self.out = Identity()
            return
        self.out = nn.Sequential(
            nn.Linear(dim, emb_dim) if emb_dim != dim else Identity(),
            nn.Linear(emb_dim, num_tokens) if not weight_tie_embedding else MatrixMultiply(self.token_emb.weight, transpose=True, normalize=True)
        )
    def forward(self, x, **kwargs):
        """Map token ids (b, n) to logits (b, n, num_tokens) or embeddings."""
        x = self.token_emb(x)
        x = x + self.pos_emb(x)
        # rotary-style embeddings are handed to the attention layers directly
        layer_pos_emb = self.layer_pos_emb(x)
        x = self.to_model_dim(x)
        x = self.reformer(x, pos_emb = layer_pos_emb, **kwargs)
        x = self.norm(x)
        return self.out(x)
| 32,377 | 41.602632 | 757 | py |
reformer-pytorch | reformer-pytorch-master/reformer_pytorch/generative_tools.py | from functools import partial
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from reformer_pytorch.reformer_pytorch import ReformerLM
from reformer_pytorch.autopadder import Autopadder
def top_p(logits, thres = 0.9):
    """Nucleus filtering: keep only the highest logits whose cumulative
    probability stays within ``1 - thres``; everything else becomes -inf.

    The highest-probability token is always kept (the removal mask is
    shifted right by one position before being applied).
    """
    ranked, order = torch.sort(logits, descending=True)
    cumulative = torch.cumsum(ranked.softmax(dim=-1), dim=-1)
    drop = cumulative > (1 - thres)
    # shift right so the top token can never be dropped
    drop = torch.cat((torch.zeros_like(drop[:, :1]), drop[:, :-1]), dim=-1)
    ranked = ranked.masked_fill(drop, float('-inf'))
    # scatter the filtered values back to their original positions
    return ranked.scatter(1, order, ranked)
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class TrainingWrapper(nn.Module):
    """Convenience wrapper around a ReformerLM for training and sampling.

    Wraps the model in an Autopadder (pads inputs to a valid bucket multiple),
    computes the shifted-by-one cross-entropy loss in ``forward``, and offers
    autoregressive ``generate`` sampling.
    """
    def __init__(self, net, ignore_index = -100, pad_value = 0):
        super().__init__()
        assert isinstance(net, ReformerLM), 'generative trainer wrapper can only accept ReformerLM class'
        self.pad_value = pad_value          # value used when padding variable-length batches
        self.ignore_index = ignore_index    # label value excluded from the loss
        self.net = Autopadder(net)
        self.max_seq_len = net.max_seq_len
    @torch.no_grad()
    def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
        """Sample up to `seq_len` new tokens continuing `start_tokens`.

        Returns only the newly generated tokens (the prompt is stripped);
        the model's train/eval mode is restored afterwards.
        """
        was_training = self.net.training
        num_dims = len(start_tokens.shape)
        # allow an unbatched (1-D) prompt
        if num_dims == 1:
            start_tokens = start_tokens[None, :]
        b, t = start_tokens.shape
        self.net.eval()
        out = start_tokens
        input_mask = kwargs.pop('input_mask', None)
        if input_mask is None:
            input_mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)
        for _ in range(seq_len):
            # only the most recent max_seq_len tokens fit in the context window
            x = out[:, -self.max_seq_len:]
            input_mask = input_mask[:, -self.max_seq_len:]
            logits = self.net(x, input_mask=input_mask, **kwargs)[:, -1, :]
            filtered_logits = filter_logits_fn(logits, thres = filter_thres)
            probs = F.softmax(filtered_logits / temperature, dim=-1)
            sample = torch.multinomial(probs, 1)
            out = torch.cat((out, sample), dim=-1)
            # the new token is always attendable
            input_mask = F.pad(input_mask, (0, 1), value=True)
            # stop early once every sequence in the batch emitted EOS this step
            if eos_token is not None and (sample == eos_token).all():
                break
        out = out[:, t:]  # drop the prompt, keep only generated tokens
        if num_dims == 1:
            out = out.squeeze(0)
        self.net.train(was_training)
        return out
    def forward(self, x, return_loss = False, **kwargs):
        """Forward pass; with ``return_loss`` computes next-token cross entropy.

        ``x`` may be a padded tensor or a list of variable-length sequences
        (which is then padded with ``pad_value``).
        """
        pad = partial(pad_sequence, batch_first = True, padding_value = self.pad_value)
        if not return_loss:
            if not isinstance(x, torch.Tensor):
                x = pad(x)
            return self.net(x, **kwargs)
        # shift by one: predict token i+1 from tokens <= i
        if isinstance(x, torch.Tensor):
            xi = x[:, :-1]
            xo = x[:, 1:]
        else:
            xi = pad(list(map(lambda t: t[:-1], x)))
            xo = pad(list(map(lambda t: t[1:], x)))
        out = self.net(xi, **kwargs)
        # cross_entropy expects (batch, classes, seq), hence the transpose
        loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
        return loss
| 3,321 | 33.604167 | 138 | py |
reformer-pytorch | reformer-pytorch-master/reformer_pytorch/__init__.py | from reformer_pytorch.reformer_pytorch import LSHAttention, LSHSelfAttention, Reformer, ReformerLM
from reformer_pytorch.reformer_enc_dec import ReformerEncDec
from reformer_pytorch.recorder import Recorder
from reformer_pytorch.autopadder import Autopadder
| 258 | 50.8 | 98 | py |
reformer-pytorch | reformer-pytorch-master/examples/enwik8_simple/train.py | from reformer_pytorch import ReformerLM
from reformer_pytorch.generative_tools import TrainingWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 4096
# helpers
def cycle(loader):
    """Yield items from `loader` forever, restarting it after each pass."""
    while True:
        yield from loader
def decode_token(token):
    """Map a byte value to a printable character; codes below 32 become spaces."""
    return chr(max(32, token))
def decode_tokens(tokens):
    """Decode an iterable of byte values into a string."""
    return ''.join(decode_token(token) for token in tokens)
# instantiate model
model = ReformerLM(
dim = 512,
depth = 6,
max_seq_len = SEQ_LEN,
num_tokens = 256,
heads = 8,
bucket_size = 64,
n_hashes = 4,
ff_chunks = 10,
lsh_dropout = 0.1,
weight_tie = True,
causal = True,
n_local_attn_heads = 4,
use_full_attn = False # set this to true for comparison with full attention
)
model = TrainingWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
X = np.fromstring(file.read(int(95e6)), dtype=np.uint8)
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
    """Samples random (seq_len + 1)-token windows from a 1-D token tensor.

    Each item is moved to the GPU; the extra token provides the
    shifted-by-one target for language-model training.
    """

    def __init__(self, data, seq_len):
        super().__init__()
        self.data = data
        self.seq_len = seq_len

    def __getitem__(self, index):
        # random start, ignoring the requested index on purpose
        start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
        window = self.data[start: start + self.seq_len + 1].long()
        return window.cuda()

    def __len__(self):
        return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader), return_loss = True)
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader), return_loss = True)
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
print(f'%s \n\n %s', (prime, '*' * 100))
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| 3,049 | 25.068376 | 81 | py |
reformer-pytorch | reformer-pytorch-master/examples/enwik8_deepspeed/train.py | import deepspeed
from reformer_pytorch import ReformerLM
from reformer_pytorch.generative_tools import TrainingWrapper
import argparse
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
def add_argument():
    """Build the CLI parser for the enwik8 DeepSpeed training script.

    Combines the script's own flags with the arguments DeepSpeed injects
    (e.g. --deepspeed, --deepspeed_config) and returns the parsed namespace.
    """
    parser=argparse.ArgumentParser(description='enwik8')
    parser.add_argument('--with_cuda', default=False, action='store_true',
                        help='use CPU in case there\'s no GPU support')
    parser.add_argument('--use_ema', default=False, action='store_true',
                        help='whether use exponential moving average')
    parser.add_argument('-b', '--batch_size', default=32, type=int,
                        help='mini-batch size (default: 32)')
    parser.add_argument('-e', '--epochs', default=30, type=int,
                        help='number of total epochs (default: 30)')
    # rank is injected by the distributed launcher, not by the user
    parser.add_argument('--local_rank', type=int, default=-1,
                        help='local rank passed from distributed launcher')
    # adds DeepSpeed's own configuration arguments to the parser
    parser = deepspeed.add_config_arguments(parser)
    args=parser.parse_args()
    return args
# constants
EPOCHS = 20
GRADIENT_ACCUMULATE_EVERY = 4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 1024
SEQ_LEN = 4096
# helpers
def decode_token(token):
    """Map a byte value to a printable character; codes below 32 become spaces."""
    return chr(max(32, token))
def decode_tokens(tokens):
    """Decode an iterable of byte values into a string."""
    return ''.join(decode_token(token) for token in tokens)
# instantiate model
model = ReformerLM(
dim = 512,
depth = 6,
max_seq_len = SEQ_LEN,
num_tokens = 256,
heads = 8,
bucket_size = 64,
n_hashes = 4,
ff_chunks = 10,
lsh_dropout = 0.1,
weight_tie = True,
causal = True,
n_local_attn_heads = 4,
use_full_attn = False # set this to true for comparison with full attention
)
model = TrainingWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
X = np.fromstring(file.read(int(95e6)), dtype=np.uint8)
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
    """Samples random (seq_len + 1)-token windows from a 1-D token tensor.

    Device placement is left to the caller (DeepSpeed moves batches itself);
    the extra token is the shifted-by-one LM target.
    """

    def __init__(self, data, seq_len):
        super().__init__()
        self.data = data
        self.seq_len = seq_len

    def __getitem__(self, index):
        # random start, ignoring the requested index on purpose
        start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
        return self.data[start: start + self.seq_len + 1].long()

    def __len__(self):
        return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
# setup deepspeed
cmd_args = add_argument()
model_engine, optimizer, trainloader, _ = deepspeed.initialize(args=cmd_args, model=model, model_parameters=model.parameters(), training_data=train_dataset)
# training
for _ in range(EPOCHS):
for i, data in enumerate(trainloader):
model_engine.train()
data = data.to(model_engine.local_rank)
loss = model_engine(data, return_loss = True)
model_engine.backward(loss)
model_engine.step()
print(loss.item() * GRADIENT_ACCUMULATE_EVERY)
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
inp = random.choice(val_dataset)[:-1]
loss = model(inp[None, :].cuda(), return_loss = True)
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
print(f'%s \n\n %s', (prime, '*' * 100))
sample = model.generate(inp.cuda(), GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| 3,856 | 29.132813 | 157 | py |
reformer-pytorch | reformer-pytorch-master/pretraining/self-supervised.py | import re
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, random_split
from tqdm import tqdm
from reformer_pytorch import Reformer, ReformerLM
from transformers import BertTokenizer, PreTrainedTokenizer
from fairseq.optim.adafactor import Adafactor
import os
import json
import logging
from datetime import datetime
class WikiDataset(Dataset):
    """Map-style dataset over a directory of JSON-lines wiki dump files.

    Each regular file directly inside ``path`` is one document; indexing
    returns the list of whitespace-normalized ``'text'`` fields of that
    file's JSON records.

    Fixes vs. original: removes the unused ``document_name`` local, streams
    the file instead of materializing ``readlines()``, and collapses the
    redundant double ``re.sub`` (``\\s+`` already covers newlines).
    """

    def __init__(self, path="", prefix="train"):
        """Collect every regular file directly inside ``path``.

        Args:
            path: directory containing the JSON-lines dump files.
            prefix: unused; kept for backward compatibility.
        """
        assert os.path.isdir(path)
        self.documents = []
        for file_name in os.listdir(path):
            path_to_file = os.path.join(path, file_name)
            if not os.path.isfile(path_to_file):
                continue
            self.documents.append(path_to_file)

    def __len__(self):
        """ Returns the number of documents. """
        return len(self.documents)

    def __getitem__(self, idx):
        """Load document ``idx`` and return its cleaned text lines."""
        document_path = self.documents[idx]
        items = []
        with open(document_path, encoding="utf-8") as source:
            for line in source:
                # collapse all whitespace runs (incl. newlines) to single spaces
                text = re.sub(r'\s+', ' ', json.loads(line)['text'])
                items.append(text)
        return items
class ReformerTrainer(object):
    """Self-supervised (masked-LM) pretraining/evaluation driver for a Reformer.

    Handles dataloader construction, BERT-style token masking, the training
    loop with optional gradient accumulation, periodic checkpointing and
    TensorBoard/file logging.
    """

    def __init__(self,
                 dataset,
                 model,
                 tokenizer,
                 device=None,
                 train_batch_size=8,
                 eval_batch_size=None,
                 tb_writer=True,
                 tb_dir='./tb_logs',
                 log_dir='./logs'):
        """
        Provides an easy to use class for pretraining and evaluating a Reformer Model.
        :param dataset: (torch.utils.data.Dataset) containing all of the data you wish to utilize during training.
        :param model: (reformer_pytorch.Reformer)
        :param tokenizer: (transformers.PreTrainedTokenizer) defaults to BertTokenizer ('bert-base-case')
        :param device: provide manual device placement. If None, will default to cuda:0 if available.
        :param tb_writer: (bool) Whether to write to tensorboard or not.
        :param tb_dir: (str) Where to write TB logs to.
        :param log_dir: (str) Where to write generic logs to.
        """
        self.dataset = dataset
        self.model = model
        self.tokenizer = tokenizer
        self.device = device
        self.n_gpu = torch.cuda.device_count() if torch.cuda.is_available() else 0
        self.train_batch_size = train_batch_size
        self.eval_batch_size = eval_batch_size
        self.tb_writer = tb_writer
        self.log_dir = log_dir
        # fill in the defaults that depend on runtime state / other arguments
        if tokenizer is None:
            self.tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
        if device is None:
            self.device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
        if eval_batch_size is None:
            self.eval_batch_size = train_batch_size
        if tb_writer:
            from torch.utils.tensorboard import SummaryWriter
            self.writer = SummaryWriter(log_dir=tb_dir)
        logging.basicConfig(filename=f'{log_dir}/{datetime.now().date()}.log', level=logging.INFO)

    def build_dataloaders(self, train_test_split=0.1, train_shuffle=True, eval_shuffle=True):
        """
        Builds the Training and Eval DataLoaders
        :param train_test_split: The ratio split of test to train data.
        :param train_shuffle: (bool) True if you wish to shuffle the train_dataset.
        :param eval_shuffle: (bool) True if you wish to shuffle the eval_dataset.
        :return: train dataloader and evaluation dataloader.
        """
        dataset_len = len(self.dataset)
        eval_len = int(dataset_len * train_test_split)
        train_len = dataset_len - eval_len
        train_dataset, eval_dataset = random_split(self.dataset, (train_len, eval_len))
        train_loader = DataLoader(train_dataset, batch_size=self.train_batch_size, shuffle=train_shuffle)
        eval_loader = DataLoader(eval_dataset, batch_size=self.eval_batch_size, shuffle=eval_shuffle)
        logging.info(f'''train_dataloader size: {len(train_loader.dataset)} | shuffle: {train_shuffle}
                         eval_dataloader size: {len(eval_loader.dataset)} | shuffle: {eval_shuffle}''')
        return train_loader, eval_loader

    def mask_tokens(self, inputs: torch.Tensor, mlm_probability=0.15, pad=True):
        """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """
        labels = inputs.clone()
        # mlm_probability defaults to 0.15 in Bert
        probability_matrix = torch.full(labels.shape, mlm_probability)
        # never mask special tokens ([CLS], [SEP], ...) or padding
        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
        ]
        probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = labels.eq(self.tokenizer.pad_token_id)
            probability_matrix.masked_fill_(padding_mask, value=0.0)
        masked_indices = torch.bernoulli(probability_matrix).bool()
        labels[~masked_indices] = -100  # We only compute loss on masked tokens
        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
        # 10% of the time, we replace masked input tokens with random word
        indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
        inputs[indices_random] = random_words[indices_random]
        if pad:
            # right-pad both tensors up to the tokenizer's max length
            input_pads = self.tokenizer.max_len - inputs.shape[-1]
            label_pads = self.tokenizer.max_len - labels.shape[-1]
            inputs = F.pad(inputs, pad=(0, input_pads), value=self.tokenizer.pad_token_id)
            labels = F.pad(labels, pad=(0, label_pads), value=self.tokenizer.pad_token_id)
        # The rest of the time (10% of the time) we keep the masked input tokens unchanged
        return inputs, labels

    def _tokenize_input_ids(self, input_ids: list, pad_to_max_length: bool = True):
        """
        Helper function to clean up the train and eval functions
        :param input_ids: inputs to tokenize.
        :param pad_to_max_length: Whether you want to pad the inputs to the tokenizer.max_len
        :return: Tensor containing training data.
        """
        inputs = torch.cat(
            [
                self.tokenizer.encode(
                    input_ids[i],
                    add_special_tokens=True,
                    max_length=self.tokenizer.max_len,
                    pad_to_max_length=pad_to_max_length,
                    return_tensors='pt'
                ) \
                for i in range(len(input_ids))
            ]
        )
        return inputs

    def train(self,
              epochs,
              train_dataloader,
              eval_dataloader,
              log_steps,
              ckpt_steps,
              ckpt_dir=None,
              gradient_accumulation_steps=1):
        """
        Trains the Reformer Model
        :param epochs: The number of times you wish to loop through the dataset.
        :param train_dataloader: (torch.utils.data.DataLoader) The data to train on.
        :param eval_dataloader: (torch.utils.data.DataLoader) The data to evaluate on.
        :param log_steps: The number of steps to iterate before logging.
        :param ckpt_steps: The number of steps to iterate before checkpointing.
        :param ckpt_dir: The directory to save the checkpoints to.
        :param gradient_accumulation_steps: Optional gradient accumulation.
        :return: The trained model.
        """
        optimizer = Adafactor(self.model.parameters())
        loss_fn = nn.CrossEntropyLoss()
        losses = {}
        global_steps = 0
        local_steps = 0
        step_loss = 0.0
        # resume from checkpoint when one exists in ckpt_dir
        # NOTE(review): the final save below uses ckpt_dir unconditionally;
        # calling train() with ckpt_dir=None will fail there — confirm intent.
        if ckpt_dir is not None:
            assert os.path.isdir(ckpt_dir)
            try:
                logging.info(f'{datetime.now()} | Continuing from checkpoint...')
                self.model.load_state_dict(torch.load(f'{ckpt_dir}/model_state_dict.pt', map_location=self.device))
                optimizer.load_state_dict(torch.load(f'{ckpt_dir}/optimizer_state_dict.pt'))
            except Exception as e:
                logging.info(f'{datetime.now()} | No checkpoint was found | {e}')
        self.model.train()
        if self.n_gpu > 1:
            self.model = nn.DataParallel(self.model)
            logging.info(f'{datetime.now()} | Utilizing {self.n_gpu} GPUs')
        self.model.to(self.device)
        logging.info(f'{datetime.now()} | Moved model to: {self.device}')
        logging.info(
            f'{datetime.now()} | train_batch_size: {self.train_batch_size} | eval_batch_size: {self.eval_batch_size}')
        logging.info(f'{datetime.now()} | Epochs: {epochs} | log_steps: {log_steps} | ckpt_steps: {ckpt_steps}')
        logging.info(f'{datetime.now()} | gradient_accumulation_steps: {gradient_accumulation_steps}')
        for epoch in tqdm(range(epochs), desc='Epochs', position=0):
            logging.info(f'{datetime.now()} | Epoch: {epoch}')
            for step, batch in tqdm(enumerate(train_dataloader),
                                    desc='Epoch Iterator',
                                    position=1,
                                    leave=True,
                                    total=len(train_dataloader)):
                for data in batch:
                    inputs = self._tokenize_input_ids(data, pad_to_max_length=True)
                    inputs, labels = self.mask_tokens(inputs)
                    inputs, labels = inputs.to(self.device), labels.to(self.device)
                    output = self.model(inputs)
                    # only calculating loss on masked tokens
                    loss_mx = labels != -100
                    output = output[loss_mx].view(-1, self.tokenizer.vocab_size)
                    labels = labels[loss_mx].view(-1)
                    loss = loss_fn(output, labels)
                    if gradient_accumulation_steps > 1:
                        loss /= gradient_accumulation_steps
                    loss.backward()
                    step_loss += loss.item()
                    losses[global_steps] = loss.item()
                    local_steps += 1
                    global_steps += 1
                    # apply accumulated gradients every N steps
                    if global_steps % gradient_accumulation_steps == 0:
                        optimizer.step()
                        self.model.zero_grad()
                    if global_steps % log_steps == 0:
                        if self.tb_writer:
                            self.writer.add_scalar('Train/Loss', step_loss / local_steps, global_steps)
                            # NOTE(review): closing the writer after every log
                            # flushes but also ends the event file — confirm intent.
                            self.writer.close()
                        logging.info(
                            f'''{datetime.now()} | Train Loss: {step_loss / local_steps} | Steps: {global_steps}''')
                        # dump the full per-step loss history as JSON
                        with open(f'{self.log_dir}/train_results.json', 'w') as results_file:
                            json.dump(losses, results_file)
                            results_file.close()  # redundant: the context manager already closes
                        step_loss = 0.0
                        local_steps = 0
                    if global_steps % ckpt_steps == 0:
                        # evaluating before every checkpoint
                        self.evaluate(eval_dataloader)
                        model_to_save = self.model.module if hasattr(self.model, 'module') else self.model
                        torch.save(model_to_save.state_dict(), f'{ckpt_dir}/model_state_dict.pt')
                        torch.save(optimizer.state_dict(), f'{ckpt_dir}/optimizer_state_dict.pt')
                        logging.info(f'{datetime.now()} | Saved checkpoint to: {ckpt_dir}')
        # final save after all epochs (unwrap DataParallel if needed)
        model_to_save = self.model.module if hasattr(self.model, 'module') else self.model
        torch.save(model_to_save.state_dict(), f'{ckpt_dir}/model_state_dict.pt')
        torch.save(optimizer.state_dict(), f'{ckpt_dir}/optimizer_state_dict.pt')
        return self.model

    def evaluate(self, dataloader):
        """
        Runs through the provided dataloader with torch.no_grad()
        :param dataloader: (torch.utils.data.DataLoader) Evaluation DataLoader
        :return: None
        """
        loss_fn = nn.CrossEntropyLoss()
        if self.n_gpu > 1 and not isinstance(self.model, nn.DataParallel):
            self.model = nn.DataParallel(self.model)
        self.model.eval()
        eval_loss = 0.0
        perplexity = 0.0
        eval_steps = 0
        logging.info(f'{datetime.now()} | Evaluating...')
        for step, batch in tqdm(enumerate(dataloader), desc='Evaluating', leave=True, total=len(dataloader)):
            for data in batch:
                inputs = self._tokenize_input_ids(data, pad_to_max_length=True)
                inputs, labels = self.mask_tokens(inputs)
                inputs, labels = inputs.to(self.device), labels.to(self.device)
                with torch.no_grad():
                    output = self.model(inputs)
                # only score the masked positions
                loss_mx = labels != -100
                output_ids = output[loss_mx].view(-1, self.tokenizer.vocab_size)
                labels = labels[loss_mx].view(-1)
                tmp_eval_loss = loss_fn(output_ids, labels)
                tmp_perplexity = torch.exp(tmp_eval_loss)
                if self.n_gpu > 1:
                    tmp_eval_loss = tmp_eval_loss.mean()
                eval_loss += tmp_eval_loss.item()
                perplexity += tmp_perplexity.item()
                eval_steps += 1
            # NOTE(review): averages are recomputed (and re-divided) once per
            # outer batch rather than once at the end — confirm intent.
            eval_loss /= eval_steps
            perplexity /= eval_steps
            if self.tb_writer:
                self.writer.add_scalar('Eval/Loss', eval_loss, eval_steps)
                self.writer.close()
                self.writer.add_scalar('Perplexity', perplexity, eval_steps)
                self.writer.close()
            logging.info(f'{datetime.now()} | Step: {step} | Eval Loss: {eval_loss} | Perplexity: {perplexity}')
        return None
if __name__ == '__main__':
dataset = WikiDataset(path='D:/data/enwiki')
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
tokenizer.max_len = 128
model = ReformerLM(
num_tokens=tokenizer.vocab_size,
dim=512,
depth=6,
heads=8,
max_seq_len=tokenizer.max_len,
causal=True
)
trainer = ReformerTrainer(dataset, model, tokenizer, train_batch_size=32, eval_batch_size=32)
train_dataloader, eval_dataloader = trainer.build_dataloaders(train_test_split=0.90)
model = trainer.train(epochs=3,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
log_steps=10,
ckpt_steps=100,
ckpt_dir='./ckpts',
gradient_accumulation_steps=1)
torch.save(model, './ckpts/model.bin')
| 15,496 | 41.809392 | 118 | py |
curriculumagent | curriculumagent-master/curriculumagent/baseline/evaluate.py | #!/usr/bin/env python3
# Copyright (c) 2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of L2RPN Baselines, L2RPN Baselines a repository to host baselines for l2rpn competitions.
import logging
from pathlib import Path
from typing import Union, Optional
import grid2op
from grid2op.Runner import Runner
from l2rpn_baselines.utils.save_log_gif import save_log_gif
from curriculumagent.baseline.baseline import CurriculumAgent
def evaluate(
    env: grid2op.Environment.BaseEnv,
    load_path: Union[str, Path] = ".",
    logs_path: Optional[Union[str, Path]] = None,
    nb_episode: int = 1,
    nb_process: int = 1,
    max_steps: int = -1,
    verbose: Union[bool, int] = False,
    save_gif: bool = False,
    **kwargs,
) -> Runner:
    """This is the evaluate method for the Curriculum Agent.
    Args:
        env: The environment on which the baseline will be evaluated. The default is the IEEE14 Case. For other
        environments please retrain the agent in advance.
        load_path: The path where the model is stored. This is used by the agent when calling "agent.load()"
        logs_path: The path where the agents results will be stored.
        nb_episode: Number of episodes to run for the assessment of the performance. By default, it equals 1.
        nb_process: Number of process to be used for the assessment of the performance. Should be an integer greater
        than 1. By default, it's equals 1.
        max_steps: Maximum number of timesteps each episode can last. It should be a positive integer or -1.
        -1 means that the entire episode is run (until the chronics is out of data or until a game over).
        By default,it equals -1.
        verbose: Verbosity of the output.
        save_gif: Whether to save a gif into each episode folder corresponding to the representation of the said
        episode. Note, that depending on the environment (and the performance of your agent) this creation of the gif
        might take quite a lot of time!
        **kwargs:
    Returns:
        The experiment file consisting of the data.
    """
    # NOTE(review): annotation says Runner, but the function returns the
    # result list of runner.run() — confirm and align annotation or return.
    runner_params = env.get_params_for_runner()
    runner_params["verbose"] = verbose
    # Create the agent (this piece of code can change)
    agent = CurriculumAgent(
        action_space=env.action_space,
        observation_space=env.observation_space,
        name="Evaluation"
    )
    # Load weights from file (for example)
    agent.load(load_path)
    # Build runner
    runner = Runner(**runner_params, agentClass=None, agentInstance=agent)
    # you can do stuff with your model here
    # start the runner
    # the Keras model cannot be pickled across processes, so force single-process
    if nb_process > 1:
        logging.warning(
            f"Parallel execution is not yet available for keras model. Therefore, the number of processes is comuted with "
            f"only one process."
        )
        nb_process = 1
    res = runner.run(path_save=logs_path, nb_episode=nb_episode, nb_process=nb_process, max_iter=max_steps, pbar=False)
    # Print summary
    logging.info("Evaluation summary:")
    for _, chron_name, cum_reward, nb_time_step, max_ts in res:
        msg_tmp = "\tFor chronics located at {}\n".format(chron_name)
        msg_tmp += "\t\t - cumulative reward: {:.6f}\n".format(cum_reward)
        msg_tmp += "\t\t - number of time steps completed: {:.0f} / {:.0f}".format(nb_time_step, max_ts)
        logging.info(msg_tmp)
    if save_gif:
        # renders every episode to a gif inside its log folder (can be slow)
        save_log_gif(logs_path, res)
    return res
if __name__ == "__main__":
"""
This is a possible implementation of the eval script.
"""
from lightsim2grid import LightSimBackend
import grid2op
logging.basicConfig(level=logging.INFO)
env = grid2op.make("l2rpn_case14_sandbox", backend=LightSimBackend())
obs = env.reset()
path_of_model = Path(__file__).parent / "model_IEEE14"
myagent = CurriculumAgent(
action_space=env.action_space,
observation_space=env.observation_space,
model_path=path_of_model,
path_to_data=path_of_model,
name="Test",
)
env = grid2op.make("l2rpn_case14_sandbox")
out = evaluate(
env,
load_path=path_of_model,
logs_path=Path(__file__).parent / "logs",
nb_episode=10,
nb_process=1,
max_steps=-1,
verbose=0,
save_gif=True,
)
| 4,642 | 36.144 | 123 | py |
curriculumagent | curriculumagent-master/curriculumagent/junior/junior_student.py | """In this file, a neural network is developed to fit the dataset generated by Tutor.
Depending on the observation space and action space, the tutor model can/has to be
adjusted.
The Junior model returns a one-hot encoded output, based on the number of actions.
Credit: The junior is a more general approach of the original code, see
@https://github.com/AsprinChina/L2RPN_NIPS_2020_a_PPO_Solution
"""
import logging
from pathlib import Path
from typing import Union, Optional, Tuple, List
import nni
import numpy as np
from collections import ChainMap
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, Callback, EarlyStopping
from tensorflow.keras.optimizers.schedules import LearningRateSchedule
from tensorflow.keras.initializers import Initializer
from typing import Union, Optional, TypedDict, Tuple, List
from curriculumagent.common.utilities import map_actions
class JuniorParam(TypedDict):
    """Typed hyperparameter dictionary for the :class:`Junior` model.

    Attributes:
        activation: Name of the activation function; one of "relu",
            "leaky_relu" or "elu" (see ``Junior._extract_config``).
        learning_rate: Learning rate, either a fixed float or a Keras
            learning-rate schedule.
        layer1: Number of units in the first dense layer.
        layer2: Number of units in the second dense layer (0 disables it).
        layer3: Number of units in the third dense layer (0 disables it).
        layer4: Number of units in the fourth dense layer (0 disables it).
        batchsize: Batch size used during training.
        dropout1: Dropout rate applied after layer 3 (0.0 disables it).
        dropout2: Dropout rate applied after layer 4 (0.0 disables it).
        epochs: Number of training epochs.
        initializer: Kernel initializer. Declared as a Keras Initializer, but
            ``Junior._extract_config`` actually expects the short string code
            ("O", "RN", "RU" or "Z") -- see that method for the mapping.
    """
    activation: str
    learning_rate: Optional[Union[float, LearningRateSchedule]]
    layer1: int
    layer2: int
    layer3: int
    layer4: int
    batchsize: int
    dropout1: float
    dropout2: float
    epochs: int
    initializer: Initializer
class SendMetrics(tf.keras.callbacks.Callback):
    """Keras callback that forwards validation accuracy to the NNI framework."""

    def on_epoch_end(self, epoch, logs=None):
        """Report the epoch's validation accuracy as an NNI intermediate result.

        Args:
            epoch: Index of the epoch that just finished.
            logs: Keras metrics dictionary for this epoch.

        Returns: None, reports the intermediate result to NNI
        """
        # TensorFlow 2.0 API reference claims the key is `val_acc`, but in fact it's `val_accuracy`
        if logs is None:
            logs = {}
        metric_key = "val_acc" if "val_acc" in logs else "val_accuracy"
        nni.report_intermediate_result(logs[metric_key])
class Junior:
    """Imitation-learning ("Junior") model.

    A feed-forward Keras network that learns to map grid observations to the
    actions selected by the Tutor. The output layer is a softmax over the
    combined action set, trained with sparse categorical cross-entropy on the
    Tutor's action indices.
    """

    def __init__(
            self,
            action_space_file: Union[Path, List[Path]],
            config: Optional[JuniorParam] = None,
            seed: Optional[int] = None,
            run_nni: bool = False
    ):
        """Constructor of the Junior model.

        The Junior is built either with the default architecture (empty or
        missing ``config``) or from a custom hyperparameter configuration,
        which makes it usable for hyperparameter searches with Tune or NNI.

        Note:
            Pass epochs, learning_rate, batchsize and the layer sizes in the config.

        Args:
            action_space_file: Action space file(s) that were used for the Tutor
                training. Needed to derive the size of the softmax output.
            config: Dictionary containing the hyperparameters, see JuniorParam.
            seed: Optional seed to reproduce results.
            run_nni: Whether NNI is used. If True, a reporting callback is added.

        Returns:
            None.
        """
        if config is None:
            config = {}

        # Load the action set(s); their combined size defines the output layer.
        list_of_actions = []
        if isinstance(action_space_file, Path):
            assert action_space_file.is_file()
            list_of_actions = [np.load(str(Path(action_space_file)))]
        elif isinstance(action_space_file, list):
            for act_path in action_space_file:
                assert act_path.is_file()
            list_of_actions = [np.load(str(act_path)) for act_path in action_space_file]

        self.config = config
        layer_size, initializer, activation = self._extract_config(config)
        self.lr = config.get("learning_rate", 5e-4)
        self.batch_size = config.get("batchsize", 256)
        self.epochs = config.get("epochs", 1000)
        self.num_actions = len(dict(ChainMap(*map_actions(list_of_actions))))
        self.activation = activation
        self.layer_size = layer_size
        self.lf = tf.keras.losses.SparseCategoricalCrossentropy()
        # Bug fix: the initializer extracted from the config used to be
        # discarded and hard-coded to Orthogonal; use the configured one.
        self.initializer = initializer

        # Seed:
        self.seed = seed
        if self.seed:
            np.random.seed(self.seed)
            tf.random.set_seed(self.seed)

        # Init Model: (either simple or advanced)
        self.model = self._build_model()
        if run_nni:
            self.callback = [SendMetrics()]
        else:
            self.callback = []

    def _build_model(self) -> tf.keras.models.Sequential:
        """Build and compile the junior network as a Keras model.

        With an empty config the default architecture is used (four dense
        layers of 1000 units plus two dropout layers). Otherwise the layers
        are assembled from the hyperparameter config, where a layer size or
        dropout rate of 0 (or a missing key) disables the corresponding layer.

        Returns:
            Compiled Keras model.
        """
        if not self.config:
            # build standard junior model
            model = tf.keras.models.Sequential(
                [
                    tf.keras.layers.Dense(units=1000, activation=self.activation, kernel_initializer=self.initializer),
                    tf.keras.layers.Dense(units=1000, activation=self.activation, kernel_initializer=self.initializer),
                    tf.keras.layers.Dense(units=1000, activation=self.activation, kernel_initializer=self.initializer),
                    tf.keras.layers.Dropout(0.25),
                    tf.keras.layers.Dense(units=1000, activation=self.activation, kernel_initializer=self.initializer),
                    tf.keras.layers.Dropout(0.25),
                    tf.keras.layers.Dense(self.num_actions, activation="softmax"),
                ]
            )
        else:
            # Build model based on hyperparameter. Robustness fix: use .get()
            # throughout so a partial config cannot raise a KeyError
            # (_extract_config only requires *any* of the keys to be present).
            # Layer1:
            model_structure = [
                tf.keras.layers.Dense(self.layer_size[0], activation=self.activation,
                                      kernel_initializer=self.initializer)
            ]
            # Layer2:
            if self.config.get("layer2", 0) != 0:
                model_structure += [
                    tf.keras.layers.Dense(self.layer_size[1], activation=self.activation,
                                          kernel_initializer=self.initializer)
                ]
            # Layer3:
            if self.config.get("layer3", 0) != 0:
                model_structure += [
                    tf.keras.layers.Dense(self.layer_size[2], activation=self.activation,
                                          kernel_initializer=self.initializer)
                ]
            # Dropout 1:
            if self.config.get("dropout1", 0.0) != 0.0:
                model_structure += [tf.keras.layers.Dropout(self.config["dropout1"])]
            # Layer 4:
            if self.config.get("layer4", 0) != 0:
                model_structure += [
                    tf.keras.layers.Dense(self.layer_size[3], activation=self.activation,
                                          kernel_initializer=self.initializer)
                ]
            # Dropout 2:
            if self.config.get("dropout2", 0.0) != 0.0:
                model_structure += [tf.keras.layers.Dropout(self.config["dropout2"])]
            model_structure += [tf.keras.layers.Dense(self.num_actions, activation="softmax")]
            model = tf.keras.models.Sequential(model_structure)
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self.lr), loss=self.lf, metrics=["accuracy"])
        # Bug fix: ``logging.info(model.summary)`` only logged the bound-method
        # repr. model.summary() itself cannot run before the model is built, so
        # log a meaningful message instead.
        logging.info("Junior model compiled with %d output actions.", self.num_actions)
        return model

    def _extract_config(self, config) -> Tuple[List, tf.keras.initializers.Initializer, tf.keras.layers.Layer]:
        """Extract layer sizes, initializer and activation from the config.

        Falls back to the default setup (4x1000 units, Orthogonal initializer,
        ReLU) with a warning when none of the expected keys are present.

        Args:
            config: Hyperparameter dictionary (see JuniorParam).

        Returns:
            Tuple of layer sizes, initializer and activation method.
        """
        # Default values:
        layer_size = [1000, 1000, 1000, 1000]
        initializer = tf.keras.initializers.Orthogonal()
        activation = tf.keras.layers.ReLU()
        if not any(k in config.keys() for k in ["activation", "initializer", "layer1",
                                                "layer2", "layer3", "layer4"]):
            import warnings
            warnings.warn("The custom dictionary had not the correct keys. Revert to default model.")
            return layer_size, initializer, activation
        # Activation options:
        activation_option = {"leaky_relu": tf.keras.layers.LeakyReLU(),
                             "relu": tf.keras.layers.ReLU(),
                             "elu": tf.keras.layers.ELU(),
                             }
        # Initializer Options
        initializer_option = {"O": tf.keras.initializers.Orthogonal(),
                              "RN": tf.keras.initializers.RandomNormal(),
                              "RU": tf.keras.initializers.RandomUniform(),
                              "Z": tf.keras.initializers.Zeros()}
        activation = activation_option[config.get("activation", "relu")]
        initializer = initializer_option[config.get("initializer", "O")]
        if all(key in config.keys() for key in ["layer1", "layer2", "layer3", "layer4"]):
            # Round because hyperparameter searches may hand over floats.
            layer_size = [int(np.round(config[key])) for key in ["layer1", "layer2", "layer3", "layer4"]]
        return layer_size, initializer, activation

    def train(
            self,
            x_train: np.ndarray,
            y_train: np.ndarray,
            x_validate: np.ndarray,
            y_validate: np.ndarray,
            log_dir: Optional[Union[str, Path]] = None,
            ckpt_dir: Optional[Union[str, Path]] = None,
            patience: Optional[int] = None,
            epochs: Optional[int] = None,
    ) -> tf.keras.callbacks.History:
        """Train the junior model for a given number of epochs.

        Builds the callbacks (TensorBoard, checkpointing, early stopping) and
        runs Keras ``fit()`` on the training data, recording validation
        metrics as well.

        Args:
            x_train: Training data containing the grid observations.
            y_train: Training actions of the tutor.
            x_validate: Validation data containing the grid observations.
            y_validate: Validation actions of the tutor.
            log_dir: Directory for the TensorBoard callback.
            ckpt_dir: Directory for the checkpoint callback.
            patience: Optional early stopping criterion.
            epochs: Number of epochs for the training (defaults to the
                value from the config).

        Returns:
            Returns training history.
        """
        # Bug fix: copy the instance callback list. The previous alias plus
        # ``+=`` mutated self.callback in place, so repeated train() calls
        # accumulated duplicate callbacks.
        callbacks = list(self.callback)
        logging.warning(f"{tf.__version__}")
        logging.warning(f"{tf.keras.__version__}")
        if log_dir is not None:
            tensorboard_callback = TensorBoard(log_dir=log_dir, write_graph=False)
            callbacks += [tensorboard_callback]
        if isinstance(ckpt_dir, (Path, str)):
            if isinstance(ckpt_dir, str):
                ckpt_path = ckpt_dir + "/" + "ckpt_{epoch}"
            else:
                ckpt_path = ckpt_dir / "ckpt_{epoch}"
            cp_callback = ModelCheckpoint(filepath=ckpt_path, save_weights_only=False, save_freq=10, verbose=1)
            callbacks += [cp_callback]
        if patience is not None:
            early_stopping = EarlyStopping(
                monitor="val_loss",
                patience=patience,
                verbose=1,
                mode="auto",
                restore_best_weights=True,
            )
            callbacks += [early_stopping]
        history = self.model.fit(
            x=x_train,
            y=y_train,
            epochs=epochs or self.epochs,
            validation_data=(x_validate, y_validate),
            batch_size=self.batch_size,
            callbacks=callbacks,
        )
        return history

    def test(self, x: np.ndarray, y: np.ndarray, save_path: Optional[Path] = None) -> dict:
        """Test the Junior model with input dataset x and targets/actions y.

        Predicts on x and computes a top-n ranking: for n = 1..20, the share of
        samples where the Tutor's action is among the n highest-probability
        predictions.

        Args:
            x: Input with Tutor observation for the prediction.
            y: Action of the tutor to validate with the prediction
                (expected shape (samples, 1); the action index is read from y[i, 0]).
            save_path: Optional path from which a saved model is loaded before testing.

        Returns:
            The dictionary that contains the top-n accuracy values.
        """
        if isinstance(save_path, Path):
            self.model = tf.keras.models.load_model(save_path)
            logging.info(f"Imported model from {save_path}")
        a_pred = self.model.predict(x, verbose=1)
        # For each sample keep the indices of the 20 most probable actions
        # (ascending, so the best action is last).
        top_n = []
        for i in range(a_pred.shape[0]):
            top_n.append(a_pred[i, :].argsort()[-20:])
        # Added accuracy to record the prediction performance
        accuracy = {}
        for n in range(1, 21):
            correct = 0
            for i in range(a_pred.shape[0]):
                if y[i, 0] in top_n[i][-n:]:
                    correct += 1
            acc = correct / a_pred.shape[0] * 100
            logging.info(f"accuracy of top-{n} is {acc}")
            accuracy["accuracy of top-%d" % n] = correct / a_pred.shape[0] * 100
        return accuracy
def load_dataset(
    dataset_path: Union[str, Path], dataset_name: str
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Load the train/validation/test splits produced by a Tutor run.

    Note:
        The three splits share ``dataset_name`` as a common stem and only
        differ in their suffix. As example, for ``dataset_name = "data"``:
        training ``data_train.npz``, validation ``data_val.npz`` and
        test ``data_test.npz``.

    Args:
        dataset_path: Directory containing the Tutor result files.
        dataset_name: Common stem of the three ``.npz`` files.

    Returns:
        Tuple of numpy arrays: (s_train, a_train, s_validate, a_validate,
        s_test, a_test).
    """
    base_path = Path(dataset_path)
    splits = {
        suffix: np.load(base_path / f"{dataset_name}_{suffix}.npz")
        for suffix in ("train", "val", "test")
    }
    return (
        splits["train"]["s_train"],
        splits["train"]["a_train"],
        splits["val"]["s_validate"],
        splits["val"]["a_validate"],
        splits["test"]["s_test"],
        splits["test"]["a_test"],
    )
def train(
    run_name: str,
    dataset_path: Path,
    target_model_path: Path,
    action_space_file: Optional[Union[Path, List[Path]]] = None,
    dataset_name: str = "junior_dataset",
    epochs: int = 1000,
    seed: Optional[int] = None,
) -> tf.keras.callbacks.History:
    """Loads the dataset and then trains the JuniorModel with the given dataset and hyperparameters.

    The trained model is saved to ``target_model_path``; intermediate
    checkpoints go to ``target_model_path / f"ckpt-{run_name}"``.

    Args:
        run_name: The name of the training run (only used for the checkpoint directory name).
        dataset_path: Path to the dataset files.
        target_model_path: Path, where to save the model.
        action_space_file: Optional action space file of the tutor.
        dataset_name: The name of the dataset in {dataset_name}_train.npz.
        epochs: The number of epochs to train.
        seed: Random seed to set for the training.

    Returns:
        Training history in Keras format.
    """
    if not target_model_path.is_dir():
        logging.warning(f"{target_model_path} does not exists yet. Create directory")
        target_model_path.mkdir(parents=True, exist_ok=True)
    ckpt_dir = target_model_path / f"ckpt-{run_name}"
    s_train, a_train, s_validate, a_validate, _, a_test = load_dataset(dataset_path, dataset_name)
    # Get maximum number of actions:
    # NOTE(review): max_action_value is only logged below and never passed on;
    # the Junior derives the action count itself from action_space_file --
    # confirm whether this computation is still needed.
    if action_space_file is None:
        max_action_value = np.max([np.max(a_train), np.max(a_validate), np.max(a_test)]) + 1
    else:
        if isinstance(action_space_file, Path):
            assert action_space_file.is_file()
            actions = np.load(str(Path(action_space_file)))
            max_action_value = len(actions)
        elif isinstance(action_space_file, list):
            for act_path in action_space_file:
                assert act_path.is_file()
            actions = [np.load(str(act_path)) for act_path in action_space_file]
            max_action_value = 0
            for act in actions:
                max_action_value += len(act)
        logging.info(f"A total of {max_action_value} actions are assumed, based on the action_space_file input.")
    # NOTE(review): lr_schedule is built here but never handed to the Junior,
    # so training runs with the Junior's default constant learning rate (5e-4).
    # Passing it via the config would require the full JuniorParam keys --
    # confirm the intended behaviour.
    steps = (len(s_train) * epochs) / 256
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        5e-4, steps, end_learning_rate=1e-4, power=1.0, cycle=False, name=None
    )
    junior = Junior(
        action_space_file=action_space_file,
        seed=seed,
    )
    history = junior.train(
        x_train=s_train,
        y_train=a_train,
        x_validate=s_validate,
        y_validate=a_validate,
        log_dir=None,
        ckpt_dir=ckpt_dir,
        patience=None,
        epochs=epochs,
    )
    # Save model
    junior.model.save(target_model_path)
    return history
def validate(
    checkpoint_path: Path,
    dataset_path: Path,
    dataset_name: str = "junior_dataset",
    action_space_file: Optional[Union[Path, List[Path]]] = None,
) -> dict:
    """Evaluate a saved Junior checkpoint on the held-out test split.

    Args:
        checkpoint_path: The checkpoint directory to load the model from.
        dataset_path: Path to the dataset used to train the checkpoint.
        dataset_name: The name of the dataset in {dataset_name}_test.npz.
        action_space_file: Optional action space file of the Tutor. This is relevant if multiple
        action sets were used in the Tutor optimization. If no action_space_file is provided, it is assumed
        that only one action file exists. Then the maximum action is taken from the
        training/val/test data.

    Returns:
        Dictionary with accuracy values achieved on the testing dataset.
    """
    assert checkpoint_path.is_dir()
    s_train, a_train, s_validate, a_validate, s_test, a_test = load_dataset(dataset_path, dataset_name)
    if action_space_file is None:
        # Infer the action count from the largest index seen in the data.
        max_action_value = np.max([np.max(a_train), np.max(a_validate), np.max(a_test)]) + 1
    else:
        if isinstance(action_space_file, Path):
            assert action_space_file.is_file()
            max_action_value = len(np.load(str(action_space_file)))
        elif isinstance(action_space_file, list):
            for single_path in action_space_file:
                assert single_path.is_file()
            stacked_actions = np.concatenate(
                [np.load(str(single_path)) for single_path in action_space_file], axis=0
            )
            max_action_value = len(stacked_actions)
        logging.info(f"A total of {max_action_value} are assumed, based on the action_space_file input.")
    junior = Junior(action_space_file=action_space_file)
    return junior.test(x=s_test, y=a_test, save_path=checkpoint_path)
| 19,532 | 36.41954 | 122 | py |
curriculumagent | curriculumagent-master/curriculumagent/submission/my_agent.py | """ This file is the advanced agent constisting of a model that can cover different action spaces, rllib
training, scaling and other additional features.
To submit your own agent, just specify the model and action_space_path and if you want a scaler that
you previously trained with. The MyAgent will import the model and then act in the specific environment.
"""
import logging
import os
from pathlib import Path
from typing import Optional, Union, List
import grid2op
import numpy as np
import tensorflow as tf
from grid2op.Agent import BaseAgent
from sklearn.base import BaseEstimator
from tensorflow.keras.models import Model
from tensorflow.python.training.tracking.tracking import AutoTrackable
try:
# Import if the agent is coppied into the submission folder
from curriculumagent.common.obs_converter import obs_to_vect
from curriculumagent.common.utilities import (
find_best_line_to_reconnect,
is_legal,
split_action_and_return,
simulate_action, revert_topo,
)
except ImportError:
# Import if the agent is copied into the submission folder
from .obs_converter import obs_to_vect
from .utilities import (
find_best_line_to_reconnect,
is_legal,
split_action_and_return,
simulate_action, revert_topo,
)
class MyAgent(BaseAgent):
    """Advanced CurriculumAgent for Grid2Op competitions.

    Wraps a trained Junior/Senior model together with one or more action sets.
    In dangerous grid states (high line load) the model ranks the actions and
    the agent simulates the best candidates before committing to one.
    """

    def __init__(
        self,
        action_space,
        model_path: Union[Path, str],
        action_space_path: Optional[Union[Path, List[Path]]] = None,
        this_directory_path: Optional[str] = "./",
        subset: Optional[bool] = False,
        scaler: Optional[BaseEstimator] = None,
        best_action_threshold: float = 0.95,
        topo: Optional[bool] = False,
        check_overload: Optional[bool] = False,
        max_action_sim: Optional[int] = 50
    ):
        """The new advanced agent.

        In contrast to the original agent, this agent enables the implementation of tuple and triple
        actions, as well as use either a keras model or a model from rllib which is a AutoTrackable
        model. Next to the difference in the models and actions, this agent also has the ability to
        transform the observations based on a provided scaler and/or filter them accordingly.

        Note:
            If you just want to pass this agent as submission without the CurriculumAgent, copy the
            content of the common dir into this directory. Further, add the model and actions to
            complete it.

        Args:
            action_space: Action Space of the Grid2Op Enviornment
            model_path: Path where to find the rllib model or Keras model
            action_space_path: path, where to find the action sets. This is required to run the agent
            this_directory_path: Path of the submission directory
            subset: Boolean, whether to filter the observation
            scaler: Optional Scaler for the neural network
            best_action_threshold: Threshold, when to stop searching for the results.
            topo: Booling indicator, whether the agent should revert to original topology if it is
                possible
            check_overload: Boolean, whether to simulate a stress of the generation and load
            max_action_sim: Define, how many of the actions you want to evaluate before selecting a
                suitable candidate. If you want to select all, it has to be the number of actions.
                For a more rapid simulation, you can just select fewer values.
        """
        # Initialize a new agent.
        BaseAgent.__init__(self, action_space=action_space)
        # Collect action set:
        self.actions = self.__collect_action(
            this_directory_path=this_directory_path, action_space_path=action_space_path
        )
        self.subset = subset
        self.check_overload = check_overload
        # Load Model:
        try:
            # Could both be Junior or Senior model
            self.model: Model = tf.keras.models.load_model(model_path, compile=False)
            self.model.compile()
        except (IndexError, AttributeError):
            # Loader of older model (Ray < 2.4 AutoTrackable export):
            self.model: AutoTrackable = tf.saved_model.load(str(model_path))
        self.scaler = scaler
        self.recovery_stack = []
        self.overflow_steps = 0
        self.next_actions = None
        self.best_action_threshold = best_action_threshold
        self.max_action_sim = max_action_sim
        # Normalise to a plain bool (the old if/else kept any truthy value).
        self.topo = bool(topo)

    def act(
        self, observation: grid2op.Observation.BaseObservation, reward: float, done: bool
    ) -> grid2op.Action.BaseAction:
        """Method of the agent to act.

        When the function selects a tuple action or triple action, the next steps are predetermined
        as well, i.e., all actions are returned sequentially.

        Args:
            observation: Grid2Op Observation
            reward: Reward of the previous action
            done: Whether the agent is done

        Returns: A suitable Grid2Op action
        """
        # Similar to the Tutor, we check whether there is some remaining action, based on previous
        # selected tuples
        if self.next_actions is not None:
            # Try to do a step:
            try:
                next_action = next(self.next_actions)
                next_action = find_best_line_to_reconnect(obs=observation, original_action=next_action)
                if is_legal(next_action, observation):
                    return next_action
            except StopIteration:
                self.next_actions = None
        # Track how long the grid has been overloaded.
        if observation.rho.max() >= 1:
            self.overflow_steps += 1
        else:
            self.overflow_steps = 0
        # case: secure with low threshold -> do (almost) nothing
        if observation.rho.max() < self.best_action_threshold:  # fixed threshold
            if self.topo:
                # Try to revert towards the original topology while the grid is safe.
                action_array = revert_topo(self.action_space, observation)
                default_action = self.action_space.from_vect(action_array)
            else:
                default_action = self.action_space({})
            default_action = find_best_line_to_reconnect(obs=observation,
                                                         original_action=default_action)
            return default_action
        # Now, case dangerous:
        min_rho = observation.rho.max()
        logging.info(
            f"{observation.get_time_stamp()}s, heavy load,"
            f" line-{observation.rho.argmax()}d load is {observation.rho.max()}"
        )
        idx_chosen = None
        # Rank the actions by the model and only simulate the top candidates.
        sorted_actions = self.__get_actions(obs=observation)[:self.max_action_sim]
        for k, idx in enumerate(sorted_actions):
            action_vect = self.actions[idx, :]
            rho_max, valid_action = simulate_action(action_space=self.action_space, obs=observation,
                                                    action_vect=action_vect,
                                                    check_overload=self.check_overload
                                                    )
            if not valid_action:
                continue
            if rho_max <= self.best_action_threshold:
                # For a very suitable candidate, we break the loop
                logging.info(f"take action {idx}, max-rho to {rho_max}," f" simulation times: {k + 1}")
                idx_chosen = idx
                break
            if rho_max < min_rho:
                # If we have a decrease in rho, we already save the candidate.
                min_rho = rho_max
                idx_chosen = idx
        # Bug fix: explicitly compare against None. The previous truthiness
        # check ``if idx_chosen:`` silently discarded action index 0.
        if idx_chosen is not None:
            self.next_actions = split_action_and_return(observation, self.action_space, self.actions[idx_chosen, :])
            next_action = next(self.next_actions)
            next_action = find_best_line_to_reconnect(obs=observation, original_action=next_action)
        else:
            next_action = find_best_line_to_reconnect(obs=observation, original_action=self.action_space({}))
        return next_action

    def reset(self, obs: grid2op.Observation.BaseObservation):
        """Reset the agent by clearing any pending multi-step actions.

        Args:
            obs: First observation of the new episode (unused).

        Returns: None
        """
        self.next_actions = None

    def __collect_action(self, this_directory_path: str, action_space_path: Union[Path, List[Path]]) -> np.ndarray:
        """Load the action set(s) referenced by ``action_space_path``.

        Args:
            this_directory_path: Directory of the submission files
            action_space_path: A single action file, a directory containing
                ``actions*.npy`` files, or a list of action files.

        Returns:
            Numpy array with all actions stacked along axis 0.
        """
        actions = None
        if isinstance(action_space_path, Path):
            if action_space_path.is_file():
                logging.info(f"Action_space_path {action_space_path} is a file and will be loaded.")
                actions = np.load(str(action_space_path))
            elif action_space_path.is_dir():
                logging.info(
                    f"Action_space_path {action_space_path} is a path. All available action files "
                    f" will be loaded."
                )
                all_action_files = [
                    act for act in os.listdir(action_space_path) if "actions" in act and ".npy" in act
                ]
                if not all_action_files:
                    raise FileNotFoundError("No actions files were found!")
                loaded_files = []
                for act in all_action_files:
                    if "actions" in act and ".npy" in act:
                        loaded_files.append(np.load(action_space_path / act))
                actions = np.concatenate(loaded_files, axis=0)
        elif isinstance(action_space_path, list):
            logging.info(f"Action_space_path {action_space_path} is a list containing multiple actions.")
            for act_path in action_space_path:
                if isinstance(act_path, Path):
                    assert act_path.is_file()
                else:
                    # Bug fix: the result of os.path.isfile() was silently
                    # discarded; assert it like the Path branch above.
                    assert os.path.isfile(act_path)
            loaded_files = [np.load(str(act_path)) for act_path in action_space_path]
            actions = np.concatenate(loaded_files, axis=0)
        else:
            raise ValueError(
                f"The action_space_path variable {action_space_path} does neither consist of a single "
                f"action nor of a path where actions can be found."
            )
        return actions

    def __get_actions(self, obs: grid2op.Observation.BaseObservation):
        """Dispatch the action ranking to the loader matching the model type.

        Args:
            obs: Input of the Grid2op Environment

        Returns: Indices of all actions, sorted by descending model probability.
        """
        if isinstance(self.model, Model):
            # Newer Junior or Senior model
            sorted_actions = self.__get_keras_actions_model(obs=obs)
        else:
            # Older Model from Ray<2.4
            sorted_actions = self.__get_tf_actions(obs=obs)
        return sorted_actions

    def __get_keras_actions_model(self, obs: grid2op.Observation.BaseObservation):
        """Rank the actions with a Keras (Junior/Senior) model.

        Args:
            obs: Current observations

        Returns: numpy array with the sorted action indices (best first).
        """
        # Select subset if wanted
        if isinstance(self.subset, list):
            model_input = obs.to_vect()[self.subset]
        elif self.subset:
            model_input = obs_to_vect(obs, False)
        else:
            model_input = obs.to_vect()
        if self.scaler:
            model_input = self.scaler.transform(model_input.reshape(1, -1))
        model_input = model_input.reshape((1, -1))
        if isinstance(self.model, tf.keras.models.Sequential):
            # Junior Model: Sequential model already outputs a softmax.
            action_probability = self.model.predict(model_input)
        else:
            # Senior Model: tensorflow functional model returns logits.
            action_probability_pre_softmax, _ = self.model.predict(model_input)
            action_probability = tf.nn.softmax(action_probability_pre_softmax).numpy().reshape(-1)
        sorted_actions = action_probability.argsort()[::-1]
        return sorted_actions.reshape(-1)

    def __get_tf_actions(self, obs: grid2op.Observation.BaseObservation):
        """Rank the actions with a legacy SavedModel (Ray < 2.4 export).

        Args:
            obs: Current observations

        Returns: sorted numpy array with action indices (best first).
        """
        # Select subset if wanted
        if isinstance(self.subset, list):
            model_input = obs.to_vect()[self.subset]
        elif self.subset:
            model_input = obs_to_vect(obs, False)
        else:
            model_input = obs.to_vect()
        if self.scaler:
            model_input = self.scaler.transform(model_input.reshape(1, -1)).reshape(
                -1,
            )
        f = self.model.signatures["serving_default"]
        out = f(
            observations=tf.convert_to_tensor(model_input.reshape(1, -1)),
            timestep=tf.convert_to_tensor(0, dtype=tf.int64),
            is_training=tf.convert_to_tensor(False),
        )
        # Collect the softmax over all actions
        try:
            prob_of_action = (
                tf.nn.softmax(out["action_dist_inputs"])
                .numpy()
                .reshape(
                    -1,
                )
            )
        except AttributeError:
            # Graph-mode tensors have no .numpy(); evaluate in a v1 session.
            poa = tf.nn.softmax(out["action_dist_inputs"])
            prob_of_action = poa.eval(session=tf.compat.v1.Session()).reshape(-1, )
        sorted_actions = prob_of_action.argsort()[::-1]
        return sorted_actions
def make_agent(env, this_directory_path):
    """Factory used by the L2RPN submission runner.

    Builds a MyAgent from the model and action files stored next to this script.

    Args:
        env: Grid2Op environment providing the action space.
        this_directory_path: Directory containing the "model" and "actions" folders.

    Returns: Ready-to-use MyAgent instance.
    """
    base_dir = Path(this_directory_path)
    return MyAgent(
        action_space=env.action_space,
        model_path=base_dir / "model",
        this_directory_path=base_dir,
        action_space_path=base_dir / "actions",
        subset=True,
    )
| 14,099 | 37.108108 | 118 | py |
curriculumagent | curriculumagent-master/curriculumagent/senior/senior_student.py | import json
import logging
import os
import pickle
import random
from pathlib import Path
from typing import Union, List, Optional
import ray
import tensorflow as tf
from ray._raylet import ObjectRef
from ray.rllib.algorithms.ppo import PPOConfig
from ray.rllib.models import ModelCatalog
from ray.rllib.utils import check_env
from sklearn.base import BaseEstimator
from curriculumagent.senior.rllib_execution.senior_env_rllib import SeniorEnvRllib
from curriculumagent.senior.rllib_execution.senior_model_rllib import Grid2OpCustomModel
from curriculumagent.submission.my_agent import MyAgent
class Senior:
"""
This class is the Senior agent. It is based on the open source framework RLlib and trains with the
PPO. The Senior model requires a Junior model and ray to be initialized.
Note that you can use this class a parent, if you want to change the default values of the
underlying rllib_environment or the model
"""
def __init__(self,
env_path: Union[str, Path],
action_space_path: Union[Path, List[Path]],
model_path: Union[Path, str],
ckpt_save_path: Optional[Union[Path, str]] = None,
scaler: Optional[Union[ObjectRef, BaseEstimator, str]] = None,
custom_junior_config: Optional[dict] = None,
num_workers: Optional[int] = None,
subset: Optional[bool] = False,
env_kwargs: Optional[dict] = {}):
"""
The Senior requires multiple inputs for the initialization.
After the init is complete, you can train the Senior or restore a checkpoint.
Further, we enable a possible check of the underlying environment and model.
Args:
env_path: Path to the Grid2Op Environment. This has to be initialized within the methode.
action_space_path: Either path to the actions or a list containing mutliple actions.
model_path: The required model path loads the underling model for the senior and consist of
one of either a Junior model or a Senior model.
ckpt_save_path: Optional path, where the PPO should save its checkpoints. If not provided,
scaler: If you want, you can pass a Scaler of Sklearn Model.
Either a Sklearn Scaler or its ray ID, if the scaler is saved via ray.put().
If scaler is provided, the environment will scale the observations based on scaler.transform()
custom_junior_config: If the junior model is a model after the hyperparameter training, you
need to pass the model configurations.
ray will save them in the default directory ray_results.
num_workers: You can configure the number of workers, based on your ray.init() configurations.
If not specified, the PPO will used half of you CPU count.
subset: Optional argument, whether the observations should be filtered when saved.
The default version saves the observations according to obs.to_vect(), however if
subset is set to True, then only the all observations regarding the lines, busses, generators and loads are
selected.
env_kwargs: Optional parameters for the Grid2Op environment that should be used when making the environment.
"""
# Set default values:
self.ckpt_save_path = ckpt_save_path
assert ray.is_initialized(), "Ray seems not to be initialized. Please use ray.init() prior to running" \
"the Senior."
if isinstance(scaler, (str,Path)):
try:
with open(scaler, "rb") as fp: # Pickling
scaler = pickle.load(fp)
except Exception as e:
scaler = None
logging.info(f"The scaler provided was either a path or a string. However, loading "
f"the scaler cause the following exception:{e}"
f"It will be set to None")
self.env_config = {
"action_space_path": action_space_path,
"env_path": env_path,
"action_threshold": 0.95,
'subset': subset,
'scaler': scaler,
'topo': True,
"env_kwargs": env_kwargs}
self.model_config = None
if isinstance(custom_junior_config, (dict, str)):
if isinstance(custom_junior_config, str):
with open(custom_junior_config) as json_file:
custom_junior_config = json.load(json_file)
ModelCatalog.register_custom_model('Senior', Grid2OpCustomModel)
self.model_config = {"model_path": model_path,
"custom_config": custom_junior_config}
self.__advanced_model = True
else:
ModelCatalog.register_custom_model('Senior', Grid2OpCustomModel)
self.model_config = {"model_path": model_path}
self.__advanced_model = False
# Testing of model and init the SeniorEnvRllib
self.rllib_env: SeniorEnvRllib = None
self.__test_env_and_model_config()
# Now init PPO
num_cpu = os.cpu_count()
if not num_workers:
num_workers = num_cpu // 2
self.ppo_config = (
PPOConfig().environment(env=SeniorEnvRllib, env_config=self.env_config)
.rollouts(num_rollout_workers=num_workers)
.framework("tf2")
.training(model={"custom_model": "Senior",
"custom_model_config": self.model_config})
.evaluation(evaluation_num_workers=1))
self.ppo = self.ppo_config.build()
def train(self, iterations: int = 1) -> dict:
""" Train the Senior with the underlying PPO agent.
Args:
iterations: Number of Iterations for the PPO
Returns: rllib output
"""
out = None
for i in range(iterations):
out = self.ppo.train()
# For every 5 steps, we save the current checkpoint:
if i % 5 == 0:
self.ppo.save(checkpoint_dir=self.ckpt_save_path)
# Now save final checkpoint
save_path = self.ppo.save(checkpoint_dir=self.ckpt_save_path)
logging.info("An Algorithm checkpoint has been created inside directory: "
f"'{save_path}'.")
return out
def restore(self, path: Optional[Union[str, Path]]) -> None:
"""
Restores the provided checkpoint. Alternatively you can also use the Algorithm.from_checkpoint()
for this.
Args:
path: Path to checkpoint.
Returns: None
"""
self.ppo.restore(path)
logging.info(f"Restored path: {path} ")
def save_to_model(self, path: Optional[Union[str, Path]] = "."):
"""
Saving the model for the final Agent. This model is saved as a TensorFlow model and
can be loaded by the MyAgent method of the CurriculumAgent.
Args:
path: Path, where to save the model.
Returns:
"""
self.ppo.export_policy_model(path)
logging.info(f"The MyAgent model is saved under {path}")
def get_my_agent(self, path: Optional[Union[str, Path]] = ".") -> MyAgent:
"""
Saves the Senior model and returns the final MyAgent model.
Returns: MyAgent model with the respective action sets of the Senior
"""
# First Save the PPO:
self.save_to_model(path)
# Load the my_agent:
agent = MyAgent(
action_space=self.rllib_env.single_env.action_space,
model_path=path,
action_space_path=self.env_config["action_space_path"],
scaler=self.env_config["scaler"],
best_action_threshold=self.env_config["action_threshold"],
topo=self.env_config["topo"],
subset=self.env_config["subset"]
)
return agent
    def __test_env_and_model_config(self) -> None:
        """ This method tests, whether the inputs of the senior are sufficient enough and
        if everything works. This also means running the rllib method check_env()

        The check proceeds in three stages: (1) instantiate SeniorEnvRllib and roll one
        random episode to completion, (2) run rllib's check_env on it, (3) load the
        TensorFlow model and rebuild it as a Grid2OpCustomModel, then push one
        observation through a forward pass.

        Returns: Nothing. The method should complete or else you have a problem.
        """
        # Create the senior_env_rllib:
        logging.info("Init of SeniorEnvRllib and testing one simple execution")
        self.rllib_env = SeniorEnvRllib(self.env_config)
        assert isinstance(self.rllib_env, SeniorEnvRllib), "The initialization of the SeniorEnvRllib failed!"
        # Run Environment: play random actions until the episode terminates or truncates.
        obs, _ = self.rllib_env.reset()
        term = False
        trunc = False
        while term is False and trunc is False:
            act = random.randrange(self.rllib_env.action_space.n)
            _, _, term, trunc, _ = self.rllib_env.step(act)
        # Reset again so `obs` is a fresh initial observation for the model check below.
        obs, _ = self.rllib_env.reset()
        logging.info("The SeniorEnvRllib environment seems to run. Next, we check ray")
        check_env(self.rllib_env)
        logging.info("The SeniorEnvRllib check completed. ")
        # Now the model
        logging.info("Analyzing the Model configuration.")
        # First TF Model
        logging.info("First loading the TensorFlow model")
        model = tf.keras.models.load_model(self.model_config["model_path"])
        model.compile()
        logging.info("TF Model Import works")
        # Now the RLlib Model: rebuild the custom model with or without the
        # extra junior hyper-parameter config, depending on how we were initialized.
        if self.__advanced_model:
            model = Grid2OpCustomModel(obs_space=self.rllib_env.observation_space,
                                       action_space=self.rllib_env.action_space,
                                       num_outputs=self.rllib_env.action_space.n,
                                       model_config={},
                                       model_path=self.model_config["model_path"],
                                       custom_config=self.model_config["custom_config"],
                                       name="Junior")
        else:
            model = Grid2OpCustomModel(obs_space=self.rllib_env.observation_space,
                                       action_space=self.rllib_env.action_space,
                                       num_outputs=self.rllib_env.action_space.n,
                                       model_config={},
                                       model_path=self.model_config["model_path"],
                                       name="Junior")
        logging.info("Init of model worked.")
        # Now Testing: one forward pass with a single (batched) observation.
        obs = {"obs": obs.reshape(1, -1)}
        assert model.forward(input_dict=obs, state=1, seq_lens=None), "Error, the model was not able to pass values!"
        logging.info("Model seems to be working")
        logging.info("All testing completed. ")
| 10,849 | 41.382813 | 120 | py |
curriculumagent | curriculumagent-master/curriculumagent/senior/rllib_execution/senior_model_rllib.py | """This file consists of two custom models, which transfer the junior weights into the Rllib experiment.
"""
import warnings
from pathlib import Path
from typing import List, Tuple
import numpy as np
import tensorflow as tf
from gymnasium.spaces import Discrete, Box
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
class Grid2OpCustomModel(TFModelV2):
    """Custom model for policy gradient algorithms.

    Rebuilds the Junior feed-forward architecture inside RLlib and copies the
    pretrained Junior weights onto it, so PPO can start from the Junior policy
    instead of a random initialization.
    """

    def __init__(
            self,
            obs_space: Box,
            action_space: Discrete,
            num_outputs: int,
            model_config: dict,
            name: str,
            **customized_model_kwargs
    ):
        """Constructor of the custom model.

        This is preferably a junior model, however, a pretrained Senior model
        should work as well. Note that when using a Senior model, the
        ``custom_config`` option becomes obsolete.

        Args:
            obs_space: Observation space passed by rllib.
            action_space: Action space passed by rllib.
            num_outputs: Number of output passed by rllib, shape of action space.
            model_config: Configurations of the model for RLlib to init the model.
            name: Name of the model, if wanted.
            customized_model_kwargs: Custom Model config from junior
                Hyper-parameter selections. Must contain ``model_path``; may
                contain ``custom_config`` with layer sizes, activation method
                or initializer.

        Returns:
            None.
        """
        super().__init__(obs_space, action_space, num_outputs, model_config, name)
        # Optional junior hyper-parameter config; empty dict selects all defaults.
        cconfig = customized_model_kwargs.get("custom_config", {})
        layer_size, initializer, activation = self._extract_config(cconfig)
        # Rebuild the Junior architecture: four hidden dense layers, then a
        # separate action head and a value head (required by RLlib).
        self.inputs = tf.keras.layers.Input(shape=obs_space.shape, name="observations")
        layer1 = tf.keras.layers.Dense(
            layer_size[0], name="layer_1", activation=activation, kernel_initializer=initializer
        )(self.inputs)
        layer2 = tf.keras.layers.Dense(
            layer_size[1], name="layer_2", activation=activation, kernel_initializer=initializer
        )(layer1)
        layer3 = tf.keras.layers.Dense(
            layer_size[2], name="layer_3", activation=activation, kernel_initializer=initializer
        )(layer2)
        layer4 = tf.keras.layers.Dense(
            layer_size[3], name="layer_4", activation=activation, kernel_initializer=initializer
        )(layer3)
        act_layer = tf.keras.layers.Dense(
            num_outputs, name="action_out", activation=None, kernel_initializer=initializer
        )(layer4)
        val_hidden_layer = tf.keras.layers.Dense(
            action_space.n, name="layer_val_hidden", activation=activation, kernel_initializer=initializer
        )(layer4)
        val_layer = tf.keras.layers.Dense(
            1, name="value_out", activation=None, kernel_initializer=initializer
        )(val_hidden_layer)
        self.base_model = tf.keras.Model(self.inputs, [act_layer, val_layer])
        # Intentionally raises KeyError when "model_path" is missing — callers rely on that.
        path_to_junior = customized_model_kwargs["model_path"]
        self._params_copy(path=path_to_junior)

    def _extract_config(self, config) -> Tuple[List, tf.keras.initializers.Initializer, tf.keras.layers.Layer]:
        """
        Method to extract the hyperparameters of the config.

        Args:
            config: Dict with optional keys ``activation``, ``initializer``
                and ``layer1`` .. ``layer4``.

        Returns: layers, initializer and activation methode. Defaults are
            returned for any option the config does not provide.
        """
        # Default values:
        layer_size = [1000, 1000, 1000, 1000]
        initializer = tf.keras.initializers.Orthogonal()
        activation = tf.keras.layers.ReLU()
        known_keys = ("activation", "initializer", "layer1",
                      "layer2", "layer3", "layer4")
        if not any(k in config for k in known_keys):
            # Fix: reuse the module-level warnings import instead of re-importing
            # it locally; use generator expressions instead of list literals.
            warnings.warn("The custom dictionary had not the correct keys. Using default model.")
            return layer_size, initializer, activation
        # Activation options:
        activation_option = {"leaky_relu": tf.keras.layers.LeakyReLU(),
                             "relu": tf.keras.layers.ReLU(),
                             "elu": tf.keras.layers.ELU(),
                             }
        # Initializer Options
        initializer_option = {"O": tf.keras.initializers.Orthogonal(),
                              "RN": tf.keras.initializers.RandomNormal(),
                              "RU": tf.keras.initializers.RandomUniform(),
                              "Z": tf.keras.initializers.Zeros()}
        if "activation" in config:
            activation = activation_option[config["activation"]]
        if "initializer" in config:
            initializer = initializer_option[config["initializer"]]
        layer_keys = ("layer1", "layer2", "layer3", "layer4")
        if all(k in config for k in layer_keys):
            # Layer sizes may come in as floats from a hyper-parameter search.
            layer_size = [int(np.round(config[k])) for k in layer_keys]
        return layer_size, initializer, activation

    def _params_copy(self, path: Path):
        """Private Method from PPO code. Overwriting the weights of the rllib model by the Junior model.

        This method does one of two things:

        1. Used to copy the weights of the Junior model onto the model of the Rllib custom model.
        2. If the model is already a PPO model, it just copies it.

        Note that for the second option, the custom config does not have an effect.

        Args:
            path: Path of the Junior model checkpoints.

        Returns:
            None.
        """
        model = tf.keras.models.load_model(path)
        if isinstance(model, tf.keras.models.Sequential):
            # Map the Junior dense layers onto this functional model; index 0
            # of base_model is the Input layer, hence the offset of one.
            # NOTE(review): Junior layers 3 and 5 are skipped — presumably
            # non-weight layers such as dropout; confirm against the Junior
            # architecture.
            self.base_model.layers[1].set_weights(model.layers[0].get_weights())
            self.base_model.layers[2].set_weights(model.layers[1].get_weights())
            self.base_model.layers[3].set_weights(model.layers[2].get_weights())
            self.base_model.layers[4].set_weights(model.layers[4].get_weights())
            # Action-head weights are divided by 5 — presumably to soften the
            # initial policy logits; confirm against the original training setup.
            self.base_model.layers[5].set_weights([w / 5 for w in model.layers[6].get_weights()])
        else:
            # Already a full PPO model: take it over as-is.
            model.compile()
            self.base_model = model

    def forward(self, input_dict, state, seq_lens):
        # Cache the value head output for value_function(); return only logits.
        model_out, self._value_out = self.base_model(input_dict["obs"])
        return model_out, state

    def value_function(self):
        # Flatten the cached (batch, 1) value output to shape (batch,).
        return tf.reshape(self._value_out, [-1])

    def metrics(self):
        # Dummy custom metric (placeholder/debug value surfaced by RLlib).
        return {"foo": tf.constant(42.0)}
| 6,529 | 38.817073 | 125 | py |
curriculumagent | curriculumagent-master/tests/test_junior/test_junior.py | import os
import shutil
from pathlib import Path
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import ReLU  # , LeakyReLU
from curriculumagent.junior.junior_student import Junior, load_dataset, train, validate
class TestJunior:
    """
    Test Suite for the Junior model and the data loader
    """

    def test_data_loading(self):
        """
        Testing the load_dataset function
        """
        data_path = Path(__file__).parent.parent / "data" / "junior_experience"
        s_tr, a_tr, s_v, a_v, s_te, a_te = load_dataset(dataset_path=data_path, dataset_name="test")
        assert s_tr.shape == (3, 1429)
        assert a_tr.shape == (3, 1)
        assert s_v.shape == (2, 1429)
        assert a_v.shape == (2, 1)
        assert s_te.shape == (4, 1429)
        assert a_te.shape == (4, 1)
        assert np.array_equal(a_te.squeeze(), [6, 279, 0, 85])

    def test_init(self, test_submission_action_space):
        """
        Testing the normal init
        """
        junior = Junior(action_space_file=test_submission_action_space, seed=42)
        assert isinstance(junior.model, tf.keras.Model)
        assert isinstance(junior.model.layers[1].activation, ReLU)

    def test_init_relu(self, test_submission_action_space):
        """
        Testing the init with the leaky relu
        """
        junior = Junior(action_space_file=test_submission_action_space, seed=42)
        assert isinstance(junior.model, tf.keras.Model)
        assert isinstance(junior.model.layers[1].activation, ReLU)

    def test_train(self, test_submission_action_space, test_junior_input, test_temp_save):
        """
        Testing the training of the Junior
        """
        tf.random.set_seed(42)
        np.random.seed(42)
        s_tr, a_tr, s_v, a_v, _, _ = test_junior_input
        ckpt_path = test_temp_save
        if not ckpt_path.is_dir():
            os.mkdir(ckpt_path)
        else:
            if not os.listdir(ckpt_path):
                shutil.rmtree(ckpt_path)
                os.mkdir(ckpt_path)
        junior = Junior(action_space_file=test_submission_action_space, seed=42)
        out = junior.train(
            log_dir=ckpt_path / "tb",
            ckpt_dir=ckpt_path / "ckpt-junior",
            x_train=s_tr,
            y_train=a_tr,
            x_validate=s_v,
            y_validate=a_v,
            epochs=30,
        )
        assert isinstance(out, tf.keras.callbacks.History)
        # Fix: the membership check was a bare expression (no-op) — it needs assert.
        for key in out.history.keys():
            assert key in ["loss", "val_loss", "accuracy", "val_accuracy"]
        assert (ckpt_path / "tb" / "train").is_dir()
        assert (ckpt_path / "tb" / "validation").is_dir()
        assert (ckpt_path / "ckpt-junior" / "ckpt_10").is_dir()
        assert (ckpt_path / "ckpt-junior" / "ckpt_20").is_dir()
        assert (ckpt_path / "ckpt-junior" / "ckpt_30").is_dir()
        event = os.listdir(ckpt_path / "tb" / "train")
        assert "events.out.tfevents" in event[0]
        out = junior.model.predict(s_v)
        # Test if output corresponds to the num_actions:
        assert len(out[0]) == 806
        # Check if one action was selected
        assert any(out.tolist()[0])
        assert any(out.tolist()[1])
        # Remove
        shutil.rmtree(ckpt_path)
        os.mkdir(ckpt_path)
        assert not os.listdir(ckpt_path)

    def test_predict_after_training(self, test_submission_action_space,
                                    test_junior_input, test_temp_save):
        """
        Testing the training of the Junior
        """
        tf.random.set_seed(42)
        s_tr, a_tr, s_v, a_v, s_te, a_te = test_junior_input
        ckpt_path = test_temp_save
        if not ckpt_path.is_dir():
            os.mkdir(ckpt_path)
        else:
            if not os.listdir(ckpt_path):
                shutil.rmtree(ckpt_path)
                os.mkdir(ckpt_path)
        junior = Junior(action_space_file=test_submission_action_space, seed=42)
        junior.train(
            log_dir=ckpt_path / "tb",
            ckpt_dir=ckpt_path / "ckpt-junior",
            x_train=s_tr,
            y_train=a_tr,
            x_validate=s_v,
            y_validate=a_v,
            epochs=10,
        )
        #
        out = junior.test(x=s_te, y=a_te)
        assert isinstance(out, dict)
        assert isinstance(out["accuracy of top-1"], float)
        assert isinstance(out["accuracy of top-20"], float)
        shutil.rmtree(ckpt_path)
        os.mkdir(ckpt_path)
        assert not os.listdir(ckpt_path)

    def test_predict_from_checkpoint(self, test_submission_action_space,
                                     test_junior_input, test_temp_save):
        """
        Testing the prediction from a saved checkpoint
        """
        tf.random.set_seed(42)
        _, _, _, _, s_te, a_te = test_junior_input
        ckpt_path = test_temp_save.parent / "junior_experience" / "model"
        junior = Junior(action_space_file=test_submission_action_space, seed=42)
        #
        out = junior.test(x=s_te, y=a_te, save_path=ckpt_path)
        assert isinstance(out, dict)
        assert out["accuracy of top-1"] == 75.0
        assert out["accuracy of top-20"] == 75.0

    def test_main_train_function(self, test_submission_action_space, test_temp_save):
        """
        Running the default train function
        """
        test_data_path = Path(__file__).parent.parent / "data"
        ckpt_path = test_temp_save
        if not ckpt_path.is_dir():
            os.mkdir(ckpt_path)
        else:
            if not os.listdir(ckpt_path):
                shutil.rmtree(ckpt_path)
                os.mkdir(ckpt_path)
        train(
            run_name="junior",
            dataset_path=test_data_path / "junior_experience",
            action_space_file=test_submission_action_space,
            target_model_path=ckpt_path,
            dataset_name="test",
            epochs=30,
            seed=42,
        )
        # Test if last model is saved:
        assert (ckpt_path / "saved_model.pb").is_file()
        # Test if checkpoint is saved
        assert (ckpt_path / "ckpt-junior" / "ckpt_10").is_dir()
        assert (ckpt_path / "ckpt-junior" / "ckpt_20").is_dir()
        assert (ckpt_path / "ckpt-junior" / "ckpt_30").is_dir()
        # Check, wether one can load the model
        model = tf.keras.models.load_model(ckpt_path)
        assert isinstance(model, tf.keras.Model)
        shutil.rmtree(ckpt_path)
        os.mkdir(ckpt_path)

    def test_main_train_function_multiple_actions(self, test_submission_action_space):
        """
        Running the default train function with two action-space files
        """
        path_one, path_two = test_submission_action_space
        test_data_path = Path(__file__).parent.parent / "data"
        ckpt_path = test_data_path / "temporary_save"
        if not ckpt_path.is_dir():
            os.mkdir(ckpt_path)
        else:
            if not os.listdir(ckpt_path):
                shutil.rmtree(ckpt_path)
                os.mkdir(ckpt_path)
        out = train(
            run_name="junior",
            dataset_path=test_data_path / "junior_experience",
            action_space_file=[path_one, path_two],
            target_model_path=ckpt_path,
            dataset_name="test",
            epochs=30,
            seed=42,
        )
        assert isinstance(out, tf.keras.callbacks.History)
        # Test if last model is saved:
        assert (ckpt_path / "saved_model.pb").is_file()
        # Check, wether one can load the model
        model = tf.keras.models.load_model(ckpt_path)
        assert isinstance(model, tf.keras.Model)
        out = model.output_shape
        # This should be 806, given that all two action sets total 806 actions
        assert out == (None, 806)
        shutil.rmtree(ckpt_path)
        os.mkdir(ckpt_path)

    def test_main_predict_function(self, test_temp_save):
        """
        Test the predict function
        """
        tf.random.set_seed(42)
        data_path = test_temp_save.parent / "junior_experience"
        out = validate(checkpoint_path=data_path / "model", dataset_path=data_path, dataset_name="test")
        assert isinstance(out, dict)
        assert out["accuracy of top-1"] == 75.0
        assert out["accuracy of top-20"] == 75.0
| 8,319 | 32.548387 | 104 | py |
curriculumagent | curriculumagent-master/tests/test_baseline/test_baselineagent.py | import os
import shutil
import grid2op
import pytest
import ray
import tensorflow as tf
from grid2op.Agent import BaseAgent
from lightsim2grid import LightSimBackend
from tensorflow.keras.models import Sequential
from curriculumagent.baseline import CurriculumAgent
from curriculumagent.submission.my_agent import MyAgent
class TestBaselineAgent:
    """
    Test suite of the baseline agent
    """

    def test_init(self):
        """
        Testing, whether the model is correctly loaded
        """
        env = grid2op.make("l2rpn_case14_sandbox", backend=LightSimBackend())
        env.reset()
        myagent = CurriculumAgent(action_space=env.action_space, observation_space=env.observation_space,
                                  name="El Testo")
        # Test the default values
        assert isinstance(myagent, BaseAgent)
        assert isinstance(myagent.observation_space, grid2op.Observation.ObservationSpace)
        assert isinstance(myagent.action_space, grid2op.Action.ActionSpace)
        assert myagent.name == "El Testo"
        assert myagent.senior is None
        assert myagent.agent is None
        assert myagent.do_nothing.as_dict() == {}

    def test_runnable_do_nothing(self):
        """
        Testing, whether the model is runnable
        """
        env = grid2op.make("l2rpn_case14_sandbox", backend=LightSimBackend())
        env.reset()
        myagent = CurriculumAgent(action_space=env.action_space, observation_space=env.observation_space,
                                  name="El Testo Secundo")
        env.seed(42)
        obs = env.reset()
        assert obs.to_vect().shape == (467,)
        done = False
        while not done:
            with pytest.warns():
                act = myagent.act(observation=obs, reward=0, done=False)
            assert act.as_dict() == {}
            obs, rew, done, info = env.step(act)
        assert done

    def test_load_errors(self, test_baseline_models):
        """
        Testing, whether the errors are correctly raised!
        """
        senior_path, junior_path = test_baseline_models
        env = grid2op.make("l2rpn_case14_sandbox", backend=LightSimBackend())
        myagent = CurriculumAgent(action_space=env.action_space, observation_space=env.observation_space,
                                  name="El Junior agento")
        # In the junior path we do not have the action file
        with pytest.raises(FileNotFoundError):
            myagent.load(junior_path)
        # Here only a npy file exists.
        with pytest.raises(FileNotFoundError):
            myagent.load(senior_path / "actions")

    def test_loading_junior_with_separate_action_file(self, test_baseline_models):
        """
        Testing, whether loading the junior model works.
        """
        senior_path, junior_path = test_baseline_models
        env = grid2op.make("l2rpn_case14_sandbox", backend=LightSimBackend())
        myagent = CurriculumAgent(action_space=env.action_space, observation_space=env.observation_space,
                                  name="El Junior agento")
        assert myagent.agent is None
        # Now load junior model, with separate action file
        myagent.load(path=junior_path,
                     actions_path=senior_path / "actions")
        assert isinstance(myagent.agent, MyAgent)
        assert isinstance(myagent.agent.model, Sequential)
        # Test, wether model works:
        obs = env.reset()
        act = myagent.act(observation=obs, reward=0, done=False)
        assert isinstance(act, grid2op.Action.BaseAction)

    def test_loading_senior_with_subset(self, test_baseline_models):
        """
        Testing, whether loading the senior model works.
        """
        senior_path, _ = test_baseline_models
        env = grid2op.make("l2rpn_case14_sandbox", backend=LightSimBackend())
        myagent = CurriculumAgent(action_space=env.action_space, observation_space=env.observation_space,
                                  name="El grande senior")
        assert myagent.agent is None
        # Now load junior model, with separate action file
        myagent.load(path=senior_path, subset=False)
        assert isinstance(myagent.agent, MyAgent)
        assert isinstance(myagent.agent.model, tf.keras.models.Model)
        # Test, wether model works:
        obs = env.reset()
        act = myagent.act(observation=obs, reward=0, done=False)
        assert isinstance(act, grid2op.Action.BaseAction)

    def test_actions_senior(self, test_baseline_models):
        """
        Testing multiple action steps
        """
        senior_path, _ = test_baseline_models
        env = grid2op.make("l2rpn_case14_sandbox", backend=LightSimBackend())
        myagent = CurriculumAgent(action_space=env.action_space, observation_space=env.observation_space,
                                  name="Run Forrest Run")
        myagent.load(path=senior_path)
        obs = env.reset()
        done = False
        non_zero = 0
        while not done:
            act = myagent.act(observation=obs, reward=0, done=done)
            if act.as_dict() != {}:
                non_zero += 1
            obs, rew, done, info = env.step(act)
        assert done is True
        assert non_zero > 0

    def test_actions_junior(self, test_baseline_models):
        """
        Testing multiple action steps
        """
        senior_path, junior_path = test_baseline_models
        env = grid2op.make("l2rpn_case14_sandbox", backend=LightSimBackend())
        myagent = CurriculumAgent(action_space=env.action_space, observation_space=env.observation_space,
                                  name="Run little junior, run!")
        assert myagent.agent is None
        # Now load junior model, with separate action file
        myagent.load(path=junior_path,
                     actions_path=senior_path / "actions")
        obs = env.reset()
        done = False
        non_zero = 0
        while not done:
            act = myagent.act(observation=obs, reward=0, done=done)
            if act.as_dict() != {}:
                non_zero += 1
            obs, rew, done, info = env.step(act)
        assert done is True
        assert non_zero > 0

    def test_save_baseline(self, test_baseline_models, test_temp_save):
        """
        Checking, whether the saving works
        """
        senior_path, _ = test_baseline_models
        env = grid2op.make("l2rpn_case14_sandbox", backend=LightSimBackend())
        myagent = CurriculumAgent(action_space=env.action_space, observation_space=env.observation_space,
                                  name="Run Forrest Run")
        myagent.load(path=senior_path)
        #
        shutil.rmtree(test_temp_save, ignore_errors=True)
        os.mkdir(test_temp_save)
        assert test_temp_save.is_dir() and len(os.listdir(test_temp_save)) == 0
        myagent.save(test_temp_save)
        # Fix: a bare Path object is always truthy — assert the files actually exist.
        assert (test_temp_save / "model" / "saved_model.pb").is_file()
        assert (test_temp_save / "actions" / "actions.npy").is_file()
        # Test whether loading works effortlessly:
        myagent.load(test_temp_save)
        shutil.rmtree(test_temp_save, ignore_errors=True)
        os.mkdir(test_temp_save)

    def test_create_submission_dir(self, test_baseline_models, test_temp_save):
        """
        Testing whether create_submission works
        """
        senior_path, _ = test_baseline_models
        env = grid2op.make("l2rpn_case14_sandbox", backend=LightSimBackend())
        myagent = CurriculumAgent(action_space=env.action_space, observation_space=env.observation_space,
                                  name="Run Forrest Run")
        myagent.load(path=senior_path)
        #
        shutil.rmtree(test_temp_save, ignore_errors=True)
        os.mkdir(test_temp_save)
        assert test_temp_save.is_dir() and len(os.listdir(test_temp_save)) == 0
        assert myagent.agent is not None
        myagent.create_submission(test_temp_save)
        assert (test_temp_save / "common" / "__init__.py").is_file()
        assert (test_temp_save / "common" / "obs_converter.py").is_file()
        assert (test_temp_save / "common" / "utilities.py").is_file()
        assert (test_temp_save / "my_agent.py").is_file()
        assert (test_temp_save / "__init__.py").is_file()
        shutil.rmtree(test_temp_save, ignore_errors=True)
        os.mkdir(test_temp_save)

    @pytest.mark.ultra_slow
    @pytest.mark.slow
    def test_training_senior(self, test_baseline_models, test_temp_save):
        """
        Testing, whether the simple training of the senior works
        """
        senior_path, _ = test_baseline_models
        env = grid2op.make("l2rpn_case14_sandbox", backend=LightSimBackend())
        myagent = CurriculumAgent(action_space=env.action_space, observation_space=env.observation_space,
                                  name="Run Forrest Run")
        myagent.load(path=senior_path)
        # Delete everything.
        shutil.rmtree(test_temp_save, ignore_errors=True)
        os.mkdir(test_temp_save)
        assert test_temp_save.is_dir() and len(os.listdir(test_temp_save)) == 0
        ray.init()
        # Fix: ray.is_initialized is a function — asserting the bare attribute was always true.
        assert ray.is_initialized()
        assert myagent.senior is None
        myagent.train(env=env,
                      iterations=1,
                      save_path=test_temp_save)
        assert myagent.senior.ppo.iteration == 1
        # Fix: assert the saved artifacts actually exist (Path objects are always truthy).
        assert (test_temp_save / "model" / "saved_model.pb").is_file()
        assert (test_temp_save / "actions" / "actions.npy").is_file()
        # Back to normal
        ray.shutdown()
        shutil.rmtree(test_temp_save, ignore_errors=True)
        os.mkdir(test_temp_save)
| 9,696 | 35.592453 | 105 | py |
curriculumagent | curriculumagent-master/tests/test_senior/test_senior_student.py | import os
import pickle
import shutil
import pytest
import ray
import tensorflow as tf
from ray._raylet import ObjectRef
from ray.rllib.algorithms import Algorithm
from sklearn.base import BaseEstimator
from curriculumagent.senior.rllib_execution.senior_env_rllib import SeniorEnvRllib
from curriculumagent.senior.senior_student import Senior
from curriculumagent.submission.my_agent import MyAgent
@pytest.mark.gitlabonly
class TestSenior:
    """
    Testing the Senior class and all its underlying methods
    """
    def test_init_base_model(self, senior_values):
        """
        First, we test whether the agent is able to initialize. Note that this also means that we
        double check the model.
        """
        env_path, actions_path, path_to_junior, test_temp_save, _, scaler = senior_values
        # Check if dir is empty
        if test_temp_save.is_dir():
            if len(os.listdir(test_temp_save)) > 0:
                shutil.rmtree(test_temp_save, ignore_errors=True)
        if not test_temp_save.is_dir():
            os.mkdir(test_temp_save)
        assert actions_path[0].is_file()
        assert actions_path[1].is_file()
        # Now we initialize the Senior.
        ray.init(ignore_reinit_error=True)
        senior = Senior(env_path=env_path,
                        action_space_path=actions_path,
                        model_path=path_to_junior,
                        ckpt_save_path=test_temp_save,
                        scaler=scaler, num_workers=1,
                        subset=False)
        # Check for values:
        assert isinstance(senior, Senior)
        assert isinstance(senior.rllib_env, SeniorEnvRllib)
        assert isinstance(senior.ppo, Algorithm)
        # Because we did not pass a model config, this should be false:
        # (accessed via the name-mangled private attribute _Senior__advanced_model)
        assert senior._Senior__advanced_model is False
        # Close everything and return to normal
        ray.shutdown()
        shutil.rmtree(test_temp_save, ignore_errors=True)
        os.mkdir(test_temp_save)
    def test_init_advanced_model(self, senior_values):
        """
        Now let's check, whether the custom config is added !
        """
        env_path, actions_path, path_to_junior, test_temp_save, custom_config, scaler = senior_values
        # Check if dir is empty
        if test_temp_save.is_dir():
            if len(os.listdir(test_temp_save)) > 0:
                shutil.rmtree(test_temp_save, ignore_errors=True)
        if not test_temp_save.is_dir():
            os.mkdir(test_temp_save)
        # Now we initialize the Senior.
        ray.init(ignore_reinit_error=True)
        senior = Senior(env_path=env_path,
                        action_space_path=actions_path,
                        model_path=path_to_junior,
                        ckpt_save_path=test_temp_save,
                        custom_junior_config=custom_config,  # Set specific config
                        scaler=scaler, num_workers=1,
                        subset=False)
        # Check for values:
        assert isinstance(senior, Senior)
        assert isinstance(senior.rllib_env, SeniorEnvRllib)
        assert isinstance(senior.ppo, Algorithm)
        # Custom config, this should be true
        assert senior._Senior__advanced_model is True
        # Close everything and return to normal
        ray.shutdown()
        shutil.rmtree(test_temp_save, ignore_errors=True)
        os.mkdir(test_temp_save)
    def test_scaler_imports_and_subset(self, senior_values):
        """
        Testing, the different ways to import the scaler:
        as a file path, as an unpickled object, and as a ray object reference.
        """
        env_path, actions_path, path_to_junior, test_temp_save, custom_config, scaler = senior_values
        ray.init(ignore_reinit_error=True)
        with open(scaler, "rb") as fp:  # Pickling
            loaded_scaler = pickle.load(fp)
        if ray.is_initialized():
            # Put the scaler into the ray object store to test the ObjectRef path.
            ray_scal = ray.put(loaded_scaler)
        else:
            raise FileNotFoundError
        # This should raise a value error:
        with pytest.raises(ValueError):
            senior = Senior(env_path=env_path,
                            action_space_path=actions_path,
                            model_path=path_to_junior,
                            ckpt_save_path=test_temp_save,
                            scaler=scaler,
                            num_workers=1,
                            subset=True)
        # Naturally, the import via path:
        senior = Senior(env_path=env_path,
                        action_space_path=actions_path,
                        model_path=path_to_junior,
                        ckpt_save_path=test_temp_save,
                        scaler=scaler, num_workers=1,
                        subset=False)
        assert isinstance(senior.rllib_env.scaler, BaseEstimator)
        # Now imported scaler:
        senior = Senior(env_path=env_path,
                        action_space_path=actions_path,
                        model_path=path_to_junior,
                        ckpt_save_path=test_temp_save,
                        scaler=loaded_scaler,
                        num_workers=1, subset=False)
        assert isinstance(senior.rllib_env.scaler, BaseEstimator)
        # Now with ray
        senior = Senior(env_path=env_path,
                        action_space_path=actions_path,
                        model_path=path_to_junior,
                        ckpt_save_path=test_temp_save,
                        scaler=ray_scal, num_workers=1, subset=False)
        assert isinstance(senior.rllib_env.scaler, ObjectRef)
        ray.shutdown()
    @pytest.mark.ultra_slow
    @pytest.mark.slow
    def test_train_runs_without_errors(self, senior_values):
        """
        Testing of training
        """
        env_path, actions_path, path_to_junior, test_temp_save, custom_config, scaler = senior_values
        ray.init(ignore_reinit_error=True)
        senior = Senior(env_path=env_path,
                        action_space_path=actions_path,
                        model_path=path_to_junior,
                        ckpt_save_path=test_temp_save,
                        scaler=scaler, num_workers=1, subset=False)
        assert senior.ppo.iteration == 0
        out = senior.train(1)
        assert senior.ppo.iteration == 1
        assert isinstance(out, dict)
        # One iteration should have produced exactly one checkpoint directory.
        paths = os.listdir(test_temp_save)
        assert (test_temp_save / paths[0] / "rllib_checkpoint.json").is_file()
        ray.shutdown()
        shutil.rmtree(test_temp_save, ignore_errors=True)
        os.mkdir(test_temp_save)
    @pytest.mark.ultra_slow
    @pytest.mark.slow
    def test_train_default(self, senior_values):
        """
        Testing of training with default ckpt_save_path and num_workers.
        """
        env_path, actions_path, path_to_junior, test_temp_save, custom_config, scaler = senior_values
        ray.init(ignore_reinit_error=True)
        senior = Senior(env_path=env_path,
                        action_space_path=actions_path,
                        model_path=path_to_junior, subset=False)
        assert senior.ppo.iteration == 0
        out = senior.train(1)
        assert senior.ppo.iteration == 1
        assert isinstance(out, dict)
        # cleanup
        ray.shutdown()
        shutil.rmtree(test_temp_save, ignore_errors=True)
        os.mkdir(test_temp_save)
    def test_restore(self, senior_values, rllib_ckpt):
        """
        Testing whether the Policy can be loaded via restore
        """
        env_path, actions_path, path_to_junior, test_temp_save, custom_config, scaler = senior_values
        ray.init(ignore_reinit_error=True)
        senior = Senior(env_path=env_path,
                        action_space_path=actions_path,
                        model_path=path_to_junior,
                        ckpt_save_path=test_temp_save,
                        scaler=scaler, num_workers=1,
                        subset=False)
        assert senior.ppo.iteration == 0
        # The fixture checkpoint was saved after one training iteration.
        senior.restore(rllib_ckpt)
        assert senior.ppo.iteration == 1
        ray.shutdown()
    def test_save_model(self, senior_values, rllib_ckpt):
        """
        Testing whether the previously loaded policy can be saved again
        """
        env_path, actions_path, path_to_junior, test_temp_save, custom_config, scaler = senior_values
        ray.init(ignore_reinit_error=True)
        senior = Senior(env_path=env_path,
                        action_space_path=actions_path,
                        model_path=path_to_junior,
                        ckpt_save_path=test_temp_save,
                        scaler=scaler, num_workers=1,
                        subset=False)
        senior.restore(rllib_ckpt)
        senior.save_to_model(test_temp_save)
        # The export must be loadable as a plain TensorFlow model.
        model = tf.keras.models.load_model(test_temp_save)
        model.compile()
        assert isinstance(model, tf.keras.models.Model)
        assert len(os.listdir(test_temp_save)) > 0
        # cleanup
        ray.shutdown()
        shutil.rmtree(test_temp_save, ignore_errors=True)
        os.mkdir(test_temp_save)
    def test_my_agent_return(self, senior_values, rllib_ckpt, test_temp_save):
        """
        Testing, whether the my_agent is returned
        """
        if test_temp_save.is_dir():
            if len(os.listdir(test_temp_save)) > 0:
                shutil.rmtree(test_temp_save, ignore_errors=True)
        if not test_temp_save.is_dir():
            os.mkdir(test_temp_save)
        env_path, actions_path, path_to_junior, test_temp_save, custom_config, scaler = senior_values
        ray.init(ignore_reinit_error=True)
        senior = Senior(env_path=env_path,
                        action_space_path=actions_path,
                        model_path=path_to_junior,
                        ckpt_save_path=test_temp_save,
                        scaler=scaler, num_workers=1,
                        subset=False)
        senior.restore(rllib_ckpt)
        agent = senior.get_my_agent(test_temp_save)
        assert isinstance(agent, MyAgent)
        assert len(os.listdir(test_temp_save)) > 0
        # cleanup
        ray.shutdown()
        shutil.rmtree(test_temp_save, ignore_errors=True)
        os.mkdir(test_temp_save)
    def test_init_errors(self, senior_values):
        """
        Testing for errors raised or warned about during Senior initialization.
        """
        env_path, actions_path, path_to_junior, test_temp_save, custom_config, scaler = senior_values
        # First raise error due to missing ray init()
        with pytest.raises(AssertionError):
            Senior(env_path=env_path,
                   action_space_path=actions_path,
                   model_path=path_to_junior,
                   ckpt_save_path=test_temp_save,
                   custom_junior_config=custom_config,  # Set specific config
                   scaler=scaler, num_workers=1,
                   subset=False)
        ray.init()
        # Now pass a wrong action set so that the dimensions of the model do not work:
        with pytest.raises(ValueError):
            Senior(env_path=env_path,
                   action_space_path=[actions_path[0]],  # Error here
                   model_path=path_to_junior,
                   ckpt_save_path=test_temp_save,
                   custom_junior_config=custom_config,  # Set specific config
                   scaler=scaler, num_workers=1,
                   subset=False)
        # Now the model import should work, BUT not the environment
        with pytest.raises(ValueError):
            Senior(env_path=env_path,
                   action_space_path=scaler,
                   model_path=path_to_junior,
                   ckpt_save_path=test_temp_save,
                   custom_junior_config=custom_config,
                   scaler=scaler, num_workers=1,
                   subset=False)
        # Wrong scaler input: an action file is not a scaler, so it must be dropped.
        senior = Senior(env_path=env_path,
                        action_space_path=actions_path,
                        model_path=path_to_junior,
                        ckpt_save_path=test_temp_save,
                        custom_junior_config={"weird": "keys"},
                        scaler=actions_path[0], num_workers=1,
                        subset=False)
        assert senior.env_config["scaler"] is None
        # Testing custom config: an invalid config should only warn, not raise.
        with pytest.warns():
            Senior(env_path=env_path,
                   action_space_path=actions_path,
                   model_path=path_to_junior,
                   ckpt_save_path=test_temp_save,
                   custom_junior_config=actions_path[0],
                   scaler=scaler, num_workers=1,
                   subset=False)
        # Error from scaler, because a subset:
        with pytest.raises(ValueError):
            Senior(env_path=env_path,
                   action_space_path=actions_path,
                   model_path=path_to_junior,
                   ckpt_save_path=test_temp_save,
                   scaler=scaler, num_workers=1,
                   subset=True)
| 12,871 | 37.08284 | 101 | py |
curriculumagent | curriculumagent-master/tests/test_senior/test_senior_model.py | import numpy as np
import pytest
import tensorflow as tf
from gymnasium.spaces import Box, Discrete
from curriculumagent.senior.rllib_execution.senior_model_rllib import Grid2OpCustomModel
class TestAdvancedCustomModel:
    """Test suite for the ``Grid2OpCustomModel`` RLlib model wrapper."""

    def test_load_model(self, custom_config):
        """
        First test, whether the model import works at all
        """
        model = tf.keras.models.load_model(custom_config["model_path"])
        assert isinstance(model, tf.keras.Model)

    def test_create_model(self, obs_space, action_space, custom_config):
        """
        Create Model and compare
        """
        model = Grid2OpCustomModel(obs_space=obs_space, action_space=action_space,
                                   num_outputs=action_space.n, model_config={},
                                   name="test_model", **custom_config)
        # Test the creation of the model
        assert model.obs_space == obs_space
        assert model.action_space == action_space
        assert model.num_outputs == action_space.n
        assert model.name == "test_model"
        assert isinstance(model.base_model, tf.keras.Model)
        assert len(model.base_model.layers) == 8

    def test_wrong_custom_input(self, obs_space, action_space, custom_config):
        """
        Unknown top-level kwargs must raise; unknown keys inside
        ``custom_config`` only warn.
        """
        with pytest.raises(KeyError):
            Grid2OpCustomModel(obs_space=obs_space, action_space=action_space,
                               num_outputs=action_space.n, model_config={},
                               name="test_model", **{"what the f***": "am I doing here?"})
        with pytest.warns():
            cc = custom_config.copy()
            cc["custom_config"] = {"what the f***": "am I doing here?"}
            Grid2OpCustomModel(obs_space=obs_space, action_space=action_space,
                               num_outputs=action_space.n, model_config={},
                               name="test_model", **cc)

    def test_wrong_layer_shape(self, obs_space, action_space, custom_config):
        """
        Testing wrong layer input
        """
        wrong_config = custom_config.copy()
        wrong_config['custom_config']["layer1"] = 100
        wrong_config['custom_config']["layer2"] = 8
        with pytest.raises(ValueError):
            Grid2OpCustomModel(obs_space=obs_space, action_space=action_space,
                               num_outputs=action_space.n, model_config=wrong_config,
                               name="test_model", **custom_config)

    def test_wrong_shape_of_action_and_obs(self, obs_space, action_space, custom_config):
        """
        Testing wrong input of action and observation
        """
        wrong_config = custom_config.copy()
        wrong_obs = Box(low=-1.0, high=1.0, shape=(4,), dtype=np.float32)
        wrong_action = Discrete(2)
        # Obs Space
        with pytest.raises(ValueError):
            Grid2OpCustomModel(wrong_obs, action_space, action_space.n, wrong_config, "test_model",
                               **custom_config)
        with pytest.raises(ValueError):
            Grid2OpCustomModel(obs_space, wrong_action, action_space.n, wrong_config, "test_model",
                               **custom_config)

    def test_extract_config(self, obs_space, action_space, custom_config):
        """
        Create Model and test the extract_config method
        """
        model = Grid2OpCustomModel(obs_space, action_space, action_space.n, {}, "test_model",
                                   **custom_config)
        # Example config
        config = {'activation': 'elu',
                  'batchsize': 256,
                  'dropout1': 0.018195067193059022,
                  'dropout2': 0.32907678342440344,
                  'initializer': 'RN',
                  'layer1': 1032.228390773144,
                  'layer2': 239.69398910901413,
                  'layer3': 1236.3175666745672,
                  'layer4': 1163.4560269775084,
                  'learning_rate': 0.00016864815673883727,
                  'TRIAL_BUDGET': 100}
        layer_size, initializer, activation = model._extract_config(config)
        # Float layer sizes are rounded to ints by _extract_config.
        assert layer_size == [1032, 240, 1236, 1163]
        assert isinstance(initializer, tf.keras.initializers.RandomNormal)
        assert isinstance(activation, tf.keras.layers.ELU)

    def test_params_copy(self, obs_space, action_space, custom_config):
        """
        Test the _params_copy method
        """
        model = Grid2OpCustomModel(obs_space, action_space, action_space.n, {}, "test_model",
                                   **custom_config)
        # We overwrite for the 4 layers à 1000 with the weights of the first layer
        model.base_model.layers[3].set_weights(model.base_model.layers[2].get_weights())
        # This should work
        assert np.allclose(model.base_model.layers[2].get_weights()[0], model.base_model.layers[2].get_weights()[0])
        assert np.allclose(model.base_model.layers[3].get_weights()[0], model.base_model.layers[2].get_weights()[0])
        # Now we reload the model with the original weights:
        model._params_copy(custom_config["model_path"])
        assert not np.allclose(model.base_model.layers[3].get_weights()[0], model.base_model.layers[
            2].get_weights()[0])

    def test_forward(self, obs_space, action_space, custom_config):
        """
        Test the forward method
        """
        model = Grid2OpCustomModel(obs_space, action_space, action_space.n, {}, "test_model",
                                   **custom_config)
        # This method should now raise an error, because there is no value method:
        with pytest.raises(AttributeError):
            model.value_function()
        obs = tf.constant(np.ones((1, 1429)), dtype=tf.float32)
        model_out, state = model.forward({"obs": obs}, "Don't mind me, just passing through", None)
        assert model_out.shape == (1, 806)
        # Use equality, not identity: ``is`` on a str literal relies on
        # CPython interning and emits a SyntaxWarning on 3.8+.
        assert state == "Don't mind me, just passing through"
        # Now we can check, whether there is a value
        value = model.value_function()
        assert isinstance(value, tf.Tensor)
        assert "_value_out" in model.__dict__

    def test_load_senior_model(self, obs_space, action_space, custom_config, test_submission_models):
        """
        Testing the import of a junior model vs. a Senior model.

        Renamed from a second ``test_create_model`` definition that silently
        shadowed the first one, so both tests are collected again.
        """
        model = Grid2OpCustomModel(obs_space=obs_space, action_space=action_space,
                                   num_outputs=action_space.n, model_config={},
                                   name="test_model", **custom_config)
        # Test the creation of the model
        weights = model.base_model.get_weights()
        assert weights[0][0].shape == (1000,)
        assert weights[0][0][0] == pytest.approx(0.01929925)
        # Now we try to load a Senior model
        # Note that we do not need the custom_config, because it is overwritten !
        _, _, model_path = test_submission_models
        cc = {"model_path": model_path,
              # By default adding some bullshit here that should not effect the model
              "custom_config": {'activation': 'relu',
                                'initializer': "Z",
                                'layer1': 42,
                                'layer2': 1022,
                                'layer3': 1022,
                                'layer4': 9}}
        model = Grid2OpCustomModel(obs_space=obs_space, action_space=action_space,
                                   num_outputs=action_space.n, model_config={},
                                   name="test_model", **cc)
        weights = model.base_model.get_weights()
        assert weights[0][0].shape == (1000,)  # And not 42!
        assert weights[0][0][0] == pytest.approx(0.01947462)
curriculumagent | curriculumagent-master/tests/test_submission/test_my_agent.py | import types
import grid2op
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.python.training.tracking.tracking import AutoTrackable
from curriculumagent.submission.my_agent import MyAgent
class TestAdvancedAgent:
    """
    Test suite of the advanced submission agent
    """

    def test_init_ray24(self, test_env, test_submission_models, test_paths_env):
        """Testing, whether the init works, especially the models
        First we start with the old model and old action space
        """
        _, act_path = test_paths_env
        act_list = [
            act_path.parent / "submission" / "actionspace_tuples.npy",
            act_path.parent / "submission" / "actionspace_nminus1.npy",
        ]
        env = test_env
        _, _, ray_v24 = test_submission_models
        agent = MyAgent(
            env.action_space,
            model_path=ray_v24,
            this_directory_path=act_path,
            action_space_path=act_list,
            subset=True,
            scaler=None,
        )
        # Note that model size and action space do not fit, because this is a model
        # for the IEEE118 grid. So we test differently
        assert isinstance(agent.model, tf.keras.models.Model)
        # We test with an example input :
        np.random.seed(42)
        tf.random.set_seed(42)
        a1 = np.random.random((1429,))
        out, _ = agent.model.predict(a1.reshape(1, -1))
        action_prob = tf.nn.softmax(out).numpy().reshape(-1)
        assert action_prob.argmax() == 681

    def test_init_junior(self, test_env, test_paths_env, senior_values):
        """Testing, whether the init works, especially the models
        First we start with the old model and old action space
        """
        _, act_list, junior_model, _, _, _ = senior_values
        env = test_env
        agent = MyAgent(
            env.action_space,
            model_path=junior_model,
            this_directory_path=act_list,
            action_space_path=act_list,
            subset=True,
            scaler=None,
        )
        # This one here should be a sequential model
        assert isinstance(agent.model, tf.keras.models.Sequential)
        # We test with an example input :
        np.random.seed(42)
        tf.random.set_seed(42)
        # The junior has a different model size here
        a1 = np.random.random((1429,))
        out = agent.model.predict(a1.reshape(1, -1))
        action_prob = tf.nn.softmax(out).numpy().reshape(-1)
        assert action_prob.argmax() == 0

    # NOTE: this test was defined twice with identical bodies; the second
    # definition shadowed the first, so the duplicate has been removed.
    def test_init_check_overload(self, test_env, test_submission_models, test_paths_env, senior_values):
        """Testing, whether the init works, especially the models
        First we start with the old model and old action space
        """
        _, act_path = test_paths_env
        _, act_list, junior_model, _, _, _ = senior_values
        env = test_env
        tf.random.set_seed(42)  # Setting seed for runs
        np.random.seed(42)
        env.seed(42)
        env.reset()
        agent = MyAgent(
            env.action_space,
            model_path=junior_model,
            this_directory_path=act_path,
            action_space_path=act_list,
            subset=False,
            scaler=None,
        )
        agent_overload = MyAgent(
            env.action_space,
            model_path=junior_model,
            this_directory_path=act_path,
            action_space_path=act_list,
            subset=False,
            scaler=None,
            check_overload=True
        )
        done = False
        obs = env.reset()
        collect_actions = []
        while not done:
            act1 = agent.act(obs, 0, done)
            act2 = agent_overload.act(obs, 0, done)
            collect_actions.append(act1 == act2)
            obs, rew, done, info = env.step(act2)
        # Check if they differ
        assert not all(collect_actions)
| 5,256 | 29.04 | 103 | py |
curriculumagent | curriculumagent-master/paper_data_MPGTTA/scripts_paper/hps_junior_nni.py | """
In this file, the code for the hyper-parameter search of the junior model is provided.
It is optimized to run the NNI framework @https://nni.readthedocs.io/en/stable/
In order to run this code via NNI, you have to start it via the terminal.
NNI then automatically runs through the parameters and selects the best option.
"""
import logging
from pathlib import Path
from typing import Optional
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
import numpy as np
from curriculumagent.junior.junior_student import Junior
from curriculumagent.junior.junior_student import load_dataset
import nni
def get_default():
    """Return the default hyper-parameter configuration for the junior model.

    Returns:
        dict: Baseline settings used when NNI does not supply a trial.
    """
    return {
        # Set to 1000 in production; adjusted by the asynchronous hyperband.
        "epochs": 10,
        "learning_rate": 1e-5,
        "activation": "relu",
        "layer1": 1000,
        "layer2": 1000,
        "layer3": 1000,
        "layer4": 1000,
        "dropout1": 0.25,
        "dropout2": 0.25,
        "patience": 0,
        "initializer": "O",
        "batchsize": 256,
    }
def run_hyperopt_training(config, path_to_files: Optional[Path] = None, run_nni: bool = True):
    """Train one junior model for a single hyper-parameter trial.

    Args:
        config: Hyper-parameter dictionary (see :func:`get_default`). Note
            that the ``initializer`` and ``patience`` entries are overwritten
            in place before training.
        path_to_files: Directory containing the ``n1data_topo`` dataset;
            falls back to ``junior/train`` when ``None``.
        run_nni: Whether to report the result back to NNI. Tip: test this
            code first without NNI.

    Returns:
        The best validation accuracy observed during training.
    """
    # Map the short initializer code from the search space onto a concrete
    # Keras initializer; any unknown code falls back to zeros.
    initializer_factories = {
        "O": tf.keras.initializers.Orthogonal,
        "RN": tf.keras.initializers.RandomNormal,
        "RU": tf.keras.initializers.RandomUniform,
    }
    factory = initializer_factories.get(config["initializer"], tf.keras.initializers.Zeros)
    config["initializer"] = factory()
    config["patience"] = 0

    if path_to_files is None:
        path_to_files = Path("junior/train")

    states_tr, acts_tr, states_val, acts_val, _, _ = load_dataset(path_to_files, dataset_name="n1data_topo")

    # Standardize the observations; the scaler is fitted on the training split only.
    std_scaler = StandardScaler()
    feats_tr = std_scaler.fit_transform(states_tr)
    feats_val = std_scaler.transform(states_val)

    # First initialize the advanced junior.
    # Note: no seed is set on purpose, to get more variability between trials!
    junior_model = Junior(config=config, num_actions=np.max(acts_tr) + 1, run_nni=run_nni)
    fit_history = junior_model.train(
        log_dir=None,
        ckpt_dir=None,
        patience=None,
        x_train=feats_tr,
        y_train=acts_tr,
        x_validate=feats_val,
        y_validate=acts_val,
    )

    # We return the best validation result.
    best_acc = max(fit_history.history["val_accuracy"])
    if run_nni:
        nni.report_final_result(best_acc)
    return best_acc
if __name__ == "__main__":
    """
    In order to run this code via NNI, you have to start it via the terminal.
    NNI then automatically runs throught the parameters and selects the best option.
    See https://nni.readthedocs.io/en/stable/ for the information
    If you want to test the code above set run_nni=False
    """
    # Flip to True when launched through the NNI experiment runner.
    run_nni = False
    params = get_default()
    try:
        # get parameters from the tuner and merge them over the defaults
        if run_nni:
            tuner_params = nni.get_next_parameter()
            logging.info(tuner_params)
            # TRIAL_BUDGET is the hyperband budget; scale the epoch count with it.
            tuner_params['epochs'] = tuner_params['TRIAL_BUDGET'] * 10 + 5
            params.update(tuner_params)
        logging.info(params)
        run_hyperopt_training(config=params,
                              path_to_files=Path('n1_topo'),
                              run_nni=run_nni)
    except Exception as exception:
        # Log and re-raise so a failed trial is reported as failed, not swallowed.
        logging.exception(exception)
        raise
| 3,931 | 30.709677 | 108 | py |
GPflowOpt | GPflowOpt-master/doc/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# GPflowOpt documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 30 20:34:41 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
from gpflowopt import __version__
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.todo',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'numpydoc',
    'nbsphinx',
    'IPython.sphinxext.ipython_console_highlighting'
]
# numpydoc configuration: list class members (including inherited ones)
# directly on the class page instead of generating a separate toctree.
numpydoc_show_class_members = True
numpydoc_show_inherited_class_members = True
numpydoc_class_members_toctree = False
#autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'GPflowOpt'
copyright = '2017, Joachim van der Herten'
author = 'Joachim van der Herten, Ivo Couckuyt'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'GPflowOptdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'gpflowopt.tex', 'GPflowOpt Documentation',
'Joachim van der Herten', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'GPflowOpt', 'GPflowOpt Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GPflowOpt', 'GPflowOpt Documentation',
author, 'GPflowOpt', 'One line description of project.',
'Miscellaneous'),
]
| 5,534 | 29.412088 | 98 | py |
CAMB | CAMB-master/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# MyProj documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 18 20:57:49 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
# autoclass_content = 'both'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, '../..')
import camb
# -- General configuration ------------------------------------------------
nitpicky = True
# Prevent spurious errors for every field ivar (not sure why..)
def on_missing_reference(app, env, node, contnode):
    """Handler for Sphinx's ``missing-reference`` event.

    For unresolved ``obj`` references (field ivars), return ``contnode`` so
    Sphinx renders the literal text instead of emitting a nitpick warning.
    Every other reference type returns ``None`` and stays with the default
    resolvers.
    """
    return contnode if node['reftype'] == 'obj' else None
def setup(app):
    # Sphinx extension entry point: hook our resolver into the
    # missing-reference event so nitpicky mode skips unresolvable obj fields.
    app.connect('missing-reference', on_missing_reference)
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '4.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode', 'sphinx.ext.autosummary',
'sphinx.ext.mathjax', 'sphinx_rtd_theme', 'sphinxcontrib.jquery'
]
intersphinx_mapping = {'python': ('https://docs.python.org/3', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('https://matplotlib.org/', None)}
plot_formats = [('png', 80)]
plot_html_show_formats = False
plot_html_show_source_link = False
autosummary_generate = True
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Code for Anisotropies in the Microwave Background (CAMB)'
copyright = 'Antony Lewis'
author = 'Antony Lewis'
version = camb.__version__
release = camb.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ['../CAMBdemo.html', '../ScalEqs.html']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'CAMBDoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CAMB.tex', u'CAMB Python Documentation',
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'CAMB', u'CAMB Python Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CAMB', u'CAMB Python Documentation',
author, 'CAMB', 'Cosmology calculations and output.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| 9,848 | 31.50495 | 100 | py |
CS_ELMo | CS_ELMo-master/src/main.py | import os
import re
import json
import argparse
import random
import numpy as np
import torch
import experiments.experiment_langid as experiment_lid
import experiments.experiment_ner as experiment_ner
import experiments.experiment_pos as experiment_pos
from types import SimpleNamespace as Namespace
PROJ_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class Arguments(object):
    """Experiment configuration assembled from the CLI and a JSON config file.

    When ``config_path`` is ``None`` the command line is parsed for
    ``--config/--mode/--gpu/--seed``; otherwise evaluation defaults are used
    and ``config_path`` points at the JSON file directly. The JSON is loaded
    with ``//`` line comments stripped and exposed as nested namespaces.
    """
    def __init__(self, config_path=None):
        if config_path is None:
            parser = argparse.ArgumentParser()
            parser.add_argument('--config', help="provide a relative path to a JSON config file from the configs directory")
            parser.add_argument('--mode', choices=['train', 'eval'], default='train', help="Specify whether to train or evaluate the model")
            parser.add_argument('--gpu', type=int, default=-1, help="The GPU label number. By default, the code runs on CPU")
            parser.add_argument('--seed', type=int, default=42)
            args = parser.parse_args()
            # Fields expected from the command line
            self.config = os.path.join(PROJ_DIR, args.config)
            self.mode = args.mode
            self.gpu = args.gpu
            self.seed = args.seed
        else:
            # Programmatic use (no CLI): default to CPU evaluation with seed 42.
            self.gpu = -1
            self.mode = 'eval'
            self.seed = 42
            self.config = os.path.join(PROJ_DIR, config_path)
        assert os.path.exists(self.config) and self.config.endswith('.json'), f'Bad config path: {self.config}'
        # Read the parameters from the JSON file and skip comments
        # (the regex strips everything after ``//`` on each line).
        with open(self.config, 'r') as f:
            params = ''.join([re.sub(r"//.*$", "", line, flags=re.M) for line in f])
            # object_hook turns every JSON object into a SimpleNamespace for dotted access.
            arguments = json.loads(params, object_hook=lambda d: Namespace(**d))
        # Must-have fields expected from the JSON config file
        self.experiment = arguments.experiment
        self.description = arguments.description
        self.task = arguments.task
        self.dataset = arguments.dataset
        self.model = arguments.model
        self.training = arguments.training
        self.evaluation = arguments.evaluation
        # Checking that the JSON contains at least the fixed fields
        assert all([hasattr(self.dataset, name) for name in {'train', 'dev', 'test'}])
        assert all([hasattr(self.model, name) for name in {'name'}])
        assert all([hasattr(self.training, name) for name in {'epochs', 'batch_size', 'optimizer', 'lr_scheduler', 'l2', 'clip_grad'}])
        assert all([hasattr(self.training.optimizer, name) for name in {'name', 'lr'}])
        assert all([hasattr(self.training.lr_scheduler, name) for name in {'name'}])
        assert all([hasattr(self.evaluation, name) for name in {'batch_size'}])
        self._format_datapaths()
        self._add_extra_fields()
        self._add_transfer_learning_fields(arguments)
    def _add_transfer_learning_fields(self, args):
        # Optional fields: copied only when present in the JSON config.
        if hasattr(args, "pretrained_config"):
            self.pretrained_config = args.pretrained_config
        if hasattr(args, "transfer_mode"):
            self.transfer_mode = args.transfer_mode
        if hasattr(args, "restore_model"):
            self.restore_model = args.restore_model
    def _format_datapaths(self):
        # Resolve the dataset splits relative to <project root>/data.
        self.dataset.train = os.path.join(PROJ_DIR, 'data', self.dataset.train)
        self.dataset.dev = os.path.join(PROJ_DIR, 'data', self.dataset.dev)
        self.dataset.test = os.path.join(PROJ_DIR, 'data', self.dataset.test)
    def _add_extra_fields(self):
        # Derived output locations, all keyed by the experiment id.
        self.checkpoints = os.path.join(PROJ_DIR, 'checkpoints', self.experiment)
        self.figures = os.path.join(PROJ_DIR, 'reports/figures', self.experiment)
        self.history = os.path.join(PROJ_DIR, 'reports/history', self.experiment)
        self.predictions = os.path.join(PROJ_DIR, 'reports/predictions', self.experiment)
        self.attentions = os.path.join(PROJ_DIR, 'reports/attentions', self.experiment)
def main():
    """Entry point: load the experiment config, seed all RNGs, select the
    compute device, log the configuration, and dispatch to the task runner."""
    args = Arguments()
    # BUG FIX: seeding used to happen only inside the CUDA branch, so CPU-only
    # runs were left unseeded (non-reproducible). Seed every RNG up front.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available() and args.gpu >= 0:
        torch.cuda.manual_seed_all(args.seed)
        # Deterministic cuDNN kernels trade speed for reproducibility.
        torch.backends.cudnn.deterministic = True
        args.device = torch.device(f"cuda:{args.gpu}")
        torch.cuda.set_device(args.device)
    else:
        args.device = torch.device("cpu")
    print("[LOG] {}".format('=' * 40))
    print("[LOG] {: >15}: '{}'".format("Experiment ID", args.experiment))
    print("[LOG] {: >15}: '{}'".format("Description", args.description))
    print("[LOG] {: >15}: '{}'".format("Task", args.task.upper()))
    for key, val in vars(args.dataset).items():
        print("[LOG] {: >15}: {}".format(key, val))
    print("[LOG] {: >15}: '{}'".format("Modeling", vars(args.model)))
    print("[LOG] {: >15}: '{}'".format("Training", vars(args.training)))
    print("[LOG] {: >15}: '{}'".format("Evaluation", vars(args.evaluation)))
    print("[LOG] {: >15}: '{}'".format("Device", args.device))
    print("[LOG] {}".format('=' * 40))
    # Dispatch on the task prefix so variants (e.g. 'ner-eng') still route.
    if args.task.startswith('lid'): experiment_lid.main(args)
    elif args.task.startswith('ner'): experiment_ner.main(args)
    elif args.task.startswith('pos'): experiment_pos.main(args)
    else: raise Exception('Unexpected task: {}'.format(args.task))


if __name__ == '__main__':
    main()
| 5,425 | 41.724409 | 140 | py |
CS_ELMo | CS_ELMo-master/src/dataset.py | import os
import string
from torch.utils.data import DataLoader, Dataset
class CSDataset(Dataset):
    """Code-switching dataset loaded from a CoNLL-style, tab-separated file.

    Each non-empty line holds at least (token, language id), optionally
    followed by NER and/or POS columns; posts are separated by blank lines.
    """

    def __init__(self, dataset_path, tok_index=0, lid_index=1, ner_index=None, pos_index=None, debug=False):
        assert os.path.exists(dataset_path), 'File path not found: {}'.format(dataset_path)
        self.has_ner = ner_index is not None
        self.has_pos = pos_index is not None
        self.tok_index = tok_index
        self.lid_index = lid_index
        self.ner_index = ner_index
        self.pos_index = pos_index
        self.dataset_path = dataset_path
        self.tokens = []
        self.langids = []
        self.simplified = []
        self.entities = []
        self.postags = []
        for post in read_lines(self.dataset_path):
            toks_i = []
            lids_i = []
            ners_i = []
            poss_i = []
            for token_line in post:
                token_pack = token_line.split('\t')
                toks_i.append(token_pack[self.tok_index])
                lids_i.append(token_pack[self.lid_index])
                # NER/POS columns are only present on lines with >2 fields.
                if len(token_pack) > 2:
                    if self.has_ner: ners_i.append(token_pack[self.ner_index])
                    if self.has_pos: poss_i.append(token_pack[self.pos_index])
            self.tokens.append(toks_i)
            self.langids.append(lids_i)
            # 'simplified' collapses fine-grained language ids to a coarse scheme.
            self.simplified.append(map_langids(lids_i))
            if self.has_ner: self.entities.append(ners_i)
            if self.has_pos: self.postags.append(poss_i)
        self.ner_scheme = self.get_current_scheme() if self.has_ner else None

    def __len__(self):
        return len(self.tokens)

    def __getitem__(self, item):
        """Return one post as a dict of parallel token/label lists."""
        sample = dict()
        sample['tokens'] = self.tokens[item]
        sample['langids'] = self.langids[item]
        sample['simplified'] = self.simplified[item]
        if self.has_ner: sample['entities'] = self.entities[item]
        if self.has_pos: sample['postags'] = self.postags[item]
        return sample

    def merge(self, dataset):
        """Append another CSDataset's posts in place; both must carry the
        same optional label layers (NER / POS)."""
        self.tokens += dataset.tokens
        self.langids += dataset.langids
        self.simplified += dataset.simplified
        if self.has_ner and dataset.has_ner: self.entities += dataset.entities
        elif self.has_ner or dataset.has_ner: raise Exception('Both datasets are expected to have entities')
        if self.has_pos and dataset.has_pos: self.postags += dataset.postags
        elif self.has_pos or dataset.has_pos: raise Exception('Both datasets are expected to have POS tags')

    def get_current_scheme(self):
        """Detect the NER tagging scheme (BIO or BIOES) from label prefixes."""
        if not self.has_ner:
            return None
        ner_scheme = set()
        for labels in self.entities:
            for label in labels:
                ner_scheme.add(label[0])
        ner_scheme = sorted(ner_scheme)
        if ner_scheme == sorted('BIO'):
            return 'BIO'
        elif ner_scheme == sorted('BIOES'):
            return 'BIOES'
        else:
            # BUG FIX: `raise NotImplemented(...)` called a non-callable
            # singleton (TypeError); NotImplementedError is the proper exception.
            raise NotImplementedError(f'Unknown scheme! {ner_scheme}')

    def save(self, filepath, predictions=None):
        """Write the dataset back in CoNLL format, optionally appending a
        predictions column; posts are separated by blank lines."""
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        with open(filepath, 'w+') as fp:
            for i in range(len(self.tokens)):
                for j in range(len(self.tokens[i])):
                    line_template = "{}\t{}".format(self.tokens[i][j], self.langids[i][j])
                    if self.has_ner: line_template += '\t{}'.format(self.entities[i][j])
                    if self.has_pos: line_template += '\t{}'.format(self.postags[i][j])
                    if predictions is not None:
                        line_template += '\t{}'.format(predictions[i][j])
                    fp.write(line_template + '\n')
                fp.write('\n')

    def change_scheme(self, scheme):
        """Convert entity labels between BIO and BIOES in place."""
        if not self.has_ner:
            print("[WARNING] Your dataset does not have entity labels!")
            return
        if scheme == 'BIO':
            if self.ner_scheme == 'BIOES':
                for i in range(len(self.entities)):
                    self.entities[i] = from_bioes_to_bio(self.entities[i])
        elif scheme == 'BIOES':
            if self.ner_scheme == 'BIO':
                for i in range(len(self.entities)):
                    self.entities[i] = from_bio_to_bioes(self.entities[i])
        else:
            # BUG FIX: same NotImplemented -> NotImplementedError fix as above.
            raise NotImplementedError(f"Scheme not implemented: {scheme}")
        self.ner_scheme = self.get_current_scheme()

    def sanity_check(self):
        """Assert that every label layer is aligned with the tokens."""
        assert len(self.tokens) == len(self.langids)
        assert len(self.tokens) == len(self.simplified)
        assert not self.has_ner or len(self.tokens) == len(self.entities)
        assert not self.has_pos or len(self.tokens) == len(self.postags)
        for i in range(len(self)):
            assert len(self.tokens[i]) == len(self.langids[i])
            assert len(self.tokens[i]) == len(self.simplified[i])
            assert not self.has_ner or len(self.tokens[i]) == len(self.entities[i])
            assert not self.has_pos or len(self.tokens[i]) == len(self.postags[i])
class RawDataset(Dataset):
    """Dataset over raw annotation files keyed by post id.

    Expected tab-separated columns per line: post_id, user_id, start, end,
    token, label1, and optionally label2. A new post begins when the post id
    changes or the character offsets restart for the same id.
    """

    def __init__(self, dataset_path):
        assert os.path.exists(dataset_path), 'File path not found: {}'.format(dataset_path)
        self.dataset_path = dataset_path
        self.postids = []
        self.userids = []
        self.starts = []
        self.ends = []
        self.tokens = []
        self.labels1 = []
        self.labels2 = []
        self.postid_to_index = {}
        curr_post_id = ''
        curr_user_id = ''
        curr_start = -1
        toks_i = []
        bots_i = []
        eots_i = []
        lids_i = []
        ners_i = []
        with open(self.dataset_path, 'r') as stream:
            # Sentinel [''] guarantees the final post gets flushed below.
            lines = [line.strip().split('\t') for line in stream] + [['']]
        for token_pack in lines:
            if curr_post_id == '':
                curr_post_id = token_pack[0].strip()
                curr_user_id = token_pack[1].strip()
            # Flush the accumulated post at EOF, on a new post id, or when the
            # offsets restart (same post id reused for a fresh post).
            if toks_i and \
                    (token_pack == [''] or
                     (curr_post_id and token_pack[0] != curr_post_id) or
                     (curr_start >= int(token_pack[2]))):
                self.postids.append(curr_post_id)
                self.userids.append(curr_user_id)
                self.postid_to_index[curr_post_id] = len(self.postid_to_index)
                self.starts.append(bots_i)
                self.ends.append(eots_i)
                self.tokens.append(toks_i)
                self.labels1.append(lids_i)
                if ners_i:
                    self.labels2.append(ners_i)
                toks_i = []
                bots_i = []
                eots_i = []
                lids_i = []
                ners_i = []
                if token_pack != ['']:
                    curr_post_id = token_pack[0].strip()
                    curr_user_id = token_pack[1].strip()
                    curr_start = int(token_pack[2].strip())
            if token_pack == ['']:
                break
            bots_i.append(token_pack[2].strip())
            eots_i.append(token_pack[3].strip())
            toks_i.append(token_pack[4].strip())
            lids_i.append(token_pack[5].strip())
            if len(token_pack) > 6:
                ners_i.append(token_pack[6].strip())

    def __len__(self):
        return len(self.tokens)

    def __getitem__(self, item):
        """Return one post by integer index or by post id string."""
        sample = dict()
        item = item if isinstance(item, int) else self.postid_to_index[item]
        sample['postid'] = self.postids[item]
        sample['userid'] = self.userids[item]
        sample['starts'] = self.starts[item]
        sample['ends'] = self.ends[item]
        sample['tokens'] = self.tokens[item]
        sample['labels1'] = self.labels1[item]
        if self.labels2:
            sample['labels2'] = self.labels2[item]
        return sample

    def merge(self, dataset):
        """Append another RawDataset's posts in place."""
        for i in range(len(dataset)):
            self.postid_to_index[dataset.postids[i]] = len(self.postid_to_index)
            self.postids.append(dataset.postids[i])
            self.userids.append(dataset.userids[i])
            self.starts.append(dataset.starts[i])
            self.ends.append(dataset.ends[i])
            self.tokens.append(dataset.tokens[i])
            self.labels1.append(dataset.labels1[i])
            if dataset.labels2:
                # BUG FIX: the original appended the whole dataset.labels2 list
                # (nesting it) instead of the i-th post's label sequence.
                self.labels2.append(dataset.labels2[i])

    def save(self, filepath, labels1first=True):
        """Write the raw format back out; labels1first controls column order
        of the two label columns when label2 is present."""
        with open(filepath, 'w+') as fp:
            for i in range(len(self.tokens)):
                for j in range(len(self.tokens[i])):
                    if self.labels2:
                        fp.write('{}\t{}\t{}\t{}\t{}\t{}\t{}\n'.format(
                            self.postids[i],
                            self.userids[i],
                            self.starts[i][j],
                            self.ends[i][j],
                            self.tokens[i][j],
                            self.labels1[i][j] if labels1first else self.labels2[i][j],
                            self.labels2[i][j] if labels1first else self.labels1[i][j]))
                    else:
                        fp.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(
                            self.postids[i],
                            self.userids[i],
                            self.starts[i][j],
                            self.ends[i][j],
                            self.tokens[i][j],
                            self.labels1[i][j]))

    def save_conll(self, filepath, labels1first=True):
        """Write only token + label columns in CoNLL format, posts separated
        by blank lines; parent directories are created as needed."""
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        with open(filepath, 'w+') as fp:
            for i in range(len(self.tokens)):
                for j in range(len(self.tokens[i])):
                    if self.labels2:
                        fp.write('{}\t{}\t{}\n'.format(
                            self.tokens[i][j],
                            self.labels1[i][j] if labels1first else self.labels2[i][j],
                            self.labels2[i][j] if labels1first else self.labels1[i][j]))
                    else:
                        fp.write('{}\t{}\n'.format(
                            self.tokens[i][j],
                            self.labels1[i][j]))
                fp.write('\n')
def read_lines(filepath):
    """Yield posts from a blank-line-separated file.

    Each yielded post is the list of its stripped, non-empty lines.
    """
    # BUG FIX: the original opened the file without ever closing it (leaked
    # file handle); a `with` block guarantees it is closed after reading.
    with open(filepath, 'r') as stream:
        lines = [line.strip() for line in stream] + ['']
    post = []
    for line in lines:
        if not line:
            if post:
                yield post
            post = []
        else:
            post.append(line)
def from_bio_to_bioes(labels):
    """Convert BIO tags to BIOES in place and return the same list.

    A B- tag with no continuation becomes S-; an I- tag that ends its
    entity becomes E-.
    """
    total = len(labels)
    for i, tag in enumerate(labels):
        nxt = labels[i + 1] if i + 1 < total else 'O'
        at_boundary = nxt == 'O' or nxt.startswith('B')
        if tag.startswith('B') and at_boundary:
            labels[i] = 'S' + tag[1:]   # single-token entity
        elif tag.startswith('I') and at_boundary:
            labels[i] = 'E' + tag[1:]   # end of a multi-token entity
    return labels
def from_bioes_to_bio(labels):
    """Collapse BIOES tags back to BIO in place and return the same list."""
    remap = {'E': 'I', 'S': 'B'}
    for i, tag in enumerate(labels):
        prefix = tag[:1]
        if prefix in remap:
            labels[i] = remap[prefix] + tag[1:]
    return labels
def map_langids(langids):
    """Collapse language-id labels: English-like ids are kept as-is,
    everything else becomes 'other'."""
    keep = {'lang1', 'mixed', 'eng', 'en'}
    return [lid if lid in keep else 'other' for lid in langids]
| 11,547 | 34.532308 | 108 | py |
CS_ELMo | CS_ELMo-master/src/utilities.py | import re
import numpy as np
import random
import os
import subprocess
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import sklearn
import warnings
import json
from modeling.seqtagger import SequenceTagger
from sklearn.metrics import classification_report, precision_recall_fscore_support
from sklearn.metrics import accuracy_score
from allennlp.training.learning_rate_schedulers import CosineWithRestarts, SlantedTriangular
from seqeval.metrics import f1_score as ner_f1, precision_score as ner_prec, recall_score as ner_rec, accuracy_score as ner_acc
from seqeval.metrics import classification_report as ner_classification_report
def flatten(elems):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flat = []
    for group in elems:
        flat.extend(group)
    return flat
def get_optimizer(model, args):
    """Build the optimizer described by args.training.optimizer.

    Uses the model's parameter groups when the slanted-triangular scheduler
    is configured (required for discriminative fine-tuning); otherwise the
    flat parameter list. Raises for unknown optimizer names.
    """
    optim_args = args.training.optimizer
    if args.training.lr_scheduler.name == 'slanted_triangular':
        print('[LOG] Using grouped parameters for STLR scheduler')
        params = model.get_param_groups()
    else:
        params = model.parameters()
    name = optim_args.name
    if name == "sgd":
        return optim.SGD(params,
                         lr=optim_args.lr,
                         momentum=optim_args.momentum,
                         weight_decay=optim_args.weight_decay)
    if name == "asgd":
        return optim.ASGD(params,
                          lr=optim_args.lr,
                          weight_decay=optim_args.weight_decay)
    if name == "adam":
        return optim.Adam(params,
                          lr=optim_args.lr,
                          weight_decay=optim_args.weight_decay,
                          betas=(optim_args.beta1, optim_args.beta2))
    raise Exception("Opimizer '{}' not found".format(name))
def get_lr_scheduler(optimizer, train_size, args):
    """Build the learning-rate scheduler named in args.training.lr_scheduler.

    Returns None when the config asks for "none"; raises for unknown names.
    `train_size` is only needed for the slanted-triangular schedule.
    """
    lrs_args = args.training.lr_scheduler
    if lrs_args.name == "cos":
        scheduler = CosineWithRestarts(optimizer, t_initial=lrs_args.t_initial, t_mul=lrs_args.t_mul)  # t_initial=10, t_mul=2
    elif lrs_args.name == "step":
        scheduler = lr_scheduler.StepLR(optimizer, step_size=lrs_args.step_size)  # step_size=15
    elif lrs_args.name == "plateau":
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, factor=lrs_args.factor, patience=lrs_args.patience)  # factor=0.3, patience=5
    elif lrs_args.name == "plateau_f1":
        # mode='max' because F1 is a score to maximize, unlike loss.
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, factor=lrs_args.factor, patience=lrs_args.patience, mode='max')
    elif lrs_args.name == 'slanted_triangular':
        stepsperepoch = get_steps_per_epoch(train_size, args.training.batch_size)
        scheduler = SlantedTriangular(optimizer,
                                      num_epochs=args.training.epochs,
                                      num_steps_per_epoch=stepsperepoch,
                                      gradual_unfreezing=lrs_args.gradual_unfreezing,
                                      discriminative_fine_tuning=lrs_args.discriminative_fine_tuning)
    elif lrs_args.name == "none":
        scheduler = None
    else:
        # BUG FIX: the original referenced an undefined name `choice` here,
        # raising NameError instead of the intended descriptive message.
        raise Exception("Scheduler '{}' not found".format(lrs_args.name))
    return scheduler
def get_steps_per_epoch(dataset_size, batch_size):
    """Number of batches needed to cover the dataset (ceiling division)."""
    # -(-a // b) is ceil(a / b) for non-negative integers.
    return -(-dataset_size // batch_size)
def save_model(filename, state):
    """Persist a checkpoint dict with torch.save, creating parent dirs first."""
    parent = os.path.dirname(filename)
    os.makedirs(parent, exist_ok=True)
    torch.save(state, filename)
def try_load_model(filename, model, optimizer=None, trainer=None, scheduler=None, verbose=True):
    """Restore model (and optionally optimizer/trainer/scheduler) state from a
    checkpoint file if it exists.

    Returns True when the checkpoint was found and loaded, False otherwise.
    """
    if not os.path.exists(filename):
        if verbose:
            print("[LOG] No previous model found")
        return False
    if torch.cuda.is_available():
        location = f'cuda:{torch.cuda.current_device()}'
    else:
        location = 'cpu'
    state = torch.load(filename, map_location=location)
    if trainer is not None:
        # Resume bookkeeping so training continues from the saved epoch.
        trainer.best_f1 = state['f1']
        trainer.best_loss = state['loss']
        trainer.starting_epoch = state['epoch'] + 1
    model.load_state_dict(state['model'])
    if optimizer is not None:
        optimizer.load_state_dict(state['optimizer'])
    if scheduler is not None and 'scheduler' in state:
        scheduler.load_state_dict(state['scheduler'])
    if verbose:
        print(f"[LOG] Loading model from (unknown)... Epoch: {state['epoch']:03d}, F1: {state['f1']:.5f}, Loss: {state['loss']:.5f}")
    return True
def load_model_only(filename, model):
    """Load only the model weights from a checkpoint; no-op if the file is missing."""
    if not os.path.exists(filename):
        return
    device = f'cuda:{torch.cuda.current_device()}' if torch.cuda.is_available() else 'cpu'
    state = torch.load(filename, map_location=device)
    model.load_state_dict(state['model'])
    print("[LOG] Loading LID model... Epoch: {:03d}, F1: {:.5f}, Loss: {:.5f}".format(state['epoch'], state['f1'], state['loss']))
def require_grad(parameters, required=True):
    """Toggle requires_grad on every parameter in the iterable (freeze/unfreeze)."""
    for param in parameters:
        param.requires_grad = required
def choose_model(args, n_classes):
    """Instantiate the sequence tagger selected by the model config.

    Only the 'elmo' architecture is supported; any other name raises.
    """
    if args.name != 'elmo':
        raise Exception('Unknown model: {}'.format(args.name))
    return SequenceTagger(n_classes=n_classes,
                          word_hidden_dim=args.lstm_hidden_dim,
                          use_lstm=args.use_lstm,
                          use_position=args.charngrams.use_position,
                          use_second_task=args.charngrams.use_second_task,
                          charngram_mechanism=args.charngrams.mechanism,
                          ngram_order=args.charngrams.ngram_order,
                          dropout=args.dropout,
                          embeddings=args.embeddings,
                          use_ngram_vectors=args.charngrams.use_at_last_layer,
                          elmo_requires_grad=args.elmo_requires_grad,
                          elmo_version=args.version,
                          use_crf=args.use_crf)
def get_pretrained_model_architecture(config_path, include_pretraining=True):
    """Rebuild the tagger architecture described by a pretraining config and,
    unless disabled, load its CS-adapted best-loss checkpoint."""
    # Imported lazily to avoid a circular dependency with main.py.
    from dataset import CSDataset
    from main import Arguments
    args = Arguments(config_path=config_path)
    # The pretrained head size is fixed by the language-id label set of the
    # pretraining corpus.
    n_langids = len(set(flatten(CSDataset(args.dataset.train).langids)))
    model_args = args.model
    model = SequenceTagger(n_classes=n_langids,
                           word_hidden_dim=model_args.lstm_hidden_dim,
                           use_lstm=model_args.use_lstm,
                           use_position=model_args.charngrams.use_position,
                           use_second_task=model_args.charngrams.use_second_task,
                           charngram_mechanism=model_args.charngrams.mechanism,
                           ngram_order=model_args.charngrams.ngram_order,
                           dropout=model_args.dropout,
                           embeddings=model_args.embeddings,
                           use_ngram_vectors=model_args.charngrams.use_at_last_layer,
                           elmo_requires_grad=model_args.elmo_requires_grad,
                           elmo_version=model_args.version,
                           use_crf=model_args.use_crf)
    if not include_pretraining:
        print("[LOG] Returning model without CS pretraining (only ELMo pretrained knowledge)")
        return model
    bestpath = os.path.join(args.checkpoints, f'{args.experiment}.bestloss.pt')
    if not os.path.exists(bestpath):
        raise Exception(f"[ERROR] There is not checkpoint at '{bestpath}'")
    load_model_only(bestpath, model)
    print(f"[LOG] Successfully loaded the CS-adapted model from {bestpath}")
    return model
def save_history(history, args):
    """Append per-epoch (train loss, train F1, dev loss, dev F1) rows to the
    experiment's history file, creating its directory if needed."""
    hist_path = '{}/'.format(args.experiment)
    hist_file = '{}.history.txt'.format(args.experiment)
    os.makedirs(os.path.join(args.history, hist_path), exist_ok=True)
    with open(os.path.join(args.history, hist_path, hist_file), 'a+') as fp:
        for i in range(len(history['train']['f1'])):
            train = '{}\t{}\t'.format(history['train']['loss'][i], history['train']['f1'][i])
            valid = '{}\t{}\t'.format(history['dev']['loss'][i], history['dev']['f1'][i])
            # BUG FIX: the original omitted the newline, so every epoch's
            # columns were concatenated onto one single line.
            fp.write(train + valid + '\n')
def save_json(filename, content):
    """Serialize `content` as JSON to `filename`, creating parent dirs first."""
    target_dir = os.path.dirname(filename)
    os.makedirs(target_dir, exist_ok=True)
    with open(filename, 'w+') as handle:
        handle.write(json.dumps(content))
def sklearn_fmeasure(gold, pred, verbose, average_choice='weighted'):
    """Token-level accuracy/precision/recall/F1 over nested label sequences.

    `gold` and `pred` are lists of per-sentence label lists; they are
    flattened before scoring.
    """
    y_true = flatten(gold)
    y_pred = flatten(pred)
    assert len(y_true) == len(y_pred)
    with warnings.catch_warnings():
        # Silence undefined-metric warnings for labels missing from y_pred.
        warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
        prec, rec, f1, _support = precision_recall_fscore_support(y_true, y_pred, average=average_choice)
    acc = accuracy_score(y_true, y_pred)
    if verbose:
        print(classification_report(y_true, y_pred, digits=5))
        print('[LOG] Accuracy: {:.5f}'.format(acc))
    return acc, prec, rec, f1
def save_predictions(filename, words, golds, preds):
    """Write word/gold/pred triples in CoNLL style, one blank line per sentence."""
    with open(filename, 'w+') as fp:
        for i, sent_preds in enumerate(preds):
            for j, pred_label in enumerate(sent_preds):
                fp.write('{}\t{}\t{}\n'.format(words[i][j], golds[i][j], pred_label))
            fp.write('\n')
def running_fmeasure(gold, pred, verbose=False):
    """Entity-level (seqeval) accuracy, precision, recall, and F1 for NER."""
    if verbose:
        print(ner_classification_report(gold, pred, digits=5))
    return (ner_acc(gold, pred),
            ner_prec(gold, pred, average='micro'),
            ner_rec(gold, pred, average='micro'),
            ner_f1(gold, pred, average='micro'))
def get_character_ngrams(characters, n_order):
    """Return every contiguous n-gram of length `n_order` from the sequence."""
    total = len(characters)
    assert 1 <= n_order <= total and isinstance(n_order, int)
    return [characters[start:start + n_order] for start in range(total - n_order + 1)]
| 10,081 | 39.653226 | 139 | py |
CS_ELMo | CS_ELMo-master/src/trainer.py | import os
import time
import torch
import torch.nn as nn
import copy
import numpy as np
import utilities as utils
from utilities import running_fmeasure, sklearn_fmeasure
from utilities import save_model
from torch.utils.data import DataLoader
def to_tensor(labels, label2index, pad_value=0, return_mask=False, device='cpu'):
    """Encode nested label lists as a padded LongTensor.

    Rows shorter than the longest sequence are filled with `pad_value`.
    When `return_mask` is True, also returns a uint8 mask marking real tokens.
    """
    maxlen = max(map(len, labels))
    target = torch.zeros(len(labels), maxlen).long() + pad_value
    mask = torch.zeros(len(labels), maxlen).byte()
    for row, seq in enumerate(labels):
        indices = [label2index[lab] for lab in seq]
        target[row, :len(indices)] = torch.tensor(indices, dtype=torch.long)
        mask[row, :len(indices)] = 1
    target, mask = target.to(device), mask.to(device)
    if return_mask:
        return target, mask
    return target
class Stats(object):
    """Accumulates predictions, gold labels, and loss across batches of one epoch."""

    def __init__(self, index2label):
        self.loss_total = 0
        self.loss_factor = 0
        self.preds = []
        self.golds = []
        self.i2l = index2label

    def update(self, scores, target, mask, loss, loss_factor=1):
        """Record one batch: argmax predictions per token, gold labels, and
        the batch loss weighted by `loss_factor` (token count)."""
        self.loss_total += loss
        self.loss_factor += loss_factor
        predicted = scores.max(2)[1]
        lengths = mask.cpu().long().sum(1)
        for row in range(scores.size(0)):
            n = lengths[row]
            self.preds.append(predicted[row, :n].cpu().tolist())
            self.golds.append(target[row, :n].cpu().tolist())

    def loss(self):
        """Average loss over everything accumulated so far."""
        return self.loss_total / self.loss_factor

    def metrics(self, task, verbose=False):
        """Task-appropriate (acc, prec, rec, f1) over decoded label sequences."""
        decoded_preds = [[self.i2l[p] for p in seq] for seq in self.preds]
        decoded_golds = [[self.i2l[g] for g in seq] for seq in self.golds]
        if task.startswith('lid'):
            return sklearn_fmeasure(decoded_golds, decoded_preds, verbose=verbose, average_choice='weighted')
        if task.startswith('ner'):
            return running_fmeasure(decoded_golds, decoded_preds, verbose=verbose)
        if task.startswith('pos'):
            return sklearn_fmeasure(decoded_golds, decoded_preds, verbose=verbose, average_choice='micro')
        raise Exception('Unknown task: {}'.format(task))
def collate(batch):
    """Merge a list of dataset samples into one batch dict of lists.

    The 'entities' key is only created when at least one sample carries it.
    """
    collated = {'tokens': [], 'langids': [], 'simplified': []}
    for sample in batch:
        for key in ('tokens', 'langids', 'simplified'):
            collated[key].append(sample[key])
        if 'entities' in sample:
            collated.setdefault('entities', []).append(sample['entities'])
    return collated
class Trainer(object):
    """Drives training, evaluation, and checkpoint tracking for a sequence tagger.

    Builds label vocabularies and DataLoaders from the given datasets, runs the
    epoch loop over train/dev, steps the LR scheduler, and keeps the best model
    (by dev F1 and by dev loss) on disk under args.checkpoints.
    """

    def __init__(self, datasets, args):
        # `datasets` maps split name ('train'/'dev'/...) to a CSDataset.
        self.args = args
        # Label vocabularies are built over ALL splits so dev/test labels are indexable.
        self.index2langid = dict(enumerate(sorted({l for d in datasets.values() for l in utils.flatten(d.langids)})))
        self.index2simplified = dict(enumerate(sorted({l for d in datasets.values() for l in utils.flatten(d.simplified)})))
        self.langid2index = {value: key for key, value in self.index2langid.items()}
        self.simplified2index = {value: key for key, value in self.index2simplified.items()}
        self.starting_epoch = 0
        # Best-so-far dev metrics used by track_best_model.
        self.best_f1 = -1
        self.best_loss = np.Inf
        self.best_f1_state = None
        self.best_loss_state = None
        # TODO: try different weights when using MTL
        # alpha weights the char-level auxiliary loss, beta the word-level loss.
        self.alpha = 1
        self.beta = 1
        self.dataloaders = dict()
        for dataset in datasets.keys():
            # Only the training split is shuffled; eval uses its own batch size.
            batch_size = args.training.batch_size if dataset == 'train' else args.evaluation.batch_size
            self.dataloaders[dataset] = DataLoader(datasets[dataset],
                                                   batch_size=batch_size,
                                                   shuffle=dataset == 'train',
                                                   collate_fn=collate,
                                                   num_workers=1)

    def scheduler_step(self, scheduler, optimizer, epoch, dev_loss, dev_f1):
        """Advance the LR scheduler appropriately for its type and report
        whether the learning rate changed since the last acknowledged value."""
        lr_changed = False
        for param_group in optimizer.param_groups:
            if not lr_changed and param_group['lr'] != self.args.training.optimizer.lr:
                lr_changed = True
                self.args.training.optimizer.lr = param_group['lr'] # Acknowledge the recent top lr
        if self.args.training.lr_scheduler.name == 'plateau':
            scheduler.step(dev_loss)
        elif self.args.training.lr_scheduler.name == 'plateau_f1':
            scheduler.step(dev_f1)
        elif self.args.training.lr_scheduler.name == 'slanted_triangular':
            scheduler.step(epoch=epoch)
            lr_changed = False # for STLR, we don't restore the model because lr change every backprop iteration
        else:
            scheduler.step()
        return lr_changed

    def calculate_loss(self, result):
        """Combine the word-level loss with the optional char-level auxiliary
        loss (weighted by beta and alpha respectively)."""
        loss = result['word_output']['loss']
        if self.args.model.charngrams.use_second_task:
            loss = loss * self.beta + result['char_output']['loss'] * self.alpha
        return loss

    def train(self, model, optimizer, scheduler=None):
        """Run the train/dev epoch loop; returns the per-split loss/F1 history.

        NOTE(review): `ehist[dataset]` is re-initialized every epoch, so only
        the current epoch's values persist in the returned history — confirm
        whether save_history expects the full history here.
        """
        print("[LOG] Training model...")
        ehist = {}
        for epoch in range(self.starting_epoch, self.starting_epoch + self.args.training.epochs):
            stats = {}
            epoch_msg = 'Epoch {:04d}'.format(epoch)
            for dataset in ['train', 'dev']:
                if dataset == 'train':
                    model.train()
                    model.zero_grad()
                else: model.eval()
                ehist[dataset] = {'loss': [], 'f1': []}
                stats[dataset] = Stats(self.index2langid)
                # ================================================================================================
                epoch_time = time.time()
                for batch in self.dataloaders[dataset]:
                    batch['langids'] = to_tensor(batch['langids'], self.langid2index, device=self.args.device)
                    batch['simplified'] = to_tensor(batch['simplified'], self.simplified2index, device=self.args.device)
                    result = model(batch)
                    loss = self.calculate_loss(result)
                    # Normalize the loss by the number of real (unmasked) tokens.
                    ntoks = torch.sum(result['mask']).item()
                    loss /= ntoks
                    # L2 regularization
                    if self.args.training.l2 > 0:
                        loss = loss + model.get_l2_reg(self.args.training.l2)
                    if dataset == 'train':
                        loss.backward()
                        # Clipping the norm ||g|| of gradient g before the optmizer's step
                        if self.args.training.clip_grad > 0:
                            nn.utils.clip_grad_norm_(model.parameters(), self.args.training.clip_grad)
                        optimizer.step()
                        optimizer.zero_grad()
                        model.zero_grad()
                        if self.args.training.lr_scheduler.name == 'slanted_triangular':
                            for param_group in optimizer.param_groups:
                                self.args.training.optimizer.lr = param_group['lr'] # Acknowledge the most recent top lr
                            scheduler.step_batch()
                    stats[dataset].update(result['word_output']['logits'], batch['langids'], result['mask'].float(), loss.item() * ntoks, ntoks)
                epoch_time = time.time() - epoch_time
                # ================================================================================================
                epoch_acc, _, _, epoch_f1 = stats[dataset].metrics(task=self.args.task)
                epoch_loss = stats[dataset].loss()
                ehist[dataset]['loss'].append(epoch_loss)
                ehist[dataset]['f1'].append(epoch_f1)
                if dataset == 'train':
                    epoch_msg += '| [{}] Loss: {:.5f}, F1: {:.5f}, Time: {:6.2f}s'.format(dataset.upper(), epoch_loss, epoch_f1, epoch_time)
                else: epoch_msg += '| [{}] Loss: {:.5f}, F1: {:.5f}, Acc: {:.5f}'.format(dataset.upper(), epoch_loss, epoch_f1, epoch_acc)
            lr_changed = False
            if scheduler is not None:
                lr_changed = self.scheduler_step(scheduler, optimizer, epoch, stats['dev'].loss(), ehist['dev']['f1'][-1])
                epoch_msg += "| LR: {:.9f}".format(self.args.training.optimizer.lr)
            epoch_msg += self.track_best_model(model, optimizer, scheduler, ehist['dev']['f1'][-1], ehist['dev']['loss'][-1], epoch, lr_changed)
            print("[LOG] {}".format(epoch_msg))
        return ehist

    def predict(self, model, dataset):
        """Evaluate the model on a split; returns stats, decoded predictions,
        and (when the model exposes them) per-word character n-gram attentions."""
        print("[LOG] ============================================")
        print("[LOG] {} PREDICTIONS".format(dataset.upper()))
        print("[LOG] ============================================")
        model.eval()
        results = {
            'stats': Stats(self.index2langid),
            'preds': [],
            'ngram': {'sentences': []},
        }
        for batch in self.dataloaders[dataset]:
            # Keep the raw string labels; batch['langids'] is replaced by tensors.
            batch_langids = batch['langids']
            batch['langids'] = to_tensor(batch['langids'], self.langid2index, device=self.args.device)
            batch['simplified'] = to_tensor(batch['simplified'], self.simplified2index, device=self.args.device)
            result = model(batch)
            loss = self.calculate_loss(result).item()
            ntoks = torch.sum(result['mask']).item()
            results['stats'].update(result['word_output']['logits'], batch['langids'], result['mask'].float(), loss, ntoks)
            for sent_ix in range(len(result['word_output']['tags'])):
                # Decode only up to each sentence's true (unmasked) length.
                length = result['mask'][sent_ix].sum().item()
                labels = list(map(self.index2langid.get, result['word_output']['tags'][sent_ix][:length]))
                results['preds'].append(labels)
                if 'char_output' in result and ('lo_attention' in result['char_output'] or 'hi_attention' in result['char_output']):
                    sentence = []
                    assert len(batch['tokens'][sent_ix]) == len(labels)
                    for word_ix in range(len(batch['tokens'][sent_ix])):
                        charmeta_for_word = {
                            'word': batch['tokens'][sent_ix][word_ix],
                            'label': batch_langids[sent_ix][word_ix],
                            'pred': labels[word_ix]
                        }
                        if 'lo_attention' in result['char_output']:
                            # Per-order attention over the word's character n-grams
                            # (orders 1..4, capped by the word length).
                            charlen = len(charmeta_for_word['word'])
                            for ngram_order in range(4):
                                if ngram_order >= charlen:
                                    break
                                char_ngram_att = result['char_output']['lo_attention'][ngram_order][sent_ix][word_ix, :charlen - ngram_order]
                                charmeta_for_word[f'char_{ngram_order+1}gram_attentions'] = char_ngram_att.tolist()
                        if 'hi_attention' in result['char_output']:
                            cross_ngram_att = result['char_output']['hi_attention'][sent_ix, word_ix]
                            # assert sum(cross_ngram_att) > 1.0 - 1e-6
                            charmeta_for_word['char_nrgam'] = cross_ngram_att.tolist()
                        sentence.append(charmeta_for_word)
                    results['ngram']['sentences'].append(sentence)
        acc, _, _, f1 = results['stats'].metrics(task=self.args.task, verbose=True)
        print("[LOG] Loss: {:.5f}, F1: {:.5f}, Acc: {:.5f}".format(results['stats'].loss(), f1, acc))
        return results

    def track_best_model(self, model, optimizer, scheduler, val_f1, val_loss, epoch, lr_changed):
        """Checkpoint the model whenever dev F1 or dev loss improves; when
        neither improves and the LR just dropped, restore the best-F1 weights.
        Returns a short message fragment for the epoch log line."""
        message = ""
        loss_improved = self.best_loss > val_loss
        f1_improved = self.best_f1 < val_f1
        if not loss_improved and not f1_improved:
            if lr_changed:
                chckpt_fullpath = os.path.join(self.args.checkpoints, '{}.bestf1.pt'.format(self.args.experiment))
                utils.try_load_model(chckpt_fullpath, model, verbose=False)
                return "| Restoring model"
            else:
                return message
        state = {
            'epoch': epoch,
            'f1': val_f1,
            'loss': val_loss,
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }
        if scheduler is not None:
            state['scheduler'] = scheduler.state_dict()
        if f1_improved:
            message = "F1 improved"
            self.best_f1 = val_f1
            chckpt_fullpath = os.path.join(self.args.checkpoints, '{}.bestf1.pt'.format(self.args.experiment))
            save_model(chckpt_fullpath, state)
        if loss_improved:
            message = "Loss improved"
            self.best_loss = val_loss
            chckpt_fullpath = os.path.join(self.args.checkpoints, '{}.bestloss.pt'.format(self.args.experiment))
            save_model(chckpt_fullpath, state)
        if loss_improved and f1_improved:
            message = "F1 & Loss improved"
        return "| {}...".format(message)
| 13,315 | 41.273016 | 144 | py |
CS_ELMo | CS_ELMo-master/src/experiments/experiment_pos.py | import os
import re
import json
import random
import numpy as np
import torch
import torch.nn as nn
import utilities as utils
from collections import Counter
from modeling.seqtagger import SequenceTagger
from trainer import Trainer
from dataset import CSDataset
from allennlp.modules import ConditionalRandomField
def main(args):
    """Run the part-of-speech tagging experiment end to end.

    Loads the train/dev/test splits, prints corpus statistics, builds the
    sequence tagger (optionally reusing a pretrained ELMo), trains it, and
    evaluates the best checkpoint on the dev and test sets.
    """
    # =========================================================================================================
    # THE POS DATASET
    # =========================================================================================================
    print("[LOG] =====================")
    print("[LOG] PART-OF-SPEECH TAGGING EXPERIMENT")
    # pos_index=2: the POS tag lives in the third column of each CoNLL-style row
    datasets = {
        'train': CSDataset(args.dataset.train, pos_index=2),
        'dev': CSDataset(args.dataset.dev, pos_index=2),
        'test': CSDataset(args.dataset.test, pos_index=2)
    }
    print("[LOG] =====================")
    print("[LOG] CORPUS SAMPLES")
    print("[LOG] Train -> Posts: {:5,} Tokens: {:7,}".format(len(datasets['train']), len(utils.flatten(datasets['train'].tokens))))
    print("[LOG] Dev -> Posts: {:5,} Tokens: {:7,}".format(len(datasets['dev']), len(utils.flatten(datasets['dev'].tokens))))
    print("[LOG] Test -> Posts: {:5,} Tokens: {:7,}".format(len(datasets['test']), len(utils.flatten(datasets['test'].tokens))))
    print("[LOG] =====================")
    print("[LOG] CORPUS LID DISTRIBUTION")
    print("[LOG] Train -> {}".format(Counter(utils.flatten(datasets['train'].langids)).most_common()))
    print("[LOG] Dev -> {}".format(Counter(utils.flatten(datasets['dev'].langids)).most_common()))
    print("[LOG] Test -> {}".format(Counter(utils.flatten(datasets['test'].langids)).most_common()))
    print("[LOG] =====================")
    print("[LOG] CORPUS POS DISTRIBUTION")
    print("[LOG] Train -> {}".format(Counter(utils.flatten(datasets['train'].postags)).most_common()))
    print("[LOG] Dev -> {}".format(Counter(utils.flatten(datasets['dev'].postags)).most_common()))
    print("[LOG] Test -> {}".format(Counter(utils.flatten(datasets['test'].postags)).most_common()))
    # Label-set sizes are computed over the union of all three splits.
    n_langids = len(set(utils.flatten(datasets['train'].langids + datasets['dev'].langids + datasets['test'].langids)))
    n_simplified = len(set(utils.flatten(datasets['train'].simplified + datasets['dev'].simplified + datasets['test'].simplified)))
    n_entities = len(set(utils.flatten(datasets['train'].entities + datasets['dev'].entities + datasets['test'].entities)))
    n_postags = len(set(utils.flatten(datasets['train'].postags + datasets['dev'].postags + datasets['test'].postags)))
    print("[LOG] =====================")
    print("[LOG] CORPUS LABELS")
    print("[LOG] LangID classes:", n_langids)
    print("[LOG] Simplified classes:", n_simplified)
    print("[LOG] Entity classes:", n_entities)
    print("[LOG] POSTag classes:", n_postags)
    print("[LOG] =====================")
    # TODO: improve this temporal hack; for now it makes it compatible with the training pipeline
    # The Trainer reads targets from `.langids`, so the POS tags are swapped in
    # (the original LID labels are stashed in `.langids_tmp`).
    datasets['train'].langids_tmp = datasets['train'].langids
    datasets['dev'].langids_tmp = datasets['dev'].langids
    datasets['test'].langids_tmp = datasets['test'].langids
    datasets['train'].langids = datasets['train'].postags
    datasets['dev'].langids = datasets['dev'].postags
    datasets['test'].langids = datasets['test'].postags
    # =========================================================================================================
    # PREPARING THE MODEL
    # =========================================================================================================
    # Load the pretrained model
    if args.pretrained_config is None:
        print("[LOG] No pretrained config was specified. Creating model from main config only...")
        model = utils.choose_model(args.model, n_classes=n_postags)
    else:
        # ================================================================================
        # Building model and choosing parameters
        print(f"[LOG] Model will be built in mode '{args.pretrained_config.pretrained_part}'")
        if args.pretrained_config.pretrained_part == 'full':
            # model = pretrained_model
            # model.proj = nn.Linear(model.output_size, n_postags)
            # model.crf = ConditionalRandomField(n_postags)
            raise NotImplementedError
        elif args.pretrained_config.pretrained_part == 'elmo':
            # Load the pretrained LID model
            print("[LOG] Loading pretrained model...")
            pretrained_model = utils.get_pretrained_model_architecture(args.pretrained_config.path, include_pretraining=True)
            model = utils.choose_model(args.model, n_classes=n_postags)
            # Only the ELMo encoder is transferred; task heads are freshly initialized.
            model.elmo = pretrained_model.elmo
            del pretrained_model  # free memory
        else:
            raise Exception(f"Unknown pretrained part: {args.pretrained_config.pretrained_part}")
        # ================================================================================
        # Fixing parameters according to fine-tuning mode
        print(f"[LOG] Model will be fine-tuned in mode '{args.pretrained_config.finetuning_mode}'")
        if args.pretrained_config.finetuning_mode == 'fully_trainable':
            utils.require_grad(model.parameters(), True)
        elif args.pretrained_config.finetuning_mode == 'frozen_elmo':
            utils.require_grad(model.parameters(), True)
            utils.require_grad(model.elmo.parameters(), False)
        elif args.pretrained_config.finetuning_mode == 'inference':
            # Fixed all the parameters first
            utils.require_grad(model.parameters(), False)
            # Unfreeze the last layers for the POS task
            if hasattr(model, 'proj'): utils.require_grad(model.proj.parameters(), True)
            if hasattr(model, 'crf'): utils.require_grad(model.crf.parameters(), True)
        else:
            raise Exception(f"[ERROR] Unknown finetuning mode: {args.pretrained_config.finetuning_mode}")
    # Move to CUDA if available
    if torch.cuda.is_available():
        model.cuda()
    # =========================================================================================================
    # TRAINING THE MODEL
    # =========================================================================================================
    trainer = Trainer(datasets, args)
    optimizer = utils.get_optimizer(model, args)
    scheduler = utils.get_lr_scheduler(optimizer, len(datasets['train']), args)
    bestpath = os.path.join(args.checkpoints, f'{args.experiment}.bestf1.pt')
    if args.mode == 'train':
        if os.path.exists(bestpath):
            option = input("[LOG] Found a checkpoint! Choose an option:\n"
                           "\t0) Train from scratch and override the previous checkpoint\n"
                           "\t1) Load the checkpoint and train from there\nYour choice: ")
            assert option in {"0", "1"}, "Unexpected choice"
            if option == "1":
                utils.try_load_model(bestpath, model, optimizer, trainer, scheduler)
        history = trainer.train(model, optimizer, scheduler)
        utils.save_history(history, args)
    # Evaluate the best checkpoint regardless of whether we just trained.
    utils.try_load_model(bestpath, model, optimizer, trainer, scheduler)
    # TODO: save predictions
    # _ = trainer.predict(model, 'train')
    dev_output = trainer.predict(model, 'dev')
    tst_output = trainer.predict(model, 'test')
def require_grad(parameters, required=True):
    """Set the ``requires_grad`` flag of every given parameter to ``required``."""
    for param in parameters:
        param.requires_grad = required
| 7,582 | 45.521472 | 131 | py |
CS_ELMo | CS_ELMo-master/src/experiments/experiment_ner.py | import os
import re
import json
import torch
import torch.nn as nn
import utilities as utils
from collections import Counter
from modeling.seqtagger import SequenceTagger
from trainer import Trainer
from dataset import CSDataset
from allennlp.modules import ConditionalRandomField
def main(args):
    """Run the named-entity recognition experiment end to end.

    Loads the train/dev/test splits, prints corpus statistics, builds the
    sequence tagger (optionally reusing a pretrained ELMo), trains it, and
    evaluates the best checkpoint on the dev and test sets.
    """
    # =========================================================================================================
    # THE NER DATASET
    # =========================================================================================================
    print("[LOG] NAMED ENTITY RECOGNITION EXPERIMENT")
    # ner_index=2: the entity tag lives in the third column of each CoNLL-style row
    datasets = {
        'train': CSDataset(args.dataset.train, ner_index=2),
        'dev': CSDataset(args.dataset.dev, ner_index=2),
        'test': CSDataset(args.dataset.test, ner_index=2)
    }
    print("[LOG] {}".format('=' * 40))
    print("[LOG] CORPUS SAMPLES")
    print("[LOG] Train -> Posts: {:5,} Tokens: {:7,}".format(len(datasets['train']), len(utils.flatten(datasets['train'].tokens))))
    print("[LOG] Dev -> Posts: {:5,} Tokens: {:7,}".format(len(datasets['dev']), len(utils.flatten(datasets['dev'].tokens))))
    print("[LOG] Test -> Posts: {:5,} Tokens: {:7,}".format(len(datasets['test']), len(utils.flatten(datasets['test'].tokens))))
    print("[LOG] {}".format('=' * 40))
    print("[LOG] CORPUS NER DISTRIBUTION")
    print("[LOG] Train -> {}".format(Counter(utils.flatten(datasets['train'].entities)).most_common()))
    print("[LOG] Dev -> {}".format(Counter(utils.flatten(datasets['dev'].entities)).most_common()))
    print("[LOG] Test -> {}".format(Counter(utils.flatten(datasets['test'].entities)).most_common()))
    # Label-set sizes are computed over the union of all three splits.
    n_langids = len(set(utils.flatten(datasets['train'].langids + datasets['dev'].langids + datasets['test'].langids)))
    n_simplified = len(set(utils.flatten(datasets['train'].simplified + datasets['dev'].simplified + datasets['test'].simplified)))
    n_entities = len(set(utils.flatten(datasets['train'].entities + datasets['dev'].entities + datasets['test'].entities)))
    n_postags = len(set(utils.flatten(datasets['train'].postags + datasets['dev'].postags + datasets['test'].postags)))
    print("[LOG] {}".format('=' * 40))
    print("[LOG] CORPUS LABELS")
    print("[LOG] LangID classes:", n_langids)
    print("[LOG] Simplified classes:", n_simplified)
    print("[LOG] Entity classes:", n_entities)
    print("[LOG] POSTag classes:", n_postags)
    print("[LOG] {}".format('=' * 40))
    # TODO: improve this temporal hack; for now it makes it compatible with the training pipeline
    # The Trainer reads targets from `.langids`, so the entity labels are swapped in.
    datasets['train'].langids = datasets['train'].entities
    datasets['dev'].langids = datasets['dev'].entities
    datasets['test'].langids = datasets['test'].entities
    # =========================================================================================================
    # PREPARING THE MODEL
    # =========================================================================================================
    # Load the pretrained model
    if args.pretrained_config is None:
        print("[LOG] No pretrained config was specified. Creating model from main config only...")
        model = utils.choose_model(args.model, n_classes=n_entities)
    else:
        # ================================================================================
        # Building model and choosing parameters
        print(f"[LOG] Model will be built in mode '{args.pretrained_config.pretrained_part}'")
        if args.pretrained_config.pretrained_part == 'full':
            # model = pretrained_model
            # model.proj = nn.Linear(model.output_size, n_entities)
            # model.crf = ConditionalRandomField(n_entities)
            raise NotImplementedError
        elif args.pretrained_config.pretrained_part == 'elmo':
            # Load the pretrained LID model
            print("[LOG] Loading pretrained model...")
            pretrained_model = utils.get_pretrained_model_architecture(args.pretrained_config.path, include_pretraining=True)
            model = utils.choose_model(args.model, n_classes=n_entities)
            # Only the ELMo encoder is transferred; task heads are freshly initialized.
            model.elmo = pretrained_model.elmo
            del pretrained_model  # free memory
        else:
            raise Exception(f"Unknown pretrained part: {args.pretrained_config.pretrained_part}")
        # ================================================================================
        # Fixing parameters according to fine-tuning mode
        print(f"[LOG] Model will be fine-tuned in mode '{args.pretrained_config.finetuning_mode}'")
        if args.pretrained_config.finetuning_mode == 'fully_trainable':
            utils.require_grad(model.parameters(), True)
        elif args.pretrained_config.finetuning_mode == 'frozen_elmo':
            utils.require_grad(model.parameters(), True)
            utils.require_grad(model.elmo.parameters(), False)
        elif args.pretrained_config.finetuning_mode == 'inference':
            # Fixed all the parameters first
            utils.require_grad(model.parameters(), False)
            # Unfreeze the last layers for the NER task
            if hasattr(model, 'proj'): utils.require_grad(model.proj.parameters(), True)
            if hasattr(model, 'crf'): utils.require_grad(model.crf.parameters(), True)
        else:
            raise Exception(f"[ERROR] Unknown finetuning mode: {args.pretrained_config.finetuning_mode}")
    # Move to CUDA if available
    if torch.cuda.is_available():
        model.cuda()
    print(model)
    # =========================================================================================================
    # TRAINING THE MODEL
    # =========================================================================================================
    trainer = Trainer(datasets, args)
    optimizer = utils.get_optimizer(model, args)
    scheduler = utils.get_lr_scheduler(optimizer, len(datasets['train']), args)
    bestpath = os.path.join(args.checkpoints, f'{args.experiment}.bestf1.pt')
    if args.mode == 'train':
        if os.path.exists(bestpath):
            option = input("[LOG] Found a checkpoint! Choose an option:\n"
                           "\t0) Train from scratch and override the previous checkpoint\n"
                           "\t1) Load the checkpoint and train from there\nYour choice: ")
            assert option in {"0", "1"}, "Unexpected choice"
            if option == "1":
                utils.try_load_model(bestpath, model, optimizer, trainer, scheduler)
        history = trainer.train(model, optimizer, scheduler)
        utils.save_history(history, args)
    # Evaluate the best checkpoint regardless of whether we just trained.
    utils.try_load_model(bestpath, model, optimizer, trainer, scheduler)
    # TODO: save predictions
    # _ = trainer.predict(model, 'train')
    dev_output = trainer.predict(model, 'dev')
    tst_output = trainer.predict(model, 'test')
| 6,851 | 45.612245 | 131 | py |
CS_ELMo | CS_ELMo-master/src/modeling/seqtagger.py | import torch
import torch.nn as nn
import numpy as np
from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings
from flair.data import Sentence
from modeling.attention import NgramEnhancer
from modeling.elmo import Elmo, batch_to_ids
from allennlp.modules import ConditionalRandomField
# Pretrained ELMo variants selectable by name. Each entry gives the output
# dimensionality of the contextual representation ("projection", written as
# per-layer size * 2 — presumably forward+backward directions; confirm against
# the linked options files) and the public AllenNLP option/weight URLs.
ELMO_SETTINGS = {
    "small": {
        "projection": 128 * 2,
        "options": "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_options.json",
        "weights": "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_weights.hdf5"
    },
    "medium": {
        "projection": 256 * 2,
        "options": "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_options.json",
        "weights": "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_weights.hdf5"
    },
    "original": {
        "projection": 512 * 2,
        "options": "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json",
        "weights": "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5"
    }
}
class CharNgramTagger(nn.Module):
    """Auxiliary 3-class tagger applied on top of concatenated character
    n-gram features (the secondary task of the sequence tagger)."""

    def __init__(self, input_size):
        super(CharNgramTagger, self).__init__()
        self.input_size = input_size
        self.n_classes = 3
        self.clf = nn.Linear(self.input_size, self.n_classes)
        self.xent = nn.CrossEntropyLoss(reduction='sum')

    def xentropy_loss(self, logits, target, mask):
        """Summed cross-entropy over unmasked positions plus argmax tags."""
        tags = logits.max(-1)[1].cpu().tolist()
        keep = mask == 1
        return {
            'loss': self.xent(logits[keep], target[keep]),
            'logits': logits,
            'tags': tags,
        }

    def forward(self, inputs, labels, mask):
        # Project the features to class logits, then score them.
        return self.xentropy_loss(self.clf(inputs), labels, mask)
class SequenceTagger(nn.Module):
    """ELMo-based sequence tagger with optional BiLSTM encoder, CRF decoding,
    flair/word embeddings, character n-gram attention, and an auxiliary
    character-level tagging task."""

    def __init__(self, n_classes, word_hidden_dim,
                 use_lstm=False,
                 use_position=False,
                 use_second_task=False,
                 charngram_mechanism='none',
                 ngram_order=7,
                 dropout=0.4,
                 embeddings=None,
                 use_ngram_vectors=False,
                 elmo_requires_grad=True,
                 use_crf=True,
                 elmo_version='original'):
        super(SequenceTagger, self).__init__()
        # ELMo settings: output size and conv channels
        self.elmo_hidden_size = ELMO_SETTINGS[elmo_version]['projection']
        self.elmo_convolutions = [32, 32, 64, 128, 256, 512, 1024]
        self.word_hidden_size = word_hidden_dim
        self.n_classes = n_classes
        self.use_lstm = use_lstm
        self.use_crf = use_crf
        self.use_char_tagger = use_second_task
        self.use_ngram_vectors = use_ngram_vectors
        self.has_embeddings = embeddings is not None and len(embeddings) > 0
        self.elmo_requires_grad = elmo_requires_grad
        self.charngram_mechanism = charngram_mechanism
        # Optional character n-gram attention injected into ELMo's CNN encoder.
        if charngram_mechanism != 'none':
            char_enhancer = NgramEnhancer(variable_dims=self.elmo_convolutions[:ngram_order],
                                          ngram_order=ngram_order,
                                          attention_type=charngram_mechanism,
                                          use_position=use_position)
        else:
            char_enhancer = None
        self.elmo = Elmo(ELMO_SETTINGS[elmo_version]["options"],
                         ELMO_SETTINGS[elmo_version]["weights"],
                         num_output_representations=2,
                         dropout=dropout,
                         requires_grad=self.elmo_requires_grad,
                         char_enhancer=char_enhancer)
        if self.has_embeddings:
            print("[LOG] Stacking embeddings: {}".format(embeddings))
            # 'flair:<name>' entries load FlairEmbeddings; anything else is a
            # plain WordEmbeddings name ('glove', 'crawl', 'twitter', ...).
            self.embeddings = StackedEmbeddings([
                FlairEmbeddings(emb.split(":")[-1]) if emb.startswith('flair:') else
                WordEmbeddings(emb)  # 'glove', 'crawl', 'twitter'
                for emb in embeddings
            ])
            self.elmo_hidden_size += self.embeddings.embedding_length
        if self.use_char_tagger:
            # Auxiliary task head over concatenated n-gram convolution channels.
            self.charclf = CharNgramTagger(sum(self.elmo_convolutions[:ngram_order]))
        # The classifier input width optionally includes the raw n-gram vectors.
        if self.use_lstm:
            self.lstm = nn.LSTM(self.elmo_hidden_size, self.word_hidden_size // 2, num_layers=1, batch_first=True, bidirectional=True)
            self.output_size = self.word_hidden_size + sum(self.elmo_convolutions[:ngram_order]) if self.use_ngram_vectors else self.word_hidden_size
            self.proj = nn.Linear(self.output_size, self.n_classes)
        else:
            self.output_size = self.elmo_hidden_size + sum(self.elmo_convolutions[:ngram_order]) if self.use_ngram_vectors else self.elmo_hidden_size
            self.proj = nn.Linear(self.output_size, self.n_classes)
        self.drop = nn.Dropout(dropout)
        if self.use_crf:
            self.crf = ConditionalRandomField(self.n_classes, constraints=None, include_start_end_transitions=True)
        else: self.xent = nn.CrossEntropyLoss(reduction='sum')

    def get_representations(self, inputs):
        """Get embedding character and word representations for the given input batch"""
        encoded_chars = batch_to_ids(inputs['tokens'])
        encoded_chars = encoded_chars['elmo_tokens']
        batch_size = encoded_chars.size(0)   # number of samples in batch
        sent_length = encoded_chars.size(1)  # max number of words in a sentence
        word_length = encoded_chars.size(2)  # max number of characters in a word
        # building character length tensor
        char_lenghts = torch.zeros(batch_size, sent_length).long()
        for i in range(len(inputs['tokens'])):
            for j in range(len(inputs['tokens'][i])):
                # Words longer than the character padding are truncated by ELMo.
                char_lenghts[i, j] = min(len(inputs['tokens'][i][j]), word_length)
        device = next(self.parameters()).device
        encoded_chars = encoded_chars.to(device)
        char_lenghts = char_lenghts.to(device)
        outputs = self.elmo.forward(encoded_chars, char_lengths=char_lenghts)
        return {
            'char_enhancement': outputs['char_enhancement'],  # convolutions of different kernel sizes from (1,) to (7,)
            'word_syn': outputs['elmo_representations'][0],   # first layer has syntactic features
            'word_sem': outputs['elmo_representations'][1],   # second layer has semantic features
            'mask': outputs['mask'],
            'char_lengths': char_lenghts.view(batch_size * sent_length).long()
        }

    @staticmethod
    def _get_param_group1():
        # Fine-grained ELMo parameter grouping (one group per conv layer).
        # NOTE(review): get_param_groups() uses _get_param_group2(); this
        # grouping appears unused within this class — confirm before removing.
        param_group = [
            ['_elmo_lstm._token_embedder._char_embedding_weights'],
            ['_elmo_lstm._token_embedder.char_conv_0.weight', '_elmo_lstm._token_embedder.char_conv_0.bias'],
            ['_elmo_lstm._token_embedder.char_conv_1.weight', '_elmo_lstm._token_embedder.char_conv_1.bias'],
            ['_elmo_lstm._token_embedder.char_conv_2.weight', '_elmo_lstm._token_embedder.char_conv_2.bias'],
            ['_elmo_lstm._token_embedder.char_conv_3.weight', '_elmo_lstm._token_embedder.char_conv_3.bias'],
            ['_elmo_lstm._token_embedder.char_conv_4.weight', '_elmo_lstm._token_embedder.char_conv_4.bias'],
            ['_elmo_lstm._token_embedder.char_conv_5.weight', '_elmo_lstm._token_embedder.char_conv_5.bias'],
            ['_elmo_lstm._token_embedder.char_conv_6.weight', '_elmo_lstm._token_embedder.char_conv_6.bias'],
            ['_elmo_lstm._token_embedder._highways._layers.0.weight', '_elmo_lstm._token_embedder._highways._layers.0.bias'],
            ['_elmo_lstm._token_embedder._projection.weight', '_elmo_lstm._token_embedder._projection.bias'],
            ['_elmo_lstm._elmo_lstm.forward_layer_0.input_linearity.weight', '_elmo_lstm._elmo_lstm.forward_layer_0.state_linearity.weight', '_elmo_lstm._elmo_lstm.forward_layer_0.state_linearity.bias', '_elmo_lstm._elmo_lstm.forward_layer_0.state_projection.weight'],
            ['_elmo_lstm._elmo_lstm.backward_layer_0.input_linearity.weight', '_elmo_lstm._elmo_lstm.backward_layer_0.state_linearity.weight', '_elmo_lstm._elmo_lstm.backward_layer_0.state_linearity.bias', '_elmo_lstm._elmo_lstm.backward_layer_0.state_projection.weight'],
            ['_elmo_lstm._elmo_lstm.forward_layer_1.input_linearity.weight', '_elmo_lstm._elmo_lstm.forward_layer_1.state_linearity.weight', '_elmo_lstm._elmo_lstm.forward_layer_1.state_linearity.bias', '_elmo_lstm._elmo_lstm.forward_layer_1.state_projection.weight'],
            ['_elmo_lstm._elmo_lstm.backward_layer_1.input_linearity.weight', '_elmo_lstm._elmo_lstm.backward_layer_1.state_linearity.weight', '_elmo_lstm._elmo_lstm.backward_layer_1.state_linearity.bias', '_elmo_lstm._elmo_lstm.backward_layer_1.state_projection.weight'],
            ['scalar_mix_0.gamma', 'scalar_mix_0.scalar_parameters.0', 'scalar_mix_0.scalar_parameters.1','scalar_mix_0.scalar_parameters.2'],
            ['scalar_mix_1.gamma', 'scalar_mix_1.scalar_parameters.0', 'scalar_mix_1.scalar_parameters.1','scalar_mix_1.scalar_parameters.2']
        ]
        return param_group

    @staticmethod
    def _get_param_group2():
        # Coarser ELMo parameter grouping: one group per architectural stage,
        # used by get_param_groups() for gradual unfreezing.
        param_group = [
            # Character embedding weights
            ['_elmo_lstm._token_embedder._char_embedding_weights'],
            # Convolutional layer weights
            ['_elmo_lstm._token_embedder.char_conv_0.weight', '_elmo_lstm._token_embedder.char_conv_0.bias',
             '_elmo_lstm._token_embedder.char_conv_1.weight', '_elmo_lstm._token_embedder.char_conv_1.bias',
             '_elmo_lstm._token_embedder.char_conv_2.weight', '_elmo_lstm._token_embedder.char_conv_2.bias',
             '_elmo_lstm._token_embedder.char_conv_3.weight', '_elmo_lstm._token_embedder.char_conv_3.bias',
             '_elmo_lstm._token_embedder.char_conv_4.weight', '_elmo_lstm._token_embedder.char_conv_4.bias',
             '_elmo_lstm._token_embedder.char_conv_5.weight', '_elmo_lstm._token_embedder.char_conv_5.bias',
             '_elmo_lstm._token_embedder.char_conv_6.weight', '_elmo_lstm._token_embedder.char_conv_6.bias'],
            # Highway network weights
            ['_elmo_lstm._token_embedder._highways._layers.0.weight', '_elmo_lstm._token_embedder._highways._layers.0.bias'],
            # Token projection weights
            ['_elmo_lstm._token_embedder._projection.weight', '_elmo_lstm._token_embedder._projection.bias'],
            # First bidirectional LSTM
            ['_elmo_lstm._elmo_lstm.forward_layer_0.input_linearity.weight', '_elmo_lstm._elmo_lstm.forward_layer_0.state_linearity.weight',
             '_elmo_lstm._elmo_lstm.forward_layer_0.state_linearity.bias', '_elmo_lstm._elmo_lstm.forward_layer_0.state_projection.weight',
             '_elmo_lstm._elmo_lstm.backward_layer_0.input_linearity.weight', '_elmo_lstm._elmo_lstm.backward_layer_0.state_linearity.weight',
             '_elmo_lstm._elmo_lstm.backward_layer_0.state_linearity.bias', '_elmo_lstm._elmo_lstm.backward_layer_0.state_projection.weight'],
            # Second bidirectional LSTM
            ['_elmo_lstm._elmo_lstm.forward_layer_1.input_linearity.weight', '_elmo_lstm._elmo_lstm.forward_layer_1.state_linearity.weight',
             '_elmo_lstm._elmo_lstm.forward_layer_1.state_linearity.bias', '_elmo_lstm._elmo_lstm.forward_layer_1.state_projection.weight',
             '_elmo_lstm._elmo_lstm.backward_layer_1.input_linearity.weight', '_elmo_lstm._elmo_lstm.backward_layer_1.state_linearity.weight',
             '_elmo_lstm._elmo_lstm.backward_layer_1.state_linearity.bias', '_elmo_lstm._elmo_lstm.backward_layer_1.state_projection.weight'],
            # Scalar mixers
            ['scalar_mix_0.gamma', 'scalar_mix_0.scalar_parameters.0', 'scalar_mix_0.scalar_parameters.1','scalar_mix_0.scalar_parameters.2',
             'scalar_mix_1.gamma', 'scalar_mix_1.scalar_parameters.0', 'scalar_mix_1.scalar_parameters.1','scalar_mix_1.scalar_parameters.2']
        ]
        return param_group

    def get_param_groups(self):
        """Partition all model parameters into optimizer groups, ordered so
        later groups (task-specific, non-pretrained) can be unfrozen first."""
        elmo_param_groups = self._get_param_group2()
        # There are at least len(elmo_param_groups) groups
        params = [{'params': []} for _ in range(len(elmo_param_groups))]
        # We also need to collect the character enhacer parameters within ELMo
        enhancer_params = []
        # Separating elmo layers in multiple param groups to unfreeze gradually
        for name, param in self.elmo.named_parameters():
            group_index = None
            for i, group_names in enumerate(elmo_param_groups):
                if name in group_names:
                    group_index = i
                    break
            if group_index is None:
                if '.char_enhancer.' in name:
                    enhancer_params.append(param)
                    continue
                else:
                    raise Exception("[ERROR] Parameter not found in groups: {}".format(name))
            params[group_index]['params'].append(param)
        assert all(len(p['params']) > 0 for p in params), "There shouldn't be empty groups at this point!"
        # The rest must be all in the last group, so that they get unfrozen first (why? because these parameters are not pretrained)
        params.append({'params': []})
        params[-1]['params'].extend(self.proj.parameters())
        if self.use_lstm: params[-1]['params'].extend(self.lstm.parameters())
        if self.use_crf: params[-1]['params'].extend(self.crf.parameters())
        if self.use_char_tagger: params[-1]['params'].extend(self.charclf.parameters())
        if self.has_embeddings: params[-1]['params'].extend(self.embeddings.parameters())
        if enhancer_params: params[-1]['params'].extend(enhancer_params)
        # The default empty parameter group
        params.append({'params': []})
        return params

    def get_l2_reg(self, l2_lambda, include_elmo=True, include_crf=False):
        """Accumulate an L2 penalty over the trainable sub-modules.

        NOTE(review): the running total is re-multiplied by ``l2_lambda`` at
        each step, so earlier terms (e.g. ``proj``) end up scaled by higher
        powers of the lambda — confirm whether this compounding is intended.
        """
        def sum_l2reg(parameters, accumulated=None):
            for W in parameters:
                if W.requires_grad:
                    if accumulated is None:
                        accumulated = W.norm(2)
                    else:
                        accumulated = accumulated + W.norm(2)
            return accumulated
        l2_sum = sum_l2reg(self.proj.parameters()) * l2_lambda
        if self.elmo_requires_grad and include_elmo:
            l2_sum = l2_lambda * sum_l2reg(self.elmo.parameters(), l2_sum)
        if self.use_crf and include_crf:
            l2_sum = l2_lambda * sum_l2reg(self.crf.parameters(), l2_sum)
        if self.use_lstm: l2_sum = l2_lambda * sum_l2reg(self.lstm.parameters(), l2_sum)
        if self.use_char_tagger: l2_sum = l2_lambda * sum_l2reg(self.charclf.parameters(), l2_sum)
        if self.has_embeddings: l2_sum = l2_lambda * sum_l2reg(self.embeddings.parameters(), l2_sum)
        return l2_sum

    def crf_loss(self, logits, target, mask):
        """CRF negative log-likelihood; returned 'logits' are replaced by a
        one-hot encoding of the Viterbi-decoded tag sequence."""
        best_paths = self.crf.viterbi_tags(logits, mask)
        predicted_tags = [x for x, y in best_paths]
        loss = -self.crf.forward(logits, target, mask)  # neg log-likelihood loss
        logits = logits * 0.0
        for i, instance_tags in enumerate(predicted_tags):
            for j, tag_id in enumerate(instance_tags):
                logits[i, j, tag_id] = 1
        return {'loss': loss, 'logits': logits, 'tags': predicted_tags}

    def xent_loss(self, logits, target, mask):
        """Summed cross-entropy over unmasked positions plus argmax tags."""
        predicted_tags = logits.max(-1)[1].cpu().tolist()
        loss = self.xent(logits[mask == 1], target[mask == 1])
        return {'loss': loss, 'logits': logits, 'tags': predicted_tags}

    def get_embedding(self, inputs):
        """Embed the raw sentences with the stacked flair/word embeddings and
        pad them into a (batch, max_len, embedding_length) tensor."""
        sentences = [Sentence(s) for s in inputs['tokens']]
        maxlen = max([len(s) for s in sentences])
        self.embeddings.embed(sentences)
        device = next(self.parameters()).device
        output = torch.zeros(len(sentences), maxlen, self.embeddings.embedding_length).to(device)
        for i, sent in enumerate(sentences):
            output[i, :len(sent)] = torch.cat([t.embedding.view(1, self.embeddings.embedding_length) for t in sent], dim=0)
        return output

    def forward(self, inputs):
        """
        :param inputs: a batch of raw text
        :return: dictionary with keys 'char_output', 'word_output', and 'mask'. The first two entries contain
                 dictionaries with loss, predicted tags, and logits
        """
        results = dict()
        representations = self.get_representations(inputs)
        word_embedding = representations['word_sem']
        if self.has_embeddings:
            stacked_embedding = self.get_embedding(inputs)
            stacked_embedding = self.drop(stacked_embedding)
            word_embedding = torch.cat([word_embedding, stacked_embedding], dim=-1)
        if self.use_lstm:
            outputs, _ = self.lstm(word_embedding)
        else: outputs = word_embedding
        outputs = self.drop(outputs)
        if self.use_char_tagger:
            # Auxiliary character task is scored on the raw n-gram convolutions.
            results['char_output'] = self.charclf.forward(representations['char_enhancement']['convolutions'],
                                                          inputs['simplified'],
                                                          representations['mask'])
        if self.use_ngram_vectors:
            outputs = torch.cat([outputs, representations['char_enhancement']['convolutions']], dim=-1)
            outputs = self.drop(outputs)
        word_logits = self.proj(outputs)
        results['mask'] = representations['mask']
        if self.use_crf:
            results['word_output'] = self.crf_loss(word_logits, inputs['langids'], representations['mask'])
        else: results['word_output'] = self.xent_loss(word_logits, inputs['langids'], representations['mask'])
        return results
| 18,175 | 50.2 | 272 | py |
CS_ELMo | CS_ELMo-master/src/modeling/attention.py | import torch
import torch.nn as nn
def stable_softmax(scores, mask=None, epsilon=1e-9):
    """Numerically stable (optionally masked) softmax along dimension 1.

    :param scores: tensor of shape (batch_size, sequence_length)
    :param mask: (optional) binary tensor for padded sequences, same shape
    :param epsilon: small constant added to the normalizer to avoid division by zero
    :return: probability tensor of shape (batch_size, sequence_length)
    """
    n_rows, n_cols = scores.size()
    # Shift by the row-wise max so the largest exponent is exp(0) == 1 (no overflow).
    shifted = scores - scores.max(dim=1, keepdim=True)[0]
    weights = torch.exp(shifted)
    if mask is not None:
        # Zero out padded positions before normalizing.
        weights = weights * mask.view(n_rows, n_cols).float()
    normalizer = weights.sum(dim=1, keepdim=True) + epsilon
    return weights / normalizer
class NgramEnhancer(nn.Module):
    """Dispatcher for character n-gram pooling.

    Depending on ``attention_type`` it wires up a low-level attention (over
    the positions of each n-gram convolution), a high-level attention (across
    n-gram orders), both, or neither. With no enhancer, the low-level pass
    falls back to max-pooling and the high-level pass is the identity.
    """

    def __init__(self, variable_dims, ngram_order, attention_type, use_position):
        super(NgramEnhancer, self).__init__()
        assert attention_type in {'none', 'lo_attention', 'hi_attention', 'hier_attention'}
        self.ngram_order = ngram_order
        self.dims = variable_dims
        self.attention_type = attention_type
        self.lo_enhancer = None
        self.hi_enhancer = None
        if attention_type in ('hier_attention', 'lo_attention'):
            self.lo_enhancer = LowNgramAttention(variable_dims, attn_size=100, use_position=use_position)
        if attention_type in ('hier_attention', 'hi_attention'):
            self.hi_enhancer = HighNgramAttention(variable_dims, attn_size=256, ngram_space=128)

    def lo_forward(self, convolved, n, char_lengths):
        # Attention only applies within the configured n-gram order; otherwise
        # (or when disabled) plain max-pooling over the last dimension is used.
        if self.lo_enhancer is not None and n < self.ngram_order:
            return self.lo_enhancer.forward_ngram_order(convolved, n, char_lengths)
        pooled, _ = torch.max(convolved, dim=-1)
        return pooled, None

    def hi_forward(self, convolutions, mask):
        if self.hi_enhancer is not None:
            return self.hi_enhancer.forward(convolutions, mask)
        # No high-level attention: pass the convolutions through unchanged.
        return convolutions, None

    def forward(self, inputs, mask, char_lengths):
        raise NotImplementedError('This class only supports calls for low- or high-level forward')
class LowNgramAttention(nn.Module):
    """Applies attention across the ngram vector sets (e.g., the trigrams are represented by a single vector)."""
    def __init__(self, variable_dims, attn_size, use_position):
        """
        :param attn_size: int, specifies the attention space to which the inputs are projected
        :param variable_dims: a list with the expected dimensions of the ngram convolutions (n_filters)
        :param use_position: bool, if True adds a learned positional embedding per n-gram order
        """
        super(LowNgramAttention, self).__init__()
        self.dims = variable_dims
        self.da = attn_size
        self.ngram_space = sum(self.dims)
        self.use_position = use_position
        # One projection (W) and one scoring vector (v) per n-gram order.
        self.W = nn.ModuleList([nn.Linear(dim, self.da) for dim in self.dims])
        self.v = nn.ModuleList([nn.Linear(self.da, 1) for _ in range(len(self.dims))])
        if self.use_position:
            # 50 appears to be the maximum characters per token (presumably
            # ELMo's max_characters_per_token) -- TODO confirm against options.
            self.positions = nn.ModuleList(
                [nn.Embedding(50 - i + 1, channels, padding_idx=0)
                 for i, channels in enumerate(self.dims)])
        else:
            self.positions = None
    @staticmethod
    def _build_char_mask(char_lengths, i, ngramsize, n_words):
        # A word with L characters yields (L - i) valid n-gram positions for
        # the i-th filter width; everything past that is padding and is masked.
        lengths_i = char_lengths - i
        lengths_i[lengths_i < 0] = 0 # because of padding, 'char_length - i' will contain negative values
        lengths_i = lengths_i.view(n_words, 1).long()
        indexes = torch.arange(0, ngramsize).long().to(char_lengths.device)  # (1, ngram_size)
        char_mask = (indexes < lengths_i).float()  # broadcasting compares against all the words
        return char_mask
    def _positional_encoding(self, n, ngram_size, device):
        # Positions are 1-based so that index 0 remains the padding embedding.
        indexes = torch.arange(1, ngram_size + 1).view(1, ngram_size).to(device) # (1, ngram_size)
        pos_enc = self.positions[n](indexes)
        return pos_enc.unsqueeze(0) # (1, 1, ngram_size) -> unsqueeze to account for batch size
    def forward_ngram_order(self, inputs, n, char_lengths):
        # inputs: (n_words, channels, ngramsize) from the n-th convolution.
        inputs = inputs.transpose(1, 2)
        n_words, ngramsize, channels = inputs.size()
        batch, seqlen = char_lengths.size()
        # When the word axis is seqlen + 2 the sequence carries BOS/EOS
        # boundary tokens; give those pseudo-tokens a character length of 1.
        has_boundaries = (n_words // batch) == (seqlen + 2)
        if has_boundaries:
            char_bos_indexes = 0
            char_eos_indexes = (char_lengths > 0).long().sum(dim=1) + 1 # +1 to account for bos
            temp = torch.zeros(batch, seqlen + 2).long().to(char_lengths.device)
            temp[:, 1:-1] += char_lengths
            temp[range(batch), char_bos_indexes] = 1 # include the bos token
            temp[range(batch), char_eos_indexes] = 1 # include the eos token
            _char_lengths = temp
        else:
            _char_lengths = char_lengths
        char_mask = self._build_char_mask(_char_lengths, n, ngramsize, n_words)
        pos_enc = 0
        if self.use_position:
            pos_enc = self._positional_encoding(n, ngramsize, char_mask.device) # (1, 1, ngramsize, channels)
            pos_enc = pos_enc.view(1, ngramsize, channels).repeat(n_words, 1, 1) # (n_words, ngramsize, channels)
        # Additive attention: score = v . tanh(W x + pos).
        scores = self.v[n](torch.tanh(self.W[n](inputs + pos_enc))) # (n_words, ngrams, 1)
        scores = scores.view(n_words, ngramsize) # (n_words, ngrams)
        a = stable_softmax(scores, mask=char_mask)
        inputs = inputs * a.view(n_words, ngramsize, 1) # (n_words, ngrams, channels)
        outputs = torch.sum(inputs, dim=1, keepdim=False) # (n_words, channels)
        if has_boundaries:
            a = a.view(batch, seqlen + 2, ngramsize)
        else:
            a = a.view(batch, seqlen, ngramsize)
        # Attention weights are detached and moved to CPU (inspection only).
        return outputs, a.data.cpu()
    def forward(self, inputs, mask, char_lengths):
        """
        :param inputs: list of N convolutions with different filter size: (batch, sequence, ngrams, channels for the i-th width)
        :param mask: tensor with word mask (batch, word_sequence)
        :param char_lengths: tensor with character lengths (batch * word_sequence)
        :return: the list of pooled per-order representations (mutated in place) and the per-order attention maps
        """
        attentions = []
        for i, channels in enumerate(self.dims):
            assert channels == inputs[i].size(2), "Expecting inputs to have shape (batch, seqlen, ngramsize, channels)"
            # NOTE: `inputs` is mutated in place; each entry is replaced by its
            # attention-pooled representation.
            inputs[i], a = self.forward_ngram_order(inputs[i], i, char_lengths)
            attentions.append(a)
        return inputs, attentions
class HighNgramAttention(nn.Module):
    """Applies attention across the ngram vector sets (e.g., the trigrams are represented by a single vector)."""
    def __init__(self, variable_dims, ngram_space=128, attn_size=256):
        """
        :param attn_size: int, specifies the attention space to which the inputs are projected
        :param variable_dims: a list with the expected dimensions of the ngram convolutions (n_filters)
        :param ngram_space: int, shared space the per-order vectors are projected into before scoring
        """
        super(HighNgramAttention, self).__init__()
        self.dims = variable_dims
        self.dn = ngram_space
        self.da = attn_size
        # Per-order projections into a shared n-gram space, then a shared scorer.
        self.W = nn.ModuleList([nn.Linear(dim, self.dn) for dim in self.dims])
        self.U = nn.Linear(self.dn, self.da)
        self.v = nn.Linear(self.da, 1)
    def forward(self, inputs, mask):
        """
        :param inputs: a list of N convolutions with different filters each: (batch * sequence, n_filters for the i-th width)
        :param mask: tensor with word mask (batch, sequence); only its shape is used here
        :return: the re-weighted inputs (mutated in place) and the attention over n-gram orders
        """
        batch_size, seq_length = mask.size()
        projs = []
        for i, channels in enumerate(self.dims):
            inputs[i] = inputs[i].view(batch_size * seq_length, channels)
            proj = self.W[i](inputs[i]).view(batch_size * seq_length, 1, self.dn) # (batch * seq, 1, attn)
            projs.append(proj)
        projs = torch.cat(projs, dim=1) # (batch * seq, N, attn)
        # One attention score per n-gram order for each word position.
        u = self.v(torch.tanh(self.U(projs)))
        u = u.view(batch_size * seq_length, len(self.dims))
        a = stable_softmax(u) # (batch * seq, N)
        # weight the original given channels
        for ngram_order in range(len(self.dims)):
            inputs[ngram_order] = inputs[ngram_order] * a[:, ngram_order].view(batch_size * seq_length, 1)
        # o = torch.cat(inputs, dim=-1).view(batch_size, seq_length, sum(self.dims))
        a = a.view(batch_size, seq_length, len(self.dims))
        # Attention weights are detached and moved to CPU (inspection only).
        return inputs, a.data.cpu()
| 8,764 | 38.660633 | 128 | py |
CS_ELMo | CS_ELMo-master/src/modeling/elmo.py | import json
import logging
from typing import Union, List, Dict, Any
import warnings
import torch
from torch.nn.modules import Dropout
import numpy
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import h5py
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.common.checks import ConfigurationError
from allennlp.common import Params
from allennlp.common.util import lazy_groups_of
from allennlp.modules.elmo_lstm import ElmoLstm
from allennlp.modules.highway import Highway
from allennlp.modules.scalar_mix import ScalarMix
from allennlp.nn.util import remove_sentence_boundaries, add_sentence_boundary_token_ids, get_device_of
from allennlp.data.token_indexers.elmo_indexer import ELMoCharacterMapper, ELMoTokenCharactersIndexer
from allennlp.data import Batch
from allennlp.data import Token, Vocabulary, Instance
from allennlp.data.fields import TextField
# Module-level logger for this file.
logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
# pylint: disable=attribute-defined-outside-init
class Elmo(torch.nn.Module):
    """
    Compute ELMo representations using a pre-trained bidirectional language model.
    See "Deep contextualized word representations", Peters et al. for details.
    This module takes character id input and computes ``num_output_representations`` different layers
    of ELMo representations. Typically ``num_output_representations`` is 1 or 2. For example, in
    the case of the SRL model in the above paper, ``num_output_representations=1`` where ELMo was included at
    the input token representation layer. In the case of the SQuAD model, ``num_output_representations=2``
    as ELMo was also included at the GRU output layer.
    In the implementation below, we learn separate scalar weights for each output layer,
    but only run the biLM once on each input sequence for efficiency.
    Parameters
    ----------
    options_file : ``str``, required.
        ELMo JSON options file
    weight_file : ``str``, required.
        ELMo hdf5 weight file
    num_output_representations: ``int``, required.
        The number of ELMo representation to output with
        different linear weighted combination of the 3 layers (i.e.,
        character-convnet output, 1st lstm output, 2nd lstm output).
    requires_grad: ``bool``, optional
        If True, compute gradient of ELMo parameters for fine tuning.
    do_layer_norm : ``bool``, optional, (default = False).
        Should we apply layer normalization (passed to ``ScalarMix``)?
    dropout : ``float``, optional, (default = 0.5).
        The dropout to be applied to the ELMo representations.
    vocab_to_cache : ``List[str]``, optional, (default = None).
        A list of words to pre-compute and cache character convolutions
        for. If you use this option, Elmo expects that you pass word
        indices of shape (batch_size, timesteps) to forward, instead
        of character indices. If you use this option and pass a word which
        wasn't pre-cached, this will break.
    keep_sentence_boundaries : ``bool``, optional, (default = False)
        If True, the representation of the sentence boundary tokens are
        not removed.
    scalar_mix_parameters : ``List[float]``, optional, (default = None)
        If not ``None``, use these scalar mix parameters to weight the representations
        produced by different layers. These mixing weights are not updated during
        training.
    module : ``torch.nn.Module``, optional, (default = None).
        If provided, then use this module instead of the pre-trained ELMo biLM.
        If using this option, then pass ``None`` for both ``options_file``
        and ``weight_file``. The module must provide a public attribute
        ``num_layers`` with the number of internal layers and its ``forward``
        method must return a ``dict`` with ``activations`` and ``mask`` keys
        (see `_ElmoBilm`` for an example). Note that ``requires_grad`` is also
        ignored with this option.
    char_enhancer : ``torch.nn.Module``, optional, (default = None).
        Character n-gram attention module forwarded to the underlying biLM.
    """
    def __init__(self,
                 options_file: str,
                 weight_file: str,
                 num_output_representations: int,
                 requires_grad: bool = False,
                 do_layer_norm: bool = False,
                 dropout: float = 0.5,
                 vocab_to_cache: List[str] = None,
                 keep_sentence_boundaries: bool = False,
                 scalar_mix_parameters: List[float] = None,
                 module: torch.nn.Module = None,
                 char_enhancer: torch.nn.Module = None) -> None:
        super(Elmo, self).__init__()
        logger.info("Initializing ELMo")
        if module is not None:
            if options_file is not None or weight_file is not None:
                raise ConfigurationError(
                    "Don't provide options_file or weight_file with module")
            self._elmo_lstm = module
        else:
            self._elmo_lstm = _ElmoBiLm(options_file,
                                        weight_file,
                                        requires_grad=requires_grad,
                                        vocab_to_cache=vocab_to_cache,
                                        # NOTE(review): the downstream keyword is
                                        # spelled 'char_enhacer' (sic) in _ElmoBiLm.
                                        char_enhacer=char_enhancer)
        self._has_cached_vocab = vocab_to_cache is not None
        self._keep_sentence_boundaries = keep_sentence_boundaries
        self._dropout = Dropout(p=dropout)
        # One ScalarMix per requested output representation; registered as
        # submodules so their weights are trained with the rest of the model.
        self._scalar_mixes: Any = []
        for k in range(num_output_representations):
            scalar_mix = ScalarMix(
                    self._elmo_lstm.num_layers,
                    do_layer_norm=do_layer_norm,
                    initial_scalar_parameters=scalar_mix_parameters,
                    trainable=scalar_mix_parameters is None)
            self.add_module('scalar_mix_{}'.format(k), scalar_mix)
            self._scalar_mixes.append(scalar_mix)
    def get_output_dim(self):
        """Return the dimensionality of each ELMo representation."""
        return self._elmo_lstm.get_output_dim()
    def forward(self,    # pylint: disable=arguments-differ
                inputs: torch.Tensor,
                word_inputs: torch.Tensor = None,
                char_lengths: torch.Tensor = None) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]:
        """
        Parameters
        ----------
        inputs: ``torch.Tensor``, required.
        Shape ``(batch_size, timesteps, 50)`` of character ids representing the current batch.
        word_inputs : ``torch.Tensor``, required.
            If you passed a cached vocab, you can in addition pass a tensor of shape
            ``(batch_size, timesteps)``, which represent word ids which have been pre-cached.
        char_lengths : ``torch.Tensor``, optional.
            Character lengths forwarded to the char enhancer inside the biLM.
        Returns
        -------
        Dict with keys:
        ``'elmo_representations'``: ``List[torch.Tensor]``
            A ``num_output_representations`` list of ELMo representations for the input sequence.
            Each representation is shape ``(batch_size, timesteps, embedding_dim)``
        ``'mask'``:  ``torch.Tensor``
            Shape ``(batch_size, timesteps)`` long tensor with sequence mask.
        """
        # reshape the input if needed
        original_shape = inputs.size()
        if len(original_shape) > 3:
            timesteps, num_characters = original_shape[-2:]
            reshaped_inputs = inputs.view(-1, timesteps, num_characters)
        else:
            reshaped_inputs = inputs
        if word_inputs is not None:
            original_word_size = word_inputs.size()
            if self._has_cached_vocab and len(original_word_size) > 2:
                reshaped_word_inputs = word_inputs.view(-1, original_word_size[-1])
            elif not self._has_cached_vocab:
                logger.warning("Word inputs were passed to ELMo but it does not have a cached vocab.")
                reshaped_word_inputs = None
            else:
                reshaped_word_inputs = word_inputs
        else:
            reshaped_word_inputs = word_inputs
        # run the biLM
        bilm_output = self._elmo_lstm(reshaped_inputs, reshaped_word_inputs, char_lengths=char_lengths)
        layer_activations = bilm_output['activations']
        mask_with_bos_eos = bilm_output['mask']
        # compute the elmo representations
        representations = []
        for i in range(len(self._scalar_mixes)):
            scalar_mix = getattr(self, 'scalar_mix_{}'.format(i))
            representation_with_bos_eos = scalar_mix(layer_activations, mask_with_bos_eos)
            if self._keep_sentence_boundaries:
                processed_representation = representation_with_bos_eos
                processed_mask = mask_with_bos_eos
            else:
                representation_without_bos_eos, mask_without_bos_eos = remove_sentence_boundaries(
                        representation_with_bos_eos, mask_with_bos_eos)
                processed_representation = representation_without_bos_eos
                processed_mask = mask_without_bos_eos
            representations.append(self._dropout(processed_representation))
        # Strip the sentence-boundary positions from the char-enhancement
        # outputs as well, so they align with the returned representations.
        convolutions = bilm_output['char_enhancement']['convolutions']
        lo_attention = bilm_output['char_enhancement']['lo_attention']
        hi_attention = bilm_output['char_enhancement']['hi_attention']
        if convolutions is not None:
            convolutions, _ = remove_sentence_boundaries(convolutions, mask_with_bos_eos)
        if hi_attention is not None and ((isinstance(hi_attention, list) and hi_attention) or isinstance(hi_attention, torch.Tensor)):
            hi_attention, _ = remove_sentence_boundaries(hi_attention, mask_with_bos_eos)
        for i in range(len(lo_attention)):
            if lo_attention[i] is not None:
                lo_attention[i], _ = remove_sentence_boundaries(lo_attention[i], mask_with_bos_eos)
        bilm_output['char_enhancement']['convolutions'] = convolutions
        bilm_output['char_enhancement']['lo_attention'] = lo_attention
        bilm_output['char_enhancement']['hi_attention'] = hi_attention
        # reshape if necessary
        if word_inputs is not None and len(original_word_size) > 2:
            mask = processed_mask.view(original_word_size)
            elmo_representations = [representation.view(original_word_size + (-1, ))
                                    for representation in representations]
        elif len(original_shape) > 3:
            mask = processed_mask.view(original_shape[:-1])
            elmo_representations = [representation.view(original_shape[:-1] + (-1, ))
                                    for representation in representations]
        else:
            mask = processed_mask
            elmo_representations = representations
        return {'elmo_representations': elmo_representations,
                'mask': mask,
                'char_enhancement': bilm_output['char_enhancement']}
    # The add_to_archive logic here requires a custom from_params.
    @classmethod
    def from_params(cls, params: Params) -> 'Elmo':
        # Add files to archive
        params.add_file_to_archive('options_file')
        params.add_file_to_archive('weight_file')
        options_file = params.pop('options_file')
        weight_file = params.pop('weight_file')
        requires_grad = params.pop('requires_grad', False)
        num_output_representations = params.pop('num_output_representations')
        do_layer_norm = params.pop_bool('do_layer_norm', False)
        keep_sentence_boundaries = params.pop_bool('keep_sentence_boundaries', False)
        dropout = params.pop_float('dropout', 0.5)
        scalar_mix_parameters = params.pop('scalar_mix_parameters', None)
        params.assert_empty(cls.__name__)
        # NOTE(review): vocab_to_cache / module / char_enhancer are not
        # configurable through from_params; they keep their defaults here.
        return cls(options_file=options_file,
                   weight_file=weight_file,
                   num_output_representations=num_output_representations,
                   requires_grad=requires_grad,
                   do_layer_norm=do_layer_norm,
                   keep_sentence_boundaries=keep_sentence_boundaries,
                   dropout=dropout,
                   scalar_mix_parameters=scalar_mix_parameters)
def batch_to_ids(batch: List[List[str]]) -> torch.Tensor:
    """
    Converts a batch of tokenized sentences to a tensor representing the sentences with encoded characters
    (len(batch), max sentence length, max word length).
    Parameters
    ----------
    batch : ``List[List[str]]``, required
        A list of tokenized sentences.
    Returns
    -------
    A tensor of padded character ids.
    """
    indexer = ELMoTokenCharactersIndexer()
    # Wrap every sentence in an Instance carrying a character-indexed TextField.
    instances = [
        Instance({"elmo": TextField([Token(word) for word in sentence],
                                    {'character_ids': indexer})})
        for sentence in batch
    ]
    dataset = Batch(instances)
    # Character indexing needs no real vocabulary; an empty one suffices.
    dataset.index_instances(Vocabulary())
    return dataset.as_tensor_dict()['elmo']['character_ids']
class _ElmoCharacterEncoder(torch.nn.Module):
    """
    Compute context insensitive token representation using pretrained biLM.
    This embedder has input character ids of size (batch_size, sequence_length, 50)
    and returns (batch_size, sequence_length + 2, embedding_dim), where embedding_dim
    is specified in the options file (typically 512).
    We add special entries at the beginning and end of each sequence corresponding
    to <S> and </S>, the beginning and end of sentence tokens.
    Note: this is a lower level class useful for advanced usage. Most users should
    use ``ElmoTokenEmbedder`` or ``allennlp.modules.Elmo`` instead.
    Parameters
    ----------
    options_file : ``str``
        ELMo JSON options file
    weight_file : ``str``
        ELMo hdf5 weight file
    requires_grad: ``bool``, optional, (default = False).
        If True, compute gradient of ELMo parameters for fine tuning.
    char_enhancer : ``torch.nn.Module``, optional, (default = None).
        Optional character n-gram attention module applied to the convolutions.
    The relevant section of the options file is something like:
    .. example-code::
        .. code-block:: python
            {'char_cnn': {
                'activation': 'relu',
                'embedding': {'dim': 4},
                'filters': [[1, 4], [2, 8], [3, 16], [4, 32], [5, 64]],
                'max_characters_per_token': 50,
                'n_characters': 262,
                'n_highway': 2
                }
            }
    """
    def __init__(self,
                 options_file: str,
                 weight_file: str,
                 requires_grad: bool = False,
                 char_enhancer: torch.nn.Module = None) -> None:
        super(_ElmoCharacterEncoder, self).__init__()
        with open(cached_path(options_file), 'r') as fin:
            self._options = json.load(fin)
        self._weight_file = weight_file
        self.output_dim = self._options['lstm']['projection_dim']
        self.requires_grad = requires_grad
        self._load_weights()
        # Cache the arrays for use in forward -- +1 due to masking.
        self._beginning_of_sentence_characters = torch.from_numpy(
                numpy.array(ELMoCharacterMapper.beginning_of_sentence_characters) + 1
        )
        self._end_of_sentence_characters = torch.from_numpy(
                numpy.array(ELMoCharacterMapper.end_of_sentence_characters) + 1
        )
        # Store char_enhancer after loading the standard ELMo weights
        self.char_enhancer = char_enhancer
    def get_output_dim(self):
        """Return the projection dimension of the token embeddings."""
        return self.output_dim
    @overrides
    def forward(self, inputs: torch.Tensor, char_lengths: torch.Tensor = None) -> Dict[str, torch.Tensor]:    # pylint: disable=arguments-differ
        """
        Compute context insensitive token embeddings for ELMo representations.
        Parameters
        ----------
        inputs: ``torch.Tensor``
            Shape ``(batch_size, sequence_length, 50)`` of character ids representing the
            current batch.
        char_lengths: ``torch.Tensor``, optional
            Character lengths forwarded to the char enhancer's low-level attention.
        Returns
        -------
        Dict with keys:
        ``'token_embedding'``: ``torch.Tensor``
            Shape ``(batch_size, sequence_length + 2, embedding_dim)`` tensor with context
            insensitive token representations.
        ``'mask'``:  ``torch.Tensor``
            Shape ``(batch_size, sequence_length + 2)`` long tensor with sequence mask.
        """
        # Add BOS/EOS
        mask = ((inputs > 0).long().sum(dim=-1) > 0).long()
        character_ids_with_bos_eos, mask_with_bos_eos = add_sentence_boundary_token_ids(
                inputs,
                mask,
                self._beginning_of_sentence_characters,
                self._end_of_sentence_characters
        )
        # the character id embedding
        max_chars_per_token = self._options['char_cnn']['max_characters_per_token']
        # (batch_size * sequence_length, max_chars_per_token, embed_dim)
        character_embedding = torch.nn.functional.embedding(
                character_ids_with_bos_eos.view(-1, max_chars_per_token),
                self._char_embedding_weights
        )
        # run convolutions
        cnn_options = self._options['char_cnn']
        if cnn_options['activation'] == 'tanh':
            activation = torch.tanh
        elif cnn_options['activation'] == 'relu':
            activation = torch.nn.functional.relu
        else:
            raise ConfigurationError("Unknown activation")
        # (batch_size * sequence_length, embed_dim, max_chars_per_token)
        character_embedding = torch.transpose(character_embedding, 1, 2)
        convs = []
        lo_attention, hi_attention = [], []
        for i in range(len(self._convolutions)):
            conv = getattr(self, 'char_conv_{}'.format(i))
            convolved = conv(character_embedding)
            convolved = activation(convolved)
            # Attention for n-gram-specific features across a word
            if self.char_enhancer is not None:
                # Attention pooling replaces the standard max-pooling below.
                convolved, attn = self.char_enhancer.lo_forward(convolved, i, char_lengths)
                lo_attention.append(attn)
            else:
                # (batch_size * sequence_length, n_filters for this width)
                convolved, _ = torch.max(convolved, dim=-1)
            convs.append(convolved)
        # Attention across n-gram orders
        if self.char_enhancer is not None and self.char_enhancer.hi_enhancer is not None:
            # before concat, weight the n-grams across the orders
            convs, hi_attention = self.char_enhancer.hi_forward(convs, mask_with_bos_eos)
            enhanced_convs = torch.cat(convs[:self.char_enhancer.ngram_order], dim=-1)
        else:
            # (batch_size * sequence_length, n_filters)
            enhanced_convs = None
        token_embedding = torch.cat(convs, dim=-1)
        # apply the highway layers (batch_size * sequence_length, n_filters)
        token_embedding = self._highways(token_embedding)
        # final projection  (batch_size * sequence_length, embedding_dim)
        token_embedding = self._projection(token_embedding)
        # reshape to (batch_size, sequence_length, embedding_dim)
        batch_size, sequence_length, _ = character_ids_with_bos_eos.size()
        if enhanced_convs is not None:
            enhanced_convs = enhanced_convs.view(batch_size, sequence_length, -1)
        # keep a copy of the convolutions for the charngram representations [(batch, sequence_length, channels)]
        # char_ngram_orders = [ngram.view(batch_size, sequence_length, ngram.size(1)) for ngram in convs]
        return {
                'mask': mask_with_bos_eos,
                'token_embedding': token_embedding.view(batch_size, sequence_length, -1),
                'char_enhancement': {
                    'convolutions': enhanced_convs,
                    'lo_attention': lo_attention,
                    'hi_attention': hi_attention
                }
        }
    def _load_weights(self):
        # Load every pretrained parameter group from the hdf5 weight file.
        self._load_char_embedding()
        self._load_cnn_weights()
        self._load_highway()
        self._load_projection()
    def _load_char_embedding(self):
        # Row 0 is reserved for padding, so all pretrained rows shift up by one.
        with h5py.File(cached_path(self._weight_file), 'r') as fin:
            char_embed_weights = fin['char_embed'][...]
        weights = numpy.zeros(
                (char_embed_weights.shape[0] + 1, char_embed_weights.shape[1]),
                dtype='float32'
        )
        weights[1:, :] = char_embed_weights
        self._char_embedding_weights = torch.nn.Parameter(
                torch.FloatTensor(weights), requires_grad=self.requires_grad
        )
    def _load_cnn_weights(self):
        # Build one Conv1d per (width, num_filters) pair and copy its weights.
        cnn_options = self._options['char_cnn']
        filters = cnn_options['filters']
        char_embed_dim = cnn_options['embedding']['dim']
        convolutions = []
        for i, (width, num) in enumerate(filters):
            conv = torch.nn.Conv1d(
                    in_channels=char_embed_dim,
                    out_channels=num,
                    kernel_size=width,
                    bias=True
            )
            # load the weights
            with h5py.File(cached_path(self._weight_file), 'r') as fin:
                weight = fin['CNN']['W_cnn_{}'.format(i)][...]
                bias = fin['CNN']['b_cnn_{}'.format(i)][...]
            w_reshaped = numpy.transpose(weight.squeeze(axis=0), axes=(2, 1, 0))
            if w_reshaped.shape != tuple(conv.weight.data.shape):
                raise ValueError("Invalid weight file")
            conv.weight.data.copy_(torch.FloatTensor(w_reshaped))
            conv.bias.data.copy_(torch.FloatTensor(bias))
            conv.weight.requires_grad = self.requires_grad
            conv.bias.requires_grad = self.requires_grad
            convolutions.append(conv)
            self.add_module('char_conv_{}'.format(i), conv)
        self._convolutions = convolutions
    def _load_highway(self):
        # pylint: disable=protected-access
        # the highway layers have same dimensionality as the number of cnn filters
        cnn_options = self._options['char_cnn']
        filters = cnn_options['filters']
        n_filters = sum(f[1] for f in filters)
        n_highway = cnn_options['n_highway']
        # create the layers, and load the weights
        self._highways = Highway(n_filters, n_highway, activation=torch.nn.functional.relu)
        for k in range(n_highway):
            # The AllenNLP highway is one matrix multplication with concatenation of
            # transform and carry weights.
            with h5py.File(cached_path(self._weight_file), 'r') as fin:
                # The weights are transposed due to multiplication order assumptions in tf
                # vs pytorch (tf.matmul(X, W) vs pytorch.matmul(W, X))
                w_transform = numpy.transpose(fin['CNN_high_{}'.format(k)]['W_transform'][...])
                # -1.0 since AllenNLP is g * x + (1 - g) * f(x) but tf is (1 - g) * x + g * f(x)
                w_carry = -1.0 * numpy.transpose(fin['CNN_high_{}'.format(k)]['W_carry'][...])
                weight = numpy.concatenate([w_transform, w_carry], axis=0)
                self._highways._layers[k].weight.data.copy_(torch.FloatTensor(weight))
                self._highways._layers[k].weight.requires_grad = self.requires_grad
                b_transform = fin['CNN_high_{}'.format(k)]['b_transform'][...]
                b_carry = -1.0 * fin['CNN_high_{}'.format(k)]['b_carry'][...]
                bias = numpy.concatenate([b_transform, b_carry], axis=0)
                self._highways._layers[k].bias.data.copy_(torch.FloatTensor(bias))
                self._highways._layers[k].bias.requires_grad = self.requires_grad
    def _load_projection(self):
        # Final linear projection from the concatenated filters down to output_dim.
        cnn_options = self._options['char_cnn']
        filters = cnn_options['filters']
        n_filters = sum(f[1] for f in filters)
        self._projection = torch.nn.Linear(n_filters, self.output_dim, bias=True)
        with h5py.File(cached_path(self._weight_file), 'r') as fin:
            weight = fin['CNN_proj']['W_proj'][...]
            bias = fin['CNN_proj']['b_proj'][...]
            self._projection.weight.data.copy_(torch.FloatTensor(numpy.transpose(weight)))
            self._projection.bias.data.copy_(torch.FloatTensor(bias))
            self._projection.weight.requires_grad = self.requires_grad
            self._projection.bias.requires_grad = self.requires_grad
class _ElmoBiLm(torch.nn.Module):
    """
    Run a pre-trained bidirectional language model, outputting the activations at each
    layer for weighting together into an ELMo representation (with
    ``allennlp.modules.seq2seq_encoders.Elmo``). This is a lower level class, useful
    for advanced uses, but most users should use ``allennlp.modules.seq2seq_encoders.Elmo``
    directly.
    Parameters
    ----------
    options_file : ``str``
        ELMo JSON options file
    weight_file : ``str``
        ELMo hdf5 weight file
    requires_grad: ``bool``, optional, (default = False).
        If True, compute gradient of ELMo parameters for fine tuning.
    vocab_to_cache : ``List[str]``, optional, (default = None).
        A list of words to pre-compute and cache character convolutions
        for. If you use this option, _ElmoBiLm expects that you pass word
        indices of shape (batch_size, timesteps) to forward, instead
        of character indices. If you use this option and pass a word which
        wasn't pre-cached, this will break.
    char_enhacer : ``torch.nn.Module``, optional, (default = None).
        NOTE(review): parameter name is a typo for 'char_enhancer'; kept
        as-is for backward compatibility with existing callers.
    """
    def __init__(self,
                 options_file: str,
                 weight_file: str,
                 requires_grad: bool = False,
                 vocab_to_cache: List[str] = None,
                 char_enhacer: torch.nn.Module = None) -> None:
        super(_ElmoBiLm, self).__init__()
        self._token_embedder = _ElmoCharacterEncoder(options_file, weight_file,
                                                     requires_grad=requires_grad,
                                                     char_enhancer=char_enhacer)
        self._requires_grad = requires_grad
        if requires_grad and vocab_to_cache:
            logging.warning("You are fine tuning ELMo and caching char CNN word vectors. "
                            "This behaviour is not guaranteed to be well defined, particularly. "
                            "if not all of your inputs will occur in the vocabulary cache.")
        # This is an embedding, used to look up cached
        # word vectors built from character level cnn embeddings.
        self._word_embedding = None
        self._bos_embedding: torch.Tensor = None
        self._eos_embedding: torch.Tensor = None
        if vocab_to_cache:
            logging.info("Caching character cnn layers for words in vocabulary.")
            # This sets 3 attributes, _word_embedding, _bos_embedding and _eos_embedding.
            # They are set in the method so they can be accessed from outside the
            # constructor.
            self.create_cached_cnn_embeddings(vocab_to_cache)
        with open(cached_path(options_file), 'r') as fin:
            options = json.load(fin)
        if not options['lstm'].get('use_skip_connections'):
            raise ConfigurationError('We only support pretrained biLMs with residual connections')
        self._elmo_lstm = ElmoLstm(input_size=options['lstm']['projection_dim'],
                                   hidden_size=options['lstm']['projection_dim'],
                                   cell_size=options['lstm']['dim'],
                                   num_layers=options['lstm']['n_layers'],
                                   memory_cell_clip_value=options['lstm']['cell_clip'],
                                   state_projection_clip_value=options['lstm']['proj_clip'],
                                   requires_grad=requires_grad)
        self._elmo_lstm.load_weights(weight_file)
        # Number of representation layers including context independent layer
        self.num_layers = options['lstm']['n_layers'] + 1
    def get_output_dim(self):
        # Forward and backward LSTM outputs are concatenated, hence the factor 2.
        return 2 * self._token_embedder.get_output_dim()
    def forward(self,    # pylint: disable=arguments-differ
                inputs: torch.Tensor,
                word_inputs: torch.Tensor = None,
                char_lengths: torch.Tensor = None) -> Dict[str, Union[torch.Tensor, List[torch.Tensor]]]:
        """
        Parameters
        ----------
        inputs: ``torch.Tensor``, required.
            Shape ``(batch_size, timesteps, 50)`` of character ids representing the current batch.
        word_inputs : ``torch.Tensor``, required.
            If you passed a cached vocab, you can in addition pass a tensor of shape ``(batch_size, timesteps)``,
            which represent word ids which have been pre-cached.
        char_lengths : ``torch.Tensor``, optional.
            Character lengths forwarded to the char enhancer inside the token embedder.
        Returns
        -------
        Dict with keys:
        ``'activations'``: ``List[torch.Tensor]``
            A list of activations at each layer of the network, each of shape
            ``(batch_size, timesteps + 2, embedding_dim)``
        ``'mask'``:  ``torch.Tensor``
            Shape ``(batch_size, timesteps + 2)`` long tensor with sequence mask.
        Note that the output tensors all include additional special begin and end of sequence
        markers.
        """
        # NOTE(review): the cached-word-embedding fast path was disabled here
        # (commented out); the char CNN always runs so the enhancer is applied.
        # if self._word_embedding is not None and word_inputs is not None:
        #     try:
        #         mask_without_bos_eos = (word_inputs > 0).long()
        #         # The character cnn part is cached - just look it up.
        #         embedded_inputs = self._word_embedding(word_inputs) # type: ignore
        #         # shape (batch_size, timesteps + 2, embedding_dim)
        #         type_representation, mask = add_sentence_boundary_token_ids(
        #                 embedded_inputs,
        #                 mask_without_bos_eos,
        #                 self._bos_embedding,
        #                 self._eos_embedding
        #         )
        #     except RuntimeError:
        #         # Back off to running the character convolutions,
        #         # as we might not have the words in the cache.
        #         token_embedding = self._token_embedder(inputs)
        #         mask = token_embedding['mask']
        #         type_representation = token_embedding['token_embedding']
        # else:
        token_embedding = self._token_embedder(inputs, char_lengths=char_lengths)
        mask = token_embedding['mask']
        type_representation = token_embedding['token_embedding']
        # charngram_embedding = token_embedding['char_ngrams']
        lstm_outputs = self._elmo_lstm(type_representation, mask)
        # Prepare the output. The first layer is duplicated.
        # Because of minor differences in how masking is applied depending
        # on whether the char cnn layers are cached, we'll be defensive and
        # multiply by the mask here. It's not strictly necessary, as the
        # mask passed on is correct, but the values in the padded areas
        # of the char cnn representations can change.
        output_tensors = [
                torch.cat([type_representation, type_representation], dim=-1) * mask.float().unsqueeze(-1)
        ]
        for layer_activations in torch.chunk(lstm_outputs, lstm_outputs.size(0), dim=0):
            output_tensors.append(layer_activations.squeeze(0))
        return {
                'activations': output_tensors,
                'mask': mask,
                'char_enhancement': token_embedding['char_enhancement']
        }
    def create_cached_cnn_embeddings(self, tokens: List[str]) -> None:
        """
        Given a list of tokens, this method precomputes word representations
        by running just the character convolutions and highway layers of elmo,
        essentially creating uncontextual word vectors. On subsequent forward passes,
        the word ids are looked up from an embedding, rather than being computed on
        the fly via the CNN encoder.
        This function sets 3 attributes:
        _word_embedding : ``torch.Tensor``
            The word embedding for each word in the tokens passed to this method.
        _bos_embedding : ``torch.Tensor``
            The embedding for the BOS token.
        _eos_embedding : ``torch.Tensor``
            The embedding for the EOS token.
        Parameters
        ----------
        tokens : ``List[str]``, required.
            A list of tokens to precompute character convolutions for.
        """
        tokens = [ELMoCharacterMapper.bos_token, ELMoCharacterMapper.eos_token] + tokens
        timesteps = 32
        batch_size = 32
        chunked_tokens = lazy_groups_of(iter(tokens), timesteps)
        all_embeddings = []
        device = get_device_of(next(self.parameters()))
        for batch in lazy_groups_of(chunked_tokens, batch_size):
            # Shape (batch_size, timesteps, 50)
            batched_tensor = batch_to_ids(batch)
            # NOTE: This device check is for when a user calls this method having
            # already placed the model on a device. If this is called in the
            # constructor, it will probably happen on the CPU. This isn't too bad,
            # because it's only a few convolutions and will likely be very fast.
            if device >= 0:
                batched_tensor = batched_tensor.cuda(device)
            output = self._token_embedder(batched_tensor)
            token_embedding = output["token_embedding"]
            mask = output["mask"]
            token_embedding, _ = remove_sentence_boundaries(token_embedding, mask)
            all_embeddings.append(token_embedding.view(-1, token_embedding.size(-1)))
        full_embedding = torch.cat(all_embeddings, 0)
        # We might have some trailing embeddings from padding in the batch, so
        # we clip the embedding and lookup to the right size.
        full_embedding = full_embedding[:len(tokens), :]
        embedding = full_embedding[2:len(tokens), :]
        vocab_size, embedding_dim = list(embedding.size())
        from allennlp.modules.token_embedders import Embedding # type: ignore
        self._bos_embedding = full_embedding[0, :]
        self._eos_embedding = full_embedding[1, :]
        self._word_embedding = Embedding(vocab_size, # type: ignore
                                         embedding_dim,
                                         weight=embedding.data,
                                         trainable=self._requires_grad,
                                         padding_index=0)
| 34,407 | 45.185235 | 142 | py |
advbench | advbench-main/advbench/command_launchers.py |
import subprocess
import time
import torch
def local_launcher(commands):
    """Run each command sequentially in the local shell, blocking on each one."""
    for command in commands:
        # shell=True is intentional here: commands are full shell strings
        # generated internally, not untrusted input.
        subprocess.call(command, shell=True)
def dummy_launcher(commands):
    """Print each command instead of executing it (useful for dry runs)."""
    for command in commands:
        print('Dummy launcher: ' + str(command))
def multi_gpu_launcher(commands):
    """Run commands in parallel, one at a time on each visible GPU.

    Polls the per-GPU worker slots once a second; whenever a slot is free
    (no process yet, or its process has exited), the next queued command is
    launched there, pinned via CUDA_VISIBLE_DEVICES. Blocks until every
    launched process has finished.
    """
    n_gpus = torch.cuda.device_count()
    procs_by_gpu = [None for _ in range(n_gpus)]
    while len(commands) > 0:
        for gpu_idx in range(n_gpus):
            proc = procs_by_gpu[gpu_idx]
            # poll() is not None means the previous process on this GPU exited.
            if (proc is None) or (proc.poll() is not None):
                # Nothing is running on this GPU; launch a command
                cmd = commands.pop(0)
                new_proc = subprocess.Popen(
                    f'CUDA_VISIBLE_DEVICES={gpu_idx} {cmd}',
                    shell=True)
                procs_by_gpu[gpu_idx] = new_proc
                break
        time.sleep(1)
    # Wait for the last few tasks to finish before returning
    for p in procs_by_gpu:
        if p is not None:
            p.wait()
# Maps launcher names (as selected from the command line) to implementations.
REGISTRY = {
    'local': local_launcher,
    'dummy': dummy_launcher,
    'multi_gpu': multi_gpu_launcher
}
advbench | advbench-main/advbench/optimizers.py | import torch.optim as optim
import torch
class PrimalDualOptimizer:
    """Projected gradient ascent on the dual variable of a constrained problem.

    Maintains ``parameters['dual_var']`` and, on each step, moves it in the
    direction of the constraint violation ``cost - margin`` scaled by ``eta``,
    projecting the result back onto the nonnegative orthant.
    """

    def __init__(self, parameters, margin, eta):
        self.parameters = parameters  # dict holding the 'dual_var' tensor
        self.margin = margin          # constraint level the cost is compared to
        self.eta = eta                # dual ascent step size

    def step(self, cost):
        """Ascent step: dual_var <- max(0, dual_var + eta * (cost - margin))."""
        self.parameters['dual_var'] = self.relu(self.parameters['dual_var'] + self.eta * (cost - self.margin))

    @staticmethod
    def relu(x):
        # BUG FIX: previously returned `torch.tensor(0).cuda()` for x <= 0,
        # which crashes on CPU-only machines and silently moves the dual
        # variable to a hard-coded device. clamp keeps x's device and dtype.
        return torch.clamp(x, min=0.0)
| 436 | 24.705882 | 110 | py |
advbench | advbench-main/advbench/networks.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from collections import OrderedDict
def Classifier(input_shape, num_classes, hparams):
    """Build a classifier suited to the input shape: a small CNN for
    single-channel (MNIST-like) images, a ResNet-18 for three-channel images.

    Raises:
        ValueError: if the channel count is neither 1 nor 3.
    """
    if input_shape[0] == 1:
        # return SmallCNN()
        return MNISTNet(input_shape, num_classes)
    elif input_shape[0] == 3:
        # return models.resnet18(num_classes=num_classes)
        return ResNet18()
    # BUG FIX: was `assert False`, which is stripped under `python -O` and
    # gives callers an uninformative AssertionError otherwise.
    raise ValueError(f'Unsupported number of input channels: {input_shape[0]}')
class MNISTNet(nn.Module):
    """Small convolutional network for 1-channel 28x28 inputs.

    Two conv layers, max pooling, dropout, and two fully connected layers;
    returns log-probabilities over ``num_classes`` classes.
    """

    def __init__(self, input_shape, num_classes):
        super(MNISTNet, self).__init__()
        self.conv1 = nn.Conv2d(input_shape[0], 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, num_classes)

    def forward(self, x):
        # Convolutional feature extractor with ReLU nonlinearities.
        features = F.relu(self.conv2(F.relu(self.conv1(x))))
        features = self.dropout1(F.max_pool2d(features, 2))
        # Fully connected classifier head on the flattened feature map.
        hidden = F.relu(self.fc1(torch.flatten(features, 1)))
        logits = self.fc2(self.dropout2(hidden))
        # Log-probabilities; downstream losses expect no extra softmax.
        return F.log_softmax(logits, dim=1)
"""Resnet implementation is based on the implementation found in:
https://github.com/YisenWang/MART/blob/master/resnet.py
"""
class BasicBlock(nn.Module):
    """Standard two-convolution residual block."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection shortcut only when spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )

    def forward(self, x):
        branch = self.bn2(self.conv2(F.relu(self.bn1(self.conv1(x)))))
        # Residual connection followed by the final nonlinearity.
        return F.relu(branch + self.shortcut(x))
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block with 4x channel expansion."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        # Projection shortcut only when spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = F.relu(self.bn2(self.conv2(branch)))
        branch = self.bn3(self.conv3(branch))
        # Residual connection followed by the final nonlinearity.
        return F.relu(branch + self.shortcut(x))
class ResNet(nn.Module):
    """ResNet for 32x32 RGB inputs: conv stem, four residual stages, linear head."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest use stride 1.
        layers = []
        for block_stride in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, block_stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)  # 4x4 global pool for 32x32 inputs
        return self.linear(out.view(out.size(0), -1))
def ResNet18():
    """Build an 18-layer ResNet (BasicBlock, two blocks per stage)."""
    return ResNet(BasicBlock, [2, 2, 2, 2])
def ResNet50():
    """Build a 50-layer ResNet (Bottleneck blocks, [3, 4, 6, 3] per stage)."""
    return ResNet(Bottleneck, [3, 4, 6, 3]) | 4,850 | 34.669118 | 104 | py |
advbench | advbench-main/advbench/datasets.py | import torch
from torch.utils.data import Subset, ConcatDataset, TensorDataset
import torchvision.transforms as transforms
from torchvision.datasets import CIFAR10 as CIFAR10_
from torchvision.datasets import MNIST as TorchvisionMNIST
from torchvision.datasets import SVHN as SVHN_
# Canonical split names. NOTE(review): some dataset classes below key their
# splits dict with 'validation' rather than 'val' — confirm which is intended.
SPLITS = ['train', 'val', 'test']
# Dataset class names exposed by this module.
DATASETS = ['CIFAR10', 'MNIST', 'SVHN']
class AdvRobDataset:
    """Base class for adversarial-robustness datasets.

    Subclasses populate ``self.splits`` with torch Datasets and override the
    class-level training constants below.
    """
    N_WORKERS = 8            # Default, subclasses may override
    INPUT_SHAPE = None       # Subclasses should override
    NUM_CLASSES = None       # Subclasses should override
    N_EPOCHS = None          # Subclasses should override
    CHECKPOINT_FREQ = None   # Subclasses should override
    LOG_INTERVAL = None      # Subclasses should override
    HAS_LR_SCHEDULE = False  # Default, subclass may override
    ON_DEVICE = False        # Default, subclass may override
    def __init__(self, device):
        # Initialize every split key (from SPLITS) to None; subclasses fill in.
        self.splits = dict.fromkeys(SPLITS)
        self.device = device
class CIFAR10(AdvRobDataset):
    """CIFAR-10 with standard random-crop/flip augmentation on training data."""
    INPUT_SHAPE = (3, 32, 32)
    NUM_CLASSES = 10
    N_EPOCHS = 115
    CHECKPOINT_FREQ = 10
    LOG_INTERVAL = 100
    HAS_LR_SCHEDULE = True
    def __init__(self, root, device):
        super(CIFAR10, self).__init__(device)
        train_transforms = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()])
        test_transforms = transforms.ToTensor()
        train_data = CIFAR10_(root, train=True, transform=train_transforms)
        self.splits['train'] = train_data
        # self.splits['train'] = Subset(train_data, range(5000))
        # NOTE(review): 'val' is a subset of the full training split used
        # above, so train and val overlap — confirm this is intended.
        train_data = CIFAR10_(root, train=True, transform=train_transforms)
        self.splits['val'] = Subset(train_data, range(45000, 50000))
        self.splits['test'] = CIFAR10_(root, train=False, transform=test_transforms)
    @staticmethod
    def adjust_lr(optimizer, epoch, hparams):
        """Step-decay schedule: x0.1 at epoch 55, x0.01 at 75, x0.001 at 90."""
        lr = hparams['learning_rate']
        if epoch >= 55: # 150
            lr = hparams['learning_rate'] * 0.1
        if epoch >= 75: # 175
            lr = hparams['learning_rate'] * 0.01
        if epoch >= 90: # 190
            lr = hparams['learning_rate'] * 0.001
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
class MNISTTensor(AdvRobDataset):
    """MNIST preloaded as a single tensor resident on ``device``.

    All three splits alias the same full train+test data. NOTE(review):
    pixel values stay in [0, 255] (raw bytes cast to float, no /255
    rescaling), unlike the loader-based MNIST class below — confirm intended.
    """
    N_WORKERS = 0  # Needs to be zero so we don't fetch from GPU
    INPUT_SHAPE = (1, 28, 28)
    NUM_CLASSES = 10
    N_EPOCHS = 50
    CHECKPOINT_FREQ = 10
    LOG_INTERVAL = 100
    HAS_LR_SCHEDULE = True
    ON_DEVICE = True
    def __init__(self, root, device):
        super(MNISTTensor, self).__init__(device)
        train_data = TorchvisionMNIST(
            root=root,
            train=True,
            transform=transforms.ToTensor())
        test_data = TorchvisionMNIST(
            root=root,
            train=False,
            transform=transforms.ToTensor())
        # Concatenate raw train+test images and move them to the device once.
        all_imgs = torch.cat((
            train_data.data,
            test_data.data)).reshape(-1, 1, 28, 28).float().to(self.device)
        all_labels = torch.cat((
            train_data.targets,
            test_data.targets)).to(self.device)
        # NOTE(review): key is 'validation' while SPLITS uses 'val' — confirm.
        self.splits = {
            'train': TensorDataset(all_imgs, all_labels),
            'validation': TensorDataset(all_imgs, all_labels),
            'test': TensorDataset(all_imgs, all_labels)
        }
    @staticmethod
    def adjust_lr(optimizer, epoch, hparams):
        """Step-decay schedule: x0.1 at epoch 25, x0.01 at 35, x0.001 at 40."""
        lr = hparams['learning_rate']
        if epoch >= 25:
            lr = hparams['learning_rate'] * 0.1
        if epoch >= 35:
            lr = hparams['learning_rate'] * 0.01
        if epoch >= 40:
            lr = hparams['learning_rate'] * 0.001
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
class MNIST(AdvRobDataset):
    """MNIST via torchvision loaders; all splits alias the full train+test set."""
    INPUT_SHAPE = (1, 28, 28)
    NUM_CLASSES = 10
    N_EPOCHS = 50
    CHECKPOINT_FREQ = 10
    LOG_INTERVAL = 100
    HAS_LR_SCHEDULE = True
    def __init__(self, root, device):
        super(MNIST, self).__init__(device)
        train_data = TorchvisionMNIST(
            root=root,
            train=True,
            transform=transforms.ToTensor())
        test_data = TorchvisionMNIST(
            root=root,
            train=False,
            transform=transforms.ToTensor())
        # self.splits = {
        #     'train': Subset(train_data, range(54000)),
        #     'validation': Subset(train_data, range(54000, 60000)),
        #     'test': test_data
        # }
        # NOTE(review): key is 'validation' while SPLITS uses 'val' — confirm.
        all_data = ConcatDataset([train_data, test_data])
        self.splits = {
            'train': all_data,
            'validation': all_data,
            'test': all_data
        }
    @staticmethod
    def adjust_lr(optimizer, epoch, hparams):
        """Step-decay schedule: x0.1 at epoch 25, x0.01 at 35, x0.001 at 40."""
        lr = hparams['learning_rate']
        if epoch >= 25:
            lr = hparams['learning_rate'] * 0.1
        if epoch >= 35:
            lr = hparams['learning_rate'] * 0.01
        if epoch >= 40:
            lr = hparams['learning_rate'] * 0.001
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
class SVHN(AdvRobDataset):
    """SVHN with crop/flip augmentation on train; the 'val' split is left None.

    NOTE(review): HAS_LR_SCHEDULE is False even though adjust_lr is defined
    below, so the schedule is presumably never applied — confirm.
    """
    INPUT_SHAPE = (3, 32, 32)
    NUM_CLASSES = 10
    N_EPOCHS = 115
    CHECKPOINT_FREQ = 10
    LOG_INTERVAL = 100
    HAS_LR_SCHEDULE = False
    def __init__(self, root, device):
        super(SVHN, self).__init__(device)
        train_transforms = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()])
        test_transforms = transforms.ToTensor()
        train_data = SVHN_(root, split='train', transform=train_transforms, download=True)
        self.splits['train'] = train_data
        self.splits['test'] = SVHN_(root, split='test', transform=test_transforms, download=True)
    @staticmethod
    def adjust_lr(optimizer, epoch, hparams):
        """Step-decay schedule: x0.1 at epoch 55, x0.01 at 75, x0.001 at 90."""
        lr = hparams['learning_rate']
        if epoch >= 55: # 150
            lr = hparams['learning_rate'] * 0.1
        if epoch >= 75: # 175
            lr = hparams['learning_rate'] * 0.01
        if epoch >= 90: # 190
            lr = hparams['learning_rate'] * 0.001
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr | 6,297 | 31.632124 | 97 | py |
advbench | advbench-main/advbench/attacks.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.laplace import Laplace
class Attack(nn.Module):
    """Base class for input-perturbation attacks against a classifier."""

    def __init__(self, classifier, hparams, device):
        super(Attack, self).__init__()
        self.classifier = classifier  # model under attack
        self.hparams = hparams        # attack hyperparameters
        self.device = device          # device used for generated noise

    def forward(self, imgs, labels):
        """Return adversarially perturbed images. Subclasses implement."""
        raise NotImplementedError
class Attack_Linf(Attack):
    """Base class for attacks whose perturbations live in an l_inf ball."""
    def __init__(self, classifier, hparams, device):
        super(Attack_Linf, self).__init__(classifier, hparams, device)
    def _clamp_perturbation(self, imgs, adv_imgs):
        """Clamp a perturbed image so that (1) the perturbation is bounded
        in the l_inf norm by self.hparams['epsilon'] and (2) so that the
        perturbed image is in [0, 1]^d."""
        eps = self.hparams['epsilon']
        adv_imgs = torch.min(torch.max(adv_imgs, imgs - eps), imgs + eps)
        return torch.clamp(adv_imgs, 0.0, 1.0)
class PGD_Linf(Attack_Linf):
    """Projected gradient descent attack under an l_inf constraint."""
    def __init__(self, classifier, hparams, device):
        super(PGD_Linf, self).__init__(classifier, hparams, device)
    def forward(self, imgs, labels):
        """Run `pgd_n_steps` signed-gradient ascent steps on the cross-entropy
        loss, projecting into the epsilon-ball after each step."""
        self.classifier.eval()  # freeze BN/dropout while crafting the attack
        adv_imgs = imgs.detach() # + 0.001 * torch.randn(imgs.shape).to(self.device).detach() #AR: is this detach necessary?
        for _ in range(self.hparams['pgd_n_steps']):
            adv_imgs.requires_grad_(True)
            with torch.enable_grad():
                adv_loss = F.cross_entropy(self.classifier(adv_imgs), labels)
            grad = torch.autograd.grad(adv_loss, [adv_imgs])[0].detach()
            # Signed-gradient ascent step, then project back into the ball.
            adv_imgs = adv_imgs + self.hparams['pgd_step_size']* torch.sign(grad)
            adv_imgs = self._clamp_perturbation(imgs, adv_imgs)
        self.classifier.train()  # restore training mode
        return adv_imgs.detach() # this detach may not be necessary
class SmoothAdv(Attack_Linf):
    """Attack on the Gaussian-smoothed classifier: maximizes the negative log
    of the average softmax probability of the true label over noisy samples."""
    def __init__(self, classifier, hparams, device):
        super(SmoothAdv, self).__init__(classifier, hparams, device)
    def sample_deltas(self, imgs):
        """Draw Gaussian noise with std `rand_smoothing_sigma`, shaped like imgs."""
        sigma = self.hparams['rand_smoothing_sigma']
        return sigma * torch.randn_like(imgs)
    def forward(self, imgs, labels):
        self.classifier.eval()  # freeze BN/dropout while crafting the attack
        adv_imgs = imgs.detach()
        for _ in range(self.hparams['rand_smoothing_n_steps']):
            adv_imgs.requires_grad_(True)
            loss = 0.
            # Monte Carlo estimate of the smoothed true-label probability.
            for _ in range(self.hparams['rand_smoothing_n_samples']):
                deltas = self.sample_deltas(imgs)
                loss += F.softmax(self.classifier(adv_imgs + deltas), dim=1)[range(imgs.size(0)), labels]
            total_loss = -1. * torch.log(loss / self.hparams['rand_smoothing_n_samples']).mean()
            grad = torch.autograd.grad(total_loss, [adv_imgs])[0].detach()
            # NOTE(review): this step restarts from the clean `imgs` every
            # iteration instead of accumulating from `adv_imgs` — confirm.
            adv_imgs = imgs + self.hparams['rand_smoothing_step_size'] * torch.sign(grad)
            adv_imgs = self._clamp_perturbation(imgs, adv_imgs)
        self.classifier.train()  # restore training mode
        return adv_imgs.detach() # this detach may not be necessary
class TRADES_Linf(Attack_Linf):
    """TRADES inner maximization: ascends the KL divergence between the
    classifier's outputs on perturbed and clean images."""
    def __init__(self, classifier, hparams, device):
        super(TRADES_Linf, self).__init__(classifier, hparams, device)
        self.kl_loss_fn = nn.KLDivLoss(reduction='batchmean') # AR: let's write a method to do the log-softmax part
    def forward(self, imgs, labels):
        self.classifier.eval()  # freeze BN/dropout while crafting the attack
        # Small random start around the clean image.
        adv_imgs = imgs.detach() + 0.001 * torch.randn(imgs.shape).to(self.device).detach() #AR: is this detach necessary?
        for _ in range(self.hparams['trades_n_steps']):
            adv_imgs.requires_grad_(True)
            with torch.enable_grad():
                adv_loss = self.kl_loss_fn(
                    F.log_softmax(self.classifier(adv_imgs), dim=1), # AR: Note that this means that we can't have softmax at output of classifier
                    F.softmax(self.classifier(imgs), dim=1))
            grad = torch.autograd.grad(adv_loss, [adv_imgs])[0].detach()
            adv_imgs = adv_imgs + self.hparams['trades_step_size']* torch.sign(grad)
            adv_imgs = self._clamp_perturbation(imgs, adv_imgs)
        self.classifier.train()  # restore training mode
        return adv_imgs.detach() # this detach may not be necessary
class FGSM_Linf(Attack):
    """Single-step fast gradient sign method attack."""
    def __init__(self, classifier, hparams, device):
        super(FGSM_Linf, self).__init__(classifier, hparams, device)
    def forward(self, imgs, labels):
        self.classifier.eval()  # freeze BN/dropout while crafting the attack
        imgs.requires_grad = True
        adv_loss = F.cross_entropy(self.classifier(imgs), labels)
        grad = torch.autograd.grad(adv_loss, [imgs])[0].detach()
        # One epsilon-sized step in the gradient-sign direction.
        adv_imgs = imgs + self.hparams['epsilon'] * grad.sign()
        adv_imgs = torch.clamp(adv_imgs, 0.0, 1.0)
        self.classifier.train()  # restore training mode
        return adv_imgs.detach()
class LMC_Gaussian_Linf(Attack_Linf):
    """Langevin Monte Carlo sampler with Gaussian noise, projected onto the
    l_inf ball; the objective is log(1 - p_correct)."""
    def __init__(self, classifier, hparams, device):
        super(LMC_Gaussian_Linf, self).__init__(classifier, hparams, device)
    def forward(self, imgs, labels):
        self.classifier.eval()  # freeze BN/dropout while crafting the attack
        batch_size = imgs.size(0)
        # Small random start around the clean image.
        adv_imgs = imgs.detach() + 0.001 * torch.randn(imgs.shape).to(self.device).detach() #AR: is this detach necessary?
        for _ in range(self.hparams['g_dale_n_steps']):
            adv_imgs.requires_grad_(True)
            with torch.enable_grad():
                # Maximize log probability of misclassification.
                adv_loss = torch.log(1 - torch.softmax(self.classifier(adv_imgs), dim=1)[range(batch_size), labels]).mean()
                # adv_loss = F.cross_entropy(self.classifier(adv_imgs), labels)
            grad = torch.autograd.grad(adv_loss, [adv_imgs])[0].detach()
            noise = torch.randn_like(adv_imgs).to(self.device).detach()
            # Signed-gradient step plus Gaussian exploration noise.
            adv_imgs = adv_imgs + self.hparams['g_dale_step_size'] * torch.sign(grad) + self.hparams['g_dale_noise_coeff'] * noise
            adv_imgs = self._clamp_perturbation(imgs, adv_imgs)
        self.classifier.train()  # restore training mode
        return adv_imgs.detach()
class LMC_Laplacian_Linf(Attack_Linf):
    """Langevin Monte Carlo sampler with Laplacian noise applied inside the
    sign(), projected onto the l_inf ball; objective is log(1 - p_correct)."""
    def __init__(self, classifier, hparams, device):
        super(LMC_Laplacian_Linf, self).__init__(classifier, hparams, device)
    def forward(self, imgs, labels):
        self.classifier.eval()  # freeze BN/dropout while crafting the attack
        batch_size = imgs.size(0)
        noise_dist = Laplace(torch.tensor(0.), torch.tensor(1.))
        # Small random start around the clean image.
        adv_imgs = imgs.detach() + 0.001 * torch.randn(imgs.shape).to(self.device).detach() #AR: is this detach necessary?
        for _ in range(self.hparams['l_dale_n_steps']):
            adv_imgs.requires_grad_(True)
            with torch.enable_grad():
                adv_loss = torch.log(1 - torch.softmax(self.classifier(adv_imgs), dim=1)[range(batch_size), labels]).mean()
            grad = torch.autograd.grad(adv_loss, [adv_imgs])[0].detach()
            # NOTE(review): sampled on CPU — may need .to(self.device) when
            # grad lives on GPU; confirm against usage.
            noise = noise_dist.sample(grad.shape)
            adv_imgs = adv_imgs + self.hparams['l_dale_step_size'] * torch.sign(grad + self.hparams['l_dale_noise_coeff'] * noise)
            adv_imgs = self._clamp_perturbation(imgs, adv_imgs)
        self.classifier.train()  # restore training mode
        return adv_imgs.detach() | 7,067 | 43.175 | 148 | py |
advbench | advbench-main/advbench/evalulation_methods.py | import torch
import torch.nn.functional as F
from advbench import attacks
class Evaluator:
    """Base class for test-time evaluation of a trained algorithm."""

    # Sub-class should over-ride
    NAME = ''

    def __init__(self, algorithm, device, test_hparams):
        self.algorithm = algorithm        # algorithm under evaluation
        self.device = device
        self.test_hparams = test_hparams  # evaluation hyperparameters

    def calculate(self, loader):
        """Compute this evaluator's metrics over a loader. Subclasses implement."""
        raise NotImplementedError

    def sample_perturbations(self, imgs):
        """Draw uniform noise in [-epsilon, epsilon] shaped like `imgs`."""
        eps = self.test_hparams['epsilon']
        return 2 * eps * torch.rand_like(imgs) - eps

    @staticmethod
    def clamp_imgs(imgs):
        """Project images back onto the valid pixel range [0, 1]."""
        return torch.clamp(imgs, 0.0, 1.0)
class Clean(Evaluator):
    """Calculates the standard accuracy of a classifier."""
    NAME = 'Clean'
    def __init__(self, algorithm, device, test_hparams):
        super(Clean, self).__init__(algorithm, device, test_hparams)
    @torch.no_grad()
    def calculate(self, loader):
        """Return {'Clean-Accuracy', 'Clean-Loss'} averaged over `loader`."""
        self.algorithm.eval()
        correct, total, loss_sum = 0, 0, 0
        for imgs, labels in loader:
            imgs, labels = imgs.to(self.device), labels.to(self.device)
            logits = self.algorithm.predict(imgs)
            loss_sum += F.cross_entropy(logits, labels, reduction='sum').item()
            preds = logits.argmax(dim=1, keepdim=True)
            correct += preds.eq(labels.view_as(preds)).sum().item()
            total += imgs.size(0)
        self.algorithm.train()  # restore training mode
        return {
            f'{self.NAME}-Accuracy': 100. * correct / total,
            f'{self.NAME}-Loss': loss_sum / total
        }
class Adversarial(Evaluator):
    """Calculates the adversarial accuracy of a classifier."""
    def __init__(self, algorithm, device, attack, test_hparams):
        super(Adversarial, self).__init__(algorithm, device, test_hparams)
        self.attack = attack  # attacks.Attack used to perturb the inputs
    def calculate(self, loader):
        """Return accuracy/loss on attack-perturbed versions of `loader`."""
        self.algorithm.eval()
        correct, total, loss_sum = 0, 0, 0
        for imgs, labels in loader:
            imgs, labels = imgs.to(self.device), labels.to(self.device)
            # Gradients must be enabled here so the attack can run.
            adv_imgs = self.attack(imgs, labels)
            with torch.no_grad():
                logits = self.algorithm.predict(adv_imgs)
                loss_sum += F.cross_entropy(logits, labels, reduction='sum').item()
                preds = logits.argmax(dim=1, keepdim=True)
                correct += preds.eq(labels.view_as(preds)).sum().item()
                total += imgs.size(0)
        self.algorithm.train()  # restore training mode
        return {
            f'{self.NAME}-Accuracy': 100. * correct / total,
            f'{self.NAME}-Loss': float(loss_sum) / total
        }
class PGD(Adversarial):
    """Calculates the PGD adversarial accuracy of a classifier."""
    NAME = 'PGD'
    def __init__(self, algorithm, device, test_hparams):
        # Build the l_inf PGD attack around the algorithm's classifier.
        attack = attacks.PGD_Linf(
            classifier=algorithm.classifier,
            hparams=test_hparams,
            device=device)
        super(PGD, self).__init__(
            algorithm=algorithm,
            device=device,
            attack=attack,
            test_hparams=test_hparams)
class FGSM(Adversarial):
    """Calculates the FGSM adversarial accuracy of a classifier."""
    NAME = 'FGSM'
    def __init__(self, algorithm, device, test_hparams):
        # Build the single-step FGSM attack around the algorithm's classifier.
        attack = attacks.FGSM_Linf(
            classifier=algorithm.classifier,
            hparams=test_hparams,
            device=device)
        super(FGSM, self).__init__(
            algorithm=algorithm,
            device=device,
            attack=attack,
            test_hparams=test_hparams)
class CVaR(Evaluator):
    """Calculates the CVaR loss of a classifier."""
    NAME = 'CVaR'
    def __init__(self, algorithm, device, test_hparams):
        super(CVaR, self).__init__(algorithm, device, test_hparams)
        self.q = self.test_hparams['cvar_sgd_beta']              # tail fraction
        self.n_cvar_steps = self.test_hparams['cvar_sgd_n_steps']  # inner steps on t
        self.M = self.test_hparams['cvar_sgd_M']                 # MC samples per step
        self.step_size = self.test_hparams['cvar_sgd_t_step_size']
    @torch.no_grad()
    def calculate(self, loader):
        """Estimate the CVaR of the per-sample loss under random uniform
        perturbations, optimizing the threshold t by gradient descent."""
        self.algorithm.eval()
        loss_sum, total = 0, 0
        for imgs, labels in loader:
            imgs, labels = imgs.to(self.device), labels.to(self.device)
            # Per-datum thresholds t in the CVaR dual formulation.
            ts = torch.zeros(size=(imgs.size(0),)).to(self.device)
            # perform n steps of optimization to compute inner inf
            for _ in range(self.n_cvar_steps):
                cvar_loss, indicator_sum = 0, 0
                # number of samples in innner expectation in def. of CVaR
                for _ in range(self.M):
                    perturbations = self.sample_perturbations(imgs)
                    perturbed_imgs = self.clamp_imgs(imgs + perturbations)
                    preds = self.algorithm.predict(perturbed_imgs)
                    loss = F.cross_entropy(preds, labels, reduction='none')
                    # Count samples whose loss exceeds the threshold t.
                    indicator_sum += torch.where(
                        loss > ts,
                        torch.ones_like(ts),
                        torch.zeros_like(ts))
                    cvar_loss += F.relu(loss - ts)
                indicator_avg = indicator_sum / float(self.M)
                cvar_loss = (ts + cvar_loss / (self.M * self.q)).mean()
                # gradient update on ts
                grad_ts = (1 - (1 / self.q) * indicator_avg) / float(imgs.size(0))
                ts = ts - self.step_size * grad_ts
            loss_sum += cvar_loss.item() * imgs.size(0)
            total += imgs.size(0)
        self.algorithm.train()  # restore training mode
        return {f'{self.NAME}-Loss': loss_sum / float(total)}
class Augmented(Evaluator):
    """Calculates the augmented accuracy of a classifier."""
    NAME = 'Augmented'
    def __init__(self, algorithm, device, test_hparams):
        super(Augmented, self).__init__(algorithm, device, test_hparams)
        self.n_aug_samples = self.test_hparams['aug_n_samples']  # draws per datum
    @staticmethod
    def quantile_accuracy(q, accuracy_per_datum):
        """Calculate q-Quantile accuracy"""
        # quantile predictions for each data point
        beta_quantile_acc_per_datum = torch.where(
            accuracy_per_datum > (1 - q) * 100.,
            100. * torch.ones_like(accuracy_per_datum),
            torch.zeros_like(accuracy_per_datum))
        return beta_quantile_acc_per_datum.mean().item()
    @torch.no_grad()
    def calculate(self, loader):
        """Return accuracy/loss averaged over `n_aug_samples` random uniform
        perturbations per image, plus optional per-quantile accuracies."""
        self.algorithm.eval()
        correct, total, loss_sum = 0, 0, 0
        correct_per_datum = []
        for imgs, labels in loader:
            imgs, labels = imgs.to(self.device), labels.to(self.device)
            batch_correct_ls = []
            for _ in range(self.n_aug_samples):
                perturbations = self.sample_perturbations(imgs)
                perturbed_imgs = self.clamp_imgs(imgs + perturbations)
                logits = self.algorithm.predict(perturbed_imgs)
                loss_sum += F.cross_entropy(logits, labels, reduction='sum').item()
                preds = logits.argmax(dim=1, keepdim=True)
                # unreduced predictions
                pert_preds = preds.eq(labels.view_as(preds))
                # list of predictions for each data point
                batch_correct_ls.append(pert_preds)
                correct += pert_preds.sum().item()
                total += imgs.size(0)
            # number of correct predictions for each data point
            batch_correct = torch.sum(torch.hstack(batch_correct_ls), dim=1)
            correct_per_datum.append(batch_correct)
        # accuracy for each data point
        accuracy_per_datum = 100. * torch.hstack(correct_per_datum) / self.n_aug_samples
        self.algorithm.train()  # restore training mode
        return_dict = {
            f'{self.NAME}-Accuracy': 100. * correct / total,
            f'{self.NAME}-Loss': loss_sum / total
        }
        if self.test_hparams['test_betas']:
            return_dict.update({
                f'{self.NAME}-{q}-Quantile-Accuracy': self.quantile_accuracy(q, accuracy_per_datum)
                for q in self.test_hparams['test_betas']
            })
        return return_dict
| 8,108 | 32.647303 | 99 | py |
advbench | advbench-main/advbench/algorithms.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
import pandas as pd
import numpy as np
import torch.optim as optim
from advbench import networks
from advbench import optimizers
from advbench import attacks
from advbench.lib import meters
# Registry of algorithm class names selectable from the command line.
# NOTE(review): 'MART' and 'MMA' are defined below but not listed here —
# confirm whether they should be selectable.
ALGORITHMS = [
    'ERM',
    'PGD',
    'FGSM',
    'TRADES',
    'ALP',
    'CLP',
    'Gaussian_DALE',
    'Laplacian_DALE',
    'Gaussian_DALE_PD',
    'Gaussian_DALE_PD_Reverse',
    'KL_DALE_PD',
    'CVaR_SGD',
    'CVaR_SGD_Autograd',
    'CVaR_SGD_PD',
    'ERM_DataAug',
    'TERM',
    'RandSmoothing'
]
class Algorithm(nn.Module):
    """Base class for training algorithms.

    Owns the classifier, its SGD optimizer, and an ordered dict of running
    meters used for logging; subclasses implement `step`.
    """
    def __init__(self, input_shape, num_classes, hparams, device):
        super(Algorithm, self).__init__()
        self.hparams = hparams
        self.classifier = networks.Classifier(
            input_shape, num_classes, hparams)
        self.optimizer = optim.SGD(
            self.classifier.parameters(),
            lr=hparams['learning_rate'],
            momentum=hparams['sgd_momentum'],
            weight_decay=hparams['weight_decay'])
        self.device = device
        self.meters = OrderedDict()
        self.meters['Loss'] = meters.AverageMeter()
        self.meters_df = None  # lazily created by meters_to_df
    def step(self, imgs, labels):
        """Perform one optimization step on a batch. Subclasses implement."""
        raise NotImplementedError
    def predict(self, imgs):
        """Forward pass through the classifier."""
        return self.classifier(imgs)
    @staticmethod
    def img_clamp(imgs):
        """Project images onto the valid pixel range [0, 1]."""
        return torch.clamp(imgs, 0.0, 1.0)
    def reset_meters(self):
        """Zero all logging meters (typically at the start of an epoch)."""
        for meter in self.meters.values():
            meter.reset()
    def meters_to_df(self, epoch):
        """Append the current meter averages as a row and return the DataFrame."""
        if self.meters_df is None:
            columns = ['Epoch'] + list(self.meters.keys())
            self.meters_df = pd.DataFrame(columns=columns)
        values = [epoch] + [m.avg for m in self.meters.values()]
        self.meters_df.loc[len(self.meters_df)] = values
        return self.meters_df
class ERM(Algorithm):
    """Standard empirical risk minimization on clean images."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(ERM, self).__init__(input_shape, num_classes, hparams, device)
    def step(self, imgs, labels):
        """One SGD step on the cross-entropy loss of the clean batch."""
        self.optimizer.zero_grad()
        loss = F.cross_entropy(self.predict(imgs), labels)
        loss.backward()
        self.optimizer.step()
        self.meters['Loss'].update(loss.item(), n=imgs.size(0))
class ERM_DataAug(Algorithm):
    """ERM trained on randomly perturbed (augmented) copies of each batch.

    Averages the cross-entropy loss over `cvar_sgd_M` independent draws of
    uniform perturbations in [-epsilon, epsilon], clamped to valid pixels.
    """
    def __init__(self, input_shape, num_classes, hparams, device):
        super(ERM_DataAug, self).__init__(input_shape, num_classes, hparams, device)
    def sample_deltas(self, imgs):
        """Draw uniform noise in [-epsilon, epsilon] shaped like `imgs`."""
        eps = self.hparams['epsilon']
        return 2 * eps * torch.rand_like(imgs) - eps
    def step(self, imgs, labels):
        self.optimizer.zero_grad()
        loss = 0
        for _ in range(self.hparams['cvar_sgd_M']):
            # BUG FIX: the loss was previously computed on the clean images,
            # so sample_deltas was never used and no augmentation happened.
            perturbed = self.img_clamp(imgs + self.sample_deltas(imgs))
            loss += F.cross_entropy(self.predict(perturbed), labels)
        loss = loss / float(self.hparams['cvar_sgd_M'])
        loss.backward()
        self.optimizer.step()
        self.meters['Loss'].update(loss.item(), n=imgs.size(0))
class TERM(Algorithm):
    """Tilted ERM: exponentially tilts the per-sample losses by `term_t`."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(TERM, self).__init__(input_shape, num_classes, hparams, device)
        self.meters['tilted loss'] = meters.AverageMeter()
        self.t = torch.tensor(self.hparams['term_t'])  # tilt parameter
    def step(self, imgs, labels):
        self.optimizer.zero_grad()
        loss = F.cross_entropy(self.predict(imgs), labels, reduction='none')
        # log-mean-exp of the tilted losses; 1e-6 guards against log(0).
        term_loss = torch.log(torch.exp(self.t * loss).mean() + 1e-6) / self.t
        term_loss.backward()
        self.optimizer.step()
        self.meters['Loss'].update(loss.mean().item(), n=imgs.size(0))
        self.meters['tilted loss'].update(term_loss.item(), n=imgs.size(0))
class PGD(Algorithm):
    """Adversarial training on PGD l_inf perturbations (Madry-style)."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(PGD, self).__init__(input_shape, num_classes, hparams, device)
        self.attack = attacks.PGD_Linf(self.classifier, self.hparams, device)
    def step(self, imgs, labels):
        """One SGD step on the cross-entropy loss of PGD-perturbed images."""
        adv_imgs = self.attack(imgs, labels)
        self.optimizer.zero_grad()
        loss = F.cross_entropy(self.predict(adv_imgs), labels)
        loss.backward()
        self.optimizer.step()
        self.meters['Loss'].update(loss.item(), n=imgs.size(0))
class RandSmoothing(Algorithm):
    """Adversarial training against the SmoothAdv attack (randomized smoothing)."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(RandSmoothing, self).__init__(input_shape, num_classes, hparams, device)
        self.attack = attacks.SmoothAdv(self.classifier, self.hparams, device)
    def step(self, imgs, labels):
        """One SGD step on the cross-entropy loss of SmoothAdv-perturbed images."""
        adv_imgs = self.attack(imgs, labels)
        self.optimizer.zero_grad()
        loss = F.cross_entropy(self.predict(adv_imgs), labels)
        loss.backward()
        self.optimizer.step()
        self.meters['Loss'].update(loss.item(), n=imgs.size(0))
class FGSM(Algorithm):
    """Adversarial training on single-step FGSM perturbations."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(FGSM, self).__init__(input_shape, num_classes, hparams, device)
        self.attack = attacks.FGSM_Linf(self.classifier, self.hparams, device)
    def step(self, imgs, labels):
        """One SGD step on the cross-entropy loss of FGSM-perturbed images."""
        adv_imgs = self.attack(imgs, labels)
        self.optimizer.zero_grad()
        loss = F.cross_entropy(self.predict(adv_imgs), labels)
        loss.backward()
        self.optimizer.step()
        self.meters['Loss'].update(loss.item(), n=imgs.size(0))
class TRADES(Algorithm):
    """TRADES training: natural cross-entropy plus beta-weighted KL divergence
    between the classifier's outputs on adversarial and clean images."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(TRADES, self).__init__(input_shape, num_classes, hparams, device)
        self.kl_loss_fn = nn.KLDivLoss(reduction='batchmean')  # TODO(AR): let's write a method to do the log-softmax part
        self.attack = attacks.TRADES_Linf(self.classifier, self.hparams, device)
        self.meters['clean loss'] = meters.AverageMeter()
        self.meters['invariance loss'] = meters.AverageMeter()
    def step(self, imgs, labels):
        adv_imgs = self.attack(imgs, labels)
        self.optimizer.zero_grad()
        # BUG FIX: the "clean" term was computed on adv_imgs; TRADES defines
        # it as the natural cross-entropy on the unperturbed images.
        clean_loss = F.cross_entropy(self.predict(imgs), labels)
        # KL between predictions on adversarial and clean inputs.
        robust_loss = self.kl_loss_fn(
            F.log_softmax(self.predict(adv_imgs), dim=1),
            F.softmax(self.predict(imgs), dim=1))
        total_loss = clean_loss + self.hparams['trades_beta'] * robust_loss
        total_loss.backward()
        self.optimizer.step()
        self.meters['Loss'].update(total_loss.item(), n=imgs.size(0))
        self.meters['clean loss'].update(clean_loss.item(), n=imgs.size(0))
        self.meters['invariance loss'].update(robust_loss.item(), n=imgs.size(0))
        return {'loss': total_loss.item()}
class LogitPairingBase(Algorithm):
    """Shared base for logit-pairing algorithms (ALP/CLP)."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(LogitPairingBase, self).__init__(input_shape, num_classes, hparams, device)
        self.attack = attacks.PGD_Linf(self.classifier, self.hparams, device)
        self.meters['logit loss'] = meters.AverageMeter()
    def pairing_loss(self, imgs, adv_imgs):
        """Mean l2 norm of the difference between clean and adversarial logits."""
        logit_diff = self.predict(adv_imgs) - self.predict(imgs)
        return torch.norm(logit_diff, dim=1).mean()
class ALP(LogitPairingBase):
    """Adversarial logit pairing: adversarial CE loss plus logit-pairing term."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(ALP, self).__init__(input_shape, num_classes, hparams, device)
        # NOTE(review): the base class already builds an identical PGD attack;
        # this reassignment appears redundant — confirm.
        self.attack = attacks.PGD_Linf(self.classifier, self.hparams, device)
        self.meters['robust loss'] = meters.AverageMeter()
    def step(self, imgs, labels):
        adv_imgs = self.attack(imgs, labels)
        self.optimizer.zero_grad()
        robust_loss = F.cross_entropy(self.predict(adv_imgs), labels)
        logit_pairing_loss = self.pairing_loss(imgs, adv_imgs)
        total_loss = robust_loss + logit_pairing_loss
        total_loss.backward()
        self.optimizer.step()
        self.meters['Loss'].update(total_loss.item(), n=imgs.size(0))
        self.meters['robust loss'].update(robust_loss.item(), n=imgs.size(0))
        self.meters['logit loss'].update(logit_pairing_loss.item(), n=imgs.size(0))
class CLP(LogitPairingBase):
    """Clean logit pairing: clean CE loss plus logit-pairing term."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(CLP, self).__init__(input_shape, num_classes, hparams, device)
        # NOTE(review): the base class already builds an identical PGD attack;
        # this reassignment appears redundant — confirm.
        self.attack = attacks.PGD_Linf(self.classifier, self.hparams, device)
        self.meters['clean loss'] = meters.AverageMeter()
    def step(self, imgs, labels):
        adv_imgs = self.attack(imgs, labels)
        self.optimizer.zero_grad()
        clean_loss = F.cross_entropy(self.predict(imgs), labels)
        logit_pairing_loss = self.pairing_loss(imgs, adv_imgs)
        total_loss = clean_loss + logit_pairing_loss
        total_loss.backward()
        self.optimizer.step()
        self.meters['Loss'].update(total_loss.item(), n=imgs.size(0))
        self.meters['clean loss'].update(clean_loss.item(), n=imgs.size(0))
        self.meters['logit loss'].update(logit_pairing_loss.item(), n=imgs.size(0))
class MART(Algorithm):
    """MART-style training: misclassification-aware adversarial loss plus a
    KL term weighted by (1 - true-class probability) and `mart_beta`."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(MART, self).__init__(input_shape, num_classes, hparams, device)
        self.kl_loss_fn = nn.KLDivLoss(reduction='none')
        self.attack = attacks.PGD_Linf(self.classifier, self.hparams, device)
        self.meters['robust loss'] = meters.AverageMeter()
        self.meters['invariance loss'] = meters.AverageMeter()
    def step(self, imgs, labels):
        adv_imgs = self.attack(imgs, labels)
        self.optimizer.zero_grad()
        clean_output = self.classifier(imgs)
        adv_output = self.classifier(adv_imgs)
        adv_probs = F.softmax(adv_output, dim=1)
        # Top-2 classes on the adversarial output; pick the runner-up when
        # the top prediction equals the true label.
        tmp1 = torch.argsort(adv_probs, dim=1)[:, -2:]
        new_label = torch.where(tmp1[:, -1] == labels, tmp1[:, -2], tmp1[:, -1])
        # CE on the true label plus a margin-style term on the runner-up.
        loss_adv = F.cross_entropy(adv_output, labels) + F.nll_loss(torch.log(1.0001 - adv_probs + 1e-12), new_label)
        nat_probs = F.softmax(clean_output, dim=1)
        true_probs = torch.gather(nat_probs, 1, (labels.unsqueeze(1)).long()).squeeze()
        # KL(adv || clean) per sample, down-weighted for confident samples.
        loss_robust = (1.0 / imgs.size(0)) * torch.sum(
            torch.sum(self.kl_loss_fn(torch.log(adv_probs + 1e-12), nat_probs), dim=1) * (1.0000001 - true_probs))
        loss = loss_adv + self.hparams['mart_beta'] * loss_robust
        loss.backward()
        self.optimizer.step()
        self.meters['Loss'].update(loss.item(), n=imgs.size(0))
        self.meters['robust loss'].update(loss_robust.item(), n=imgs.size(0))
        self.meters['invariance loss'].update(loss_adv.item(), n=imgs.size(0))
class MMA(Algorithm):
    """Placeholder — not implemented (and not listed in ALGORITHMS)."""
    pass
class Gaussian_DALE(Algorithm):
    """Training whose inner maximizer is the Gaussian-noise Langevin MC
    attack; total loss = robust + g_dale_nu * clean."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(Gaussian_DALE, self).__init__(input_shape, num_classes, hparams, device)
        self.attack = attacks.LMC_Gaussian_Linf(self.classifier, self.hparams, device)
        self.meters['clean loss'] = meters.AverageMeter()
        self.meters['robust loss'] = meters.AverageMeter()
    def step(self, imgs, labels):
        adv_imgs = self.attack(imgs, labels)
        self.optimizer.zero_grad()
        clean_loss = F.cross_entropy(self.predict(imgs), labels)
        robust_loss = F.cross_entropy(self.predict(adv_imgs), labels)
        total_loss = robust_loss + self.hparams['g_dale_nu'] * clean_loss
        total_loss.backward()
        self.optimizer.step()
        self.meters['Loss'].update(total_loss.item(), n=imgs.size(0))
        self.meters['clean loss'].update(clean_loss.item(), n=imgs.size(0))
        self.meters['robust loss'].update(robust_loss.item(), n=imgs.size(0))
class Laplacian_DALE(Algorithm):
    """Training whose inner maximizer is the Laplacian-noise Langevin MC
    attack; total loss = robust + l_dale_nu * clean."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(Laplacian_DALE, self).__init__(input_shape, num_classes, hparams, device)
        self.attack = attacks.LMC_Laplacian_Linf(self.classifier, self.hparams, device)
        self.meters['clean loss'] = meters.AverageMeter()
        self.meters['robust loss'] = meters.AverageMeter()
    def step(self, imgs, labels):
        adv_imgs = self.attack(imgs, labels)
        self.optimizer.zero_grad()
        clean_loss = F.cross_entropy(self.predict(imgs), labels)
        robust_loss = F.cross_entropy(self.predict(adv_imgs), labels)
        total_loss = robust_loss + self.hparams['l_dale_nu'] * clean_loss
        total_loss.backward()
        self.optimizer.step()
        self.meters['Loss'].update(total_loss.item(), n=imgs.size(0))
        self.meters['clean loss'].update(clean_loss.item(), n=imgs.size(0))
        self.meters['robust loss'].update(robust_loss.item(), n=imgs.size(0))
class PrimalDualBase(Algorithm):
    """Shared state for the primal-dual (constrained) trainers: a scalar dual
    variable plus the meters common to every *_PD variant. Subclasses create
    self.attack and self.pd_optimizer and implement step()."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(PrimalDualBase, self).__init__(input_shape, num_classes, hparams, device)
        # Dual variable (Lagrange multiplier), updated by a PrimalDualOptimizer.
        self.dual_params = {'dual_var': torch.tensor(1.0).to(self.device)}
        self.meters['clean loss'] = meters.AverageMeter()
        self.meters['robust loss'] = meters.AverageMeter()
        self.meters['dual variable'] = meters.AverageMeter()
class Gaussian_DALE_PD(PrimalDualBase):
    """Primal-dual DALE: minimize robust CE subject to a clean-CE constraint;
    the constraint weight is the learned dual variable."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(Gaussian_DALE_PD, self).__init__(input_shape, num_classes, hparams, device)
        self.attack = attacks.LMC_Gaussian_Linf(self.classifier, self.hparams, device)
        self.pd_optimizer = optimizers.PrimalDualOptimizer(
            parameters=self.dual_params,
            margin=self.hparams['g_dale_pd_margin'],
            eta=self.hparams['g_dale_pd_step_size'])
    def step(self, imgs, labels):
        """Primal step on the Lagrangian, then a dual step on the clean loss."""
        adv_imgs = self.attack(imgs, labels)
        self.optimizer.zero_grad()
        clean_loss = F.cross_entropy(self.predict(imgs), labels)
        robust_loss = F.cross_entropy(self.predict(adv_imgs), labels)
        # Lagrangian: robust objective + dual_var * constrained (clean) loss.
        total_loss = robust_loss + self.dual_params['dual_var'] * clean_loss
        total_loss.backward()
        self.optimizer.step()
        # Dual ascent on the detached constraint value.
        self.pd_optimizer.step(clean_loss.detach())
        self.meters['Loss'].update(total_loss.item(), n=imgs.size(0))
        self.meters['clean loss'].update(clean_loss.item(), n=imgs.size(0))
        self.meters['robust loss'].update(robust_loss.item(), n=imgs.size(0))
        self.meters['dual variable'].update(self.dual_params['dual_var'].item(), n=1)
class CVaR_SGD_Autograd(Algorithm):
    """CVaR training where the per-example threshold t is optimized with
    torch.autograd (contrast with CVaR_SGD below, which uses a closed-form
    subgradient)."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(CVaR_SGD_Autograd, self).__init__(input_shape, num_classes, hparams, device)
        self.meters['avg t'] = meters.AverageMeter()
        self.meters['plain loss'] = meters.AverageMeter()
    def sample_deltas(self, imgs):
        """Uniform perturbation in [-epsilon, epsilon], same shape as imgs."""
        eps = self.hparams['epsilon']
        return 2 * eps * torch.rand_like(imgs) - eps
    def step(self, imgs, labels):
        """Inner loop fits t per example; outer step minimizes the CVaR tail loss."""
        beta, M = self.hparams['cvar_sgd_beta'], self.hparams['cvar_sgd_M']
        ts = torch.ones(size=(imgs.size(0),)).to(self.device)
        self.optimizer.zero_grad()
        # Fit the thresholds ts by gradient descent on the CVaR objective.
        for _ in range(self.hparams['cvar_sgd_n_steps']):
            ts.requires_grad = True
            cvar_loss = 0
            for _ in range(M):
                pert_imgs = self.img_clamp(imgs + self.sample_deltas(imgs))
                curr_loss = F.cross_entropy(self.predict(pert_imgs), labels, reduction='none')
                cvar_loss += F.relu(curr_loss - ts)
            # CVaR estimate: t + E[relu(loss - t)] / beta, averaged over batch.
            cvar_loss = (ts + cvar_loss / (float(M) * beta)).mean()
            grad_ts = torch.autograd.grad(cvar_loss, [ts])[0].detach()
            ts = ts - self.hparams['cvar_sgd_t_step_size'] * grad_ts
            ts = ts.detach()
        # With ts fixed, recompute the tail loss (fresh perturbations) and
        # backprop through the classifier only.
        plain_loss, cvar_loss = 0, 0
        for _ in range(M):
            pert_imgs = self.img_clamp(imgs + self.sample_deltas(imgs))
            curr_loss = F.cross_entropy(self.predict(pert_imgs), labels, reduction='none')
            plain_loss += curr_loss.mean()
            cvar_loss += F.relu(curr_loss - ts)
        cvar_loss = (cvar_loss / (beta * float(M))).mean()
        cvar_loss.backward()
        self.optimizer.step()
        self.meters['Loss'].update(cvar_loss.item(), n=imgs.size(0))
        self.meters['avg t'].update(ts.mean().item(), n=imgs.size(0))
        self.meters['plain loss'].update(plain_loss.item() / M, n=imgs.size(0))
class CVaR_SGD(Algorithm):
    """CVaR training with a manual (closed-form) subgradient update for the
    per-example threshold t."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(CVaR_SGD, self).__init__(input_shape, num_classes, hparams, device)
        self.meters['avg t'] = meters.AverageMeter()
        self.meters['plain loss'] = meters.AverageMeter()
    def sample_deltas(self, imgs):
        """Uniform perturbation in [-epsilon, epsilon], same shape as imgs."""
        eps = self.hparams['epsilon']
        return 2 * eps * torch.rand_like(imgs) - eps
    def step(self, imgs, labels):
        beta = self.hparams['cvar_sgd_beta']
        M = self.hparams['cvar_sgd_M']
        ts = torch.ones(size=(imgs.size(0),)).to(self.device)
        self.optimizer.zero_grad()
        for _ in range(self.hparams['cvar_sgd_n_steps']):
            plain_loss, cvar_loss, indicator_sum = 0, 0, 0
            for _ in range(self.hparams['cvar_sgd_M']):
                pert_imgs = self.img_clamp(imgs + self.sample_deltas(imgs))
                curr_loss = F.cross_entropy(self.predict(pert_imgs), labels, reduction='none')
                # Count how often the sampled loss exceeds the threshold.
                indicator_sum += torch.where(curr_loss > ts, torch.ones_like(ts), torch.zeros_like(ts))
                plain_loss += curr_loss.mean()
                cvar_loss += F.relu(curr_loss - ts)
            indicator_avg = indicator_sum / float(M)
            # CVaR estimate: t + E[relu(loss - t)] / beta, averaged over batch.
            cvar_loss = (ts + cvar_loss / (float(M) * beta)).mean()
            # gradient update on ts
            # Closed-form subgradient of CVaR w.r.t. t: 1 - P(loss > t)/beta.
            grad_ts = (1 - (1 / beta) * indicator_avg) / float(imgs.size(0))
            ts = ts - self.hparams['cvar_sgd_t_step_size'] * grad_ts
        # Backprop the last inner iterate's CVaR loss through the classifier.
        cvar_loss.backward()
        self.optimizer.step()
        self.meters['Loss'].update(cvar_loss.item(), n=imgs.size(0))
        self.meters['avg t'].update(ts.mean().item(), n=imgs.size(0))
        self.meters['plain loss'].update(plain_loss.item() / M, n=imgs.size(0))
class CVaR_SGD_PD(Algorithm):
    """Primal-dual CVaR training: CVaR tail loss plus a dual-weighted average
    (plain) loss; the dual variable is adapted by a PrimalDualOptimizer."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(CVaR_SGD_PD, self).__init__(input_shape, num_classes, hparams, device)
        self.dual_params = {'dual_var': torch.tensor(1.0).to(self.device)}
        self.meters['avg t'] = meters.AverageMeter()
        self.meters['plain loss'] = meters.AverageMeter()
        self.meters['dual variable'] = meters.AverageMeter()
        # NOTE(review): reuses the g_dale_pd_* hparam keys rather than
        # CVaR-specific ones — confirm that is intentional.
        self.pd_optimizer = optimizers.PrimalDualOptimizer(
            parameters=self.dual_params,
            margin=self.hparams['g_dale_pd_margin'],
            eta=self.hparams['g_dale_pd_step_size'])
    def sample_deltas(self, imgs):
        """Uniform perturbation in [-epsilon, epsilon], same shape as imgs."""
        eps = self.hparams['epsilon']
        return 2 * eps * torch.rand_like(imgs) - eps
    def step(self, imgs, labels):
        beta = self.hparams['cvar_sgd_beta']
        M = self.hparams['cvar_sgd_M']
        ts = torch.ones(size=(imgs.size(0),)).to(self.device)
        self.optimizer.zero_grad()
        for _ in range(self.hparams['cvar_sgd_n_steps']):
            plain_loss, cvar_loss, indicator_sum = 0, 0, 0
            for _ in range(self.hparams['cvar_sgd_M']):
                pert_imgs = self.img_clamp(imgs + self.sample_deltas(imgs))
                curr_loss = F.cross_entropy(self.predict(pert_imgs), labels, reduction='none')
                indicator_sum += torch.where(curr_loss > ts, torch.ones_like(ts), torch.zeros_like(ts))
                plain_loss += curr_loss.mean()
                cvar_loss += F.relu(curr_loss - ts)
            indicator_avg = indicator_sum / float(M)
            # CVaR estimate: t + E[relu(loss - t)] / beta, averaged over batch.
            cvar_loss = (ts + cvar_loss / (float(M) * beta)).mean()
            # gradient update on ts
            # Closed-form subgradient of CVaR w.r.t. t: 1 - P(loss > t)/beta.
            grad_ts = (1 - (1 / beta) * indicator_avg) / float(imgs.size(0))
            ts = ts - self.hparams['cvar_sgd_t_step_size'] * grad_ts
        # Lagrangian of the last inner iterate: CVaR + dual_var * mean loss.
        loss = cvar_loss + self.dual_params['dual_var'] * (plain_loss / float(M))
        loss.backward()
        self.optimizer.step()
        # Dual ascent on the detached average loss.
        self.pd_optimizer.step(plain_loss.detach() / M)
        self.meters['Loss'].update(cvar_loss.item(), n=imgs.size(0))
        self.meters['avg t'].update(ts.mean().item(), n=imgs.size(0))
        self.meters['plain loss'].update(plain_loss.item() / M, n=imgs.size(0))
        self.meters['dual variable'].update(self.dual_params['dual_var'].item(), n=1)
class Gaussian_DALE_PD_Reverse(PrimalDualBase):
    """Gaussian_DALE_PD with the roles swapped: minimize CLEAN loss subject to
    a ROBUST-loss constraint (the robust loss carries the dual weight)."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(Gaussian_DALE_PD_Reverse, self).__init__(input_shape, num_classes, hparams, device)
        self.attack = attacks.LMC_Gaussian_Linf(self.classifier, self.hparams, device)
        self.pd_optimizer = optimizers.PrimalDualOptimizer(
            parameters=self.dual_params,
            margin=self.hparams['g_dale_pd_margin'],
            eta=self.hparams['g_dale_pd_step_size'])
    def step(self, imgs, labels):
        """Primal step on the Lagrangian, then a dual step on the robust loss."""
        adv_imgs = self.attack(imgs, labels)
        self.optimizer.zero_grad()
        clean_loss = F.cross_entropy(self.predict(imgs), labels)
        robust_loss = F.cross_entropy(self.predict(adv_imgs), labels)
        # Reversed Lagrangian relative to Gaussian_DALE_PD.
        total_loss = clean_loss + self.dual_params['dual_var'] * robust_loss
        total_loss.backward()
        self.optimizer.step()
        self.pd_optimizer.step(robust_loss.detach())
        self.meters['Loss'].update(total_loss.item(), n=imgs.size(0))
        self.meters['clean loss'].update(clean_loss.item(), n=imgs.size(0))
        self.meters['robust loss'].update(robust_loss.item(), n=imgs.size(0))
        self.meters['dual variable'].update(self.dual_params['dual_var'].item(), n=1)
class KL_DALE_PD(PrimalDualBase):
    """Primal-dual DALE with a TRADES-style robust surrogate: KL divergence
    between adversarial and clean predictive distributions, constrained by the
    clean cross-entropy."""
    def __init__(self, input_shape, num_classes, hparams, device):
        super(KL_DALE_PD, self).__init__(input_shape, num_classes, hparams, device)
        self.attack = attacks.TRADES_Linf(self.classifier, self.hparams, device)
        self.kl_loss_fn = nn.KLDivLoss(reduction='batchmean')
        self.pd_optimizer = optimizers.PrimalDualOptimizer(
            parameters=self.dual_params,
            margin=self.hparams['g_dale_pd_margin'],
            eta=self.hparams['g_dale_pd_step_size'])
    def step(self, imgs, labels):
        """Primal step on the Lagrangian, then a dual step on the clean loss."""
        adv_imgs = self.attack(imgs, labels)
        self.optimizer.zero_grad()
        clean_loss = F.cross_entropy(self.predict(imgs), labels)
        # NOTE(review): self.predict(imgs) is evaluated twice (here and above),
        # an extra forward pass; reusing one set of logits would also change
        # BatchNorm running-stat updates, so it is left as-is.
        robust_loss = self.kl_loss_fn(
            F.log_softmax(self.predict(adv_imgs), dim=1),
            F.softmax(self.predict(imgs), dim=1))
        total_loss = robust_loss + self.dual_params['dual_var'] * clean_loss
        total_loss.backward()
        self.optimizer.step()
        self.pd_optimizer.step(clean_loss.detach())
        self.meters['Loss'].update(total_loss.item(), n=imgs.size(0))
        self.meters['clean loss'].update(clean_loss.item(), n=imgs.size(0))
        self.meters['robust loss'].update(robust_loss.item(), n=imgs.size(0))
        self.meters['dual variable'].update(self.dual_params['dual_var'].item(), n=1)
self.meters['dual variable'].update(self.dual_params['dual_var'].item(), n=1) | 22,949 | 41.657993 | 122 | py |
advbench | advbench-main/advbench/scripts/train.py | import argparse
import torch
from torch.utils.data import DataLoader
import os
import json
import pandas as pd
import time
import collections
from humanfriendly import format_timespan
from advbench import datasets
from advbench import algorithms
from advbench import evalulation_methods
from advbench import hparams_registry
from advbench.lib import misc, meters, reporting
def main(args, hparams, test_hparams):
    """Train one advbench algorithm end-to-end: build loaders, run the epoch
    loop with per-epoch evaluation, append JSON results, and pickle the final
    train/selection DataFrames into args.output_dir."""
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    torch.manual_seed(0)
    # paths for saving output
    json_path = os.path.join(args.output_dir, 'results.json')
    ckpt_path = misc.stage_path(args.output_dir, 'ckpts')
    train_df_path = os.path.join(args.output_dir, 'train.pd')
    selection_df_path = os.path.join(args.output_dir, 'selection.pd')
    # Look up the dataset class by name and instantiate it.
    dataset = vars(datasets)[args.dataset](args.data_dir, device)
    train_loader = DataLoader(
        dataset=dataset.splits['train'],
        batch_size=hparams['batch_size'],
        num_workers=dataset.N_WORKERS,
        pin_memory=False,
        shuffle=True)
    validation_loader = DataLoader(
        dataset=dataset.splits['validation'],
        batch_size=hparams['batch_size'],
        num_workers=dataset.N_WORKERS,
        pin_memory=False,
        shuffle=False)
    test_loader = DataLoader(
        dataset=dataset.splits['test'],
        batch_size=100,
        num_workers=dataset.N_WORKERS,
        pin_memory=False,
        shuffle=False)
    # Look up the algorithm class by name and move it to the device.
    algorithm = vars(algorithms)[args.algorithm](
        dataset.INPUT_SHAPE,
        dataset.NUM_CLASSES,
        hparams,
        device).to(device)
    def save_checkpoint(epoch):
        # Save only the model weights, tagged with the epoch (or 'final').
        torch.save(
            obj={'state_dict': algorithm.state_dict()},
            f=os.path.join(ckpt_path, f'model_ckpt_{epoch}.pkl')
        )
    # Instantiate each requested evaluation method (e.g. 'Clean', attacks).
    evaluators = [
        vars(evalulation_methods)[e](
            algorithm=algorithm,
            device=device,
            output_dir=args.output_dir,
            test_hparams=test_hparams)
        for e in args.evaluators]
    adjust_lr = None if dataset.HAS_LR_SCHEDULE is False else dataset.adjust_lr
    total_time = 0
    for epoch in range(0, dataset.N_EPOCHS):
        if adjust_lr is not None:
            adjust_lr(algorithm.optimizer, epoch, hparams)
        timer = meters.TimeMeter()
        epoch_start = time.time()
        for batch_idx, (imgs, labels) in enumerate(train_loader):
            timer.batch_start()
            # Datasets flagged ON_DEVICE already live on the target device.
            if not dataset.ON_DEVICE:
                imgs, labels = imgs.to(device), labels.to(device)
            algorithm.step(imgs, labels)
            if batch_idx % dataset.LOG_INTERVAL == 0:
                print(f'Epoch {epoch+1}/{dataset.N_EPOCHS} ', end='')
                print(f'[{batch_idx * imgs.size(0)}/{len(train_loader.dataset)}', end=' ')
                print(f'({100. * batch_idx / len(train_loader):.0f}%)]\t', end='')
                for name, meter in algorithm.meters.items():
                    print(f'{name}: {meter.val:.3f} (avg. {meter.avg:.3f})\t', end='')
                print(f'Time: {timer.batch_time.val:.3f} (avg. {timer.batch_time.avg:.3f})')
            timer.batch_end()
        # Collect this epoch's train metrics and evaluator outputs.
        results = {'Epoch': epoch, 'Train': {}, 'Validation': {}, 'Test': {}}
        for name, meter in algorithm.meters.items():
            results['Train'].update({name: meter.avg})
        print('\nTrain')
        misc.print_row([key for key in results['Train'].keys()])
        misc.print_row([results['Train'][key] for key in results['Train'].keys()])
        for evaluator in evaluators:
            for k, v in evaluator.calculate(validation_loader).items():
                results['Validation'].update({k: v})
        print('\nValidation')
        misc.print_row([key for key in results['Validation'].keys()])
        misc.print_row([results['Validation'][key] for key in results['Validation'].keys()])
        for evaluator in evaluators:
            for k, v in evaluator.calculate(test_loader).items():
                results['Test'].update({k: v})
        print('\nTest')
        misc.print_row([key for key in results['Test'].keys()])
        misc.print_row([results['Test'][key] for key in results['Test'].keys()])
        epoch_time = time.time() - epoch_start
        total_time += epoch_time
        results.update({
            'Epoch-Time': epoch_time,
            'Total-Time': total_time})
        # print results
        print(f'Epoch: {epoch+1}/{dataset.N_EPOCHS}\t', end='')
        print(f'Epoch time: {format_timespan(epoch_time)}\t', end='')
        print(f'Total time: {format_timespan(total_time)}')
        # Append one JSON line per epoch (results.json is a JSON-lines log).
        results.update({'hparams': hparams, 'args': vars(args)})
        with open(json_path, 'a') as f:
            f.write(json.dumps(results, sort_keys=True) + '\n')
        if args.save_model_every_epoch is True:
            save_checkpoint(epoch)
        algorithm.reset_meters()
    save_checkpoint('final')
    # Re-read the JSON-lines log and pivot it into per-split DataFrames.
    records = reporting.load_record(json_path)
    train_dict = collections.defaultdict(lambda: [])
    validation_dict = collections.defaultdict(lambda: [])
    test_dict = collections.defaultdict(lambda: [])
    for record in records:
        for k in records[0]['Train'].keys():
            train_dict[k].append(record['Train'][k])
        # NOTE(review): this assumes Validation and Test share the same keys;
        # an evaluator that reports split-specific keys would break this.
        for k in records[0]['Validation'].keys():
            validation_dict[k].append(record['Validation'][k])
            test_dict[k].append(record['Test'][k])
    def dict_to_dataframe(split, d):
        # One row per epoch, annotated with run identity for later aggregation.
        df = pd.DataFrame.from_dict(d)
        df['Split'] = split
        df = df.join(pd.DataFrame({
            'Algorithm': args.algorithm,
            'trial_seed': args.trial_seed,
            'seed': args.seed,
            'path': args.output_dir
        }, index=df.index))
        # NOTE(review): assumes results.json holds exactly N_EPOCHS records;
        # re-running into the same output_dir appends and would break this.
        df['Epoch'] = range(dataset.N_EPOCHS)
        return df
    train_df = dict_to_dataframe('Train', train_dict)
    validation_df = dict_to_dataframe('Validation', validation_dict)
    test_df = dict_to_dataframe('Test', test_dict)
    selection_df = pd.concat([validation_df, test_df], ignore_index=True)
    train_df.to_pickle(train_df_path)
    selection_df.to_pickle(selection_df_path)
    # Sentinel file marking a completed run.
    with open(os.path.join(args.output_dir, 'done'), 'w') as f:
        f.write('done')
if __name__ == '__main__':
    # Command-line entry point: parse args, resolve hyperparameters, persist
    # the run configuration, then launch training via main().
    parser = argparse.ArgumentParser(description='Adversarial robustness')
    parser.add_argument('--data_dir', type=str, default='./advbench/data')
    parser.add_argument('--output_dir', type=str, default='train_output')
    parser.add_argument('--dataset', type=str, default='MNIST', help='Dataset to use')
    parser.add_argument('--algorithm', type=str, default='ERM', help='Algorithm to run')
    parser.add_argument('--hparams', type=str, help='JSON-serialized hparams dict')
    parser.add_argument('--hparams_seed', type=int, default=0, help='Seed for hyperparameters')
    parser.add_argument('--trial_seed', type=int, default=0, help='Trial number')
    parser.add_argument('--seed', type=int, default=0, help='Seed for everything else')
    parser.add_argument('--evaluators', type=str, nargs='+', default=['Clean'])
    parser.add_argument('--save_model_every_epoch', action='store_true')
    args = parser.parse_args()
    os.makedirs(os.path.join(args.output_dir), exist_ok=True)
    print('Args:')
    for k, v in sorted(vars(args).items()):
        print(f'\t{k}: {v}')
    # Persist the CLI arguments alongside the results for reproducibility.
    with open(os.path.join(args.output_dir, 'args.json'), 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    if args.dataset not in vars(datasets):
        raise NotImplementedError(f'Dataset {args.dataset} is not implemented.')
    # hparams_seed == 0 selects the registry defaults; any other value draws a
    # random configuration seeded by (hparams_seed, trial_seed).
    if args.hparams_seed == 0:
        hparams = hparams_registry.default_hparams(args.algorithm, args.dataset)
    else:
        seed = misc.seed_hash(args.hparams_seed, args.trial_seed)
        hparams = hparams_registry.random_hparams(args.algorithm, args.dataset, seed)
    print ('Hparams:')
    for k, v in sorted(hparams.items()):
        print(f'\t{k}: {v}')
    with open(os.path.join(args.output_dir, 'hparams.json'), 'w') as f:
        json.dump(hparams, f, indent=2)
    # Evaluation-time hyperparameters (attack settings etc.) are separate.
    test_hparams = hparams_registry.test_hparams(args.algorithm, args.dataset)
    print('Test hparams:')
    for k, v in sorted(test_hparams.items()):
        print(f'\t{k}: {v}')
    with open(os.path.join(args.output_dir, 'test_hparams.json'), 'w') as f:
        json.dump(test_hparams, f, indent=2)
    main(args, hparams, test_hparams)
advbench | advbench-main/advbench/lib/misc.py | import torch
import hashlib
import sys
import os
import json
from functools import wraps
from time import time
import pandas as pd
import torch.nn.functional as F
import numpy as np
from advbench.lib import meters
def timing(f):
    """Decorator: print how long each call to *f* took, then return its result."""
    @wraps(f)
    def timed(*args, **kwargs):
        started = time()
        result = f(*args, **kwargs)
        elapsed = time() - started
        print(f'func:{f.__name__} took: {elapsed:.3f} sec')
        return result
    return timed
def seed_hash(*args):
    """Derive an integer hash from all args, for use as a random seed."""
    digest = hashlib.md5(str(args).encode("utf-8")).hexdigest()
    return int(digest, 16) % (2 ** 31)
def print_row(row, col_width=10):
    """Print *row* as fixed-width space-separated columns.

    Floats are rendered with 5 decimal places; every cell is left-justified
    and truncated to *col_width* characters.
    """
    sep = " "
    def format_val(x):
        if np.issubdtype(type(x), np.floating):
            x = f'{x:.5f}'
        return str(x).ljust(col_width)[:col_width]
    # BUG FIX: the original called print(text, end_) with end_ = "" as a
    # POSITIONAL argument, so "" was printed as a second value (preceded by
    # the default separator) instead of being used as end=. Print the joined
    # row normally; the default newline terminator is what callers rely on.
    print(sep.join(format_val(x) for x in row))
def stage_path(data_dir, name):
    """Return the path data_dir/name, creating the directory if needed.

    Idempotent: an existing directory is left untouched.
    """
    path = os.path.join(data_dir, name)
    # exist_ok=True avoids the check-then-create race of the original
    # `os.makedirs(path) if not os.path.exists(path) else None`.
    os.makedirs(path, exist_ok=True)
    return path
def read_dict(fname):
    """Load and return the JSON object stored in the file *fname*."""
    with open(fname, 'r') as fp:
        return json.load(fp)
def print_full_df(df):
    """Print *df* with pandas' row/column truncation disabled."""
    display_opts = ('display.max_rows', None, 'display.max_columns', None)
    with pd.option_context(*display_opts):
        print(df)
def sample_deltas(imgs, eps):
    """Draw a uniform random perturbation in [-eps, eps] shaped like *imgs*."""
    return (2 * torch.rand_like(imgs) - 1) * eps
def img_clamp(imgs):
    """Clamp pixel values into the valid image range [0, 1]."""
    return imgs.clamp(min=0.0, max=1.0)
@torch.no_grad()
def cvar_loss(algorithm, loader, device, test_hparams):
    """Evaluate the average CVaR loss of *algorithm* over *loader*.

    The per-example threshold t is fitted with the closed-form CVaR
    subgradient; runs entirely under no_grad (the t-update is manual).
    Temporarily switches the model to eval() and restores train() at the end.
    """
    beta, M = test_hparams['cvar_sgd_beta'], test_hparams['cvar_sgd_M']
    eps = test_hparams['epsilon']
    cvar_meter = meters.AverageMeter()
    algorithm.eval()
    for batch_idx, (imgs, labels) in enumerate(loader):
        imgs, labels = imgs.to(device), labels.to(device)
        ts = torch.zeros(size=(imgs.size(0),)).to(device)
        for _ in range(test_hparams['cvar_sgd_n_steps']):
            # NOTE(review): the local `cvar_loss` shadows this function's name;
            # harmless here, but easy to trip over when editing.
            cvar_loss, indicator_sum = 0, 0
            for _ in range(test_hparams['cvar_sgd_M']):
                pert_imgs = img_clamp(imgs + sample_deltas(imgs, eps))
                curr_loss = F.cross_entropy(algorithm.predict(pert_imgs), labels, reduction='none')
                indicator_sum += torch.where(curr_loss > ts, torch.ones_like(ts), torch.zeros_like(ts))
                cvar_loss += F.relu(curr_loss - ts)
            indicator_avg = indicator_sum / float(M)
            # CVaR estimate: t + E[relu(loss - t)] / beta, averaged over batch.
            cvar_loss = (ts + cvar_loss / (M * beta)).mean()
            # gradient update on ts
            grad_ts = (1 - (1 / beta) * indicator_avg) / float(imgs.size(0))
            ts = ts - test_hparams['cvar_sgd_t_step_size'] * grad_ts
        cvar_meter.update(cvar_loss.item(), n=imgs.size(0))
    algorithm.train()
    return cvar_meter.avg
def cvar_grad_loss(algorithm, loader, device, test_hparams):
    """Evaluate the average CVaR loss, fitting the threshold t by autograd.

    Unlike cvar_loss, this version differentiates the CVaR objective w.r.t.
    ts via torch.autograd.grad, so it cannot run under torch.no_grad().
    Temporarily switches the model to eval() and restores train() at the end.
    """
    beta, M = test_hparams['cvar_sgd_beta'], test_hparams['cvar_sgd_M']
    eps = test_hparams['epsilon']
    cvar_meter = meters.AverageMeter()
    algorithm.eval()
    for batch_idx, (imgs, labels) in enumerate(loader):
        imgs, labels = imgs.to(device), labels.to(device)
        ts = torch.zeros(size=(imgs.size(0),)).to(device)
        for _ in range(test_hparams['cvar_sgd_n_steps']):
            ts.requires_grad = True
            cvar_loss = 0
            for _ in range(M):
                pert_imgs = img_clamp(imgs + sample_deltas(imgs, eps))
                curr_loss = F.cross_entropy(algorithm.predict(pert_imgs), labels, reduction='none')
                cvar_loss += F.relu(curr_loss - ts)
            # CVaR estimate: t + E[relu(loss - t)] / beta, averaged over batch.
            cvar_loss = (ts + cvar_loss / (float(M) * beta)).mean()
            grad_ts = torch.autograd.grad(cvar_loss, [ts])[0].detach()
            ts = ts - test_hparams['cvar_sgd_t_step_size'] * grad_ts
            ts = ts.detach()  # drop graph history before the next iteration
        cvar_meter.update(cvar_loss.item(), n=imgs.size(0))
    algorithm.train()
    return cvar_meter.avg
class Tee:
    """Duplicate everything written to it to both stdout and a log file,
    like Unix `tee`; assign an instance to sys.stdout to capture prints."""
    def __init__(self, fname, mode="a"):
        self.stdout = sys.stdout
        self.file = open(fname, mode)
    def write(self, message):
        self.stdout.write(message)
        self.file.write(message)
        self.flush()  # flush eagerly so the log survives a crash
    def flush(self):
        self.stdout.flush()
        self.file.flush()
LiveQ | LiveQ-master/liveq-dashboard/dashboard/server.py | ################################################################
# LiveQ - An interactive volunteering computing batch system
# Copyright (C) 2013 Ioannis Charalampidis
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
################################################################
import logging
import threading
import tornado.escape
import tornado.web
import tornado.websocket
import os.path
import uuid
from dashboard.dashboardsocket import DashboardSocket
from dashboard.config import Config
"""
Tornado Application class of the LiveQ Server
"""
class DashboardServer(tornado.web.Application):
    """Tornado application serving the dashboard page and its WebSocket."""
    def __init__(self):
        # Setup handlers: the root page and the dashboard WebSocket endpoint,
        # both rooted under Config.BASE_URL.
        handlers = [
            (r"%s/?" % Config.BASE_URL, MainHandler),
            (r"%s/socket" % Config.BASE_URL, DashboardSocket),
        ]
        # Get root dir of files (two levels above this module).
        filesDir = os.path.dirname(os.path.dirname(__file__))
        # Setup tornado settings.
        # NOTE(review): cookie_secret is hard-coded in source; consider moving
        # it to configuration so deployments don't share a signing key.
        settings = dict(
            cookie_secret="zvn429jvANancjcaffeg4saacn40129def",
            template_path=os.path.join(filesDir, "templates"),
            static_path=os.path.join(filesDir, "static"),
            static_url_prefix="/dashboard/static/",
            xsrf_cookies=True,
        )
        # Setup tornado application
        tornado.web.Application.__init__(self, handlers, **settings)
"""
Root page handler
"""
class MainHandler(tornado.web.RequestHandler):
    """Root page handler: renders the dashboard's main template."""
    def get(self):
        # GET <BASE_URL>/ -> templates/index.html
        self.render("index.html")
| 1,994 | 29.692308 | 81 | py |
visualize_atari | visualize_atari-master/saliency.py | # Visualizing and Understanding Atari Agents | Sam Greydanus | 2017 | MIT License
from __future__ import print_function
import warnings ; warnings.filterwarnings('ignore') # mute warnings, live dangerously ;)
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from scipy.misc import imresize # preserves single-pixel info _unlike_ img = img[::2,::2]
# IDIOM FIX (PEP 8 E731): these were lambdas assigned to names; plain defs
# keep the same callables/signatures but gain real __name__s and docstrings.
def prepro(img):
    """Crop the Atari frame to the play area, grayscale, resize to 80x80,
    and scale to [0, 1] with shape (1, 80, 80)."""
    return imresize(img[35:195].mean(2), (80, 80)).astype(np.float32).reshape(1, 80, 80) / 255.

def searchlight(I, mask):
    """Blur everything EXCEPT the masked region (mask==1 keeps the original)."""
    return I * mask + gaussian_filter(I, sigma=3) * (1 - mask)

def occlude(I, mask):
    """Blur ONLY the masked region (mask==1 selects the area to perturb)."""
    return I * (1 - mask) + gaussian_filter(I, sigma=3) * mask
def get_mask(center, size, r):
    """Return a float mask of shape *size*: a one-pixel-radius disk at
    *center*, Gaussian-blurred with sigma=r, renormalized to peak at 1."""
    cy, cx = center
    yy, xx = np.ogrid[-cy:size[0] - cy, -cx:size[1] - cx]
    mask = np.zeros(size)
    mask[xx * xx + yy * yy <= 1] = 1   # select a small circle of pixels
    mask = gaussian_filter(mask, sigma=r)  # spread it out with a 2D Gaussian
    return mask / mask.max()
def run_through_model(model, history, ix, interp_func=None, mask=None, blur_memory=None, mode='actor'):
    """Forward frame *ix* of a saved rollout through *model*, optionally
    perturbing the input (via interp_func + mask) or the LSTM memory.

    Returns the critic value when mode == 'critic', else the policy logits.
    """
    if mask is None:
        im = prepro(history['ins'][ix])
    else:
        # BUG FIX: the original wrote `assert(cond, msg)`, which asserts a
        # non-empty TUPLE — always truthy, so the check never fired.
        assert interp_func is not None, "interp func cannot be none"
        im = interp_func(prepro(history['ins'][ix]).squeeze(), mask).reshape(1, 80, 80)  # perturb input I -> I'
    tens_state = torch.Tensor(im)
    state = Variable(tens_state.unsqueeze(0), volatile=True)
    hx = Variable(torch.Tensor(history['hx'][ix - 1]).view(1, -1))
    cx = Variable(torch.Tensor(history['cx'][ix - 1]).view(1, -1))
    if blur_memory is not None:
        cx.mul_(1 - blur_memory)  # perturb (attenuate) the memory vector
    return model((state, (hx, cx)))[0] if mode == 'critic' else model((state, (hx, cx)))[1]
def score_frame(model, history, ix, r, d, interp_func, mode='actor'):
    """Saliency map for frame *ix*: perturb the input at a grid of locations
    and score each location by how much the model's output changes."""
    # r: radius of blur
    # d: density of scores (if d==1, then get a score for every pixel...
    # if d==2 then every other, which is 25% of total pixels for a 2D image)
    assert mode in ['actor', 'critic'], 'mode must be either "actor" or "critic"'
    # Unperturbed reference output.
    L = run_through_model(model, history, ix, interp_func, mask=None, mode=mode)
    scores = np.zeros((int(80/d)+1, int(80/d)+1))  # saliency scores S(t,i,j)
    for i in range(0, 80, d):
        for j in range(0, 80, d):
            mask = get_mask(center=[i, j], size=[80, 80], r=r)
            l = run_through_model(model, history, ix, interp_func, mask=mask, mode=mode)
            # Half squared-L2 distance between perturbed and reference output.
            scores[int(i/d), int(j/d)] = (L-l).pow(2).sum().mul_(.5).data[0]
    pmax = scores.max()
    # Upsample the coarse score grid back to frame resolution, preserving the
    # original peak magnitude.
    scores = imresize(scores, size=[80, 80], interp='bilinear').astype(np.float32)
    return pmax * scores / scores.max()
def saliency_on_atari_frame(saliency, atari, fudge_factor, channel=2, sigma=0):
    """Overlay an 80x80 saliency map onto a full RGB Atari frame by adding it
    (scaled by *fudge_factor*) into one color channel of the play area."""
    # sometimes saliency maps are a bit clearer if you blur them
    # slightly...sigma adjusts the radius of that blur
    pmax = saliency.max()
    S = imresize(saliency, size=[160, 160], interp='bilinear').astype(np.float32)
    S = S if sigma == 0 else gaussian_filter(S, sigma=sigma)
    # Shift to zero minimum, then rescale so the peak is fudge_factor * pmax.
    S -= S.min() ; S = fudge_factor*pmax * S / S.max()
    # Work in uint16 so the addition cannot wrap around before clipping.
    I = atari.astype('uint16')
    I[35:195, :, channel] += S.astype('uint16')  # rows 35:195 = play area
    I = I.clip(1, 255).astype('uint8')
    return I
def get_env_meta(env_name):
    """Return per-game saliency fudge factors ('critic_ff', 'actor_ff').

    Prints a warning and returns an empty dict for unsupported games.
    """
    known = {
        "Pong-v0": {'critic_ff': 600, 'actor_ff': 500},
        "Breakout-v0": {'critic_ff': 600, 'actor_ff': 300},
        "SpaceInvaders-v0": {'critic_ff': 400, 'actor_ff': 400},
    }
    if env_name in known:
        return dict(known[env_name])
    print('environment "{}" not supported'.format(env_name))
    return {}
visualize_atari | visualize_atari-master/overfit_atari.py | # Visualizing and Understanding Atari Agents | Sam Greydanus | 2017 | MIT License
from __future__ import print_function
import warnings ; warnings.filterwarnings('ignore') # mute warnings, live dangerously ;)
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import gym, sys
import numpy as np
from scipy.misc import imresize # preserves single-pixel info _unlike_ img = img[::2,::2]
sys.path.append('..')
from visualize_atari import *
prepro = lambda img: imresize(img[35:195].mean(2), (80,80)).astype(np.float32).reshape(1,80,80)/255.
class OverfitAtari():
    """Wrap an Atari gym env so each frame is stamped with an on-screen cue
    derived from a pretrained expert policy (used for the 'overfit' study)."""
    def __init__(self, env_name, expert_dir, seed=0):
        self.atari = gym.make(env_name) ; self.atari.seed(seed)
        self.action_space = self.atari.action_space
        # Pretrained expert whose action choice drives the on-screen cue.
        self.expert = NNPolicy(channels=1, num_actions=self.action_space.n)
        self.expert.try_load(expert_dir)
        self.cx = Variable(torch.zeros(1, 256)) # lstm memory vector
        self.hx = Variable(torch.zeros(1, 256)) # lstm activation vector
    def seed(self, s):
        self.atari.seed(s) ; torch.manual_seed(s)
    def reset(self):
        # Clear the expert's recurrent state along with the env.
        self.cx = Variable(torch.zeros(1, 256))
        self.hx = Variable(torch.zeros(1, 256))
        return self.atari.reset()
    def step(self, action):
        """Step the env; returns (state, reward, done, target) where target is
        the expert's one-hot action distribution (in place of gym's info)."""
        state, reward, done, info = self.atari.step(action)
        expert_state = torch.Tensor(prepro(state)) # get expert policy and incorporate it into environment
        _, logit, (hx, cx) = self.expert((Variable(expert_state.view(1, 1, 80, 80)), (self.hx, self.cx)))
        self.hx, self.cx = Variable(hx.data), Variable(cx.data)
        expert_action = int(F.softmax(logit).data.max(1)[1][0, 0])
        target = torch.zeros(logit.size()) ; target[0, expert_action] = 1
        # j, k: pixel offset and spacing of the per-action indicator strip.
        j = 72 ; k = 5
        # NOTE(review): `if False` makes this ALWAYS pick a random action for
        # the on-screen cue, ignoring the expert — looks like a debug toggle
        # left flipped; confirm which behavior is intended.
        expert_action = expert_action if False else np.random.randint(self.atari.action_space.n)
        for i in range(self.atari.action_space.n):
            # Paint the chosen action's cell bright (250) and the rest dim (50).
            state[37:41, j + k*i: j+1+k*i, :] = 250 if expert_action == i else 50
        return state, reward, done, target
visualize_atari | visualize_atari-master/policy.py | # Visualizing and Understanding Atari Agents | Sam Greydanus | 2017 | MIT License
from __future__ import print_function
import warnings ; warnings.filterwarnings('ignore') # mute warnings, live dangerously ;)
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import glob
import numpy as np
from scipy.misc import imresize # preserves single-pixel info _unlike_ img = img[::2,::2]
class NNPolicy(torch.nn.Module): # an actor-critic neural network
    """Recurrent actor-critic: 4 strided conv layers -> LSTMCell(256) ->
    separate critic (value) and actor (logits) heads."""
    def __init__(self, channels, num_actions):
        super(NNPolicy, self).__init__()
        self.conv1 = nn.Conv2d(channels, 32, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.conv4 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        # 80x80 input halves four times -> 5x5 spatial map with 32 channels.
        self.lstm = nn.LSTMCell(32 * 5 * 5, 256)
        self.critic_linear, self.actor_linear = nn.Linear(256, 1), nn.Linear(256, num_actions)

    def forward(self, inputs):
        """inputs is (state, (hx, cx)); returns (value, logits, (hx, cx))."""
        inputs, (hx, cx) = inputs
        x = F.elu(self.conv1(inputs))
        x = F.elu(self.conv2(x))
        x = F.elu(self.conv3(x))
        x = F.elu(self.conv4(x))
        x = x.view(-1, 32 * 5 * 5)  # flatten for the LSTM cell
        hx, cx = self.lstm(x, (hx, cx))
        return self.critic_linear(hx), self.actor_linear(hx), (hx, cx)

    def try_load(self, save_dir, checkpoint='*.tar'):
        """Load the newest checkpoint matching save_dir + checkpoint.

        Checkpoint files are expected to embed their step count as the
        second-to-last dot-separated token (e.g. model.40.tar). Returns the
        loaded step, or 0 when no checkpoint was found.
        """
        paths = glob.glob(save_dir + checkpoint)
        step = 0
        if len(paths) > 0:
            ckpts = [int(s.split('.')[-2]) for s in paths]
            ix = np.argmax(ckpts)
            step = ckpts[ix]
            self.load_state_dict(torch.load(paths[ix]))
        # BUG FIX: the original tested `step is 0` — identity comparison with
        # an int literal, which is implementation-dependent (and fails for
        # numpy ints); use equality instead.
        if step == 0:
            print("\tno saved models")
        else:
            print("\tloaded model: {}".format(paths[ix]))
        return step
visualize_atari | visualize_atari-master/make_movie.py | # Visualizing and Understanding Atari Agents | Sam Greydanus | 2017 | MIT License
from __future__ import print_function
import warnings ; warnings.filterwarnings('ignore') # mute warnings, live dangerously
import matplotlib.pyplot as plt
import matplotlib as mpl ; mpl.use("Agg")
import matplotlib.animation as manimation
import gym, os, sys, time, argparse
sys.path.append('..')
from visualize_atari import *
def make_movie(env_name, checkpoint='*.tar', num_frames=20, first_frame=0, resolution=75, \
               save_dir='./movies/', density=5, radius=5, prefix='default', overfit_mode=False):
    """Roll out a trained policy and render an mp4 where each frame overlays
    actor saliency (blue channel) and critic saliency (red channel)."""
    # set up dir variables and environment
    load_dir = '{}{}/'.format('overfit-' if overfit_mode else '', env_name.lower())
    meta = get_env_meta(env_name)
    env = gym.make(env_name) if not overfit_mode else OverfitAtari(env_name, load_dir+'expert/', seed=0) # make a seeded env
    # set up agent
    model = NNPolicy(channels=1, num_actions=env.action_space.n)
    model.try_load(load_dir, checkpoint=checkpoint)
    # get a rollout of the policy
    movie_title = "{}-{}-{}.mp4".format(prefix, num_frames, env_name.lower())
    print('\tmaking movie "{}" using checkpoint at {}{}'.format(movie_title, load_dir, checkpoint))
    max_ep_len = first_frame + num_frames + 1  # only need frames up to the last rendered one
    torch.manual_seed(0)
    history = rollout(model, env, max_ep_len=max_ep_len)
    print()
    # make the movie!
    start = time.time()
    FFMpegWriter = manimation.writers['ffmpeg']
    metadata = dict(title=movie_title, artist='greydanus', comment='atari-saliency-video')
    writer = FFMpegWriter(fps=8, metadata=metadata)
    prog = '' ; total_frames = len(history['ins'])
    f = plt.figure(figsize=[6, 6*1.3], dpi=resolution)
    with writer.saving(f, save_dir + movie_title, resolution):
        for i in range(num_frames):
            ix = first_frame+i
            if ix < total_frames: # prevent loop from trying to process a frame ix greater than rollout length
                frame = history['ins'][ix].squeeze().copy()
                actor_saliency = score_frame(model, history, ix, radius, density, interp_func=occlude, mode='actor')
                critic_saliency = score_frame(model, history, ix, radius, density, interp_func=occlude, mode='critic')
                # Actor saliency -> blue channel, critic saliency -> red channel.
                frame = saliency_on_atari_frame(actor_saliency, frame, fudge_factor=meta['actor_ff'], channel=2)
                frame = saliency_on_atari_frame(critic_saliency, frame, fudge_factor=meta['critic_ff'], channel=0)
                plt.imshow(frame) ; plt.title(env_name.lower(), fontsize=15)
                writer.grab_frame() ; f.clear()
                tstr = time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - start))
                print('\ttime: {} | progress: {:.1f}%'.format(tstr, 100*i/min(num_frames, total_frames)), end='\r')
    print('\nfinished.')
# user might also want to access make_movie function from some other script
# user might also want to access make_movie function from some other script
if __name__ == '__main__':
    # CLI wrapper around make_movie(); argument names mirror its parameters.
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('-e', '--env', default='Breakout-v0', type=str, help='gym environment')
    parser.add_argument('-d', '--density', default=5, type=int, help='density of grid of gaussian blurs')
    parser.add_argument('-r', '--radius', default=5, type=int, help='radius of gaussian blur')
    parser.add_argument('-f', '--num_frames', default=20, type=int, help='number of frames in movie')
    parser.add_argument('-i', '--first_frame', default=150, type=int, help='index of first frame')
    parser.add_argument('-dpi', '--resolution', default=75, type=int, help='resolution (dpi)')
    parser.add_argument('-s', '--save_dir', default='./movies/', type=str, help='dir to save agent logs and checkpoints')
    parser.add_argument('-p', '--prefix', default='default', type=str, help='prefix to help make video name unique')
    parser.add_argument('-c', '--checkpoint', default='*.tar', type=str, help='checkpoint name (in case there is more than one')
    parser.add_argument('-o', '--overfit_mode', default=False, type=bool, help='analyze an overfit environment (see paper)')
    args = parser.parse_args()
    make_movie(args.env, args.checkpoint, args.num_frames, args.first_frame, args.resolution,
               args.save_dir, args.density, args.radius, args.prefix, args.overfit_mode)
| 4,337 | 53.911392 | 128 | py |
visualize_atari | visualize_atari-master/rollout.py | # Visualizing and Understanding Atari Agents | Sam Greydanus | 2017 | MIT License
from __future__ import print_function
import warnings ; warnings.filterwarnings('ignore') # mute warnings, live dangerously ;)
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
from scipy.misc import imresize # preserves single-pixel info _unlike_ img = img[::2,::2]
prepro = lambda img: imresize(img[35:195].mean(2), (80,80)).astype(np.float32).reshape(1,80,80)/255.
def rollout(model, env, max_ep_len=3e3, render=False):
    """Play one episode of `env` with `model`, recording per-step data.

    Returns a dict of parallel lists: raw observations ('ins'), LSTM
    hidden/cell states ('hx', 'cx'), pre-softmax action scores ('logits'),
    value estimates ('values') and action probabilities ('outs').
    """
    trace = {'ins': [], 'logits': [], 'values': [], 'outs': [], 'hx': [], 'cx': []}
    frame = torch.Tensor(prepro(env.reset()))  # preprocessed first frame
    step_count, total_reward, eploss, done = 0, 0, 0, False
    hx = Variable(torch.zeros(1, 256))
    cx = Variable(torch.zeros(1, 256))
    while not done and step_count <= max_ep_len:
        step_count += 1
        value, logit, (hx, cx) = model((Variable(frame.view(1,1,80,80)), (hx, cx)))
        # re-wrap the recurrent state so gradients do not flow across steps
        hx, cx = Variable(hx.data), Variable(cx.data)
        prob = F.softmax(logit)
        action = prob.max(1)[1].data  # greedy action choice
        obs, reward, done, expert_policy = env.step(action.numpy()[0])
        if render:
            env.render()
        frame = torch.Tensor(prepro(obs))
        total_reward += reward
        # record everything a later visualization pass might need
        trace['ins'].append(obs)
        trace['hx'].append(hx.squeeze(0).data.numpy())
        trace['cx'].append(cx.squeeze(0).data.numpy())
        trace['logits'].append(logit.data.numpy()[0])
        trace['values'].append(value.data.numpy()[0])
        trace['outs'].append(prob.data.numpy()[0])
        print('\tstep # {}, reward {:.0f}'.format(step_count, total_reward), end='\r')
    return trace
| 1,802 | 39.977273 | 100 | py |
MAESTROeX | MAESTROeX-main/sphinx_docs/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# MAESTROeX documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 25 18:42:54 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import re
import shlex
import subprocess
import sys
import breathe
sys.path.insert(0, os.path.abspath('../../'))
sys.path.append(os.path.dirname(breathe.__file__))
def get_version():
    """Return the most recent git tag (via ``git describe``), or '' on failure.

    The raw ``git describe`` output ends with a newline; it is stripped so
    the version string embeds cleanly in the generated docs.
    """
    prog = shlex.split("git describe --tags --abbrev=0")
    try:
        result = subprocess.run(prog, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    except OSError:
        # git is not installed or not runnable; fall back to an empty version
        return ""
    # On a non-zero exit (e.g. no tags) stdout is empty, yielding "".
    return result.stdout.decode('utf-8').strip()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx_math_dollar',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinxcontrib.bibtex',
'sphinx.ext.autosummary',
'numpydoc',
'sphinx.ext.githubpages',
'sphinx_rtd_theme',
'sphinx_copybutton',
'sphinx-prompt',
'breathe',
'IPython.sphinxext.ipython_console_highlighting',
'sphinx.ext.intersphinx']
# bibtex
bibtex_bibfiles = ["maestro_doc.bib"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# see https://github.com/phn/pytpm/issues/3#issuecomment-12133978
numpydoc_show_class_members = False
# The main toctree document.
main_doc = 'index'
# General information about the project.
project = 'MAESTROeX'
copyright = '2018-2023, MAESTROeX development tem'
author = 'MAESTROeX development team'
html_logo = "maestroex_logo.png"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = get_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for MathJax
# for sphinx-math-dollar
# MathJax settings for sphinx-math-dollar: dollar-delimited math is
# rewritten to \( .. \) / \[ .. \], so register those delimiters here.
mathjax3_config = {}
mathjax3_config["tex"] = {
    "inlineMath": [['\\(', '\\)']],
    "displayMath": [["\\[", "\\]"]],
}
mathjax3_config["tex"]["macros"] = {}
# Mirror each \newcommand from mathsymbols.tex into MathJax macros so the
# HTML build renders the same custom LaTeX commands as the PDF build.
with open('mathsymbols.tex', 'r') as f:
    for line in f:
        # groups per match: (name, "[n]" or '', n or '', body);
        # the argument-count group "[n]" is optional
        macros = re.findall(r'\\newcommand{\\(.*?)}(\[(\d)\])?{(.+)}', line)
        for macro in macros:
            if len(macro[1]) == 0:
                # macro without arguments: value is just the wrapped body
                mathjax3_config['tex']['macros'][macro[0]
                                                ] = "{" + macro[3] + "}"
            else:
                # macro with arguments: MathJax expects [body, arg_count]
                mathjax3_config['tex']['macros'][macro[0]] = [
                    "{" + macro[3] + "}", int(macro[2])]
# Number all equations and figures so they can be cross-referenced.
math_eqref_format = "Eq.{number}"
math_number_all = True
num_fig = True
math_num_fig = True
numfig = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ["theme_overrides.css"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'MAESTROeXdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(main_doc, 'MAESTROeX.tex', 'MAESTROeX Documentation',
'MAESTROeX development team', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(main_doc, 'MAESTROeX', 'MAESTROeX Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(main_doc, 'MAESTROeX', 'MAESTROeX Documentation',
author, 'MAESTROeX', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for breathe -------------------------------------------------
breathe_projects = {
"maestroex": "../doxy_files/xml",
}
breathe_default_project = "maestroex"
breathe_default_members = ('members', 'undoc-members', 'protected-members',
'private-members')
breathe_doxygen_config_options = {'EXTRACT_ALL': 'YES',
'SHOW_USED_FILES': 'YES', 'RECURSIVE': 'YES'
}
# -- Options for intersphinx --------------------------------------------
intersphinx_mapping = {
'amrex': ('https://amrex-codes.github.io/amrex/docs_html/', None)
}
| 7,732 | 28.628352 | 79 | py |
MTL-Bioinformatics-2016 | MTL-Bioinformatics-2016-master/models/baseline_config.py | class Defaults(object):
window_size = 7
hidden_sizes = [300]
hidden_activation = 'relu'
max_vocab_size = 1000000
optimizer = 'sgd' # 'adam'
learning_rate = 0.1 # 1e-4
epochs = 20
iobes = True # Map tags to IOBES on input
max_tokens = None # Max dataset size in tokens
encoding = 'utf-8' # Data encoding
output_drop_prob = 0.0 # Dropout probablility prior to output
token_level_eval = False # Force token-level evaluation
verbosity = 1 # 0=quiet, 1=progress bar, 2=one line per epoch
fixed_wordvecs = False # Don't fine-tune word vectors
word_features = True
batch_size = 50
viterbi = True
# Learning rate multiplier for embeddings. This is a tweak to
# implement faster learning for embeddings compared to other
# layers. As the feature is not yet implemented in Keras master
# (see https://github.com/fchollet/keras/pull/1991), this option
# currently requires the fork https://github.com/spyysalo/keras .
embedding_lr_multiplier = 1.0
| 1,050 | 41.04 | 69 | py |
MTL-Bioinformatics-2016 | MTL-Bioinformatics-2016-master/models/multi-output_MT-var-dataset.py | #!/usr/bin/env python
from __future__ import print_function
from logging import info
import random
import numpy as np
import datetime
from ltlib.evaluation import conll_summary, per_type_summary, is_iob_tagging
from keras.models import Model
from keras.layers import Input, Embedding, merge, Flatten, Dense
from keras.layers import Reshape, Convolution2D, Dropout
from ltlib import filelog
from ltlib import conlldata
from ltlib import viterbi
from ltlib.features import NormEmbeddingFeature, SennaCapsFeature
from ltlib.features import windowed_inputs
from ltlib.callbacks import token_evaluator, EpochTimer
from ltlib.layers import concat, inputs_and_embeddings
from ltlib.settings import cli_settings, log_settings
from ltlib.optimizers import get_optimizer
from ltlib.util import unique
from ltlib.output import save_token_predictions_multi_output
from config import Defaults
config = cli_settings(['datadir', 'datasets','wordvecs'], Defaults)
assert len(config.filter_nums) == len(config.filter_sizes)
datasets = config.datasets.split(',')
data = []
max_fs = []
max_vfs = []
for ind, dataset in enumerate(datasets):
data_path = config.datadir + '/' + dataset
data.append(conlldata.load_dir(data_path, config))
max_fs.append((0.0,0.0))
max_vfs.append((0.0,0.0))
max_y = 0
for ind, ds in enumerate(data):
y_shape = ds.train.tokens.targets.shape
if y_shape[1] > max_y:
max_y = y_shape[1]
all_vocab = set()
for ind, dataset in enumerate(datasets):
all_vocab = set().union(*[all_vocab, data[ind].vocabulary])
w2v = NormEmbeddingFeature.from_file(config.wordvecs,
max_rank=config.max_vocab_size,
vocabulary=all_vocab,
name='words')
features = [w2v]
if config.word_features:
features.append(SennaCapsFeature('caps'))
for ind, dataset in enumerate(datasets):
data[ind].tokens.add_features(features)
data[ind].tokens.add_inputs(windowed_inputs(config.window_size, features))
# Log word vector feature stat summary
info('{}: {}'.format(config.wordvecs, w2v.summary()))
inputs, embeddings = inputs_and_embeddings(features, config)
# Combine and reshape for convolution
seq = concat(embeddings)
cshape = (config.window_size, sum(f.output_dim for f in features))
seq = Reshape((1,) + cshape)(seq)
# Convolutions
conv_outputs = []
for filter_size, filter_num in zip(config.filter_sizes, config.filter_nums):
conv = Convolution2D(filter_num, filter_size, cshape[1],activation='relu')(seq)
cout = Flatten()(conv)
conv_outputs.append(cout)
seq = concat(conv_outputs)
for size in config.hidden_sizes:
seq = Dense(size, activation=config.hidden_activation)(seq)
seq = Dropout(config.output_drop_prob)(seq)
#Create private outputs
outs = []
for ind, dataset in enumerate(datasets):
#outs.append(Dense(data[ind].tokens.target_dim, activation='softmax')(seq))
outs.append(Dense(max_y, activation='softmax')(seq))
model = Model(input=inputs, output=outs)
optimizer = get_optimizer(config)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
ind_bounds = [(0, len(ds.train.tokens.inputs['words'])) for ds in data]
if config.percent_keep < 1.0:
percnt_keep = config.percent_keep
#Only limit first dataset. Other remains full
for ind, ds in enumerate(data[:1]):
amt_keep = len(ds.train.tokens.inputs['words']) * percnt_keep
print("%s: Total: %s. Keeping: %s" % (datasets[ind], len(ds.train.tokens.inputs['words']), amt_keep))
start = random.randrange(int(len(ds.train.tokens.inputs['words']) - amt_keep + 1))
end = int(start + amt_keep)
ind_bounds[ind] = (start,end)
# Assemble one concatenated training matrix over all (possibly truncated)
# datasets. Labels are zero-padded to max_y columns so every output head of
# the multi-output model sees a uniform target width. If the per-dataset
# label blocks have incompatible shapes, concatenation is abandoned and the
# training loop below falls back to per-dataset batching.
x_batch = []
y_batch = []
concatenated = True
for ind, ds in enumerate(data):
    start, end = ind_bounds[ind]
    x_batch.append(ds.train.tokens.inputs['words'][start:end])
    # One all-zero dummy label block per dataset; the real labels replace
    # the block belonging to the current dataset below.
    out_labels = [np.zeros((dataset.train.tokens.targets[ind_bounds[ind_][0]:ind_bounds[ind_][1]].shape[0], max_y)) for ind_, dataset in enumerate(data)]
    y_ = ds.train.tokens.targets[start:end]
    if y_.shape[1] < max_y:
        # Pad narrower tag sets with zero columns up to the widest one.
        y_ = np.concatenate([y_, np.zeros((y_.shape[0], max_y - y_.shape[1]))], axis=1)
    out_labels[ind] = y_
    try:
        y_batch.append(np.concatenate(out_labels, axis=0))
    except ValueError:
        # np.concatenate raises ValueError on mismatched shapes; a bare
        # except here would also swallow KeyboardInterrupt and real bugs.
        print("Cannot concatenate Datasets. Expect slower run time.")
        concatenated = False
        break
if concatenated:
    x_batch = np.concatenate(x_batch, axis=0)
"""
for ind, ds in enumerate(data):
x_batch.append(ds.train.tokens.inputs['words'])
#out_labels = [np.zeros(data[ind_].train.tokens.targets.shape) for ind_, dataset in enumerate(datasets)]
#out_labels[ind] = data[ind].train.tokens.targets
out_labels = [np.zeros((data[ind_].train.tokens.targets.shape[0], max_y)) for ind_, dataset in enumerate(datasets)]
y_ = data[ind].train.tokens.targets
if y_.shape[1] < max_y:
y_ = np.concatenate([y_, np.zeros((y_.shape[0],max_y - y_.shape[1]))], axis=1)
out_labels[ind] = y_
try:
y_batch.append(np.concatenate(out_labels, axis=0))
except:
print("Cannot concatenate Datasets. Expect slower run time.")
concatenated = False
break
if concatenated:
x_batch = np.concatenate(x_batch, axis=0)
"""
time_str = datetime.datetime.now().isoformat()
print("Started training at: %s" % time_str)
for step in range(1, config.train_steps + 1):
if concatenated:
start = random.randrange(len(x_batch))
end = start + config.batch_size
x = x_batch[start:end]
y = [y_[start:end] for y_ in y_batch]
else:
#Untested in var dataset context
y = []
data_ind = random.randrange(len(data))
#Start create batch
start = random.randrange(len(data[data_ind].train.tokens.inputs['words']))
end = start + config.batch_size
x = data[data_ind].train.tokens.inputs['words'][start:end]
#End create batch
#Keras requires labels for all outputs. Create dummy outputs for ones not being trained.
out_labels = []
out_labels = [np.zeros(data[ind].train.tokens.targets[0:len(x)].shape) for ind, dataset in enumerate(datasets)]
out_labels[data_ind] = data[data_ind].train.tokens.targets[start:end]
for ind, ol in enumerate(out_labels):
if len(y) == len(data):
y[ind] = np.concatenate([y[ind], ol], axis=0)
else:
y.append(ol)
model.train_on_batch(x, y)
if step % config.evaluate_every == 0 and step >= config.evaluate_min:
time_str = datetime.datetime.now().isoformat()
info("\nEvaluating. Time: {}. Step: {}".format(time_str, step))
for eval_ind, dataset in enumerate(datasets[:1]):
info("Dataset: {}".format(datasets[eval_ind]))
predictions = model.predict(data[eval_ind].devel.tokens.inputs)
if type(predictions).__name__ != 'list':
predictions = [predictions]
pred = predictions[eval_ind]
if is_iob_tagging(unique(data[eval_ind].tokens.target_strs)):
data[eval_ind].devel.tokens.set_predictions(pred)
summary = conll_summary(data[eval_ind].devel.sentences)
#Track Maxes
f_score = summary.split(':')[2].split('%')[0].strip()
try:
f_score = float(f_score)
except:
print("%s is not floatable!" % f_score)
if f_score > max_fs[eval_ind][0]:
max_fs[eval_ind] = (f_score, max_fs[eval_ind][0])
save_token_predictions_multi_output(data[eval_ind].test, conlldata.write, predictions=pred)
elif f_score > max_fs[eval_ind][1]:
max_fs[eval_ind] = (max_fs[eval_ind][0], f_score)
save_token_predictions_multi_output(data[eval_ind].test, conlldata.write, predictions=pred)
#End Track Maxes
info("{}".format(summary))
info("Max Fs: {}".format(str(max_fs[eval_ind])))
if config.viterbi:
vmapper = viterbi.TokenPredictionMapper(data[eval_ind].train.sentences)
data[eval_ind].devel.sentences.map_predictions(vmapper)
info(vmapper.summary())
vsummary = conll_summary(data[eval_ind].devel.sentences)
#Track Maxes
vf_score = vsummary.split(':')[2].split('%')[0].strip()
try:
vf_score = float(vf_score)
except:
print("Viterbi %s is not floatable!" % vf_score)
if vf_score > max_vfs[eval_ind][0]:
max_vfs[eval_ind] = (vf_score, max_vfs[eval_ind][0])
save_token_predictions_multi_output(data[eval_ind].test, conlldata.write, vmapper=vmapper)
elif vf_score > max_vfs[eval_ind][1]:
max_vfs[eval_ind] = (max_vfs[eval_ind][0], vf_score)
save_token_predictions_multi_output(data[eval_ind].test, conlldata.write, vmapper=vmapper)
#End Track Maxes
info("{}".format(vsummary))
info("Max Viterbi Fs: {}".format(str(max_vfs[eval_ind])))
else:
info("{}".format(per_type_summary(data[eval_ind].tokens)))
| 9,676 | 39.320833 | 153 | py |
MTL-Bioinformatics-2016 | MTL-Bioinformatics-2016-master/models/MT-dependent.py | #!/usr/bin/env python
from __future__ import print_function
from logging import info
import random
import numpy as np
import datetime
from ltlib.evaluation import conll_summary, per_type_summary, is_iob_tagging
from keras.models import Model
from keras.layers import Input, Embedding, merge, Flatten, Dense
from keras.layers import Reshape, Convolution2D, Dropout
from ltlib import filelog
from ltlib import conlldata
from ltlib import viterbi
from ltlib.features import NormEmbeddingFeature, SennaCapsFeature
from ltlib.features import windowed_inputs
from ltlib.callbacks import token_evaluator, EpochTimer
from ltlib.layers import concat, inputs_and_embeddings
from ltlib.settings import cli_settings, log_settings
from ltlib.optimizers import get_optimizer
from ltlib.util import unique
from ltlib.output import save_token_predictions_multi_output
from config import Defaults
config = cli_settings(['datadir', 'datasets','wordvecs'], Defaults)
assert len(config.filter_nums) == len(config.filter_sizes)
datasets = config.datasets.split(',')
data = []
max_fs = []
max_vfs = []
vmappers = []
for ind, dataset in enumerate(datasets):
data_path = config.datadir + '/' + dataset
if ind != 0:
config.iobes = True
data.append(conlldata.load_dir(data_path, config))
max_fs.append((0.0,0.0))
max_vfs.append((0.0,0.0))
all_vocab = set()
for ind, dataset in enumerate(datasets):
all_vocab = set().union(*[all_vocab, data[ind].vocabulary])
for ind, dataset in enumerate(datasets):
w2v = NormEmbeddingFeature.from_file(config.wordvecs,
max_rank=config.max_vocab_size,
vocabulary=all_vocab,
name='words-%s' % ind)
features = [w2v]
if config.word_features:
features.append(SennaCapsFeature('caps'))
data[ind].tokens.add_features(features)
data[ind].tokens.add_inputs(windowed_inputs(config.window_size, features))
# Log word vector feature stat summary
info('{}: {}'.format(config.wordvecs, w2v.summary()))
if ind == 0:
pos_inputs, pos_embeddings = inputs_and_embeddings(features, config)
pos_x = concat(pos_embeddings)
if ind == 1:
ner_inputs, ner_embeddings = inputs_and_embeddings(features, config)
ner_x = concat(ner_embeddings)
cshapes = []
reshapes = []
# Combine and reshape for convolution
pos_cshape = (config.window_size, sum(f.output_dim for f in features))
ner_cshape = (config.window_size, sum(f.output_dim for f in features))
cshapes.append(pos_cshape)
cshapes.append(ner_cshape)
pos_reshape = Reshape((1,) + (pos_cshape), name='pos-reshape')(pos_x)
ner_reshape = Reshape((1,) + (ner_cshape), name='ner-reshape')(ner_x)
reshapes.append(pos_reshape)
reshapes.append(ner_reshape)
# Convolutions
conv_outputs = []
fully_connected = []
dropout = []
for ind, dataset in enumerate(datasets):
conv_outputs.append([])
for filter_size, filter_num in zip(config.filter_sizes, config.filter_nums):
conv = Convolution2D(filter_num, filter_size, cshapes[ind][1], activation='relu', name='convolution-%d-%d' % (ind,filter_size))(reshapes[ind])
flatten = Flatten(name='flatten-%d-%d' % (ind,filter_size))(conv)
conv_outputs[ind].append(flatten)
seq = concat(conv_outputs[ind])
for size in config.hidden_sizes:
fully_connected.append(Dense(size, activation=config.hidden_activation, name='dense-1-%d' % ind)(seq))
dropout.append(Dropout(config.output_drop_prob, name='dropout-%d' % ind)(fully_connected[ind]))
pos_dense_out = Dense(data[0].tokens.target_dim, activation='softmax', name='pos-dense-out')(dropout[0])
ner_merged = merge([dropout[0], dropout[1]], mode='concat')
ner_dense_out = Dense(data[1].tokens.target_dim, activation='softmax', name='ner-dense-out')(ner_merged)
pos_model = Model(input=pos_inputs, output=pos_dense_out)
ner_model = Model(input=pos_inputs + ner_inputs, output=ner_dense_out)
pos_model.compile(optimizer=get_optimizer(config), loss='categorical_crossentropy', metrics=['accuracy'])
ner_model.compile(optimizer=get_optimizer(config), loss='categorical_crossentropy', metrics=['accuracy'])
models = [pos_model, ner_model]
time_str = datetime.datetime.now().isoformat()
print("Started training at: %s" % time_str)
for ind, ds in enumerate(data):
for ep in range(1, config.epochs + 1):
percnt_keep = config.percent_keep
amt_keep = len(ds.train.tokens.inputs['words-%s' % ind]) * percnt_keep
print("Total: %s. Keeping: %s" % (len(ds.train.tokens.inputs['words-%s' % ind]), amt_keep))
start = random.randrange(int(len(ds.train.tokens.inputs['words-%s' % ind]) - amt_keep) + 1)
end = int(start + amt_keep)
x = ds.train.tokens.inputs['words-%s' % ind][start:end]
if ind > 0:
x = [x, x]
models[ind].fit(
x,
ds.train.tokens.targets[start:end],
batch_size=config.batch_size,
nb_epoch=1,
verbose=config.verbosity
)
time_str = datetime.datetime.now().isoformat()
info("\nEvaluating. Time: {}. Epoch: {}".format(time_str, ep))
info("Dataset: {}".format(datasets[ind]))
if ind > 0:
predictions = models[ind].predict([ds.test.tokens.inputs['words-%s' % ind], ds.test.tokens.inputs['words-%s' % ind]])
else:
predictions = models[ind].predict(ds.test.tokens.inputs['words-%s' % ind])
if is_iob_tagging(unique(ds.tokens.target_strs)):
ds.test.tokens.set_predictions(predictions)
summary = conll_summary(ds.test.sentences)
#Track Maxes
f_score = summary.split(':')[2].split('%')[0].strip()
try:
f_score = float(f_score)
except:
print("%s is not floatable!" % f_score)
if f_score > max_fs[ind][0]:
max_fs[ind] = (f_score, max_fs[ind][0])
save_token_predictions_multi_output(data[eval_ind].test, conlldata.write, predictions=pred)
elif f_score > max_fs[ind][1]:
max_fs[ind] = (max_fs[ind][0], f_score)
save_token_predictions_multi_output(data[eval_ind].test, conlldata.write, predictions=pred)
#End Track Maxes
info("{}".format(summary))
info("Max Fs: {}".format(str(max_fs[ind])))
if config.viterbi:
vmapper = viterbi.TokenPredictionMapper(ds.train.sentences)
ds.test.sentences.map_predictions(vmapper)
info(vmapper.summary())
vsummary = conll_summary(ds.test.sentences)
#Track Maxes
vf_score = vsummary.split(':')[2].split('%')[0].strip()
try:
vf_score = float(vf_score)
except:
print("Viterbi %s is not floatable!" % vf_score)
if vf_score > max_vfs[ind][0]:
max_vfs[ind] = (vf_score, max_vfs[ind][0])
save_token_predictions_multi_output(data[eval_ind].test, conlldata.write, vmapper=vmapper)
elif vf_score > max_vfs[ind][1]:
max_vfs[ind] = (max_vfs[ind][0], vf_score)
save_token_predictions_multi_output(data[eval_ind].test, conlldata.write, vmapper=vmapper)
#End Track Maxes
info("{}".format(vsummary))
info("Max Viterbi Fs: {}".format(str(max_vfs[ind])))
else:
ds.test.tokens.set_predictions(predictions)
info("{}".format(per_type_summary(ds.test.tokens)))
| 7,786 | 39.984211 | 150 | py |
MTL-Bioinformatics-2016 | MTL-Bioinformatics-2016-master/models/multi-output_MT.py | #!/usr/bin/env python
from __future__ import print_function
from logging import info
import random
import numpy as np
import datetime
from ltlib.evaluation import conll_summary, per_type_summary, is_iob_tagging
from keras.models import Model
from keras.layers import Input, Embedding, merge, Flatten, Dense
from keras.layers import Reshape, Convolution2D, Dropout
from ltlib import filelog
from ltlib import conlldata
from ltlib import viterbi
from ltlib.features import NormEmbeddingFeature, SennaCapsFeature
from ltlib.features import windowed_inputs
from ltlib.callbacks import token_evaluator, EpochTimer
from ltlib.layers import concat, inputs_and_embeddings
from ltlib.settings import cli_settings, log_settings
from ltlib.optimizers import get_optimizer
from ltlib.util import unique
from ltlib.output import save_token_predictions_multi_output
from config import Defaults
config = cli_settings(['datadir', 'datasets','wordvecs'], Defaults)
assert len(config.filter_nums) == len(config.filter_sizes)
datasets = config.datasets.split(',')
data = []
max_fs = []
max_vfs = []
for ind, dataset in enumerate(datasets):
data_path = config.datadir + '/' + dataset
data.append(conlldata.load_dir(data_path, config))
max_fs.append((0.0,0.0))
max_vfs.append((0.0,0.0))
max_y = 0
for ind, ds in enumerate(data):
y_shape = ds.train.tokens.targets.shape
if y_shape[1] > max_y:
max_y = y_shape[1]
all_vocab = set()
for ind, dataset in enumerate(datasets):
all_vocab = set().union(*[all_vocab, data[ind].vocabulary])
w2v = NormEmbeddingFeature.from_file(config.wordvecs,
max_rank=config.max_vocab_size,
vocabulary=all_vocab,
name='words')
features = [w2v]
if config.word_features:
features.append(SennaCapsFeature('caps'))
for ind, dataset in enumerate(datasets):
data[ind].tokens.add_features(features)
data[ind].tokens.add_inputs(windowed_inputs(config.window_size, features))
# Log word vector feature stat summary
info('{}: {}'.format(config.wordvecs, w2v.summary()))
inputs, embeddings = inputs_and_embeddings(features, config)
# Combine and reshape for convolution
seq = concat(embeddings)
cshape = (config.window_size, sum(f.output_dim for f in features))
seq = Reshape((1,) + cshape)(seq)
# Convolutions
conv_outputs = []
for filter_size, filter_num in zip(config.filter_sizes, config.filter_nums):
conv = Convolution2D(filter_num, filter_size, cshape[1],activation='relu')(seq)
cout = Flatten()(conv)
conv_outputs.append(cout)
seq = concat(conv_outputs)
for size in config.hidden_sizes:
seq = Dense(size, activation=config.hidden_activation)(seq)
seq = Dropout(config.output_drop_prob)(seq)
#Create private outputs
outs = []
for ind, dataset in enumerate(datasets):
#outs.append(Dense(data[ind].tokens.target_dim, activation='softmax')(seq))
outs.append(Dense(max_y, activation='softmax')(seq))
model = Model(input=inputs, output=outs)
optimizer = get_optimizer(config)
model.compile(loss=['categorical_crossentropy']* len(datasets), optimizer=optimizer, metrics=['accuracy'])
#model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# Assemble one concatenated training matrix over all datasets. Labels are
# zero-padded to max_y columns so every output head of the multi-output
# model sees a uniform target width. If the per-dataset label blocks have
# incompatible shapes, concatenation is abandoned and the training loop
# below falls back to per-dataset batching.
x_batch = []
y_batch = []
concatenated = True
for ind, ds in enumerate(data):
    x_batch.append(ds.train.tokens.inputs['words'])
    # One all-zero dummy label block per dataset; the real labels replace
    # the block belonging to the current dataset below.
    out_labels = [np.zeros((data[ind_].train.tokens.targets.shape[0], max_y)) for ind_, dataset in enumerate(datasets)]
    y_ = ds.train.tokens.targets
    if y_.shape[1] < max_y:
        # Pad narrower tag sets with zero columns up to the widest one.
        y_ = np.concatenate([y_, np.zeros((y_.shape[0], max_y - y_.shape[1]))], axis=1)
    out_labels[ind] = y_
    try:
        y_batch.append(np.concatenate(out_labels, axis=0))
    except ValueError:
        # np.concatenate raises ValueError on mismatched shapes; a bare
        # except here would also swallow KeyboardInterrupt and real bugs.
        print("Cannot concatenate Datasets. Expect slower run time.")
        concatenated = False
        break
if concatenated:
    x_batch = np.concatenate(x_batch, axis=0)
time_str = datetime.datetime.now().isoformat()
print("Started training at: %s" % time_str)
# Main training loop: one minibatch per step. When the datasets could be
# concatenated, a single random slice feeds every output head at once;
# otherwise one dataset is sampled per step and the other heads receive
# all-zero dummy labels (Keras requires a label array for every output).
for step in range(1, config.train_steps + 1):
    if concatenated:
        # NOTE(review): start is drawn from the full range, so a slice near
        # the end of x_batch yields a batch shorter than config.batch_size.
        start = random.randrange(len(x_batch))
        end = start + config.batch_size
        x = x_batch[start:end]
        y = [y_[start:end] for y_ in y_batch]
    else:
        y = []
        data_ind = random.randrange(len(data))
        # Slice a random contiguous batch from the sampled dataset.
        start = random.randrange(len(data[data_ind].train.tokens.inputs['words']))
        end = start + config.batch_size
        x = data[data_ind].train.tokens.inputs['words'][start:end]
        #Keras requires labels for all outputs. Create dummy outputs for ones not being trained.
        out_labels = []
        out_labels = [np.zeros(data[ind].train.tokens.targets[0:len(x)].shape) for ind, dataset in enumerate(datasets)]
        out_labels[data_ind] = data[data_ind].train.tokens.targets[start:end]
        # Append the per-head label blocks to y (or extend them when y has
        # already been populated on a previous pass).
        for ind, ol in enumerate(out_labels):
            if len(y) == len(data):
                y[ind] = np.concatenate([y[ind], ol], axis=0)
            else:
                y.append(ol)
    model.train_on_batch(x, y)
#model.fit(x, y, batch_size=config.batch_size, nb_epoch=1, verbose=config.verbosity)
#if step % 1000 == 0:
# time_str = datetime.datetime.now().isoformat()
# print("Step %s at: %s" % (step, time_str))
if step % config.evaluate_every == 0 and step >= config.evaluate_min:
time_str = datetime.datetime.now().isoformat()
info("\nEvaluating. Time: {}. Step: {}".format(time_str, step))
for eval_ind, dataset in enumerate(datasets):
info("Dataset: {}".format(datasets[eval_ind]))
predictions = model.predict(data[eval_ind].test.tokens.inputs)
if type(predictions).__name__ != 'list':
predictions = [predictions]
pred = predictions[eval_ind]
if len(pred[0]) != data[eval_ind].tokens.target_dim:
length = data[eval_ind].tokens.target_dim
pred = [p[:length] for p in pred]
if is_iob_tagging(unique(data[eval_ind].tokens.target_strs)):
data[eval_ind].test.tokens.set_predictions(pred)
summary = conll_summary(data[eval_ind].test.sentences)
#Track Maxes
f_score = summary.split(':')[2].split('%')[0].strip()
try:
f_score = float(f_score)
except:
print("%s is not floatable!" % f_score)
if f_score > max_fs[eval_ind][0]:
max_fs[eval_ind] = (f_score, max_fs[eval_ind][0])
save_token_predictions_multi_output(data[eval_ind].test, conlldata.write, predictions=pred)
elif f_score > max_fs[eval_ind][1]:
max_fs[eval_ind] = (max_fs[eval_ind][0], f_score)
save_token_predictions_multi_output(data[eval_ind].test, conlldata.write, predictions=pred)
#End Track Maxes
info("{}".format(summary))
info("Max Fs: {}".format(str(max_fs[eval_ind])))
if config.viterbi:
vmapper = viterbi.TokenPredictionMapper(data[eval_ind].train.sentences)
data[eval_ind].test.sentences.map_predictions(vmapper)
info(vmapper.summary())
vsummary = conll_summary(data[eval_ind].test.sentences)
#Track Maxes
vf_score = vsummary.split(':')[2].split('%')[0].strip()
try:
vf_score = float(vf_score)
except:
print("Viterbi %s is not floatable!" % vf_score)
if vf_score > max_vfs[eval_ind][0]:
max_vfs[eval_ind] = (vf_score, max_vfs[eval_ind][0])
save_token_predictions_multi_output(data[eval_ind].test, conlldata.write, vmapper=vmapper)
elif vf_score > max_vfs[eval_ind][1]:
max_vfs[eval_ind] = (max_vfs[eval_ind][0], vf_score)
save_token_predictions_multi_output(data[eval_ind].test, conlldata.write, vmapper=vmapper)
#End Track Maxes
info("{}".format(vsummary))
info("Max Viterbi Fs: {}".format(str(max_vfs[eval_ind])))
else:
data[eval_ind].test.tokens.set_predictions(pred)
info("{}".format(per_type_summary(data[eval_ind].test.tokens)))
| 8,810 | 39.232877 | 119 | py |
MTL-Bioinformatics-2016 | MTL-Bioinformatics-2016-master/models/single_task.py | #!/usr/bin/env python
"""Train and evaluate a single-task window-based CNN token tagger.

Loads CoNLL-format data and word vectors, builds a convolutional
token classification model, trains it on a random contiguous subset
of the training data (config.percent_keep), and writes devel/test
predictions to file.
"""
from __future__ import print_function
import random
from logging import info
from keras.models import Model
from keras.layers import Input, Embedding, Flatten, Dense
from keras.layers import Reshape, Convolution2D, Dropout
from ltlib import filelog
from ltlib import conlldata
from ltlib import viterbi
from ltlib.features import NormEmbeddingFeature, SennaCapsFeature
from ltlib.features import windowed_inputs
from ltlib.callbacks import token_evaluator, EpochTimer
from ltlib.layers import concat, inputs_and_embeddings
from ltlib.settings import cli_settings, log_settings
from ltlib.optimizers import get_optimizer
from ltlib.output import save_token_predictions
from config import Defaults
# Command-line configuration; 'datadir' and 'wordvecs' are required args.
config = cli_settings(['datadir', 'wordvecs'], Defaults)
# Each convolution filter size needs a matching filter count.
assert len(config.filter_nums) == len(config.filter_sizes)
data = conlldata.load_dir(config.datadir, config)
# Optional Viterbi decoding over per-token predictions.
if config.viterbi:
    vmapper = viterbi.TokenPredictionMapper(data.train.sentences)
else:
    vmapper = None
# Word vectors, capped at max_vocab_size and filtered to the corpus vocabulary.
w2v = NormEmbeddingFeature.from_file(config.wordvecs,
                                     max_rank=config.max_vocab_size,
                                     vocabulary=data.vocabulary,
                                     name='words')
features = [w2v]
if config.word_features:
    # SENNA-style capitalization feature.
    features.append(SennaCapsFeature(name='caps'))
data.tokens.add_features(features)
# Inputs are windows of feature indices centered on each token.
data.tokens.add_inputs(windowed_inputs(config.window_size, features))
# Log word vector feature stat summary
info('{}: {}'.format(config.wordvecs, w2v.summary()))
inputs, embeddings = inputs_and_embeddings(features, config)
# Combine and reshape for convolution
seq = concat(embeddings)
cshape = (config.window_size, sum(f.output_dim for f in features))
seq = Reshape((1,)+cshape)(seq)
# Convolutions
conv_outputs = []
for filter_size, filter_num in zip(config.filter_sizes, config.filter_nums):
    conv = Convolution2D(filter_num, filter_size, cshape[1],
                         activation='relu')(seq)
    cout = Flatten()(conv)
    conv_outputs.append(cout)
seq = concat(conv_outputs)
# Fully connected layers with dropout before the softmax output.
for size in config.hidden_sizes:
    seq = Dense(size, activation=config.hidden_activation)(seq)
seq = Dropout(config.output_drop_prob)(seq)
out = Dense(data.tokens.target_dim, activation='softmax')(seq)
model = Model(input=inputs, output=out)
optimizer = get_optimizer(config)
model.compile(loss='categorical_crossentropy', optimizer=optimizer,
              metrics=['accuracy'])
callbacks = [
    EpochTimer(),
    token_evaluator(data.train, mapper=vmapper, config=config),
    token_evaluator(data.test, mapper=vmapper, config=config),
]
# Train on a random contiguous slice covering percent_keep of the data.
percnt_keep = config.percent_keep
amt_keep = len(data.train.tokens.inputs['words']) * percnt_keep
print("Total: %s. Keeping: %s" % (len(data.train.tokens.inputs['words']), amt_keep))
start = random.randrange(int(len(data.train.tokens.inputs['words']) - amt_keep + 1))
end = int(start + amt_keep)
# NOTE(review): only the 'words' input is fed to fit() below; when
# word_features is on, the 'caps' input appears to be dropped -- verify.
x = data.train.tokens.inputs['words'][start:end]
model.fit(
    #data.train.tokens.inputs,
    x,
    data.train.tokens.targets[start:end],
    callbacks=callbacks,
    batch_size=config.batch_size,
    nb_epoch=config.epochs,
    verbose=config.verbosity
)
save_token_predictions(data.devel, model, conlldata.write)
save_token_predictions(data.test, model, conlldata.write)
| 3,285 | 31.86 | 84 | py |
MTL-Bioinformatics-2016 | MTL-Bioinformatics-2016-master/models/baseline.py | #!/usr/bin/env python
"""Train and evaluate a baseline window-based MLP token tagger.

Loads CoNLL-format data and word vectors, builds a fully-connected
token classification model over windowed embeddings, trains it on the
full training set, and writes devel/test predictions to file.
"""
from __future__ import print_function
from logging import info
from keras.models import Model
from keras.layers import Input, Embedding, merge, Flatten, Dense
from keras.layers import Dropout
from ltlib import filelog
from ltlib import conlldata
from ltlib import viterbi
from ltlib.features import NormEmbeddingFeature, SennaCapsFeature
from ltlib.features import windowed_inputs
from ltlib.callbacks import token_evaluator, EpochTimer
from ltlib.layers import concat, inputs_and_embeddings
from ltlib.settings import cli_settings, log_settings
from ltlib.optimizers import get_optimizer
from ltlib.output import save_token_predictions
from baseline_config import Defaults
# Command-line configuration; 'datadir' and 'wordvecs' are required args.
config = cli_settings(['datadir', 'wordvecs'], Defaults)
data = conlldata.load_dir(config.datadir, config)
# Prediction mapper (e.g. Viterbi) chosen according to configuration.
vmapper = viterbi.get_prediction_mapper(data.train.sentences, config)
# Word vectors, capped at max_vocab_size and filtered to the corpus vocabulary.
w2v = NormEmbeddingFeature.from_file(config.wordvecs,
                                     max_rank=config.max_vocab_size,
                                     vocabulary=data.vocabulary,
                                     name='words')
features = [w2v]
if config.word_features:
    # SENNA-style capitalization feature.
    features.append(SennaCapsFeature(name='caps'))
data.tokens.add_features(features)
# Inputs are windows of feature indices centered on each token.
data.tokens.add_inputs(windowed_inputs(config.window_size, features))
# Log word vector feature stat summary
info('{}: {}'.format(config.wordvecs, w2v.summary()))
inputs, embeddings = inputs_and_embeddings(features, config)
# Flatten windowed embeddings and apply fully connected layers.
seq = concat(embeddings)
seq = Flatten()(seq)
for size in config.hidden_sizes:
    seq = Dense(size, activation=config.hidden_activation)(seq)
seq = Dropout(config.output_drop_prob)(seq)
out = Dense(data.tokens.target_dim, activation='softmax')(seq)
model = Model(input=inputs, output=out)
optimizer = get_optimizer(config)
model.compile(loss='categorical_crossentropy', optimizer=optimizer,
              metrics=['accuracy'])
callbacks = [
    EpochTimer(),
    token_evaluator(data.train, config=config),
    token_evaluator(data.test, mapper=vmapper, config=config),
]
model.fit(
    data.train.tokens.inputs,
    data.train.tokens.targets,
    callbacks=callbacks,
    batch_size=config.batch_size,
    nb_epoch=config.epochs,
    verbose=config.verbosity
)
save_token_predictions(data.devel, model, conlldata.write)
save_token_predictions(data.test, model, conlldata.write)
| 2,362 | 30.092105 | 69 | py |
MTL-Bioinformatics-2016 | MTL-Bioinformatics-2016-master/models/ltlib/callbacks.py | from sys import stdout
from logging import info
from datetime import datetime
from abc import ABCMeta, abstractmethod
from keras.callbacks import Callback
from evaluation import per_type_summary, conll_summary, is_iob_tagging
from util import unique
from defaults import defaults
class LtlCallback(Callback):
    """Adds after_epoch_end() to Callback.
    after_epoch_end() is invoked after all calls to on_epoch_end() and
    is intended to work around the fixed callback ordering in Keras,
    which can cause output from callbacks to mess up the progress bar
    (related: https://github.com/fchollet/keras/issues/2521).
    """
    def __init__(self):
        super(LtlCallback, self).__init__()
        # Number of the epoch currently in progress (incremented in
        # on_epoch_begin, so 1-based once training starts).
        self.epoch = 0
    def after_epoch_end(self, epoch):
        """Hook for subclasses; default implementation is a no-op."""
        pass
    def on_epoch_begin(self, epoch, logs={}):
        # Deferred dispatch: the hook for epoch N fires at the start of
        # epoch N+1, guaranteeing that every on_epoch_end() already ran.
        if epoch > 0:
            self.after_epoch_end(self.epoch)
        self.epoch += 1
    def on_train_end(self, logs={}):
        # Fire the hook for the final epoch, which has no successor epoch.
        self.after_epoch_end(self.epoch)
class CallbackChain(Callback):
    """Callback that forwards every Keras callback event, in order, to a
    list of wrapped callbacks."""
    def __init__(self, callbacks):
        super(CallbackChain, self).__init__()
        self._callbacks = callbacks
    def _broadcast(self, method, *args, **kwargs):
        """Invoke the named method on each chained callback in turn."""
        for callback in self._callbacks:
            getattr(callback, method)(*args, **kwargs)
    def _set_params(self, params):
        self._broadcast('_set_params', params)
    def _set_model(self, model):
        self._broadcast('_set_model', model)
    def on_epoch_begin(self, *args, **kwargs):
        self._broadcast('on_epoch_begin', *args, **kwargs)
    def on_epoch_end(self, *args, **kwargs):
        self._broadcast('on_epoch_end', *args, **kwargs)
    def on_batch_begin(self, *args, **kwargs):
        self._broadcast('on_batch_begin', *args, **kwargs)
    def on_batch_end(self, *args, **kwargs):
        self._broadcast('on_batch_end', *args, **kwargs)
    def on_train_begin(self, *args, **kwargs):
        self._broadcast('on_train_begin', *args, **kwargs)
    def on_train_end(self, *args, **kwargs):
        self._broadcast('on_train_end', *args, **kwargs)
class EvaluatorCallback(LtlCallback):
    """Abstract base class for evaluator callbacks.

    Subclasses implement evaluation_summary(); this class handles the
    bookkeeping of running one evaluation per epoch and writing the
    labeled summary lines out.
    """
    __metaclass__ = ABCMeta
    def __init__(self, dataset, label=None, writer=None):
        super(EvaluatorCallback, self).__init__()
        self.dataset = dataset
        # Default the label to the dataset name and the writer to info().
        self.label = dataset.name if label is None else label
        self.writer = info if writer is None else writer
        self.summaries = []
    def __call__(self):
        """Run one evaluation; invoked after the end of each epoch."""
        summary = self.evaluation_summary()
        self.summaries.append(summary)
        epoch = len(self.summaries)   # epoch count is 1-based here
        for line in summary.split('\n'):
            self.writer('{} Ep: {} {}'.format(self.label, epoch, line))
    @abstractmethod
    def evaluation_summary(self):
        """Return a string summarizing evaluation results."""
    def after_epoch_end(self, epoch):
        self()
class EpochTimer(LtlCallback):
    """Callback that logs wall-clock timing for each epoch."""
    def __init__(self, label='', writer=info):
        super(EpochTimer, self).__init__()
        # Append a separating space only when a label was given.
        self.label = label + ' ' if label else ''
        self.writer = writer
    def on_epoch_begin(self, epoch, logs={}):
        super(EpochTimer, self).on_epoch_begin(epoch, logs)
        self.start_time = datetime.now()
    def after_epoch_end(self, epoch):
        end_time = datetime.now()
        elapsed = end_time - self.start_time
        # Drop sub-second precision from the logged timestamps.
        started = str(self.start_time).split('.')[0]
        ended = str(end_time).split('.')[0]
        self.writer('{}Ep: {} {}s (start {}, end {})'.format(
            self.label, epoch, elapsed.seconds, started, ended
        ))
class Predictor(LtlCallback):
    """After each epoch, runs the model on a data item sequence and
    stores the resulting predictions on it."""
    def __init__(self, dataitems):
        super(Predictor, self).__init__()
        self.dataitems = dataitems
    def after_epoch_end(self, epoch):
        # Predict on all inputs and attach the results to the items.
        self.dataitems.set_predictions(self.model.predict(self.dataitems.inputs))
class PredictionMapper(LtlCallback):
    """After each epoch, maps stored predictions to strings for a data
    item sequence using the given mapper."""
    def __init__(self, dataitems, mapper):
        super(PredictionMapper, self).__init__()
        self.dataitems = dataitems
        self.mapper = mapper
    def after_epoch_end(self, epoch):
        self.dataitems.map_predictions(self.mapper)
        # TODO check if summary() is defined
        info(self.mapper.summary())
class ConllEvaluator(EvaluatorCallback):
    """Evaluates performance using CoNLL criteria.

    The base class constructor is inherited unchanged.
    """
    def evaluation_summary(self):
        """Return the CoNLL-style summary for the dataset sentences."""
        return conll_summary(self.dataset.sentences)
class TokenLevelEvaluator(EvaluatorCallback):
    """Evaluates performance using token-level metrics.

    The base class constructor is inherited unchanged.
    """
    def evaluation_summary(self):
        """Return the per-type token-level summary for the dataset."""
        return per_type_summary(self.dataset.tokens)
class TokenAccuracyEvaluator(EvaluatorCallback):
    """Evaluates performance using token-level accuracy."""
    # TODO why does this class exist? Isn't TokenLevelEvaluator better
    # in every way?
    def __init__(self, dataset, label=None, writer=None):
        super(TokenAccuracyEvaluator, self).__init__(dataset, label, writer)
    def evaluation_summary(self):
        """Return a one-line 'acc: P% (correct/total)' summary.

        Gold and predicted label sequences must be equal length.
        """
        gold = self.dataset.tokens.target_strs
        pred = self.dataset.tokens.prediction_strs
        assert len(gold) == len(pred)
        total = len(gold)
        correct = sum(int(p==g) for p, g in zip(pred, gold))
        if total == 0:
            # Guard: an empty dataset previously raised ZeroDivisionError.
            return 'acc: 0.00% (0/0)'
        return 'acc: {:.2%} ({}/{})'.format(1.*correct/total, correct, total)
def token_evaluator(dataset, label=None, writer=None, mapper=None,
                    config=defaults):
    """Return a CallbackChain that predicts and evaluates on dataset.

    The evaluation style is chosen as follows: explicit token-level
    evaluation wins, CoNLL span evaluation applies for IOB-tagged
    data, and token-level evaluation is the fallback.
    """
    if not config.token_level_eval and \
            is_iob_tagging(unique(dataset.tokens.target_strs)):
        evaluator = ConllEvaluator
    else:
        evaluator = TokenLevelEvaluator
    info('using {} for {}'.format(evaluator.__name__, dataset.name))
    chain = [
        Predictor(dataset.tokens),
        evaluator(dataset, label=label, writer=writer),
    ]
    if mapper is not None:
        # TODO don't assume the mapper expects sentences.
        chain.append(PredictionMapper(dataset.sentences, mapper))
        # TODO do we really want a second eval here?
        chain.append(evaluator(dataset, label=label, writer=writer))
    return CallbackChain(chain)
| 6,986 | 32.591346 | 77 | py |
MTL-Bioinformatics-2016 | MTL-Bioinformatics-2016-master/models/ltlib/features.py | import re
import numpy as np
import wordvecdata
from logging import warn
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from bidict import IncBidict
def uniform(shape, scale=0.05):
    """Sample an array of the given shape uniformly from [-scale, scale)."""
    # TODO move to more sensible place, avoid redundancy with
    # keras.initializations.uniform
    return np.random.uniform(-scale, scale, shape)
class FeatureMapping(object):
    """Abstract base class for feature mappings."""
    __metaclass__ = ABCMeta
    def __init__(self, name):
        """Store the feature name.

        Args:
            name: feature name, used as the default key into the
                feature dict of a DataItem.
        """
        self.name = name
    @abstractmethod
    def __call__(self, dataitem):
        """Return the value of this feature for the given DataItem."""
    @abstractmethod
    def invert(self, value):
        """Return a string representation of a feature value."""
class IndexedFeatureMapping(FeatureMapping):
    """Feature mapping from discrete keys to integer indices."""
    # Data key to use by default
    default_data_key = 'text'
    def __init__(self, index_map=None, data_key=None, name=None):
        """Initialize IndexedFeatureMapping.

        Args:
            index_map: mapping from values to indices.
            data_key: key identifying the DataItem data to map. If None,
                defaults to default_data_key.
        """
        super(IndexedFeatureMapping, self).__init__(name)
        # IncBidict grows on demand and supports inverse lookup.
        self.index_map = IncBidict() if index_map is None else IncBidict(index_map)
        self.data_key = self.default_data_key if data_key is None else data_key
    def __call__(self, dataitem):
        """Return the index of the mapped data value for the DataItem."""
        return self.index_map[dataitem.data[self.data_key]]
    def __getitem__(self, key):
        """Direct mapping lookup."""
        return self.index_map[key]
    def __len__(self):
        return len(self.index_map)
    def invert(self, index):
        """Return the key that maps to the given index."""
        return self.index_map.inv[index]
class EmbeddingFeature(IndexedFeatureMapping):
    """Feature mapping to indices with associated vector values."""
    # Missing key value to use by default.
    default_missing_key = None
    # Required vocabulary items (other than the missing key value).
    required_vocabulary = []
    def __init__(self, index_map=None, weights=None, output_dim=None,
                 init=uniform, missing_key=None, data_key=None, name=None):
        """Initialize EmbeddingFeature.
        Either initial weights or output dimension must be provided.
        Args:
            index_map: mapping from values to indices.
            weights: array-like of initial embedding weights.
            output_dim: embedding dimension.
            init: initialization function to use for embeddings. Only
                used when not specified in weights.
            missing_key: key to use for lookup for keys not in index_map.
                If None, extend index_map as needed.
            data_key: key identifying the DataItem data to map. If None,
                defaults to default_data_key.
        """
        super(EmbeddingFeature, self).__init__(
            index_map=index_map, data_key=data_key, name=name,
        )
        if weights is None:
            # No stored weights: drawn lazily by the weights property
            # using the init function.
            self._weights = None
            self._output_dim = output_dim
        else:
            self._weights = np.asarray(weights)
            self._output_dim = self._weights.shape[1]
        if self.output_dim is None:
            raise ValueError('could not determine embedding dimension')
        if output_dim is not None and output_dim != self.output_dim:
            raise ValueError('inconsistent embedding dimension')
        if missing_key is None:
            missing_key = self.default_missing_key
        self._init = init
        self._missing_key = missing_key
        # Lookup statistics for OOV reporting (see summary()).
        self.total_count = 0
        self.missing_count = 0
        self.oov_count = defaultdict(int)
    def __call__(self, dataitem):
        """Return the embedding index for the DataItem, tracking OOV stats."""
        key = dataitem.data[self.data_key]
        key = self.normalize(key, self.index_map)
        if key not in self.index_map:
            key = self.__missing__(key)
        self.total_count += 1
        return self.index_map[key]
    def __missing__(self, key):
        # TODO reconsider special function for this.
        # Record the OOV occurrence; substitute the designated missing
        # key if one is set, otherwise index_map grows with the new key.
        self.missing_count += 1
        self.oov_count[key] += 1
        if self._missing_key is not None:
            key = self._missing_key
        return key
    @property
    def weights(self):
        # Three cases: no stored weights (draw fresh), complete stored
        # weights, or stored weights missing rows for vocabulary added
        # after loading (pad with freshly initialized rows).
        if self._weights is None:
            return self._init((self.input_dim, self.output_dim))
        elif self.input_dim <= self._weights.shape[0]:
            return self._weights
        else:
            # Partial weights, add in newly initialized for missing.
            missing = self.input_dim - self._weights.shape[0]
            warn('incomplete weights, added {} missing'.format(missing))
            return np.concatenate([self._weights,
                                   self._init((missing, self.output_dim))])
    @property
    def input_dim(self):
        # Current vocabulary size; may grow when missing_key is None.
        return len(self.index_map)
    @property
    def output_dim(self):
        return self._output_dim
    def average_weight(self):
        """Return the average weight vector length."""
        if self._weights is None:
            warn('average_weight: no weights')
            return 0.0
        else:
            return np.average([np.linalg.norm(w) for w in self._weights])
    def missing_rate(self):
        """Return the ratio of missing to total lookups."""
        # NOTE(review): raises ZeroDivisionError if called before any lookup.
        return 1.*self.missing_count/self.total_count
    def most_frequent_oov(self, max_rank=5):
        """Return up to max_rank (count, word) pairs of most frequent OOVs."""
        freq_oov = [(v, k) for k, v in self.oov_count.items()]
        return sorted(freq_oov, reverse=True)[:max_rank]
    def summary(self):
        """Return string summarizing embedding statistics."""
        return ('Vocab {} words, avg wv len {}, OOV {:.2%} ({}/{}) '
                '(top OOV: {})'.format(
                    len(self), self.average_weight(), self.missing_rate(),
                    self.missing_count, self.total_count,
                    ' '.join('{} ({})'.format(w, n)
                             for n, w in self.most_frequent_oov())
                ))
    @classmethod
    def from_file(cls, filename, max_rank=None, vocabulary=None, name=None,
                  add_missing=False, **kwargs):
        """Load embeddings from file, optionally filtered to a vocabulary."""
        index_map, weights = wordvecdata.load(filename, max_rank)
        if vocabulary is not None:
            # Filter to vocabulary, taking normalization into account.
            vocabulary = set(cls.normalize(w, index_map) for w in vocabulary)
            # Make sure the missing key value is included in the vocabulary.
            missing_key = kwargs.get('missing_key', cls.default_missing_key)
            if missing_key is not None:
                vocabulary.add(missing_key)
            # ... and other required vocab items (TODO: clean up logic)
            for w in cls.required_vocabulary:
                vocabulary.add(w)
            index_map, weights = wordvecdata.filter_words(index_map, weights,
                                                          vocabulary)
        obj = cls(index_map=index_map, weights=weights, name=name, **kwargs)
        if vocabulary is not None and add_missing:
            # add missing vocabulary items to embedding
            for v in vocabulary:
                obj[v]
        return obj
    @staticmethod
    def normalize(key, vocabulary):
        """Return form of key to use for lookup in vocabulary."""
        # Static method to allow normalization to apply to vocabulary
        # filtering in from_file() before initialization
        return key
class NormEmbeddingFeature(EmbeddingFeature):
    """Embedding lookup feature with normalization."""
    default_missing_key = 'UNKNOWN'
    required_vocabulary = ['PADDING']
    @staticmethod
    def normalize(key, vocabulary):
        """Return the least-normalized form of key found in vocabulary,
        or the original key if no form is found.

        Normalizations are tried in order: as-is, lowercased, then with
        number-like substrings replaced by "0".
        """
        orig_key = key
        # Normalize as fallback if direct lookup fails
        for norm in (lambda s: s.lower(),
                     lambda s: re.sub(r'[+-]?(?:[.,]?[0-9])+', '0', s)):
            if key in vocabulary:
                return key
            key = norm(key)
        # Bug fix: also check the final (fully normalized) form, which
        # the loop computes but previously never tested.
        if key in vocabulary:
            return key
        # Return original for missing for accurate OOV stats
        return orig_key
class SennaEmbeddingFeature(EmbeddingFeature):
    """Embedding lookup feature with SENNA-like normalization."""
    default_missing_key = 'UNKNOWN'
    required_vocabulary = ['PADDING']
    @staticmethod
    def normalize(key, vocabulary):
        """Return key as-is if known, otherwise its SENNA-normalized form."""
        if key in vocabulary:
            # Direct hit: no normalization needed.
            return key
        # SENNA normalization: lowercase and replace numbers with "0".
        return re.sub(r'[+-]?(?:[.,]?[0-9])+', '0', key.lower())
class SennaCapsFeature(EmbeddingFeature):
    """Token capitalization feature using SENNA categories."""
    def __init__(self, data_key='text', name=None, output_dim=5):
        super(SennaCapsFeature, self).__init__(
            output_dim=output_dim, data_key=data_key, name=name
        )
    def __call__(self, dataitem):
        """Return the index for the capitalization category of the token.

        Categories: 'allcaps', 'initcap', 'hascap', 'nocaps'.
        """
        text = dataitem.data[self.data_key]
        if text.isupper():
            category = 'allcaps'
        elif text[:1].isupper():
            # [:1] instead of [0]: avoids IndexError for empty text,
            # which falls through to 'nocaps'.
            category = 'initcap'
        elif any(c.isupper() for c in text):
            category = 'hascap'
        else:
            category = 'nocaps'
        return self[category]
# TODO: not sure this belongs here. Unlike the feature mappings above,
# this assumes quite a lot of knowledge of DataItem structure.
class WindowedInput(object):
    """Catenate feature values in a window of items in a sequence."""
    def __init__(self, window_size, padding, key):
        """Initialize WindowedInput.

        Args:
            window_size: odd number of items in the window.
            padding: value substituted for positions outside the sequence.
            key: name of the feature to read from each item.

        Raises:
            ValueError: if window_size is even.
        """
        if window_size % 2 == 0:
            raise ValueError('window size must be odd')
        self.window_size = window_size
        self.padding = padding
        self.key = key
    def __call__(self, dataitem):
        """Return an array of feature values for the window centered on
        dataitem, padded at sequence boundaries."""
        windowed = []
        # Bug fix: use integer division; "/" yields a float in Python 3,
        # which breaks the range() call below.
        half_win = (self.window_size - 1) // 2
        for offset in range(-half_win, half_win+1):
            sibling = dataitem.sibling(offset)
            if sibling is not None:
                windowed.append(sibling.feature[self.key])
            else:
                # Out of sequence bounds: substitute the padding value.
                windowed.append(self.padding)
        return np.array(windowed)
    @property
    def input_length(self):
        return self.window_size
    @property
    def shape(self):
        """Return shape of generated input."""
        # TODO this depends on the base features, don't assume fixed size
        return (self.input_length,)
def windowed_inputs(window_size, features, padding_key='PADDING'):
    """Return list of WindowedInputs, one for each given Feature.
    Given Features must support indexing by padding_key.
    """
    inputs = []
    for feature in features:
        padding = feature[padding_key]
        inputs.append(WindowedInput(window_size, padding, feature.name))
    return inputs
| 10,940 | 34.293548 | 77 | py |
MTL-Bioinformatics-2016 | MTL-Bioinformatics-2016-master/models/ltlib/optimizers.py | from keras import optimizers
DEFAULT_OPTIMIZER = 'adam'
def get_optimizer(config):
    """Return the Keras optimizer specified by the configuration.

    Uses DEFAULT_OPTIMIZER when none is configured, and rebuilds the
    optimizer with the configured learning rate when one is given.
    """
    settings = vars(config)
    optimizer = optimizers.get(settings.get('optimizer', DEFAULT_OPTIMIZER))
    lr = settings.get('learning_rate')
    if lr is None:
        return optimizer    # default parameters
    return type(optimizer)(lr=lr)
| 405 | 28 | 60 | py |
MTL-Bioinformatics-2016 | MTL-Bioinformatics-2016-master/models/ltlib/layers.py | import numpy as np
from keras import backend as K
from keras import initializations
from keras.layers import Layer, Input, Embedding, merge
class FixedEmbedding(Layer):
    """Embedding with fixed (non-trainable) weights.
    Modified from keras/layers/embeddings.py in Keras (http://keras.io).
    WARNING: this is experimental and not fully tested, use at your
    own risk.
    """
    input_ndim = 2
    def __init__(self, input_dim, output_dim, weights=None, input_length=None,
                 mask_zero=False, dropout=0., **kwargs):
        """Initialize FixedEmbedding.

        Args:
            input_dim: vocabulary size.
            output_dim: embedding dimension.
            weights: list with a single array of shape
                (input_dim, output_dim) holding the fixed weights.
            input_length: length of input sequences.
            mask_zero: whether index 0 masks the timestep.
            dropout: fraction of embedding rows to drop at train time.

        Raises:
            ValueError: if weights is not a one-element list with shape
                (input_dim, output_dim).
        """
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.input_length = input_length
        self.mask_zero = mask_zero
        self.dropout = dropout
        if 0. < self.dropout < 1.:
            self.uses_learning_phase = True
        if (not isinstance(weights, list) or len(weights) != 1 or
            weights[0].shape != (input_dim, output_dim)):
            raise ValueError('weights must be a list with single element'
                             ' with shape (input_dim, output_dim).')
        self.initial_weights = weights
        kwargs['input_shape'] = (self.input_length,)
        kwargs['input_dtype'] = 'int32'
        super(FixedEmbedding, self).__init__(**kwargs)
    def build(self, input_shape):
        # Non-trainable weight variable; overwritten by set_weights below.
        # NOTE(review): dtype='int32' for embedding weights looks suspect
        # (embeddings are normally floats) -- confirm against the Keras
        # version in use before changing.
        self.W = K.variable(np.zeros((self.input_dim, self.output_dim),
                                     dtype='int32'),
                            name='{}_W'.format(self.name))
        self.non_trainable_weights = [self.W]
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
    def compute_mask(self, x, mask=None):
        # Mask timesteps whose index is 0 when mask_zero is set.
        if not self.mask_zero:
            return None
        else:
            return K.not_equal(x, 0)
    def get_output_shape_for(self, input_shape):
        if not self.input_length:
            input_length = input_shape[1]
        else:
            input_length = self.input_length
        return (input_shape[0], input_length, self.output_dim)
    def call(self, x, mask=None):
        if 0. < self.dropout < 1.:
            # Word-level dropout: zero whole embedding rows at train
            # time, rescaling survivors by 1/retain_p.
            retain_p = 1. - self.dropout
            B = K.random_binomial((self.input_dim,), p=retain_p) * (1. / retain_p)
            B = K.expand_dims(B)
            W = K.in_train_phase(self.W * B, self.W)
        else:
            W = self.W
        out = K.gather(W, x)
        return out
    def get_config(self):
        config = {'input_dim': self.input_dim,
                  'output_dim': self.output_dim,
                  'input_length': self.input_length,
                  'mask_zero': self.mask_zero,
                  'dropout': self.dropout}
        # Bug fix: the original called super(Embedding, self), but this
        # class inherits from Layer, not Embedding, so that call raised
        # TypeError at runtime; delegate to the actual base class.
        base_config = super(FixedEmbedding, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
def input_and_embedding(embedding, input_length, name=None, fixed=False,
                        **kwargs):
    """Create Input layer followed by embedding.

    The Input layer is named after the embedding feature unless an
    explicit name is given; a FixedEmbedding is used when fixed is True.
    """
    layer_name = embedding.name if name is None else name
    input_layer = Input(shape=(input_length,), dtype='int32', name=layer_name)
    embedding_cls = FixedEmbedding if fixed else Embedding
    embedded = embedding_cls(embedding.input_dim, embedding.output_dim,
                             weights=[embedding.weights],
                             input_length=input_length,
                             **kwargs)(input_layer)
    return input_layer, embedded
def inputs_and_embeddings(features, config):
    """Create paired Input and embedding layers for each feature."""
    base_args = {}
    multiplier = config.embedding_lr_multiplier
    if multiplier and multiplier != 1.0:
        # Note: the per-layer learning rate multiplier argument
        # `W_lr_multiplier` is not supported in Keras master
        # (see https://github.com/fchollet/keras/pull/1991).
        # Grab the fork https://github.com/spyysalo/keras
        # to use this option.
        base_args['W_lr_multiplier'] = multiplier
    inputs, embeddings = [], []
    fixed = config.fixed_wordvecs
    for feature in features:
        kwargs = dict(base_args)
        if fixed:
            # A fixed embedding has no trainable weights and thus no
            # learning rate multiplier.
            kwargs.pop('W_lr_multiplier', None)
        input_layer, embedded = input_and_embedding(
            feature, config.window_size, fixed=fixed, **kwargs)
        inputs.append(input_layer)
        embeddings.append(embedded)
        # By convention, word vectors are the first (index 0) feature.
        # No other embedding features can be fixed.
        # TODO: generalize identification of word vectors.
        fixed = False
    return inputs, embeddings
def concat(inputs, concat_axis=-1, output_shape=None, name=None):
    """Concatenate tensors.

    Keras merge with mode='concat', plus support for the degenerate
    case of a single input.
    """
    if len(inputs) > 1:
        return merge(inputs, mode='concat', concat_axis=concat_axis,
                     output_shape=output_shape, name=name)
    # Degenerate case. TODO: handle output_shape and name.
    return inputs[0]
| 4,834 | 36.773438 | 82 | py |
TOV-VICReg | TOV-VICReg-main/tov_vicreg/dataset/dqn_dataset.py | import os
import gzip
from pathlib import Path
from typing import List
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from tov_vicreg.utils.pytorch_utils import *
class DQNReplayDataset(Dataset):
    """
    A dataset of observations from a one checkpoint of one game.
    It saves a tensor of dimension: (dataset_size, h, w)
    and given an index i returns a slice starting at i and
    ending in i plus a number of frames: (slice_size, h, w).
    The slice size should be equivalent to the number of frames stacked
    during the RL phase.
    In add adjacent mode the dataset returns three stacked observations
    the observation before i the observation i and the observation after i.
    (3, slice_size, h, w)
    """
    def __init__(
        self,
        data_path: Path,
        game: str,
        checkpoint: int,
        frames: int,
        max_size: int,
        transform: object,
        add_adjacent=False,
        adjacent_transform=None,
        actions=False,
        start_index=0,
    ) -> None:
        # When True, __getitem__ also returns the frame stacks
        # immediately before and after the requested index.
        self.add_adjacent = add_adjacent
        # Populated below only when `actions` is True.
        self.actions = None
        # Placeholder; replaced by the loaded observation tensor below.
        data = torch.tensor([])
        self.start_index = start_index
        # Observations are stored as one gzipped numpy array per checkpoint.
        filename = Path(data_path / f"{game}/observation_{checkpoint}.gz")
        # NOTE(review): f-string has no placeholder; presumably meant to
        # include `filename` in the message.
        print(f"Loading (unknown)")
        zipFile = gzip.GzipFile(filename=filename)
        loaded_data = np.load(zipFile)
        # Copy only the [start_index, start_index + max_size) slice so the
        # full loaded array can be freed immediately.
        loaded_data_capped = np.copy(
            loaded_data[self.start_index : self.start_index + max_size]
        )
        print(f"Using {loaded_data.size * loaded_data.itemsize} bytes")
        print(f"Shape {loaded_data.shape}")
        data = torch.from_numpy(loaded_data_capped)
        setattr(self, "observation", data)
        # Free the uncapped array and the file handle eagerly.
        del loaded_data
        del zipFile
        del loaded_data_capped
        if actions:
            # Actions are stored alongside observations, one per frame.
            actions_filename = Path(data_path / f"{game}/action_{checkpoint}.gz")
            actions_zipFile = gzip.GzipFile(filename=actions_filename)
            actions_loaded_data = np.load(actions_zipFile)
            actions_data_capped = np.copy(
                actions_loaded_data[self.start_index : self.start_index + max_size]
            )
            data = torch.from_numpy(actions_data_capped)
            setattr(self, "actions", data)
        # NOTE(review): when actions were loaded, `data` now refers to the
        # action tensor; observations and actions are assumed equally long.
        self.size = min(data.shape[0], max_size)
        self.game = game
        self.frames = frames
        # Number of valid starting indices for a full `frames`-long slice.
        self.effective_size = self.size - self.frames + 1
        self.transform = transform
        self.adjacent_transform = adjacent_transform
    def __len__(self):
        return self.effective_size
    def __getitem__(self, index: int) -> torch.Tensor:
        # Wrap out-of-range indices instead of raising.
        time_ind = index % self.effective_size
        if self.frames <= 1:
            # Single frame: no stacking.
            obs = self.observation[time_ind]
        else:
            # Stack of `frames` consecutive observations from time_ind.
            sl = slice(time_ind, time_ind + self.frames)
            obs = self.observation[sl]
        res_action = self.actions[time_ind] if self.actions is not None else 0
        res_obs = None
        if self.add_adjacent:
            # Neighboring stacks, clamped to the valid index range.
            before_index = max(0, index - self.frames)
            after_index = min(self.effective_size - 1, index + self.frames)
            if self.frames <= 1:
                before_obs = self.observation[before_index]
                after_obs = self.observation[after_index]
            else:
                before_slice = slice(before_index, before_index + self.frames)
                after_slice = slice(after_index, after_index + self.frames)
                before_obs = self.observation[before_slice]
                # now_obs = self.observation[sl]
                after_obs = self.observation[after_slice]
            if self.transform is not None:
                transformed_obs = self.transform(obs)
                # Normalize transform output to a list so the adjacent
                # stacks can be appended uniformly.
                if not isinstance(transformed_obs, (list, tuple)):
                    transformed_obs = [transformed_obs]
                if self.adjacent_transform is not None:
                    before_obs = self.adjacent_transform(before_obs)
                    # now_obs = self.adjacent_transform(now_obs)
                    after_obs = self.adjacent_transform(after_obs)
                # Result layout: [transformed(obs)..., before, after].
                transformed_obs.extend([before_obs, after_obs])
                res_obs = transformed_obs
            else:
                res_obs = [obs, before_obs, after_obs]
        else:
            res_obs = self.transform(obs) if self.transform is not None else obs
        return res_obs, res_action
class MultiDQNReplayDataset(Dataset):
    """
    This dataset corresponds to the concatenation of several DQNReplayDataset.
    Meaning that it contains several checkpoints from several games.
    """
    def __init__(
        self,
        data_path: Path,
        games: List[str],
        checkpoints: List[int],
        frames: int,
        max_size: int,
        transform: object,
        add_adjacent=False,
        adjacent_transform=None,
        actions=False,
        start_index=0,
    ) -> None:
        self.actions = actions
        self.n_checkpoints_per_game = len(checkpoints)
        self.add_adjacent = add_adjacent
        # One child dataset per (checkpoint, game) pair; note the ordering
        # is checkpoint-major: for each checkpoint, all games in a row.
        self.datasets = [
            DQNReplayDataset(
                data_path,
                game,
                ckpt,
                frames,
                max_size,
                transform,
                add_adjacent,
                adjacent_transform,
                actions,
                start_index,
            )
            for ckpt in checkpoints
            for game in games
        ]
        self.n_datasets = len(self.datasets)
        # All child datasets are assumed to have the same length.
        self.single_dataset_size = len(self.datasets[0])
    def get_seq_samples(self, seq_len, n_games):
        """Return a stacked tensor of seq_len consecutive observations
        (starting at a fixed offset) from one checkpoint of each of the
        first n_games games."""
        start_index = 100
        res = []
        for i in range(n_games):
            # Prefer the second checkpoint per game when several exist.
            # NOTE(review): this indexing assumes game-major ordering of
            # self.datasets, but the list above is built checkpoint-major
            # -- verify which ordering is intended.
            dataset_index = (
                i * self.n_checkpoints_per_game + 1
                if self.n_checkpoints_per_game > 1
                else i * self.n_checkpoints_per_game
            )
            for j in range(start_index, start_index + seq_len):
                datapoint, _ = self.datasets[dataset_index][j]
                if isinstance(
                    datapoint, (list, tuple)
                ):  # add_adjacent and transform might return lists
                    datapoint = datapoint[0]
                res.append(datapoint)
        return torch.stack(res)
    def __len__(self) -> int:
        return self.n_datasets * self.single_dataset_size
    def __getitem__(self, index: int) -> torch.Tensor:
        # Interleave child datasets: consecutive global indices cycle
        # through all datasets before advancing within each one.
        multidataset_index = index % self.n_datasets
        dataset_index = index // self.n_datasets
        res_obs, res_action = self.datasets[multidataset_index][dataset_index]
        return [res_obs, res_action]
def _get_DQN_Replay_loader(
    data_path: Path,
    games: List[str],
    checkpoints: List[int],
    frames: int,
    max_size_per_single_dataset: int,
    batch_size: int,
    num_workers: int,
    transform,
) -> DataLoader:
    """Build a MultiDQNReplayDataset and wrap it in a shuffling DataLoader.

    Incomplete final batches are dropped.
    """
    replay_dataset = MultiDQNReplayDataset(
        data_path, games, checkpoints, frames,
        max_size_per_single_dataset, transform,
    )
    return DataLoader(
        replay_dataset,
        batch_size=batch_size,
        shuffle=True,
        drop_last=True,
        num_workers=num_workers,
    )
def get_DQN_Replay_loader(
    dataset: Dataset,
    batch_size: int,
    num_workers: int,
    pin_memory: bool,
    drop_last: bool,
    sampler=None,
) -> DataLoader:
    """Wrap a dataset in a DataLoader.

    Shuffles only when no sampler is given, since DataLoader forbids
    combining shuffle=True with an explicit sampler.

    Args:
        dataset: the dataset to load from.
        batch_size: number of samples per batch.
        num_workers: number of loader worker processes.
        pin_memory: whether batches are copied into pinned (page-locked)
            memory. Bug fix: previously accepted but never forwarded.
        drop_last: whether to drop the final incomplete batch.
        sampler: optional sampler; replaces shuffling when given.
    """
    common = dict(
        batch_size=batch_size,
        drop_last=drop_last,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
    if sampler is None:
        return DataLoader(dataset, shuffle=True, **common)
    return DataLoader(dataset, sampler=sampler, **common)
def get_DQN_Replay_dataset(
    data_path=Path("/media/msgstorage/dqn"),
    games=["Alien"],
    checkpoints=["1"],
    frames=3,
    max_size_per_single_dataset=1000,
    transform=None,
) -> MultiDQNReplayDataset:
    """Convenience constructor for a MultiDQNReplayDataset with defaults."""
    return MultiDQNReplayDataset(
        data_path=data_path,
        games=games,
        checkpoints=checkpoints,
        frames=frames,
        max_size=max_size_per_single_dataset,
        transform=transform,
    )
| 8,283 | 30.142857 | 83 | py |
TOV-VICReg | TOV-VICReg-main/tov_vicreg/models/main.py | # inspired by: https://github.com/facebookresearch/moco-v3/blob/main/moco
import argparse
import math
import os
from pathlib import Path
import random
import shutil
import time
import warnings
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.models as torchvision_models
import tov_vicreg.models.builder as tov_vicreg_builder
import tov_vicreg.models.optimizer as tov_vicreg_optimizer
from tov_vicreg.dataset.dqn_dataset import MultiDQNReplayDataset, get_DQN_Replay_loader
from tov_vicreg.models.logger import Logger
torchvision_model_names = sorted(name for name in torchvision_models.__dict__
if name.islower() and not name.startswith("__")
and callable(torchvision_models.__dict__[name]))
model_names = ['vit_small', 'vit_base', 'vit_tiny', 'sgi_resnet', 'resnet'] + torchvision_model_names
parser = argparse.ArgumentParser(description='TOV-VICReg Pre-Training')
parser.add_argument('--output_dir', default=".", type=str, help='Path to save logs and checkpoints.')
parser.add_argument('--experiment_name', default=None, type=str, help='')
parser.add_argument('--save_only_final', action='store_true', help='')
parser.add_argument('--data', metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='vit_tiny',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet50)')
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers (default: 8)')
parser.add_argument('--epochs', default=10, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=1024, type=int,
metavar='N',
help='mini-batch size (default: 4096)')
parser.add_argument('--lr', '--learning-rate', default=0.6, type=float,
metavar='LR', help='initial (base) learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-6, type=float,
metavar='W', help='weight decay (default: 1e-6)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--image_size', default=84, type=int,
help='')
parser.add_argument('--mlp', default='1024-1024-1024', type=str,
help='hidden dimension in MLPs (default: 3 layers with 1024)')
# Loss
parser.add_argument("--sim-coeff", type=float, default=25.0,
help='Invariance regularization loss coefficient')
parser.add_argument("--std-coeff", type=float, default=25.0,
help='Variance regularization loss coefficient')
parser.add_argument("--temporal-coeff", type=float, default=1.0,
help='Variance regularization loss coefficient')
parser.add_argument("--cov-coeff", type=float, default=1.0,
help='Covariance regularization loss coefficient')
# vit specific configs:
parser.add_argument('--stop-grad-conv1', action='store_true',
help='stop-grad after first conv, or patch embedding')
parser.add_argument('--patch_size', default=16, type=int, help="""Size in pixels
of input square patches - default 16 (for 16x16 patches). Using smaller
values leads to better performance but requires more memory. Applies only
for ViTs (vit_tiny, vit_small and vit_base).""")
# other upgrades
parser.add_argument('--optimizer', default='lars', type=str,
choices=['lars', 'adamw'],
help='optimizer used (default: lars)')
parser.add_argument('--warmup-epochs', default=2, type=int, metavar='N',
help='number of warmup epochs')
parser.add_argument('--crop-min', default=0.08, type=float,
help='minimum scale for random cropping (default: 0.08)')
parser.add_argument('--tmp_data_path', default='/path/to/tmp/train/', type=str,
help='Please specify path to a directory for the tmp data.')
parser.add_argument("--dqn_games", nargs="+", default=["Breakout"])
parser.add_argument("--dqn_checkpoints", nargs="+", default=[1, 5])
parser.add_argument('--dqn_frames', type=int, default=3, help='Number of frames per observation')
parser.add_argument('--dqn_single_dataset_max_size', type=int, default=1000, help='Maximum size of a single dataset')
def main():
    """CLI entry point: parse args, prepare the output directory, optionally
    seed all RNGs for determinism, then launch training."""
    args = parser.parse_args()
    # All artifacts for this run live under <output_dir>/<experiment_name>.
    args.output_dir = os.path.join(args.output_dir, args.experiment_name)
    Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn(
            'You have chosen to seed training. '
            'This will turn on the CUDNN deterministic setting, '
            'which can slow down your training considerably! '
            'You may see unexpected behavior when restarting '
            'from checkpoints.'
        )
    train(args)
def train(args):
    """Run the full TOV-VICReg pre-training loop for ``args.epochs`` epochs.

    Builds the model, optimizer and data pipeline, optionally resumes from a
    checkpoint, trains one epoch at a time and saves the backbone encoder
    (every epoch, or only at the end when --save_only_final is set).

    Fix over the previous revision: a dead ImageNet-style
    ``transforms.Normalize`` assignment and a dead ``train_loader = None``
    (both immediately overwritten below) have been removed.
    """
    # create model
    print("=> creating model '{}'".format(args.arch))
    model = tov_vicreg_builder.TOVVICReg(args)
    # infer learning rate before changing batch size
    args.lr = args.lr * args.batch_size / 256
    if args.gpu is not None:
        print("Using GPU: {} for training".format(args.gpu))
        torch.cuda.set_device(args.gpu)
        model.cuda(args.gpu)
    # argparse restricts --optimizer to exactly these two choices.
    if args.optimizer == 'lars':
        optimizer = tov_vicreg_optimizer.LARS(model.parameters(), args.lr,
                                              weight_decay=args.weight_decay,
                                              momentum=args.momentum)
    elif args.optimizer == 'adamw':
        optimizer = torch.optim.AdamW(model.parameters(), args.lr,
                                      weight_decay=args.weight_decay)
    # Mixed-precision gradient scaler; the forward pass runs under autocast
    # inside train_one_epoch.
    scaler = torch.cuda.amp.GradScaler()
    logger = Logger(name=args.experiment_name, type="ssl_train", group="tov", args=vars(args))
    logger.plots["singular_values"] = []
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            scaler.load_state_dict(checkpoint['scaler'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    # Data loading code
    # follow BYOL's augmentation recipe: https://arxiv.org/abs/2006.07733
    normalize = transforms.ConvertImageDtype(torch.float) # DQN Replay uses 0-255 uint8 and the Transformer expects a float
    # View 1 of o_t: blur always applied.
    augmentation1 = [
        transforms.RandomResizedCrop(args.image_size, scale=(args.crop_min, 1.)),
        transforms.RandomApply([
            transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)
        ], p=0.8),
        transforms.RandomGrayscale(p=0.2),
        transforms.RandomApply([transforms.GaussianBlur((7, 7), sigma=(.1, .2))], p=1.0),
        transforms.RandomHorizontalFlip(),
        normalize
    ]
    # View 2 of o_t: rare blur plus random solarization.
    augmentation2 = [
        transforms.RandomResizedCrop(args.image_size, scale=(args.crop_min, 1.)),
        transforms.RandomApply([
            transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)
        ], p=0.8),
        transforms.RandomGrayscale(p=0.2),
        transforms.RandomApply([transforms.GaussianBlur((7, 7), sigma=(.1, .2))], p=0.1),
        transforms.RandomSolarize(120, p=0.2),
        transforms.RandomHorizontalFlip(),
        normalize
    ]
    # Lighter augmentation for the temporally adjacent frames (o_{t-1}, o_{t+1}).
    augmentation3 = [
        transforms.RandomApply([
            transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)
        ], p=0.8),
        transforms.RandomGrayscale(p=0.2),
        normalize
    ]
    train_dataset = MultiDQNReplayDataset(
        Path(args.data),
        args.dqn_games,
        args.dqn_checkpoints,
        args.dqn_frames,
        args.dqn_single_dataset_max_size,
        TwoCropsTransform(transforms.Compose(augmentation1),
                          transforms.Compose(augmentation2)),
        add_adjacent=True,
        adjacent_transform=transforms.Compose(augmentation3)
    )
    train_loader = get_DQN_Replay_loader(
        train_dataset,
        batch_size=args.batch_size,
        num_workers=args.workers,
        pin_memory=True,
        drop_last=True,
        sampler=None
    )
    for epoch in range(args.start_epoch, args.epochs):
        # train for one epoch
        train_one_epoch(train_loader, model, optimizer, scaler, logger, epoch, args)
        if epoch + 1 == args.epochs:
            save_encoder(model.backbone.state_dict(), os.path.join(args.output_dir, 'final_encoder.pth'))
        elif not args.save_only_final:
            save_encoder(model.backbone.state_dict(), os.path.join(args.output_dir, f'encoder_{epoch}.pth'))
    logger.close()
def train_one_epoch(train_loader, model, optimizer, scaler, logger, epoch, args):
    """Train *model* for one pass over *train_loader* with AMP and per-step LR.

    Each batch yields four views: two heavy augmentations of o_t plus lightly
    augmented o_{t-1} and o_{t+1}; the model computes the combined TOV-VICReg
    loss from them. Timing/loss meters are printed every args.print_freq steps.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    learning_rates = AverageMeter('LR', ':.4e')
    losses = AverageMeter('Loss', ':.4e')
    progress = ProgressMeter(
        len(train_loader),
        [batch_time, data_time, learning_rates, losses],
        prefix="Epoch: [{}]".format(epoch))
    # switch to train mode
    model.train()
    end = time.time()
    iters_per_epoch = len(train_loader)
    for i, (images, _) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        # adjust learning rate and momentum coefficient per iteration
        # (fractional epoch = epoch + i / iters_per_epoch gives a smooth schedule)
        lr = adjust_learning_rate(optimizer, epoch + i / iters_per_epoch, args)
        learning_rates.update(lr)
        if args.gpu is not None:
            images[0] = images[0].cuda(args.gpu, non_blocking=True) # o_t with augmentation 1
            images[1] = images[1].cuda(args.gpu, non_blocking=True) # o_t with augmentation 2
            images[2] = images[2].cuda(args.gpu, non_blocking=True) # o_t-1 with augmentation 3
            images[3] = images[3].cuda(args.gpu, non_blocking=True) # o_t+1 with augmentation 3
        # compute output
        with torch.cuda.amp.autocast(True):
            loss = model(images[0], images[1], images[2], images[3])
        losses.update(loss.item(), images[0].size(0))
        # model.log holds the individual loss terms set by TOVVICReg.forward.
        log_info = {"loss": loss.item(), **model.log}
        logger.log_step(log_info)
        # compute gradient and do SGD step
        # (scaler handles loss scaling / unscaling around the optimizer step)
        optimizer.zero_grad()
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.display(i)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize *state* to *filename* via torch.save.

    When *is_best* is true the file is additionally copied to
    'model_best.pth.tar' in the current working directory.
    """
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
def save_encoder(state, filename='final_encoder.pth'):
    """Persist an encoder state-dict (or any picklable object) to *filename*."""
    torch.save(state, filename)
class TwoCropsTransform:
    """Apply two independent transform pipelines to one image.

    Calling the instance returns ``[t1(x), t2(x)]`` so downstream code gets
    two differently-augmented views of the same source image.
    """

    def __init__(self, base_transform1, base_transform2):
        self.base_transform1 = base_transform1
        self.base_transform2 = base_transform2

    def __call__(self, x):
        return [self.base_transform1(x), self.base_transform2(x)]
class AverageMeter(object):
    """Track the latest value and a running average of a scalar metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. with fmt=':.2f' -> 'name 1.23 (1.11)'
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Pretty-print a batch counter plus a list of AverageMeter values."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print one tab-separated progress line, e.g. ``Epoch: [0][ 10/391]\\t...``."""
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        # Width of the counter field = number of digits in the batch total.
        # (The previous revision computed len(str(num_batches // 1)); the
        # floor-division by one was a no-op and has been dropped.)
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Set the LR on every param group: linear warmup, then half-cycle cosine.

    *epoch* may be fractional (epoch + step/steps_per_epoch) for a smooth
    per-iteration schedule. Returns the learning rate that was applied.
    """
    if epoch < args.warmup_epochs:
        # Linear ramp from 0 up to args.lr over the warmup period.
        lr = args.lr * epoch / args.warmup_epochs
    else:
        # Cosine anneal from args.lr down to 0 over the remaining epochs.
        progress = (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs)
        lr = args.lr * 0.5 * (1. + math.cos(math.pi * progress))
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
# Script entry point: parse CLI args and launch pre-training.
if __name__ == '__main__':
    main()
| 14,600 | 38.462162 | 123 | py |
TOV-VICReg | TOV-VICReg-main/tov_vicreg/models/load_models.py | import os
import torch
from torch import nn
import torchvision.models as torchvision_models
import tov_vicreg.models.networks.vit as vits
from tov_vicreg.models.networks.resnet import ResnetCNN
import tov_vicreg.utils.pytorch_utils as pytorch_utils
def load_model(arch, pretrained_weights=None, patch_size=8, num_classes=0, freeze=False, n_channels=3, input_shape=(1, 3, 84, 84)):
    """Build a backbone by architecture name and optionally load weights.

    Supported ``arch`` values: ``vit_*`` (from the local vits module),
    ``sgi_resnet``, ``rainbow_cnn``/``canonical``, ``data-efficient``/``der``,
    ``linear``, or any torchvision model name.

    For architectures that do not set ``num_features`` themselves (everything
    except ViTs and ``linear``), it is inferred by a no-grad dummy forward
    pass with ``input_shape``.

    BUG FIX: the feature-size probe previously used
    ``if "vit" not in arch or "linear" not in arch:`` — with ``or`` the
    condition was true for every arch, so ``linear`` (which expects 4-dim
    inputs) crashed on the (1, 3, 84, 84) dummy tensor. Changed to ``and``.

    When *freeze* is true all parameters have requires_grad disabled.
    *pretrained_weights* of ``None`` or ``"_"`` keeps random initialization;
    otherwise the checkpoint is normalized (DINO/MOCOv3/prefixed state dicts)
    and loaded non-strictly.
    """
    if arch.startswith("vit_"):
        model = vits.__dict__[arch](patch_size=patch_size, num_classes=num_classes)
    elif arch == "sgi_resnet":
        model = ResnetCNN(input_channels=n_channels, depths=[64, 128, 256, 512], strides=[2, 2, 2, 2])
    elif arch == "rainbow_cnn" or arch == "canonical":
        # Classic DQN/Rainbow convolutional trunk.
        model = nn.Sequential(
            nn.Conv2d(n_channels, 32, 8, stride=4, padding=0),
            nn.ReLU(),
            nn.Conv2d(32, 64, 4, stride=2, padding=0),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, stride=1, padding=0),
            nn.ReLU(),
            nn.Flatten()
        )
    elif arch == "data-efficient" or arch == "der":
        model = nn.Sequential(
            nn.Conv2d(n_channels, 32, 5, stride=5, padding=0),
            nn.ReLU(),
            nn.Conv2d(32, 64, 5, stride=5, padding=0),
            nn.ReLU(),
            nn.Flatten()
        )
    elif arch == "linear":
        model = nn.Sequential(nn.Linear(4, 128), nn.ReLU(),)
        model.num_features = 128
    else:
        model = torchvision_models.__dict__[arch]()
    if "vit" not in arch and "linear" not in arch:
        # Infer the flattened feature size with a dummy forward pass; ViTs
        # already expose num_features (embed_dim) and "linear" sets it above.
        with torch.no_grad():
            model.num_features = model(torch.ones(input_shape)).shape[1]
    for p in model.parameters():
        p.requires_grad = not freeze
    if pretrained_weights is None or pretrained_weights == "_":
        print(f"No pretrained weights provided, using random {arch}")
        return model
    if os.path.isfile(pretrained_weights):
        state_dict = torch.load(pretrained_weights, map_location="cpu")
        if "teacher" in state_dict.keys():  # DINO
            state_dict = state_dict["teacher"]
        elif "state_dict" in state_dict.keys():  # MOCOv3
            state_dict = state_dict["state_dict"]
            for k in list(state_dict.keys()):
                # retain only base_encoder up to before the embedding layer
                if k.startswith("base_encoder") and not k.startswith(
                    "base_encoder.head"
                ):
                    # remove prefix
                    state_dict[k[len("base_encoder.") :]] = state_dict[k]
                # delete renamed or unused k
                del state_dict[k]
        elif list(state_dict.keys())[0].startswith("vit."):
            for k in list(state_dict.keys()):
                if k.startswith("vit") and not k.startswith("vit.head"):
                    # remove prefix
                    state_dict[k[len("vit.") :]] = state_dict[k]
                # delete renamed or unused k
                del state_dict[k]
        elif list(state_dict.keys())[0].startswith("encoder."):
            for k in list(state_dict.keys()):
                if k.startswith("encoder") and not k.startswith("encoder.head"):
                    # remove prefix
                    state_dict[k[len("encoder.") :]] = state_dict[k]
                # delete renamed or unused k
                del state_dict[k]
        # remove `module.` prefix
        state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
        # remove `backbone.` prefix induced by multicrop wrapper
        state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
        # remove head
        for k in list(state_dict.keys()):
            if k.startswith("head."):
                del state_dict[k]
        msg = model.load_state_dict(state_dict, strict=False)
        print(
            "Pretrained weights found at {} and loaded with msg: {}".format(
                pretrained_weights, msg
            )
        )
    else:
        # NOTE(review): FileExistsError is semantically odd here
        # (FileNotFoundError would fit better), but it is kept so existing
        # callers that catch it keep working.
        raise FileExistsError("Can't find file with pretrained weights")
    return model
| 4,059 | 39.19802 | 131 | py |
TOV-VICReg | TOV-VICReg-main/tov_vicreg/models/builder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from tov_vicreg.models.load_models import load_model
class TOVVICReg(nn.Module):
    """VICReg with an added Temporal Order Verification (TOV) loss.

    The backbone produces representations; a projector MLP maps them to
    embeddings for the VICReg terms; a linear head predicts whether a triplet
    of consecutive representations was shuffled. Per-term losses from the
    last forward pass are stored in ``self.log`` for external logging.
    """
    def __init__(self, args):
        super().__init__()
        self.args = args
        self.log = {}
        self.backbone = load_model(args.arch, patch_size=args.patch_size)
        # Backbone feature width and projector output width (last MLP layer).
        self.representation_size = self.backbone.num_features
        self.embedding_size = int(args.mlp.split("-")[-1])
        self.projector = Projector(args, self.representation_size)
        # All 6 permutations of a 3-frame sequence; index 0 is the correct
        # temporal order.
        self.combinations = torch.tensor([[0, 1, 2],
                                          [0, 2, 1],
                                          [1, 0, 2],
                                          [1, 2, 0],
                                          [2, 0, 1],
                                          [2, 1, 0]])
        # Binary classifier over the concatenated triplet of representations.
        self.temporal_order_predictor = nn.Sequential(
            nn.Linear(self.representation_size * 3, 1)
        )
        self.temporal_order_loss = nn.BCEWithLogitsLoss(reduction='mean')
    def covariance_loss(self, cov):
        # Sum of squared off-diagonal covariance entries, scaled by the
        # embedding dimension (VICReg covariance term).
        return off_diagonal(cov).pow_(2).sum().div(
            self.embedding_size
        )
    def forward(self, x, y, z, w):
        # x and y are augmentations of the same source X_t
        # z, w and u are augmentations of a source X_{t-1}, X_{t} and X_{t+1}, respectively
        repr_x = self.backbone(x)
        repr_y = self.backbone(y)
        repr_z = self.backbone(z)
        repr_w = self.backbone(w)
        # repr_u = self.backbone(u)
        x = self.projector(repr_x)
        y = self.projector(repr_y)
        # NOTE(review): the projected z/w below (and their centering further
        # down) are never used in any loss term; removing them would also
        # change BatchNorm running statistics inside the projector — confirm
        # before cleaning up.
        z = self.projector(repr_z)
        w = self.projector(repr_w)
        # u = self.projector(repr_u)
        # VICReg invariance term between the two views of X_t.
        repr_loss = F.mse_loss(x, y)
        x = x - x.mean(dim=0)
        y = y - y.mean(dim=0)
        z = z - z.mean(dim=0)
        w = w - w.mean(dim=0)
        # u = u - u.mean(dim=0)
        std_x = torch.sqrt(x.var(dim=0) + 0.0001)
        std_y = torch.sqrt(y.var(dim=0) + 0.0001)
        cov_x = (x.T @ x) / (self.args.batch_size - 1)
        cov_y = (y.T @ y) / (self.args.batch_size - 1)
        # Target standard deviation for the variance (hinge) term.
        gamma = 1
        # randomly select a combination for each sample
        shuffle_labels = torch.randint(0, self.combinations.shape[0], (self.args.batch_size,))
        # get combination selected
        shuffle_indexes = self.combinations[shuffle_labels].cuda(self.args.gpu)
        # change combination label to binary (correct order or not)
        # (label 0 == identity permutation -> class 0; any other -> class 1)
        shuffle_binary_labels = torch.where(shuffle_labels == 0, shuffle_labels, 1).view(-1, 1).type(torch.float).cuda(self.args.gpu)
        # concatenate the representation in correct order
        temporal_samples = torch.cat([repr_x.unsqueeze(1), repr_z.unsqueeze(1), repr_w.unsqueeze(1)], dim=1)
        # Shuffle representations according to the combination selected
        x_1_indexes = shuffle_indexes[:, 0].view(-1, 1).repeat(1, self.representation_size).view(self.args.batch_size, 1, self.representation_size)
        x_1 = torch.gather(temporal_samples, 1, x_1_indexes).squeeze(1)
        x_2_indexes = shuffle_indexes[:, 1].view(-1, 1).repeat(1, self.representation_size).view(self.args.batch_size, 1, self.representation_size)
        x_2 = torch.gather(temporal_samples, 1, x_2_indexes).squeeze(1)
        x_3_indexes = shuffle_indexes[:, 2].view(-1, 1).repeat(1, self.representation_size).view(self.args.batch_size, 1, self.representation_size)
        x_3 = torch.gather(temporal_samples, 1, x_3_indexes).squeeze(1)
        shuffled_concat = torch.concat([x_1, x_2, x_3], dim=1)
        # End of shuffle
        # Predict binary classification
        pred_temporal_class = self.temporal_order_predictor(shuffled_concat)
        # Compute temporal loss
        temporal_loss = self.temporal_order_loss(pred_temporal_class, shuffle_binary_labels)
        std_loss = torch.mean(F.relu(gamma - std_x)) / 2 + \
                   torch.mean(F.relu(gamma - std_y)) / 2
        cov_loss = self.covariance_loss(cov_x) + \
                   self.covariance_loss(cov_y)
        self.log = {
            "temporal_loss": temporal_loss,
            "invariance_loss": repr_loss,
            "variance_loss": std_loss,
            "covariance_loss": cov_loss
        }
        # Weighted sum of the four terms (coefficients come from the CLI).
        loss = (
            self.args.sim_coeff * repr_loss
            + self.args.temporal_coeff * temporal_loss
            + self.args.std_coeff * std_loss
            + self.args.cov_coeff * cov_loss
        )
        return loss
def Projector(args, embedding):
    """Build the VICReg projector MLP.

    Layer widths come from "<embedding>-<args.mlp>", e.g. embedding=192 with
    args.mlp="1024-1024-1024" gives 192->1024->1024->1024. Hidden layers use
    Linear + BatchNorm1d + ReLU; the final Linear has no bias.
    """
    dims = [int(d) for d in f"{embedding}-{args.mlp}".split("-")]
    modules = []
    for in_dim, out_dim in zip(dims[:-2], dims[1:-1]):
        modules += [nn.Linear(in_dim, out_dim), nn.BatchNorm1d(out_dim), nn.ReLU(True)]
    modules.append(nn.Linear(dims[-2], dims[-1], bias=False))
    return nn.Sequential(*modules)
def exclude_bias_and_norm(p):
    """True for 1-D tensors (biases / norm scales), which LARS-style
    adaptation and weight decay typically skip."""
    return p.ndim == 1
def off_diagonal(x):
    """Return all off-diagonal entries of a square matrix as a flat tensor."""
    rows, cols = x.shape
    assert rows == cols
    # Dropping the last element and viewing as (n-1, n+1) lines every
    # diagonal entry up in column 0, so slicing that column away leaves
    # exactly the off-diagonal values.
    return x.flatten()[:-1].view(rows - 1, rows + 1)[:, 1:].flatten()
| 5,048 | 36.4 | 147 | py |
TOV-VICReg | TOV-VICReg-main/tov_vicreg/models/optimizer.py | # from: https://github.com/facebookresearch/moco-v3/blob/main/moco/optimizer.py
import torch
class LARS(torch.optim.Optimizer):
    """
    LARS optimizer, no rate scaling or weight decay for parameters <= 1D.
    """
    def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, trust_coefficient=0.001):
        defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum, trust_coefficient=trust_coefficient)
        super().__init__(params, defaults)
    @torch.no_grad()
    def step(self):
        """Apply one LARS update to every parameter with a gradient."""
        for g in self.param_groups:
            for p in g['params']:
                dp = p.grad
                if dp is None:
                    continue
                if p.ndim > 1: # if not normalization gamma/beta or bias
                    # Add decoupled weight decay, then scale the update by the
                    # layer-wise trust ratio ||p|| / ||dp|| (guarded against
                    # zero norms, where q falls back to 1).
                    dp = dp.add(p, alpha=g['weight_decay'])
                    param_norm = torch.norm(p)
                    update_norm = torch.norm(dp)
                    one = torch.ones_like(param_norm)
                    q = torch.where(param_norm > 0.,
                                    torch.where(update_norm > 0,
                                    (g['trust_coefficient'] * param_norm / update_norm), one),
                                    one)
                    dp = dp.mul(q)
                # Heavy-ball momentum buffer, lazily created per parameter.
                param_state = self.state[p]
                if 'mu' not in param_state:
                    param_state['mu'] = torch.zeros_like(p)
                mu = param_state['mu']
                mu.mul_(g['momentum']).add_(dp)
                p.add_(mu, alpha=-g['lr'])
| 1,524 | 37.125 | 113 | py |
TOV-VICReg | TOV-VICReg-main/tov_vicreg/models/networks/resnet.py | from torch import nn
import numpy as np
def fixup_init(layer, num_layers):
    """Fixup-style init: fan-out-scaled normal draw, shrunk by num_layers**-0.25.

    Mutates ``layer.weight`` in place.
    """
    std = np.sqrt(
        2 / (layer.weight.shape[0] * np.prod(layer.weight.shape[2:]))) * num_layers ** (-0.25)
    nn.init.normal_(layer.weight, mean=0, std=std)
class InvertedResidual(nn.Module):
    """MobileNetV2-style inverted-residual block with Fixup-scaled init and
    optional per-sample stochastic depth (via ``drop_prob``).

    The residual identity is the input itself for stride 1, or a 3x3 strided
    conv projection when stride != 1.
    """
    def __init__(self, in_channels, out_channels, stride, expand_ratio,
                 norm_type, num_layers=1, groups=-1,
                 drop_prob=0., bias=True):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2, 3]
        self.drop_prob = drop_prob
        hidden_dim = round(in_channels * expand_ratio)
        # groups <= 0 requests a fully depthwise middle conv.
        if groups <= 0:
            groups = hidden_dim
        conv = nn.Conv2d
        if stride != 1:
            # Strided projection so the identity path matches the output shape.
            self.downsample = nn.Conv2d(in_channels, out_channels, 3, stride, 1)
            nn.init.normal_(self.downsample.weight, mean=0, std=
                            np.sqrt(2 / (self.downsample.weight.shape[0] *
                            np.prod(self.downsample.weight.shape[2:]))))
        else:
            self.downsample = False
        if expand_ratio == 1:
            # No expansion: depthwise 3x3 followed by pointwise linear.
            conv1 = conv(hidden_dim, hidden_dim, 3, stride, 1, groups=groups, bias=bias)
            conv2 = conv(hidden_dim, out_channels, 1, 1, 0, bias=bias)
            fixup_init(conv1, num_layers)
            fixup_init(conv2, num_layers)
            self.conv = nn.Sequential(
                # dw
                conv1,
                init_normalization(hidden_dim, norm_type),
                nn.ReLU(inplace=True),
                # pw-linear
                conv2,
                init_normalization(out_channels, norm_type),
            )
            # Zero-init the final norm scale so the block starts as identity.
            # NOTE(review): unlike the branch below, this is not guarded by
            # norm_type != "none" — with "none" the last module is Identity,
            # which has no .weight; confirm "none" is never used here.
            nn.init.constant_(self.conv[-1].weight, 0)
        else:
            # Expand (pw) -> depthwise 3x3 -> project (pw-linear).
            conv1 = conv(in_channels, hidden_dim, 1, 1, 0, bias=bias)
            conv2 = conv(hidden_dim, hidden_dim, 3, stride, 1, groups=groups, bias=bias)
            conv3 = conv(hidden_dim, out_channels, 1, 1, 0, bias=bias)
            fixup_init(conv1, num_layers)
            fixup_init(conv2, num_layers)
            fixup_init(conv3, num_layers)
            self.conv = nn.Sequential(
                # pw
                conv1,
                init_normalization(hidden_dim, norm_type),
                nn.ReLU(inplace=True),
                # dw
                conv2,
                init_normalization(hidden_dim, norm_type),
                nn.ReLU(inplace=True),
                # pw-linear
                conv3,
                init_normalization(out_channels, norm_type)
            )
            if norm_type != "none":
                nn.init.constant_(self.conv[-1].weight, 0)
    def forward(self, x):
        if self.downsample:
            identity = self.downsample(x)
        else:
            identity = x
        # Stochastic depth: during training, with probability drop_prob the
        # residual branch is skipped entirely (uses numpy RNG, not torch).
        if self.training and np.random.uniform() < self.drop_prob:
            return identity
        else:
            return identity + self.conv(x)
class ResnetCNN(nn.Module):
    """Stack of inverted-residual stages followed by a Flatten.

    Each entry of *depths*/*strides* defines one stage of *blocks_per_group*
    residual blocks; only the first block of a stage downsamples (its stride
    comes from *strides*), the rest use stride 1.

    Fix over the previous revision: *depths* and *strides* used mutable list
    literals as default arguments (a shared-state pitfall); they now default
    to ``None`` and fall back to the same values. Callers may also pass
    tuples, which are copied to lists.
    """

    def __init__(self, input_channels=3,
                 depths=None,
                 strides=None,
                 blocks_per_group=3,
                 norm_type="bn",
                 resblock=InvertedResidual,
                 expand_ratio=2):
        super(ResnetCNN, self).__init__()
        depths = [32, 64, 64] if depths is None else list(depths)
        strides = [3, 2, 2] if strides is None else list(strides)
        self.depths = [input_channels] + depths
        self.resblock = resblock
        self.expand_ratio = expand_ratio
        self.blocks_per_group = blocks_per_group
        self.layers = []
        self.norm_type = norm_type
        # Total residual blocks, used by fixup-style init inside the blocks.
        self.num_layers = self.blocks_per_group * len(depths)
        for i in range(len(depths)):
            self.layers.append(self._make_layer(self.depths[i],
                                                self.depths[i + 1],
                                                strides[i],
                                                ))
        self.layers.append(nn.Flatten())
        self.layers = nn.Sequential(*self.layers)
        self.train()

    def _make_layer(self, in_channels, depth, stride):
        """One stage: a strided block followed by blocks_per_group - 1
        unit-stride blocks at the same depth."""
        blocks = [self.resblock(in_channels, depth,
                                expand_ratio=self.expand_ratio,
                                stride=stride,
                                norm_type=self.norm_type,
                                num_layers=self.num_layers,)]
        for _ in range(1, self.blocks_per_group):
            blocks.append(self.resblock(depth, depth,
                                        expand_ratio=self.expand_ratio,
                                        stride=1,
                                        norm_type=self.norm_type,
                                        num_layers=self.num_layers,))
        return nn.Sequential(*blocks)

    @property
    def local_layer_depth(self):
        # Channel count of the penultimate stage's output.
        return self.depths[-2]

    def forward(self, inputs):
        return self.layers(inputs)
def init_normalization(channels, type="bn", affine=True, one_d=False):
    """Create a normalization layer by short name.

    Supported: "bn" (BatchNorm1d/2d depending on *one_d*), "ln" (LayerNorm
    for 1-D, GroupNorm with 1 group for conv maps), "in" (instance norm via
    per-channel GroupNorm), "gn" (GroupNorm with up to 32 groups), and
    "none"/None (Identity).

    Fix over the previous revision: "max" passed the assert but had no
    branch, so the function silently returned None; it now fails loudly.
    """
    assert type in ["bn", "ln", "in", "gn", "max", "none", None]
    if type == "bn":
        if one_d:
            return nn.BatchNorm1d(channels, affine=affine)
        return nn.BatchNorm2d(channels, affine=affine)
    if type == "ln":
        if one_d:
            return nn.LayerNorm(channels, elementwise_affine=affine)
        # GroupNorm with a single group normalizes over (C, H, W), i.e.
        # layer norm for conv feature maps.
        return nn.GroupNorm(1, channels, affine=affine)
    if type == "in":
        # One group per channel == instance normalization.
        return nn.GroupNorm(channels, channels, affine=affine)
    if type == "gn":
        groups = max(min(32, channels // 4), 1)
        return nn.GroupNorm(groups, channels, affine=affine)
    if type == "none" or type is None:
        return nn.Identity()
    raise NotImplementedError(f"normalization type {type!r} is not implemented")
TOV-VICReg | TOV-VICReg-main/tov_vicreg/models/networks/vit.py | import math
from functools import partial
import torch
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    """Public wrapper around the no-grad truncated-normal initializer."""
    return _no_grad_trunc_normal_(tensor, mean=mean, std=std, a=a, b=b)
def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Stochastic depth: randomly zero whole samples, rescaling survivors.

    At eval time (or with drop_prob == 0) the input passes through unchanged.
    """
    if not training or drop_prob == 0.:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over the remaining dims
    # (works with any tensor rank, not just 2D ConvNet activations).
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = torch.rand(mask_shape, dtype=x.dtype, device=x.device).add_(keep_prob).floor_()
    return x.div(keep_prob) * mask
class DropPath(nn.Module):
    """Module wrapper around :func:`drop_path` (stochastic depth per sample
    applied in the main path of residual blocks)."""

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        # Active only while the module is in training mode.
        return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
    """Transformer MLP: fc1 -> activation -> dropout -> fc2 -> dropout."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Unspecified widths fall back to the input width.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """Multi-head self-attention that returns both the output and the
    (post-dropout) attention map."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default scale is 1/sqrt(head_dim); qk_scale overrides it.
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        head_dim = C // self.num_heads
        # Joint q/k/v projection, split into heads: each (B, heads, N, head_dim).
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
        scores = (q @ k.transpose(-2, -1)) * self.scale
        attn = self.attn_drop(scores.softmax(dim=-1))
        out = (attn @ v).transpose(1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(out)), attn
class Block(nn.Module):
    """Pre-norm transformer block: attention and MLP, each on a residual path
    with optional stochastic depth."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # Identity when no stochastic depth is requested.
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)

    def forward(self, x, return_attention=False):
        attn_out, attn_map = self.attn(self.norm1(x))
        if return_attention:
            # Caller wants the attention map itself, not the block output.
            return attn_map
        x = x + self.drop_path(attn_out)
        return x + self.drop_path(self.mlp(self.norm2(x)))
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding

    Splits an image into non-overlapping patches with a strided conv and
    flattens them into a (B, num_patches, embed_dim) token sequence.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = (img_size // patch_size) ** 2
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        # (B, C, H, W) -> (B, embed_dim, H/p, W/p) -> (B, num_patches, embed_dim)
        return self.proj(x).flatten(2).transpose(1, 2)
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(
img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
B, nc, w, h = x.shape
x = self.patch_embed(x) # patch linear embedding
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward(self, x):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return self.head(x[:, 0])
def get_last_selfattention(self, x):
    """Run the network and return the self-attention map of the final block.

    Earlier blocks are applied normally; the last block is asked for its
    attention tensor instead of its output tokens.
    """
    x = self.prepare_tokens(x)
    for i, blk in enumerate(self.blocks):
        if i < len(self.blocks) - 1:
            x = blk(x)
        else:
            # return attention of the last block
            return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
    """Return the layer-normalised token outputs of the last ``n`` blocks."""
    x = self.prepare_tokens(x)
    # we return the output tokens from the `n` last blocks
    output = []
    for i, blk in enumerate(self.blocks):
        x = blk(x)
        if len(self.blocks) - i <= n:
            output.append(self.norm(x))
    return output
def vit_tiny(patch_size=16, **kwargs):
    """ViT-Tiny factory: embed dim 192, depth 12, 3 heads; extra kwargs forwarded."""
    return VisionTransformer(
        patch_size=patch_size,
        embed_dim=192,
        depth=12,
        num_heads=3,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs)
def vit_small(patch_size=16, **kwargs):
    """ViT-Small factory: embed dim 384, depth 12, 6 heads; extra kwargs forwarded."""
    return VisionTransformer(
        patch_size=patch_size,
        embed_dim=384,
        depth=12,
        num_heads=6,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs)
def vit_base(patch_size=16, **kwargs):
    """ViT-Base factory: embed dim 768, depth 12, 12 heads; extra kwargs forwarded."""
    return VisionTransformer(
        patch_size=patch_size,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs)
| 10,503 | 37.057971 | 112 | py |
TOV-VICReg | TOV-VICReg-main/tov_vicreg/utils/experiments_utils.py | from collections import deque
import random
from pathlib import Path
import numpy as np
import torch
from torchvision import transforms
import torch.nn.functional as F
from torch.utils.data import DataLoader
from sklearn.manifold import TSNE, Isomap
import matplotlib.pyplot as plt
from matplotlib import cm, colors as matplot_colors
from dataset.dqn_dataset import MultiDQNReplayDataset, _get_DQN_Replay_loader
def is_flicker_frame(obs):
    """Return True when more than 99% of *obs*'s entries are non-zero.

    Used to filter out Atari "flicker" frames before collecting data points.

    BUG FIX: the original computed the ratio comparison but never returned it,
    so the function always returned None (falsy) and every frame passed the
    ``not is_flicker_frame(...)`` checks in the callers.
    """
    threshold = 0.99
    return bool(torch.count_nonzero(obs) / torch.numel(obs) > threshold)
def get_datapoints(data_loaders, n_datapoints_per_game):
    """Collect up to *n_datapoints_per_game* random observations per loader.

    Flicker frames are skipped, so a loader may contribute fewer than the
    requested number of points.  Returns all collected observations
    concatenated and moved to the GPU.

    IDIOM FIX: the original drew unique random indices with a rejection loop
    (`int(random.random()*len)` until enough distinct values); `random.sample`
    does the same in one call and raises ValueError instead of hanging when a
    loader is shorter than the request.
    """
    data_points = []
    for data_loader in data_loaders:
        random_indexes = set(random.sample(range(len(data_loader)),
                                           n_datapoints_per_game))
        n = 0
        for i, obj in enumerate(data_loader):
            if i in random_indexes:
                n += 1
                if not is_flicker_frame(obj[0]):
                    data_points.append(obj[0])
            # stop once the quota of sampled indices has been visited
            if n >= n_datapoints_per_game or i >= len(data_loader):
                break
    return torch.cat(data_points).cuda()
def get_sequential_datapoints(datasets, start_indx, n_datapoints_per_game, skip=0):
    """Collect evenly strided frames from each dataset, starting at *start_indx*.

    Visits offsets 0, skip+1, 2*(skip+1), ... (n_datapoints_per_game strides),
    keeps only offsets divisible by 3 that are not flicker frames, and returns
    the stacked frames on the GPU.
    """
    stride = skip + 1
    picked = []
    for dataset in datasets:
        for offset in range(0, n_datapoints_per_game * stride, stride):
            if offset % 3 == 0:
                frame = dataset[offset + start_indx][0]
                if not is_flicker_frame(frame):
                    picked.append(frame)
    return torch.stack(picked).cuda()
def get_sequence_similarities(datapoints, sequences_size, n_games, n_datapoints_per_game):
    """Per-game lists of cosine similarities between consecutive datapoints.

    *datapoints* is laid out as n_games contiguous groups of
    n_datapoints_per_game entries; for each game the first sequences_size
    entries are walked pairwise.

    BUG FIX: the original only appended a game's similarity list when the NEXT
    game started, so the final game's similarities were silently dropped; the
    trailing append below restores them.
    """
    seq = []
    game_seq = []
    game = -1
    prev = None
    for i in range(sequences_size * n_games):
        if i % sequences_size == 0:
            # a new game starts: flush the previous game's similarities
            if i > 1:
                seq.append(game_seq)
            game_seq = []
            game += 1
            prev = None
        curr = datapoints[i % n_datapoints_per_game + n_datapoints_per_game * game]
        if prev is not None:
            game_seq.append(calculate_cosine_similarity(prev, curr))
        prev = curr
    if game_seq:
        seq.append(game_seq)
    return seq
def get_data_loaders(games):
    """One DQN-replay loader per game (checkpoint "1", 10000 frames each)."""
    to_float = transforms.Compose([
        transforms.ConvertImageDtype(torch.float)
    ])
    root = Path("/media/msgstorage/dqn")
    return [
        _get_DQN_Replay_loader(root, [game], ["1"], 3, 10000, 1, 1, to_float)
        for game in games
    ]
def get_datasets(games, size=10000, actions=False, checkpoint="1"):
    """One MultiDQNReplayDataset per game, with frames converted to float."""
    to_float = transforms.Compose([
        transforms.ConvertImageDtype(torch.float)
    ])
    root = Path("/media/msgstorage/dqn")
    return [
        MultiDQNReplayDataset(root, [game], [checkpoint], 3, size, to_float,
                              actions=actions)
        for game in games
    ]
def calculate_cosine_similarity(a, b):
    """Cosine similarity of two equally-shaped tensors along the last dim, as a float."""
    assert a.shape == b.shape
    a_unit = F.normalize(a, dim=-1)
    b_unit = F.normalize(b, dim=-1)
    return torch.sum(a_unit * b_unit, dim=-1).item()
def create_similarity_matrix(representations, min=False):
    """Pairwise cosine-similarity matrix (list of lists).

    With ``min=True`` the strict lower triangle is filled with -1.0 instead of
    the actual similarity.
    """
    n_points = len(representations)
    return [
        [
            -1.0 if (min and j < i)
            else calculate_cosine_similarity(representations[i], representations[j])
            for j in range(n_points)
        ]
        for i in range(n_points)
    ]
def calculate_tsne(representations):
    """2-D t-SNE embedding (PCA init, 300 iterations, auto learning rate)."""
    return TSNE(n_components=2, verbose=1, n_iter=300,
                learning_rate='auto', init='pca').fit_transform(representations)
def calculate_umap(representations):
    """2-D UMAP embedding with default parameters.

    BUG FIX: this module never imports ``umap`` at file level, so calling this
    helper raised NameError.  A lazy local import fixes the name lookup and
    keeps the heavy dependency optional for callers that never use UMAP.
    """
    import umap
    return umap.UMAP().fit_transform(representations)
def calculate_isomap(representations):
    """2-D Isomap embedding over a 10-nearest-neighbour graph."""
    return Isomap(n_components=2, n_neighbors=10).fit_transform(representations)
def plot_tsne(tsne_results, sources_names=[""]):
    """Scatter a 2-D embedding, one colour per source, and show the figure.

    *tsne_results* is assumed to hold equal-sized contiguous chunks, one per
    entry of *sources_names*.

    BUG FIXES: the original sliced ``step*i : step*(i+1)+1`` (consecutive
    sources overlapped by one point) and passed ``c=i`` — a bare scalar, which
    matplotlib rejects as a colour specification.  Slices are now disjoint and
    each source uses an explicit colour-cycle colour.
    """
    n_sources = len(sources_names)
    step = tsne_results.shape[0] // n_sources
    fig, ax = plt.subplots(figsize=(15, 7))
    for i in range(n_sources):
        chunk = tsne_results[step * i:step * (i + 1)]
        ax.scatter(chunk[:, 0], chunk[:, 1],
                   label=f"{sources_names[i]}", color=f"C{i % 10}")
    ax.legend()
    plt.show()
def get_2d_plot(tsne_results, sources_names=[""]):
    """Return a figure scattering equal-sized per-source chunks of a 2-D embedding.

    Each source gets a colour from the GnBu colormap, normalised over the
    source index range.
    """
    n_sources = len(sources_names)
    # points per source; assumes the embedding splits evenly across sources
    step = tsne_results.shape[0] // n_sources
    color_map = cm.get_cmap("GnBu")
    norm = matplot_colors.Normalize(vmin=-1, vmax=n_sources-1)
    fig, ax = plt.subplots(figsize=(15, 7))
    for i in range(n_sources):
        # one colour value per point in this source's chunk
        ax.scatter(tsne_results[step*i:step*(i+1), 0], tsne_results[step*i:step*(i+1), 1],
                   label=sources_names[i],
                   c=color_map(norm([i for _ in range(step)])))
    ax.legend()
    return fig
def get_2d_seq_points_plot(points, n_steps):
    """Return a figure of *points* split into n_steps chunks.

    Within a chunk, point index is mapped through the nipy_spectral colormap;
    later chunks are drawn more opaque (alpha grows linearly with step).
    """
    # points per step; assumes an even split
    step = points.shape[0] // n_steps
    color_map = cm.get_cmap("nipy_spectral")
    alphas = list(np.arange(1, n_steps + 1) / n_steps)
    norm = matplot_colors.Normalize(vmin=-1, vmax=step)
    time_colors = color_map(norm(np.arange(step)))
    fig, ax = plt.subplots(figsize=(15, 7))
    for i in range(n_steps):
        ax.scatter(points[step*i:step*(i+1), 0], points[step*i:step*(i+1), 1],
                   c=time_colors, alpha=alphas[i])
    plt.colorbar(cm.ScalarMappable(norm=norm, cmap=color_map), ax=ax)
    return fig
def plot_visualization_experiments(representations_list, sources_names=[], tsne=False, isomap=False):
    """Concatenate per-source representations and show the requested 2-D views."""
    representations = np.concatenate(representations_list)
    if tsne:
        print("t-SNE:")
        plot_tsne(calculate_tsne(representations), sources_names)
    if isomap:
        print("Isomap:")
        plot_tsne(calculate_isomap(representations), sources_names)
def plot_evolution(points, step_map, sources_names):
    """Scatter *points* grouped by source and, within a source, by step.

    *step_map* is a list of (n_steps, chunk_size) pairs, one per source; each
    source uses its own colormap with the step index mapped to colour, and is
    labelled once on its last step.
    """
    fig, ax = plt.subplots(figsize=(15, 7))
    color_maps = [cm.get_cmap("GnBu"), cm.get_cmap("Oranges"), cm.get_cmap("Greys"), cm.get_cmap("YlGn"), cm.get_cmap("PuRd")]
    step = 0
    for i in range(len(step_map)):
        curr_step_map = step_map[i]
        norm = matplot_colors.Normalize(vmin=-1, vmax=curr_step_map[0]-1)
        color_map = color_maps[i]
        for j in range(curr_step_map[0]):
            size = curr_step_map[1]
            label = None
            # only label the final step of each source to keep the legend compact
            if j == curr_step_map[0] - 1:
                label = sources_names[i]
            ax.scatter(points[step:step+size, 0], points[step:step+size, 1], label=label, c=color_map(norm([j for _ in range(size)])))
            step += size
    ax.legend()
    plt.show()
def plot_evolution_visualization_experiments(representations_list, step_map, sources_names=[], tsne=False, isomap=False):
    """Concatenate representations and show evolution plots of the 2-D views."""
    representations = np.concatenate(representations_list)
    if tsne:
        print("t-SNE:")
        plot_evolution(calculate_tsne(representations), step_map, sources_names)
    if isomap:
        print("Isomap:")
        plot_evolution(calculate_isomap(representations), step_map, sources_names)
def plot_experiments(representations, n_datapoints_per_game=10, similarity=False):
    """Show three cosine-similarity heatmaps of *representations*.

    Left: full similarity matrix; middle: upper-triangular variant (lower
    triangle forced to -1); right: similarity within the third game's chunk.
    """
    if similarity:
        print("Similarity:")
        similarity_matrix = create_similarity_matrix(representations)
        min_similarity_matrix = create_similarity_matrix(representations, min=True)
        # third game occupies indices [2*n, 3*n)
        game_similarity_matrix = create_similarity_matrix(representations[2*n_datapoints_per_game:3*n_datapoints_per_game])
        fig, axs = plt.subplots(1,3, figsize=(13, 5))
        mat1 = axs[0].matshow(similarity_matrix)
        mat2 = axs[1].matshow(min_similarity_matrix)
        mat3 = axs[2].matshow(game_similarity_matrix)
        fig.colorbar(mat1, ax=axs[0])
        fig.colorbar(mat2, ax=axs[1])
        fig.colorbar(mat3, ax=axs[2])
        plt.show()
| 8,027 | 36.166667 | 140 | py |
TOV-VICReg | TOV-VICReg-main/tov_vicreg/utils/pytorch_utils.py | import torch
device = None  # module-global torch.device; populated by init_gpu()
def init_gpu(use_gpu=True, gpu_id=0):
    """Bind the module-global ``device`` to cuda:<gpu_id> if available, else CPU."""
    global device
    if use_gpu and torch.cuda.is_available():
        device = torch.device("cuda:" + str(gpu_id))
        print("Using GPU id {}".format(gpu_id))
    else:
        device = torch.device("cpu")
        print("GPU not detected. Defaulting to CPU.")
def set_device(gpu_id):
    """Select the active CUDA device by index (thin wrapper over torch.cuda)."""
    torch.cuda.set_device(gpu_id)
def from_numpy(*args, **kwargs):
    """Convert a numpy array to a float32 tensor on the module-global ``device``."""
    tensor = torch.from_numpy(*args, **kwargs)
    return tensor.float().to(device)
def ones(*args, **kwargs):
    """``torch.ones`` placed on the module-global ``device``."""
    result = torch.ones(*args, **kwargs)
    return result.to(device)
def to_numpy(tensor):
    """Detach *tensor* from the graph and return it as a CPU numpy array."""
    return tensor.detach().to('cpu').numpy()
| 642 | 19.741935 | 63 | py |
ped | ped-main/model.py | import numpy as np
import torch
import torch.nn as nn
import torch.autograd as autograd
from lib.solvers import anderson
import matplotlib.pyplot as plt
EPS = 1e-03 # Avoid division by zero with continuous Bernoulli
# Global compute device used by all models/training code in this module
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class DeepEncoderLayerFC(nn.Module):
    """Fully-connected layer map of a deep proximal-extragradient decoder (PED).

    ``forward`` implements the fixed-point update over all layers' latent
    codes ``z`` at once; the fixed point is found by the DEQ solver in
    ``DeepDEQAutoencoder``.
    """

    def __init__(self, layer_widths, lamb = .1,
                 act = 'bernoulli',
                 dropout_mode = 'from_latents'):
        """
        layer_widths (list(int)): [input_dim, width1, ..., widthL-1, output_dim]
        """
        super().__init__()
        self.layer_widths = layer_widths
        self.fc_or_conv = 'fc'
        self.dropout_mode = dropout_mode
        self.act_str = act
        self._init_params()
        self.lamb = lamb
        # T is the sufficient-statistic map; identity except for gauss/relu
        self.T = lambda x: x
        if act == 'bernoulli':
            self.act = torch.sigmoid
        elif act == 'binomial':
            self.act = lambda x: 10*torch.sigmoid(x)
        elif act == 'cts-bernoulli':
            # continuous-Bernoulli mean; EPS guards the eta -> 0 singularity
            self.act = lambda x: (torch.exp(x)*(x-1)+1)/(x*(torch.exp(x)-1)+EPS)
        elif act == 'gauss':
            self.act = lambda x: x
            self.T = lambda x: np.sqrt(self.lamb)*x
        elif act == 'relu':
            self.act = nn.functional.relu
            self.T = lambda x: np.sqrt(self.lamb)*x
        elif act == 'poisson':
            self.act = torch.exp
        # f stacks every layer's latent code side by side
        self.output_dim = np.sum(self.layer_widths[1:])

    def _init_params(self):
        """Create per-layer weight matrices and biases (None sentinels at both ends)."""
        # Initialise weights
        self.M_layers = nn.ModuleList([None])
        self.biases = nn.ParameterList([None])
        for l in range(len(self.layer_widths)-1):
            in_dim = self.layer_widths[l]
            out_dim = self.layer_widths[l+1]
            M_layer = nn.Linear(in_dim, out_dim, bias = False)
            # Poisson link exponentiates, so start with much smaller weights
            if (self.act_str == 'poisson'):
                nn.init.normal_(M_layer.weight, 0, 0.1/np.sqrt(out_dim))
            else:
                nn.init.normal_(M_layer.weight, 0, 1/np.sqrt(in_dim))
            # Initialise biases
            bias = torch.empty(in_dim).to(device)
            nn.init.uniform_(bias, -1/np.sqrt(out_dim),
                             1./np.sqrt(out_dim))
            self.M_layers.append(M_layer)
            self.biases.append(nn.Parameter(bias))
        self.M_layers.append(None)
        self.biases.append(None)

    def forward(self, f, Y, mode='encoder'):
        """
        Map input Y and output f to output f. f is a collection of zs in every layer
        """
        # Make a list of z's by slicing f into per-layer chunks
        z_list = [Y]
        layer_idx0 = 0
        for idx, l in enumerate(self.layer_widths):
            if idx == 0:
                continue
            z = f[:, layer_idx0:l+layer_idx0]
            z_list.append(z)
            # BUG FIX: the offset must accumulate across layers (the original
            # `layer_idx0 = l` sliced wrong columns once there are >= 3 weight
            # layers; cf. the cumulative `start_idx` in calc_all_etas).
            layer_idx0 = l + layer_idx0
        z_list.append(None)
        z_list_new = []
        for lm1 in range(len(self.layer_widths)-1):
            zlm1 = z_list[lm1]
            zl = z_list[lm1+1]
            zlp1 = z_list[lm1+2]
            Ml = self.M_layers[lm1+1]
            Mlp1 = self.M_layers[lm1+2]
            bl = self.biases[lm1+1]
            blp1 = self.biases[lm1+2]
            # natural parameter and moment (mean) of this layer
            etal = nn.functional.linear(zl, Ml.weight.T, bias=bl)
            momentl = self.act(etal)
            if not (Mlp1 is None):
                etalp1 = nn.functional.linear(zlp1, Mlp1.weight.T, bias=blp1)
                momentlp1 = self.act(etalp1)
            else:
                momentlp1 = 0
            # relu model uses a dropout-style mask from inputs or latents
            dropout = 1
            if self.act_str == 'relu':
                if self.dropout_mode == 'from_inputs':
                    dropout = (zlm1 > 0)*1.
                elif self.dropout_mode == 'from_latents':
                    dropout = (etal > 0)*1.
                elif self.dropout_mode == 'off':
                    dropout = 1.
            zl_new = (Ml(self.T(zlm1)*dropout - momentl) +
                      self.T(momentlp1))/self.lamb
            z_list_new.append(zl_new)
        return torch.hstack(z_list_new)
class ExpfamLoss(nn.Module):
    """Negative log-likelihood of an exponential-family observation model.

    For family string ``exp_str`` the loss is mean(-T(y)*eta + A(eta) - log h(y)),
    where T is the sufficient statistic, A the log-partition function and
    log_h the base measure.  ``lamb`` is the precision used by the gauss/relu
    families and the latent prior.
    """

    def __init__(self, exp_str='bernoulli', lamb=1.):
        super().__init__()
        self.exp_str = exp_str
        if exp_str == 'bernoulli':
            self.T = lambda y: y
            self.A = lambda eta: torch.logaddexp(torch.zeros_like(eta), eta)
            self.log_h = lambda y : y*0
        elif exp_str == 'binomial':
            self.T = lambda y: y
            # binomial with n=10 trials (matches the 10*sigmoid mean in the encoder)
            self.A = lambda eta: 10* torch.logaddexp(torch.zeros_like(eta), eta)
            self.log_h = lambda y : y*0
        elif exp_str == 'cts-bernoulli':
            self.T = lambda y: y
            # EPS guards the eta -> 0 singularity of the continuous Bernoulli
            self.A = lambda eta: torch.log((torch.exp(eta) - 1)/(eta + EPS))
            self.log_h = lambda y : y*0
        elif exp_str == 'gauss':
            self.T = lambda y: y
            self.A = lambda eta: eta**2/2
            self.log_h = lambda y : -y**2*lamb/2
        elif exp_str == 'relu':
            self.T = lambda y: y*np.sqrt(lamb)
            self.A = lambda eta: nn.functional.relu(eta)**2/2
            self.log_h = lambda y : -y**2*lamb/2
        elif exp_str == 'poisson':
            self.T = lambda y: y
            self.A = lambda eta: torch.exp(eta)
            self.log_h = lambda y : y*0
        self.lamb = lamb

    def forward(self, target, eta, z_hidden=None, epoch=None):
        """Single-layer NLL when z_hidden is None; otherwise the summed
        layer-wise loss plus the Frobenius prior on each hidden code."""
        # per-layer weighting factor (currently disabled: factor**l == 1)
        factor = 1
        if epoch is None:
            epoch = np.inf
        if z_hidden is None:
            return torch.sum(-self.T(target)*eta + self.A(eta))/torch.numel(target) - \
                torch.sum(self.log_h(target))/torch.numel(target)
        else:
            total_err = 0
            total_num = 0
            for l in range(len(z_hidden)):
                # layer l's "target" is the code of the layer below (the data for l == 0)
                if l == 0:
                    zlm1 = target
                else:
                    zlm1 = z_hidden[l-1]
                zl = z_hidden[l]
                etal = eta[l]
                new_layer_loss = self.forward(zlm1,
                                              etal.reshape(zlm1.shape))*\
                    torch.numel(zlm1) * factor**l
                total_err = total_err + new_layer_loss
                # The last layer also needs to add the log prior
                # (for other layers this is handled through log_h)
                total_err = total_err + \
                    self.lamb*torch.linalg.norm(zl, ord='fro')**2*factor**l
                total_num = total_num + torch.numel(zlm1)
            return total_err/(zlm1.shape[0]*2)
class DeepDEQAutoencoder(nn.Module):
    """Deep-equilibrium wrapper around a layer map ``encoder``.

    The forward pass finds a fixed point of the encoder with Anderson
    acceleration; during training a backward hook solves the adjoint
    fixed-point equation so gradients flow through the equilibrium (standard
    implicit-differentiation DEQ recipe).
    """

    def __init__(self, encoder, backbone = False):
        super().__init__()
        self.f = encoder
        self.solver = anderson
        # when True, forward returns only the (orthogonalised) last layer
        self.backbone = backbone

    def forward(self, Y):
        thres = 50  # max solver iterations
        f0 = torch.zeros((Y.shape[0], int(np.prod(self.f.output_dim))), device=Y.device)
        # Forward pass: find the fixed point without building a graph
        with torch.no_grad():
            f_star = self.solver(
                lambda f: self.f(f, Y), f0, threshold=thres)['result']
        new_f_star = f_star
        # (Prepare for) Backward pass
        if self.training:
            # one differentiable application of f at the fixed point
            new_f_star = self.f(f_star.requires_grad_(), Y)

            def backward_hook(grad):
                if self.hook is not None:
                    self.hook.remove()
                    try:
                        torch.cuda.synchronize()  # To avoid infinite recursion
                    except:
                        pass  # best-effort: no CUDA available
                # Compute the fixed point of yJ + grad, where J=J_f is the
                # Jacobian of f at z_star
                new_grad = self.solver(lambda y: autograd.grad(new_f_star, f_star, y,
                                                               retain_graph=True)[0] + grad,
                                       torch.zeros_like(grad), threshold=thres)['result']
                return new_grad

            self.hook = new_f_star.register_hook(backward_hook)
        if self.backbone:  # In this case, only worry about returning the last layer
            # MODERNISED: torch.qr is deprecated; torch.linalg.qr (reduced mode,
            # the default) is the drop-in replacement.
            _, R = torch.linalg.qr(self.f.M_layers[-2].weight.T)
            ret = (R @ new_f_star[:, -self.f.layer_widths[-1]:].T).T
            return ret
        return self.calc_all_etas(new_f_star)

    def calc_all_etas(self, z):
        """Split the stacked code ``z`` into per-layer chunks and return
        (natural parameters, codes) for every layer."""
        start_idx = 0
        etas = []
        zs = []
        for l_idx, l_width in enumerate(self.f.layer_widths):
            if l_idx == 0:
                continue
            zl = z[:, start_idx:self.f.layer_widths[l_idx]+start_idx]
            etal = nn.functional.linear(
                zl, self.f.M_layers[l_idx].weight.T, bias=self.f.biases[l_idx])
            etas.append(etal)
            zs.append(zl)
            start_idx = start_idx + self.f.layer_widths[l_idx]
        return etas, zs
class DeepPED(object):
    """Trainer/estimator facade around DeepEncoderLayerFC + DeepDEQAutoencoder.

    Exposes an sklearn-like fit / fit_transform API; ``_test`` returns the
    (orthogonalised) latent codes of a chosen layer as a numpy array.
    """

    def __init__(self, layer_widths):
        self.layer_widths = layer_widths

    def _freeze_unfreeze(self, epoch):
        """Greedy layer-wise schedule: layer l starts training at epoch 5*(l-1)."""
        L = len(self.encoder.M_layers)-2
        for l in range(1, L+1):
            bool_ = (epoch >= 5*(l-1))
            for param in self.encoder.M_layers[l].parameters():
                param.requires_grad = bool_
            self.encoder.biases[l].requires_grad = bool_

    def fit(self, data_loader, lamb=1,
            dist='bernoulli', weight_decay=0.001, num_epochs=20,
            plot_bool=False, plot_freq=5, lr=0.01):
        """Train the DEQ autoencoder on *data_loader*."""
        # NOTE(review): dim_input is computed but not used downstream
        features, label = next(iter(data_loader))
        if len(list(features.shape)) == 4:
            dim_input = np.prod(list(features.shape[1:]))
        else:
            dim_input = features.shape[-1]
        # Initialise the model
        self.encoder = DeepEncoderLayerFC(self.layer_widths,
                                          act=dist, lamb=lamb)
        self.encoder.to(device)
        self.model = DeepDEQAutoencoder(self.encoder)
        self.model.to(device)
        # Per-layer parameter groups: weight decay scales with layer widths.
        # Divide the weight decay by 1000 for reasonable units
        # (Note this also occurs in the loss ExpfamLoss)
        params_ = []
        L = len(self.encoder.M_layers)-2
        for l in range(1, L+1):
            params_ = params_ + [{'params': self.encoder.M_layers[l].parameters(),
                                  'weight_decay': (self.layer_widths[l] +
                                                   self.layer_widths[l-1])*weight_decay/1000}]
            params_ = params_ + [{'params': self.encoder.biases[l]}]
        self.optimiser = torch.optim.Adam(params_, lr=lr)
        self.loss = ExpfamLoss(dist, lamb=lamb)
        for epoch in range(num_epochs):
            print(epoch, flush=True)
            self._freeze_unfreeze(epoch)
            self._train(epoch, data_loader)
            if plot_bool and (epoch % plot_freq) == 0:
                self._test(epoch, data_loader, plot_bool=plot_bool)

    def fit_transform(self, data_loader, lamb=1,
                      dist='bernoulli', weight_decay=0.001, num_epochs=20, plot_bool=True,
                      plot_freq = 10, lr=0.01, data_loader_test=None, layer_out = -1):
        """Fit, then return the latent codes of *data_loader_test* (defaults to
        the training loader)."""
        self.fit(data_loader, lamb, dist, weight_decay, num_epochs,
                 plot_bool, plot_freq, lr=lr)
        if data_loader_test is None:
            data_loader_test = data_loader
        return self._test(num_epochs, data_loader_test, plot_bool=plot_bool,
                          layer_out=layer_out)

    def _train(self, epoch, data_loader):
        """One optimisation epoch; prints per-batch loss and end-of-epoch diagnostics."""
        self.model.train()
        for batch_idx, (data, labels) in enumerate(data_loader):
            self.optimiser.zero_grad()
            layer_in = data.to(device)
            eta, f_star = self.model(layer_in)
            loss_eval = self.loss(layer_in, eta, f_star, epoch=epoch)
            loss_eval.backward()
            self.optimiser.step()
            print(loss_eval.item(), flush=True)
        # Diagnostics: R factor of the last weight matrix and per-layer
        # spectral norms relative to lamb (contractivity check).
        # MODERNISED: torch.qr is deprecated -> torch.linalg.qr.
        _, R = torch.linalg.qr(self.model.f.M_layers[-2].weight.T)
        print(R)
        for i in range(1, len(self.model.f.M_layers)-1):
            W = self.encoder.M_layers[i].weight.T
            print(torch.linalg.norm(W @ W.T, ord=2)/self.encoder.lamb)

    def _test(self, epoch, data_loader, plot_bool=False, layer_out=-1):
        """Return layer *layer_out*'s codes over *data_loader* as a numpy array."""
        self.model.eval()
        ret = np.empty((0, self.layer_widths[-1]))
        # For some purposes (e.g. visualisation) it makes sense to
        # orthogonalise the basis.
        # MODERNISED: torch.qr is deprecated -> torch.linalg.qr.
        _, R = torch.linalg.qr(self.model.f.M_layers[layer_out-1].weight.T)
        print(R)
        R = R.detach().cpu().numpy()
        if plot_bool:
            plt.figure(figsize=(10,10))
        for batch_idx, (data, labels) in enumerate(data_loader):
            layer_in = data.to(device)
            with torch.no_grad():
                _, f_star = self.model(layer_in)
            f_star = f_star[layer_out].detach().cpu().numpy()
            # Orthogonalise the basis
            f_star = (R @ f_star.T).T
            ret = np.vstack((ret, f_star))
            if plot_bool:
                plt.scatter(f_star[:,0], f_star[:,1], s=1)
        if plot_bool:
            plt.savefig('outputs/' + str(epoch) + '.png', bbox_inches='tight')
            plt.close()
        return ret
| 12,969 | 35.846591 | 98 | py |
ped | ped-main/script_synthetic.py | import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from lib.plotters import matplotlib_config
from model import DeepPED
from model import DeepDEQAutoencoder
from model import DeepEncoderLayerFC
import sys
import os
import umap
import time
BATCH_SIZE_TRAIN = 500  # minibatch size for all training loops below
DIST_TRUE = 'relu'  # observation family used to synthesise the data
DIST_MODEL = DIST_TRUE  # fit the matching model family
NUM_EPOCHS_SUPERVISED = 100  # epochs for each regression head
DIMS_TRUE = [50, 2]  # [ambient dim, ..., latent dim] of the generative model
DIMS_MODEL = DIMS_TRUE
# deeper models get fewer unsupervised epochs
if len(DIMS_TRUE) > 2:
    NUM_EPOCHS_UNSUPERVISED = 10
else:
    NUM_EPOCHS_UNSUPERVISED = 30
WEIGHT_DECAY = 10.*(len(DIMS_MODEL)-1)
if (DIST_MODEL == 'relu') or (DIST_MODEL == 'poisson'):
    LAMBDA = 1.
else:
    LAMBDA = 0.1
BINOMIAL_N = 10  # number of trials for the binomial observation model
NUM_POINTS = 100000 #10000
SCRIPT_RUN_ID = int(sys.argv[1])  # CLI arg selects the output subdirectory
OUTPUT_DIR = 'outputs/synthetic/' + DIST_TRUE + \
    str(len(DIMS_MODEL)-1) + '/' + \
    str(SCRIPT_RUN_ID) + '/'
LR = 0.001
SHAPE = 'shape'  # latent support: 'square' | 'circle' | 'shape'
PRETRAIN = True  # run baselines + PED pretraining before the supervised heads
# Create the output directory.  IDIOM FIX: exist_ok=True replaces the original
# bare `try/except: pass`, which silently swallowed every error (permissions,
# bad path, ...) while the intent was only to tolerate a pre-existing directory.
os.makedirs(OUTPUT_DIR, exist_ok=True)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
matplotlib_config()
def _generate_square(num_points):
x = np.linspace(-5, 5, int(np.sqrt(num_points)))
label1 = np.linspace(0, 1, int(np.sqrt(num_points)))
z = np.transpose([np.tile(x, len(x)), np.repeat(x, len(x))]).T
label2 = np.transpose([np.tile(label1, len(label1)),
np.repeat(label1, len(label1))])
labels = np.hstack((np.zeros((label2.shape[0], 1)), label2))
labels = torch.Tensor(labels)
return z, labels
def _generate_shape(num_points):
    """Grid latents restricted to two discs plus a small L1 diamond ('shape')."""
    z, labels = _generate_square(num_points)
    # Circles: radius-3 disc at (2, 2) and radius-2 disc at (-3, -3)
    in_discs = np.logical_or(np.linalg.norm(z - 2, axis=0) < 3,
                             np.linalg.norm(z + 3, axis=0) < 2)
    # Square (L1 ball of radius 1 around (4, -4))
    in_diamond = np.linalg.norm(z - np.asarray([[4], [-4]]), axis=0, ord=1) < 1
    keep = np.logical_or(in_discs, in_diamond)
    return z[:, keep], labels[keep, :]
def _generate_circle(num_points):
    """Grid latents restricted to the radius-5 disc centred at the origin."""
    z, labels = _generate_square(num_points)
    keep = np.linalg.norm(z, axis=0) < 5
    return z[:, keep], labels[keep, :]
def generate_data(num_points, batch_size, dims, shape):
    """Synthesise observations Y by pushing 2-D latents through random layers.

    Latents are sampled on the requested *shape*, then mapped latent -> ambient
    through len(dims)-1 random linear layers, sampling from the DIST_TRUE
    observation family at each layer.  Returns
    (data_loader, dataset, Y, labels, z, data_loader_test); both loaders are
    unshuffled so rows stay aligned with z/labels.
    """
    # generate some points
    if shape == 'square':
        z, labels = _generate_square(num_points)
    elif shape == 'circle':
        z, labels = _generate_circle(num_points)
    elif shape == 'shape':
        z, labels = _generate_shape(num_points)
    # dims is given ambient-first; generation runs latent -> ambient
    dims = dims[::-1]
    z_in = np.copy(z)
    for dim_idx in range(len(dims)-1):
        low_dim = dims[dim_idx]
        big_dim = dims[dim_idx+1]
        # Generate a basis
        W = np.random.normal(0, 1, (big_dim, low_dim))/(np.sqrt(low_dim))
        eta = (W @ z_in).T # size = (num_points*num_clusters, big_dim)
        if (DIST_TRUE == 'bernoulli'):
            p = 1/(1+np.exp(-eta))
            Y = np.random.binomial(1, p)
        if (DIST_TRUE == 'binomial'):
            p = 1/(1+np.exp(-eta))
            Y = np.random.binomial(BINOMIAL_N, p)
        if (DIST_TRUE == 'poisson'):
            lam = np.exp(eta*0.5)
            Y = np.random.poisson(lam)
        if DIST_TRUE == 'gauss':
            Y = np.random.normal(eta/np.sqrt(LAMBDA),
                                 scale=1/np.sqrt(LAMBDA)*np.ones_like(eta))
        # heavy-tailed and relu families rectify eta first
        if DIST_TRUE == 'cauchy':
            eta = (eta > 0) * eta
            Y = np.random.standard_cauchy(
                size=eta.shape).astype(np.float32) + eta.astype(np.float32)
        if DIST_TRUE == 'student':
            eta = (eta > 0) * eta
            Y = np.random.standard_t(2,
                size=eta.shape).astype(np.float32) + eta.astype(np.float32)
        if DIST_TRUE == 'relu':
            eta = (eta > 0) * eta
            Y = np.random.normal(eta/np.sqrt(LAMBDA),
                                 scale=1/np.sqrt(LAMBDA)*np.ones_like(eta))
        Y = torch.Tensor(Y)
        # this layer's samples feed the next layer
        z_in = np.copy(Y).T
    print(Y)  # NOTE(review): debug print of the full data tensor
    data = torch.utils.data.TensorDataset(Y, labels)
    data_loader = torch.utils.data.DataLoader(data, batch_size = batch_size, shuffle=False)
    data_loader_test = \
        torch.utils.data.DataLoader(data, batch_size = batch_size, shuffle=False)
    return data_loader, data, Y, labels, z, data_loader_test
def plot_z_space(z1, z2, z3, labels, prefix):
    """Scatter latents (3-D when z3 is given, else 2-D) and save two PNGs.

    Writes OUTPUT_DIR/<prefix>_waxis.png (with axes) and
    OUTPUT_DIR/<prefix>_woaxis.png (axes hidden, tight bbox).
    """
    if not (z3 is None):
        fig = plt.figure(figsize=(12, 12))
        ax = fig.add_subplot(projection='3d')
        ax.scatter(z1, z2, z3, c=labels, s=1)
    else:
        plt.scatter(z1, z2, c=labels, s=1)
        plt.gca().set_aspect('equal', adjustable='box')
    plt.savefig(OUTPUT_DIR + prefix + '_waxis.png')
    plt.axis('off')
    plt.gcf().axes[0].get_xaxis().set_visible(False)
    plt.gcf().axes[0].get_yaxis().set_visible(False)
    plt.savefig(OUTPUT_DIR + prefix + '_woaxis.png', bbox_inches='tight', pad_inches=0)
    plt.close()
# Load/generate some synthetic data
data_loader, data_set, true_Y, true_labels, true_z, data_loader_test = \
    generate_data(NUM_POINTS, BATCH_SIZE_TRAIN, DIMS_TRUE, SHAPE)
if PRETRAIN:
    ############################################Plot ground truth latents
    # third latent coordinate only exists for 3-D latent models
    z3 = true_z.T[:,2] if DIMS_TRUE[-1] == 3 else None
    plot_z_space(true_z.T[:,0], true_z.T[:,1], z3, true_labels.numpy(), 'gt' + SHAPE)
    #################################################### Fit TSNE then visualise
    # NOTE(review): the t-SNE baseline is disabled via a dead string literal,
    # so tsne_z stays undefined; the tSNE backbone below is only reached when
    # NUM_POINTS <= 10000 (currently it is not).
    """
    print('Applying tSNE...')
    t0 = time.time()
    tsne_z = TSNE(n_components=DIMS_MODEL[-1], learning_rate='auto',
                  init='pca').fit_transform(true_Y)
    print('Took ' + str(time.time() - t0) + ' seconds.')
    z3 = tsne_z[:,2] if DIMS_MODEL[-1] == 3 else None
    plot_z_space(tsne_z[:,0], tsne_z[:,1], z3, true_labels.numpy(), 'tsne' + SHAPE)
    """
    #################################################### Fit UMAP then visualise
    print('Applying UMAP...')
    t0 = time.time()
    umap_z = umap.UMAP().fit_transform(true_Y)
    print('Took ' + str(time.time() - t0) + ' seconds.')
    z3 = umap_z[:,2] if DIMS_MODEL[-1] == 3 else None
    plot_z_space(umap_z[:,0], umap_z[:,1], z3, true_labels.numpy(), 'umap' + SHAPE)
    #################################################### Fit PCA then visualise
    print('Applying PCA...')
    t0 = time.time()
    scaler = StandardScaler()
    data_scaled = scaler.fit_transform(true_Y.numpy())
    pca_z = PCA(n_components=DIMS_MODEL[-1]).fit_transform(data_scaled)
    print('Took ' + str(time.time() - t0) + ' seconds.')
    z3 = pca_z[:,2] if DIMS_MODEL[-1] == 3 else None
    plot_z_space(pca_z[:,0], pca_z[:,1], z3, true_labels.numpy(), 'pca' + SHAPE)
    ############################################### Fit PED then visualise
    # NOTE(review): `ped` is consumed by the PED backbone section below, which
    # is itself guarded by PRETRAIN — confirm this whole section is meant to
    # run only when PRETRAIN is True.
    print('Applying PED...')
    t0 = time.time()
    ped = DeepPED(DIMS_MODEL)
    ped_z = ped.fit_transform(data_loader, lamb=LAMBDA,
        dist=DIST_MODEL, weight_decay=WEIGHT_DECAY, num_epochs=NUM_EPOCHS_UNSUPERVISED,
        plot_bool=False, plot_freq=50, lr=LR, data_loader_test=data_loader_test)
    print('Took ' + str(time.time() - t0) + ' seconds.')
    z3 = ped_z[:,2] if DIMS_MODEL[-1] == 3 else None
    plot_z_space(ped_z[:,0], ped_z[:,1], z3, true_labels.numpy(), 'ped' + SHAPE)
def train(epoch, data_loader, model, optimiser, loss):
    """One optimisation pass of *model* over *data_loader* on the global device."""
    model.train()
    for data, target in data_loader:
        optimiser.zero_grad()
        data, target = data.to(device), target.to(device)
        batch_loss = loss(model(data), target)
        batch_loss.backward()
        optimiser.step()
def test(epoch, data_loader, model, loss):
    """Sample-weighted mean loss of *model* over *data_loader*; printed and
    returned as a numpy scalar."""
    model.eval()
    running = 0
    count = 0
    for data, target in data_loader:
        data, target = data.to(device), target.to(device)
        batch = data.shape[0]
        running = running + loss(model(data), target) * batch
        count = count + batch
    mean_loss = running / count
    print(mean_loss)
    return mean_loss.cpu().detach().numpy()
def headnet_gen():
    """Fresh two-layer MLP regression head: latent dim -> 100 -> 1."""
    return nn.Sequential(
        nn.Linear(DIMS_MODEL[-1], 100),
        nn.ReLU(),
        nn.Linear(100, 1),
    )
loss = torch.nn.MSELoss()
# With each representation z of Y, try and find f(z) for the true z.
# Target f(z) = z1 + z2; an 80/20 random split is shared by every backbone.
num_train = int(0.8*true_z.shape[1])
fz = torch.Tensor(true_z[0,:] + true_z[1,:])
data = torch.utils.data.TensorDataset(true_Y, fz.reshape((-1,1)))
idx = np.random.permutation(true_z.shape[1])
fz_train = torch.Tensor(fz.reshape((-1,1))[idx[:num_train],:])
fz_test = torch.Tensor(fz.reshape((-1,1))[idx[num_train:],:])
train_set_ = torch.utils.data.Subset(data, idx[:num_train])
test_set_ = torch.utils.data.Subset(data, idx[num_train:])
train_set = torch.utils.data.DataLoader(train_set_, BATCH_SIZE_TRAIN)
test_set = torch.utils.data.DataLoader(test_set_, BATCH_SIZE_TRAIN)
####################### PED Backbone network
# Regress on raw Y through a (pretrained when PRETRAIN) PED backbone + MLP head.
encoder = DeepEncoderLayerFC(DIMS_MODEL, lamb = LAMBDA,
                             act = DIST_MODEL)
if PRETRAIN:
    backbone = ped.model
    ped.model.backbone = True
else:
    backbone = DeepDEQAutoencoder(encoder, backbone=True)
headnet = headnet_gen()
pedmodel = nn.Sequential(backbone,headnet)
pedmodel.to(device)
optimiser = torch.optim.Adam(pedmodel.parameters())
ped_loss = []
backbone.f.dropout_mode = 'off'
print('Training PED backbone...')
# record test loss before each epoch and once more after the last one
for epoch in range(NUM_EPOCHS_SUPERVISED):
    ped_loss.append(test(epoch, test_set, pedmodel, loss))
    train(epoch, train_set, pedmodel, optimiser, loss)
ped_loss.append(test(NUM_EPOCHS_SUPERVISED, test_set, pedmodel, loss))
##################### PCA backbone network
# Regress on the frozen PCA embedding with the same MLP head architecture.
train_set_ = torch.utils.data.TensorDataset(torch.Tensor(pca_z[idx[:num_train],:]),
                                            fz_train.reshape((-1,1)))
test_set_ = torch.utils.data.TensorDataset(torch.Tensor(pca_z[idx[num_train:],:]),
                                           fz_test.reshape((-1,1)))
train_set = torch.utils.data.DataLoader(train_set_, BATCH_SIZE_TRAIN)
test_set = torch.utils.data.DataLoader(test_set_, BATCH_SIZE_TRAIN)
#### Now the actual model
headnet = headnet_gen()
headnet.to(device)
optimiser = torch.optim.Adam(headnet.parameters())
pca_loss = []
print('Training PCA backbone...')
for epoch in range(NUM_EPOCHS_SUPERVISED):
    pca_loss.append(test(epoch, test_set, headnet, loss))
    train(epoch, train_set, headnet, optimiser, loss)
pca_loss.append(test(NUM_EPOCHS_SUPERVISED, test_set, headnet, loss))
##################### tSNE backbone network
# Only for small runs: tsne_z is defined only when the (currently disabled)
# t-SNE section above has run.
if NUM_POINTS <= 10000:
    train_set_ = torch.utils.data.TensorDataset(torch.Tensor(tsne_z[idx[:num_train],:]),
                                                fz_train.reshape((-1,1)))
    test_set_ = torch.utils.data.TensorDataset(torch.Tensor(tsne_z[idx[num_train:],:]),
                                               fz_test.reshape((-1,1)))
    train_set = torch.utils.data.DataLoader(train_set_, BATCH_SIZE_TRAIN)
    test_set = torch.utils.data.DataLoader(test_set_, BATCH_SIZE_TRAIN)
    #### Now the actual model
    headnet = headnet_gen()
    headnet.to(device)
    optimiser = torch.optim.Adam(headnet.parameters())
    tsne_loss = []
    print('Training tSNE backbone...')
    for epoch in range(NUM_EPOCHS_SUPERVISED):
        tsne_loss.append(test(epoch, test_set, headnet, loss))
        train(epoch, train_set, headnet, optimiser, loss)
    tsne_loss.append(test(NUM_EPOCHS_SUPERVISED, test_set, headnet, loss))
##################### UMAP backbone network
# Regress on the frozen UMAP embedding with the same MLP head architecture.
train_set_ = torch.utils.data.TensorDataset(torch.Tensor(umap_z[idx[:num_train],:]),
                                            fz_train.reshape((-1,1)))
test_set_ = torch.utils.data.TensorDataset(torch.Tensor(umap_z[idx[num_train:],:]),
                                           fz_test.reshape((-1,1)))
train_set = torch.utils.data.DataLoader(train_set_, BATCH_SIZE_TRAIN)
test_set = torch.utils.data.DataLoader(test_set_, BATCH_SIZE_TRAIN)
#### Now the actual model
headnet = headnet_gen()
headnet.to(device)
optimiser = torch.optim.Adam(headnet.parameters())
umap_loss = []
print('Training UMAP backbone...')
for epoch in range(NUM_EPOCHS_SUPERVISED):
    umap_loss.append(test(epoch, test_set, headnet, loss))
    train(epoch, train_set, headnet, optimiser, loss)
umap_loss.append(test(NUM_EPOCHS_SUPERVISED, test_set, headnet, loss))
##################### Network that uses the true zs as inputs
# Oracle baseline: the head sees the ground-truth latents directly.
z_train = torch.Tensor(true_z[:,idx[:num_train]])
z_test = torch.Tensor(true_z[:,idx[num_train:]])
train_set_ = torch.utils.data.TensorDataset(z_train.T, fz_train.reshape((-1,1)))
test_set_ = torch.utils.data.TensorDataset(z_test.T, fz_test.reshape((-1,1)))
train_set = torch.utils.data.DataLoader(train_set_, BATCH_SIZE_TRAIN)
test_set = torch.utils.data.DataLoader(test_set_, BATCH_SIZE_TRAIN)
#### Now the actual model
headnet = headnet_gen()
headnet.to(device)
optimiser = torch.optim.Adam(headnet.parameters())
original_loss = []
print('Training with true Zs...')
for epoch in range(NUM_EPOCHS_SUPERVISED):
    original_loss.append(test(epoch, test_set, headnet, loss))
    train(epoch, train_set, headnet, optimiser, loss)
original_loss.append(test(NUM_EPOCHS_SUPERVISED, test_set, headnet, loss))
############################### Save data
# For large runs the (skipped) tSNE row is padded with inf to keep the shape.
if NUM_POINTS <= 10000:
    save_data = np.vstack((ped_loss, pca_loss, tsne_loss, umap_loss, original_loss))
else:
    save_data = np.vstack((ped_loss, pca_loss, np.full((len(pca_loss),),np.inf), umap_loss, original_loss))
np.save(OUTPUT_DIR + 'losses.npy', save_data)
print(save_data)
| 13,528 | 35.964481 | 107 | py |
ped | ped-main/lib/radam.py | import math
import torch
from torch.optim.optimizer import Optimizer, required
class RAdam(Optimizer):
    """Rectified Adam (Liu et al., 2019).

    Adam with a variance-rectification term: while the approximated
    second-moment SMA length ``N_sma`` is below 5, falls back to an
    un-adapted (SGD-with-momentum-like) step.

    MODERNISED: the legacy positional ``(Number, Tensor)`` overloads of
    ``add_``/``addcmul_``/``addcdiv_`` were deprecated and later removed from
    PyTorch; the keyword ``alpha=``/``value=`` forms below compute identical
    arithmetic.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        # rectification terms repeat with period 10 in `step`, so cache 10 slots
        self.buffer = [[None, None, None] for ind in range(10)]
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)

    def step(self, closure=None):
        """Perform one optimisation step; returns closure() result if given."""
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                # exponential moving averages of grad and squared grad
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                state['step'] += 1
                buffered = self.buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    # more conservative since it's an approximated value
                    if N_sma >= 5:
                        step_size = group['lr'] * math.sqrt(
                            (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
                                N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        step_size = group['lr'] / (1 - beta1 ** state['step'])
                    buffered[2] = step_size
                if group['weight_decay'] != 0:
                    # decoupled-style decay: p <- p - wd*lr*p
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                # more conservative since it's an approximated value
                if N_sma >= 5:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    p_data_fp32.add_(exp_avg, alpha=-step_size)
                p.data.copy_(p_data_fp32)
        return loss
class PlainRAdam(Optimizer):
    """RAdam without the per-step (step, N_sma, step_size) cache.

    Identical schedule to :class:`RAdam`, but the rectification term is
    recomputed for every parameter tensor on every step.

    Args:
        params: iterable of parameters (or param-group dicts) to optimize.
        lr: base learning rate.
        betas: coefficients for the running first/second moment averages.
        eps: term added to the denominator for numerical stability.
        weight_decay: coupled L2 penalty factor applied directly to the data.
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(PlainRAdam, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(PlainRAdam, self).__setstate__(state)
    def step(self, closure=None):
        """Perform one optimization step; `closure` optionally re-evaluates
        the model and returns the loss (returned unchanged)."""
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')
                # Moment math is carried out in fp32 even for fp16 parameters.
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                # Keyword (value=/alpha=) form replaces the positional
                # (Number, Tensor, Tensor) overloads removed from PyTorch.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                state['step'] += 1
                beta2_t = beta2 ** state['step']
                N_sma_max = 2 / (1 - beta2) - 1
                N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                # more conservative since it's an approximated value
                if N_sma >= 5:
                    step_size = group['lr'] * math.sqrt(
                        (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
                                    N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    # Variance not yet rectifiable: bias-corrected SGD step.
                    step_size = group['lr'] / (1 - beta1 ** state['step'])
                    p_data_fp32.add_(exp_avg, alpha=-step_size)
                p.data.copy_(p_data_fp32)
        return loss
class AdamW(Optimizer):
    # Adam with decoupled weight decay (Loshchilov & Hutter) plus an optional
    # linear learning-rate warmup over the first `warmup` steps.
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, warmup=0):
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, warmup=warmup)
        super(AdamW, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(AdamW, self).__setstate__(state)
    def step(self, closure=None):
        # Perform one optimization step; `closure` optionally re-evaluates the
        # model and returns the loss.
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                # All moment math is done in fp32, then copied back into p.data.
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # NOTE(review): these positional (Number, Tensor[, Tensor])
                # overloads are deprecated/removed in newer PyTorch; the
                # keyword form is addcmul_(grad, grad, value=1-beta2) /
                # add_(grad, alpha=1-beta1).
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                # Linear warmup of the learning rate for the first `warmup` steps.
                if group['warmup'] > state['step']:
                    scheduled_lr = 1e-8 + state['step'] * group['lr'] / group['warmup']
                else:
                    scheduled_lr = group['lr']
                # NOTE(review): step_size uses group['lr'] rather than
                # scheduled_lr, so warmup only affects the decoupled weight
                # decay below — confirm this is intended.
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                if group['weight_decay'] != 0:
                    # Decoupled weight decay: shrink the weights directly.
                    p_data_fp32.add_(-group['weight_decay'] * scheduled_lr, p_data_fp32)
                p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
                p.data.copy_(p_data_fp32)
return loss | 8,090 | 36.985915 | 116 | py |
ped | ped-main/lib/layer_utils.py | import torch
import torch.nn.functional as F
import torch.nn as nn
def list2vec(z1_list):
    """Flatten each tensor in `z1_list` to (bsz, -1, 1) and concatenate
    them along dim 1 into a single column-vector tensor."""
    bsz = z1_list[0].size(0)
    flattened = [t.reshape(bsz, -1, 1) for t in z1_list]
    return torch.cat(flattened, dim=1)
def vec2list(z1, cutoffs):
    """Inverse of list2vec: split the concatenated vector `z1` back into a
    list of tensors with the per-entry (C, H, W) shapes given in `cutoffs`."""
    bsz = z1.shape[0]
    chunks = []
    start = 0
    for shape in cutoffs:
        numel = shape[0] * shape[1] * shape[2]
        end = start + numel
        chunks.append(z1[:, start:end].view(bsz, *shape))
        start = end
    return chunks
def conv3x3(in_planes, out_planes, stride=1, bias=False):
    """Build a 3x3 Conv2d with padding 1 (spatial size preserved at stride 1)."""
    return nn.Conv2d(
        in_planes, out_planes,
        kernel_size=3, stride=stride, padding=1, bias=bias,
    )
def conv5x5(in_planes, out_planes, stride=1, bias=False):
    """Build a 5x5 Conv2d with padding 2 (spatial size preserved at stride 1)."""
    return nn.Conv2d(
        in_planes, out_planes,
        kernel_size=5, stride=stride, padding=2, bias=bias,
    )
def norm_diff(new, old, show_list=False):
if show_list:
return [(new[i] - old[i]).norm().item() for i in range(len(new))]
return np.sqrt(sum((new[i] - old[i]).norm().item()**2 for i in range(len(new)))) | 1,332 | 35.027027 | 95 | py |
ped | ped-main/lib/optimizations.py | from torch.nn.parameter import Parameter
import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.autograd import Variable
##############################################################################################################
#
# Temporal DropConnect in a feed-forward setting
#
##############################################################################################################
class WeightDrop(torch.nn.Module):
    def __init__(self, module, weights, dropout=0, temporal=True):
        """
        Weight DropConnect, adapted from a recurrent setting by Merity et al. 2017
        :param module: The module whose weights are to be applied dropout on
        :param weights: A 2D list identifying the weights to be regularized. Each element of weights should be a
                        list containing the "path" to the weight kernel. For instance, if we want to regularize
                        module.layer2.weight3, then this should be ["layer2", "weight3"].
        :param dropout: The dropout rate (0 means no dropout)
        :param temporal: Whether we apply DropConnect only to the temporal parts of the weight (empirically we found
                         this not very important)
        """
        super(WeightDrop, self).__init__()
        self.module = module
        self.weights = weights
        self.dropout = dropout
        self.temporal = temporal
        # With dropout == 0 the wrapped module is left completely untouched.
        if self.dropout > 0.0:
            self._setup()
    def _setup(self):
        # Re-register each targeted weight under "<name>_raw"; the dropped
        # version is written back under the original name at forward time.
        for path in self.weights:
            # full_name_w is computed but unused below.
            full_name_w = '.'.join(path)
            module = self.module
            name_w = path[-1]
            # Walk down the attribute path to the submodule owning the weight.
            for i in range(len(path) - 1):
                module = getattr(module, path[i])
            w = getattr(module, name_w)
            del module._parameters[name_w]
            module.register_parameter(name_w + '_raw', Parameter(w.data))
    def _setweights(self):
        # Recompute the dropped weights from the raw parameters. Must be
        # called on every forward pass while training.
        for path in self.weights:
            module = self.module
            name_w = path[-1]
            for i in range(len(path) - 1):
                module = getattr(module, path[i])
            raw_w = getattr(module, name_w + '_raw')
            if len(raw_w.size()) > 2 and raw_w.size(2) > 1 and self.temporal:
                # Drop the temporal parts of the weight; if 1x1 convolution then drop the whole kernel
                w = torch.cat([F.dropout(raw_w[:, :, :-1], p=self.dropout, training=self.training),
                               raw_w[:, :, -1:]], dim=2)
            else:
                w = F.dropout(raw_w, p=self.dropout, training=self.training)
            # Plain attribute (not a Parameter): recomputed every call.
            setattr(module, name_w, w)
    def forward(self, *args, **kwargs):
        # Refresh the dropped weights, then delegate to the wrapped module.
        if self.dropout > 0.0:
            self._setweights()
        return self.module.forward(*args, **kwargs)
def matrix_diag(a, dim=2):
    """
    Embed the last dimension of `a` as a batched diagonal matrix.

    a has dimension (N, (L,) C); the result is (N, (L,) C, C) with `a` on the
    diagonal of the trailing two dimensions and zeros elsewhere.

    :param a: input tensor whose last dimension becomes the diagonal
    :param dim: kept for backward compatibility (2 or 3); torch.diag_embed
                handles both layouts uniformly, so it is no longer consulted
    :return: tensor of shape a.shape + (a.size(-1),)

    Note: unlike the previous as_strided implementation — which always
    allocated a float32 tensor on the CPU — this preserves the dtype and
    device of `a`.
    """
    return torch.diag_embed(a)
##############################################################################################################
#
# Embedding dropout
#
##############################################################################################################
def embedded_dropout(embed, words, dropout=0.1, scale=None):
    """
    Apply embedding encoder (whose weight we apply a dropout)
    :param embed: The embedding layer
    :param words: The input sequence
    :param dropout: The embedding weight dropout rate (whole rows of the
                    embedding matrix are dropped together)
    :param scale: Scaling factor for the dropped embedding weight
    :return: The embedding output
    """
    if dropout:
        # One Bernoulli draw per vocabulary row, broadcast across the
        # embedding dimension, with inverted-dropout rescaling.
        # (new_empty replaces the deprecated .data.new().resize_() idiom,
        # and the legacy Variable() wrapper is a no-op in modern PyTorch.)
        mask = embed.weight.new_empty((embed.weight.size(0), 1)).bernoulli_(
            1 - dropout).expand_as(embed.weight) / (1 - dropout)
        masked_embed_weight = mask * embed.weight
    else:
        masked_embed_weight = embed.weight
    if scale:
        masked_embed_weight = scale.expand_as(masked_embed_weight) * masked_embed_weight
    # F.embedding requires an int padding index; -1 stands in for "none".
    padding_idx = embed.padding_idx
    if padding_idx is None:
        padding_idx = -1
    X = F.embedding(words, masked_embed_weight, padding_idx, embed.max_norm, embed.norm_type,
                    embed.scale_grad_by_freq, embed.sparse)
    return X
##############################################################################################################
#
# Variational dropout (for input/output layers, and for hidden layers)
#
##############################################################################################################
class VariationalDropout(nn.Module):
    """Feed-forward variational dropout: a single Bernoulli mask is drawn per
    (sample, channel) position and broadcast across the length dimension, so
    every time step sees the same mask."""

    def __init__(self):
        super(VariationalDropout, self).__init__()

    def forward(self, x, dropout=0.5, dim=3):
        # Identity at eval time or when the rate is zero / falsy.
        if not self.training or not dropout:
            return x
        keep = 1 - dropout
        # dim == 4 means layout (M, N, L, C); otherwise (N, L, C). In both
        # cases the mask spans a single slot of the length axis.
        seed = x[:, :, :1] if dim == 4 else x[:, :1]
        noise = torch.zeros_like(seed).bernoulli_(keep)
        scaled = noise.requires_grad_(False) / keep
        return scaled.expand_as(x).to(x) * x
class VariationalHidDropout(nn.Module):
    """Hidden-to-hidden variational dropout: the mask sampled by
    `reset_mask` is reused at every time step and layer until it is reset.

    :param dropout: drop probability (0 disables the module entirely)
    :param length_first: if True the input layout is (N, L, C); else (N, C, L)
    """

    def __init__(self, dropout=0.0, length_first=False):
        super(VariationalHidDropout, self).__init__()
        self.dropout = dropout
        self.mask = None
        self.length_first = length_first

    def reset_mask(self, bsz, d, length):
        """Sample and cache a fresh inverted-dropout mask; returns it."""
        keep = 1 - self.dropout
        if self.length_first:
            # Layout (N, L, C): broadcast along the length axis.
            raw = torch.zeros(bsz, 1, d).bernoulli_(keep)
        else:
            # Layout (N, C, L): broadcast along the last axis.
            raw = torch.zeros(bsz, d, 1).bernoulli_(keep)
        self.mask = raw.requires_grad_(False) / keep
        return self.mask

    def forward(self, x):
        # Identity at eval time or when dropout is disabled.
        if not self.training or self.dropout == 0:
            return x
        assert self.mask is not None, f"You need to reset mask before using {self.__class__.__name__}"
        return self.mask.expand_as(x) * x
class VariationalAttnDropout(VariationalHidDropout):
    """Variational dropout for attention maps: one (head, query, key) mask
    per batch element, reused until `reset_mask` is called again."""

    def __init__(self, dropout=0.0, temporal=True):
        # `temporal` is accepted for interface compatibility but unused.
        super(VariationalAttnDropout, self).__init__(dropout)

    def reset_mask(self, bsz, n_head, qlen, klen):
        """Sample and cache a fresh (N, n_head, L1, L2) mask; returns it."""
        keep = 1 - self.dropout
        sampled = torch.zeros(bsz, n_head, qlen, klen).bernoulli_(keep)
        self.mask = sampled.requires_grad_(False) / keep
        return self.mask
class VariationalHidDropout2d(VariationalHidDropout):
    """2D variant of variational hidden dropout: one mask per sample,
    applied identically at every layer until reset.

    :param spatial: if True, all HxW positions share a per-channel mask, so
                    entire channels can be dropped at once.
    """

    def __init__(self, dropout=0.0, spatial=True):
        super(VariationalHidDropout2d, self).__init__(dropout)
        self.spatial = spatial

    def reset_mask(self, bsz, d, H, W):
        """Sample and cache a fresh (N, C, H, W)-broadcastable mask."""
        keep = 1 - self.dropout
        shape = (bsz, d, 1, 1) if self.spatial else (bsz, d, H, W)
        sampled = torch.zeros(*shape).bernoulli_(keep)
        self.mask = sampled.requires_grad_(False) / keep
        return self.mask
##############################################################################################################
#
# Weight normalization. Modified from the original PyTorch's implementation of weight normalization.
#
##############################################################################################################
def _norm(p, dim):
"""Computes the norm over all dimensions except dim"""
if dim is None:
return p.norm()
elif dim == 0:
output_size = (p.size(0),) + (1,) * (p.dim() - 1)
return p.contiguous().view(p.size(0), -1).norm(dim=1).view(*output_size)
elif dim == p.dim() - 1:
output_size = (1,) * (p.dim() - 1) + (p.size(-1),)
return p.contiguous().view(-1, p.size(-1)).norm(dim=0).view(*output_size)
else:
return _norm(p.transpose(0, dim), 0).transpose(0, dim)
class WeightNorm(object):
    def __init__(self, names, dim):
        """
        Weight normalization module
        :param names: The list of weight names to apply weightnorm on
        :param dim: The dimension of the weights to be normalized
        """
        self.names = names
        self.dim = dim
    def compute_weight(self, module, name):
        # Reconstruct w = v * g / ||v|| from the decomposed parameters.
        g = getattr(module, name + '_g')
        v = getattr(module, name + '_v')
        return v * (g / _norm(v, self.dim))
    @staticmethod
    def apply(module, names, dim):
        # Decompose each named weight into magnitude (g) and direction (v)
        # parameters, and install this object as a forward pre-hook.
        fn = WeightNorm(names, dim)
        for name in names:
            weight = getattr(module, name)
            # remove w from parameter list
            del module._parameters[name]
            # add g and v as new parameters and express w as g/||v|| * v
            module.register_parameter(name + '_g', Parameter(_norm(weight, dim).data))
            module.register_parameter(name + '_v', Parameter(weight.data))
            setattr(module, name, fn.compute_weight(module, name))
        # recompute weight before every forward()
        module.register_forward_pre_hook(fn)
        return fn
    def remove(self, module):
        # Undo the decomposition: restore a single Parameter per name.
        for name in self.names:
            weight = self.compute_weight(module, name)
            delattr(module, name)
            del module._parameters[name + '_g']
            del module._parameters[name + '_v']
            module.register_parameter(name, Parameter(weight.data))
    def reset(self, module):
        # Recompute and re-attach the normalized weights (call manually when
        # the hook below is a no-op).
        for name in self.names:
            setattr(module, name, self.compute_weight(module, name))
    def __call__(self, module, inputs):
        # Typically, every time the module is called we need to recompute the weight. However,
        # in the case of TrellisNet, the same weight is shared across layers, and we can save
        # a lot of intermediate memory by just recomputing once (at the beginning of first call).
        pass
def weight_norm(module, names, dim=0):
    """Attach weight normalization to `module` for each weight in `names`.

    Returns the (mutated) module together with the WeightNorm hook object,
    which can later be used to reset or remove the normalization."""
    hook = WeightNorm.apply(module, names, dim)
    return module, hook
| 11,133 | 37.93007 | 121 | py |
ped | ped-main/lib/solvers.py | # Modified based on the DEQ repo.
import torch
from torch import nn
import torch.nn.functional as functional
from torch.autograd import Function
import numpy as np
import pickle
import sys
import os
from scipy.optimize import root
import time
from termcolor import colored
def _safe_norm(v):
if not torch.isfinite(v).all():
return np.inf
return torch.norm(v)
def scalar_search_armijo(phi, phi0, derphi0, c1=1e-4, alpha0=1, amin=0):
    """Backtracking line search satisfying the Armijo condition.

    :param phi: scalar function of the step size
    :param phi0: phi(0)
    :param derphi0: directional derivative of phi at 0
    :param c1: sufficient-decrease constant
    :param alpha0: initial step size
    :param amin: smallest step size to try before giving up
    :return: (alpha, phi(alpha), iterations) — alpha is None on failure
    """
    ite = 0
    phi_a0 = phi(alpha0)  # First do an update with step size 1
    if phi_a0 <= phi0 + c1*alpha0*derphi0:
        return alpha0, phi_a0, ite
    # Otherwise, compute the minimizer of a quadratic interpolant
    alpha1 = -(derphi0) * alpha0**2 / 2.0 / (phi_a0 - phi0 - derphi0 * alpha0)
    phi_a1 = phi(alpha1)
    # Otherwise loop with cubic interpolation until we find an alpha which
    # satisfies the first Wolfe condition (since we are backtracking, we will
    # assume that the value of alpha is not too small and satisfies the second
    # condition.
    while alpha1 > amin:  # we are assuming alpha>0 is a descent direction
        # Coefficients of the cubic interpolant through the last two trials.
        factor = alpha0**2 * alpha1**2 * (alpha1-alpha0)
        a = alpha0**2 * (phi_a1 - phi0 - derphi0*alpha1) - \
            alpha1**2 * (phi_a0 - phi0 - derphi0*alpha0)
        a = a / factor
        b = -alpha0**3 * (phi_a1 - phi0 - derphi0*alpha1) + \
            alpha1**3 * (phi_a0 - phi0 - derphi0*alpha0)
        b = b / factor
        alpha2 = (-b + torch.sqrt(torch.abs(b**2 - 3 * a * derphi0))) / (3.0*a)
        phi_a2 = phi(alpha2)
        ite += 1
        if (phi_a2 <= phi0 + c1*alpha2*derphi0):
            return alpha2, phi_a2, ite
        # Safeguard: keep the step shrinking at a sane rate.
        if (alpha1 - alpha2) > alpha1 / 2.0 or (1 - alpha2/alpha1) < 0.96:
            alpha2 = alpha1 / 2.0
        alpha0 = alpha1
        alpha1 = alpha2
        phi_a0 = phi_a1
        phi_a1 = phi_a2
    # Failed to find a suitable step length
    return None, phi_a1, ite
def line_search(update, x0, g0, g, nstep=0, on=True):
    """
    `update` is the proposed direction of update.
    Code adapted from scipy.

    :param update: search direction
    :param x0: current iterate
    :param g0: residual g(x0)
    :param g: residual function
    :param nstep: current outer-iteration count (unused here)
    :param on: if False, skip the Armijo search and take a unit step
    :return: (x_new, g_new, x_new - x0, g_new - g0, search iterations)
    """
    # Single-element lists act as mutable cells shared with the closure below,
    # caching the most recent (s, g, phi) evaluation.
    tmp_s = [0]
    tmp_g0 = [g0]
    tmp_phi = [torch.norm(g0)**2]
    # NOTE(review): s_norm is computed but never used below.
    s_norm = torch.norm(x0) / torch.norm(update)
    def phi(s, store=True):
        if s == tmp_s[0]:
            return tmp_phi[0]    # If the step size is so small... just return something
        x_est = x0 + s * update
        g0_new = g(x_est)
        phi_new = _safe_norm(g0_new)**2
        if store:
            tmp_s[0] = s
            tmp_g0[0] = g0_new
            tmp_phi[0] = phi_new
        return phi_new
    if on:
        s, phi1, ite = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0], amin=1e-2)
    # Fall back to a full (unit) step when the search is off or failed.
    if (not on) or s is None:
        s = 1.0
        ite = 0
    x_est = x0 + s * update
    # Reuse the cached residual if phi() already evaluated this step size.
    if s == tmp_s[0]:
        g0_new = tmp_g0[0]
    else:
        g0_new = g(x_est)
    return x_est, g0_new, x_est - x0, g0_new - g0, ite
def rmatvec(part_Us, part_VTs, x):
    """Compute x^T (-I + U V^T), the left product against the low-rank
    inverse-Jacobian estimate used by Broyden's method.

    x:        (N, 2d, L')
    part_Us:  (N, 2d, L', threshold)
    part_VTs: (N, threshold, 2d, L')
    """
    # No secant pairs stored yet: the estimate reduces to -I.
    if part_Us.nelement() == 0:
        return -x
    coeffs = torch.einsum('bij, bijd -> bd', x, part_Us)  # (N, threshold)
    # Result has shape (N, 2d, L'), conceptually a row vector of length 2d*L'.
    return -x + torch.einsum('bd, bdij -> bij', coeffs, part_VTs)
def matvec(part_Us, part_VTs, x):
    """Compute (-I + U V^T) x, the right product against the low-rank
    inverse-Jacobian estimate used by Broyden's method.

    x:        (N, 2d, L')
    part_Us:  (N, 2d, L', threshold)
    part_VTs: (N, threshold, 2d, L')
    """
    # No secant pairs stored yet: the estimate reduces to -I.
    if part_Us.nelement() == 0:
        return -x
    coeffs = torch.einsum('bdij, bij -> bd', part_VTs, x)  # (N, threshold)
    # Result has shape (N, 2d, L'), conceptually a column vector of length 2d*L'.
    return -x + torch.einsum('bijd, bd -> bij', part_Us, coeffs)
def broyden(f, x0, threshold, eps=1e-3, stop_mode="rel", ls=False, name="unknown"):
    """Find a fixed point of f via Broyden's method with a rank-one
    low-rank inverse-Jacobian estimate.

    :param f: function whose fixed point x = f(x) is sought
    :param x0: initial iterate of shape (bsz, 2d, L')
    :param threshold: max iterations (also the stored low-rank dimension)
    :param eps: convergence tolerance on the residual
    :param stop_mode: 'abs' or 'rel' residual criterion
    :param ls: whether to use an Armijo line search per step
    :param name: label for debugging (unused here)
    :return: dict with the best iterate, residual traces and diagnostics
    """
    bsz, total_hsize, seq_len = x0.size()
    # Root-finding formulation: solve g(x) = f(x) - x = 0.
    g = lambda y: f(y) - y
    dev = x0.device
    alternative_mode = 'rel' if stop_mode == 'abs' else 'abs'
    x_est = x0           # (bsz, 2d, L')
    gx = g(x_est)        # (bsz, 2d, L')
    nstep = 0
    tnstep = 0
    # For fast calculation of inv_jacobian (approximately)
    Us = torch.zeros(bsz, total_hsize, seq_len, threshold).to(dev)     # One can also use an L-BFGS scheme to further reduce memory
    VTs = torch.zeros(bsz, threshold, total_hsize, seq_len).to(dev)
    update = -matvec(Us[:,:,:,:nstep], VTs[:,:nstep], gx)      # Formally should be -torch.matmul(inv_jacobian (-I), gx)
    prot_break = False
    # To be used in protective breaks
    protect_thres = (1e6 if stop_mode == "abs" else 1e3) * seq_len
    new_objective = 1e8
    # Per-mode residual history and best-so-far bookkeeping.
    trace_dict = {'abs': [],
                  'rel': []}
    lowest_dict = {'abs': 1e8,
                   'rel': 1e8}
    lowest_step_dict = {'abs': 0,
                        'rel': 0}
    nstep, lowest_xest, lowest_gx = 0, x_est, gx
    while nstep < threshold:
        x_est, gx, delta_x, delta_gx, ite = line_search(update, x_est, gx, g, nstep=nstep, on=ls)
        nstep += 1
        tnstep += (ite+1)
        abs_diff = torch.norm(gx).item()
        rel_diff = abs_diff / (torch.norm(gx + x_est).item() + 1e-9)
        diff_dict = {'abs': abs_diff,
                     'rel': rel_diff}
        trace_dict['abs'].append(abs_diff)
        trace_dict['rel'].append(rel_diff)
        # Track the best iterate seen under the active stopping criterion.
        for mode in ['rel', 'abs']:
            if diff_dict[mode] < lowest_dict[mode]:
                if mode == stop_mode:
                    lowest_xest, lowest_gx = x_est.clone().detach(), gx.clone().detach()
                lowest_dict[mode] = diff_dict[mode]
                lowest_step_dict[mode] = nstep
        new_objective = diff_dict[stop_mode]
        if new_objective < eps: break
        if new_objective < 3*eps and nstep > 30 and np.max(trace_dict[stop_mode][-30:]) / np.min(trace_dict[stop_mode][-30:]) < 1.3:
            # if there's hardly been any progress in the last 30 steps
            break
        # Protective break: residual blew up relative to the first iterate.
        if new_objective > trace_dict[stop_mode][0] * protect_thres:
            prot_break = True
            break
        # Rank-one Broyden update of the inverse-Jacobian estimate
        # (Sherman-Morrison style), with nan entries zeroed out.
        part_Us, part_VTs = Us[:,:,:,:nstep-1], VTs[:,:nstep-1]
        vT = rmatvec(part_Us, part_VTs, delta_x)
        u = (delta_x - matvec(part_Us, part_VTs, delta_gx)) / torch.einsum('bij, bij -> b', vT, delta_gx)[:,None,None]
        vT[vT != vT] = 0
        u[u != u] = 0
        VTs[:,nstep-1] = vT
        Us[:,:,:,nstep-1] = u
        update = -matvec(Us[:,:,:,:nstep], VTs[:,:nstep], gx)
    # Fill everything up to the threshold length
    for _ in range(threshold+1-len(trace_dict[stop_mode])):
        trace_dict[stop_mode].append(lowest_dict[stop_mode])
        trace_dict[alternative_mode].append(lowest_dict[alternative_mode])
    return {"result": lowest_xest,
            "lowest": lowest_dict[stop_mode],
            "nstep": lowest_step_dict[stop_mode],
            "prot_break": prot_break,
            "abs_trace": trace_dict['abs'],
            "rel_trace": trace_dict['rel'],
            "eps": eps,
            "threshold": threshold}
def anderson(f, x0, m=6, lam=1e-4, threshold=50, eps=1e-3, stop_mode='rel', beta=1.0, **kwargs):
    """ Anderson acceleration for fixed point iteration.

    :param f: function whose fixed point x = f(x) is sought
    :param x0: initial iterate; the first axis is the batch dimension
    :param m: history length (number of past iterates mixed together)
    :param lam: Tikhonov regularization added to the Gram matrix
    :param threshold: maximum number of iterations
    :param eps: convergence tolerance under `stop_mode`
    :param stop_mode: 'abs' or 'rel' residual criterion
    :param beta: mixing parameter between F- and X-combinations
    :return: dict with the best iterate, residual traces and diagnostics
    """
    bsz = x0.shape[0]
    dL = x0.shape[1]
    alternative_mode = 'rel' if stop_mode == 'abs' else 'abs'
    # Ring buffers of the last m iterates (X) and their images under f (F).
    X = torch.zeros(bsz, m, dL, dtype=x0.dtype, device=x0.device)
    F = torch.zeros(bsz, m, dL, dtype=x0.dtype, device=x0.device)
    X[:,0], F[:,0] = x0.reshape(bsz, -1), f(x0).reshape(bsz, -1)
    X[:,1], F[:,1] = F[:,0], f(F[:,0].reshape_as(x0)).reshape(bsz, -1)
    # Bordered system enforcing that the mixing weights alpha sum to 1.
    H = torch.zeros(bsz, m+1, m+1, dtype=x0.dtype, device=x0.device)
    H[:,0,1:] = H[:,1:,0] = 1
    y = torch.zeros(bsz, m+1, 1, dtype=x0.dtype, device=x0.device)
    y[:,0] = 1
    trace_dict = {'abs': [],
                  'rel': []}
    lowest_dict = {'abs': 1e8,
                   'rel': 1e8}
    lowest_step_dict = {'abs': 0,
                        'rel': 0}
    for k in range(2, threshold):
        n = min(k, m)
        G = F[:,:n]-X[:,:n]
        H[:,1:n+1,1:n+1] = torch.bmm(G,G.transpose(1,2)) + lam*torch.eye(n, dtype=x0.dtype,device=x0.device)[None]
        # Original (depcrecated) call
        #alpha = torch.solve(y[:,:n+1], H[:,:n+1,:n+1])[0][:, 1:n+1, 0]   # (bsz x n)
        # Solve assuming the system is invertible
        #alpha = torch.linalg.solve(H[:,:n+1,:n+1],y[:,:n+1])[:, 1:n+1, 0]
        # Solve when system is not invertible
        sol = torch.linalg.lstsq(H[:,:n+1,:n+1],y[:,:n+1])[0]
        alpha = sol[:, 1:n+1, 0]
        # New iterate: beta-weighted mix of the alpha-combinations of F and X.
        X[:,k%m] = beta * (alpha[:,None] @ F[:,:n])[:,0] + (1-beta)*(alpha[:,None] @ X[:,:n])[:,0]
        F[:,k%m] = f(X[:,k%m].reshape_as(x0)).reshape(bsz, -1)
        gx = (F[:,k%m] - X[:,k%m]).view_as(x0)
        abs_diff = gx.norm().item()
        rel_diff = abs_diff / (1e-5 + F[:,k%m].norm().item())
        diff_dict = {'abs': abs_diff,
                     'rel': rel_diff}
        trace_dict['abs'].append(abs_diff)
        trace_dict['rel'].append(rel_diff)
        # Track the best iterate seen under the active stopping criterion.
        for mode in ['rel', 'abs']:
            if diff_dict[mode] < lowest_dict[mode]:
                if mode == stop_mode:
                    lowest_xest, lowest_gx = X[:,k%m].view_as(x0).clone().detach(), gx.clone().detach()
                lowest_dict[mode] = diff_dict[mode]
                lowest_step_dict[mode] = k
        if trace_dict[stop_mode][-1] < eps:
            # Converged: pad the traces out to the full threshold length.
            for _ in range(threshold-1-k):
                trace_dict[stop_mode].append(lowest_dict[stop_mode])
                trace_dict[alternative_mode].append(lowest_dict[alternative_mode])
            break
    out = {"result": lowest_xest,
           "lowest": lowest_dict[stop_mode],
           "nstep": lowest_step_dict[stop_mode],
           "prot_break": False,
           "abs_trace": trace_dict['abs'],
           "rel_trace": trace_dict['rel'],
           "eps": eps,
           "threshold": threshold}
    # Drop the history buffers promptly to release memory.
    X = F = None
    return out
def analyze_broyden(res_info, err=None, judge=True, name='forward', training=True, save_err=True):
    """
    For debugging use only :-)

    In judge mode, returns a bool indicating whether the solve looks bad.
    Otherwise classifies the failure, prints a colored warning, optionally
    pickles `err`, and returns (case_id, message, res_info).
    """
    # NOTE(review): this reads 'diff', 'diff_detail' and 'trace' keys, but
    # broyden() above returns 'lowest', 'abs_trace'/'rel_trace' instead —
    # confirm which result-dict format this expects.
    res_est = res_info['result']
    nstep = res_info['nstep']
    diff = res_info['diff']
    diff_detail = res_info['diff_detail']
    prot_break = res_info['prot_break']
    trace = res_info['trace']
    eps = res_info['eps']
    threshold = res_info['threshold']
    if judge:
        # diff != diff is a nan check.
        return nstep >= threshold or (nstep == 0 and (diff != diff or diff > eps)) or prot_break or torch.isnan(res_est).any()
    assert (err is not None), "Must provide err information when not in judgment mode"
    prefix, color = ('', 'red') if name == 'forward' else ('back_', 'blue')
    eval_prefix = '' if training else 'eval_'
    # Case 1: A nan entry is produced in Broyden
    if torch.isnan(res_est).any():
        msg = colored(f"WARNING: nan found in Broyden's {name} result. Diff: {diff}", color)
        print(msg)
        if save_err: pickle.dump(err, open(f'{prefix}{eval_prefix}nan.pkl', 'wb'))
        return (1, msg, res_info)
    # Case 2: Unknown problem with Broyden's method (probably due to nan update(s) to the weights)
    if nstep == 0 and (diff != diff or diff > eps):
        msg = colored(f"WARNING: Bad Broyden's method {name}. Why?? Diff: {diff}. STOP.", color)
        print(msg)
        if save_err: pickle.dump(err, open(f'{prefix}{eval_prefix}badbroyden.pkl', 'wb'))
        return (2, msg, res_info)
    # Case 3: Protective break during Broyden (so that it does not diverge to infinity)
    # (reported stochastically, ~5% of occurrences, to limit log spam)
    if prot_break and np.random.uniform(0,1) < 0.05:
        msg = colored(f"WARNING: Hit Protective Break in {name}. Diff: {diff}. Total Iter: {len(trace)}", color)
        print(msg)
        if save_err: pickle.dump(err, open(f'{prefix}{eval_prefix}prot_break.pkl', 'wb'))
        return (3, msg, res_info)
| 11,850 | 36.742038 | 132 | py |
ped | ped-main/lib/jacobian.py | import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
def jac_loss_estimate(f0, z0, vecs=2, create_graph=True):
    """Estimating tr(J^TJ)=tr(JJ^T) via Hutchinson estimator

    Args:
        f0 (torch.Tensor): Output of the function f (whose J is to be analyzed)
        z0 (torch.Tensor): Input to the function f
        vecs (int, optional): Number of random Gaussian vectors to use. Defaults to 2.
        create_graph (bool, optional): Whether to create backward graph (e.g., to train on this loss).
                                       Defaults to True.

    Returns:
        torch.Tensor: A 1x1 torch tensor that encodes the (shape-normalized) jacobian loss
    """
    total = 0
    for _ in range(vecs):
        # One Gaussian probe per round; vJ = v^T J via reverse-mode autograd.
        v = torch.randn(*z0.shape).to(z0)
        vJ = torch.autograd.grad(f0, z0, v, retain_graph=True, create_graph=create_graph)[0]
        total = total + vJ.norm() ** 2
    # Average over probes and normalize by the number of elements in z0.
    return total / vecs / np.prod(z0.shape)
def power_method(f0, z0, n_iters=200):
    """Estimating the spectral radius of J using power method

    Args:
        f0 (torch.Tensor): Output of the function f (whose J is to be analyzed)
        z0 (torch.Tensor): Input to the function f
        n_iters (int, optional): Number of power method iterations. Defaults to 200.

    Returns:
        tuple: (largest eigenvector, largest (abs.) eigenvalue)
    """
    evector = torch.randn_like(z0)
    bsz = evector.shape[0]
    for i in range(n_iters):
        # v^T J via reverse-mode autograd; free the graph on the last round.
        vTJ = torch.autograd.grad(f0, z0, evector, retain_graph=(i < n_iters-1), create_graph=False)[0]
        # Rayleigh-quotient eigenvalue estimate, one per batch element.
        evalue = (vTJ * evector).reshape(bsz, -1).sum(1, keepdim=True) / (evector * evector).reshape(bsz, -1).sum(1, keepdim=True)
        # Renormalize the eigenvector estimate for the next iteration.
        evector = (vTJ.reshape(bsz, -1) / vTJ.reshape(bsz, -1).norm(dim=1, keepdim=True)).reshape_as(z0)
return (evector, torch.abs(evalue)) | 1,863 | 40.422222 | 130 | py |
T2I_CL | T2I_CL-main/DM-GAN+CL/code/main.py | from __future__ import print_function
from miscc.config import cfg, cfg_from_file
from datasets import TextDataset
from trainer import condGANTrainer as trainer
import os
import sys
import time
import random
import pprint
import datetime
import dateutil.tz
import argparse
import numpy as np
import torch
import torchvision.transforms as transforms
dir_path = (os.path.abspath(os.path.join(os.path.realpath(__file__), './.')))
sys.path.append(dir_path)
def parse_args():
    """Parse command-line options for training/evaluating the network."""
    parser = argparse.ArgumentParser(description='Train a AttnGAN network')
    parser.add_argument('--cfg', dest='cfg_file', type=str,
                        default='cfg/bird_DMGAN.yml',
                        help='optional config file')
    parser.add_argument('--gpu', dest='gpu_id', type=int, default=-1)
    parser.add_argument('--data_dir', dest='data_dir', type=str, default='')
    parser.add_argument('--NET_G', type=str, default='')
    parser.add_argument('--manualSeed', type=int, help='manual seed')
    return parser.parse_args()
def gen_example(wordtoix, algo):
    '''generate images from example sentences'''
    from nltk.tokenize import RegexpTokenizer
    # example_filenames.txt lists one caption file per line.
    filepath = '%s/example_filenames.txt' % (cfg.DATA_DIR)
    data_dic = {}
    # NOTE(review): str.decode on the result of a text-mode read is a
    # Python 2 idiom; under Python 3 this raises AttributeError — confirm
    # the target interpreter (same applies to the read below).
    with open(filepath, "r") as f:
        filenames = f.read().decode('utf8').split('\n')
    for name in filenames:
        if len(name) == 0:
            continue
        filepath = '%s/%s.txt' % (cfg.DATA_DIR, name)
        with open(filepath, "r") as f:
            print('Load from:', name)
            sentences = f.read().decode('utf8').split('\n')
            # a list of indices for a sentence
            captions = []
            cap_lens = []
            for sent in sentences:
                if len(sent) == 0:
                    continue
                # Strip replacement characters left by bad encodings.
                sent = sent.replace("\ufffd\ufffd", " ")
                tokenizer = RegexpTokenizer(r'\w+')
                tokens = tokenizer.tokenize(sent.lower())
                if len(tokens) == 0:
                    print('sent', sent)
                    continue
                # Map each in-vocabulary token to its word index.
                rev = []
                for t in tokens:
                    t = t.encode('ascii', 'ignore').decode('ascii')
                    if len(t) > 0 and t in wordtoix:
                        rev.append(wordtoix[t])
                captions.append(rev)
                cap_lens.append(len(rev))
            # Sort captions by decreasing length and zero-pad to max_len.
            max_len = np.max(cap_lens)
            sorted_indices = np.argsort(cap_lens)[::-1]
            cap_lens = np.asarray(cap_lens)
            cap_lens = cap_lens[sorted_indices]
            cap_array = np.zeros((len(captions), max_len), dtype='int64')
            for i in range(len(captions)):
                idx = sorted_indices[i]
                cap = captions[idx]
                c_len = len(cap)
                cap_array[i, :c_len] = cap
            # Key each entry by the caption file's basename.
            key = name[(name.rfind('/') + 1):]
            data_dic[key] = [cap_array, cap_lens, sorted_indices]
    algo.gen_example(data_dic)
if __name__ == "__main__":
    args = parse_args()
    # Merge the YAML config file and CLI overrides into the global cfg.
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.gpu_id != -1:
        cfg.GPU_ID = args.gpu_id
    else:
        cfg.CUDA = False
    if args.NET_G != '':
        cfg.TRAIN.NET_G = args.NET_G
    if args.data_dir != '':
        cfg.DATA_DIR = args.data_dir
    print('Using config:')
    pprint.pprint(cfg)
    # Fixed seed for evaluation; random seed for training unless given.
    if not cfg.TRAIN.FLAG:
        args.manualSeed = 100
    elif args.manualSeed is None:
        args.manualSeed = random.randint(1, 10000)
    random.seed(args.manualSeed)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    if cfg.CUDA:
        torch.cuda.manual_seed_all(args.manualSeed)
        torch.cuda.set_device(cfg.GPU_ID)
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.deterministic = True
    print("Seed: %d" % (args.manualSeed))
    # Timestamped output directory for checkpoints/samples.
    now = datetime.datetime.now(dateutil.tz.tzlocal())
    timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
    output_dir = '../output/%s_%s_%s' % \
        (cfg.DATASET_NAME, cfg.CONFIG_NAME, timestamp)
    split_dir, bshuffle = 'train', True
    if not cfg.TRAIN.FLAG:
        # bshuffle = False
        split_dir = 'test'
    # Get data loader
    # Final image size grows by a factor of 2 per branch of the tree.
    imsize = cfg.TREE.BASE_SIZE * (2 ** (cfg.TREE.BRANCH_NUM - 1))
    image_transform = transforms.Compose([
        transforms.Scale(int(imsize * 76 / 64)),
        transforms.RandomCrop(imsize),
        transforms.RandomHorizontalFlip()])
    dataset = TextDataset(cfg.DATA_DIR, split_dir,
                          base_size=cfg.TREE.BASE_SIZE,
                          transform=image_transform)
    assert dataset
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=cfg.TRAIN.BATCH_SIZE,
        drop_last=True, shuffle=bshuffle, num_workers=int(cfg.WORKERS))
    # Define models and go to train/evaluate
    algo = trainer(output_dir, dataloader, dataset.n_words, dataset.ixtoword, dataset)
    start_t = time.time()
    if cfg.TRAIN.FLAG:
        algo.train()
    else:
        '''generate images from pre-extracted embeddings'''
        if cfg.B_VALIDATION:
            algo.sampling(split_dir)  # generate images for the whole valid dataset
        else:
            gen_example(dataset.wordtoix, algo)  # generate images for customized captions
    end_t = time.time()
    print('Total time for training:', end_t - start_t)
| 5,482 | 33.923567 | 90 | py |
T2I_CL | T2I_CL-main/DM-GAN+CL/code/masks.py | import torch
def mask_correlated_samples(args):
    """Build the SimCLR negative-pair mask for a 2N x 2N similarity matrix.

    Entries are True for negative pairs only: the diagonal (self-similarity)
    and the two positive positions (i, N+i) / (N+i, i) are zeroed out.
    `args` only needs a `batch_size` attribute.
    """
    n = args.batch_size
    mask = torch.ones((2 * n, 2 * n), dtype=bool).fill_diagonal_(0)
    for idx in range(n):
        mask[idx, n + idx] = 0
        mask[n + idx, idx] = 0
    return mask
def mask_correlated_samples_2(batch_size):
    """Same as mask_correlated_samples but takes the batch size directly.

    Returns a (2*batch_size, 2*batch_size) boolean mask that is True only
    at negative-pair positions (diagonal and the two positive pairs are
    cleared).
    """
    size = 2 * batch_size
    mask = torch.ones((size, size), dtype=bool).fill_diagonal_(0)
    for idx in range(batch_size):
        mask[idx, batch_size + idx] = 0
        mask[batch_size + idx, idx] = 0
    return mask
| 562 | 30.277778 | 77 | py |
T2I_CL | T2I_CL-main/DM-GAN+CL/code/pretrain_DAMSM.py | from __future__ import print_function
from miscc.utils import mkdir_p
from miscc.utils import build_super_images
from miscc.losses import sent_loss, words_loss
from miscc.config import cfg, cfg_from_file
from datasets import TextDataset
from datasets import prepare_data
from model import RNN_ENCODER, CNN_ENCODER
import os
import sys
import time
import random
import pprint
import datetime
import dateutil.tz
import argparse
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from masks import mask_correlated_samples_2
from nt_xent import NT_Xent
dir_path = (os.path.abspath(os.path.join(os.path.realpath(__file__), './.')))
sys.path.append(dir_path)
UPDATE_INTERVAL = 50
def l2norm(X, dim, eps=1e-8):
    """Return X scaled to unit L2 norm along `dim` (eps guards div-by-zero)."""
    denom = X.pow(2).sum(dim=dim, keepdim=True).sqrt().add(eps)
    return X / denom
def parse_args(argv=None):
    """Parse command-line options for DAMSM pre-training.

    Args:
        argv: optional list of argument strings. Defaults to None, in which
            case argparse falls back to ``sys.argv[1:]`` — identical to the
            previous behavior, but now the function can also be driven
            programmatically (e.g. from tests or another script).

    Returns:
        argparse.Namespace with cfg_file, gpu_id, data_dir, manualSeed.
    """
    parser = argparse.ArgumentParser(description='Train a DAMSM network')
    parser.add_argument('--cfg', dest='cfg_file',
                        help='optional config file',
                        default='cfg/DAMSM/bird.yml', type=str)
    parser.add_argument('--gpu', dest='gpu_id', type=int, default=0)
    parser.add_argument('--data_dir', dest='data_dir', type=str, default='')
    parser.add_argument('--manualSeed', type=int, help='manual seed')
    args = parser.parse_args(argv)
    return args
def train(dataloader, cnn_model, rnn_model, batch_size,
          labels, optimizer, epoch, ixtoword, image_dir, criterion):
    """Run one DAMSM training epoch with two augmented views per sample.

    For each batch, word- and sentence-level matching losses are computed
    for both augmented views, and an NT-Xent contrastive loss ties the two
    sentence embeddings of the same sample together.

    Args:
        dataloader: yields raw batches consumed by prepare_data().
        cnn_model: CNN_ENCODER producing region/global image features.
        rnn_model: RNN_ENCODER producing word/sentence embeddings.
        batch_size: fixed batch size (loader uses drop_last=True).
        labels: LongTensor [0..batch_size) pairing image i with caption i.
        optimizer: optimizer over both encoders' trainable parameters.
        epoch: current epoch index (logging only).
        ixtoword: index->word map (kept for the attention visualization
            hook; currently unused).
        image_dir: output dir for visualizations (currently unused).
        criterion: NT_Xent contrastive loss over sentence embeddings.

    Returns:
        Global step counter (epoch * len(dataloader) + last logged step).
    """
    cnn_model.train()
    rnn_model.train()
    s_total_loss0 = 0
    s_total_loss1 = 0
    w_total_loss0 = 0
    w_total_loss1 = 0
    count = (epoch + 1) * len(dataloader)
    start_time = time.time()
    for step, data in enumerate(dataloader, 0):
        rnn_model.zero_grad()
        cnn_model.zero_grad()
        # Two augmented views of each sample, plus the index permutations
        # that sorted each view's captions by length.
        imgs, imgs_2, captions, cap_lens, class_ids, keys, captions_2, cap_lens_2, class_ids_2, \
            sort_ind, sort_ind_2 = prepare_data(data)
        # words_features: batch_size x nef x 17 x 17
        # sent_code: batch_size x nef
        words_features, sent_code = cnn_model(imgs[-1])
        words_features_2, sent_code_2 = cnn_model(imgs_2[-1])
        nef, att_sze = words_features.size(1), words_features.size(2)
        hidden = rnn_model.init_hidden(batch_size)
        # words_emb: batch_size x nef x seq_len; sent_emb: batch_size x nef
        words_emb, sent_emb = rnn_model(captions, cap_lens, hidden)
        words_emb_2, sent_emb_2 = rnn_model(captions_2, cap_lens_2, hidden)
        # Word-level matching loss: view 1 then view 2.
        w_loss0, w_loss1, attn_maps = words_loss(words_features, words_emb, labels,
                                                 cap_lens, class_ids, batch_size)
        w_total_loss0 += w_loss0.data
        w_total_loss1 += w_loss1.data
        loss = w_loss0 + w_loss1
        w2_loss0, w2_loss1, attn_maps_2 = words_loss(words_features_2, words_emb_2, labels,
                                                     cap_lens_2, class_ids_2, batch_size)
        w_total_loss0 += w2_loss0.data
        w_total_loss1 += w2_loss1.data
        loss += w2_loss0 + w2_loss1
        # Sentence-level matching loss: view 1 then view 2.
        s_loss0, s_loss1 = \
            sent_loss(sent_code, sent_emb, labels, class_ids, batch_size)
        loss += s_loss0 + s_loss1
        s_total_loss0 += s_loss0.data
        s_total_loss1 += s_loss1.data
        s2_loss0, s2_loss1 = \
            sent_loss(sent_code_2, sent_emb_2, labels, class_ids_2, batch_size)
        loss += s2_loss0 + s2_loss1
        s_total_loss0 += s2_loss0.data
        s_total_loss1 += s2_loss1.data
        # Undo the per-view length sorting so row i of both embedding
        # matrices refers to the same underlying sample, then L2-normalize
        # before the contrastive loss.
        _, ori_indices = torch.sort(sort_ind, 0)
        _, ori_indices_2 = torch.sort(sort_ind_2, 0)
        sent_emb = sent_emb[ori_indices]
        sent_emb_2 = sent_emb_2[ori_indices_2]
        sent_emb = l2norm(sent_emb, dim=1)
        sent_emb_2 = l2norm(sent_emb_2, dim=1)
        contrative_loss = criterion(sent_emb, sent_emb_2)
        loss += contrative_loss
        loss.backward()
        # `clip_grad_norm_` helps prevent the exploding-gradient problem in
        # RNNs / LSTMs.  (Fix: the old non-underscore `clip_grad_norm` alias
        # is deprecated and removed in current PyTorch; the in-place variant
        # has identical behavior.)
        torch.nn.utils.clip_grad_norm_(rnn_model.parameters(),
                                       cfg.TRAIN.RNN_GRAD_CLIP)
        optimizer.step()
        if step % UPDATE_INTERVAL == 0:
            count = epoch * len(dataloader) + step
            s_cur_loss0 = s_total_loss0.item() / UPDATE_INTERVAL
            s_cur_loss1 = s_total_loss1.item() / UPDATE_INTERVAL
            w_cur_loss0 = w_total_loss0.item() / UPDATE_INTERVAL
            w_cur_loss1 = w_total_loss1.item() / UPDATE_INTERVAL
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | '
                  's_loss {:5.2f} {:5.2f} | '
                  'w_loss {:5.2f} {:5.2f}'
                  .format(epoch, step, len(dataloader),
                          elapsed * 1000. / UPDATE_INTERVAL,
                          s_cur_loss0, s_cur_loss1,
                          w_cur_loss0, w_cur_loss1))
            s_total_loss0 = 0
            s_total_loss1 = 0
            w_total_loss0 = 0
            w_total_loss1 = 0
            start_time = time.time()
    return count
def evaluate(dataloader, cnn_model, rnn_model, batch_size, criterion):
    """Average sentence/word DAMSM losses over up to 50 validation batches.

    Only the first augmented view of each batch is scored.

    NOTE(review): `labels` is not a parameter — it is read from module
    scope (created in __main__ via build_models). `criterion` is accepted
    but never used here. The final division assumes `step` > 0, i.e. the
    loader must yield at least two batches — confirm for small datasets.
    """
    cnn_model.eval()
    rnn_model.eval()
    s_total_loss = 0
    w_total_loss = 0
    for step, data in enumerate(dataloader, 0):
        # real_imgs, captions, cap_lens, \
        #     class_ids, keys = prepare_data(data)
        # prepare_data returns both augmented views; the second view's
        # tensors are unpacked but unused below.
        real_imgs, imgs_2, captions, cap_lens, class_ids, keys, captions_2, cap_lens_2, class_ids_2, \
        sort_ind, sort_ind_2 = prepare_data(data)
        words_features, sent_code = cnn_model(real_imgs[-1])
        # nef = words_features.size(1)
        # words_features = words_features.view(batch_size, nef, -1)
        hidden = rnn_model.init_hidden(batch_size)
        words_emb, sent_emb = rnn_model(captions, cap_lens, hidden)
        w_loss0, w_loss1, attn = words_loss(words_features, words_emb, labels,
                                            cap_lens, class_ids, batch_size)
        w_total_loss += (w_loss0 + w_loss1).data
        s_loss0, s_loss1 = \
            sent_loss(sent_code, sent_emb, labels, class_ids, batch_size)
        s_total_loss += (s_loss0 + s_loss1).data
        # Cap evaluation at 50 batches for speed.
        if step == 50:
            break
    s_cur_loss = s_total_loss.item() / step
    w_cur_loss = w_total_loss.item() / step
    return s_cur_loss, w_cur_loss
def build_models():
    """Construct the RNN text encoder, CNN image encoder and match labels.

    NOTE(review): reads module-level globals `dataset`, `batch_size` and
    `cfg` (all bound in __main__). When cfg.TRAIN.NET_E names a checkpoint,
    both encoders are restored from it (the image encoder path is derived
    by substituting 'image_encoder' for 'text_encoder') and the starting
    epoch is parsed out of the filename.

    Returns:
        (text_encoder, image_encoder, labels, start_epoch)
    """
    # build model ############################################################
    text_encoder = RNN_ENCODER(dataset.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
    image_encoder = CNN_ENCODER(cfg.TEXT.EMBEDDING_DIM)
    # labels 0..batch_size-1: image i matches caption i.
    labels = Variable(torch.LongTensor(range(batch_size)))
    start_epoch = 0
    if cfg.TRAIN.NET_E != '':
        state_dict = torch.load(cfg.TRAIN.NET_E)
        text_encoder.load_state_dict(state_dict)
        print('Load ', cfg.TRAIN.NET_E)
        #
        name = cfg.TRAIN.NET_E.replace('text_encoder', 'image_encoder')
        state_dict = torch.load(name)
        image_encoder.load_state_dict(state_dict)
        print('Load ', name)
        # Recover the epoch number embedded in the checkpoint filename and
        # resume from the following epoch.
        istart = cfg.TRAIN.NET_E.rfind('_') + 8
        iend = cfg.TRAIN.NET_E.rfind('.')
        start_epoch = cfg.TRAIN.NET_E[istart:iend]
        start_epoch = int(start_epoch) + 1
        print('start_epoch', start_epoch)
    if cfg.CUDA:
        text_encoder = text_encoder.cuda()
        image_encoder = image_encoder.cuda()
        labels = labels.cuda()
    return text_encoder, image_encoder, labels, start_epoch
if __name__ == "__main__":
    # Entry point: configure, seed, build data loaders and encoders, then
    # pre-train the DAMSM encoders with matching + contrastive losses.
    args = parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.gpu_id == -1:
        cfg.CUDA = False
    else:
        cfg.GPU_ID = args.gpu_id
    if args.data_dir != '':
        cfg.DATA_DIR = args.data_dir
    print('Using config:')
    pprint.pprint(cfg)
    # Fixed seed when evaluating so runs are reproducible; random otherwise.
    if not cfg.TRAIN.FLAG:
        args.manualSeed = 100
    elif args.manualSeed is None:
        args.manualSeed = random.randint(1, 10000)
    random.seed(args.manualSeed)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    if cfg.CUDA:
        torch.cuda.manual_seed_all(args.manualSeed)
    ##########################################################################
    now = datetime.datetime.now(dateutil.tz.tzlocal())
    timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
    output_dir = '../output/%s_%s_%s' % \
        (cfg.DATASET_NAME, cfg.CONFIG_NAME, timestamp)
    model_dir = os.path.join(output_dir, 'Model')
    image_dir = os.path.join(output_dir, 'Image')
    mkdir_p(model_dir)
    mkdir_p(image_dir)
    torch.cuda.set_device(cfg.GPU_ID)
    cudnn.benchmark = True
    # Get data loader ##################################################
    # Final image side length doubles per generator branch.
    imsize = cfg.TREE.BASE_SIZE * (2 ** (cfg.TREE.BRANCH_NUM-1))
    batch_size = cfg.TRAIN.BATCH_SIZE
    # NOTE(review): transforms.Scale is the long-deprecated alias of Resize;
    # this pins the code to an old torchvision.
    image_transform = transforms.Compose([
        transforms.Scale(int(imsize * 76 / 64)),
        transforms.RandomCrop(imsize),
        transforms.RandomHorizontalFlip()])
    dataset = TextDataset(cfg.DATA_DIR, 'train',
                          base_size=cfg.TREE.BASE_SIZE,
                          transform=image_transform)
    print(dataset.n_words, dataset.embeddings_num)
    assert dataset
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, drop_last=True,
        shuffle=True, num_workers=int(cfg.WORKERS))
    # # validation data #
    dataset_val = TextDataset(cfg.DATA_DIR, 'test',
                              base_size=cfg.TREE.BASE_SIZE,
                              transform=image_transform)
    dataloader_val = torch.utils.data.DataLoader(
        dataset_val, batch_size=batch_size, drop_last=True,
        shuffle=True, num_workers=int(cfg.WORKERS))
    # Train ##############################################################
    text_encoder, image_encoder, labels, start_epoch = build_models()
    # Optimize the text encoder plus only the trainable CNN layers
    # (the Inception backbone is frozen inside CNN_ENCODER).
    para = list(text_encoder.parameters())
    for v in image_encoder.parameters():
        if v.requires_grad:
            para.append(v)
    # optimizer = optim.Adam(para, lr=cfg.TRAIN.ENCODER_LR, betas=(0.5, 0.999))
    # At any point you can hit Ctrl + C to break out of training early.
    # Contrastive (NT-Xent) loss over the two augmented views per sample.
    mask = mask_correlated_samples_2(batch_size)
    temperature = 0.5
    device = labels.get_device()
    criterion = NT_Xent(batch_size, temperature, mask, device)
    try:
        lr = cfg.TRAIN.ENCODER_LR
        for epoch in range(start_epoch, cfg.TRAIN.MAX_EPOCH):
            # Optimizer is rebuilt each epoch so the decayed lr takes effect.
            optimizer = optim.Adam(para, lr=lr, betas=(0.5, 0.999))
            epoch_start_time = time.time()
            count = train(dataloader, image_encoder, text_encoder,
                          batch_size, labels, optimizer, epoch,
                          dataset.ixtoword, image_dir, criterion)
            print('-' * 89)
            if len(dataloader_val) > 0:
                s_loss, w_loss = evaluate(dataloader_val, image_encoder,
                                          text_encoder, batch_size, criterion)
                print('| end epoch {:3d} | valid loss '
                      '{:5.2f} {:5.2f} | lr {:.5f}|'
                      .format(epoch, s_loss, w_loss, lr))
            print('-' * 89)
            # Exponential lr decay, floored at a tenth of the initial rate.
            if lr > cfg.TRAIN.ENCODER_LR/10.:
                lr *= 0.98
            if (epoch % cfg.TRAIN.SNAPSHOT_INTERVAL == 0 or
                epoch == cfg.TRAIN.MAX_EPOCH):
                torch.save(image_encoder.state_dict(),
                           '%s/image_encoder%d.pth' % (model_dir, epoch))
                torch.save(text_encoder.state_dict(),
                           '%s/text_encoder%d.pth' % (model_dir, epoch))
                print('Save G/Ds models.')
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early')
| 13,050 | 34.083333 | 102 | py |
T2I_CL | T2I_CL-main/DM-GAN+CL/code/model.py | import torch
import torch.nn as nn
import torch.nn.parallel
from torch.autograd import Variable
from torchvision import models
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from miscc.config import cfg
from GlobalAttention import GlobalAttentionGeneral as ATT_NET
from GlobalAttention import GlobalAttention_text as ATT_NET_text
from spectral import SpectralNorm
class GLU(nn.Module):
    """Gated Linear Unit over the channel dimension.

    Splits the input's channels in half and returns
    ``first_half * sigmoid(second_half)``, so the output has half the
    input's channels. Requires an even channel count.
    """

    def __init__(self):
        super(GLU, self).__init__()

    def forward(self, x):
        nc = x.size(1)
        assert nc % 2 == 0, 'channels dont divide 2!'
        nc = int(nc/2)
        # torch.sigmoid replaces the deprecated F.sigmoid (identical output).
        return x[:, :nc] * torch.sigmoid(x[:, nc:])
def conv1x1(in_planes, out_planes, bias=False):
    """Pointwise (1x1) convolution, stride 1, no padding; bias off by default."""
    layer = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                      stride=1, padding=0, bias=bias)
    return layer
def conv3x3(in_planes, out_planes, bias=False):
    """3x3 convolution, stride 1, padding 1 (spatial size preserved)."""
    layer = nn.Conv2d(in_planes, out_planes, kernel_size=3,
                      stride=1, padding=1, bias=bias)
    return layer
# Upsale the spatial size by a factor of 2
def upBlock(in_planes, out_planes):
    """2x nearest-neighbour upsample, then conv3x3 + BN + GLU.

    The conv emits 2*out_planes channels because the GLU halves them again,
    so the block's net output has out_planes channels.
    """
    return nn.Sequential(
        nn.Upsample(scale_factor=2, mode='nearest'),
        conv3x3(in_planes, out_planes * 2),
        nn.BatchNorm2d(out_planes * 2),
        GLU(),
    )
# Keep the spatial size
def Block3x3_relu(in_planes, out_planes):
    """Spatial-size-preserving conv3x3 + BN + GLU (net out_planes channels)."""
    return nn.Sequential(
        conv3x3(in_planes, out_planes * 2),
        nn.BatchNorm2d(out_planes * 2),
        GLU(),
    )
class ResBlock(nn.Module):
    """Residual block: (conv3x3 -> BN -> GLU -> conv3x3 -> BN) + identity.

    Channel count and spatial size are unchanged, so the skip connection
    adds elementwise.
    """

    def __init__(self, channel_num):
        super(ResBlock, self).__init__()
        self.block = nn.Sequential(
            conv3x3(channel_num, channel_num * 2),
            nn.BatchNorm2d(channel_num * 2),
            GLU(),
            conv3x3(channel_num, channel_num),
            nn.BatchNorm2d(channel_num),
        )

    def forward(self, x):
        return self.block(x) + x
# ############## Text2Image Encoder-Decoder #######
# ############## Text2Image Encoder-Decoder #######
class RNN_ENCODER(nn.Module):
    """Recurrent text encoder (DAMSM text branch).

    Embeds token ids and runs a (by default bidirectional) LSTM/GRU,
    returning per-word features and a sentence embedding. The RNN type is
    taken from cfg.RNN_TYPE.
    """
    def __init__(self, ntoken, ninput=300, drop_prob=0.5,
                 nhidden=128, nlayers=1, bidirectional=True):
        super(RNN_ENCODER, self).__init__()
        self.n_steps = cfg.TEXT.WORDS_NUM
        self.ntoken = ntoken  # size of the dictionary
        self.ninput = ninput  # size of each embedding vector
        self.drop_prob = drop_prob  # probability of an element to be zeroed
        self.nlayers = nlayers  # Number of recurrent layers
        self.bidirectional = bidirectional
        self.rnn_type = cfg.RNN_TYPE
        if bidirectional:
            self.num_directions = 2
        else:
            self.num_directions = 1
        # number of features in the hidden state; halved per direction so
        # the concatenated bidirectional output is `nhidden` wide overall.
        self.nhidden = nhidden // self.num_directions
        self.define_module()
        self.init_weights()

    def define_module(self):
        """Create the embedding, dropout and RNN submodules."""
        self.encoder = nn.Embedding(self.ntoken, self.ninput)
        self.drop = nn.Dropout(self.drop_prob)
        if self.rnn_type == 'LSTM':
            # dropout: If non-zero, introduces a dropout layer on
            # the outputs of each RNN layer except the last layer
            self.rnn = nn.LSTM(self.ninput, self.nhidden,
                               self.nlayers, batch_first=True,
                               dropout=self.drop_prob,
                               bidirectional=self.bidirectional)
        elif self.rnn_type == 'GRU':
            self.rnn = nn.GRU(self.ninput, self.nhidden,
                              self.nlayers, batch_first=True,
                              dropout=self.drop_prob,
                              bidirectional=self.bidirectional)
        else:
            raise NotImplementedError

    def init_weights(self):
        """Uniformly initialize the embedding table in [-0.1, 0.1]."""
        initrange = 0.1
        self.encoder.weight.data.uniform_(-initrange, initrange)
        # Do not need to initialize RNN parameters, which have been initialized
        # http://pytorch.org/docs/master/_modules/torch/nn/modules/rnn.html#LSTM
        # self.decoder.weight.data.uniform_(-initrange, initrange)
        # self.decoder.bias.data.fill_(0)

    def init_hidden(self, bsz):
        """Return zeroed initial hidden state(s) for a batch of size `bsz`.

        LSTM gets an (h0, c0) tuple; GRU a single tensor.
        """
        weight = next(self.parameters()).data
        if self.rnn_type == 'LSTM':
            return (Variable(weight.new(self.nlayers * self.num_directions, bsz, self.nhidden).zero_()),
                    Variable(weight.new(self.nlayers * self.num_directions, bsz, self.nhidden).zero_()))
        else:
            return Variable(weight.new(self.nlayers * self.num_directions, bsz, self.nhidden).zero_())

    def forward(self, captions, cap_lens, hidden, mask=None):
        """Encode padded captions (sorted by decreasing length).

        Returns:
            words_emb: batch x (nhidden*num_directions) x seq_len word features.
            sent_emb: batch x (nhidden*num_directions) sentence embedding
                (final hidden state, directions concatenated).
        """
        # input: torch.LongTensor of size batch x n_steps
        # --> emb: batch x n_steps x ninput
        emb = self.drop(self.encoder(captions))
        #
        # Returns: a PackedSequence object
        cap_lens = cap_lens.data.tolist()
        emb = pack_padded_sequence(emb, cap_lens, batch_first=True)
        # #hidden and memory (num_layers * num_directions, batch, hidden_size):
        # tensor containing the initial hidden state for each element in batch.
        # #output (batch, seq_len, hidden_size * num_directions)
        # #or a PackedSequence object:
        # tensor containing output features (h_t) from the last layer of RNN
        output, hidden = self.rnn(emb, hidden)
        # PackedSequence object
        # --> (batch, seq_len, hidden_size * num_directions)
        output = pad_packed_sequence(output, batch_first=True)[0]
        # output = self.drop(output)
        # --> batch x hidden_size*num_directions x seq_len
        words_emb = output.transpose(1, 2)
        # --> batch x num_directions*hidden_size
        if self.rnn_type == 'LSTM':
            sent_emb = hidden[0].transpose(0, 1).contiguous()
        else:
            sent_emb = hidden.transpose(0, 1).contiguous()
        sent_emb = sent_emb.view(-1, self.nhidden * self.num_directions)
        return words_emb, sent_emb
class CNN_ENCODER(nn.Module):
    """Inception-v3 image encoder (DAMSM image branch).

    Downloads the Google-pretrained Inception v3 weights at construction
    time, freezes the whole backbone, and adds two trainable projections:
    a 1x1 conv over the Mixed_6e feature map (region features) and a linear
    layer over the pooled output (global image code), both of width nef.
    """
    def __init__(self, nef):
        super(CNN_ENCODER, self).__init__()
        if cfg.TRAIN.FLAG:
            self.nef = nef
        else:
            self.nef = 256  # define a uniform ranker
        model = models.inception_v3()
        # NOTE: fetches pretrained weights from the network at init time.
        url = 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'
        model.load_state_dict(model_zoo.load_url(url))
        for param in model.parameters():
            param.requires_grad = False
        print('Load pretrained model from ', url)
        # print(model)
        self.define_module(model)
        self.init_trainable_weights()

    def define_module(self, model):
        """Adopt the Inception layers and create the two projection heads."""
        self.Conv2d_1a_3x3 = model.Conv2d_1a_3x3
        self.Conv2d_2a_3x3 = model.Conv2d_2a_3x3
        self.Conv2d_2b_3x3 = model.Conv2d_2b_3x3
        self.Conv2d_3b_1x1 = model.Conv2d_3b_1x1
        self.Conv2d_4a_3x3 = model.Conv2d_4a_3x3
        self.Mixed_5b = model.Mixed_5b
        self.Mixed_5c = model.Mixed_5c
        self.Mixed_5d = model.Mixed_5d
        self.Mixed_6a = model.Mixed_6a
        self.Mixed_6b = model.Mixed_6b
        self.Mixed_6c = model.Mixed_6c
        self.Mixed_6d = model.Mixed_6d
        self.Mixed_6e = model.Mixed_6e
        self.Mixed_7a = model.Mixed_7a
        self.Mixed_7b = model.Mixed_7b
        self.Mixed_7c = model.Mixed_7c
        # Trainable heads (everything above is frozen).
        self.emb_features = conv1x1(768, self.nef)
        self.emb_cnn_code = nn.Linear(2048, self.nef)

    def init_trainable_weights(self):
        """Uniformly initialize the two projection heads in [-0.1, 0.1]."""
        initrange = 0.1
        self.emb_features.weight.data.uniform_(-initrange, initrange)
        self.emb_cnn_code.weight.data.uniform_(-initrange, initrange)

    def forward(self, x):
        """Return (region features batch x nef x 17 x 17, global code batch x nef)."""
        features = None
        # --> fixed-size input: batch x 3 x 299 x 299
        x = nn.Upsample(size=(299, 299), mode='bilinear', align_corners=True)(x)
        # 299 x 299 x 3
        x = self.Conv2d_1a_3x3(x)
        # 149 x 149 x 32
        x = self.Conv2d_2a_3x3(x)
        # 147 x 147 x 32
        x = self.Conv2d_2b_3x3(x)
        # 147 x 147 x 64
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 73 x 73 x 64
        x = self.Conv2d_3b_1x1(x)
        # 73 x 73 x 80
        x = self.Conv2d_4a_3x3(x)
        # 71 x 71 x 192
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 35 x 35 x 192
        x = self.Mixed_5b(x)
        # 35 x 35 x 256
        x = self.Mixed_5c(x)
        # 35 x 35 x 288
        x = self.Mixed_5d(x)
        # 35 x 35 x 288
        x = self.Mixed_6a(x)
        # 17 x 17 x 768
        x = self.Mixed_6b(x)
        # 17 x 17 x 768
        x = self.Mixed_6c(x)
        # 17 x 17 x 768
        x = self.Mixed_6d(x)
        # 17 x 17 x 768
        x = self.Mixed_6e(x)
        # 17 x 17 x 768
        # image region features
        features = x
        # 17 x 17 x 768
        x = self.Mixed_7a(x)
        # 8 x 8 x 1280
        x = self.Mixed_7b(x)
        # 8 x 8 x 2048
        x = self.Mixed_7c(x)
        # 8 x 8 x 2048
        x = F.avg_pool2d(x, kernel_size=8)
        # 1 x 1 x 2048
        # x = F.dropout(x, training=self.training)
        # 1 x 1 x 2048
        x = x.view(x.size(0), -1)
        # 2048
        # global image features
        cnn_code = self.emb_cnn_code(x)
        # 512
        if features is not None:
            features = self.emb_features(features)
        return features, cnn_code
# ############## G networks ###################
class CA_NET(nn.Module):
# some code is modified from vae examples
# (https://github.com/pytorch/examples/blob/master/vae/main.py)
def __init__(self):
super(CA_NET, self).__init__()
self.t_dim = cfg.TEXT.EMBEDDING_DIM
self.c_dim = cfg.GAN.CONDITION_DIM
self.fc = nn.Linear(self.t_dim, self.c_dim * 4, bias=True)
self.relu = GLU()
def encode(self, text_embedding):
x = self.relu(self.fc(text_embedding))
mu = x[:, :self.c_dim]
logvar = x[:, self.c_dim:]
return mu, logvar
def reparametrize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
if cfg.CUDA:
eps = torch.cuda.FloatTensor(std.size()).normal_()
else:
eps = torch.FloatTensor(std.size()).normal_()
eps = Variable(eps)
return eps.mul(std).add_(mu)
def forward(self, text_embedding):
mu, logvar = self.encode(text_embedding)
c_code = self.reparametrize(mu, logvar)
return c_code, mu, logvar
class INIT_STAGE_G(nn.Module):
    """First generator stage: (noise, condition) -> ngf/16 x 64 x 64 features."""

    def __init__(self, ngf, ncf):
        super(INIT_STAGE_G, self).__init__()
        self.gf_dim = ngf
        self.in_dim = cfg.GAN.Z_DIM + ncf  # cfg.TEXT.EMBEDDING_DIM
        self.define_module()

    def define_module(self):
        nz, ngf = self.in_dim, self.gf_dim
        # Linear projection to a 4x4 map with ngf channels (GLU halves 2*ngf).
        self.fc = nn.Sequential(
            nn.Linear(nz, ngf * 4 * 4 * 2, bias=False),
            nn.BatchNorm1d(ngf * 4 * 4 * 2),
            GLU())
        # Four 2x upsampling stages: 4 -> 8 -> 16 -> 32 -> 64, halving
        # the channel count at each step.
        self.upsample1 = upBlock(ngf, ngf // 2)
        self.upsample2 = upBlock(ngf // 2, ngf // 4)
        self.upsample3 = upBlock(ngf // 4, ngf // 8)
        self.upsample4 = upBlock(ngf // 8, ngf // 16)

    def forward(self, z_code, c_code):
        """
        :param z_code: batch x cfg.GAN.Z_DIM
        :param c_code: batch x cfg.TEXT.EMBEDDING_DIM
        :return: batch x ngf/16 x 64 x 64
        """
        joint = torch.cat((c_code, z_code), 1)
        feat = self.fc(joint).view(-1, self.gf_dim, 4, 4)
        feat = self.upsample1(feat)
        feat = self.upsample2(feat)
        feat32 = self.upsample3(feat)
        return self.upsample4(feat32)
class Memory(nn.Module):
    """Key-addressed memory read used by the dynamic-memory generator stage.

    Image features act as per-pixel queries against a (key, value) memory
    built from word features; the result is a value read weighted by a
    softmax over memory slots. An optional boolean mask set via applyMask
    excludes padded word slots.
    """
    def __init__(self):
        super(Memory, self).__init__()
        self.sm = nn.Softmax()
        self.mask = None

    def applyMask(self, mask):
        # True entries mark word slots to be ignored in the next forward().
        self.mask = mask  # batch x sourceL

    def forward(self, input, context_key, content_value):#
        """
        input: batch x idf x ih x iw (queryL=ihxiw)
        context: batch x idf x sourceL
        """
        ih, iw = input.size(2), input.size(3)
        queryL = ih * iw
        batch_size, sourceL = context_key.size(0), context_key.size(2)
        # --> batch x queryL x idf
        target = input.view(batch_size, -1, queryL)
        targetT = torch.transpose(target, 1, 2).contiguous()
        sourceT = context_key
        # Get weight
        # (batch x queryL x idf)(batch x idf x sourceL)-->batch x queryL x sourceL
        weight = torch.bmm(targetT, sourceT)
        # --> batch*queryL x sourceL
        weight = weight.view(batch_size * queryL, sourceL)
        if self.mask is not None:
            # batch_size x sourceL --> batch_size*queryL x sourceL
            mask = self.mask.repeat(queryL, 1)
            # -inf logits become exactly zero after the softmax below.
            weight.data.masked_fill_(mask.data, -float('inf'))
        # Normalize over memory slots (sourceL) for each pixel query.
        weight = torch.nn.functional.softmax(weight, dim=1)
        # --> batch x queryL x sourceL
        weight = weight.view(batch_size, queryL, sourceL)
        # --> batch x sourceL x queryL
        weight = torch.transpose(weight, 1, 2).contiguous()
        # (batch x idf x sourceL)(batch x sourceL x queryL) --> batch x idf x queryL
        weightedContext = torch.bmm(content_value, weight) #
        weightedContext = weightedContext.view(batch_size, -1, ih, iw)
        weight = weight.view(batch_size, -1, ih, iw)
        return weightedContext, weight
class NEXT_STAGE_G(nn.Module):
    """Refinement generator stage with a dynamic word memory (DM-GAN).

    Writes word features gated against the pooled image code into a memory,
    reads it back per pixel via key/value attention, fuses the read with the
    incoming feature map through a response gate, and upsamples 2x.
    """
    def __init__(self, ngf, nef, ncf, size):
        super(NEXT_STAGE_G, self).__init__()
        self.gf_dim = ngf
        self.ef_dim = nef
        self.cf_dim = ncf
        self.num_residual = cfg.GAN.R_NUM
        # Spatial side length of the incoming feature map (used by AvgPool).
        self.size = size
        self.define_module()

    def _make_layer(self, block, channel_num):
        # Stack cfg.GAN.R_NUM residual blocks.
        layers = []
        for i in range(cfg.GAN.R_NUM):
            layers.append(block(channel_num))
        return nn.Sequential(*layers)

    def define_module(self):
        ngf = self.gf_dim
        # Global average pool over the full map -> one vector per image.
        self.avg = nn.AvgPool2d(kernel_size=self.size)
        # A/B produce the scalar writing gate from word / image features.
        self.A = nn.Linear(self.ef_dim, 1, bias=False)
        self.B = nn.Linear(self.gf_dim, 1, bias=False)
        self.sigmoid = nn.Sigmoid()
        # M_r / M_w embed image and word features into the memory space.
        self.M_r = nn.Sequential(
            nn.Conv1d(ngf, ngf * 2, kernel_size=1, stride=1, padding=0),
            nn.ReLU()
        )
        self.M_w = nn.Sequential(
            nn.Conv1d(self.ef_dim, ngf * 2, kernel_size=1, stride=1, padding=0),
            nn.ReLU()
        )
        # Project memory slots into key / value spaces for addressing.
        self.key = nn.Sequential(
            nn.Conv1d(ngf*2, ngf, kernel_size=1, stride=1, padding=0),
            nn.ReLU()
        )
        self.value = nn.Sequential(
            nn.Conv1d(ngf*2, ngf, kernel_size=1, stride=1, padding=0),
            nn.ReLU()
        )
        self.memory_operation = Memory()
        self.response_gate = nn.Sequential(
            nn.Conv2d(self.gf_dim * 2, 1, kernel_size=1, stride=1, padding=0),
            nn.Sigmoid()
        )
        self.residual = self._make_layer(ResBlock, ngf * 2)
        self.upsample = upBlock(ngf * 2, ngf)

    def forward(self, h_code, c_code, word_embs, mask, cap_lens):
        """
        h_code(image features): batch x idf x ih x iw (queryL=ihxiw)
        word_embs(word features): batch x cdf x sourceL (sourceL=seq_len)
        c_code: batch x idf x queryL
        att1: batch x sourceL x queryL

        NOTE(review): `c_code` and `cap_lens` are accepted but never used in
        this body; `mask` is only forwarded to the memory read.
        """
        # Memory Writing: gate word features against the pooled image code.
        word_embs_T = torch.transpose(word_embs, 1, 2).contiguous()
        h_code_avg = self.avg(h_code).detach()
        h_code_avg = h_code_avg.squeeze(3)
        h_code_avg_T = torch.transpose(h_code_avg, 1, 2).contiguous()
        gate1 = torch.transpose(self.A(word_embs_T), 1, 2).contiguous()
        gate2 = self.B(h_code_avg_T).repeat(1, 1, word_embs.size(2))
        writing_gate = torch.sigmoid(gate1 + gate2)
        h_code_avg = h_code_avg.repeat(1, 1, word_embs.size(2))
        # Convex mix of word and image contributions per memory slot.
        memory = self.M_w(word_embs) * writing_gate + self.M_r(h_code_avg) * (1 - writing_gate)
        # Key Addressing and Value Reading
        key = self.key(memory)
        value = self.value(memory)
        self.memory_operation.applyMask(mask)
        memory_out, att = self.memory_operation(h_code, key, value)
        # Key Response: gated fusion of the memory read with the input map.
        response_gate = self.response_gate(torch.cat((h_code, memory_out), 1))
        h_code_new = h_code * (1 - response_gate) + response_gate * memory_out
        # Duplicate channels to feed the 2*ngf-wide residual stack.
        h_code_new = torch.cat((h_code_new, h_code_new), 1)
        out_code = self.residual(h_code_new)
        # state size ngf/2 x 2in_size x 2in_size
        out_code = self.upsample(out_code)
        return out_code, att
class GET_IMAGE_G(nn.Module):
    """Project a feature map to a 3-channel image in [-1, 1] (conv3x3 + tanh)."""

    def __init__(self, ngf):
        super(GET_IMAGE_G, self).__init__()
        self.gf_dim = ngf
        self.img = nn.Sequential(
            conv3x3(ngf, 3),
            nn.Tanh(),
        )

    def forward(self, h_code):
        return self.img(h_code)
class G_NET(nn.Module):
    """Multi-stage generator: one initial stage plus up to two memory-based
    refinement stages, emitting an image at each active branch
    (64 / 128 / 256 px). The number of branches comes from
    cfg.TREE.BRANCH_NUM.
    """
    def __init__(self):
        super(G_NET, self).__init__()
        ngf = cfg.GAN.GF_DIM
        nef = cfg.TEXT.EMBEDDING_DIM
        ncf = cfg.GAN.CONDITION_DIM
        self.ca_net = CA_NET()
        if cfg.TREE.BRANCH_NUM > 0:
            self.h_net1 = INIT_STAGE_G(ngf * 16, ncf)
            self.img_net1 = GET_IMAGE_G(ngf)
        # gf x 64 x 64
        if cfg.TREE.BRANCH_NUM > 1:
            self.h_net2 = NEXT_STAGE_G(ngf, nef, ncf, 64)
            self.img_net2 = GET_IMAGE_G(ngf)
        if cfg.TREE.BRANCH_NUM > 2:
            self.h_net3 = NEXT_STAGE_G(ngf, nef, ncf, 128)
            self.img_net3 = GET_IMAGE_G(ngf)

    def forward(self, z_code, sent_emb, word_embs, mask, cap_lens):
        """
        :param z_code: batch x cfg.GAN.Z_DIM
        :param sent_emb: batch x cfg.TEXT.EMBEDDING_DIM
        :param word_embs: batch x cdf x seq_len
        :param mask: batch x seq_len
        :return: (fake images per branch, attention maps per refinement
            stage, and the CA_NET mu/logvar for the KL term)
        """
        fake_imgs = []
        att_maps = []
        # Conditioning augmentation: sample the condition code.
        c_code, mu, logvar = self.ca_net(sent_emb)
        if cfg.TREE.BRANCH_NUM > 0:
            h_code1 = self.h_net1(z_code, c_code)
            fake_img1 = self.img_net1(h_code1)
            fake_imgs.append(fake_img1)
        if cfg.TREE.BRANCH_NUM > 1:
            h_code2, att1 = self.h_net2(h_code1, c_code, word_embs, mask, cap_lens)
            fake_img2 = self.img_net2(h_code2)
            fake_imgs.append(fake_img2)
            if att1 is not None:
                att_maps.append(att1)
        if cfg.TREE.BRANCH_NUM > 2:
            h_code3, att2 = self.h_net3(h_code2, c_code, word_embs, mask, cap_lens)
            fake_img3 = self.img_net3(h_code3)
            fake_imgs.append(fake_img3)
            if att2 is not None:
                att_maps.append(att2)
        return fake_imgs, att_maps, mu, logvar
class G_DCGAN(nn.Module):
    """Single-output generator variant: shares G_NET's stages but emits only
    the final image.

    Fixes two latent TypeErrors present in the original: NEXT_STAGE_G's
    constructor requires a `size` argument and its forward requires a
    `cap_lens` argument, both of which were omitted here and would crash as
    soon as cfg.TREE.BRANCH_NUM > 1. Sizes mirror G_NET (64 for stage 2,
    128 for stage 3); `cap_lens` is unused inside NEXT_STAGE_G.forward, so
    a default of None is forwarded (backward-compatible signature).
    """
    def __init__(self):
        super(G_DCGAN, self).__init__()
        ngf = cfg.GAN.GF_DIM
        nef = cfg.TEXT.EMBEDDING_DIM
        ncf = cfg.GAN.CONDITION_DIM
        self.ca_net = CA_NET()
        # 16gf x 64 x 64 --> gf x 64 x 64 --> 3 x 64 x 64
        if cfg.TREE.BRANCH_NUM > 0:
            self.h_net1 = INIT_STAGE_G(ngf * 16, ncf)
        # gf x 64 x 64
        if cfg.TREE.BRANCH_NUM > 1:
            self.h_net2 = NEXT_STAGE_G(ngf, nef, ncf, 64)
        if cfg.TREE.BRANCH_NUM > 2:
            self.h_net3 = NEXT_STAGE_G(ngf, nef, ncf, 128)
        self.img_net = GET_IMAGE_G(ngf)

    def forward(self, z_code, sent_emb, word_embs, mask, cap_lens=None):
        """
        :param z_code: batch x cfg.GAN.Z_DIM
        :param sent_emb: batch x cfg.TEXT.EMBEDDING_DIM
        :param word_embs: batch x cdf x seq_len
        :param mask: batch x seq_len
        :param cap_lens: optional caption lengths (forwarded to the
            refinement stages, which currently ignore it)
        :return: ([final fake image], attention maps, mu, logvar)
        """
        att_maps = []
        c_code, mu, logvar = self.ca_net(sent_emb)
        if cfg.TREE.BRANCH_NUM > 0:
            h_code = self.h_net1(z_code, c_code)
        if cfg.TREE.BRANCH_NUM > 1:
            h_code, att1 = self.h_net2(h_code, c_code, word_embs, mask, cap_lens)
            if att1 is not None:
                att_maps.append(att1)
        if cfg.TREE.BRANCH_NUM > 2:
            h_code, att2 = self.h_net3(h_code, c_code, word_embs, mask, cap_lens)
            if att2 is not None:
                att_maps.append(att2)
        fake_imgs = self.img_net(h_code)
        return [fake_imgs], att_maps, mu, logvar
# ############## D networks ##########################
def Block3x3_leakRelu(in_planes, out_planes):
    """Spectral-normalized conv3x3 (with bias) followed by LeakyReLU(0.2)."""
    return nn.Sequential(
        SpectralNorm(conv3x3(in_planes, out_planes, bias=True)),
        nn.LeakyReLU(0.2, inplace=True),
    )
# Downsale the spatial size by a factor of 2
def downBlock(in_planes, out_planes):
    """Halve spatial size: spectral-normalized 4x4 stride-2 conv + LeakyReLU(0.2)."""
    return nn.Sequential(
        SpectralNorm(nn.Conv2d(in_planes, out_planes, 4, 2, 1, bias=True)),
        nn.LeakyReLU(0.2, inplace=True),
    )
# Downsale the spatial size by a factor of 16
def encode_image_by_16times(ndf):
    """Discriminator stem: four spectral-normalized stride-2 convs.

    Maps 3 input channels to ndf*8 while shrinking the spatial size by 16x;
    each conv is followed by LeakyReLU(0.2).
    """
    channel_pairs = [(3, ndf), (ndf, ndf * 2), (ndf * 2, ndf * 4), (ndf * 4, ndf * 8)]
    layers = []
    for cin, cout in channel_pairs:
        layers.append(SpectralNorm(nn.Conv2d(cin, cout, 4, 2, 1, bias=True)))
        layers.append(nn.LeakyReLU(0.2, inplace=True))
    return nn.Sequential(*layers)
class D_GET_LOGITS(nn.Module):
    """Discriminator head mapping a 4x4 x 8*ndf feature map to a real/fake
    probability per image.

    With bcondition=True, the sentence code is tiled over the 4x4 grid,
    concatenated with the features and fused by a 3x3 block before scoring.
    """

    def __init__(self, ndf, nef, bcondition=False):
        super(D_GET_LOGITS, self).__init__()
        self.df_dim = ndf
        self.ef_dim = nef
        self.bcondition = bcondition
        if self.bcondition:
            self.jointConv = Block3x3_leakRelu(ndf * 8 + nef, ndf * 8)
        self.outlogits = nn.Sequential(
            nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
            nn.Sigmoid())

    def forward(self, h_code, c_code=None):
        if self.bcondition and c_code is not None:
            # Tile the sentence code spatially, then fuse with the features.
            tiled = c_code.view(-1, self.ef_dim, 1, 1).repeat(1, 1, 4, 4)
            h_c_code = self.jointConv(torch.cat((h_code, tiled), 1))
        else:
            h_c_code = h_code
        return self.outlogits(h_c_code).view(-1)
# For 64 x 64 images
class D_NET64(nn.Module):
def __init__(self, b_jcu=True):
super(D_NET64, self).__init__()
ndf = cfg.GAN.DF_DIM
nef = cfg.TEXT.EMBEDDING_DIM
self.img_code_s16 = encode_image_by_16times(ndf)
if b_jcu:
self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)
else:
self.UNCOND_DNET = None
self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)
def forward(self, x_var):
x_code4 = self.img_code_s16(x_var) # 4 x 4 x 8df
return x_code4
# For 128 x 128 images
class D_NET128(nn.Module):
def __init__(self, b_jcu=True):
super(D_NET128, self).__init__()
ndf = cfg.GAN.DF_DIM
nef = cfg.TEXT.EMBEDDING_DIM
self.img_code_s16 = encode_image_by_16times(ndf)
self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
self.img_code_s32_1 = Block3x3_leakRelu(ndf * 16, ndf * 8)
#
if b_jcu:
self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)
else:
self.UNCOND_DNET = None
self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)
def forward(self, x_var):
x_code8 = self.img_code_s16(x_var) # 8 x 8 x 8df
x_code4 = self.img_code_s32(x_code8) # 4 x 4 x 16df
x_code4 = self.img_code_s32_1(x_code4) # 4 x 4 x 8df
return x_code4
# For 256 x 256 images
class D_NET256(nn.Module):
def __init__(self, b_jcu=True):
super(D_NET256, self).__init__()
ndf = cfg.GAN.DF_DIM
nef = cfg.TEXT.EMBEDDING_DIM
self.img_code_s16 = encode_image_by_16times(ndf)
self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
self.img_code_s64 = downBlock(ndf * 16, ndf * 32)
self.img_code_s64_1 = Block3x3_leakRelu(ndf * 32, ndf * 16)
self.img_code_s64_2 = Block3x3_leakRelu(ndf * 16, ndf * 8)
if b_jcu:
self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)
else:
self.UNCOND_DNET = None
self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)
def forward(self, x_var):
x_code16 = self.img_code_s16(x_var)
x_code8 = self.img_code_s32(x_code16)
x_code4 = self.img_code_s64(x_code8)
x_code4 = self.img_code_s64_1(x_code4)
x_code4 = self.img_code_s64_2(x_code4)
return x_code4
| 25,027 | 34.652422 | 104 | py |
T2I_CL | T2I_CL-main/DM-GAN+CL/code/GlobalAttention.py | """
Global attention takes a matrix and a query matrix.
Based on each query vector q, it computes a parameterized convex combination of the matrix
based.
H_1 H_2 H_3 ... H_n
q q q q
| | | |
\ | | /
.....
\ | /
a
Constructs a unit mapping.
$$(H_1 + H_n, q) => (a)$$
Where H is of `batch x n x dim` and q is of `batch x dim`.
References:
https://github.com/OpenNMT/OpenNMT-py/tree/fc23dfef1ba2f258858b2765d24565266526dc76/onmt/modules
http://www.aclweb.org/anthology/D15-1166
"""
import torch
import torch.nn as nn
def conv1x1(in_planes, out_planes):
    """Bias-free pointwise (1x1) convolution, stride 1, no padding."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=1, stride=1, padding=0, bias=False)
def func_attention(query, context, gamma1):
    """Word-region attention (Eqs. 7-9 of the AttnGAN paper).

    Args:
        query: batch x ndf x queryL word features.
        context: batch x ndf x ih x iw image region features
            (sourceL = ih * iw).
        gamma1: temperature that sharpens the attention over regions.

    Returns:
        weightedContext: batch x ndf x queryL region context per word.
        attn: batch x queryL x ih x iw attention maps.
    """
    batch_size, queryL = query.size(0), query.size(2)
    ih, iw = context.size(2), context.size(3)
    sourceL = ih * iw

    # --> batch x sourceL x ndf
    context = context.view(batch_size, -1, sourceL)
    contextT = torch.transpose(context, 1, 2).contiguous()

    # (batch x sourceL x ndf)(batch x ndf x queryL) --> batch x sourceL x queryL
    attn = torch.bmm(contextT, query)  # Eq. (7) in AttnGAN paper

    # Normalize over words for every region.  FIX: pass dim=1 explicitly —
    # relying on nn.Softmax()'s implicit dim is deprecated in PyTorch and
    # resolves to dim=1 for these 2-D tensors anyway.
    attn = attn.view(batch_size * sourceL, queryL)
    attn = nn.Softmax(dim=1)(attn)  # Eq. (8)
    attn = attn.view(batch_size, sourceL, queryL)

    # --> batch*queryL x sourceL, then sharpen and renormalize over regions
    attn = torch.transpose(attn, 1, 2).contiguous()
    attn = attn.view(batch_size * queryL, sourceL)
    attn = attn * gamma1  # Eq. (9) temperature
    attn = nn.Softmax(dim=1)(attn)
    attn = attn.view(batch_size, queryL, sourceL)

    # (batch x ndf x sourceL)(batch x sourceL x queryL) --> batch x ndf x queryL
    attnT = torch.transpose(attn, 1, 2).contiguous()
    weightedContext = torch.bmm(context, attnT)

    return weightedContext, attn.view(batch_size, -1, ih, iw)
class GlobalAttentionGeneral(nn.Module):
    """Image-to-word attention with separate key/value word features.

    For every spatial location of the image features (the query), attends
    over the word features and returns the attended word context plus the
    attention maps.  Padding words can be masked out via ``applyMask``.
    """
    def __init__(self, idf, cdf):
        super(GlobalAttentionGeneral, self).__init__()
        #self.conv_context = conv1x1(cdf, idf)
        # NOTE(review): nn.Softmax() is built without an explicit dim and is
        # applied to a 2-D tensor in forward(); PyTorch's deprecated
        # implicit-dim fallback picks dim=1 there.
        self.sm = nn.Softmax()
        self.mask = None

    def applyMask(self, mask):
        # mask: batch x sourceL — nonzero where the word slot is padding;
        # those positions receive -inf before the softmax in forward().
        self.mask = mask  # batch x sourceL

    def forward(self, input, context_key, content_value):
        """
        input: batch x idf x ih x iw (queryL=ihxiw) — image query features
        context_key: batch x cdf x sourceL — word keys
        content_value: batch x cdf x sourceL — word values
        Returns (weightedContext: batch x cdf x ih x iw,
                 attn: batch x sourceL x ih x iw).
        """
        ih, iw = input.size(2), input.size(3)
        queryL = ih * iw
        batch_size, sourceL = context_key.size(0), context_key.size(2)
        # --> batch x queryL x idf
        target = input.view(batch_size, -1, queryL)
        targetT = torch.transpose(target, 1, 2).contiguous()
        # batch x cdf x sourceL --> batch x cdf x sourceL x 1
        #sourceT = context.unsqueeze(3)
        # --> batch x idf x sourceL
        #sourceT = self.conv_context(sourceT).squeeze(3)
        sourceT = context_key
        # Attention scores:
        # (batch x queryL x idf)(batch x idf x sourceL) --> batch x queryL x sourceL
        attn = torch.bmm(targetT, sourceT)
        # Left over from an experimental text-side attention branch below;
        # never assigned again and not returned.
        text_weighted = None
        # text_attn = torch.transpose(attn, 1, 2).contiguous() # batch x sourceL x queryL
        # text_attn = text_attn.view(batch_size*sourceL, queryL)
        # if self.mask is not None:
        #     mask = self.mask.repeat(queryL, 1)
        #     mask = mask.view(batch_size, queryL, sourceL)
        #     mask = torch.transpose(mask, 1, 2).contiguous()
        #     mask = mask.view(batch_size*sourceL, queryL)
        #     text_attn.data.masked_fill_(mask.data, -float('inf'))
        # text_attn = self.sm(text_attn)
        # text_attn = text_attn.view(batch_size,sourceL, queryL)
        # text_attn = torch.transpose(text_attn, 1, 2).contiguous() # batch x queryL x sourceL
        # # (batch x idf x queryL) * (batch x queryL x sourceL) -> batch x idf x sourceL
        # text_weighted = torch.bmm(target, text_attn)
        # --> batch*queryL x sourceL
        attn = attn.view(batch_size * queryL, sourceL)
        if self.mask is not None:
            # batch_size x sourceL --> batch_size*queryL x sourceL
            mask = self.mask.repeat(queryL, 1)
            # In-place fill on .data deliberately bypasses autograd here.
            attn.data.masked_fill_(mask.data, -float('inf'))
        attn = self.sm(attn)  # Eq. (2): softmax over the sourceL (word) axis
        # --> batch x queryL x sourceL
        attn = attn.view(batch_size, queryL, sourceL)
        # --> batch x sourceL x queryL
        attn = torch.transpose(attn, 1, 2).contiguous()
        # Weighted sum of word *values*:
        # (batch x idf x sourceL)(batch x sourceL x queryL) --> batch x idf x queryL
        weightedContext = torch.bmm(content_value, attn)
        #weightedContext = torch.bmm(sourceT, attn)
        weightedContext = weightedContext.view(batch_size, -1, ih, iw)
        attn = attn.view(batch_size, -1, ih, iw)
        return weightedContext, attn
class GlobalAttention_text(nn.Module):
    """Text-side attention: weights image query features by each word.

    The word context is first projected to the image feature dimension by
    a 1-D convolution; the attention is normalized over the *query*
    (spatial) axis, producing one weighted image feature per word.
    """
    def __init__(self, idf, cdf):
        super(GlobalAttention_text, self).__init__()
        # project word features (cdf) to the image feature dim (idf)
        self.conv_context = nn.Conv1d(cdf, idf, kernel_size=1, stride=1, padding=0)
        # NOTE(review): self.sm is created but never used in forward().
        self.sm = nn.Softmax()
        self.mask = None

    def applyMask(self, mask):
        # mask: batch x sourceL — nonzero where the word slot is padding.
        self.mask = mask  # batch x sourceL

    def forward(self, input, context):
        """
        input: batch x idf x ih x iw (queryL=ihxiw) — image features
        context: batch x cdf x sourceL — word features
        Returns text_weighted: batch x idf x sourceL.
        """
        ih, iw = input.size(2), input.size(3)
        queryL = ih * iw
        batch_size, sourceL = context.size(0), context.size(2)
        # --> batch x queryL x idf
        target = input.view(batch_size, -1, queryL)
        targetT = torch.transpose(target, 1, 2).contiguous()
        sourceT = self.conv_context(context)
        # Attention scores:
        # (batch x queryL x idf)(batch x idf x sourceL) --> batch x queryL x sourceL
        attn = torch.bmm(targetT, sourceT)
        # --> batch*queryL x sourceL
        attn = attn.view(batch_size * queryL, sourceL)
        if self.mask is not None:
            # batch_size x sourceL --> batch_size*queryL x sourceL
            mask = self.mask.repeat(queryL, 1)
            # In-place fill on .data deliberately bypasses autograd here.
            attn.data.masked_fill_(mask.data, -float('inf'))
        #attn_o = self.sm(attn)  # Eq. (2)
        #attn_o = attn_o.view(batch_size, queryL, sourceL)
        attn = attn.view(batch_size, queryL, sourceL)
        # normalize over the spatial (queryL) axis for every word
        attn = torch.nn.Softmax(dim=1)(attn)
        #import ipdb;
        #ipdb.set_trace() # BREAKPOINT
        # (batch x idf x queryL) * (batch x queryL x sourceL) -> batch x idf x sourceL
        text_weighted = torch.bmm(target, attn)
        return text_weighted
| 6,815 | 34.873684 | 96 | py |
T2I_CL | T2I_CL-main/DM-GAN+CL/code/datasets.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from nltk.tokenize import RegexpTokenizer
from collections import defaultdict
from miscc.config import cfg
import torch
import torch.utils.data as data
from torch.autograd import Variable
import torchvision.transforms as transforms
import os
import sys
import numpy as np
import pandas as pd
from PIL import Image
import numpy.random as random
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
def prepare_data(data):
    """Unpack one collated batch and sort it by caption length.

    Both captions of each image are sorted (independently) in decreasing
    length order, as required by packed-sequence RNN encoding; the images
    are duplicated and permuted to match each ordering.  The returned
    ``sorted_cap_indices`` permutations let callers undo the sorting.

    Returns a list:
        [real_imgs, real_imgs_2, captions, sorted_cap_lens, class_ids_1,
         keys, captions_2, sorted_cap_lens_2, class_ids_2,
         sorted_cap_indices, sorted_cap_indices_2]
    """
    imgs, captions, captions_lens, class_ids, keys, captions_2, captions_lens_2 = data
    # sort data by the length in a decreasing order
    sorted_cap_lens, sorted_cap_indices = \
        torch.sort(captions_lens, 0, True)
    sorted_cap_lens_2, sorted_cap_indices_2 = \
        torch.sort(captions_lens_2, 0, True)
    # shallow copy: each branch tensor is re-indexed (rebound) below,
    # one copy per caption ordering
    imgs_2 = imgs.copy()
    real_imgs = []
    for i in range(len(imgs)):
        imgs[i] = imgs[i][sorted_cap_indices]
        if cfg.CUDA:
            real_imgs.append(Variable(imgs[i]).cuda())
        else:
            real_imgs.append(Variable(imgs[i]))
    real_imgs_2 = []
    for i in range(len(imgs_2)):
        imgs_2[i] = imgs_2[i][sorted_cap_indices_2]
        if cfg.CUDA:
            real_imgs_2.append(Variable(imgs_2[i]).cuda())
        else:
            real_imgs_2.append(Variable(imgs_2[i]))
    captions = captions[sorted_cap_indices].squeeze()
    captions_2 = captions_2[sorted_cap_indices_2].squeeze()
    # sorted_captions_lens_2 = captions_lens_2[sorted_cap_indices].squeeze()
    # captions = torch.cat([captions, captions_2], dim=0)
    # sorted_cap_lens = torch.cat([sorted_cap_lens, sorted_captions_lens_2], dim=0)
    class_ids_1 = class_ids[sorted_cap_indices].numpy()
    class_ids_2 = class_ids[sorted_cap_indices_2].numpy()
    # keys follow the ordering of the *first* caption set only
    keys = [keys[i] for i in sorted_cap_indices.numpy()]
    if cfg.CUDA:
        captions = Variable(captions).cuda()
        sorted_cap_lens = Variable(sorted_cap_lens).cuda()
        captions_2 = Variable(captions_2).cuda()
        sorted_cap_lens_2 = Variable(sorted_cap_lens_2).cuda()
        sorted_cap_indices = sorted_cap_indices.cuda()
        sorted_cap_indices_2 = sorted_cap_indices_2.cuda()
    else:
        captions = Variable(captions)
        sorted_cap_lens = Variable(sorted_cap_lens)
        captions_2 = Variable(captions_2)
        sorted_cap_lens_2 = Variable(sorted_cap_lens_2)
    return [real_imgs, real_imgs_2, captions, sorted_cap_lens,
            class_ids_1, keys, captions_2, sorted_cap_lens_2, class_ids_2, sorted_cap_indices, sorted_cap_indices_2]
def get_imgs(img_path, imsize, bbox=None,
             transform=None, normalize=None):
    """Load an image, optionally crop it around *bbox*, and return one
    normalized tensor per branch resolution in ``imsize``."""
    img = Image.open(img_path).convert('RGB')
    width, height = img.size

    if bbox is not None:
        # Crop a square region centered on the bounding box, enlarged by
        # 50% (r = 0.75 * max side), clipped to the image borders.
        r = int(np.maximum(bbox[2], bbox[3]) * 0.75)
        center_x = int((2 * bbox[0] + bbox[2]) / 2)
        center_y = int((2 * bbox[1] + bbox[3]) / 2)
        x1 = np.maximum(0, center_x - r)
        x2 = np.minimum(width, center_x + r)
        y1 = np.maximum(0, center_y - r)
        y2 = np.minimum(height, center_y + r)
        img = img.crop([x1, y1, x2, y2])

    if transform is not None:
        img = transform(img)

    if cfg.GAN.B_DCGAN:
        # single-branch DCGAN variant: one image only
        return [normalize(img)]

    ret = []
    last_branch = cfg.TREE.BRANCH_NUM - 1
    for i in range(cfg.TREE.BRANCH_NUM):
        scaled = transforms.Resize(imsize[i])(img) if i < last_branch else img
        ret.append(normalize(scaled))
    return ret
class TextDataset(data.Dataset):
    """Image/caption dataset for text-to-image GAN training.

    Each item yields the image at every branch resolution plus two
    independently sampled captions (the second one feeds the contrastive
    loss), their lengths, the class id and the file key.
    """
    def __init__(self, data_dir, split='train',
                 base_size=64,
                 transform=None, target_transform=None):
        self.transform = transform
        # map image tensors from [0, 1] to [-1, 1] for the GAN
        self.norm = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        self.target_transform = target_transform
        self.embeddings_num = cfg.TEXT.CAPTIONS_PER_IMAGE
        # one target size per branch, doubling each time (e.g. 64, 128, 256)
        self.imsize = []
        for i in range(cfg.TREE.BRANCH_NUM):
            self.imsize.append(base_size)
            base_size = base_size * 2
        self.data = []
        self.data_dir = data_dir
        # bounding boxes are only available for the CUB birds dataset
        if data_dir.find('birds') != -1:
            self.bbox = self.load_bbox()
        else:
            self.bbox = None
        split_dir = os.path.join(data_dir, split)
        self.filenames, self.captions, self.ixtoword, \
            self.wordtoix, self.n_words = self.load_text_data(data_dir, split)
        self.class_id = self.load_class_id(split_dir, len(self.filenames))
        self.number_example = len(self.filenames)

    def load_bbox(self):
        """Return {image key (path without extension): [x, y, w, h]}."""
        data_dir = self.data_dir
        bbox_path = os.path.join(data_dir, 'CUB_200_2011/bounding_boxes.txt')
        df_bounding_boxes = pd.read_csv(bbox_path,
                                        delim_whitespace=True,
                                        header=None).astype(int)
        filepath = os.path.join(data_dir, 'CUB_200_2011/images.txt')
        df_filenames = \
            pd.read_csv(filepath, delim_whitespace=True, header=None)
        filenames = df_filenames[1].tolist()
        print('Total filenames: ', len(filenames), filenames[0])
        filename_bbox = {img_file[:-4]: [] for img_file in filenames}
        numImgs = len(filenames)
        for i in range(0, numImgs):
            # bbox = [x-left, y-top, width, height]
            bbox = df_bounding_boxes.iloc[i][1:].tolist()
            key = filenames[i][:-4]
            filename_bbox[key] = bbox
        return filename_bbox

    def load_captions(self, data_dir, filenames):
        """Read and tokenize ``self.embeddings_num`` captions per image."""
        all_captions = []
        for i in range(len(filenames)):
            cap_path = '%s/text/%s.txt' % (data_dir, filenames[i])
            with open(cap_path, "r") as f:
                raw = f.read()
            # BUGFIX: the original called raw.decode('utf8'), which raises
            # AttributeError on Python 3 where text-mode read() already
            # returns str.  Decode only when bytes come back (Python 2).
            if isinstance(raw, bytes):
                raw = raw.decode('utf8')
            captions = raw.split('\n')
            cnt = 0
            for cap in captions:
                if len(cap) == 0:
                    continue
                cap = cap.replace("\ufffd\ufffd", " ")
                # picks out sequences of alphanumeric characters as tokens
                # and drops everything else
                tokenizer = RegexpTokenizer(r'\w+')
                tokens = tokenizer.tokenize(cap.lower())
                if len(tokens) == 0:
                    print('cap', cap)
                    continue
                tokens_new = []
                for t in tokens:
                    # drop non-ascii characters from each token
                    t = t.encode('ascii', 'ignore').decode('ascii')
                    if len(t) > 0:
                        tokens_new.append(t)
                all_captions.append(tokens_new)
                cnt += 1
                if cnt == self.embeddings_num:
                    break
            if cnt < self.embeddings_num:
                print('ERROR: the captions for %s less than %d'
                      % (filenames[i], cnt))
        return all_captions

    def build_dictionary(self, train_captions, test_captions):
        """Build word<->index maps and convert token lists to index lists.

        Index 0 is reserved for the '<end>' padding token.
        """
        word_counts = defaultdict(float)
        captions = train_captions + test_captions
        for sent in captions:
            for word in sent:
                word_counts[word] += 1
        # threshold 0 keeps every word that appears at all
        vocab = [w for w in word_counts if word_counts[w] >= 0]
        ixtoword = {}
        ixtoword[0] = '<end>'
        wordtoix = {}
        wordtoix['<end>'] = 0
        ix = 1
        for w in vocab:
            wordtoix[w] = ix
            ixtoword[ix] = w
            ix += 1
        train_captions_new = []
        for t in train_captions:
            rev = []
            for w in t:
                if w in wordtoix:
                    rev.append(wordtoix[w])
            # rev.append(0)  # do not need '<end>' token
            train_captions_new.append(rev)
        test_captions_new = []
        for t in test_captions:
            rev = []
            for w in t:
                if w in wordtoix:
                    rev.append(wordtoix[w])
            # rev.append(0)  # do not need '<end>' token
            test_captions_new.append(rev)
        return [train_captions_new, test_captions_new,
                ixtoword, wordtoix, len(ixtoword)]

    def load_text_data(self, data_dir, split):
        """Load (or build and cache) tokenized captions and the vocabulary."""
        filepath = os.path.join(data_dir, 'captions.pickle')
        train_names = self.load_filenames(data_dir, 'train')
        test_names = self.load_filenames(data_dir, 'test')
        if not os.path.isfile(filepath):
            train_captions = self.load_captions(data_dir, train_names)
            test_captions = self.load_captions(data_dir, test_names)
            train_captions, test_captions, ixtoword, wordtoix, n_words = \
                self.build_dictionary(train_captions, test_captions)
            with open(filepath, 'wb') as f:
                # protocol 2 keeps the cache readable from Python 2
                pickle.dump([train_captions, test_captions,
                             ixtoword, wordtoix], f, protocol=2)
                print('Save to: ', filepath)
        else:
            with open(filepath, 'rb') as f:
                x = pickle.load(f)
                train_captions, test_captions = x[0], x[1]
                ixtoword, wordtoix = x[2], x[3]
                del x
                n_words = len(ixtoword)
                print('Load from: ', filepath)
        if split == 'train':
            # a list of list: each list contains
            # the indices of words in a sentence
            captions = train_captions
            filenames = train_names
        else:  # split == 'test'
            captions = test_captions
            filenames = test_names
        return filenames, captions, ixtoword, wordtoix, n_words

    def load_class_id(self, data_dir, total_num):
        """Per-image class ids; unique ids when the pickle is missing."""
        if os.path.isfile(data_dir + '/class_info.pickle'):
            with open(data_dir + '/class_info.pickle', 'rb') as f:
                # latin1 lets Python 3 read a Python-2 pickle
                class_id = pickle.load(f, encoding='latin1')
        else:
            class_id = np.arange(total_num)
        return class_id

    def load_filenames(self, data_dir, split):
        """Load the split's filename list; empty list when missing."""
        filepath = '%s/%s/filenames.pickle' % (data_dir, split)
        if os.path.isfile(filepath):
            with open(filepath, 'rb') as f:
                filenames = pickle.load(f)
            print('Load filenames from: %s (%d)' % (filepath, len(filenames)))
        else:
            filenames = []
        return filenames

    def get_caption(self, sent_ix):
        """Return (caption indices padded to WORDS_NUM x 1, true length)."""
        # a list of indices for a sentence
        sent_caption = np.asarray(self.captions[sent_ix]).astype('int64')
        if (sent_caption == 0).sum() > 0:
            print('ERROR: do not need END (0) token', sent_caption)
        num_words = len(sent_caption)
        # pad with 0s (i.e., '<end>')
        x = np.zeros((cfg.TEXT.WORDS_NUM, 1), dtype='int64')
        x_len = num_words
        if num_words <= cfg.TEXT.WORDS_NUM:
            x[:num_words, 0] = sent_caption
        else:
            # sample WORDS_NUM word positions, preserving original order
            ix = list(np.arange(num_words))
            np.random.shuffle(ix)
            ix = ix[:cfg.TEXT.WORDS_NUM]
            ix = np.sort(ix)
            x[:, 0] = sent_caption[ix]
            x_len = cfg.TEXT.WORDS_NUM
        return x, x_len

    def __getitem__(self, index):
        key = self.filenames[index]
        cls_id = self.class_id[index]
        if self.bbox is not None:
            bbox = self.bbox[key]
            data_dir = '%s/CUB_200_2011' % self.data_dir
        else:
            bbox = None
            data_dir = self.data_dir
        img_name = '%s/images/%s.jpg' % (data_dir, key)
        imgs = get_imgs(img_name, self.imsize,
                        bbox, self.transform, normalize=self.norm)
        # randomly select a sentence
        sent_ix = random.randint(0, self.embeddings_num)
        new_sent_ix = index * self.embeddings_num + sent_ix
        caps, cap_len = self.get_caption(new_sent_ix)
        # second, independently drawn sentence (for the contrastive loss)
        sent_ix = random.randint(0, self.embeddings_num)
        new_sent_ix = index * self.embeddings_num + sent_ix
        caps_two, cap_len_two = self.get_caption(new_sent_ix)
        return imgs, caps, cap_len, cls_id, key, caps_two, cap_len_two

    def get_mis_caption(self, cls_id):
        """Sample 99 captions from other classes, sorted by length desc."""
        mis_match_captions_t = []
        mis_match_captions = torch.zeros(99, cfg.TEXT.WORDS_NUM)
        mis_match_captions_len = torch.zeros(99)
        i = 0
        while len(mis_match_captions_t) < 99:
            idx = random.randint(0, self.number_example)
            if cls_id == self.class_id[idx]:
                continue  # skip captions of the query's own class
            sent_ix = random.randint(0, self.embeddings_num)
            new_sent_ix = idx * self.embeddings_num + sent_ix
            caps_t, cap_len_t = self.get_caption(new_sent_ix)
            mis_match_captions_t.append(torch.from_numpy(caps_t).squeeze())
            mis_match_captions_len[i] = cap_len_t
            i = i + 1
        sorted_cap_lens, sorted_cap_indices = torch.sort(mis_match_captions_len, 0, True)
        for i in range(99):
            mis_match_captions[i, :] = mis_match_captions_t[sorted_cap_indices[i]]
        return mis_match_captions.type(torch.LongTensor).cuda(), sorted_cap_lens.type(torch.LongTensor).cuda()

    def __len__(self):
        return len(self.filenames)
| 13,771 | 34.864583 | 116 | py |
T2I_CL | T2I_CL-main/DM-GAN+CL/code/spectral.py | import torch
from torch.optim.optimizer import Optimizer, required
from torch.autograd import Variable
import torch.nn.functional as F
from torch import nn
from torch import Tensor
from torch.nn import Parameter
def l2normalize(v, eps=1e-12):
    """Scale *v* to unit L2 norm; eps guards against division by zero."""
    return torch.div(v, v.norm() + eps)
class SpectralNorm(nn.Module):
    """Wrap a module and spectrally normalize one of its weight tensors.

    The wrapped weight ``name`` is re-registered as ``name + '_bar'``
    together with power-iteration vectors ``name + '_u'`` / ``name + '_v'``.
    On every forward pass the largest singular value sigma is estimated
    via power iteration and ``name`` is set to ``weight_bar / sigma``
    (Miyato et al., "Spectral Normalization for GANs").
    """
    def __init__(self, module, name='weight', power_iterations=1):
        super(SpectralNorm, self).__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        # only rewire the parameters the first time this module is wrapped
        if not self._made_params():
            self._make_params()

    def _update_u_v(self):
        """One (or more) power-iteration steps; rebinds the live weight."""
        u = getattr(self.module, self.name + "_u")
        v = getattr(self.module, self.name + "_v")
        w = getattr(self.module, self.name + "_bar")
        height = w.data.shape[0]
        # Updates go through .data so the power iteration itself is not
        # tracked by autograd; only the final division by sigma is.
        for _ in range(self.power_iterations):
            v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))
            u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))
        # sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))
        sigma = u.dot(w.view(height, -1).mv(v))
        setattr(self.module, self.name, w / sigma.expand_as(w))

    def _made_params(self):
        """Return True when the u/v/bar parameters already exist."""
        try:
            u = getattr(self.module, self.name + "_u")
            v = getattr(self.module, self.name + "_v")
            w = getattr(self.module, self.name + "_bar")
            return True
        except AttributeError:
            return False

    def _make_params(self):
        """Replace the original weight with bar/u/v parameters."""
        w = getattr(self.module, self.name)
        height = w.data.shape[0]
        width = w.view(height, -1).data.shape[1]
        # u and v are fixed buffers of the power iteration, not trained
        u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = Parameter(w.data)
        # remove the original parameter so setattr in _update_u_v can
        # assign a plain (normalized) tensor under the same attribute name
        del self.module._parameters[self.name]
        self.module.register_parameter(self.name + "_u", u)
        self.module.register_parameter(self.name + "_v", v)
        self.module.register_parameter(self.name + "_bar", w_bar)

    def forward(self, *args):
        # refresh the normalized weight, then delegate to the wrapped module
        self._update_u_v()
        return self.module.forward(*args)
T2I_CL | T2I_CL-main/DM-GAN+CL/code/nt_xent.py | import torch
import torch.nn as nn
class NT_Xent(nn.Module):
    """Normalized-temperature cross-entropy (NT-Xent) contrastive loss.

    ``mask`` is a 2N x 2N boolean matrix selecting the negative pairs in
    the pairwise similarity matrix of the concatenated batch (it excludes
    the diagonal and each sample's positive counterpart).
    """
    def __init__(self, batch_size, temperature, mask, device):
        super(NT_Xent, self).__init__()
        self.batch_size = batch_size
        self.temperature = temperature
        self.mask = mask
        self.device = device
        self.criterion = nn.CrossEntropyLoss(reduction="sum")
        self.similarity_f = nn.CosineSimilarity(dim=2)

    def forward(self, z_i, z_j):
        """Return the NT-Xent loss for two batches of paired embeddings.

        Negative examples are not sampled explicitly: given a positive
        pair, the other 2(N - 1) examples in the minibatch are treated as
        negatives (SimCLR, Chen et al. 2020).
        """
        # all-pairs cosine similarity of the 2N concatenated embeddings
        p1 = torch.cat((z_i, z_j), dim=0)
        sim = self.similarity_f(p1.unsqueeze(1), p1.unsqueeze(0)) / self.temperature
        # similarities of the N positive pairs sit on the +/-N diagonals
        sim_i_j = torch.diag(sim, self.batch_size)
        sim_j_i = torch.diag(sim, -self.batch_size)
        positive_samples = torch.cat((sim_i_j, sim_j_i), dim=0).reshape(self.batch_size * 2, 1)
        negative_samples = sim[self.mask].reshape(self.batch_size * 2, -1)
        # the positive logit is placed in column 0, so every label is 0
        labels = torch.zeros(self.batch_size * 2).to(self.device).long()
        logits = torch.cat((positive_samples, negative_samples), dim=1)
        loss = self.criterion(logits, labels)
        loss /= 2 * self.batch_size  # mean over the 2N anchors
        return loss
| 1,339 | 36.222222 | 159 | py |
T2I_CL | T2I_CL-main/DM-GAN+CL/code/trainer.py | from __future__ import print_function
from six.moves import range
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torchvision
from PIL import Image
from miscc.config import cfg
from miscc.utils import mkdir_p
from miscc.utils import build_super_images, build_super_images2
from miscc.utils import weights_init, load_params, copy_G_params
from model import G_DCGAN, G_NET
from datasets import prepare_data
from model import RNN_ENCODER, CNN_ENCODER
from miscc.losses import words_loss
from miscc.losses import discriminator_loss, generator_loss, KL_loss
import os
import time
import numpy as np
import sys
from masks import mask_correlated_samples
from nt_xent import NT_Xent
def l2norm(X, dim, eps=1e-8):
    """Normalize X to unit L2 norm along `dim` (eps avoids divide-by-zero)."""
    denom = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps
    return X / denom
# ################# Text to image task############################ #
class condGANTrainer(object):
def __init__(self, output_dir, data_loader, n_words, ixtoword, dataset):
if cfg.TRAIN.FLAG:
self.model_dir = os.path.join(output_dir, 'Model')
self.image_dir = os.path.join(output_dir, 'Image')
mkdir_p(self.model_dir)
mkdir_p(self.image_dir)
#torch.cuda.set_device(cfg.GPU_ID)
#cudnn.benchmark = True
self.batch_size = cfg.TRAIN.BATCH_SIZE
self.max_epoch = cfg.TRAIN.MAX_EPOCH
self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL
self.n_words = n_words
self.ixtoword = ixtoword
self.data_loader = data_loader
self.dataset = dataset
self.num_batches = len(self.data_loader)
    def build_models(self):
        """Build and initialize all networks.

        Loads the frozen pretrained text/image encoders, constructs the
        generator and the per-branch discriminators, optionally resumes
        them from cfg.TRAIN.NET_G, and moves everything to GPU.

        Returns [text_encoder, image_encoder, netG, netsD, epoch] where
        ``epoch`` is the epoch to resume from (0 when training fresh).
        """
        def count_parameters(model):
            # sum (and print) the sizes of all trainable parameters
            total_param = 0
            for name, param in model.named_parameters():
                if param.requires_grad:
                    num_param = np.prod(param.size())
                    if param.dim() > 1:
                        print(name, ':', 'x'.join(str(x) for x in list(param.size())), '=', num_param)
                    else:
                        print(name, ':', num_param)
                    total_param += num_param
            return total_param

        # ###################encoders######################################## #
        if cfg.TRAIN.NET_E == '':
            print('Error: no pretrained text-image encoders')
            return
        # image encoder path is derived from the text encoder path
        image_encoder = CNN_ENCODER(cfg.TEXT.EMBEDDING_DIM)
        img_encoder_path = cfg.TRAIN.NET_E.replace('text_encoder', 'image_encoder')
        state_dict = \
            torch.load(img_encoder_path, map_location=lambda storage, loc: storage)
        image_encoder.load_state_dict(state_dict)
        # both encoders are frozen: eval mode, no gradients
        for p in image_encoder.parameters():
            p.requires_grad = False
        print('Load image encoder from:', img_encoder_path)
        image_encoder.eval()
        text_encoder = \
            RNN_ENCODER(self.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
        state_dict = \
            torch.load(cfg.TRAIN.NET_E,
                       map_location=lambda storage, loc: storage)
        text_encoder.load_state_dict(state_dict)
        for p in text_encoder.parameters():
            p.requires_grad = False
        print('Load text encoder from:', cfg.TRAIN.NET_E)
        text_encoder.eval()

        # #######################generator and discriminators############## #
        netsD = []
        if cfg.GAN.B_DCGAN:
            # DCGAN variant: one generator branch, one discriminator
            if cfg.TREE.BRANCH_NUM ==1:
                from model import D_NET64 as D_NET
            elif cfg.TREE.BRANCH_NUM == 2:
                from model import D_NET128 as D_NET
            else:  # cfg.TREE.BRANCH_NUM == 3:
                from model import D_NET256 as D_NET
            # TODO: elif cfg.TREE.BRANCH_NUM > 3:
            netG = G_DCGAN()
            netsD = [D_NET(b_jcu=False)]
        else:
            # stacked variant: one discriminator per branch resolution
            from model import D_NET64, D_NET128, D_NET256
            netG = G_NET()
            if cfg.TREE.BRANCH_NUM > 0:
                netsD.append(D_NET64())
            if cfg.TREE.BRANCH_NUM > 1:
                netsD.append(D_NET128())
            if cfg.TREE.BRANCH_NUM > 2:
                netsD.append(D_NET256())
            # TODO: if cfg.TREE.BRANCH_NUM > 3:
        print('number of trainable parameters =', count_parameters(netG))
        print('number of trainable parameters =', count_parameters(netsD[-1]))
        netG.apply(weights_init)
        for i in range(len(netsD)):
            netsD[i].apply(weights_init)
        print('# of netsD', len(netsD))
        #
        epoch = 0
        if cfg.TRAIN.NET_G != '':
            # resume: checkpoint name encodes the epoch, e.g. netG_epoch_42.pth
            state_dict = \
                torch.load(cfg.TRAIN.NET_G, map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load G from: ', cfg.TRAIN.NET_G)
            istart = cfg.TRAIN.NET_G.rfind('_') + 1
            iend = cfg.TRAIN.NET_G.rfind('.')
            epoch = cfg.TRAIN.NET_G[istart:iend]
            epoch = int(epoch) + 1
            if cfg.TRAIN.B_NET_D:
                # discriminators are saved next to the generator checkpoint
                Gname = cfg.TRAIN.NET_G
                for i in range(len(netsD)):
                    s_tmp = Gname[:Gname.rfind('/')]
                    Dname = '%s/netD%d.pth' % (s_tmp, i)
                    print('Load D from: ', Dname)
                    state_dict = \
                        torch.load(Dname, map_location=lambda storage, loc: storage)
                    netsD[i].load_state_dict(state_dict)
        # ########################################################### #
        if cfg.CUDA:
            text_encoder = text_encoder.cuda()
            image_encoder = image_encoder.cuda()
            netG.cuda()
            for i in range(len(netsD)):
                netsD[i].cuda()
        return [text_encoder, image_encoder, netG, netsD, epoch]
def define_optimizers(self, netG, netsD):
optimizersD = []
num_Ds = len(netsD)
for i in range(num_Ds):
opt = optim.Adam(filter(lambda p: p.requires_grad, netsD[i].parameters()),
lr=cfg.TRAIN.DISCRIMINATOR_LR,
betas=(0.5, 0.999))
optimizersD.append(opt)
optimizerG = optim.Adam(netG.parameters(),
lr=cfg.TRAIN.GENERATOR_LR,
betas=(0.5, 0.999))
return optimizerG, optimizersD
def prepare_labels(self):
batch_size = self.batch_size
real_labels = Variable(torch.FloatTensor(batch_size).fill_(1))
fake_labels = Variable(torch.FloatTensor(batch_size).fill_(0))
match_labels = Variable(torch.LongTensor(range(batch_size)))
if cfg.CUDA:
real_labels = real_labels.cuda()
fake_labels = fake_labels.cuda()
match_labels = match_labels.cuda()
return real_labels, fake_labels, match_labels
def save_model(self, netG, avg_param_G, netsD, epoch):
backup_para = copy_G_params(netG)
load_params(netG, avg_param_G)
torch.save(netG.state_dict(),
'%s/netG_epoch_%d.pth' % (self.model_dir, epoch))
load_params(netG, backup_para)
#
for i in range(len(netsD)):
netD = netsD[i]
torch.save(netD.state_dict(),
'%s/netD%d.pth' % (self.model_dir, i))
print('Save G/Ds models.')
def set_requires_grad_value(self, models_list, brequires):
for i in range(len(models_list)):
for p in models_list[i].parameters():
p.requires_grad = brequires
    def save_img_results(self, netG, noise, sent_emb, words_embs, mask,
                         image_encoder, captions, cap_lens,
                         gen_iterations, real_image, name='current'):
        """Generate images and save annotated attention grids to disk.

        Writes one G_* image per generator attention stage and one D_*
        image built from the DAMSM word-region attention of the final
        (highest-resolution) fake image.  ``real_image`` is currently
        unused (see the commented print at the end).
        """
        # Save images
        fake_imgs, attention_maps, _, _ = netG(noise, sent_emb, words_embs, mask, cap_lens)
        for i in range(len(attention_maps)):
            if len(fake_imgs) > 1:
                # attention at stage i annotates the stage i+1 output,
                # with the stage-i image shown as the low-res reference
                img = fake_imgs[i + 1].detach().cpu()
                lr_img = fake_imgs[i].detach().cpu()
            else:
                img = fake_imgs[0].detach().cpu()
                lr_img = None
            attn_maps = attention_maps[i]
            att_sze = attn_maps.size(2)
            img_set, _ = \
                build_super_images(img, captions, self.ixtoword,
                                   attn_maps, att_sze, lr_imgs=lr_img)
            if img_set is not None:
                im = Image.fromarray(img_set)
                fullpath = '%s/G_%s_%d_%d.png'% (self.image_dir, name, gen_iterations, i)
                im.save(fullpath)

        # DAMSM word-region attention on the final fake image only
        i = -1
        img = fake_imgs[i].detach()
        region_features, _ = image_encoder(img)
        att_sze = region_features.size(2)
        _, _, att_maps = words_loss(region_features.detach(),
                                    words_embs.detach(),
                                    None, cap_lens,
                                    None, self.batch_size)
        img_set, _ = \
            build_super_images(fake_imgs[i].detach().cpu(),
                               captions, self.ixtoword, att_maps, att_sze)
        if img_set is not None:
            im = Image.fromarray(img_set)
            fullpath = '%s/D_%s_%d.png'\
                % (self.image_dir, name, gen_iterations)
            im.save(fullpath)
        #print(real_image.type)
    def train(self):
        """Run the full DM-GAN + contrastive-loss training loop.

        Every batch carries two captions per image: each caption produces
        its own fake images and GAN/KL losses, and the two resulting image
        codes are additionally pulled together by an NT-Xent contrastive
        term (weighted by 0.2).  Generator weights are also tracked with
        an exponential moving average used for checkpointing.
        """
        text_encoder, image_encoder, netG, netsD, start_epoch = self.build_models()
        avg_param_G = copy_G_params(netG)  # EMA copy of the generator weights
        optimizerG, optimizersD = self.define_optimizers(netG, netsD)
        real_labels, fake_labels, match_labels = self.prepare_labels()
        real_labels_2, fake_labels_2, match_labels_2 = self.prepare_labels()
        batch_size = self.batch_size
        nz = cfg.GAN.Z_DIM
        noise = Variable(torch.FloatTensor(batch_size, nz))
        fixed_noise = Variable(torch.FloatTensor(batch_size, nz).normal_(0, 1))
        if cfg.CUDA:
            noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

        gen_iterations = 0
        # negative-pair mask for the 2*batch_size similarity matrix
        mask = mask_correlated_samples(self)
        temperature = 0.5
        device = noise.get_device()
        criterion = NT_Xent(batch_size, temperature, mask, device)
        # gen_iterations = start_epoch * self.num_batches
        for epoch in range(start_epoch, self.max_epoch):
            start_t = time.time()

            data_iter = iter(self.data_loader)
            step = 0
            while step < self.num_batches:
                # reset requires_grad to be trainable for all Ds
                # self.set_requires_grad_value(netsD, True)

                ######################################################
                # (1) Prepare training data and Compute text embeddings
                ######################################################
                # NOTE(review): data_iter.next() is the old-style iterator
                # call; recent PyTorch requires next(data_iter).
                data = data_iter.next()
                imgs, imgs_2, captions, cap_lens, class_ids, keys, captions_2, cap_lens_2, class_ids_2, \
                    sort_ind, sort_ind_2 = prepare_data(data)

                hidden = text_encoder.init_hidden(batch_size)
                # words_embs: batch_size x nef x seq_len
                # sent_emb: batch_size x nef
                words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)
                words_embs, sent_emb = words_embs.detach(), sent_emb.detach()
                # mask is True where the caption is padding (index 0); this
                # rebinding shadows the contrastive mask, which is already
                # stored inside `criterion`.
                mask = (captions == 0)
                num_words = words_embs.size(2)
                if mask.size(1) > num_words:
                    mask = mask[:, :num_words]

                # same embedding pass for the second caption set
                words_embs_2, sent_emb_2 = text_encoder(captions_2, cap_lens_2, hidden)
                words_embs_2, sent_emb_2 = words_embs_2.detach(), sent_emb_2.detach()
                mask_2 = (captions_2 == 0)
                num_words_2 = words_embs_2.size(2)
                if mask_2.size(1) > num_words_2:
                    mask_2 = mask_2[:, :num_words_2]

                #######################################################
                # (2) Generate fake images (same noise for both captions)
                ######################################################
                noise.data.normal_(0, 1)
                fake_imgs, _, mu, logvar = netG(noise, sent_emb, words_embs, mask, cap_lens)
                fake_imgs_2, _, mu_2, logvar_2 = netG(noise, sent_emb_2, words_embs_2, mask_2, cap_lens_2)

                #######################################################
                # (3) Update D network
                ######################################################
                errD_total = 0
                D_logs = ''
                for i in range(len(netsD)):
                    netsD[i].zero_grad()
                    errD, log = discriminator_loss(netsD[i], imgs[i], fake_imgs[i],
                                                   sent_emb, real_labels, fake_labels)
                    errD_2, log_2 = discriminator_loss(netsD[i], imgs_2[i], fake_imgs_2[i],
                                                       sent_emb_2, real_labels_2, fake_labels_2)
                    errD += errD_2
                    # backward and update parameters
                    errD.backward()
                    optimizersD[i].step()
                    errD_total += errD
                    D_logs += 'errD%d: %.2f ' % (i, errD.item())
                    D_logs += log
                    D_logs += 'errD%d_2: %.2f ' % (i, errD_2.item()) + log_2

                #######################################################
                # (4) Update G network: maximize log(D(G(z)))
                ######################################################
                # compute total loss for training G
                step += 1
                gen_iterations += 1

                # do not need to compute gradient for Ds
                # self.set_requires_grad_value(netsD, False)
                netG.zero_grad()
                errG_total, G_logs, cnn_code = \
                    generator_loss(netsD, image_encoder, fake_imgs, real_labels,
                                   words_embs, sent_emb, match_labels, cap_lens, class_ids)
                kl_loss = KL_loss(mu, logvar)
                errG_total += kl_loss
                G_logs += 'kl_loss: %.2f ' % kl_loss.item()

                errG_total_2, G_logs_2, cnn_code_2 = \
                    generator_loss(netsD, image_encoder, fake_imgs_2, real_labels_2,
                                   words_embs_2, sent_emb_2, match_labels_2, cap_lens_2, class_ids_2)
                kl_loss_2 = KL_loss(mu_2, logvar_2)
                errG_total_2 += kl_loss_2
                G_logs_2 += 'kl_loss: %.2f ' % kl_loss_2.item()
                G_logs += G_logs_2
                errG_total += errG_total_2

                # undo the length-based sorting so the two image codes are
                # aligned image-by-image before the contrastive loss
                _, ori_indices = torch.sort(sort_ind, 0)
                _, ori_indices_2 = torch.sort(sort_ind_2, 0)
                cnn_code = cnn_code[ori_indices]
                cnn_code_2 = cnn_code_2[ori_indices_2]
                cnn_code = l2norm(cnn_code, dim=1)
                cnn_code_2 = l2norm(cnn_code_2, dim=1)
                contrative_loss = criterion(cnn_code, cnn_code_2)
                contrative_loss = contrative_loss * 0.2  # contrastive weight
                G_logs += 'contrative_loss: %.2f ' % contrative_loss.item()
                errG_total += contrative_loss

                # backward and update parameters
                errG_total.backward()
                optimizerG.step()
                # exponential moving average of the generator weights
                for p, avg_p in zip(netG.parameters(), avg_param_G):
                    avg_p.mul_(0.999).add_(0.001, p.data)

                if gen_iterations % 100 == 0:
                    print('Epoch [{}/{}] Step [{}/{}]'.format(epoch, self.max_epoch, step,
                                                              self.num_batches) + ' ' + D_logs + ' ' + G_logs)
                # save images
                if gen_iterations % 10000 == 0:
                    # swap in the EMA weights (the image dump itself is disabled)
                    backup_para = copy_G_params(netG)
                    load_params(netG, avg_param_G)
                    #self.save_img_results(netG, fixed_noise, sent_emb, words_embs, mask, image_encoder,
                    #                      captions, cap_lens, epoch, imgs[-1], name='average')
                    load_params(netG, backup_para)

            end_t = time.time()

            print('''[%d/%d] Loss_D: %.2f Loss_G: %.2f Time: %.2fs''' % (
                epoch, self.max_epoch, errD_total.item(), errG_total.item(), end_t - start_t))
            print('-' * 89)
            if epoch % cfg.TRAIN.SNAPSHOT_INTERVAL == 0:  # and epoch != 0:
                self.save_model(netG, avg_param_G, netsD, epoch)
        self.save_model(netG, avg_param_G, netsD, self.max_epoch)
    def save_singleimages(self, images, filenames, save_dir,
                          split_dir, sentenceID=0):
        """Write each generated image of the batch to its own jpg file.

        Files are placed under ``save_dir/single_samples/split_dir/`` and
        named after the sample key plus the sentence index.
        """
        for i in range(images.size(0)):
            s_tmp = '%s/single_samples/%s/%s' %\
                (save_dir, split_dir, filenames[i])
            folder = s_tmp[:s_tmp.rfind('/')]
            if not os.path.isdir(folder):
                print('Make a new folder: ', folder)
                mkdir_p(folder)

            fullpath = '%s_%d.jpg' % (s_tmp, sentenceID)
            # map generator output from [-1, 1] to [0, 255] bytes
            # range from [-1, 1] to [0, 1]
            # img = (images[i] + 1.0) / 2
            img = images[i].add(1).div(2).mul(255).clamp(0, 255).byte()
            # range from [0, 1] to [0, 255]; CHW -> HWC for PIL
            ndarr = img.permute(1, 2, 0).data.cpu().numpy()
            im = Image.fromarray(ndarr)
            im.save(fullpath)
    def sampling(self, split_dir):
        """Generate images for the whole validation split and accumulate
        R-precision over 30000 generated samples.

        Loads the generator from cfg.TRAIN.NET_G and the text/image encoders
        from cfg.TRAIN.NET_E (image encoder path derived by name substitution),
        then loops over the data loader up to 11 times, saving the final-stage
        fake image per caption and scoring it against 1 true + 99 mismatched
        captions (argmax == 0 counts as a hit).
        """
        if cfg.TRAIN.NET_G == '':
            print('Error: the path for morels is not found!')
        else:
            if split_dir == 'test':
                split_dir = 'valid'
            # Build and load the generator
            if cfg.GAN.B_DCGAN:
                netG = G_DCGAN()
            else:
                netG = G_NET()
            netG.apply(weights_init)
            netG.cuda()
            netG.eval()
            # load text encoder
            text_encoder = RNN_ENCODER(self.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
            state_dict = torch.load(cfg.TRAIN.NET_E, map_location=lambda storage, loc: storage)
            text_encoder.load_state_dict(state_dict)
            print('Load text encoder from:', cfg.TRAIN.NET_E)
            text_encoder = text_encoder.cuda()
            text_encoder.eval()
            #load image encoder
            image_encoder = CNN_ENCODER(cfg.TEXT.EMBEDDING_DIM)
            img_encoder_path = cfg.TRAIN.NET_E.replace('text_encoder', 'image_encoder')
            state_dict = torch.load(img_encoder_path, map_location=lambda storage, loc: storage)
            image_encoder.load_state_dict(state_dict)
            print('Load image encoder from:', img_encoder_path)
            image_encoder = image_encoder.cuda()
            image_encoder.eval()
            batch_size = self.batch_size
            nz = cfg.GAN.Z_DIM
            # NOTE(review): `volatile=True` is a deprecated pre-0.4 PyTorch
            # no-grad flag; kept as-is for behavioral fidelity.
            noise = Variable(torch.FloatTensor(batch_size, nz), volatile=True)
            noise = noise.cuda()
            model_dir = cfg.TRAIN.NET_G
            state_dict = torch.load(model_dir, map_location=lambda storage, loc: storage)
            # state_dict = torch.load(cfg.TRAIN.NET_G)
            netG.load_state_dict(state_dict)
            print('Load G from: ', model_dir)
            # the path to save generated images
            s_tmp = model_dir[:model_dir.rfind('.pth')]
            save_dir = '%s/%s' % (s_tmp, split_dir)
            mkdir_p(save_dir)
            cnt = 0
            R_count = 0
            # R[k] = 1 iff the k-th generated image ranked its true caption first
            R = np.zeros(30000)
            cont = True
            for ii in range(11):  # (cfg.TEXT.CAPTIONS_PER_IMAGE):
                if (cont == False):
                    break
                for step, data in enumerate(self.data_loader, 0):
                    cnt += batch_size
                    if (cont == False):
                        break
                    if step % 100 == 0:
                        print('cnt: ', cnt)
                    # if step > 50:
                    #     break
                    #imgs, captions, cap_lens, class_ids, keys = prepare_data(data)
                    imgs, imgs_2, captions, cap_lens, class_ids, keys, captions_2, cap_lens_2, class_ids_2, \
                            sort_ind, sort_ind_2 = prepare_data(data)
                    hidden = text_encoder.init_hidden(batch_size)
                    # words_embs: batch_size x nef x seq_len
                    # sent_emb: batch_size x nef
                    words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)
                    words_embs, sent_emb = words_embs.detach(), sent_emb.detach()
                    mask = (captions == 0)
                    num_words = words_embs.size(2)
                    if mask.size(1) > num_words:
                        mask = mask[:, :num_words]
                    #######################################################
                    # (2) Generate fake images
                    ######################################################
                    noise.data.normal_(0, 1)
                    fake_imgs, _, _, _ = netG(noise, sent_emb, words_embs, mask, cap_lens)
                    for j in range(batch_size):
                        s_tmp = '%s/single/%s' % (save_dir, keys[j])
                        folder = s_tmp[:s_tmp.rfind('/')]
                        if not os.path.isdir(folder):
                            #print('Make a new folder: ', folder)
                            mkdir_p(folder)
                        k = -1
                        # for k in range(len(fake_imgs)):
                        im = fake_imgs[k][j].data.cpu().numpy()
                        # [-1, 1] --> [0, 255]
                        im = (im + 1.0) * 127.5
                        im = im.astype(np.uint8)
                        im = np.transpose(im, (1, 2, 0))
                        im = Image.fromarray(im)
                        fullpath = '%s_s%d_%d.png' % (s_tmp, k, ii)
                        im.save(fullpath)
                    _, cnn_code = image_encoder(fake_imgs[-1])
                    for i in range(batch_size):
                        # 99 mismatched captions per sample for R-precision
                        mis_captions, mis_captions_len = self.dataset.get_mis_caption(class_ids[i])
                        hidden = text_encoder.init_hidden(99)
                        _, sent_emb_t = text_encoder(mis_captions, mis_captions_len, hidden)
                        rnn_code = torch.cat((sent_emb[i, :].unsqueeze(0), sent_emb_t), 0)
                        ### cnn_code = 1 * nef
                        ### rnn_code = 100 * nef
                        scores = torch.mm(cnn_code[i].unsqueeze(0), rnn_code.transpose(0, 1))  # 1* 100
                        cnn_code_norm = torch.norm(cnn_code[i].unsqueeze(0), 2, dim=1, keepdim=True)
                        rnn_code_norm = torch.norm(rnn_code, 2, dim=1, keepdim=True)
                        norm = torch.mm(cnn_code_norm, rnn_code_norm.transpose(0, 1))
                        scores0 = scores / norm.clamp(min=1e-8)
                        if torch.argmax(scores0) == 0:
                            R[R_count] = 1
                        R_count += 1
                        if R_count >= 30000:
                            # report mean/std of R-precision over 10 folds of 3000
                            sum = np.zeros(10)
                            np.random.shuffle(R)
                            for i in range(10):
                                sum[i] = np.average(R[i * 3000:(i + 1) * 3000 - 1])
                            R_mean = np.average(sum)
                            R_std = np.std(sum)
                            print("R mean:{:.4f} std:{:.4f}".format(R_mean, R_std))
                            cont = False
    def gen_example(self, data_dic):
        """Generate images (and attention-map visualizations) for customized
        captions.

        `data_dic` maps a key (derived from the example filename) to
        [cap_array, cap_lens, sorted_indices] as produced in main.py.
        Outputs are written next to the generator checkpoint path.
        """
        if cfg.TRAIN.NET_G == '':
            print('Error: the path for morels is not found!')
        else:
            # Build and load the generator
            text_encoder = \
                RNN_ENCODER(self.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
            state_dict = \
                torch.load(cfg.TRAIN.NET_E, map_location=lambda storage, loc: storage)
            text_encoder.load_state_dict(state_dict)
            print('Load text encoder from:', cfg.TRAIN.NET_E)
            text_encoder = text_encoder.cuda()
            text_encoder.eval()
            # the path to save generated images
            if cfg.GAN.B_DCGAN:
                netG = G_DCGAN()
            else:
                netG = G_NET()
            s_tmp = cfg.TRAIN.NET_G[:cfg.TRAIN.NET_G.rfind('.pth')]
            model_dir = cfg.TRAIN.NET_G
            state_dict = \
                torch.load(model_dir, map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load G from: ', model_dir)
            netG.cuda()
            netG.eval()
            for key in data_dic:
                save_dir = '%s/%s' % (s_tmp, key)
                mkdir_p(save_dir)
                captions, cap_lens, sorted_indices = data_dic[key]
                batch_size = captions.shape[0]
                nz = cfg.GAN.Z_DIM
                # NOTE(review): `volatile=True` is a deprecated pre-0.4 flag.
                captions = Variable(torch.from_numpy(captions), volatile=True)
                cap_lens = Variable(torch.from_numpy(cap_lens), volatile=True)
                captions = captions.cuda()
                cap_lens = cap_lens.cuda()
                for i in range(1):  # 16
                    noise = Variable(torch.FloatTensor(batch_size, nz), volatile=True)
                    noise = noise.cuda()
                    #######################################################
                    # (1) Extract text embeddings
                    ######################################################
                    hidden = text_encoder.init_hidden(batch_size)
                    # words_embs: batch_size x nef x seq_len
                    # sent_emb: batch_size x nef
                    words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)
                    mask = (captions == 0)
                    #######################################################
                    # (2) Generate fake images
                    ######################################################
                    noise.data.normal_(0, 1)
                    fake_imgs, attention_maps, _, _ = netG(noise, sent_emb, words_embs, mask, cap_lens)
                    # G attention
                    cap_lens_np = cap_lens.cpu().data.numpy()
                    for j in range(batch_size):
                        save_name = '%s/%d_s_%d' % (save_dir, i, sorted_indices[j])
                        for k in range(len(fake_imgs)):
                            # save every generator stage's output
                            im = fake_imgs[k][j].data.cpu().numpy()
                            im = (im + 1.0) * 127.5
                            im = im.astype(np.uint8)
                            # print('im', im.shape)
                            im = np.transpose(im, (1, 2, 0))
                            # print('im', im.shape)
                            im = Image.fromarray(im)
                            fullpath = '%s_g%d.png' % (save_name, k)
                            im.save(fullpath)
                        for k in range(len(attention_maps)):
                            # attention map k belongs to stage k+1's image
                            if len(fake_imgs) > 1:
                                im = fake_imgs[k + 1].detach().cpu()
                            else:
                                im = fake_imgs[0].detach().cpu()
                            attn_maps = attention_maps[k]
                            att_sze = attn_maps.size(2)
                            img_set, sentences = \
                                build_super_images2(im[j].unsqueeze(0),
                                                    captions[j].unsqueeze(0),
                                                    [cap_lens_np[j]], self.ixtoword,
                                                    [attn_maps[j]], att_sze)
                            if img_set is not None:
                                im = Image.fromarray(img_set)
                                fullpath = '%s_a%d.png' % (save_name, k)
                                im.save(fullpath)
| 29,111 | 42.975831 | 110 | py |
T2I_CL | T2I_CL-main/DM-GAN+CL/code/miscc/losses.py | import torch
import torch.nn as nn
import numpy as np
from miscc.config import cfg
from GlobalAttention import func_attention
# ##################Loss for matching text-image###################
def cosine_similarity(x1, x2, dim=1, eps=1e-8):
    """Cosine similarity between x1 and x2 along `dim`.

    `eps` keeps the denominator away from zero; the result is squeezed so
    singleton dimensions collapse.
    """
    dot = (x1 * x2).sum(dim)
    denom = (x1.norm(2, dim) * x2.norm(2, dim)).clamp(min=eps)
    return (dot / denom).squeeze()
def sent_loss(cnn_code, rnn_code, labels, class_ids,
              batch_size, eps=1e-8):
    """DAMSM sentence-level matching loss.

    Computes scaled cosine-similarity scores between image codes (`cnn_code`)
    and sentence codes (`rnn_code`), masks out same-class mismatches, and
    returns a pair of cross-entropy losses (image->text and text->image).
    Returns (None, None) when `labels` is None.
    """
    # ### Mask mis-match samples  ###
    # that come from the same class as the real sample ###
    masks = []
    if class_ids is not None:
        for i in range(batch_size):
            # True where another sample shares sample i's class (excluded
            # from the softmax because it is not a genuine mismatch)
            mask = (class_ids == class_ids[i]).astype(np.uint8)
            mask[i] = 0
            masks.append(mask.reshape((1, -1)))
        masks = np.concatenate(masks, 0)
        # masks: batch_size x batch_size
        # masks = torch.ByteTensor(masks)
        masks = torch.BoolTensor(masks)
        if cfg.CUDA:
            masks = masks.cuda()
    # --> seq_len x batch_size x nef
    if cnn_code.dim() == 2:
        cnn_code = cnn_code.unsqueeze(0)
        rnn_code = rnn_code.unsqueeze(0)
    # cnn_code_norm / rnn_code_norm: seq_len x batch_size x 1
    cnn_code_norm = torch.norm(cnn_code, 2, dim=2, keepdim=True)
    rnn_code_norm = torch.norm(rnn_code, 2, dim=2, keepdim=True)
    # scores* / norm*: seq_len x batch_size x batch_size
    scores0 = torch.bmm(cnn_code, rnn_code.transpose(1, 2))
    norm0 = torch.bmm(cnn_code_norm, rnn_code_norm.transpose(1, 2))
    # GAMMA3 sharpens the softmax over candidates
    scores0 = scores0 / norm0.clamp(min=eps) * cfg.TRAIN.SMOOTH.GAMMA3
    # --> batch_size x batch_size
    scores0 = scores0.squeeze()
    if class_ids is not None:
        scores0.data.masked_fill_(masks, -float('inf'))
    scores1 = scores0.transpose(0, 1)
    if labels is not None:
        loss0 = nn.CrossEntropyLoss()(scores0, labels)
        loss1 = nn.CrossEntropyLoss()(scores1, labels)
    else:
        loss0, loss1 = None, None
    return loss0, loss1
def words_loss(img_features, words_emb, labels,
               cap_lens, class_ids, batch_size):
    """DAMSM word-level matching loss.

    words_emb(query): batch x nef x seq_len
    img_features(context): batch x nef x 17 x 17

    For each caption, attends its words over every image's regions, folds the
    per-word cosine similarities into an image-sentence score (Eq. 10 of the
    AttnGAN paper), masks same-class mismatches, and returns two
    cross-entropy losses plus the per-sample attention maps.
    """
    masks = []
    att_maps = []
    similarities = []
    cap_lens = cap_lens.data.tolist()
    for i in range(batch_size):
        if class_ids is not None:
            mask = (class_ids == class_ids[i]).astype(np.uint8)
            mask[i] = 0
            masks.append(mask.reshape((1, -1)))
        # Get the i-th text description
        words_num = cap_lens[i]
        # -> 1 x nef x words_num
        word = words_emb[i, :, :words_num].unsqueeze(0).contiguous()
        # -> batch_size x nef x words_num
        word = word.repeat(batch_size, 1, 1)
        # batch x nef x 17*17
        context = img_features
        """
            word(query): batch x nef x words_num
            context: batch x nef x 17 x 17
            weiContext: batch x nef x words_num
            attn: batch x words_num x 17 x 17
        """
        weiContext, attn = func_attention(word, context, cfg.TRAIN.SMOOTH.GAMMA1)
        att_maps.append(attn[i].unsqueeze(0).contiguous())
        # --> batch_size x words_num x nef
        word = word.transpose(1, 2).contiguous()
        weiContext = weiContext.transpose(1, 2).contiguous()
        # --> batch_size*words_num x nef
        word = word.view(batch_size * words_num, -1)
        weiContext = weiContext.view(batch_size * words_num, -1)
        #
        # -->batch_size*words_num
        row_sim = cosine_similarity(word, weiContext)
        # --> batch_size x words_num
        row_sim = row_sim.view(batch_size, words_num)
        # Eq. (10)
        row_sim.mul_(cfg.TRAIN.SMOOTH.GAMMA2).exp_()
        row_sim = row_sim.sum(dim=1, keepdim=True)
        row_sim = torch.log(row_sim)
        # --> 1 x batch_size
        # similarities(i, j): the similarity between the i-th image and the j-th text description
        similarities.append(row_sim)
    # batch_size x batch_size
    similarities = torch.cat(similarities, 1)
    if class_ids is not None:
        masks = np.concatenate(masks, 0)
        # masks: batch_size x batch_size
        # masks = torch.ByteTensor(masks)
        masks = torch.BoolTensor(masks)
        if cfg.CUDA:
            masks = masks.cuda()
    similarities = similarities * cfg.TRAIN.SMOOTH.GAMMA3
    if class_ids is not None:
        similarities.data.masked_fill_(masks, -float('inf'))
    similarities1 = similarities.transpose(0, 1)
    if labels is not None:
        loss0 = nn.CrossEntropyLoss()(similarities, labels)
        loss1 = nn.CrossEntropyLoss()(similarities1, labels)
    else:
        loss0, loss1 = None, None
    return loss0, loss1, att_maps
# ##################Loss for G and Ds##############################
def discriminator_loss(netD, real_imgs, fake_imgs, conditions,
                       real_labels, fake_labels):
    """Discriminator loss: conditional real/fake/wrong-pair terms, plus
    unconditional real/fake terms when the D has an unconditional head.

    Returns (errD, log) where `log` reports mean real/fake logits.
    """
    # Forward
    real_features = netD(real_imgs)
    fake_features = netD(fake_imgs.detach())
    # Conditional losses: real image + matching text should score 1,
    # fake image + text and real image + wrong text should score 0.
    cond_real_logits = netD.COND_DNET(real_features, conditions)
    cond_real_errD = nn.BCELoss()(cond_real_logits, real_labels)
    cond_fake_logits = netD.COND_DNET(fake_features, conditions)
    cond_fake_errD = nn.BCELoss()(cond_fake_logits, fake_labels)
    # "wrong" pairs: real images matched with the next sample's condition
    batch_size = real_features.size(0)
    cond_wrong_logits = netD.COND_DNET(real_features[:(batch_size - 1)], conditions[1:batch_size])
    cond_wrong_errD = nn.BCELoss()(cond_wrong_logits, fake_labels[1:batch_size])
    if netD.UNCOND_DNET is not None:
        real_logits = netD.UNCOND_DNET(real_features)
        fake_logits = netD.UNCOND_DNET(fake_features)
        real_errD = nn.BCELoss()(real_logits, real_labels)
        fake_errD = nn.BCELoss()(fake_logits, fake_labels)
        errD = ((real_errD + cond_real_errD) / 2. +
                (fake_errD + cond_fake_errD + cond_wrong_errD) / 3.)
    else:
        errD = cond_real_errD + (cond_fake_errD + cond_wrong_errD) / 2.
        # BUG FIX: the original referenced real_logits/fake_logits in the log
        # below even on this branch, raising NameError. Fall back to the
        # conditional logits so the accuracy log stays meaningful.
        real_logits, fake_logits = cond_real_logits, cond_fake_logits
    log = 'Real_Acc: {:.4f} Fake_Acc: {:.4f} '.format(torch.mean(real_logits).item(), torch.mean(fake_logits).item())
    return errD, log
def generator_loss(netsD, image_encoder, fake_imgs, real_labels,
                   words_embs, sent_emb, match_labels,
                   cap_lens, class_ids):
    """Generator loss: adversarial terms from every discriminator plus DAMSM
    word/sentence matching losses on the final-stage image.

    Returns (errG_total, logs, cnn_code).  NOTE(review): `cnn_code` is only
    bound inside the last-discriminator branch, so `netsD` must be non-empty
    or the return raises NameError -- confirm callers always pass >= 1 D.
    """
    numDs = len(netsD)
    batch_size = real_labels.size(0)
    logs = ''
    # Forward
    errG_total = 0
    for i in range(numDs):
        features = netsD[i](fake_imgs[i])
        cond_logits = netsD[i].COND_DNET(features, sent_emb)
        cond_errG = nn.BCELoss()(cond_logits, real_labels)
        if netsD[i].UNCOND_DNET is not None:
            logits = netsD[i].UNCOND_DNET(features)
            errG = nn.BCELoss()(logits, real_labels)
            g_loss = errG + cond_errG
        else:
            g_loss = cond_errG
        errG_total += g_loss
        # err_img = errG_total.data[0]
        logs += 'g_loss%d: %.2f ' % (i, g_loss.item())
        # Ranking loss
        if i == (numDs - 1):
            # DAMSM matching losses apply only to the highest-resolution stage
            # words_features: batch_size x nef x 17 x 17
            # sent_code: batch_size x nef
            region_features, cnn_code = image_encoder(fake_imgs[i])
            w_loss0, w_loss1, _ = words_loss(region_features, words_embs,
                                             match_labels, cap_lens,
                                             class_ids, batch_size)
            w_loss = (w_loss0 + w_loss1) * cfg.TRAIN.SMOOTH.LAMBDA
            # err_words = err_words + w_loss.data[0]
            s_loss0, s_loss1 = sent_loss(cnn_code, sent_emb,
                                         match_labels, class_ids, batch_size)
            s_loss = (s_loss0 + s_loss1) * cfg.TRAIN.SMOOTH.LAMBDA
            # err_sent = err_sent + s_loss.data[0]
            errG_total += w_loss + s_loss
            logs += 'w_loss: %.2f s_loss: %.2f ' % (w_loss.item(), s_loss.item())
            #
            # # Ranking loss
            # # words_features: batch_size x nef x 17 x 17
            # # sent_code: batch_size x nef
            # region_features, cnn_code = image_encoder(fake_imgs[i])
            # w_loss0, w_loss1, _ = words_loss(region_features, words_embs,
            #                                  match_labels, cap_lens,
            #                                  class_ids, batch_size)
            # w_loss = (w_loss0 + w_loss1) * cfg.TRAIN.SMOOTH.LAMBDA
            # # err_words = err_words + w_loss.data[0]
            #
            # s_loss0, s_loss1 = sent_loss(cnn_code, sent_emb,
            #                              match_labels, class_ids, batch_size)
            # s_loss = (s_loss0 + s_loss1) * cfg.TRAIN.SMOOTH.LAMBDA
            # # err_sent = err_sent + s_loss.data[0]
            #
            # errG_total += w_loss + s_loss
            # logs += 'w_loss: %.2f s_loss: %.2f ' % (w_loss.item(), s_loss.item())
    return errG_total, logs, cnn_code
##################################################################
def KL_loss(mu, logvar):
    """KL divergence between N(mu, sigma^2) and N(0, I), averaged over all
    elements: -0.5 * mean(1 + log(sigma^2) - mu^2 - sigma^2)."""
    return -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp())
| 9,225 | 37.441667 | 117 | py |
T2I_CL | T2I_CL-main/DM-GAN+CL/code/miscc/utils.py | import os
import errno
import numpy as np
from torch.nn import init
import torch
import torch.nn as nn
from PIL import Image, ImageDraw, ImageFont
from copy import deepcopy
import skimage.transform
from miscc.config import cfg
# For visualization ################################################
COLOR_DIC = {0:[128,64,128], 1:[244, 35,232],
2:[70, 70, 70], 3:[102,102,156],
4:[190,153,153], 5:[153,153,153],
6:[250,170, 30], 7:[220, 220, 0],
8:[107,142, 35], 9:[152,251,152],
10:[70,130,180], 11:[220,20, 60],
12:[255, 0, 0], 13:[0, 0, 142],
14:[119,11, 32], 15:[0, 60,100],
16:[0, 80, 100], 17:[0, 0, 230],
18:[0, 0, 70], 19:[0, 0, 0]}
FONT_MAX = 50
def drawCaption(convas, captions, ixtoword, vis_size, off1=2, off2=2):
    """Render each caption's words onto `convas` (a HxWx3 uint8 array).

    Word index 0 terminates a caption.  Returns the PIL image plus the list
    of decoded word lists.  NOTE(review): the font path is relative to the
    Pillow source tree and will fail if that file is absent -- confirm the
    runtime environment provides it.
    """
    num = captions.size(0)
    img_txt = Image.fromarray(convas)
    # get a font
    # fnt = None  # ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 50)
    fnt = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 50)
    # get a drawing context
    d = ImageDraw.Draw(img_txt)
    sentence_list = []
    for i in range(num):
        cap = captions[i].data.cpu().numpy()
        sentence = []
        for j in range(len(cap)):
            if cap[j] == 0:
                break
            # non-ASCII characters are dropped before drawing
            word = ixtoword[cap[j]].encode('ascii', 'ignore').decode('ascii')
            d.text(((j + off1) * (vis_size + off2), i * FONT_MAX), '%d:%s' % (j, word[:6]),
                   font=fnt, fill=(255, 255, 255, 255))
            sentence.append(word)
        sentence_list.append(sentence)
    return img_txt, sentence_list
def build_super_images(real_imgs, captions, ixtoword,
                       attn_maps, att_sze, lr_imgs=None,
                       batch_size=cfg.TRAIN.BATCH_SIZE,
                       max_word_num=cfg.TEXT.WORDS_NUM):
    """Build a visualization grid: per sample, the caption text, the image,
    and the per-word attention maps overlaid on the image.

    Returns (img_set, sentences) or (None, None equivalent) when the text
    strip and image row widths disagree.  At most `nvis` (8) samples shown.
    """
    nvis = 8
    real_imgs = real_imgs[:nvis]
    if lr_imgs is not None:
        lr_imgs = lr_imgs[:nvis]
    if att_sze == 17:
        vis_size = att_sze * 16
    else:
        vis_size = real_imgs.size(2)
    text_convas = \
        np.ones([batch_size * FONT_MAX,
                 (max_word_num + 2) * (vis_size + 2), 3],
                dtype=np.uint8)
    for i in range(max_word_num):
        istart = (i + 2) * (vis_size + 2)
        iend = (i + 3) * (vis_size + 2)
        text_convas[:, istart:iend, :] = COLOR_DIC[i]
    real_imgs = \
        nn.Upsample(size=(vis_size, vis_size), mode='bilinear', align_corners=True)(real_imgs)
    # [-1, 1] --> [0, 1]
    real_imgs.add_(1).div_(2).mul_(255)
    real_imgs = real_imgs.data.numpy()
    # b x c x h x w --> b x h x w x c
    real_imgs = np.transpose(real_imgs, (0, 2, 3, 1))
    pad_sze = real_imgs.shape
    middle_pad = np.zeros([pad_sze[2], 2, 3])
    post_pad = np.zeros([pad_sze[1], pad_sze[2], 3])
    if lr_imgs is not None:
        lr_imgs = \
            nn.Upsample(size=(vis_size, vis_size), mode='bilinear', align_corners=True)(lr_imgs)
        # [-1, 1] --> [0, 1]
        lr_imgs.add_(1).div_(2).mul_(255)
        lr_imgs = lr_imgs.data.numpy()
        # b x c x h x w --> b x h x w x c
        lr_imgs = np.transpose(lr_imgs, (0, 2, 3, 1))
    # batch x seq_len x 17 x 17 --> batch x 1 x 17 x 17
    seq_len = max_word_num
    img_set = []
    num = nvis  # len(attn_maps)
    text_map, sentences = \
        drawCaption(text_convas, captions, ixtoword, vis_size)
    text_map = np.asarray(text_map).astype(np.uint8)
    bUpdate = 1
    for i in range(num):
        attn = attn_maps[i].cpu().view(1, -1, att_sze, att_sze)
        # --> 1 x 1 x 17 x 17
        # first column shows the per-pixel max over all words
        attn_max = attn.max(dim=1, keepdim=True)
        attn = torch.cat([attn_max[0], attn], 1)
        #
        attn = attn.view(-1, 1, att_sze, att_sze)
        attn = attn.repeat(1, 3, 1, 1).data.numpy()
        # n x c x h x w --> n x h x w x c
        attn = np.transpose(attn, (0, 2, 3, 1))
        num_attn = attn.shape[0]
        #
        img = real_imgs[i]
        if lr_imgs is None:
            lrI = img
        else:
            lrI = lr_imgs[i]
        row = [lrI, middle_pad]
        row_merge = [img, middle_pad]
        row_beforeNorm = []
        minVglobal, maxVglobal = 1, 0
        for j in range(num_attn):
            one_map = attn[j]
            if (vis_size // att_sze) > 1:
                one_map = \
                    skimage.transform.pyramid_expand(one_map, sigma=20,
                                                     upscale=vis_size // att_sze)
            row_beforeNorm.append(one_map)
            minV = one_map.min()
            maxV = one_map.max()
            if minVglobal > minV:
                minVglobal = minV
            if maxVglobal < maxV:
                maxVglobal = maxV
        for j in range(seq_len + 1):
            if j < num_attn:
                # normalize with the row-global min/max so maps are comparable
                one_map = row_beforeNorm[j]
                one_map = (one_map - minVglobal) / (maxVglobal - minVglobal)
                one_map *= 255
                #
                PIL_im = Image.fromarray(np.uint8(img))
                PIL_att = Image.fromarray(np.uint8(one_map))
                merged = \
                    Image.new('RGBA', (vis_size, vis_size), (0, 0, 0, 0))
                mask = Image.new('L', (vis_size, vis_size), (210))
                merged.paste(PIL_im, (0, 0))
                merged.paste(PIL_att, (0, 0), mask)
                merged = np.array(merged)[:, :, :3]
            else:
                one_map = post_pad
                merged = post_pad
            row.append(one_map)
            row.append(middle_pad)
            #
            row_merge.append(merged)
            row_merge.append(middle_pad)
        row = np.concatenate(row, 1)
        row_merge = np.concatenate(row_merge, 1)
        txt = text_map[i * FONT_MAX: (i + 1) * FONT_MAX]
        if txt.shape[1] != row.shape[1]:
            print('txt', txt.shape, 'row', row.shape)
            bUpdate = 0
            break
        row = np.concatenate([txt, row, row_merge], 0)
        img_set.append(row)
    if bUpdate:
        img_set = np.concatenate(img_set, 0)
        img_set = img_set.astype(np.uint8)
        return img_set, sentences
    else:
        return None
def build_super_images2(real_imgs, captions, cap_lens, ixtoword,
                        attn_maps, att_sze, vis_size=256, topK=5):
    """Variant of build_super_images for customized captions: shows only the
    topK words (ranked by thresholded attention confidence) per sample.

    Returns (img_set, sentences), or None when text/image widths disagree.
    """
    batch_size = real_imgs.size(0)
    max_word_num = np.max(cap_lens)
    text_convas = np.ones([batch_size * FONT_MAX,
                           max_word_num * (vis_size + 2), 3],
                          dtype=np.uint8)
    real_imgs = \
        nn.Upsample(size=(vis_size, vis_size), mode='bilinear')(real_imgs)
    # [-1, 1] --> [0, 1]
    real_imgs.add_(1).div_(2).mul_(255)
    real_imgs = real_imgs.data.numpy()
    # b x c x h x w --> b x h x w x c
    real_imgs = np.transpose(real_imgs, (0, 2, 3, 1))
    pad_sze = real_imgs.shape
    middle_pad = np.zeros([pad_sze[2], 2, 3])
    # batch x seq_len x 17 x 17 --> batch x 1 x 17 x 17
    img_set = []
    num = len(attn_maps)
    text_map, sentences = \
        drawCaption(text_convas, captions, ixtoword, vis_size, off1=0)
    text_map = np.asarray(text_map).astype(np.uint8)
    bUpdate = 1
    for i in range(num):
        attn = attn_maps[i].cpu().view(1, -1, att_sze, att_sze)
        #
        attn = attn.view(-1, 1, att_sze, att_sze)
        attn = attn.repeat(1, 3, 1, 1).data.numpy()
        # n x c x h x w --> n x h x w x c
        attn = np.transpose(attn, (0, 2, 3, 1))
        num_attn = cap_lens[i]
        # confidence threshold scales inversely with caption length
        thresh = 2./float(num_attn)
        #
        img = real_imgs[i]
        row = []
        row_merge = []
        row_txt = []
        row_beforeNorm = []
        conf_score = []
        for j in range(num_attn):
            one_map = attn[j]
            mask0 = one_map > (2. * thresh)
            conf_score.append(np.sum(one_map * mask0))
            mask = one_map > thresh
            one_map = one_map * mask
            if (vis_size // att_sze) > 1:
                one_map = \
                    skimage.transform.pyramid_expand(one_map, sigma=20,
                                                     upscale=vis_size // att_sze)
            minV = one_map.min()
            maxV = one_map.max()
            one_map = (one_map - minV) / (maxV - minV)
            row_beforeNorm.append(one_map)
        sorted_indices = np.argsort(conf_score)[::-1]
        for j in range(num_attn):
            one_map = row_beforeNorm[j]
            one_map *= 255
            #
            PIL_im = Image.fromarray(np.uint8(img))
            PIL_att = Image.fromarray(np.uint8(one_map))
            merged = \
                Image.new('RGBA', (vis_size, vis_size), (0, 0, 0, 0))
            mask = Image.new('L', (vis_size, vis_size), (180))  # (210)
            merged.paste(PIL_im, (0, 0))
            merged.paste(PIL_att, (0, 0), mask)
            merged = np.array(merged)[:, :, :3]
            row.append(np.concatenate([one_map, middle_pad], 1))
            #
            row_merge.append(np.concatenate([merged, middle_pad], 1))
            #
            txt = text_map[i * FONT_MAX:(i + 1) * FONT_MAX,
                           j * (vis_size + 2):(j + 1) * (vis_size + 2), :]
            row_txt.append(txt)
        # reorder columns by descending attention confidence
        row_new = []
        row_merge_new = []
        txt_new = []
        for j in range(num_attn):
            idx = sorted_indices[j]
            row_new.append(row[idx])
            row_merge_new.append(row_merge[idx])
            txt_new.append(row_txt[idx])
        row = np.concatenate(row_new[:topK], 1)
        row_merge = np.concatenate(row_merge_new[:topK], 1)
        txt = np.concatenate(txt_new[:topK], 1)
        if txt.shape[1] != row.shape[1]:
            print('Warnings: txt', txt.shape, 'row', row.shape,
                  'row_merge_new', row_merge_new.shape)
            bUpdate = 0
            break
        row = np.concatenate([txt, row_merge], 0)
        img_set.append(row)
    if bUpdate:
        img_set = np.concatenate(img_set, 0)
        img_set = img_set.astype(np.uint8)
        return img_set, sentences
    else:
        return None
####################################################################
def weights_init(m):
    """Module-wise weight initializer (used via net.apply(weights_init)).

    Conv layers get orthogonal init; BatchNorm gets N(1, 0.02) weight and
    zero bias; Linear gets orthogonal weight and zero bias.
    NOTE(review): the state_dict key-position checks below assume a fixed key
    layout (index 3 == 'weight_bar' for spectral-norm wrapped convs) -- this
    is fragile across PyTorch versions; confirm before upgrading torch.
    """
    # orthogonal_
    # xavier_uniform_(
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        #print(m.state_dict().keys())
        if list(m.state_dict().keys())[0] == 'weight':
            nn.init.orthogonal_(m.weight.data, 1.0)
        elif list(m.state_dict().keys())[3] == 'weight_bar':
            nn.init.orthogonal_(m.weight_bar.data, 1.0)
        #nn.init.orthogonal(m.weight.data, 1.0)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    elif classname.find('Linear') != -1:
        nn.init.orthogonal_(m.weight.data, 1.0)
        if m.bias is not None:
            m.bias.data.fill_(0.0)
def load_params(model, new_param):
    """Copy each tensor in `new_param` into the matching model parameter,
    in parameter-iteration order."""
    for dst, src in zip(model.parameters(), new_param):
        dst.data.copy_(src)
def copy_G_params(model):
    """Return a deep-copied snapshot (list of tensors) of the model's
    parameter data, e.g. for EMA bookkeeping."""
    snapshot = [p.data for p in model.parameters()]
    return deepcopy(snapshot)
def mkdir_p(path):
    """Create `path` and any missing parent directories (like `mkdir -p`).

    A no-op if the path already exists as a directory; raises OSError if it
    exists as a non-directory or creation fails.  `exist_ok=True` replaces
    the original manual EEXIST/isdir check with identical semantics.
    """
    os.makedirs(path, exist_ok=True)
| 11,370 | 34.095679 | 96 | py |
T2I_CL | T2I_CL-main/AttnGAN+CL/code/main.py | from __future__ import print_function
from miscc.config import cfg, cfg_from_file
from datasets import TextDataset
from trainer import condGANTrainer as trainer
import os
import sys
import time
import random
import pprint
import datetime
import dateutil.tz
import argparse
import numpy as np
import torch
import torchvision.transforms as transforms
dir_path = (os.path.abspath(os.path.join(os.path.realpath(__file__), './.')))
sys.path.append(dir_path)
def parse_args():
    """Parse command-line options for AttnGAN training and return the
    populated argparse namespace."""
    parser = argparse.ArgumentParser(description='Train a AttnGAN network')
    parser.add_argument('--cfg', dest='cfg_file', type=str,
                        default='cfg/bird_attn2.yml',
                        help='optional config file')
    parser.add_argument('--gpu', dest='gpu_id', type=int, default=-1)
    parser.add_argument('--data_dir', dest='data_dir', type=str, default='')
    parser.add_argument('--manualSeed', type=int, help='manual seed')
    return parser.parse_args()
def gen_example(wordtoix, algo):
    '''generate images from example sentences

    Reads DATA_DIR/example_filenames.txt, tokenizes each listed caption
    file, maps words through `wordtoix` (unknown words dropped), sorts
    captions by descending length, and hands the batches to algo.gen_example.
    '''
    from nltk.tokenize import RegexpTokenizer
    filepath = '%s/example_filenames.txt' % (cfg.DATA_DIR)
    data_dic = {}
    with open(filepath, "r") as f:
        filenames = f.read().split('\n')
        for name in filenames:
            if len(name) == 0:
                continue
            filepath = '%s/%s.txt' % (cfg.DATA_DIR, name)
            with open(filepath, "r") as f:
                print('Load from:', name)
                sentences = f.read().split('\n')
                # a list of indices for a sentence
                captions = []
                cap_lens = []
                for sent in sentences:
                    if len(sent) == 0:
                        continue
                    sent = sent.replace("\ufffd\ufffd", " ")
                    tokenizer = RegexpTokenizer(r'\w+')
                    tokens = tokenizer.tokenize(sent.lower())
                    if len(tokens) == 0:
                        print('sent', sent)
                        continue
                    rev = []
                    for t in tokens:
                        t = t.encode('ascii', 'ignore').decode('ascii')
                        if len(t) > 0 and t in wordtoix:
                            rev.append(wordtoix[t])
                    captions.append(rev)
                    cap_lens.append(len(rev))
            # pad to the longest caption; keep the sort permutation so the
            # caller can restore original order
            max_len = np.max(cap_lens)
            sorted_indices = np.argsort(cap_lens)[::-1]
            cap_lens = np.asarray(cap_lens)
            cap_lens = cap_lens[sorted_indices]
            cap_array = np.zeros((len(captions), max_len), dtype='int64')
            for i in range(len(captions)):
                idx = sorted_indices[i]
                cap = captions[idx]
                c_len = len(cap)
                cap_array[i, :c_len] = cap
            key = name[(name.rfind('/') + 1):]
            data_dic[key] = [cap_array, cap_lens, sorted_indices]
    algo.gen_example(data_dic)
if __name__ == "__main__":
    # Entry point: parse CLI args, seed RNGs, build the dataset/loader, and
    # run training, full-split sampling, or custom-caption generation.
    args = parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.gpu_id != -1:
        cfg.GPU_ID = args.gpu_id
    else:
        cfg.CUDA = False
    if args.data_dir != '':
        cfg.DATA_DIR = args.data_dir
    print('Using config:')
    pprint.pprint(cfg)
    # Fixed seed in eval mode for reproducible sampling
    if not cfg.TRAIN.FLAG:
        args.manualSeed = 100
    elif args.manualSeed is None:
        args.manualSeed = random.randint(1, 10000)
    random.seed(args.manualSeed)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    if cfg.CUDA:
        torch.cuda.manual_seed_all(args.manualSeed)
    now = datetime.datetime.now(dateutil.tz.tzlocal())
    timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
    output_dir = '../output/%s_%s_%s' % \
        (cfg.DATASET_NAME, cfg.CONFIG_NAME, timestamp)
    split_dir, bshuffle = 'train', True
    if not cfg.TRAIN.FLAG:
        # bshuffle = False
        split_dir = 'test'
    # Get data loader
    imsize = cfg.TREE.BASE_SIZE * (2 ** (cfg.TREE.BRANCH_NUM - 1))
    image_transform = transforms.Compose([
        transforms.Resize(int(imsize * 76 / 64)),
        transforms.RandomCrop(imsize),
        transforms.RandomHorizontalFlip()])
    dataset = TextDataset(cfg.DATA_DIR, split_dir,
                          base_size=cfg.TREE.BASE_SIZE,
                          transform=image_transform)
    assert dataset
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=cfg.TRAIN.BATCH_SIZE,
        drop_last=True, shuffle=bshuffle, num_workers=int(cfg.WORKERS))
    # Define models and go to train/evaluate
    algo = trainer(output_dir, dataloader, dataset.n_words, dataset.ixtoword, dataset)
    start_t = time.time()
    if cfg.TRAIN.FLAG:
        algo.train()
    else:
        '''generate images from pre-extracted embeddings'''
        if cfg.B_VALIDATION:
            algo.sampling(split_dir)  # generate images for the whole valid dataset
        else:
            gen_example(dataset.wordtoix, algo)  # generate images for customized captions
    end_t = time.time()
    print('Total time for training:', end_t - start_t)
| 5,165 | 33.671141 | 90 | py |
T2I_CL | T2I_CL-main/AttnGAN+CL/code/masks.py | import torch
def mask_correlated_samples(args):
    """Boolean (2N, 2N) mask for NT-Xent: False on the diagonal and on the
    positive-pair positions (i, N+i) / (N+i, i); True for all negatives."""
    n = args.batch_size
    mask = torch.ones((2 * n, 2 * n), dtype=bool).fill_diagonal_(0)
    idx = torch.arange(n)
    mask[idx, idx + n] = 0
    mask[idx + n, idx] = 0
    return mask
| 296 | 28.7 | 77 | py |
T2I_CL | T2I_CL-main/AttnGAN+CL/code/pretrain_DAMSM.py | from __future__ import print_function
from miscc.utils import mkdir_p
from miscc.utils import build_super_images
from miscc.losses import sent_loss, words_loss
from miscc.config import cfg, cfg_from_file
from datasets import TextDataset
from datasets import prepare_data
from model import RNN_ENCODER, CNN_ENCODER
import os
import sys
import time
import random
import pprint
import datetime
import dateutil.tz
import argparse
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from masks import mask_correlated_samples_2
from nt_xent import NT_Xent
dir_path = (os.path.abspath(os.path.join(os.path.realpath(__file__), './.')))
sys.path.append(dir_path)
UPDATE_INTERVAL = 50
def l2norm(X, dim, eps=1e-8):
    """Return X rescaled so its L2 norm along `dim` is (approximately) 1.

    `eps` is added to the norm to avoid division by zero.
    """
    denom = X.pow(2).sum(dim=dim, keepdim=True).sqrt() + eps
    return X / denom
def parse_args():
    """Parse command-line options for DAMSM pre-training and return the
    populated argparse namespace."""
    parser = argparse.ArgumentParser(description='Train a DAMSM network')
    parser.add_argument('--cfg', dest='cfg_file', type=str,
                        default='cfg/DAMSM/bird.yml',
                        help='optional config file')
    parser.add_argument('--gpu', dest='gpu_id', type=int, default=0)
    parser.add_argument('--data_dir', dest='data_dir', type=str, default='')
    parser.add_argument('--manualSeed', type=int, help='manual seed')
    return parser.parse_args()
def train(dataloader, cnn_model, rnn_model, batch_size,
          labels, optimizer, epoch, ixtoword, image_dir, criterion):
    """One epoch of DAMSM pre-training with a contrastive augmentation pair.

    Computes word- and sentence-level matching losses for both views of each
    sample, adds an NT-Xent contrastive loss (`criterion`) between the two
    sentence embeddings (restored to original batch order first), and
    updates both encoders.  Returns the running step count.
    """
    cnn_model.train()
    rnn_model.train()
    s_total_loss0 = 0
    s_total_loss1 = 0
    w_total_loss0 = 0
    w_total_loss1 = 0
    count = (epoch + 1) * len(dataloader)
    start_time = time.time()
    for step, data in enumerate(dataloader, 0):
        # print('step', step)
        rnn_model.zero_grad()
        cnn_model.zero_grad()
        imgs, imgs_2, captions, cap_lens, class_ids, keys, captions_2, cap_lens_2, class_ids_2, \
                sort_ind, sort_ind_2 = prepare_data(data)
        # words_features: batch_size x nef x 17 x 17
        # sent_code: batch_size x nef
        words_features, sent_code = cnn_model(imgs[-1])
        words_features_2, sent_code_2 = cnn_model(imgs_2[-1])
        # --> batch_size x nef x 17*17
        nef, att_sze = words_features.size(1), words_features.size(2)
        hidden = rnn_model.init_hidden(batch_size)
        # words_emb: batch_size x nef x seq_len
        # sent_emb: batch_size x nef
        words_emb, sent_emb = rnn_model(captions, cap_lens, hidden)
        words_emb_2, sent_emb_2 = rnn_model(captions_2, cap_lens_2, hidden)
        # DAMSM word-level loss for both augmented views
        w_loss0, w_loss1, attn_maps = words_loss(words_features, words_emb, labels,
                                                 cap_lens, class_ids, batch_size)
        w_total_loss0 += w_loss0.data
        w_total_loss1 += w_loss1.data
        loss = w_loss0 + w_loss1
        w2_loss0, w2_loss1, attn_maps_2 = words_loss(words_features_2, words_emb_2, labels,
                                                     cap_lens_2, class_ids_2, batch_size)
        w_total_loss0 += w2_loss0.data
        w_total_loss1 += w2_loss1.data
        loss += w2_loss0 + w2_loss1
        # DAMSM sentence-level loss for both views
        s_loss0, s_loss1 = \
            sent_loss(sent_code, sent_emb, labels, class_ids, batch_size)
        loss += s_loss0 + s_loss1
        s_total_loss0 += s_loss0.data
        s_total_loss1 += s_loss1.data
        s2_loss0, s2_loss1 = \
            sent_loss(sent_code_2, sent_emb_2, labels, class_ids_2, batch_size)
        loss += s2_loss0 + s2_loss1
        s_total_loss0 += s2_loss0.data
        s_total_loss1 += s2_loss1.data
        # Undo the per-view length-sorting so the two sentence embeddings
        # line up sample-by-sample before the contrastive loss.
        _, ori_indices = torch.sort(sort_ind, 0)
        _, ori_indices_2 = torch.sort(sort_ind_2, 0)
        sent_emb = sent_emb[ori_indices]
        sent_emb_2 = sent_emb_2[ori_indices_2]
        sent_emb = l2norm(sent_emb, dim=1)
        sent_emb_2 = l2norm(sent_emb_2, dim=1)
        contrative_loss = criterion(sent_emb, sent_emb_2)
        loss += contrative_loss
        loss.backward()
        # `clip_grad_norm_` helps prevent the exploding gradient problem in
        # RNNs / LSTMs.  (Fix: the non-underscore `clip_grad_norm` is the
        # deprecated alias and is removed in newer PyTorch releases.)
        torch.nn.utils.clip_grad_norm_(rnn_model.parameters(),
                                       cfg.TRAIN.RNN_GRAD_CLIP)
        optimizer.step()
        if step % UPDATE_INTERVAL == 0:
            count = epoch * len(dataloader) + step
            s_cur_loss0 = s_total_loss0.item() / UPDATE_INTERVAL
            s_cur_loss1 = s_total_loss1.item() / UPDATE_INTERVAL
            w_cur_loss0 = w_total_loss0.item() / UPDATE_INTERVAL
            w_cur_loss1 = w_total_loss1.item() / UPDATE_INTERVAL
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | '
                  's_loss {:5.2f} {:5.2f} | '
                  'w_loss {:5.2f} {:5.2f}'
                  .format(epoch, step, len(dataloader),
                          elapsed * 1000. / UPDATE_INTERVAL,
                          s_cur_loss0, s_cur_loss1,
                          w_cur_loss0, w_cur_loss1))
            s_total_loss0 = 0
            s_total_loss1 = 0
            w_total_loss0 = 0
            w_total_loss1 = 0
            start_time = time.time()
            # attention Maps
            # img_set, _ = \
            #     build_super_images(imgs[-1].cpu(), captions,
            #                        ixtoword, attn_maps, att_sze)
            # if img_set is not None:
            #     im = Image.fromarray(img_set)
            #     fullpath = '%s/attention_maps%d.png' % (image_dir, step)
            #     im.save(fullpath)
    return count
def evaluate(dataloader, cnn_model, rnn_model, batch_size, criterion):
    """Run DAMSM validation and return mean (sentence_loss, word_loss).

    Relies on the module-level ``labels`` tensor (built by build_models) and
    the file-level ``prepare_data`` / ``words_loss`` / ``sent_loss`` helpers.
    Only the first augmented view from prepare_data is scored; ``criterion``
    (the contrastive loss) is accepted for signature symmetry but unused here.

    NOTE(review): the loop stops after 51 batches and the totals are divided
    by the final ``step`` value — a loader with a single batch would divide
    by zero; presumably validation loaders are always longer. Verify.
    """
    cnn_model.eval()
    rnn_model.eval()
    s_total_loss = 0
    w_total_loss = 0
    for step, data in enumerate(dataloader, 0):
        # real_imgs, captions, cap_lens, \
        #     class_ids, keys = prepare_data(data)
        # prepare_data yields two augmented views; only the first is scored here
        real_imgs, imgs_2, captions, cap_lens, class_ids, keys, captions_2, cap_lens_2, class_ids_2, \
            sort_ind, sort_ind_2 = prepare_data(data)
        words_features, sent_code = cnn_model(real_imgs[-1])
        # nef = words_features.size(1)
        # words_features = words_features.view(batch_size, nef, -1)
        hidden = rnn_model.init_hidden(batch_size)
        words_emb, sent_emb = rnn_model(captions, cap_lens, hidden)
        w_loss0, w_loss1, attn = words_loss(words_features, words_emb, labels,
                                            cap_lens, class_ids, batch_size)
        w_total_loss += (w_loss0 + w_loss1).data
        s_loss0, s_loss1 = \
            sent_loss(sent_code, sent_emb, labels, class_ids, batch_size)
        s_total_loss += (s_loss0 + s_loss1).data
        if step == 50:
            break
    s_cur_loss = s_total_loss.item() / step
    w_cur_loss = w_total_loss.item() / step
    return s_cur_loss, w_cur_loss
def build_models():
    """Instantiate the DAMSM encoders, the match labels, and resume state.

    Reads the module-level ``dataset``, ``batch_size`` and ``cfg``.  When
    cfg.TRAIN.NET_E names a text-encoder checkpoint, both encoders are
    restored from it (the image-encoder path is derived by substitution)
    and the starting epoch is parsed from the filename.

    Returns:
        (text_encoder, image_encoder, labels, start_epoch)
    """
    text_encoder = RNN_ENCODER(dataset.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
    image_encoder = CNN_ENCODER(cfg.TEXT.EMBEDDING_DIM)
    # match targets: sample i in the batch matches caption i
    labels = Variable(torch.LongTensor(range(batch_size)))
    start_epoch = 0
    checkpoint = cfg.TRAIN.NET_E
    if checkpoint != '':
        text_encoder.load_state_dict(torch.load(checkpoint))
        print('Load ', checkpoint)
        #
        img_ckpt = checkpoint.replace('text_encoder', 'image_encoder')
        image_encoder.load_state_dict(torch.load(img_ckpt))
        print('Load ', img_ckpt)
        # the epoch number sits right after '..._encoder' in the filename
        epoch_str = checkpoint[checkpoint.rfind('_') + 8:checkpoint.rfind('.')]
        start_epoch = int(epoch_str) + 1
        print('start_epoch', start_epoch)
    if cfg.CUDA:
        text_encoder = text_encoder.cuda()
        image_encoder = image_encoder.cuda()
        labels = labels.cuda()
    return text_encoder, image_encoder, labels, start_epoch
if __name__ == "__main__":
    args = parse_args()
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.gpu_id == -1:
        cfg.CUDA = False
    else:
        cfg.GPU_ID = args.gpu_id
    if args.data_dir != '':
        cfg.DATA_DIR = args.data_dir
    print('Using config:')
    pprint.pprint(cfg)
    # Seed everything for reproducibility; a fixed seed is used outside training.
    if not cfg.TRAIN.FLAG:
        args.manualSeed = 100
    elif args.manualSeed is None:
        args.manualSeed = random.randint(1, 10000)
    random.seed(args.manualSeed)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    if cfg.CUDA:
        torch.cuda.manual_seed_all(args.manualSeed)
    ##########################################################################
    now = datetime.datetime.now(dateutil.tz.tzlocal())
    timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
    output_dir = '../output/%s_%s_%s' % \
        (cfg.DATASET_NAME, cfg.CONFIG_NAME, timestamp)
    model_dir = os.path.join(output_dir, 'Model')
    image_dir = os.path.join(output_dir, 'Image')
    mkdir_p(model_dir)
    mkdir_p(image_dir)
    torch.cuda.set_device(cfg.GPU_ID)
    cudnn.benchmark = True
    # Get data loader ##################################################
    imsize = cfg.TREE.BASE_SIZE * (2 ** (cfg.TREE.BRANCH_NUM-1))
    batch_size = cfg.TRAIN.BATCH_SIZE
    image_transform = transforms.Compose([
        # Resize replaces the long-deprecated transforms.Scale (removed from
        # modern torchvision); behaviour is identical and it matches the
        # transforms.Resize call already used in datasets.get_imgs.
        transforms.Resize(int(imsize * 76 / 64)),
        transforms.RandomCrop(imsize),
        transforms.RandomHorizontalFlip()])
    dataset = TextDataset(cfg.DATA_DIR, 'train',
                          base_size=cfg.TREE.BASE_SIZE,
                          transform=image_transform)
    print(dataset.n_words, dataset.embeddings_num)
    assert dataset
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, drop_last=True,
        shuffle=True, num_workers=int(cfg.WORKERS))
    # # validation data #
    dataset_val = TextDataset(cfg.DATA_DIR, 'test',
                              base_size=cfg.TREE.BASE_SIZE,
                              transform=image_transform)
    dataloader_val = torch.utils.data.DataLoader(
        dataset_val, batch_size=batch_size, drop_last=True,
        shuffle=True, num_workers=int(cfg.WORKERS))
    # Train ##############################################################
    text_encoder, image_encoder, labels, start_epoch = build_models()
    # optimize the text encoder and only the trainable image-encoder layers
    para = list(text_encoder.parameters())
    for v in image_encoder.parameters():
        if v.requires_grad:
            para.append(v)
    # optimizer = optim.Adam(para, lr=cfg.TRAIN.ENCODER_LR, betas=(0.5, 0.999))
    # At any point you can hit Ctrl + C to break out of training early.
    mask = mask_correlated_samples_2(batch_size)
    temperature = 0.5
    device = labels.get_device()
    criterion = NT_Xent(batch_size, temperature, mask, device)
    try:
        lr = cfg.TRAIN.ENCODER_LR
        for epoch in range(start_epoch, cfg.TRAIN.MAX_EPOCH):
            # a fresh optimizer each epoch applies the (possibly decayed) lr
            optimizer = optim.Adam(para, lr=lr, betas=(0.5, 0.999))
            epoch_start_time = time.time()
            count = train(dataloader, image_encoder, text_encoder,
                          batch_size, labels, optimizer, epoch,
                          dataset.ixtoword, image_dir, criterion)
            print('-' * 89)
            if len(dataloader_val) > 0:
                s_loss, w_loss = evaluate(dataloader_val, image_encoder,
                                          text_encoder, batch_size, criterion)
                print('| end epoch {:3d} | valid loss '
                      '{:5.2f} {:5.2f} | lr {:.5f}|'
                      .format(epoch, s_loss, w_loss, lr))
            print('-' * 89)
            # exponential lr decay, floored at a tenth of the initial lr
            if lr > cfg.TRAIN.ENCODER_LR/10.:
                lr *= 0.98
            if (epoch % cfg.TRAIN.SNAPSHOT_INTERVAL == 0 or
                    epoch == cfg.TRAIN.MAX_EPOCH):
                torch.save(image_encoder.state_dict(),
                           '%s/image_encoder%d.pth' % (model_dir, epoch))
                torch.save(text_encoder.state_dict(),
                           '%s/text_encoder%d.pth' % (model_dir, epoch))
                print('Save G/Ds models.')
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early')
| 13,050 | 34.083333 | 102 | py |
T2I_CL | T2I_CL-main/AttnGAN+CL/code/model.py | import torch
import torch.nn as nn
import torch.nn.parallel
from torch.autograd import Variable
import torchvision.models as models
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from miscc.config import cfg
from GlobalAttention import GlobalAttentionGeneral as ATT_NET
class GLU(nn.Module):
    """Gated Linear Unit.

    Splits the channel dimension in half and gates the first half with the
    sigmoid of the second, so the output has half the input's channels.
    """

    def __init__(self):
        super(GLU, self).__init__()

    def forward(self, x):
        channels = x.size(1)
        assert channels % 2 == 0, 'channels dont divide 2!'
        half = channels // 2
        gate = torch.sigmoid(x[:, half:])
        return x[:, :half] * gate
def conv1x1(in_planes, out_planes, bias=False):
    """Build a 1x1 convolution (stride 1, no padding).

    Args:
        in_planes: number of input channels.
        out_planes: number of output channels.
        bias: whether the convolution carries a learnable bias term.
    """
    layer = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                      stride=1, padding=0, bias=bias)
    return layer
def conv3x3(in_planes, out_planes):
    """Build a 3x3 convolution with padding 1 (spatial size preserved), no bias."""
    layer = nn.Conv2d(in_planes, out_planes, kernel_size=3,
                      stride=1, padding=1, bias=False)
    return layer
# Upsale the spatial size by a factor of 2
def upBlock(in_planes, out_planes):
    """2x nearest-neighbour upsample, then conv3x3 + BatchNorm + GLU.

    The conv emits ``out_planes * 2`` channels because GLU halves the channel
    count, so the block's final output has ``out_planes`` channels at twice
    the input resolution.
    """
    layers = [
        nn.Upsample(scale_factor=2, mode='nearest'),
        conv3x3(in_planes, out_planes * 2),
        nn.BatchNorm2d(out_planes * 2),
        GLU(),
    ]
    return nn.Sequential(*layers)
# Keep the spatial size
def Block3x3_relu(in_planes, out_planes):
    """conv3x3 + BatchNorm + GLU; spatial size preserved.

    The conv emits ``out_planes * 2`` channels because GLU halves the
    channel count, leaving ``out_planes`` channels on output.
    """
    layers = [
        conv3x3(in_planes, out_planes * 2),
        nn.BatchNorm2d(out_planes * 2),
        GLU(),
    ]
    return nn.Sequential(*layers)
class ResBlock(nn.Module):
    """Residual block: conv3x3 -> BN -> GLU -> conv3x3 -> BN, plus identity.

    Channel count is preserved (the first conv doubles channels, GLU halves
    them back).  The inner Sequential is kept under the attribute name
    ``block`` so existing checkpoints keep loading.
    """

    def __init__(self, channel_num):
        super(ResBlock, self).__init__()
        self.block = nn.Sequential(
            conv3x3(channel_num, channel_num * 2),
            nn.BatchNorm2d(channel_num * 2),
            GLU(),
            conv3x3(channel_num, channel_num),
            nn.BatchNorm2d(channel_num))

    def forward(self, x):
        # identity skip connection
        return self.block(x) + x
# ############## Text2Image Encoder-Decoder #######
class RNN_ENCODER(nn.Module):
    """Bidirectional LSTM/GRU text encoder (DAMSM).

    Embeds a padded batch of caption token ids and returns per-word features
    plus a sentence embedding.  RNN type and sequence length come from cfg.
    """
    def __init__(self, ntoken, ninput=300, drop_prob=0.5,
                 nhidden=128, nlayers=1, bidirectional=True):
        super(RNN_ENCODER, self).__init__()
        self.n_steps = cfg.TEXT.WORDS_NUM
        self.ntoken = ntoken # size of the dictionary
        self.ninput = ninput # size of each embedding vector
        self.drop_prob = drop_prob # probability of an element to be zeroed
        self.nlayers = nlayers # Number of recurrent layers
        self.bidirectional = bidirectional
        self.rnn_type = cfg.RNN_TYPE
        if bidirectional:
            self.num_directions = 2
        else:
            self.num_directions = 1
        # number of features in the hidden state
        # (divided so that directions concatenated give `nhidden` total)
        self.nhidden = nhidden // self.num_directions
        self.define_module()
        self.init_weights()
    def define_module(self):
        """Create the embedding, dropout, and recurrent layers."""
        self.encoder = nn.Embedding(self.ntoken, self.ninput)
        self.drop = nn.Dropout(self.drop_prob)
        if self.rnn_type == 'LSTM':
            # dropout: If non-zero, introduces a dropout layer on
            # the outputs of each RNN layer except the last layer
            self.rnn = nn.LSTM(self.ninput, self.nhidden,
                               self.nlayers, batch_first=True,
                               dropout=self.drop_prob,
                               bidirectional=self.bidirectional)
        elif self.rnn_type == 'GRU':
            self.rnn = nn.GRU(self.ninput, self.nhidden,
                              self.nlayers, batch_first=True,
                              dropout=self.drop_prob,
                              bidirectional=self.bidirectional)
        else:
            raise NotImplementedError
    def init_weights(self):
        """Uniformly initialise the embedding table in [-0.1, 0.1]."""
        initrange = 0.1
        self.encoder.weight.data.uniform_(-initrange, initrange)
        # Do not need to initialize RNN parameters, which have been initialized
        # http://pytorch.org/docs/master/_modules/torch/nn/modules/rnn.html#LSTM
        # self.decoder.weight.data.uniform_(-initrange, initrange)
        # self.decoder.bias.data.fill_(0)
    def init_hidden(self, bsz):
        """Return a zero initial hidden state for a batch of size `bsz`.

        For LSTM this is an (h0, c0) tuple; for GRU a single tensor.
        """
        weight = next(self.parameters()).data
        if self.rnn_type == 'LSTM':
            return (Variable(weight.new(self.nlayers * self.num_directions,
                                        bsz, self.nhidden).zero_()),
                    Variable(weight.new(self.nlayers * self.num_directions,
                                        bsz, self.nhidden).zero_()))
        else:
            return Variable(weight.new(self.nlayers * self.num_directions,
                                       bsz, self.nhidden).zero_())
    def forward(self, captions, cap_lens, hidden, mask=None):
        """Encode captions; `cap_lens` must be sorted descending for packing.

        Returns:
            words_emb: batch x (nhidden*num_directions) x seq_len
            sent_emb: batch x (nhidden*num_directions) — final hidden state.
        """
        # input: torch.LongTensor of size batch x n_steps
        # --> emb: batch x n_steps x ninput
        emb = self.drop(self.encoder(captions))
        #
        # Returns: a PackedSequence object
        cap_lens = cap_lens.data.tolist()
        emb = pack_padded_sequence(emb, cap_lens, batch_first=True)
        # #hidden and memory (num_layers * num_directions, batch, hidden_size):
        # tensor containing the initial hidden state for each element in batch.
        # #output (batch, seq_len, hidden_size * num_directions)
        # #or a PackedSequence object:
        # tensor containing output features (h_t) from the last layer of RNN
        output, hidden = self.rnn(emb, hidden)
        # PackedSequence object
        # --> (batch, seq_len, hidden_size * num_directions)
        output = pad_packed_sequence(output, batch_first=True)[0]
        # output = self.drop(output)
        # --> batch x hidden_size*num_directions x seq_len
        words_emb = output.transpose(1, 2)
        # --> batch x num_directions*hidden_size
        if self.rnn_type == 'LSTM':
            sent_emb = hidden[0].transpose(0, 1).contiguous()
        else:
            sent_emb = hidden.transpose(0, 1).contiguous()
        sent_emb = sent_emb.view(-1, self.nhidden * self.num_directions)
        return words_emb, sent_emb
class CNN_ENCODER(nn.Module):
    """Inception-v3 image encoder (DAMSM).

    Backbone weights are downloaded and frozen; only two small projection
    layers (`emb_features`, `emb_cnn_code`) are trained, mapping the
    Mixed_6e feature map and the pooled global code into the joint
    text-image embedding space of size `nef`.
    """
    def __init__(self, nef):
        super(CNN_ENCODER, self).__init__()
        if cfg.TRAIN.FLAG:
            self.nef = nef
        else:
            self.nef = 256 # define a uniform ranker
        model = models.inception_v3()
        url = 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'
        model.load_state_dict(model_zoo.load_url(url))
        # freeze the whole Inception backbone
        for param in model.parameters():
            param.requires_grad = False
        print('Load pretrained model from ', url)
        # print(model)
        self.define_module(model)
        self.init_trainable_weights()
    def define_module(self, model):
        """Adopt the pretrained Inception layers and add the two trainable heads."""
        self.Conv2d_1a_3x3 = model.Conv2d_1a_3x3
        self.Conv2d_2a_3x3 = model.Conv2d_2a_3x3
        self.Conv2d_2b_3x3 = model.Conv2d_2b_3x3
        self.Conv2d_3b_1x1 = model.Conv2d_3b_1x1
        self.Conv2d_4a_3x3 = model.Conv2d_4a_3x3
        self.Mixed_5b = model.Mixed_5b
        self.Mixed_5c = model.Mixed_5c
        self.Mixed_5d = model.Mixed_5d
        self.Mixed_6a = model.Mixed_6a
        self.Mixed_6b = model.Mixed_6b
        self.Mixed_6c = model.Mixed_6c
        self.Mixed_6d = model.Mixed_6d
        self.Mixed_6e = model.Mixed_6e
        self.Mixed_7a = model.Mixed_7a
        self.Mixed_7b = model.Mixed_7b
        self.Mixed_7c = model.Mixed_7c
        # trainable projections into the nef-dim embedding space
        self.emb_features = conv1x1(768, self.nef)
        self.emb_cnn_code = nn.Linear(2048, self.nef)
    def init_trainable_weights(self):
        """Uniformly initialise the two trainable projection layers."""
        initrange = 0.1
        self.emb_features.weight.data.uniform_(-initrange, initrange)
        self.emb_cnn_code.weight.data.uniform_(-initrange, initrange)
    def forward(self, x):
        """Return (region features: batch x nef x 17 x 17, global code: batch x nef)."""
        features = None
        # --> fixed-size input: batch x 3 x 299 x 299
        x = nn.Upsample(size=(299, 299), mode='bilinear')(x)
        # 299 x 299 x 3
        x = self.Conv2d_1a_3x3(x)
        # 149 x 149 x 32
        x = self.Conv2d_2a_3x3(x)
        # 147 x 147 x 32
        x = self.Conv2d_2b_3x3(x)
        # 147 x 147 x 64
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 73 x 73 x 64
        x = self.Conv2d_3b_1x1(x)
        # 73 x 73 x 80
        x = self.Conv2d_4a_3x3(x)
        # 71 x 71 x 192
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 35 x 35 x 192
        x = self.Mixed_5b(x)
        # 35 x 35 x 256
        x = self.Mixed_5c(x)
        # 35 x 35 x 288
        x = self.Mixed_5d(x)
        # 35 x 35 x 288
        x = self.Mixed_6a(x)
        # 17 x 17 x 768
        x = self.Mixed_6b(x)
        # 17 x 17 x 768
        x = self.Mixed_6c(x)
        # 17 x 17 x 768
        x = self.Mixed_6d(x)
        # 17 x 17 x 768
        x = self.Mixed_6e(x)
        # 17 x 17 x 768
        # image region features (taken before the final Inception stages)
        features = x
        # 17 x 17 x 768
        x = self.Mixed_7a(x)
        # 8 x 8 x 1280
        x = self.Mixed_7b(x)
        # 8 x 8 x 2048
        x = self.Mixed_7c(x)
        # 8 x 8 x 2048
        x = F.avg_pool2d(x, kernel_size=8)
        # 1 x 1 x 2048
        # x = F.dropout(x, training=self.training)
        # 1 x 1 x 2048
        x = x.view(x.size(0), -1)
        # 2048
        # global image features
        cnn_code = self.emb_cnn_code(x)
        # 512
        if features is not None:
            features = self.emb_features(features)
        return features, cnn_code
# ############## G networks ###################
class CA_NET(nn.Module):
    """Conditioning Augmentation: sample a conditioning code from a Gaussian
    whose (mu, logvar) are predicted from the sentence embedding.

    The fc layer emits c_dim*4 units; GLU halves that to c_dim*2, which is
    split into mu and logvar of size c_dim each.
    """
    # some code is modified from vae examples
    # (https://github.com/pytorch/examples/blob/master/vae/main.py)
    def __init__(self):
        super(CA_NET, self).__init__()
        self.t_dim = cfg.TEXT.EMBEDDING_DIM
        self.c_dim = cfg.GAN.CONDITION_DIM
        self.fc = nn.Linear(self.t_dim, self.c_dim * 4, bias=True)
        self.relu = GLU()
    def encode(self, text_embedding):
        """Predict (mu, logvar) of the conditioning Gaussian."""
        x = self.relu(self.fc(text_embedding))
        mu = x[:, :self.c_dim]
        logvar = x[:, self.c_dim:]
        return mu, logvar
    def reparametrize(self, mu, logvar):
        """Sample c = mu + eps * exp(logvar / 2) with eps ~ N(0, I)."""
        std = logvar.mul(0.5).exp_()
        if cfg.CUDA:
            eps = torch.cuda.FloatTensor(std.size()).normal_()
        else:
            eps = torch.FloatTensor(std.size()).normal_()
        eps = Variable(eps)
        return eps.mul(std).add_(mu)
    def forward(self, text_embedding):
        """Return (c_code, mu, logvar); mu/logvar feed the KL loss."""
        mu, logvar = self.encode(text_embedding)
        c_code = self.reparametrize(mu, logvar)
        return c_code, mu, logvar
class INIT_STAGE_G(nn.Module):
    """First generator stage: (noise + conditioning code) -> ngf/16 x 64 x 64
    feature map via an fc projection and four 2x upsampling blocks."""
    def __init__(self, ngf, ncf):
        super(INIT_STAGE_G, self).__init__()
        self.gf_dim = ngf
        self.in_dim = cfg.GAN.Z_DIM + ncf  # cfg.TEXT.EMBEDDING_DIM
        self.define_module()
    def define_module(self):
        """Build the fc stem (to a 4x4 map) and the upsampling pyramid."""
        nz, ngf = self.in_dim, self.gf_dim
        # fc emits 2x channels because GLU halves them
        self.fc = nn.Sequential(
            nn.Linear(nz, ngf * 4 * 4 * 2, bias=False),
            nn.BatchNorm1d(ngf * 4 * 4 * 2),
            GLU())
        self.upsample1 = upBlock(ngf, ngf // 2)
        self.upsample2 = upBlock(ngf // 2, ngf // 4)
        self.upsample3 = upBlock(ngf // 4, ngf // 8)
        self.upsample4 = upBlock(ngf // 8, ngf // 16)
    def forward(self, z_code, c_code):
        """
        :param z_code: batch x cfg.GAN.Z_DIM
        :param c_code: batch x cfg.TEXT.EMBEDDING_DIM
        :return: batch x ngf/16 x 64 x 64
        """
        c_z_code = torch.cat((c_code, z_code), 1)
        # state size ngf x 4 x 4
        out_code = self.fc(c_z_code)
        out_code = out_code.view(-1, self.gf_dim, 4, 4)
        # state size ngf/3 x 8 x 8
        out_code = self.upsample1(out_code)
        # state size ngf/4 x 16 x 16
        out_code = self.upsample2(out_code)
        # state size ngf/8 x 32 x 32
        out_code32 = self.upsample3(out_code)
        # state size ngf/16 x 64 x 64
        out_code64 = self.upsample4(out_code32)
        return out_code64
class NEXT_STAGE_G(nn.Module):
    """Refinement generator stage: attend over word embeddings, fuse the
    attended word context with the previous feature map, run residual
    blocks, and upsample 2x."""
    def __init__(self, ngf, nef, ncf):
        super(NEXT_STAGE_G, self).__init__()
        self.gf_dim = ngf
        self.ef_dim = nef
        self.cf_dim = ncf
        self.num_residual = cfg.GAN.R_NUM
        self.define_module()
    def _make_layer(self, block, channel_num):
        """Stack cfg.GAN.R_NUM copies of `block` into a Sequential."""
        layers = []
        for i in range(cfg.GAN.R_NUM):
            layers.append(block(channel_num))
        return nn.Sequential(*layers)
    def define_module(self):
        """Build the attention module, residual stack, and upsampler."""
        ngf = self.gf_dim
        self.att = ATT_NET(ngf, self.ef_dim)
        # residual stack runs on the concatenation [h_code; attended context]
        self.residual = self._make_layer(ResBlock, ngf * 2)
        self.upsample = upBlock(ngf * 2, ngf)
    def forward(self, h_code, c_code, word_embs, mask):
        """
        h_code1(query): batch x idf x ih x iw (queryL=ihxiw)
        word_embs(context): batch x cdf x sourceL (sourceL=seq_len)
        c_code1: batch x idf x queryL
        att1: batch x sourceL x queryL
        """
        self.att.applyMask(mask)
        c_code, att = self.att(h_code, word_embs)
        h_c_code = torch.cat((h_code, c_code), 1)
        out_code = self.residual(h_c_code)
        # state size ngf/2 x 2in_size x 2in_size
        out_code = self.upsample(out_code)
        return out_code, att
class GET_IMAGE_G(nn.Module):
    """Map an ngf-channel feature map to a 3-channel image in [-1, 1]
    via conv3x3 followed by tanh."""

    def __init__(self, ngf):
        super(GET_IMAGE_G, self).__init__()
        self.gf_dim = ngf
        self.img = nn.Sequential(conv3x3(ngf, 3), nn.Tanh())

    def forward(self, h_code):
        return self.img(h_code)
class G_NET(nn.Module):
    """Multi-stage AttnGAN generator.

    Stage 1 draws a 64x64 image from (noise, conditioning code); up to two
    attention-driven refinement stages (128x128, 256x256) follow, gated by
    cfg.TREE.BRANCH_NUM.  Returns all stage images plus attention maps and
    the CA-NET (mu, logvar) for the KL term.
    """
    def __init__(self):
        super(G_NET, self).__init__()
        ngf = cfg.GAN.GF_DIM
        nef = cfg.TEXT.EMBEDDING_DIM
        ncf = cfg.GAN.CONDITION_DIM
        self.ca_net = CA_NET()
        if cfg.TREE.BRANCH_NUM > 0:
            self.h_net1 = INIT_STAGE_G(ngf * 16, ncf)
            self.img_net1 = GET_IMAGE_G(ngf)
        # gf x 64 x 64
        if cfg.TREE.BRANCH_NUM > 1:
            self.h_net2 = NEXT_STAGE_G(ngf, nef, ncf)
            self.img_net2 = GET_IMAGE_G(ngf)
        if cfg.TREE.BRANCH_NUM > 2:
            self.h_net3 = NEXT_STAGE_G(ngf, nef, ncf)
            self.img_net3 = GET_IMAGE_G(ngf)
    def forward(self, z_code, sent_emb, word_embs, mask):
        """
        :param z_code: batch x cfg.GAN.Z_DIM
        :param sent_emb: batch x cfg.TEXT.EMBEDDING_DIM
        :param word_embs: batch x cdf x seq_len
        :param mask: batch x seq_len
        :return: (fake_imgs list, att_maps list, mu, logvar)
        """
        fake_imgs = []
        att_maps = []
        # conditioning augmentation: sample c_code ~ N(mu, sigma)
        c_code, mu, logvar = self.ca_net(sent_emb)
        if cfg.TREE.BRANCH_NUM > 0:
            h_code1 = self.h_net1(z_code, c_code)
            fake_img1 = self.img_net1(h_code1)
            fake_imgs.append(fake_img1)
        if cfg.TREE.BRANCH_NUM > 1:
            h_code2, att1 = \
                self.h_net2(h_code1, c_code, word_embs, mask)
            fake_img2 = self.img_net2(h_code2)
            fake_imgs.append(fake_img2)
            if att1 is not None:
                att_maps.append(att1)
        if cfg.TREE.BRANCH_NUM > 2:
            h_code3, att2 = \
                self.h_net3(h_code2, c_code, word_embs, mask)
            fake_img3 = self.img_net3(h_code3)
            fake_imgs.append(fake_img3)
            if att2 is not None:
                att_maps.append(att2)
        return fake_imgs, att_maps, mu, logvar
class G_DCGAN(nn.Module):
    """DCGAN-style variant of G_NET: shares one image head across stages and
    returns only the final image (as a single-element list)."""
    def __init__(self):
        super(G_DCGAN, self).__init__()
        ngf = cfg.GAN.GF_DIM
        nef = cfg.TEXT.EMBEDDING_DIM
        ncf = cfg.GAN.CONDITION_DIM
        self.ca_net = CA_NET()
        # 16gf x 64 x 64 --> gf x 64 x 64 --> 3 x 64 x 64
        if cfg.TREE.BRANCH_NUM > 0:
            self.h_net1 = INIT_STAGE_G(ngf * 16, ncf)
        # gf x 64 x 64
        if cfg.TREE.BRANCH_NUM > 1:
            self.h_net2 = NEXT_STAGE_G(ngf, nef, ncf)
        if cfg.TREE.BRANCH_NUM > 2:
            self.h_net3 = NEXT_STAGE_G(ngf, nef, ncf)
        self.img_net = GET_IMAGE_G(ngf)
    def forward(self, z_code, sent_emb, word_embs, mask):
        """
        :param z_code: batch x cfg.GAN.Z_DIM
        :param sent_emb: batch x cfg.TEXT.EMBEDDING_DIM
        :param word_embs: batch x cdf x seq_len
        :param mask: batch x seq_len
        :return: ([final fake image], att_maps list, mu, logvar)
        """
        att_maps = []
        # conditioning augmentation: sample c_code ~ N(mu, sigma)
        c_code, mu, logvar = self.ca_net(sent_emb)
        if cfg.TREE.BRANCH_NUM > 0:
            h_code = self.h_net1(z_code, c_code)
        if cfg.TREE.BRANCH_NUM > 1:
            h_code, att1 = self.h_net2(h_code, c_code, word_embs, mask)
            if att1 is not None:
                att_maps.append(att1)
        if cfg.TREE.BRANCH_NUM > 2:
            h_code, att2 = self.h_net3(h_code, c_code, word_embs, mask)
            if att2 is not None:
                att_maps.append(att2)
        fake_imgs = self.img_net(h_code)
        return [fake_imgs], att_maps, mu, logvar
# ############## D networks ##########################
def Block3x3_leakRelu(in_planes, out_planes):
    """conv3x3 + BatchNorm + LeakyReLU(0.2); spatial size preserved."""
    layers = [
        conv3x3(in_planes, out_planes),
        nn.BatchNorm2d(out_planes),
        nn.LeakyReLU(0.2, inplace=True),
    ]
    return nn.Sequential(*layers)
# Downsale the spatial size by a factor of 2
def downBlock(in_planes, out_planes):
    """Halve the spatial resolution: stride-2 4x4 conv + BatchNorm +
    LeakyReLU(0.2)."""
    layers = [
        nn.Conv2d(in_planes, out_planes, 4, 2, 1, bias=False),
        nn.BatchNorm2d(out_planes),
        nn.LeakyReLU(0.2, inplace=True),
    ]
    return nn.Sequential(*layers)
# Downsale the spatial size by a factor of 16
def encode_image_by_16times(ndf):
    """Four stride-2 4x4 convs that shrink a 3-channel image by 16x.

    Output has ndf*8 channels at 1/16 of the input resolution.  The first
    conv carries no BatchNorm, following the DCGAN discriminator convention.
    """
    def _down(cin, cout):
        # stride-2 conv + BN + LeakyReLU; halves the spatial size
        return [nn.Conv2d(cin, cout, 4, 2, 1, bias=False),
                nn.BatchNorm2d(cout),
                nn.LeakyReLU(0.2, inplace=True)]

    layers = [nn.Conv2d(3, ndf, 4, 2, 1, bias=False),
              nn.LeakyReLU(0.2, inplace=True)]
    layers += _down(ndf, ndf * 2)
    layers += _down(ndf * 2, ndf * 4)
    layers += _down(ndf * 4, ndf * 8)
    return nn.Sequential(*layers)
class D_GET_LOGITS(nn.Module):
    """Discriminator head mapping a 4x4 x ndf*8 feature map to a scalar
    real/fake probability per sample.

    With bcondition=True the sentence code is tiled to 4x4, concatenated with
    the image features, and fused by a 3x3 conv before the output conv.
    """
    def __init__(self, ndf, nef, bcondition=False):
        super(D_GET_LOGITS, self).__init__()
        self.df_dim = ndf
        self.ef_dim = nef
        self.bcondition = bcondition
        if self.bcondition:
            self.jointConv = Block3x3_leakRelu(ndf * 8 + nef, ndf * 8)
        self.outlogits = nn.Sequential(
            nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
            nn.Sigmoid())
    def forward(self, h_code, c_code=None):
        """h_code: batch x ndf*8 x 4 x 4; c_code: batch x nef (optional).

        Returns a flat batch-sized tensor of probabilities.
        """
        if self.bcondition and c_code is not None:
            # conditioning output
            c_code = c_code.view(-1, self.ef_dim, 1, 1)
            c_code = c_code.repeat(1, 1, 4, 4)
            # state size (ngf+egf) x 4 x 4
            h_c_code = torch.cat((h_code, c_code), 1)
            # state size ngf x in_size x in_size
            h_c_code = self.jointConv(h_c_code)
        else:
            h_c_code = h_code
        output = self.outlogits(h_c_code)
        return output.view(-1)
# For 64 x 64 images
# For 64 x 64 images
class D_NET64(nn.Module):
    """Discriminator backbone for 64x64 images: a single 16x downsampler.

    The conditional/unconditional heads (COND_DNET / UNCOND_DNET) are
    attached as attributes and invoked by the trainer, not by forward().
    """
    def __init__(self, b_jcu=True):
        super(D_NET64, self).__init__()
        ndf = cfg.GAN.DF_DIM
        nef = cfg.TEXT.EMBEDDING_DIM
        self.img_code_s16 = encode_image_by_16times(ndf)
        if b_jcu:
            self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)
        else:
            self.UNCOND_DNET = None
        self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)
    def forward(self, x_var):
        """Return the 4x4 x ndf*8 feature map for the logits heads."""
        x_code4 = self.img_code_s16(x_var) # 4 x 4 x 8df
        return x_code4
# For 128 x 128 images
# For 128 x 128 images
class D_NET128(nn.Module):
    """Discriminator backbone for 128x128 images: 16x downsampler, one extra
    2x downsample, then a channel-reducing 3x3 block back to ndf*8.

    The logits heads are attached as attributes and called by the trainer.
    """
    def __init__(self, b_jcu=True):
        super(D_NET128, self).__init__()
        ndf = cfg.GAN.DF_DIM
        nef = cfg.TEXT.EMBEDDING_DIM
        self.img_code_s16 = encode_image_by_16times(ndf)
        self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
        self.img_code_s32_1 = Block3x3_leakRelu(ndf * 16, ndf * 8)
        #
        if b_jcu:
            self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)
        else:
            self.UNCOND_DNET = None
        self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)
    def forward(self, x_var):
        """Return the 4x4 x ndf*8 feature map for the logits heads."""
        x_code8 = self.img_code_s16(x_var) # 8 x 8 x 8df
        x_code4 = self.img_code_s32(x_code8) # 4 x 4 x 16df
        x_code4 = self.img_code_s32_1(x_code4) # 4 x 4 x 8df
        return x_code4
# For 256 x 256 images
# For 256 x 256 images
class D_NET256(nn.Module):
    """Discriminator backbone for 256x256 images: 16x downsampler, two extra
    2x downsamples, then two channel-reducing 3x3 blocks back to ndf*8.

    The logits heads are attached as attributes and called by the trainer.
    """
    def __init__(self, b_jcu=True):
        super(D_NET256, self).__init__()
        ndf = cfg.GAN.DF_DIM
        nef = cfg.TEXT.EMBEDDING_DIM
        self.img_code_s16 = encode_image_by_16times(ndf)
        self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
        self.img_code_s64 = downBlock(ndf * 16, ndf * 32)
        self.img_code_s64_1 = Block3x3_leakRelu(ndf * 32, ndf * 16)
        self.img_code_s64_2 = Block3x3_leakRelu(ndf * 16, ndf * 8)
        if b_jcu:
            self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)
        else:
            self.UNCOND_DNET = None
        self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)
    def forward(self, x_var):
        """Return the 4x4 x ndf*8 feature map for the logits heads."""
        x_code16 = self.img_code_s16(x_var)
        x_code8 = self.img_code_s32(x_code16)
        x_code4 = self.img_code_s64(x_code8)
        x_code4 = self.img_code_s64_1(x_code4)
        x_code4 = self.img_code_s64_2(x_code4)
        return x_code4
| 21,632 | 33.338095 | 84 | py |
T2I_CL | T2I_CL-main/AttnGAN+CL/code/GlobalAttention.py | """
Global attention takes a matrix and a query matrix.
Based on each query vector q, it computes a parameterized convex combination
of the rows of the matrix.
H_1 H_2 H_3 ... H_n
q q q q
| | | |
\ | | /
.....
\ | /
a
Constructs a unit mapping.
$$(H_1 + H_n, q) => (a)$$
Where H is of `batch x n x dim` and q is of `batch x dim`.
References:
https://github.com/OpenNMT/OpenNMT-py/tree/fc23dfef1ba2f258858b2765d24565266526dc76/onmt/modules
http://www.aclweb.org/anthology/D15-1166
"""
import torch
import torch.nn as nn
def conv1x1(in_planes, out_planes):
    """Build a bias-free 1x1 convolution (stride 1, no padding)."""
    layer = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                      stride=1, padding=0, bias=False)
    return layer
def func_attention(query, context, gamma1):
    """Word-to-region attention (Eqs. 7-9 of the AttnGAN paper).

    query:   batch x ndf x queryL      (word features)
    context: batch x ndf x ih x iw     (image region features, sourceL=ih*iw)
    gamma1:  sharpening factor applied before the second softmax.

    Returns:
        weightedContext: batch x ndf x queryL — region features weighted by
            attention for each word.
        attn: batch x queryL x ih x iw — final attention maps.
    """
    batch_size = query.size(0)
    queryL = query.size(2)
    ih, iw = context.size(2), context.size(3)
    sourceL = ih * iw

    # flatten the spatial grid: batch x ndf x sourceL
    context = context.view(batch_size, -1, sourceL)

    # raw similarity scores: batch x sourceL x queryL  (Eq. 7)
    scores = torch.bmm(context.transpose(1, 2).contiguous(), query)

    # first softmax, over the query (word) axis  (Eq. 8)
    scores = nn.Softmax(dim=1)(scores.view(batch_size * sourceL, queryL))
    scores = scores.view(batch_size, sourceL, queryL)

    # transpose, sharpen by gamma1, softmax over the source (region) axis (Eq. 9)
    scores = scores.transpose(1, 2).contiguous().view(batch_size * queryL, sourceL)
    attn = nn.Softmax(dim=1)(scores * gamma1)
    attn = attn.view(batch_size, queryL, sourceL)

    # attention-weighted region features: batch x ndf x queryL
    weightedContext = torch.bmm(context, attn.transpose(1, 2).contiguous())
    return weightedContext, attn.view(batch_size, -1, ih, iw)
class GlobalAttentionGeneral(nn.Module):
    """Word-to-subregion attention used by the generator (AttnGAN Eq. 2).

    Projects the word context into the image feature space with a 1x1 conv,
    scores every (subregion, word) pair by dot product, and returns the
    attention-weighted word context for each subregion plus the attention
    maps reshaped to the image grid.
    """
    def __init__(self, idf, cdf):
        super(GlobalAttentionGeneral, self).__init__()
        self.conv_context = conv1x1(cdf, idf)
        self.sm = nn.Softmax(dim=1)
        self.mask = None
    def applyMask(self, mask):
        # mask: batch x sourceL; nonzero entries are excluded from attention
        self.mask = mask  # batch x sourceL
    def forward(self, input, context):
        """
        input: batch x idf x ih x iw (queryL=ihxiw)
        context: batch x cdf x sourceL
        Returns (weightedContext: batch x idf x ih x iw,
                 attn: batch x sourceL x ih x iw).
        """
        ih, iw = input.size(2), input.size(3)
        queryL = ih * iw
        batch_size, sourceL = context.size(0), context.size(2)
        # --> batch x queryL x idf
        target = input.view(batch_size, -1, queryL)
        targetT = torch.transpose(target, 1, 2).contiguous()
        # batch x cdf x sourceL --> batch x cdf x sourceL x 1
        sourceT = context.unsqueeze(3)
        # project word features into the image feature space
        # --> batch x idf x sourceL
        sourceT = self.conv_context(sourceT).squeeze(3)
        # Get attention
        # (batch x queryL x idf)(batch x idf x sourceL)
        # --> batch x queryL x sourceL
        attn = torch.bmm(targetT, sourceT)
        # --> batch*queryL x sourceL
        attn = attn.view(batch_size*queryL, sourceL)
        if self.mask is not None:
            # batch_size x sourceL --> batch_size*queryL x sourceL
            mask = self.mask.repeat(queryL, 1)
            # masked positions get -inf so they vanish after the softmax
            attn.data.masked_fill_(mask.data, -float('inf'))
        attn = self.sm(attn)  # Eq. (2)
        # --> batch x queryL x sourceL
        attn = attn.view(batch_size, queryL, sourceL)
        # --> batch x sourceL x queryL
        attn = torch.transpose(attn, 1, 2).contiguous()
        # (batch x idf x sourceL)(batch x sourceL x queryL)
        # --> batch x idf x queryL
        weightedContext = torch.bmm(sourceT, attn)
        weightedContext = weightedContext.view(batch_size, -1, ih, iw)
        attn = attn.view(batch_size, -1, ih, iw)
        return weightedContext, attn
| 4,236 | 31.098485 | 96 | py |
T2I_CL | T2I_CL-main/AttnGAN+CL/code/datasets.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from nltk.tokenize import RegexpTokenizer
from collections import defaultdict
from miscc.config import cfg
import torch
import torch.utils.data as data
from torch.autograd import Variable
import torchvision.transforms as transforms
import os
import sys
import numpy as np
import pandas as pd
from PIL import Image
import numpy.random as random
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
def prepare_data(data):
    """Unpack a dataloader batch into two caption-sorted views.

    Each view (images + captions) is independently sorted by its own caption
    lengths in decreasing order, as required by pack_padded_sequence.  The
    shallow list copy ``imgs_2`` keeps the original tensors, so each view
    indexes the unsorted images with its own permutation.  The returned
    ``sorted_cap_indices`` / ``sorted_cap_indices_2`` permutations let the
    caller map embeddings back to the original batch order.

    Returns:
        [real_imgs, real_imgs_2, captions, sorted_cap_lens, class_ids_1,
         keys, captions_2, sorted_cap_lens_2, class_ids_2,
         sorted_cap_indices, sorted_cap_indices_2]
    """
    imgs, captions, captions_lens, class_ids, keys, captions_2, captions_lens_2 = data
    # sort data by the length in a decreasing order
    sorted_cap_lens, sorted_cap_indices = \
        torch.sort(captions_lens, 0, True)
    sorted_cap_lens_2, sorted_cap_indices_2 = \
        torch.sort(captions_lens_2, 0, True)
    imgs_2 = imgs.copy()
    real_imgs = []
    for i in range(len(imgs)):
        imgs[i] = imgs[i][sorted_cap_indices]
        if cfg.CUDA:
            real_imgs.append(Variable(imgs[i]).cuda())
        else:
            real_imgs.append(Variable(imgs[i]))
    real_imgs_2 = []
    for i in range(len(imgs_2)):
        imgs_2[i] = imgs_2[i][sorted_cap_indices_2]
        if cfg.CUDA:
            real_imgs_2.append(Variable(imgs_2[i]).cuda())
        else:
            real_imgs_2.append(Variable(imgs_2[i]))
    captions = captions[sorted_cap_indices].squeeze()
    captions_2 = captions_2[sorted_cap_indices_2].squeeze()
    # sorted_captions_lens_2 = captions_lens_2[sorted_cap_indices].squeeze()
    # captions = torch.cat([captions, captions_2], dim=0)
    # sorted_cap_lens = torch.cat([sorted_cap_lens, sorted_captions_lens_2], dim=0)
    class_ids_1 = class_ids[sorted_cap_indices].numpy()
    class_ids_2 = class_ids[sorted_cap_indices_2].numpy()
    # sent_indices = sent_indices[sorted_cap_indices]
    # keys follow the first view's ordering only
    keys = [keys[i] for i in sorted_cap_indices.numpy()]
    # print('keys', type(keys), keys[-1]) # list
    if cfg.CUDA:
        captions = Variable(captions).cuda()
        sorted_cap_lens = Variable(sorted_cap_lens).cuda()
        captions_2 = Variable(captions_2).cuda()
        sorted_cap_lens_2 = Variable(sorted_cap_lens_2).cuda()
        sorted_cap_indices = sorted_cap_indices.cuda()
        sorted_cap_indices_2 = sorted_cap_indices_2.cuda()
    else:
        captions = Variable(captions)
        sorted_cap_lens = Variable(sorted_cap_lens)
        captions_2 = Variable(captions_2)
        sorted_cap_lens_2 = Variable(sorted_cap_lens_2)
    return [real_imgs, real_imgs_2, captions, sorted_cap_lens,
            class_ids_1, keys, captions_2, sorted_cap_lens_2, class_ids_2, sorted_cap_indices, sorted_cap_indices_2]
def get_imgs(img_path, imsize, bbox=None,
             transform=None, normalize=None):
    """Load one image and return it at each branch resolution.

    If ``bbox`` ([x_left, y_top, w, h]) is given, the image is first cropped
    to a square around the box (side = 1.5 * max(w, h), clamped to the image).
    ``transform`` (augmentation) is applied once at full size; then each of
    the cfg.TREE.BRANCH_NUM entries of ``imsize`` gets a resized, normalized
    copy (the last branch keeps the transformed size as-is).

    Returns:
        list of normalized tensors, one per branch (a single-element list
        when cfg.GAN.B_DCGAN).
    """
    img = Image.open(img_path).convert('RGB')
    width, height = img.size
    if bbox is not None:
        # square crop of side 1.5*max(w, h) centred on the box, clamped to the image
        r = int(np.maximum(bbox[2], bbox[3]) * 0.75)
        center_x = int((2 * bbox[0] + bbox[2]) / 2)
        center_y = int((2 * bbox[1] + bbox[3]) / 2)
        y1 = np.maximum(0, center_y - r)
        y2 = np.minimum(height, center_y + r)
        x1 = np.maximum(0, center_x - r)
        x2 = np.minimum(width, center_x + r)
        img = img.crop([x1, y1, x2, y2])
    if transform is not None:
        img = transform(img)
    ret = []
    if cfg.GAN.B_DCGAN:
        ret = [normalize(img)]
    else:
        for i in range(cfg.TREE.BRANCH_NUM):
            # print(imsize[i])
            if i < (cfg.TREE.BRANCH_NUM - 1):
                re_img = transforms.Resize(imsize[i])(img)
            else:
                re_img = img
            ret.append(normalize(re_img))
    return ret
class TextDataset(data.Dataset):
    def __init__(self, data_dir, split='train',
                 base_size=64,
                 transform=None, target_transform=None):
        """Dataset of (image pyramid, tokenized caption) pairs.

        Args:
            data_dir: dataset root; a 'birds' dir additionally loads CUB
                bounding boxes.
            split: 'train' or 'test'.
            base_size: smallest branch resolution; each further branch doubles it.
            transform: PIL-level augmentation applied before normalization.
            target_transform: kept for torchvision API symmetry (unused here).
        """
        self.transform = transform
        # map images to tensors in [-1, 1]
        self.norm = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        self.target_transform = target_transform
        self.embeddings_num = cfg.TEXT.CAPTIONS_PER_IMAGE
        # one resolution per branch: base_size, 2*base_size, ...
        self.imsize = []
        for i in range(cfg.TREE.BRANCH_NUM):
            self.imsize.append(base_size)
            base_size = base_size * 2
        self.data = []
        self.data_dir = data_dir
        if data_dir.find('birds') != -1:
            self.bbox = self.load_bbox()
        else:
            self.bbox = None
        split_dir = os.path.join(data_dir, split)
        self.filenames, self.captions, self.ixtoword, \
            self.wordtoix, self.n_words = self.load_text_data(data_dir, split)
        self.class_id = self.load_class_id(split_dir, len(self.filenames))
        self.number_example = len(self.filenames)
def load_bbox(self):
data_dir = self.data_dir
bbox_path = os.path.join(data_dir, 'CUB_200_2011/bounding_boxes.txt')
df_bounding_boxes = pd.read_csv(bbox_path,
delim_whitespace=True,
header=None).astype(int)
#
filepath = os.path.join(data_dir, 'CUB_200_2011/images.txt')
df_filenames = \
pd.read_csv(filepath, delim_whitespace=True, header=None)
filenames = df_filenames[1].tolist()
print('Total filenames: ', len(filenames), filenames[0])
#
filename_bbox = {img_file[:-4]: [] for img_file in filenames}
numImgs = len(filenames)
# for i in xrange(0, numImgs):
for i in range(0, numImgs):
# bbox = [x-left, y-top, width, height]
bbox = df_bounding_boxes.iloc[i][1:].tolist()
key = filenames[i][:-4]
filename_bbox[key] = bbox
#
return filename_bbox
def load_captions(self, data_dir, filenames):
all_captions = []
for i in range(len(filenames)):
cap_path = '%s/text/%s.txt' % (data_dir, filenames[i])
with open(cap_path, "r") as f:
captions = f.read().decode('utf8').split('\n')
cnt = 0
for cap in captions:
if len(cap) == 0:
continue
cap = cap.replace("\ufffd\ufffd", " ")
# picks out sequences of alphanumeric characters as tokens
# and drops everything else
tokenizer = RegexpTokenizer(r'\w+')
tokens = tokenizer.tokenize(cap.lower())
# print('tokens', tokens)
if len(tokens) == 0:
print('cap', cap)
continue
tokens_new = []
for t in tokens:
t = t.encode('ascii', 'ignore').decode('ascii')
if len(t) > 0:
tokens_new.append(t)
all_captions.append(tokens_new)
cnt += 1
if cnt == self.embeddings_num:
break
if cnt < self.embeddings_num:
print('ERROR: the captions for %s less than %d'
% (filenames[i], cnt))
return all_captions
def build_dictionary(self, train_captions, test_captions):
word_counts = defaultdict(float)
captions = train_captions + test_captions
for sent in captions:
for word in sent:
word_counts[word] += 1
vocab = [w for w in word_counts if word_counts[w] >= 0]
ixtoword = {}
ixtoword[0] = '<end>'
wordtoix = {}
wordtoix['<end>'] = 0
ix = 1
for w in vocab:
wordtoix[w] = ix
ixtoword[ix] = w
ix += 1
train_captions_new = []
for t in train_captions:
rev = []
for w in t:
if w in wordtoix:
rev.append(wordtoix[w])
# rev.append(0) # do not need '<end>' token
train_captions_new.append(rev)
test_captions_new = []
for t in test_captions:
rev = []
for w in t:
if w in wordtoix:
rev.append(wordtoix[w])
# rev.append(0) # do not need '<end>' token
test_captions_new.append(rev)
return [train_captions_new, test_captions_new,
ixtoword, wordtoix, len(ixtoword)]
def load_text_data(self, data_dir, split):
filepath = os.path.join(data_dir, 'captions.pickle')
train_names = self.load_filenames(data_dir, 'train')
test_names = self.load_filenames(data_dir, 'test')
if not os.path.isfile(filepath):
train_captions = self.load_captions(data_dir, train_names)
test_captions = self.load_captions(data_dir, test_names)
train_captions, test_captions, ixtoword, wordtoix, n_words = \
self.build_dictionary(train_captions, test_captions)
with open(filepath, 'wb') as f:
pickle.dump([train_captions, test_captions,
ixtoword, wordtoix], f, protocol=2)
print('Save to: ', filepath)
else:
with open(filepath, 'rb') as f:
x = pickle.load(f)
train_captions, test_captions = x[0], x[1]
ixtoword, wordtoix = x[2], x[3]
del x
n_words = len(ixtoword)
print('Load from: ', filepath)
if split == 'train':
# a list of list: each list contains
# the indices of words in a sentence
captions = train_captions
filenames = train_names
else: # split=='test'
captions = test_captions
filenames = test_names
return filenames, captions, ixtoword, wordtoix, n_words
def load_class_id(self, data_dir, total_num):
if os.path.isfile(data_dir + '/class_info.pickle'):
with open(data_dir + '/class_info.pickle', 'rb') as f:
# class_id = pickle.load(f)
class_id = pickle.load(f, encoding='latin1')
else:
class_id = np.arange(total_num)
return class_id
def load_filenames(self, data_dir, split):
filepath = '%s/%s/filenames.pickle' % (data_dir, split)
if os.path.isfile(filepath):
with open(filepath, 'rb') as f:
filenames = pickle.load(f)
print('Load filenames from: %s (%d)' % (filepath, len(filenames)))
else:
filenames = []
return filenames
def get_caption(self, sent_ix):
# a list of indices for a sentence
sent_caption = np.asarray(self.captions[sent_ix]).astype('int64')
if (sent_caption == 0).sum() > 0:
print('ERROR: do not need END (0) token', sent_caption)
num_words = len(sent_caption)
# pad with 0s (i.e., '<end>')
x = np.zeros((cfg.TEXT.WORDS_NUM, 1), dtype='int64')
x_len = num_words
if num_words <= cfg.TEXT.WORDS_NUM:
x[:num_words, 0] = sent_caption
else:
ix = list(np.arange(num_words)) # 1, 2, 3,..., maxNum
np.random.shuffle(ix)
ix = ix[:cfg.TEXT.WORDS_NUM]
ix = np.sort(ix)
x[:, 0] = sent_caption[ix]
x_len = cfg.TEXT.WORDS_NUM
return x, x_len
def __getitem__(self, index):
#
key = self.filenames[index]
cls_id = self.class_id[index]
#
if self.bbox is not None:
bbox = self.bbox[key]
data_dir = '%s/CUB_200_2011' % self.data_dir
else:
bbox = None
data_dir = self.data_dir
#
img_name = '%s/images/%s.jpg' % (data_dir, key)
imgs = get_imgs(img_name, self.imsize,
bbox, self.transform, normalize=self.norm)
# random select a sentence
sent_ix = random.randint(0, self.embeddings_num)
new_sent_ix = index * self.embeddings_num + sent_ix
caps, cap_len = self.get_caption(new_sent_ix)
# second sentence
sent_ix = random.randint(0, self.embeddings_num)
new_sent_ix = index * self.embeddings_num + sent_ix
caps_two, cap_len_two = self.get_caption(new_sent_ix)
return imgs, caps, cap_len, cls_id, key, caps_two, cap_len_two
def get_mis_caption(self, cls_id):
mis_match_captions_t = []
mis_match_captions = torch.zeros(99, cfg.TEXT.WORDS_NUM)
mis_match_captions_len = torch.zeros(99)
i = 0
while len(mis_match_captions_t) < 99:
idx = random.randint(0, self.number_example)
if cls_id == self.class_id[idx]:
continue
sent_ix = random.randint(0, self.embeddings_num)
new_sent_ix = idx * self.embeddings_num + sent_ix
caps_t, cap_len_t = self.get_caption(new_sent_ix)
mis_match_captions_t.append(torch.from_numpy(caps_t).squeeze())
mis_match_captions_len[i] = cap_len_t
i = i + 1
sorted_cap_lens, sorted_cap_indices = torch.sort(mis_match_captions_len, 0, True)
# import ipdb
# ipdb.set_trace()
for i in range(99):
mis_match_captions[i, :] = mis_match_captions_t[sorted_cap_indices[i]]
return mis_match_captions.type(torch.LongTensor).cuda(), sorted_cap_lens.type(torch.LongTensor).cuda()
    def __len__(self):
        # dataset size == number of images (captions are indexed per image)
        return len(self.filenames)
| 13,757 | 35.205263 | 116 | py |
T2I_CL | T2I_CL-main/AttnGAN+CL/code/nt_xent.py | import torch
import torch.nn as nn
class NT_Xent(nn.Module):
def __init__(self, batch_size, temperature, mask, device):
super(NT_Xent, self).__init__()
self.batch_size = batch_size
self.temperature = temperature
self.mask = mask
self.device = device
self.criterion = nn.CrossEntropyLoss(reduction="sum")
self.similarity_f = nn.CosineSimilarity(dim=2)
def forward(self, z_i, z_j):
"""
We do not sample negative examples explicitly.
Instead, given a positive pair, similar to (Chen et al., 2017), we treat the other 2(N − 1) augmented examples within a minibatch as negative examples.
"""
p1 = torch.cat((z_i, z_j), dim=0)
sim = self.similarity_f(p1.unsqueeze(1), p1.unsqueeze(0)) / self.temperature
sim_i_j = torch.diag(sim, self.batch_size)
sim_j_i = torch.diag(sim, -self.batch_size)
positive_samples = torch.cat((sim_i_j, sim_j_i), dim=0).reshape(self.batch_size * 2, 1)
negative_samples = sim[self.mask].reshape(self.batch_size * 2, -1)
labels = torch.zeros(self.batch_size * 2).to(self.device).long()
logits = torch.cat((positive_samples, negative_samples), dim=1)
loss = self.criterion(logits, labels)
loss /= 2 * self.batch_size
return loss
| 1,339 | 36.222222 | 159 | py |
T2I_CL | T2I_CL-main/AttnGAN+CL/code/trainer.py | from __future__ import print_function
from six.moves import range
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from PIL import Image
from miscc.config import cfg
from miscc.utils import mkdir_p
from miscc.utils import build_super_images, build_super_images2
from miscc.utils import weights_init, load_params, copy_G_params
from model import G_DCGAN, G_NET
from datasets import prepare_data
from model import RNN_ENCODER, CNN_ENCODER
from miscc.losses import words_loss
from miscc.losses import discriminator_loss, generator_loss, KL_loss
import os
import time
import numpy as np
import sys
from masks import mask_correlated_samples
from nt_xent import NT_Xent
from torch.utils.tensorboard import SummaryWriter
def l2norm(X, dim, eps=1e-8):
    """L2-normalize X along ``dim`` (eps keeps the division finite)."""
    magnitude = X.pow(2).sum(dim=dim, keepdim=True).sqrt() + eps
    return X / magnitude
# ################# Text to image task############################ #
class condGANTrainer(object):
    def __init__(self, output_dir, data_loader, n_words, ixtoword, dataset):
        """Set up output folders, device, and training hyper-parameters.

        output_dir: root folder for checkpoints/images (used only in training)
        data_loader: DataLoader yielding image/caption batches
        n_words: vocabulary size for the text encoder
        ixtoword: index -> word mapping (used for visualizations)
        dataset: dataset object (used at eval time for mismatched captions)
        """
        if cfg.TRAIN.FLAG:
            self.model_dir = os.path.join(output_dir, 'Model')
            self.image_dir = os.path.join(output_dir, 'Image')
            mkdir_p(self.model_dir)
            mkdir_p(self.image_dir)
        torch.cuda.set_device(cfg.GPU_ID)
        # cudnn autotuner: picks the fastest kernels for fixed-size inputs
        cudnn.benchmark = True
        self.batch_size = cfg.TRAIN.BATCH_SIZE
        self.max_epoch = cfg.TRAIN.MAX_EPOCH
        self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL
        self.n_words = n_words
        self.ixtoword = ixtoword
        self.data_loader = data_loader
        self.num_batches = len(self.data_loader)
        self.dataset = dataset
        # TensorBoard writer for the per-epoch G/D loss curves
        self.writer = SummaryWriter('runs/visualize')
    def build_models(self):
        """Build/load the frozen DAMSM encoders, the generator and the Ds.

        Returns [text_encoder, image_encoder, netG, netsD, start_epoch];
        start_epoch is parsed from the NET_G checkpoint name when resuming.
        """
        # ###################encoders######################################## #
        if cfg.TRAIN.NET_E == '':
            print('Error: no pretrained text-image encoders')
            return
        image_encoder = CNN_ENCODER(cfg.TEXT.EMBEDDING_DIM)
        img_encoder_path = cfg.TRAIN.NET_E.replace('text_encoder', 'image_encoder')
        state_dict = \
            torch.load(img_encoder_path, map_location=lambda storage, loc: storage)
        image_encoder.load_state_dict(state_dict)
        # the DAMSM encoders are pretrained and stay frozen during GAN training
        for p in image_encoder.parameters():
            p.requires_grad = False
        print('Load image encoder from:', img_encoder_path)
        image_encoder.eval()
        text_encoder = \
            RNN_ENCODER(self.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
        state_dict = \
            torch.load(cfg.TRAIN.NET_E,
                       map_location=lambda storage, loc: storage)
        text_encoder.load_state_dict(state_dict)
        for p in text_encoder.parameters():
            p.requires_grad = False
        print('Load text encoder from:', cfg.TRAIN.NET_E)
        text_encoder.eval()
        # #######################generator and discriminators############## #
        netsD = []
        if cfg.GAN.B_DCGAN:
            # single-stage DCGAN variant: one D at the final resolution only
            if cfg.TREE.BRANCH_NUM ==1:
                from model import D_NET64 as D_NET
            elif cfg.TREE.BRANCH_NUM == 2:
                from model import D_NET128 as D_NET
            else:  # cfg.TREE.BRANCH_NUM == 3:
                from model import D_NET256 as D_NET
            # TODO: elif cfg.TREE.BRANCH_NUM > 3:
            netG = G_DCGAN()
            netsD = [D_NET(b_jcu=False)]
        else:
            from model import D_NET64, D_NET128, D_NET256
            netG = G_NET()
            # one discriminator per branch/resolution (64, 128, 256)
            if cfg.TREE.BRANCH_NUM > 0:
                netsD.append(D_NET64())
            if cfg.TREE.BRANCH_NUM > 1:
                netsD.append(D_NET128())
            if cfg.TREE.BRANCH_NUM > 2:
                netsD.append(D_NET256())
            # TODO: if cfg.TREE.BRANCH_NUM > 3:
        netG.apply(weights_init)
        # print(netG)
        for i in range(len(netsD)):
            netsD[i].apply(weights_init)
            # print(netsD[i])
        print('# of netsD', len(netsD))
        #
        epoch = 0
        if cfg.TRAIN.NET_G != '':
            # resume: the epoch index is encoded in the checkpoint filename,
            # e.g. netG_epoch_123.pth -> start at epoch 124
            state_dict = \
                torch.load(cfg.TRAIN.NET_G, map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load G from: ', cfg.TRAIN.NET_G)
            istart = cfg.TRAIN.NET_G.rfind('_') + 1
            iend = cfg.TRAIN.NET_G.rfind('.')
            epoch = cfg.TRAIN.NET_G[istart:iend]
            epoch = int(epoch) + 1
            if cfg.TRAIN.B_NET_D:
                # discriminator checkpoints live next to the G checkpoint
                Gname = cfg.TRAIN.NET_G
                for i in range(len(netsD)):
                    s_tmp = Gname[:Gname.rfind('/')]
                    Dname = '%s/netD%d.pth' % (s_tmp, i)
                    print('Load D from: ', Dname)
                    state_dict = \
                        torch.load(Dname, map_location=lambda storage, loc: storage)
                    netsD[i].load_state_dict(state_dict)
        # ########################################################### #
        if cfg.CUDA:
            text_encoder = text_encoder.cuda()
            image_encoder = image_encoder.cuda()
            netG.cuda()
            for i in range(len(netsD)):
                netsD[i].cuda()
        return [text_encoder, image_encoder, netG, netsD, epoch]
def define_optimizers(self, netG, netsD):
optimizersD = []
num_Ds = len(netsD)
for i in range(num_Ds):
opt = optim.Adam(netsD[i].parameters(),
lr=cfg.TRAIN.DISCRIMINATOR_LR,
betas=(0.5, 0.999))
optimizersD.append(opt)
optimizerG = optim.Adam(netG.parameters(),
lr=cfg.TRAIN.GENERATOR_LR,
betas=(0.5, 0.999))
return optimizerG, optimizersD
def prepare_labels(self):
batch_size = self.batch_size
real_labels = Variable(torch.FloatTensor(batch_size).fill_(1))
fake_labels = Variable(torch.FloatTensor(batch_size).fill_(0))
match_labels = Variable(torch.LongTensor(range(batch_size)))
if cfg.CUDA:
real_labels = real_labels.cuda()
fake_labels = fake_labels.cuda()
match_labels = match_labels.cuda()
return real_labels, fake_labels, match_labels
def save_model(self, netG, avg_param_G, netsD, epoch):
backup_para = copy_G_params(netG)
load_params(netG, avg_param_G)
torch.save(netG.state_dict(),
'%s/netG_epoch_%d.pth' % (self.model_dir, epoch))
load_params(netG, backup_para)
#
for i in range(len(netsD)):
netD = netsD[i]
torch.save(netD.state_dict(),
'%s/netD%d.pth' % (self.model_dir, i))
print('Save G/Ds models.')
def set_requires_grad_value(self, models_list, brequires):
for i in range(len(models_list)):
for p in models_list[i].parameters():
p.requires_grad = brequires
    def save_img_results(self, netG, noise, sent_emb, words_embs, mask,
                         image_encoder, captions, cap_lens,
                         gen_iterations, name='current'):
        """Render attention-map mosaics for the current generator state.

        Saves one montage per generator attention stage, plus one built from
        the image-encoder (DAMSM) word/region attention on the finest fake.
        """
        # Save images
        fake_imgs, attention_maps, _, _ = netG(noise, sent_emb, words_embs, mask)
        for i in range(len(attention_maps)):
            if len(fake_imgs) > 1:
                # attention at stage i refines the previous (lower-res) image
                img = fake_imgs[i + 1].detach().cpu()
                lr_img = fake_imgs[i].detach().cpu()
            else:
                img = fake_imgs[0].detach().cpu()
                lr_img = None
            attn_maps = attention_maps[i]
            att_sze = attn_maps.size(2)
            img_set, _ = \
                build_super_images(img, captions, self.ixtoword,
                                   attn_maps, att_sze, lr_imgs=lr_img)
            if img_set is not None:
                im = Image.fromarray(img_set)
                fullpath = '%s/G_%s_%d_%d.png'\
                    % (self.image_dir, name, gen_iterations, i)
                im.save(fullpath)
        # for i in range(len(netsD)):
        # visualize DAMSM word/region attention on the highest-res output
        i = -1
        img = fake_imgs[i].detach()
        region_features, _ = image_encoder(img)
        att_sze = region_features.size(2)
        _, _, att_maps = words_loss(region_features.detach(),
                                    words_embs.detach(),
                                    None, cap_lens,
                                    None, self.batch_size)
        img_set, _ = \
            build_super_images(fake_imgs[i].detach().cpu(),
                               captions, self.ixtoword, att_maps, att_sze)
        if img_set is not None:
            im = Image.fromarray(img_set)
            fullpath = '%s/D_%s_%d.png'\
                % (self.image_dir, name, gen_iterations)
            im.save(fullpath)
    def train(self):
        """Main adversarial training loop with the contrastive text-pair loss.

        Each batch carries two captions per image; both are rendered, both
        contribute GAN/DAMSM/KL losses, and their global image codes form a
        positive pair for the NT-Xent contrastive term (weight 0.2).
        G weights are also tracked with an exponential moving average that
        is used for checkpointing.
        """
        text_encoder, image_encoder, netG, netsD, start_epoch = self.build_models()
        avg_param_G = copy_G_params(netG)
        optimizerG, optimizersD = self.define_optimizers(netG, netsD)
        real_labels, fake_labels, match_labels = self.prepare_labels()
        # a second, identical label set for the second-caption stream
        real_labels_2, fake_labels_2, match_labels_2 = self.prepare_labels()
        batch_size = self.batch_size
        nz = cfg.GAN.Z_DIM
        noise = Variable(torch.FloatTensor(batch_size, nz))
        fixed_noise = Variable(torch.FloatTensor(batch_size, nz).normal_(0, 1))
        if cfg.CUDA:
            noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
        gen_iterations = 0
        # contrastive (NT-Xent) criterion over paired sentence renderings
        mask = mask_correlated_samples(self)
        temperature = 0.5
        device = noise.get_device()
        criterion = NT_Xent(batch_size, temperature, mask, device)
        # gen_iterations = start_epoch * self.num_batches
        for epoch in range(start_epoch, self.max_epoch):
            start_t = time.time()
            data_iter = iter(self.data_loader)
            step = 0
            D_total_loss = 0
            G_total_loss = 0
            while step < self.num_batches:
                # reset requires_grad to be trainable for all Ds
                # self.set_requires_grad_value(netsD, True)
                ######################################################
                # (1) Prepare training data and Compute text embeddings
                ######################################################
                # NOTE(review): .next() relies on the legacy DataLoader
                # iterator API; newer torch requires next(data_iter) —
                # confirm the pinned torch version.
                data = data_iter.next()
                imgs, imgs_2, captions, cap_lens, class_ids, keys, captions_2, cap_lens_2, class_ids_2, \
                    sort_ind, sort_ind_2 = prepare_data(data)
                # hidden = text_encoder.init_hidden(batch_size)
                hidden = text_encoder.init_hidden(batch_size)
                # words_embs: batch_size x nef x seq_len
                # sent_emb: batch_size x nef
                words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)
                # encoders are frozen: detach so no gradients flow into them
                words_embs, sent_emb = words_embs.detach(), sent_emb.detach()
                mask = (captions == 0)
                num_words = words_embs.size(2)
                if mask.size(1) > num_words:
                    mask = mask[:, :num_words]
                # second caption of the same images (contrastive pair)
                words_embs_2, sent_emb_2 = text_encoder(captions_2, cap_lens_2, hidden)
                words_embs_2, sent_emb_2 = words_embs_2.detach(), sent_emb_2.detach()
                mask_2 = (captions_2 == 0)
                num_words_2 = words_embs_2.size(2)
                if mask_2.size(1) > num_words_2:
                    mask_2 = mask_2[:, :num_words_2]
                #######################################################
                # (2) Generate fake images
                ######################################################
                # the SAME noise is reused for both captions of an image
                noise.data.normal_(0, 1)
                fake_imgs, _, mu, logvar = netG(noise, sent_emb, words_embs, mask)
                fake_imgs_2, _, mu_2, logvar_2 = netG(noise, sent_emb_2, words_embs_2, mask_2)
                #######################################################
                # (3) Update D network
                ######################################################
                errD_total = 0
                D_logs = ''
                for i in range(len(netsD)):
                    netsD[i].zero_grad()
                    errD = discriminator_loss(netsD[i], imgs[i], fake_imgs[i],
                                              sent_emb, real_labels, fake_labels)
                    errD_2 = discriminator_loss(netsD[i], imgs_2[i], fake_imgs_2[i],
                                              sent_emb_2, real_labels_2, fake_labels_2)
                    errD += errD_2
                    # backward and update parameters
                    errD.backward()
                    optimizersD[i].step()
                    errD_total += errD
                    D_logs += 'errD%d: %.2f ' % (i, errD.item())
                #######################################################
                # (4) Update G network: maximize log(D(G(z)))
                ######################################################
                # compute total loss for training G
                step += 1
                gen_iterations += 1
                # do not need to compute gradient for Ds
                # self.set_requires_grad_value(netsD, False)
                netG.zero_grad()
                errG_total, G_logs, cnn_code = \
                    generator_loss(netsD, image_encoder, fake_imgs, real_labels,
                                   words_embs, sent_emb, match_labels, cap_lens, class_ids)
                kl_loss = KL_loss(mu, logvar)
                errG_total += kl_loss
                G_logs += 'kl_loss: %.2f ' % kl_loss.item()
                errG_total_2, G_logs_2, cnn_code_2 = \
                    generator_loss(netsD, image_encoder, fake_imgs_2, real_labels_2,
                                   words_embs_2, sent_emb_2, match_labels_2, cap_lens_2, class_ids_2)
                kl_loss_2 = KL_loss(mu_2, logvar_2)
                errG_total_2 += kl_loss_2
                G_logs_2 += 'kl_loss: %.2f ' % kl_loss_2.item()
                errG_total += errG_total_2
                # undo the caption-length sorting so both image codes align
                # per sample before the contrastive pairing
                _, ori_indices = torch.sort(sort_ind, 0)
                _, ori_indices_2 = torch.sort(sort_ind_2, 0)
                total_contra_loss = 0
                i = -1
                cnn_code = cnn_code[ori_indices]
                cnn_code_2 = cnn_code_2[ori_indices_2]
                cnn_code = l2norm(cnn_code, dim=1)
                cnn_code_2 = l2norm(cnn_code_2, dim=1)
                contrative_loss = criterion(cnn_code, cnn_code_2)
                total_contra_loss += contrative_loss * 0.2
                G_logs += 'contrative_loss: %.2f ' % total_contra_loss.item()
                errG_total += total_contra_loss
                # backward and update parameters
                errG_total.backward()
                optimizerG.step()
                # exponential moving average of G weights (decay 0.999)
                # NOTE(review): add_(scalar, tensor) is the deprecated
                # signature; newer torch wants add_(tensor, alpha=scalar)
                for p, avg_p in zip(netG.parameters(), avg_param_G):
                    avg_p.mul_(0.999).add_(0.001, p.data)
                if gen_iterations % 100 == 0:
                    print(D_logs + '\n' + G_logs + '\n' + G_logs_2)
                # save images
                if gen_iterations % 1000 == 0:
                    backup_para = copy_G_params(netG)
                    load_params(netG, avg_param_G)
                    # self.save_img_results(netG, fixed_noise, sent_emb,
                    #                       words_embs, mask, image_encoder,
                    #                       captions, cap_lens, epoch, name='average')
                    load_params(netG, backup_para)
                    #
                    # self.save_img_results(netG, fixed_noise, sent_emb,
                    #                       words_embs, mask, image_encoder,
                    #                       captions, cap_lens,
                    #                       epoch, name='current')
                D_total_loss += errD_total.item()
                G_total_loss += errG_total.item()
            end_t = time.time()
            print('''[%d/%d][%d]
                  Loss_D: %.2f Loss_G: %.2f Time: %.2fs'''
                  % (epoch, self.max_epoch, self.num_batches,
                     errD_total.item(), errG_total.item(),
                     end_t - start_t))
            if epoch % cfg.TRAIN.SNAPSHOT_INTERVAL == 0:  # and epoch != 0:
                self.save_model(netG, avg_param_G, netsD, epoch)
            D_total_loss = D_total_loss / step
            G_total_loss = G_total_loss / step
            # self.writer.add_scalar('Loss_D', D_total_loss , epoch + 1)
            # self.writer.add_scalar('Loss_G', G_total_loss , epoch + 1)
            self.writer.add_scalars('Loss_D and Loss_G', {'Loss_D': D_total_loss, 'Loss_G': G_total_loss}, epoch + 1)
        self.writer.close()
        self.save_model(netG, avg_param_G, netsD, self.max_epoch)
def save_singleimages(self, images, filenames, save_dir,
split_dir, sentenceID=0):
for i in range(images.size(0)):
s_tmp = '%s/single_samples/%s/%s' %\
(save_dir, split_dir, filenames[i])
folder = s_tmp[:s_tmp.rfind('/')]
if not os.path.isdir(folder):
print('Make a new folder: ', folder)
mkdir_p(folder)
fullpath = '%s_%d.jpg' % (s_tmp, sentenceID)
# range from [-1, 1] to [0, 1]
# img = (images[i] + 1.0) / 2
img = images[i].add(1).div(2).mul(255).clamp(0, 255).byte()
# range from [0, 1] to [0, 255]
ndarr = img.permute(1, 2, 0).data.cpu().numpy()
im = Image.fromarray(ndarr)
im.save(fullpath)
    def sampling(self, split_dir):
        """Generate images for the whole split and report R-precision.

        The data loader is traversed repeatedly (up to 11 passes) until
        30000 samples are scored.  For every sample the finest fake image is
        saved, then its image code is ranked against the true sentence plus
        99 mismatched ones; R-precision mean/std over 10 folds is printed.
        """
        if cfg.TRAIN.NET_G == '':
            print('Error: the path for morels is not found!')
        else:
            if split_dir == 'test':
                split_dir = 'valid'
            # Build and load the generator
            if cfg.GAN.B_DCGAN:
                netG = G_DCGAN()
            else:
                netG = G_NET()
            netG.apply(weights_init)
            netG.cuda()
            netG.eval()
            # load text encoder
            text_encoder = RNN_ENCODER(self.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
            state_dict = torch.load(cfg.TRAIN.NET_E, map_location=lambda storage, loc: storage)
            text_encoder.load_state_dict(state_dict)
            print('Load text encoder from:', cfg.TRAIN.NET_E)
            text_encoder = text_encoder.cuda()
            text_encoder.eval()
            # load image encoder (shares the NET_E path naming convention)
            image_encoder = CNN_ENCODER(cfg.TEXT.EMBEDDING_DIM)
            img_encoder_path = cfg.TRAIN.NET_E.replace('text_encoder', 'image_encoder')
            state_dict = torch.load(img_encoder_path, map_location=lambda storage, loc: storage)
            image_encoder.load_state_dict(state_dict)
            print('Load image encoder from:', img_encoder_path)
            image_encoder = image_encoder.cuda()
            image_encoder.eval()
            batch_size = self.batch_size
            nz = cfg.GAN.Z_DIM
            # NOTE(review): volatile=True is a removed pre-0.4 API; newer
            # torch would use torch.no_grad() — confirm the pinned version
            noise = Variable(torch.FloatTensor(batch_size, nz), volatile=True)
            noise = noise.cuda()
            model_dir = cfg.TRAIN.NET_G
            state_dict = torch.load(model_dir, map_location=lambda storage, loc: storage)
            # state_dict = torch.load(cfg.TRAIN.NET_G)
            netG.load_state_dict(state_dict)
            print('Load G from: ', model_dir)
            # the path to save generated images
            s_tmp = model_dir[:model_dir.rfind('.pth')]
            save_dir = '%s/%s' % (s_tmp, split_dir)
            mkdir_p(save_dir)
            cnt = 0
            R_count = 0
            # R[k] == 1 iff the true sentence ranked first for sample k
            R = np.zeros(30000)
            cont = True
            for ii in range(11):  # (cfg.TEXT.CAPTIONS_PER_IMAGE):
                if (cont == False):
                    break
                for step, data in enumerate(self.data_loader, 0):
                    cnt += batch_size
                    if (cont == False):
                        break
                    if step % 100 == 0:
                        print('cnt: ', cnt)
                    # if step > 50:
                    #     break
                    # imgs, captions, cap_lens, class_ids, keys = prepare_data(data)
                    imgs, imgs_2, captions, cap_lens, class_ids, keys, captions_2, cap_lens_2, class_ids_2, \
                        sort_ind, sort_ind_2 = prepare_data(data)
                    hidden = text_encoder.init_hidden(batch_size)
                    # words_embs: batch_size x nef x seq_len
                    # sent_emb: batch_size x nef
                    words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)
                    words_embs, sent_emb = words_embs.detach(), sent_emb.detach()
                    mask = (captions == 0)
                    num_words = words_embs.size(2)
                    if mask.size(1) > num_words:
                        mask = mask[:, :num_words]
                    #######################################################
                    # (2) Generate fake images
                    ######################################################
                    noise.data.normal_(0, 1)
                    fake_imgs, _, _, _ = netG(noise, sent_emb, words_embs, mask)
                    # save only the finest (k == -1) resolution per sample
                    for j in range(batch_size):
                        s_tmp = '%s/single/%s' % (save_dir, keys[j])
                        folder = s_tmp[:s_tmp.rfind('/')]
                        if not os.path.isdir(folder):
                            #print('Make a new folder: ', folder)
                            mkdir_p(folder)
                        k = -1
                        # for k in range(len(fake_imgs)):
                        im = fake_imgs[k][j].data.cpu().numpy()
                        # [-1, 1] --> [0, 255]
                        im = (im + 1.0) * 127.5
                        im = im.astype(np.uint8)
                        im = np.transpose(im, (1, 2, 0))
                        im = Image.fromarray(im)
                        fullpath = '%s_s%d_%d.png' % (s_tmp, k, ii)
                        im.save(fullpath)
                    _, cnn_code = image_encoder(fake_imgs[-1])
                    # R-precision: true sentence (row 0) vs 99 mismatches
                    for i in range(batch_size):
                        mis_captions, mis_captions_len = self.dataset.get_mis_caption(class_ids[i])
                        hidden = text_encoder.init_hidden(99)
                        _, sent_emb_t = text_encoder(mis_captions, mis_captions_len, hidden)
                        rnn_code = torch.cat((sent_emb[i, :].unsqueeze(0), sent_emb_t), 0)
                        ### cnn_code = 1 * nef
                        ### rnn_code = 100 * nef
                        scores = torch.mm(cnn_code[i].unsqueeze(0), rnn_code.transpose(0, 1))  # 1* 100
                        cnn_code_norm = torch.norm(cnn_code[i].unsqueeze(0), 2, dim=1, keepdim=True)
                        rnn_code_norm = torch.norm(rnn_code, 2, dim=1, keepdim=True)
                        norm = torch.mm(cnn_code_norm, rnn_code_norm.transpose(0, 1))
                        # cosine similarities; argmax == 0 means the true
                        # sentence won the ranking
                        scores0 = scores / norm.clamp(min=1e-8)
                        if torch.argmax(scores0) == 0:
                            R[R_count] = 1
                        R_count += 1
                    if R_count >= 30000:
                        # 10-fold mean/std of the hit-rate, then stop
                        sum = np.zeros(10)
                        np.random.shuffle(R)
                        for i in range(10):
                            sum[i] = np.average(R[i * 3000:(i + 1) * 3000 - 1])
                        R_mean = np.average(sum)
                        R_std = np.std(sum)
                        print("R mean:{:.4f} std:{:.4f}".format(R_mean, R_std))
                        cont = False
    def gen_example(self, data_dic):
        """Generate images (plus attention visualizations) for custom captions.

        data_dic maps a name to (captions, cap_lens, sorted_indices) numpy
        arrays; every stage's image and attention montage is written under a
        folder derived from the NET_G checkpoint path.
        """
        if cfg.TRAIN.NET_G == '':
            print('Error: the path for morels is not found!')
        else:
            # Build and load the generator
            text_encoder = \
                RNN_ENCODER(self.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
            state_dict = \
                torch.load(cfg.TRAIN.NET_E, map_location=lambda storage, loc: storage)
            text_encoder.load_state_dict(state_dict)
            print('Load text encoder from:', cfg.TRAIN.NET_E)
            text_encoder = text_encoder.cuda()
            text_encoder.eval()
            # the path to save generated images
            if cfg.GAN.B_DCGAN:
                netG = G_DCGAN()
            else:
                netG = G_NET()
            s_tmp = cfg.TRAIN.NET_G[:cfg.TRAIN.NET_G.rfind('.pth')]
            model_dir = cfg.TRAIN.NET_G
            state_dict = \
                torch.load(model_dir, map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load G from: ', model_dir)
            netG.cuda()
            netG.eval()
            for key in data_dic:
                save_dir = '%s/%s' % (s_tmp, key)
                mkdir_p(save_dir)
                captions, cap_lens, sorted_indices = data_dic[key]
                batch_size = captions.shape[0]
                nz = cfg.GAN.Z_DIM
                # NOTE(review): volatile=True is the removed pre-0.4 API;
                # newer torch would use torch.no_grad()
                captions = Variable(torch.from_numpy(captions), volatile=True)
                cap_lens = Variable(torch.from_numpy(cap_lens), volatile=True)
                captions = captions.cuda()
                cap_lens = cap_lens.cuda()
                for i in range(1):  # 16
                    noise = Variable(torch.FloatTensor(batch_size, nz), volatile=True)
                    noise = noise.cuda()
                    #######################################################
                    # (1) Extract text embeddings
                    ######################################################
                    hidden = text_encoder.init_hidden(batch_size)
                    # words_embs: batch_size x nef x seq_len
                    # sent_emb: batch_size x nef
                    words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)
                    mask = (captions == 0)
                    #######################################################
                    # (2) Generate fake images
                    ######################################################
                    noise.data.normal_(0, 1)
                    fake_imgs, attention_maps, _, _ = netG(noise, sent_emb, words_embs, mask)
                    # G attention
                    cap_lens_np = cap_lens.cpu().data.numpy()
                    for j in range(batch_size):
                        save_name = '%s/%d_s_%d' % (save_dir, i, sorted_indices[j])
                        # save every resolution stage of sample j
                        for k in range(len(fake_imgs)):
                            im = fake_imgs[k][j].data.cpu().numpy()
                            # [-1, 1] -> [0, 255], CHW -> HWC
                            im = (im + 1.0) * 127.5
                            im = im.astype(np.uint8)
                            # print('im', im.shape)
                            im = np.transpose(im, (1, 2, 0))
                            # print('im', im.shape)
                            im = Image.fromarray(im)
                            fullpath = '%s_g%d.png' % (save_name, k)
                            im.save(fullpath)
                        # attention montage per refinement stage
                        for k in range(len(attention_maps)):
                            if len(fake_imgs) > 1:
                                im = fake_imgs[k + 1].detach().cpu()
                            else:
                                im = fake_imgs[0].detach().cpu()
                            attn_maps = attention_maps[k]
                            att_sze = attn_maps.size(2)
                            img_set, sentences = \
                                build_super_images2(im[j].unsqueeze(0),
                                                    captions[j].unsqueeze(0),
                                                    [cap_lens_np[j]], self.ixtoword,
                                                    [attn_maps[j]], att_sze)
                            if img_set is not None:
                                im = Image.fromarray(img_set)
                                fullpath = '%s_a%d.png' % (save_name, k)
                                im.save(fullpath)
| 28,328 | 43.057543 | 118 | py |
T2I_CL | T2I_CL-main/AttnGAN+CL/code/miscc/losses.py | import torch
import torch.nn as nn
import numpy as np
from miscc.config import cfg
from GlobalAttention import func_attention
# ##################Loss for matching text-image###################
def cosine_similarity(x1, x2, dim=1, eps=1e-8):
    """Returns cosine similarity between x1 and x2, computed along dim."""
    dot = (x1 * x2).sum(dim)
    mag1 = torch.norm(x1, 2, dim)
    mag2 = torch.norm(x2, 2, dim)
    # eps clamp guards against division by a zero-magnitude vector
    return (dot / (mag1 * mag2).clamp(min=eps)).squeeze()
def sent_loss(cnn_code, rnn_code, labels, class_ids,
              batch_size, eps=1e-8):
    """Sentence-level DAMSM matching loss, both matching directions.

    cnn_code: batch x nef global image codes
    rnn_code: batch x nef sentence embeddings
    labels: diagonal targets (arange(batch)), or None to skip the loss
    class_ids: per-sample class ids; same-class pairs are masked out of the
        negatives so they are not punished as mismatches
    Returns (image->text loss, text->image loss) or (None, None).
    """
    # ### Mask mis-match samples ###
    # that come from the same class as the real sample ###
    masks = []
    if class_ids is not None:
        for i in range(batch_size):
            mask = (class_ids == class_ids[i]).astype(np.uint8)
            mask[i] = 0  # keep the true (diagonal) pair itself
            masks.append(mask.reshape((1, -1)))
        masks = np.concatenate(masks, 0)
        # masks: batch_size x batch_size
        # masks = torch.ByteTensor(masks)
        masks = torch.BoolTensor(masks)
        if cfg.CUDA:
            masks = masks.cuda()
    # --> seq_len x batch_size x nef
    if cnn_code.dim() == 2:
        cnn_code = cnn_code.unsqueeze(0)
        rnn_code = rnn_code.unsqueeze(0)
    # cnn_code_norm / rnn_code_norm: seq_len x batch_size x 1
    cnn_code_norm = torch.norm(cnn_code, 2, dim=2, keepdim=True)
    rnn_code_norm = torch.norm(rnn_code, 2, dim=2, keepdim=True)
    # scores* / norm*: seq_len x batch_size x batch_size
    scores0 = torch.bmm(cnn_code, rnn_code.transpose(1, 2))
    norm0 = torch.bmm(cnn_code_norm, rnn_code_norm.transpose(1, 2))
    # cosine similarity scaled by GAMMA3 (softmax sharpening factor)
    scores0 = scores0 / norm0.clamp(min=eps) * cfg.TRAIN.SMOOTH.GAMMA3
    # --> batch_size x batch_size
    scores0 = scores0.squeeze()
    if class_ids is not None:
        # -inf removes same-class negatives from the softmax entirely
        scores0.data.masked_fill_(masks, -float('inf'))
    scores1 = scores0.transpose(0, 1)
    if labels is not None:
        loss0 = nn.CrossEntropyLoss()(scores0, labels)
        loss1 = nn.CrossEntropyLoss()(scores1, labels)
    else:
        loss0, loss1 = None, None
    return loss0, loss1
def words_loss(img_features, words_emb, labels,
               cap_lens, class_ids, batch_size):
    """Word-level DAMSM attention-matching loss.

    words_emb(query): batch x nef x seq_len
    img_features(context): batch x nef x 17 x 17

    For each caption i, every word attends over every image's regions; the
    attention-weighted region codes are compared to the words to score how
    well image j matches caption i.  Returns (loss0, loss1, att_maps) where
    the losses are the two CrossEntropy directions (None when labels is
    None) and att_maps holds each caption's attention on its own image.
    """
    masks = []
    att_maps = []
    similarities = []
    cap_lens = cap_lens.data.tolist()
    for i in range(batch_size):
        if class_ids is not None:
            # mask out same-class pairs so they are not treated as negatives
            mask = (class_ids == class_ids[i]).astype(np.uint8)
            mask[i] = 0
            masks.append(mask.reshape((1, -1)))
        # Get the i-th text description
        words_num = cap_lens[i]
        # -> 1 x nef x words_num
        word = words_emb[i, :, :words_num].unsqueeze(0).contiguous()
        # -> batch_size x nef x words_num
        word = word.repeat(batch_size, 1, 1)
        # batch x nef x 17*17
        context = img_features
        """
        word(query): batch x nef x words_num
        context: batch x nef x 17 x 17
        weiContext: batch x nef x words_num
        attn: batch x words_num x 17 x 17
        """
        weiContext, attn = func_attention(word, context, cfg.TRAIN.SMOOTH.GAMMA1)
        # keep only caption i's attention on its OWN image for visualization
        att_maps.append(attn[i].unsqueeze(0).contiguous())
        # --> batch_size x words_num x nef
        word = word.transpose(1, 2).contiguous()
        weiContext = weiContext.transpose(1, 2).contiguous()
        # --> batch_size*words_num x nef
        word = word.view(batch_size * words_num, -1)
        weiContext = weiContext.view(batch_size * words_num, -1)
        #
        # -->batch_size*words_num
        row_sim = cosine_similarity(word, weiContext)
        # --> batch_size x words_num
        row_sim = row_sim.view(batch_size, words_num)
        # Eq. (10): log-sum-exp pooling of per-word similarities (GAMMA2)
        row_sim.mul_(cfg.TRAIN.SMOOTH.GAMMA2).exp_()
        row_sim = row_sim.sum(dim=1, keepdim=True)
        row_sim = torch.log(row_sim)
        # --> 1 x batch_size
        # similarities(i, j): the similarity between the i-th image and the j-th text description
        similarities.append(row_sim)
    # batch_size x batch_size
    similarities = torch.cat(similarities, 1)
    if class_ids is not None:
        masks = np.concatenate(masks, 0)
        # masks: batch_size x batch_size
        # masks = torch.ByteTensor(masks)
        masks = torch.BoolTensor(masks)
        if cfg.CUDA:
            masks = masks.cuda()
    similarities = similarities * cfg.TRAIN.SMOOTH.GAMMA3
    if class_ids is not None:
        # -inf removes same-class negatives from the softmax
        similarities.data.masked_fill_(masks, -float('inf'))
    similarities1 = similarities.transpose(0, 1)
    if labels is not None:
        loss0 = nn.CrossEntropyLoss()(similarities, labels)
        loss1 = nn.CrossEntropyLoss()(similarities1, labels)
    else:
        loss0, loss1 = None, None
    return loss0, loss1, att_maps
# ##################Loss for G and Ds##############################
def discriminator_loss(netD, real_imgs, fake_imgs, conditions,
                       real_labels, fake_labels):
    """Conditional + (optional) unconditional BCE loss for one discriminator.

    Besides real/fake pairs, a "wrong" pair (real image, another sample's
    sentence) is scored as fake so D also learns text-image alignment.
    """
    bce = nn.BCELoss()
    real_features = netD(real_imgs)
    # detach: the D update must not backprop into the generator
    fake_features = netD(fake_imgs.detach())
    # conditional heads: image features judged jointly with the sentence
    cond_real_errD = bce(netD.COND_DNET(real_features, conditions), real_labels)
    cond_fake_errD = bce(netD.COND_DNET(fake_features, conditions), fake_labels)
    # shift the batch by one to pair each real image with mismatched text
    batch_size = real_features.size(0)
    cond_wrong_logits = netD.COND_DNET(real_features[:(batch_size - 1)],
                                       conditions[1:batch_size])
    cond_wrong_errD = bce(cond_wrong_logits, fake_labels[1:batch_size])
    if netD.UNCOND_DNET is None:
        return cond_real_errD + (cond_fake_errD + cond_wrong_errD) / 2.
    # unconditional heads: plain real-vs-fake on the image features alone
    real_errD = bce(netD.UNCOND_DNET(real_features), real_labels)
    fake_errD = bce(netD.UNCOND_DNET(fake_features), fake_labels)
    return ((real_errD + cond_real_errD) / 2. +
            (fake_errD + cond_fake_errD + cond_wrong_errD) / 3.)
def generator_loss(netsD, image_encoder, fake_imgs, real_labels,
                   words_embs, sent_emb, match_labels,
                   cap_lens, class_ids):
    """Total generator loss across all discriminator branches.

    Sums conditional (plus optional unconditional) BCE terms per branch;
    for the finest branch only, adds the LAMBDA-weighted DAMSM word- and
    sentence-matching losses.  Returns (loss, log string, cnn_code) where
    cnn_code is the image-encoder code of the finest fake image.
    """
    numDs = len(netsD)
    batch_size = real_labels.size(0)
    logs = ''
    # Forward
    errG_total = 0
    for i in range(numDs):
        features = netsD[i](fake_imgs[i])
        # G wants every D head to predict "real" on its fakes
        cond_logits = netsD[i].COND_DNET(features, sent_emb)
        cond_errG = nn.BCELoss()(cond_logits, real_labels)
        if netsD[i].UNCOND_DNET is not None:
            logits = netsD[i].UNCOND_DNET(features)
            errG = nn.BCELoss()(logits, real_labels)
            g_loss = errG + cond_errG
        else:
            g_loss = cond_errG
        errG_total += g_loss
        # err_img = errG_total.data[0]
        logs += 'g_loss%d: %.2f ' % (i, g_loss.item())
        # Ranking loss
        if i == (numDs - 1):
            # DAMSM matching losses on the finest (last-branch) image only
            # words_features: batch_size x nef x 17 x 17
            # sent_code: batch_size x nef
            region_features, cnn_code = image_encoder(fake_imgs[i])
            w_loss0, w_loss1, _ = words_loss(region_features, words_embs,
                                             match_labels, cap_lens,
                                             class_ids, batch_size)
            w_loss = (w_loss0 + w_loss1) * \
                cfg.TRAIN.SMOOTH.LAMBDA
            # err_words = err_words + w_loss.data[0]
            s_loss0, s_loss1 = sent_loss(cnn_code, sent_emb,
                                         match_labels, class_ids, batch_size)
            s_loss = (s_loss0 + s_loss1) * \
                cfg.TRAIN.SMOOTH.LAMBDA
            # err_sent = err_sent + s_loss.data[0]
            errG_total += w_loss + s_loss
            logs += 'w_loss: %.2f s_loss: %.2f ' % (w_loss.item(), s_loss.item())
    return errG_total, logs, cnn_code
##################################################################
def KL_loss(mu, logvar):
    """KL divergence KL(N(mu, sigma^2) || N(0, 1)), averaged over elements.

    ``logvar`` holds log(sigma^2). Closed form:
    -0.5 * mean(1 + log(sigma^2) - mu^2 - sigma^2)
    """
    element_wise = 1 + logvar - mu.pow(2) - logvar.exp()
    return torch.mean(element_wise).mul_(-0.5)
| 8,228 | 36.575342 | 98 | py |
T2I_CL | T2I_CL-main/AttnGAN+CL/code/miscc/utils.py | import os
import errno
import numpy as np
from torch.nn import init
import torch
import torch.nn as nn
from PIL import Image, ImageDraw, ImageFont
from copy import deepcopy
import skimage.transform
from miscc.config import cfg
# For visualization ################################################
# Mapping from word index -> RGB color used to tint that word's column in the
# caption strip above each attention row (see build_super_images below).
COLOR_DIC = {0:[128,64,128], 1:[244, 35,232],
             2:[70, 70, 70], 3:[102,102,156],
             4:[190,153,153], 5:[153,153,153],
             6:[250,170, 30], 7:[220, 220, 0],
             8:[107,142, 35], 9:[152,251,152],
             10:[70,130,180], 11:[220,20, 60],
             12:[255, 0, 0], 13:[0, 0, 142],
             14:[119,11, 32], 15:[0, 60,100],
             16:[0, 80, 100], 17:[0, 0, 230],
             18:[0, 0, 70], 19:[0, 0, 0]}
# Pixel height reserved for each caption text row in the visualizations.
FONT_MAX = 50
def drawCaption(convas, captions, ixtoword, vis_size, off1=2, off2=2):
    """Render each caption's words onto *convas* and collect the word lists.

    One text row of height FONT_MAX per caption; word ``j`` is drawn at
    column ``(j + off1) * (vis_size + off2)``. Token id 0 terminates a
    caption. Returns ``(PIL image, list of per-caption word lists)``.
    """
    n_captions = captions.size(0)
    canvas_img = Image.fromarray(convas)
    # NOTE(review): hard-coded font path; assumes Pillow's bundled test
    # fonts are present on disk -- confirm for deployment environments.
    font = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 50)
    drawer = ImageDraw.Draw(canvas_img)
    sentence_list = []
    for row in range(n_captions):
        token_ids = captions[row].data.cpu().numpy()
        words = []
        for col in range(len(token_ids)):
            if token_ids[col] == 0:  # padding / end-of-caption token
                break
            word = ixtoword[token_ids[col]].encode('ascii', 'ignore').decode('ascii')
            x = (col + off1) * (vis_size + off2)
            y = row * FONT_MAX
            # Only the first 6 characters of each word are drawn.
            drawer.text((x, y), '%d:%s' % (col, word[:6]),
                        font=font, fill=(255, 255, 255, 255))
            words.append(word)
        sentence_list.append(words)
    return canvas_img, sentence_list
def build_super_images(real_imgs, captions, ixtoword,
                       attn_maps, att_sze, lr_imgs=None,
                       batch_size=cfg.TRAIN.BATCH_SIZE,
                       max_word_num=cfg.TEXT.WORDS_NUM):
    """Assemble an attention-visualization grid for up to 8 images.

    Each output row is: caption strip, raw attention maps, and the image
    blended with each word's attention map. Returns ``(uint8 image array,
    sentences)`` or ``None`` when the caption strip and image row widths
    disagree.
    """
    # Visualize at most 8 examples per call.
    nvis = 8
    real_imgs = real_imgs[:nvis]
    if lr_imgs is not None:
        lr_imgs = lr_imgs[:nvis]
    # For 17x17 attention maps, upscale by a fixed factor of 16.
    if att_sze == 17:
        vis_size = att_sze * 16
    else:
        vis_size = real_imgs.size(2)
    # White canvas for the caption text; one FONT_MAX-tall band per example.
    text_convas = \
        np.ones([batch_size * FONT_MAX,
                 (max_word_num + 2) * (vis_size + 2), 3],
                dtype=np.uint8)
    # Color-code each word's column using the fixed palette.
    for i in range(max_word_num):
        istart = (i + 2) * (vis_size + 2)
        iend = (i + 3) * (vis_size + 2)
        text_convas[:, istart:iend, :] = COLOR_DIC[i]
    real_imgs = \
        nn.Upsample(size=(vis_size, vis_size), mode='bilinear', align_corners=True)(real_imgs)
    # [-1, 1] --> [0, 1]
    real_imgs.add_(1).div_(2).mul_(255)
    real_imgs = real_imgs.data.numpy()
    # b x c x h x w --> b x h x w x c
    real_imgs = np.transpose(real_imgs, (0, 2, 3, 1))
    pad_sze = real_imgs.shape
    # Spacers between tiles (vertical) and filler for empty word slots.
    middle_pad = np.zeros([pad_sze[2], 2, 3])
    post_pad = np.zeros([pad_sze[1], pad_sze[2], 3])
    if lr_imgs is not None:
        lr_imgs = \
            nn.Upsample(size=(vis_size, vis_size), mode='bilinear', align_corners=True)(lr_imgs)
        # [-1, 1] --> [0, 1]
        lr_imgs.add_(1).div_(2).mul_(255)
        lr_imgs = lr_imgs.data.numpy()
        # b x c x h x w --> b x h x w x c
        lr_imgs = np.transpose(lr_imgs, (0, 2, 3, 1))
    # batch x seq_len x 17 x 17 --> batch x 1 x 17 x 17
    seq_len = max_word_num
    img_set = []
    num = nvis  # len(attn_maps)
    text_map, sentences = \
        drawCaption(text_convas, captions, ixtoword, vis_size)
    text_map = np.asarray(text_map).astype(np.uint8)
    bUpdate = 1
    for i in range(num):
        attn = attn_maps[i].cpu().view(1, -1, att_sze, att_sze)
        # Prepend the per-pixel max over words as an extra "summary" map.
        # --> 1 x 1 x 17 x 17
        attn_max = attn.max(dim=1, keepdim=True)
        attn = torch.cat([attn_max[0], attn], 1)
        #
        attn = attn.view(-1, 1, att_sze, att_sze)
        attn = attn.repeat(1, 3, 1, 1).data.numpy()
        # n x c x h x w --> n x h x w x c
        attn = np.transpose(attn, (0, 2, 3, 1))
        num_attn = attn.shape[0]
        #
        img = real_imgs[i]
        if lr_imgs is None:
            lrI = img
        else:
            lrI = lr_imgs[i]
        row = [lrI, middle_pad]
        row_merge = [img, middle_pad]
        row_beforeNorm = []
        # Track a global min/max across this example's maps so all maps are
        # normalized on a common scale.
        minVglobal, maxVglobal = 1, 0
        for j in range(num_attn):
            one_map = attn[j]
            if (vis_size // att_sze) > 1:
                # Smoothly upsample the attention map to the display size.
                one_map = \
                    skimage.transform.pyramid_expand(one_map, sigma=20,
                                                     upscale=vis_size // att_sze)
            row_beforeNorm.append(one_map)
            minV = one_map.min()
            maxV = one_map.max()
            if minVglobal > minV:
                minVglobal = minV
            if maxVglobal < maxV:
                maxVglobal = maxV
        for j in range(seq_len + 1):
            if j < num_attn:
                one_map = row_beforeNorm[j]
                one_map = (one_map - minVglobal) / (maxVglobal - minVglobal)
                one_map *= 255
                # Blend the attention map over the image via an alpha mask.
                PIL_im = Image.fromarray(np.uint8(img))
                PIL_att = Image.fromarray(np.uint8(one_map))
                merged = \
                    Image.new('RGBA', (vis_size, vis_size), (0, 0, 0, 0))
                mask = Image.new('L', (vis_size, vis_size), (210))
                merged.paste(PIL_im, (0, 0))
                merged.paste(PIL_att, (0, 0), mask)
                merged = np.array(merged)[:, :, :3]
            else:
                # No attention map for this slot: fill with black padding.
                one_map = post_pad
                merged = post_pad
            row.append(one_map)
            row.append(middle_pad)
            #
            row_merge.append(merged)
            row_merge.append(middle_pad)
        row = np.concatenate(row, 1)
        row_merge = np.concatenate(row_merge, 1)
        txt = text_map[i * FONT_MAX: (i + 1) * FONT_MAX]
        if txt.shape[1] != row.shape[1]:
            # Width mismatch between caption strip and tiles: bail out.
            print('txt', txt.shape, 'row', row.shape)
            bUpdate = 0
            break
        row = np.concatenate([txt, row, row_merge], 0)
        img_set.append(row)
    if bUpdate:
        img_set = np.concatenate(img_set, 0)
        img_set = img_set.astype(np.uint8)
        return img_set, sentences
    else:
        return None
def build_super_images2(real_imgs, captions, cap_lens, ixtoword,
                        attn_maps, att_sze, vis_size=256, topK=5):
    """Visualize the ``topK`` most-attended words per image.

    Like ``build_super_images`` but ranks word attention maps by a
    confidence score (attention mass above a threshold) and keeps only the
    top ``topK``. Returns ``(uint8 image array, sentences)`` or ``None``
    when the caption strip and tile widths disagree.
    """
    batch_size = real_imgs.size(0)
    max_word_num = np.max(cap_lens)
    text_convas = np.ones([batch_size * FONT_MAX,
                           max_word_num * (vis_size + 2), 3],
                          dtype=np.uint8)
    # NOTE(review): unlike build_super_images this omits align_corners=True;
    # confirm whether the difference is intentional.
    real_imgs = \
        nn.Upsample(size=(vis_size, vis_size), mode='bilinear')(real_imgs)
    # [-1, 1] --> [0, 1]
    real_imgs.add_(1).div_(2).mul_(255)
    real_imgs = real_imgs.data.numpy()
    # b x c x h x w --> b x h x w x c
    real_imgs = np.transpose(real_imgs, (0, 2, 3, 1))
    pad_sze = real_imgs.shape
    middle_pad = np.zeros([pad_sze[2], 2, 3])
    # batch x seq_len x 17 x 17 --> batch x 1 x 17 x 17
    img_set = []
    num = len(attn_maps)
    text_map, sentences = \
        drawCaption(text_convas, captions, ixtoword, vis_size, off1=0)
    text_map = np.asarray(text_map).astype(np.uint8)
    bUpdate = 1
    for i in range(num):
        attn = attn_maps[i].cpu().view(1, -1, att_sze, att_sze)
        #
        attn = attn.view(-1, 1, att_sze, att_sze)
        attn = attn.repeat(1, 3, 1, 1).data.numpy()
        # n x c x h x w --> n x h x w x c
        attn = np.transpose(attn, (0, 2, 3, 1))
        num_attn = cap_lens[i]
        # Per-word significance threshold: uniform attention would give
        # 1/num_attn everywhere, so 2/num_attn marks "notably attended".
        thresh = 2./float(num_attn)
        #
        img = real_imgs[i]
        row = []
        row_merge = []
        row_txt = []
        row_beforeNorm = []
        conf_score = []
        for j in range(num_attn):
            one_map = attn[j]
            # Confidence = attention mass above twice the threshold.
            mask0 = one_map > (2. * thresh)
            conf_score.append(np.sum(one_map * mask0))
            mask = one_map > thresh
            one_map = one_map * mask
            if (vis_size // att_sze) > 1:
                one_map = \
                    skimage.transform.pyramid_expand(one_map, sigma=20,
                                                     upscale=vis_size // att_sze)
            # Per-map min/max normalization (not global, unlike
            # build_super_images).
            minV = one_map.min()
            maxV = one_map.max()
            one_map = (one_map - minV) / (maxV - minV)
            row_beforeNorm.append(one_map)
        # Word indices sorted by descending confidence.
        sorted_indices = np.argsort(conf_score)[::-1]
        for j in range(num_attn):
            one_map = row_beforeNorm[j]
            one_map *= 255
            # Blend the attention map over the image via an alpha mask.
            PIL_im = Image.fromarray(np.uint8(img))
            PIL_att = Image.fromarray(np.uint8(one_map))
            merged = \
                Image.new('RGBA', (vis_size, vis_size), (0, 0, 0, 0))
            mask = Image.new('L', (vis_size, vis_size), (180))  # (210)
            merged.paste(PIL_im, (0, 0))
            merged.paste(PIL_att, (0, 0), mask)
            merged = np.array(merged)[:, :, :3]
            row.append(np.concatenate([one_map, middle_pad], 1))
            #
            row_merge.append(np.concatenate([merged, middle_pad], 1))
            #
            txt = text_map[i * FONT_MAX:(i + 1) * FONT_MAX,
                           j * (vis_size + 2):(j + 1) * (vis_size + 2), :]
            row_txt.append(txt)
        # Reorder tiles by descending attention confidence.
        row_new = []
        row_merge_new = []
        txt_new = []
        for j in range(num_attn):
            idx = sorted_indices[j]
            row_new.append(row[idx])
            row_merge_new.append(row_merge[idx])
            txt_new.append(row_txt[idx])
        row = np.concatenate(row_new[:topK], 1)
        row_merge = np.concatenate(row_merge_new[:topK], 1)
        txt = np.concatenate(txt_new[:topK], 1)
        if txt.shape[1] != row.shape[1]:
            # BUGFIX: row_merge_new is a Python list (no .shape attribute);
            # report its length instead of crashing inside the warning.
            print('Warnings: txt', txt.shape, 'row', row.shape,
                  'row_merge_new', len(row_merge_new))
            bUpdate = 0
            break
        row = np.concatenate([txt, row_merge], 0)
        img_set.append(row)
    if bUpdate:
        img_set = np.concatenate(img_set, 0)
        img_set = img_set.astype(np.uint8)
        return img_set, sentences
    else:
        return None
####################################################################
def weights_init(m):
    """Module initializer for use with ``model.apply(weights_init)``.

    Conv / Linear weights get orthogonal init (gain 1.0); BatchNorm gets
    weight ~ N(1, 0.02) and zero bias. Spectral-norm wrapped convs (whose
    state dict exposes ``weight_bar``) are initialized through that tensor.
    """
    # orthogonal_
    # xavier_uniform_(
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # BUGFIX: dict_keys is not subscriptable in Python 3 -- the old
        # ``m.state_dict().keys()[3]`` raised TypeError. Materialize once.
        keys = list(m.state_dict().keys())
        if keys[0] == 'weight':
            nn.init.orthogonal_(m.weight.data, 1.0)
        elif len(keys) > 3 and keys[3] == 'weight_bar':
            nn.init.orthogonal_(m.weight_bar.data, 1.0)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    elif classname.find('Linear') != -1:
        nn.init.orthogonal_(m.weight.data, 1.0)
        if m.bias is not None:
            m.bias.data.fill_(0.0)
def load_params(model, new_param):
    """Copy the tensors in *new_param* into *model*'s parameters in-place.

    *new_param* must iterate in the same order as ``model.parameters()``
    (e.g. the output of ``copy_G_params``).
    """
    for current, incoming in zip(model.parameters(), new_param):
        current.data.copy_(incoming)
def copy_G_params(model):
    """Return an independent deep copy of *model*'s parameter tensors.

    Used to snapshot generator weights (e.g. for EMA / restore via
    ``load_params``); later in-place updates to the model do not affect
    the returned copies.
    """
    return deepcopy([p.data for p in model.parameters()])
def mkdir_p(path):
    """Create directory *path*, including parents, like ``mkdir -p``.

    An already-existing directory is silently accepted; any other failure
    (permissions, path exists as a regular file, ...) propagates as OSError.
    """
    # exist_ok=True reproduces the old try/except-EEXIST-and-isdir dance:
    # it still raises when the path exists but is not a directory.
    os.makedirs(path, exist_ok=True)
| 11,364 | 34.07716 | 96 | py |
adversarial_ntk_evolution | adversarial_ntk_evolution-master/test_functions.py | import jax
import haiku as hk
import jax.numpy as jnp
from jax.example_libraries import optimizers
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset
import numpy as np
import neural_tangents as nt
import functools
import operator
import optax
import copy
import models
from models import linear_forward
@functools.partial(jax.jit, static_argnums=(3, 7, 8, 9))
def loss_fn(params, lin_params, state, net_fn, rng, images, labels, lin = False, is_training = True, centering = False):
    """Softmax cross-entropy loss (and accuracy) for one batch.

    ``lin`` selects the linearized forward pass; ``centering`` subtracts
    the output of the reference parameters ``lin_params`` from the output
    of ``params`` (centered dynamics). Returns
    ``(loss, {'net_state': state, 'acc': acc})``.
    """
    if lin:
        logits, state = linear_forward(params, lin_params, state, net_fn, rng, images, is_training = is_training, centering = centering)
    elif centering:
        # Centered dynamics: lin_params acts as the "zero" reference network.
        ref_logits, state = net_fn(lin_params, state, rng, images, is_training = is_training)
        cur_logits, state = net_fn(params, state, rng, images, is_training = is_training)
        logits = cur_logits - ref_logits
    else:
        logits, state = net_fn(params, state, rng, images, is_training = is_training)
    one_hot_labels = jax.nn.one_hot(labels, logits.shape[-1])
    loss = optax.softmax_cross_entropy(logits, one_hot_labels).mean()
    acc = jnp.mean(logits.argmax(1) == labels)
    return loss, {'net_state': state, 'acc': acc}
@functools.partial(jax.jit, static_argnums=(2))
def clamp_by_norm(x, r, norm = 'l_2'):
    """Project each example of *x* (NHWC batch) into a norm ball of radius *r*.

    'l_2': rescale examples whose per-example l2 norm exceeds r.
    'l_inf': clip every coordinate to [-r, r].
    """
    if norm == 'l_2':
        per_example_norm = jnp.sqrt(jnp.sum(x ** 2, [1, 2, 3], keepdims=True))
        scale = jnp.minimum(r / per_example_norm, jnp.ones_like(per_example_norm))
        return x * scale
    if norm == 'l_inf':
        return jnp.clip(x, -r, r)
@functools.partial(jax.jit, static_argnums=(3, 10, 11))
def do_perturbation_step_l_inf(params, lin_params, net_state, net_fn, rng, images0, images, labels, eps, alpha, linear = False, centering = False):
    """One signed-gradient (FGSM-style) ascent step, projected into the
    l_inf ball of radius *eps* around *images0* and into valid pixel range.
    """
    image_grads, _ = jax.grad(loss_fn, has_aux = True, argnums = 5)(
        params, lin_params, net_state, net_fn, rng, images, labels,
        lin = linear, is_training = False, centering = centering)
    stepped = images + alpha * jnp.sign(image_grads)
    stepped = jnp.clip(stepped, 0., 1.)
    delta = clamp_by_norm(stepped - images0, eps, norm = 'l_inf')
    return images0 + delta
@functools.partial(jax.jit, static_argnums=(3,10,11))
def do_perturbation_step_l_2(params, lin_params, net_state, net_fn, rng, images0, images, labels, eps, alpha, linear = False, centering = False):
    """One normalized-gradient ascent step, projected into the l_2 ball of
    radius *eps* around *images0* and into valid pixel range.
    """
    image_grads, _ = jax.grad(loss_fn, has_aux = True, argnums = 5)(
        params, lin_params, net_state, net_fn, rng, images, labels,
        lin = linear, is_training = False, centering = centering)
    grad_norm = jnp.sqrt(jnp.sum(image_grads ** 2, [1, 2, 3], keepdims = True))
    stepped = images + alpha * (image_grads / grad_norm)
    stepped = jnp.clip(stepped, 0., 1.)
    delta = clamp_by_norm(stepped - images0, eps, norm = 'l_2')
    return images0 + delta
def perturb(params, lin_params, net_state, net_fn, rng, images0, labels, eps, alpha, iters, linear = False, centering = False, attack = 'linf'):
    """PGD-style attack: random start inside the eps-ball, then *iters*
    projected gradient steps of size *alpha* under the given norm.

    ``attack`` is 'linf' or 'l2'; ``linear``/``centering`` are forwarded to
    the loss so the attack targets the corresponding dynamics.
    """
    images = images0
    # First add random noise within ball
    if attack == 'l2':
        images = images + np.random.normal(0, eps/np.sqrt(len(images[0].shape)), images.shape)
    if attack == 'linf':
        images = images + np.random.uniform(-eps, eps, images.shape)
    images = jnp.clip(images, 0., 1.)
    d_images = images - images0
    # BUGFIX: project the random start with the norm matching the attack
    # (it was always the default l_2, incorrectly shrinking l_inf inits)...
    d_images = clamp_by_norm(d_images, eps, norm = 'l_2' if attack == 'l2' else 'l_inf')
    # ...and rebase the projected delta on images0. The previous
    # ``images = images + d_images`` added the delta to the already
    # perturbed images, doubling it and leaving the eps-ball.
    images = images0 + d_images
    for _ in range(iters):
        if attack == 'linf':
            images = do_perturbation_step_l_inf(params, lin_params, net_state, net_fn, rng, images0, images, labels, eps, alpha, linear = linear, centering = centering)
        elif attack == 'l2':
            images = do_perturbation_step_l_2(params, lin_params, net_state, net_fn, rng, images0, images, labels, eps, alpha, linear = linear, centering = centering)
    return images
def test(params, lin_params, state, net_fn, rng, test_loader, linear = False, make_adv_examples = False, centering = False, attack = 'linf', return_examples = False, short = False, return_components = False, adv_eps = 4):
    """Evaluate clean and (optionally) adversarial accuracy on *test_loader*.

    ``adv_eps`` is given in 0-255 scale and divided by 255 here. When
    ``linear`` is set, the linearized forward pass is evaluated and its
    f / df components are collected. Returns ``(clean_acc, adv_acc)``,
    extended with adversarial examples / predictions / components when the
    corresponding flags are set.
    """
    adv_eps = adv_eps/255
    n_correct = 0
    n_total = 0
    n_correct_adv = 0
    n_correct_batch = 0
    n_batch = 0
    n_correct_adv_batch = 0
    adv_examples = []
    predictions = []
    components = []          # f (base network) outputs per batch
    linear_components = []   # df (linear correction) outputs per batch
    adv_components = []
    adv_linear_components = []
    adv_predictions = []
    print("testing")
    for i, (images, labels) in enumerate(test_loader):
        # torch NCHW -> numpy NHWC
        images = np.array(np.transpose(images.cpu().numpy(), [0,2,3,1]))
        labels = labels.cpu().numpy()
        # ---- clean forward pass ----
        if linear:
            logits, return_dict = linear_forward(params, lin_params, state, net_fn, rng, images, is_training = False, centering = centering, return_components = True)
            f = return_dict['f']
            df = return_dict['df']
            components.append(f)
            linear_components.append(df)
        else:
            if centering:
                logits0, _ = net_fn(lin_params, state, rng, images, is_training = False)
                logits1, _ = net_fn(params, state, rng, images, is_training = False)
                logits = logits1 - logits0
                components.append(logits0)
                linear_components.append(np.zeros_like(logits))
            else:
                logits, _ = net_fn(params, state, rng, images, is_training = False)
                components.append(logits)
                linear_components.append(np.zeros_like(logits))
        n_correct += np.sum(logits.argmax(1) == labels)
        n_correct_batch += np.sum(logits.argmax(1) == labels)
        predictions.append(logits.argmax(1))
        n_total += len(labels)
        n_batch += len(labels)
        # ---- adversarial forward pass ----
        if make_adv_examples:
            iters = 100
            if attack == 'l2':
                adv_images = perturb(params, lin_params, state, net_fn, rng, images, labels, 0.25, 0.01, iters, linear = linear, centering = centering, attack = attack)
            elif attack == 'linf':
                adv_images = perturb(params, lin_params, state, net_fn, rng, images, labels, adv_eps, 2 * adv_eps / iters, iters, linear = linear, centering = centering, attack = attack)
        else:
            # No attack requested: "adversarial" metrics reuse clean images.
            adv_images = images
        if return_examples:
            adv_examples.append(adv_images)
        if linear:
            logits_adv, return_dict = linear_forward(params, lin_params, state, net_fn, rng, adv_images, is_training = False, centering = centering, return_components = True)
            f = return_dict['f']
            df = return_dict['df']
            adv_components.append(f)
            adv_linear_components.append(df)
        else:
            if centering:
                logits0, _ = net_fn(lin_params, state, rng, adv_images, is_training = False)
                logits1, _ = net_fn(params, state, rng, adv_images, is_training = False)
                logits_adv = logits1 - logits0
                adv_components.append(logits0)
                adv_linear_components.append(np.zeros_like(logits_adv))
            else:
                logits_adv, _ = net_fn(params, state, rng, adv_images, is_training = False)
                adv_components.append(logits_adv)
                adv_linear_components.append(np.zeros_like(logits_adv))
        n_correct_adv += np.sum(logits_adv.argmax(1) == labels)
        n_correct_adv_batch += np.sum(logits_adv.argmax(1) == labels)
        adv_predictions.append(logits_adv.argmax(1))
        # Progress report every 10 batches.
        if i % 10 == 9:
            print("\nTest Batch {}".format(int((i+1)/10)))
            print("Clean Acc: {:.2f}".format(100 * n_correct_batch/n_batch))
            print("Dirty Acc: {:.2f}".format(100 * n_correct_adv_batch/n_batch))
            n_correct_batch = 0
            n_batch = 0
            n_correct_adv_batch = 0
        # NOTE(review): short-circuit placement inferred -- breaks after the
        # first batch; confirm against the original whitespace.
        if short:
            break
    print("\nTest Results Total")  # BUGFIX: dropped no-op .format() on a placeholder-free string
    print("Clean Acc: {:.2f}".format(100 * n_correct/n_total))
    print("Dirty Acc: {:.2f}".format(100 * n_correct_adv/n_total))
    components_clean = {'f': np.concatenate(components), 'df': np.concatenate(linear_components)}
    components_dirty = {'f': np.concatenate(adv_components), 'df': np.concatenate(adv_linear_components)}
    if return_examples:
        adv_examples = np.concatenate(adv_examples, 0)
        predictions = np.concatenate(predictions)
        adv_predictions = np.concatenate(adv_predictions)
        if return_components:
            return n_correct/n_total, n_correct_adv/n_total, adv_examples, predictions, adv_predictions, components_clean, components_dirty
        return n_correct/n_total, n_correct_adv/n_total, adv_examples, predictions, adv_predictions
    # BUGFIX: stripped corrupted trailing text ("| 9,229 | ... | py |") that
    # had been fused onto this return statement, breaking the file's syntax.
    return n_correct/n_total, n_correct_adv/n_total
adversarial_ntk_evolution | adversarial_ntk_evolution-master/run_exp.py | import jax
import haiku as hk
import jax.numpy as jnp
from jax.example_libraries import optimizers
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset
import numpy as np
import neural_tangents as nt
import functools
import operator
import optax
import copy
import models
import pickle
from utils import bind, _add, _sub
import os
from test_functions import do_perturbation_step_l_inf, do_perturbation_step_l_2, perturb, test, loss_fn
import data
import os
import argparse
@functools.partial(jax.jit, static_argnums=(3,5, 9, 10))
def do_training_step(params, lin_params, net_state, net_fn, opt_state, optimizer_update, rng, images, labels, is_training = True, centering = False):
    """One optimizer step on the full (non-linearized) parameters.

    Returns ``(loss, params, net_state, opt_state, acc)``.
    """
    grad_fn = jax.value_and_grad(loss_fn, has_aux = True)
    (loss, aux), grads = grad_fn(params, lin_params, net_state, net_fn, rng, images, labels,
                                 lin = False, is_training = is_training, centering = centering)
    updates, opt_state = optimizer_update(grads, opt_state, params)
    params = optax.apply_updates(params, updates)
    return loss, params, aux['net_state'], opt_state, aux['acc']
@functools.partial(jax.jit, static_argnums=(3, 5, 9, 10))
def do_training_step_linear(params, lin_params, net_state, net_fn, opt_state_lin, optimizer_lin_update, rng, images, labels, centering = False, is_training = False):
    """One optimizer step on the linearized parameters (argnums=1).

    Returns ``(loss, params, lin_params, net_state, opt_state_lin, acc)``.
    """
    grad_fn = jax.value_and_grad(loss_fn, has_aux = True, argnums = 1)
    (loss, aux), grads = grad_fn(params, lin_params, net_state, net_fn, rng, images, labels,
                                 lin = True, centering = centering, is_training = is_training)
    updates, opt_state_lin = optimizer_lin_update(grads, opt_state_lin, lin_params)
    lin_params = optax.apply_updates(lin_params, updates)
    return loss, params, lin_params, aux['net_state'], opt_state_lin, aux['acc']
def main():
    """Two-phase training driver.

    Phase 1 trains a network with standard SGD dynamics (or loads a saved
    checkpoint); phase 2 continues from the phase-1 weights with either
    linearized (NTK) or standard dynamics, optionally adversarially, then
    evaluates clean / l2 / l_inf robustness and saves results.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str, default = './X_nothing', help = 'data path')
    parser.add_argument('--standard_epochs', type=int, default = 100, help = 'number of epochs to run in standard dynamics before switching to phase 2')
    parser.add_argument('--linear_epochs', type=int, default = 100, help = 'number of epochs to run in stage 2 dynamics')
    parser.add_argument('--centering', action='store_true', help = 'whether to use centered linearized dynamics for phase 2. default is non-centering')
    parser.add_argument('--save_models', action='store_true', help = 'whether to save the models at the end of phase 1/2')
    parser.add_argument('--constant_save', action='store_true', help = 'whether to save after every epoch in phase 1')
    parser.add_argument('--constant_save_linear', action='store_true', help = 'whether to save after every epoch in phase 2')
    parser.add_argument('--loose_bn_second', action='store_true', help = 'whether to allow batch norm parameters to change in the second phase, default is frozen batch norm')
    parser.add_argument('--do_standard_second', action='store_true', help = 'whether to use standard dynamics in phase 2')
    parser.add_argument('--skip_first_test', action='store_true', help = 'whether to skip evaluation after phase 1')
    parser.add_argument('--skip_second_test', action='store_true', help = 'whether to skip evaoluation after phase 2')
    parser.add_argument('--random_seed', type = int, default = 0, help = 'random seed')
    parser.add_argument('--base_model_path', type=str, default = '', help = 'if this is non-empty, we load a model from the path and then skip to phase 2')
    parser.add_argument('--model', type=str, default = 'resnet18', help = 'model. all experiments in the paper use a resnet 18')
    parser.add_argument('--loaders', type=str, default = 'CC', help = 'first letter is what type of training in phase 1 and second letter is type of training in phase 2. C = benign/clean data. A = adversarial training. F ="flip" i.e. flip from clean data to adversarial after 50 epochs')
    # BUGFIX: this line was ``default = help = 'cifar10', '...'`` -- a
    # SyntaxError; the intent was clearly default='cifar10' with a help text.
    parser.add_argument('--dataset', type=str, default = 'cifar10', help = 'dataset. either cifar10 or cifar100')
    parser.add_argument('--second_lr', type=float, default = 0.01, help = 'learning rate to use in phase 2')
    parser.add_argument('--eps', type=float, default = 4.00, help = 'eps value for adversarial training. scaled by 1/255')
    parser.add_argument('--save_path', type=str, default = './saved_models/', help = 'save path for the models')
    args = parser.parse_args()
    if args.save_models:
        os.makedirs(args.save_path, exist_ok=True)
    transform_train = transforms.Compose([
        transforms.ToTensor(),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
    ])
    # Fractional epochs at which extra checkpoints are saved early in phase 1.
    constant_save_extra_epochs = [0.125, 0.25, 0.375, 0.5, 0.75, 1.5, 2.5]

    class TensorDataset(Dataset):
        # Minimal in-memory dataset over aligned (images, labels) tensors.
        def __init__(self, *tensors, transform=None):
            assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
            self.tensors = tensors
            self.transform = transform

        def __getitem__(self, index):
            im, targ = tuple(tensor[index] for tensor in self.tensors)
            if self.transform:
                real_transform = transforms.Compose([
                    transforms.ToPILImage(),
                    self.transform
                ])
                im = real_transform(im)
            return im, targ

        def __len__(self):
            return self.tensors[0].size(0)

    train_data, train_labels = data.get_data_and_labels(args.dataset)
    n_classes = data.get_n_classes(args.dataset)
    train_dataset = TensorDataset(train_data, train_labels, transform=transform_train)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True, num_workers=4)
    test_loader = data.get_loader(args.dataset, train = False, batch_size = 100, shuffle = False)
    loader_order = args.loaders
    standard_loader = train_loader
    linear_loader = train_loader
    rng = jax.random.PRNGKey(args.random_seed)
    print("RANDOM SEED {}".format(args.random_seed))
    net_forward_init, net_forward_apply = models.get_model(args.model, n_classes)
    # Initialize parameters/state from one dummy batch.
    dummy_images, dummy_labels = next(iter(train_loader))
    dummy_images = np.transpose(dummy_images.cpu().numpy(), [0,2,3,1])
    dummy_labels = dummy_labels.cpu().numpy()
    params, net_state = net_forward_init(rng, dummy_images, is_training=True)
    lin_params = copy.deepcopy(params)
    optimizer_init, optimizer_update = optax.chain( optax.sgd(0.1, momentum = 0.9))
    opt_state = optimizer_init(params)
    if len(args.base_model_path) > 0:
        # Skip phase 1: resume from a saved checkpoint.
        print('Loading from saved model')
        checkpoint = pickle.load(open('./{}'.format(args.base_model_path), 'rb'))
        params = checkpoint['params']
        lin_params = checkpoint['lin_params']
        net_state = checkpoint['net_state']
        optimizer_init, optimizer_update = optax.chain( optax.sgd(args.second_lr, momentum = 0.9))
        opt_state = optimizer_init(params)
    else:
        # ---- phase 1: standard dynamics ----
        losses = []
        for epoch in range(args.standard_epochs):
            print(epoch)
            if args.constant_save:
                pickle.dump({'params' : params, 'lin_params' : lin_params, 'net_state' : net_state}, open('./{}/parameters_checkpoint_{}.pkl'.format(args.save_path, epoch),'wb'))
            optim_step = 0
            for i, (images, labels) in enumerate(standard_loader):
                # Extra sub-epoch checkpoints early in training.
                if args.constant_save and len(constant_save_extra_epochs) > 0 and (epoch + (i/len(standard_loader))) > constant_save_extra_epochs[0]:
                    pickle.dump({'params' : params, 'lin_params' : lin_params, 'net_state' : net_state}, open('./{}/parameters_checkpoint_{}.pkl'.format(args.save_path, constant_save_extra_epochs[0]),'wb'))
                    constant_save_extra_epochs.pop(0)
                images = np.transpose(images.cpu().numpy(), [0,2,3, 1])
                labels = labels.cpu().numpy()
                loss, params, net_state, opt_state, acc = do_training_step(params, lin_params, net_state, net_forward_apply, opt_state, optimizer_update, rng, images, labels)
                # Adversarial training in phase 1 ('A', or 'F' after epoch 50).
                if loader_order[0] in ['A'] or (loader_order[0] == 'F' and epoch >= 50):
                    adv_eps = args.eps/255
                    iters = 20
                    adv_1 = perturb(params, lin_params, net_state, net_forward_apply, rng, images, labels, adv_eps, 2 * adv_eps/iters, iters)
                    loss, params, net_state, opt_state, acc = do_training_step(params, lin_params, net_state, net_forward_apply, opt_state, optimizer_update, rng, adv_1, labels)
                optim_step += 1
                losses.append(loss)
            # Step-wise learning-rate schedule for phase 1.
            if epoch == 99:
                _, optimizer_update = optax.chain( optax.sgd(0.01, momentum = 0.9))
            elif epoch == 149:
                _, optimizer_update = optax.chain( optax.sgd(0.001, momentum = 0.9))
    # ---- evaluate after phase 1 ----
    if args.skip_first_test:
        clean_acc_l2, dirty_acc_l2 = [0, 0]
        clean_acc_linf, dirty_acc_linf = [0, 0]
    else:
        clean_acc_l2, dirty_acc_l2 = test(params, lin_params, net_state, net_forward_apply, rng, test_loader, make_adv_examples = True, linear = False, attack = 'l2', adv_eps = args.eps)
        clean_acc_linf, dirty_acc_linf = test(params, lin_params, net_state, net_forward_apply, rng, test_loader, make_adv_examples = True, linear = False, attack = 'linf', adv_eps = args.eps)
    standard_results = {
        'clean': clean_acc_l2,
        'l2': dirty_acc_l2,
        'linf': dirty_acc_linf
    }
    # ---- phase 2: linearized (or standard) dynamics from phase-1 weights ----
    lin_params = copy.deepcopy(params)
    optimizer_lin_init, optimizer_lin_update = optax.chain( optax.sgd(args.second_lr, momentum = 0.9))
    opt_state_lin = optimizer_lin_init(lin_params)
    losses = []
    for epoch in range(args.linear_epochs):
        print(epoch)
        optim_step = 0
        for i, (images, labels) in enumerate(linear_loader):
            images = np.transpose(images.cpu().numpy(), [0,2,3, 1])
            labels = labels.cpu().numpy()
            # NOTE(review): this checkpoint is rewritten on every batch (same
            # epoch-named file), not once per epoch -- confirm intent.
            if args.constant_save_linear:
                pickle.dump({'params' : params, 'lin_params' : lin_params, 'net_state' : net_state}, open('./{}/parameters_checkpoint_linear_{}.pkl'.format(args.save_path, epoch),'wb'))
            if args.do_standard_second:
                loss, params, net_state, opt_state, acc = do_training_step(params, lin_params, net_state, net_forward_apply, opt_state, optimizer_update, rng, images, labels, is_training = args.loose_bn_second, centering = args.centering)
            else:
                loss, params, lin_params, net_state, opt_state_lin, acc = do_training_step_linear(params, lin_params, net_state, net_forward_apply, opt_state_lin, optimizer_lin_update, rng, images, labels, centering = args.centering, is_training = args.loose_bn_second)
            # Adversarial training in phase 2 ('A', or 'F' after epoch 50).
            if loader_order[1] in ['A'] or (loader_order[1] == 'F' and epoch >= 50):
                adv_eps = args.eps/255
                iters = 20
                adv_1 = perturb(params, lin_params, net_state, net_forward_apply, rng, images, labels, adv_eps, 2 * adv_eps/iters, iters, linear = True, centering = args.centering)
                loss, params, lin_params, net_state, opt_state_lin, acc = do_training_step_linear(params, lin_params, net_state, net_forward_apply, opt_state_lin, optimizer_lin_update, rng, adv_1, labels, centering = args.centering)
            optim_step += 1
            losses.append(loss)
    # note we test l2 norms in the code but in the paper we only use l-inf attacks/defenses
    if args.skip_second_test:
        clean_acc_l2, dirty_acc_l2 = [0, 0]
        clean_acc_linf, dirty_acc_linf = [0, 0]
    elif args.do_standard_second:
        clean_acc_l2, dirty_acc_l2 = test(params, lin_params, net_state, net_forward_apply, rng, test_loader, make_adv_examples = True, linear = False, centering = args.centering, attack = 'l2', adv_eps = args.eps)
        clean_acc_linf, dirty_acc_linf = test(params, lin_params, net_state, net_forward_apply, rng, test_loader, make_adv_examples = True, linear = False, centering = args.centering, attack = 'linf', adv_eps = args.eps)
    else:
        clean_acc_l2, dirty_acc_l2 = test(params, lin_params, net_state, net_forward_apply, rng, test_loader, make_adv_examples = True, linear = True, centering = args.centering, attack = 'l2', adv_eps = args.eps)
        clean_acc_linf, dirty_acc_linf = test(params, lin_params, net_state, net_forward_apply, rng, test_loader, make_adv_examples = True, linear = True, centering = args.centering, attack = 'linf', adv_eps = args.eps)
    linear_results = {
        'clean': clean_acc_l2,
        'l2': dirty_acc_l2,
        'linf': dirty_acc_linf
    }
    if args.save_models:
        pickle.dump({'params' : params, 'lin_params' : lin_params, 'net_state' : net_state}, open('./{}/parameters_final.pkl'.format(args.save_path),'wb'))
        pickle.dump({'standard': standard_results, 'linear': linear_results, 'standard_second': args.do_standard_second}, open('./{}/results.pkl'.format(args.save_path),'wb'))


if __name__ == '__main__':
    main()
| 13,392 | 50.511538 | 287 | py |
adversarial_ntk_evolution | adversarial_ntk_evolution-master/calculate_kernel.py | import jax
import haiku as hk
import jax.numpy as jnp
from jax.example_libraries import optimizers
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset
import numpy as np
import neural_tangents as nt
import functools
import operator
import optax
import copy
import models
import pickle
from utils import bind, _add, _sub, get_class_indices
import os
from test_functions import do_perturbation_step_l_inf, do_perturbation_step_l_2, perturb, test, loss_fn
import numpy as np
import argparse
import time
import data
def main():
    """Estimate the empirical NTK of a saved model on a class-balanced
    subset of the training set and pickle the resulting Gram matrix.

    The kernel is computed block-wise (batch_size x batch_size tiles) over
    the upper triangle only, then symmetrized.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', type=str, default = '', help = 'path of the saved model')
    parser.add_argument('--dataset_size', type=int, default = 500, help = 'number of images to estimate the kernel with')
    parser.add_argument('--save_name', type=str, default = 'saved_kernel', help = 'what to name the saved model')
    parser.add_argument('--class_index', type=int, default = -1, help = 'which class to make for the kernel. Default is -1 which means we basically average out every classes kernel')
    parser.add_argument('--show_progress', action='store_true', help = 'for when youre impatient and want to see every time a kernel sub block is made')
    parser.add_argument('--model', type=str, default = 'resnet18', help = 'model')
    parser.add_argument('--bonus_dir', type=str, default = '.', help = 'extra directory for more specific save locations')
    parser.add_argument('--random_seed', type = int, default = 0, help = 'random seed')
    parser.add_argument('--use_linear_params', action='store_true', help = '')
    parser.add_argument('--dataset', type=str, default = 'cifar10')
    args = parser.parse_args()
    train_data, train_labels = data.get_data_and_labels(args.dataset)
    # torch NCHW -> numpy NHWC
    x_train = np.transpose(train_data.cpu().numpy(), [0,2,3,1])
    rng = jax.random.PRNGKey(args.random_seed)
    net_forward_init, net_forward_apply = models.get_model(args.model, data.get_n_classes(args.dataset))
    # Class-balanced subsample: dataset_size/10 images per class.
    # NOTE(review): n_classes is hard-coded to 10 even for cifar100 -- confirm.
    train_subindices = get_class_indices(train_labels, int((args.dataset_size)/10), args.random_seed, n_classes = 10)
    x_train = x_train[train_subindices]
    checkpoint = pickle.load(open('./{}'.format(args.model_path), 'rb'))
    params = checkpoint['params']
    lin_params = checkpoint['lin_params']
    if args.use_linear_params:
        params = lin_params
    net_state = checkpoint['net_state']
    # Bind state/rng so the network is a pure fn of (inputs, params), as
    # required by neural_tangents; optionally restrict to one output class.
    if args.class_index == -1:
        print("Calculating Combined Kernel")
        net_forward_binded = lambda a, b: bind(net_forward_apply, ..., net_state, rng, is_training = False)(a,b)[0]
    else:
        print("Calculating Kernel for class {}".format(args.class_index))
        net_forward_binded = lambda a, b: bind(net_forward_apply, ..., net_state, rng, is_training = False)(a,b)[0][:, args.class_index : args.class_index + 1]
    kernel = np.zeros([x_train.shape[0], x_train.shape[0]])
    kernel_fn = nt.empirical_kernel_fn(net_forward_binded, implementation = 2)
    batch_size = 4
    kernel_fn = jax.jit(nt.batch(kernel_fn, batch_size=batch_size), static_argnums = (2))
    # Fill the upper-triangular tiles of the Gram matrix.
    for a in range(int(args.dataset_size/batch_size)):
        for b in range(a, int(args.dataset_size/batch_size)):
            start = time.time()  # NOTE(review): timing captured but unused
            if args.show_progress:
                print(a, b)
            kernel[a * batch_size : (a+1) * batch_size, b * batch_size : (b+1) * batch_size] = kernel_fn(x_train[a * batch_size : (a+1) * batch_size], x_train[b * batch_size : (b+1) * batch_size], 'ntk', params)
    # Symmetrize: mirror the strict upper triangle into the lower half.
    kernel = np.triu(kernel) + np.triu(kernel, k = 1).T
    base_path = os.path.dirname(args.model_path)
    if not os.path.isdir('./{}/{}/'.format(base_path, args.bonus_dir)):
        os.mkdir('./{}/{}/'.format(base_path, args.bonus_dir))
    if args.class_index == -1:
        pickle.dump({'kernel': kernel, 'labels': train_labels[train_subindices].numpy()}, open('./{}/{}/{}_{}.pkl'.format(base_path, args.bonus_dir, args.save_name, args.dataset_size),'wb'))
    else:
        pickle.dump({'kernel': kernel, 'labels': train_labels[train_subindices].numpy()}, open('./{}/{}/{}_class_{}_{}.pkl'.format(base_path, args.bonus_dir, args.save_name, args.class_index, args.dataset_size),'wb'))


if __name__ == '__main__':
    main()
| 4,382 | 42.39604 | 217 | py |
adversarial_ntk_evolution | adversarial_ntk_evolution-master/visualize_ntk_features.py | import jax
import haiku as hk
import jax.numpy as jnp
from jax.example_libraries import optimizers
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset
import numpy as np
import neural_tangents as nt
import functools
import operator
import optax
import copy
import models
import pickle
from utils import bind, _add, _sub, _multiply
import os
from test_functions import do_perturbation_step_l_inf, do_perturbation_step_l_2, perturb, test, loss_fn
import numpy as np
import argparse
import time
def main():
    """Visualize NTK eigen-features of a saved model as input-space images.

    Loads a model checkpoint and a precomputed NTK gram matrix, takes the
    kernel's left singular vectors, and synthesizes images that maximize
    (`pos_images`) or minimize (`neg_images`) alignment with the leading
    eigen-features, plus one image for the kernel-regression weight vector
    ('w').  Results are pickled next to the model checkpoint.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', type=str, default = '', help = 'saved model path')
    parser.add_argument('--dataset_size', type=int, default = 500, help = 'size of the saved kernel')
    parser.add_argument('--save_name', type=str, default = 'ntk_eig_images', help = 'what to name the saved images')
    parser.add_argument('--class_index', type=int, default = -1, help = 'what class to use for the ntk')
    parser.add_argument('--kernel_path', type=str, default = '', help = 'where the kernel is saved')
    parser.add_argument('--n_images', type=int, default = 3, help = 'number of visualization images to make')
    args = parser.parse_args()

    train_size = args.dataset_size
    selected_class = args.class_index
    # NCHW -> NHWC for the JAX model; data assumed in a sibling checkout -- TODO confirm path.
    x_train = np.transpose(torch.tensor(torch.load('../Pytorch-Adversarial-Training-CIFAR/X_nothing')).cpu().numpy(), [0,2,3,1])[:train_size]
    rng = jax.random.PRNGKey(0)
    net_forward_init, net_forward_apply = models.get_resnet()

    checkpoint = pickle.load(open('./{}'.format(args.model_path), 'rb'))
    params = checkpoint['params']
    lin_params = checkpoint['lin_params']
    net_state = checkpoint['net_state']
    # Forward pass restricted to the selected class logit (shape (batch, 1)).
    net_forward_binded = lambda a, b: bind(net_forward_apply, ..., net_state, rng, is_training = False)(a,b)[0][:, selected_class : selected_class + 1]

    labels = torch.load('../Pytorch-Adversarial-Training-CIFAR/y_train').cpu()
    y_oh = torch.nn.functional.one_hot(labels[:train_size], 10).double().cpu().numpy()
    network_info = (params, net_state, net_forward_apply, rng, net_forward_binded, selected_class, lin_params)

    kernel = pickle.load(open('./{}'.format(args.kernel_path), 'rb'))['kernel']
    U = np.linalg.svd(kernel)[0]

    pos_images = np.zeros([args.n_images, 32, 32, 3])
    for i in range(args.n_images):
        # BUG FIX: the original passed (U, y_oh), swapping visualize_eig's
        # (y_oh, U) parameters, so eigenvector weights were silently taken
        # from the one-hot label matrix instead of U.
        pos_images[i] = visualize_eig(kernel, y_oh, U, i, x_train, network_info, flip = False)
    neg_images = np.zeros([args.n_images, 32, 32, 3])
    for i in range(args.n_images):
        neg_images[i] = visualize_eig(kernel, y_oh, U, i, x_train, network_info, flip = True)
    w_image = visualize_eig(kernel, y_oh, U, 'w', x_train, network_info, flip = False)

    base_path = os.path.dirname(args.model_path)
    pickle.dump({'neg_images': neg_images, 'pos_images': pos_images, 'w_image': w_image}, open('./{}/{}_class_{}_{}.pkl'.format(base_path, args.save_name, args.class_index, args.dataset_size),'wb'))
@functools.partial(jax.jit, static_argnums=(3,))
def weighted_forward(params, weights, images, net_forward_binded):
    """Scalar weighted sum of the bound network's outputs over a batch.

    `weights` holds one coefficient per image; `net_forward_binded` is a
    static (hashable) callable under jit.
    """
    outputs = net_forward_binded(params, images)
    weighted = weights[None, :] @ outputs
    return jnp.sum(weighted)
@functools.partial(jax.jit, static_argnums=(2, 3, 6, 7, 8))
def get_g_mag(params, net_state, net_forward_binded, net_forward_apply, rng, images, selected_class, is_training = False, centering = True):
    """Magnitude term: linearized response at params shifted by the image gradient.

    Differentiates the bound network's [0, 0] output w.r.t. `params`, then
    evaluates the project's linearized forward pass at `params + grad` and
    returns the negated selected-class output.
    """
    scalar_out = lambda a, b: net_forward_binded(a, b)[0, 0]
    g = jax.grad(scalar_out)(params, images)
    shifted_params = _add(params, g)
    lin_out = models.linear_forward(params, shifted_params, net_state, net_forward_apply, rng, images, is_training = is_training, centering = centering)[0]
    return -lin_out[0, selected_class]
@functools.partial(jax.jit, static_argnums=(3, 4, 7, 8, 9))
def get_mmd(params, feature_vec, net_state, net_forward_apply_binded_unused_note = None, *_ignore, **__ignore):
    raise NotImplementedError
@functools.partial(jax.jit, static_argnums=(3, 4, 8, 9, 10))
def get_cos(params, feature_vec, net_state, net_forward_binded, net_forward_apply, rng, images, feature_vec_mag, selected_class, is_training = False, centering = True):
    """Cosine similarity between the image's gradient feature and feature_vec.

    Returns (cosine, [gtg, gtv]) where gtg is the gradient's self inner
    product and gtv its inner product with feature_vec, both computed via
    the project's linearized forward pass.
    """
    scalar_out = lambda a, b: net_forward_binded(a, b)[0, 0]
    g = jax.grad(scalar_out)(params, images)

    # Inner products via the linearized forward evaluated at params + vec.
    def lin_inner(vec):
        out = models.linear_forward(params, _add(params, vec), net_state, net_forward_apply, rng, images, is_training = is_training, centering = centering)[0]
        return -1 * out[0, selected_class]

    gtg = lin_inner(g)
    gtv = lin_inner(feature_vec)
    cosine = gtv / (jnp.sqrt(gtg) * jnp.sqrt(feature_vec_mag))
    return cosine, [gtg, gtv]
def dumb_f(a, b):
    """Reduction helper for tree_reduce: total of all elements of both leaves."""
    total_a = jnp.sum(a)
    total_b = jnp.sum(b)
    return total_a + total_b
def visualize_eig(kernel, y_oh, U, eig_index, x_train, network_info, mode = 'cos', flip = False):
    """Synthesize an input image aligned with one NTK kernel eigen-feature.

    kernel:       NTK gram matrix over x_train (assumed square N x N -- TODO confirm).
    y_oh:         one-hot labels; used only when eig_index == 'w'.
    U:            singular vectors of the kernel; column `eig_index` supplies
                  the per-example weights otherwise.
    eig_index:    integer eigenvector index, or the string 'w' to use the
                  kernel-regression weights solve(kernel, y).
    x_train:      training images (NHWC) used to build the feature vector.
    network_info: (params, net_state, net_forward_apply, rng,
                   net_forward_binded, selected_class, lin_params) tuple.
    mode:         'cos' ascends cosine similarity to the feature vector;
                  'l2' uses the get_mmd distance objective.
    flip:         if True, descend instead of ascend (anti-aligned image).

    Returns the optimized (32, 32, 3) image, clipped to [0, 1].
    """
    params, net_state, net_forward_apply, rng, net_forward_binded, selected_class, lin_params = network_info
    if eig_index == 'w':
        # Kernel-regression weight vector for the selected class column.
        weights = np.linalg.solve(kernel, y_oh[:, selected_class: selected_class + 1]).reshape(-1)
        weights = weights #* 4
    else:
        weights = U[:, eig_index]
    batch_size = 100
    feature_vec = None
    # Accumulate the weighted parameter-gradient "feature vector" over
    # batches (a pytree with the same structure as params).
    for b in range(int(x_train.shape[0]/batch_size)):
        x_batch = x_train[b * batch_size : (b+1) * batch_size]
        g = jax.grad(weighted_forward)(params, weights[b * batch_size : (b+1) * batch_size], x_batch, net_forward_binded)
        if feature_vec is None:
            feature_vec = g
        else:
            feature_vec = _add(feature_vec, g)
    # Start from a mid-gray image; feature_vec_mag is the squared norm of
    # the feature vector (sum of squared elements across the pytree).
    base_image = np.zeros([1, 32, 32, 3]) + 0.5
    feature_vec_mag = jax.tree_util.tree_reduce(dumb_f, _multiply(feature_vec, feature_vec))
    # 600 signed-gradient (FGSM-style) steps of size 0.001 w.r.t. the image
    # (argnums = 6 selects `images` in the objective signatures).
    for i in range(600):
        if mode == 'l2':
            # NOTE(review): verify this positional argument list stays in
            # sync with get_mmd's signature (selected_class after images).
            mag, g_combined = jax.value_and_grad(get_mmd, argnums = 6)(params, feature_vec, net_state, net_forward_binded, net_forward_apply, rng, base_image, selected_class, is_training = False, centering = True)
        elif mode == 'cos':
            [cos, aux], g_combined = jax.value_and_grad(get_cos, argnums = 6, has_aux = True)(params, feature_vec, net_state, net_forward_binded, net_forward_apply, rng, base_image, feature_vec_mag, selected_class, is_training = False, centering = True)
        if not flip:
            base_image += 0.001 * jnp.sign(g_combined)
        else:
            base_image -= 0.001 * jnp.sign(g_combined)
        base_image = np.clip(base_image, 0, 1)
        if i% 40 == 0:
            # Periodic progress: residual distance (l2) or cosine similarity.
            if mode == 'l2':
                print('{}, {}'.format(i, feature_vec_mag - mag))
            elif mode == 'cos':
                print('{}, {}'.format(i, cos))
    return base_image[0]
# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()
adversarial_ntk_evolution | adversarial_ntk_evolution-master/utils.py | import functools
import jax
import operator
import numpy as np
class bind(functools.partial):
    """
    A functools.partial variant where Ellipsis (...) among the stored
    positional arguments acts as a placeholder, filled in order by the
    arguments supplied at call time; leftover call arguments are appended.
    """
    def __call__(self, *args, **keywords):
        # Call-time keywords take precedence over the stored ones.
        merged_keywords = {**self.keywords, **keywords}
        remaining = iter(args)
        filled = tuple(next(remaining) if stored is ... else stored for stored in self.args)
        return self.func(*filled, *remaining, **merged_keywords)
def _sub(x, y):
return jax.tree_util.tree_multimap(operator.sub, x, y)
def _add(x, y):
return jax.tree_util.tree_multimap(operator.add, x, y)
def _multiply(x, y):
return jax.tree_util.tree_multimap(operator.mul, x, y)
def get_class_indices(train_labels, samples_per_class, seed = 0, n_classes = 10):
    """Return a class-balanced list of dataset indices.

    train_labels:      1-D tensor of integer class labels (torch; .numpy() is called).
    samples_per_class: number of indices drawn per class, without replacement.
    seed:              numpy legacy RNG seed, for reproducible draws.
    n_classes:         number of classes; each must have at least
                       samples_per_class examples or np.random.choice raises.

    Indices are grouped by class: the first samples_per_class entries belong
    to class 0, the next to class 1, and so on.
    """
    np.random.seed(seed)
    # Hoisted out of the loop: the tensor->ndarray conversion is loop-invariant.
    labels = train_labels.numpy()
    combined_indices = []
    for c in range(n_classes):
        class_indices = np.where(labels == c)[0]
        chosen = np.random.choice(len(class_indices), samples_per_class, replace = False)
        combined_indices.extend(class_indices[chosen])
    return combined_indices
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.