code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
from utils import outputActivation
import pdb
# Customizations
# - DONE Embeddings: linear transform d_feats -> d_model features
# - DONE Generator
# - DONE Batching
# DONE: add social context
# DONE : use maneuvers
# - GeneratorLat and GeneratorLon DONE
# - Embeddings with traj/grid/lat/lon features DONE
# ---------- EMBEDDINGS ----------
class Embeddings(nn.Module):
    """Embed heterogeneous model inputs into a single d_model-wide vector.

    The d_model feature budget is partitioned among the active inputs:
      - d_model//2 features for the social context when src_ngrid > 0
        (a 2D occupancy grid run through a small conv net, or a
        pre-flattened social embedding of size soc_emb_size),
      - 4 features per lon/lat maneuver input when src_lon/src_lat > 0,
      - whatever remains for the ego trajectory features.
    """

    def __init__(self, d_model, src_feats, src_ngrid=0, src_grid=(13, 3), src_lon=0, src_lat=0, soc_emb_size=0):
        super(Embeddings, self).__init__()
        #self.lut = nn.Embedding(vocab, d_model)
        self.d_model = copy.copy(d_model)
        self.traj_emb = None
        self.grid_emb = None
        self.lat_emb = None
        self.lon_emb = None
        self.soc_emb = None
        self.soc_emb_size = soc_emb_size
        # Basically out of the 512 features for d_model encoding we split as:
        #   256 features for ego traj inputs
        #   256 features for social context (occupancy grid) inputs
        # Additionally we may reserve 20 features (3*4+2*4) for maneuvers used as inputs
        # Or just 512 features for traj_emb (eg at the output)
        if src_ngrid > 0:  # handle 2D input features with conv net
            d_model_grid = d_model // 2
            d_model -= d_model_grid
            # We start with [Batch, src_ngrid, 13, 3]
            self.conv1 = torch.nn.Conv2d(src_ngrid, 64, 3)           # => [64, 11, 1]
            self.conv2 = torch.nn.Conv2d(64, 16, (3, 1))             # => [16, 9, 1]
            self.maxpool = torch.nn.MaxPool2d((2, 1), padding=(1, 0))  # => [16, 5, 1]
            self.leaky_relu = torch.nn.LeakyReLU(0.1)
            self.grid_emb = torch.nn.Linear(5, d_model_grid)  # 5 from [16, 5, 1]
            if soc_emb_size > 0:
                # Alternative 1D social input: project a flat social vector.
                self.soc_emb = torch.nn.Linear(soc_emb_size, d_model_grid)  # projection
        if src_lon > 0:
            # 4 embedding features per longitudinal maneuver input.
            d_model_lon = src_lon * 4
            d_model -= d_model_lon
            self.lon_emb = torch.nn.Linear(src_lon, d_model_lon)
        if src_lat > 0:
            # 4 embedding features per lateral maneuver input.
            d_model_lat = src_lat * 4
            d_model -= d_model_lat
            self.lat_emb = torch.nn.Linear(src_lat, d_model_lat)
        # Trajectory embedding takes whatever feature budget is left.
        self.traj_emb = torch.nn.Linear(src_feats, d_model)

    def forward(self, x):
        """Embed a (traj, grid, lon, lat) tuple; inactive inputs are None.

        The tuple packing is a workaround to make nn.Sequential work with
        multiple inputs, cf
        https://discuss.pytorch.org/t/nn-sequential-layers-forward-with-multiple-inputs-error/35591/3
        """
        #x, soc = x[0], x[1]
        traj, grid, lon, lat = x
        emb = self.traj_emb(traj)  # * math.sqrt(self.d_model)
        if grid is not None:
            if len(grid.shape) == 3:  # 1D (pre-flattened) social input
                assert self.soc_emb is not None
                soc_emb = self.soc_emb(grid)  # * math.sqrt(self.d_model)
                emb = torch.cat((emb, soc_emb), dim=-1)
            else:  # 2D occupancy-grid input
                assert self.grid_emb is not None
                ## Apply convolutional social pooling: => [128, 16, 5, 1]
                grid_enc = self.maxpool(self.leaky_relu(self.conv2(self.leaky_relu(self.conv1(grid)))))
                # NOTE(review): squeeze() without a dim also collapses the batch
                # axis when batch size is 1 — confirm batches are always > 1 here.
                grid_enc = torch.squeeze(grid_enc)  # [128, 16, 5]
                grid_emb = self.grid_emb(grid_enc)
                emb = torch.cat((emb, grid_emb), dim=-1)
        if lon is not None:
            assert self.lon_emb is not None
            lon_emb = self.lon_emb(lon)  # * math.sqrt(self.d_model)
            emb = torch.cat((emb, lon_emb), dim=-1)
        if lat is not None:
            assert self.lat_emb is not None
            lat_emb = self.lat_emb(lat)  # * math.sqrt(self.d_model)
            emb = torch.cat((emb, lat_emb), dim=-1)
        #print("EMB:", emb.shape)
        return emb  # * math.sqrt(self.d_model)
        #return self.lut(x) * math.sqrt(self.d_model)
class PositionalEncoding(nn.Module):
    "Implement the PE function."

    def __init__(self, d_model, dropout, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Precompute the sinusoidal table once, using log space for the
        # frequency term to stay numerically stable.
        positions = torch.arange(0., max_len).unsqueeze(1)
        freqs = torch.exp(-(math.log(10000.0) / d_model) * torch.arange(0., d_model, 2))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)  # even dims
        table[:, 1::2] = torch.cos(positions * freqs)  # odd dims
        # Registered as a buffer so it moves with the module but is not trained.
        self.register_buffer('pe', table.unsqueeze(0))

    def forward(self, x):
        # Add the first x.size(1) positional rows; no gradient flows into pe.
        pe_slice = Variable(self.pe[:, :x.size(1)], requires_grad=False)
        return self.dropout(x + pe_slice)
# ---------- COMMON LAYERS for encoder/decoder ----------
def attention(query, key, value, mask=None, dropout=None):
    "Compute 'Scaled Dot Product Attention'"
    d_k = query.size(-1)
    # Similarity of every query against every key, scaled by sqrt(d_k).
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        # Masked positions get -1e9 so softmax sends their weight to ~0.
        scores = scores.masked_fill(mask == 0, -1e9)
    weights = F.softmax(scores, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    # Return both the weighted values and the attention map (for inspection).
    return torch.matmul(weights, value), weights
class MultiHeadedAttention(nn.Module):
    "Multi-head attention: h parallel scaled dot-product heads of size d_model // h."

    def __init__(self, h, d_model, dropout=0.1):
        "Take in model size and number of heads."
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        # We assume d_v always equals d_k.
        self.d_k = d_model // h
        self.h = h
        # Q, K, V input projections plus the final output projection.
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.attn = None
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None):
        "Implements Figure 2"
        if mask is not None:
            mask = mask.unsqueeze(1)  # same mask applied to all h heads
        nbatches = query.size(0)

        def split_heads(proj, tensor):
            # Project, then reshape d_model => h x d_k with heads on dim 1.
            return proj(tensor).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)

        query = split_heads(self.linears[0], query)
        key = split_heads(self.linears[1], key)
        value = split_heads(self.linears[2], value)
        # Attend over all heads in a single batched call.
        x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)
        # Merge the heads back together and apply the output projection.
        merged = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
        return self.linears[-1](merged)
class LayerNorm(nn.Module):
    "Construct a layernorm module (See citation for details)."

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        # Learnable gain and bias, initialised to the identity transform.
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps  # guards against division by zero std

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        return self.a_2 * (x - mu) / (sigma + self.eps) + self.b_2
class SublayerConnection(nn.Module):
    """
    A residual connection followed by a layer norm.
    Note for code simplicity the norm is first as opposed to last.
    """

    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        "Apply residual connection to any sublayer with the same size."
        # XXX return x + self.dropout(sublayer(self.norm(x)))
        # XXX Normalize after residual cnx like in the paper
        residual = x + self.dropout(sublayer(x))
        return self.norm(residual)
class PositionwiseFeedForward(nn.Module):
    "Implements FFN equation."

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        # Expand to d_ff, apply ReLU + dropout, then project back to d_model.
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        hidden = F.relu(self.w_1(x))
        return self.w_2(self.dropout(hidden))
def clones(module, N):
    "Produce N independent deep copies of *module* as a ModuleList."
    layers = nn.ModuleList()
    for _ in range(N):
        layers.append(copy.deepcopy(module))
    return layers
# ---------- ENCODER ----------
class EncoderLayer(nn.Module):
    "Encoder is made up of self-attn and feed forward (defined below)"

    def __init__(self, size, self_attn, feed_forward, dropout):
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        # Two residual sublayers: one around self-attention, one around the FFN.
        self.sublayer = clones(SublayerConnection(size, dropout), 2)
        self.size = size

    def forward(self, x, mask):
        "Follow Figure 1 (left) for connections."
        # Self-attention over the (masked) input, then position-wise feed forward.
        x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
        return self.sublayer[1](x, self.feed_forward)
class Encoder(nn.Module):
    "Core encoder is a stack of N layers"

    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        # N deep-copied layers followed by a final layer norm.
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, mask):
        "Pass the input (and mask) through each layer in turn."
        for layer in self.layers:
            x = layer(x, mask)
        return self.norm(x)
# ---------- DECODER ----------
class DecoderLayer(nn.Module):
    "Decoder is made of self-attn, src-attn, and feed forward (defined below)"

    def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
        super(DecoderLayer, self).__init__()
        self.size = size
        self.self_attn = self_attn    # attention over the decoder's own tokens
        self.src_attn = src_attn      # attention over the encoder memory
        self.feed_forward = feed_forward
        # Three residual sublayers: self-attn, src-attn, feed forward.
        self.sublayer = clones(SublayerConnection(size, dropout), 3)

    def forward(self, x, memory, src_mask, tgt_mask):
        "Follow Figure 1 (right) for connections."
        m = memory
        x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
        x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
        return self.sublayer[2](x, self.feed_forward)
class Decoder(nn.Module):
    "Generic N layer decoder with masking."

    def __init__(self, layer, N):
        super(Decoder, self).__init__()
        # N deep-copied layers followed by a final layer norm.
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, memory, src_mask, tgt_mask):
        for layer in self.layers:
            x = layer(x, memory, src_mask, tgt_mask)
        return self.norm(x)
# ---------- ENCODER/DECODER ----------
class EncoderDecoder(nn.Module):
    """
    A standard Encoder-Decoder architecture. Base for this and many
    other models.
    """

    def __init__(self, encoder, decoder, src_embed, tgt_embed, generator=None, generator_lat=None, generator_lon=None):
        super(EncoderDecoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = src_embed  # embeds (src, grid, lon, lat) tuples
        self.tgt_embed = tgt_embed  # embeds (tgt, None, None, None) tuples
        # Output heads; which ones are set depends on the make_model variant.
        self.generator = generator
        self.generator_lat = generator_lat
        self.generator_lon = generator_lon

    def forward(self, src, tgt, src_mask, tgt_mask, src_grid=None, src_lon=None, src_lat=None):
        "Take in and process masked src and target sequences."
        return self.decode(self.encode(src, src_mask, src_grid, src_lon, src_lat), src_mask,
                           tgt, tgt_mask)

    def encode(self, src, src_mask, src_grid=None, src_lon=None, src_lat=None):
        return self.encoder(self.src_embed((src, src_grid, src_lon, src_lat)), src_mask)

    def decode(self, memory, src_mask, tgt, tgt_mask):
        # tgt carries no grid/lon/lat features, hence the Nones.
        return self.decoder(self.tgt_embed((tgt, None, None, None)), memory, src_mask, tgt_mask)

    #def prepare_infer(self, Ty, batch_size):
    #    self.ys_masks = []
    #    self.Ty = Ty
    #    for i in range(Ty):
    #        ys_mask = np.ones( (i+1, i+1), dtype='uint8')
    #        ys_mask = np.tril(ys_mask, 0)
    #        ys_mask = np.repeat(ys_mask[np.newaxis, :, :], batch_size, axis=0)
    #        ys_mask = torch.from_numpy(ys_mask)
    #        if torch.cuda.is_available():
    #            ys_mask = ys_mask.cuda()
    #        self.ys_masks.append(ys_mask)

    def infer(self, model, src, src_mask, Ty, src_grid=None, src_lon=None, src_lat=None):
        """Autoregressively decode Ty steps, seeded with the last observed position."""
        m, Tx, nx = src.shape
        memory = model.encode(src, src_mask, src_grid, src_lon, src_lat)  # [Batch 128, Tx 16, d_model 512]
        # Seed the decoder with the last observed (x, y) of the input trajectory.
        ys = src[:, -1, 0:2].unsqueeze(1)  # [Batch 128, ys.size(1) 1, X/Y 2]
        for i in range(Ty):
            # Causal (lower-triangular) mask over the tokens decoded so far.
            ys_mask = np.ones((ys.size(1), ys.size(1)), dtype='uint8')
            ys_mask = np.tril(ys_mask, 0)
            ys_mask = np.repeat(ys_mask[np.newaxis, :, :], m, axis=0)
            ys_mask = torch.from_numpy(ys_mask)
            if torch.cuda.is_available():
                ys_mask = ys_mask.cuda()
            #out = model.decode(memory, src_mask, ys, self.ys_masks[i]) # [Batch 128, ys.size(1), d_model 512]
            # Mask is rebuilt each step because the last batch is usually not
            # of size batch_size ...
            out = model.decode(memory, src_mask, ys, ys_mask)  # [Batch, ys.size(1), d_model 512]
            fut_pred = model.generator(out)  # [ys.size(1), Batch 128, gaussian_params 5]
            fut_pred = fut_pred.permute(1, 0, 2)  # [Batch 128, ys.size(1), gaussian_params 5]
            # Feed the predicted mean position back as the next decoder input.
            next_y = fut_pred[:, -1, 0:2].unsqueeze(1)  # [Batch 128, 1, muX/muY 2]
            ys = torch.cat((ys, next_y), dim=1)  # [Batch 128, ys.size(1)+1, 2]
        fut_pred = fut_pred.permute(1, 0, 2)  # [Ty 25, Batch 128, 5]
        return fut_pred
# ---------- GENERATOR: for final output ----------
class Generator(nn.Module):
    "Define standard linear + softmax generation step."

    def __init__(self, d_model, tgt_params):
        super(Generator, self).__init__()
        # Projects d_model features down to the bivariate-gaussian parameters.
        self.proj = nn.Linear(d_model, tgt_params)

    def forward(self, x):
        # params: [batch 128, Ty 25, bivariate gaussian params 5]
        fut_pred = self.proj(x)
        # fut_pred: [Ty 25, batch 128, 5] via permute
        fut_pred = fut_pred.permute(1, 0, 2)
        fut_pred = outputActivation(fut_pred)
        # fut_pred: [Ty 25, batch 128, bivariate gaussian params 5] via outputActivation which enforces pred constraints
        return fut_pred
        #return F.log_softmax(self.proj(x), dim=-1)
class GeneratorLat(nn.Module):
    "Linear + softmax head predicting the lateral maneuver class."

    def __init__(self, d_model, tgt_lat_classes):
        super(GeneratorLat, self).__init__()
        # 3 classes: right, left, none
        self.proj = nn.Linear(d_model, tgt_lat_classes)

    def forward(self, x):
        probs = F.softmax(self.proj(x), dim=-1)  # [Batch 128, Ty, 3]
        # Keep only the prediction at the final decoding step.
        last_step = probs[:, -1, :]
        return torch.squeeze(last_step)  # [Batch 128, 3]
class GeneratorLon(nn.Module):
    "Linear + softmax head predicting the longitudinal maneuver class."

    def __init__(self, d_model, tgt_lon_classes):
        super(GeneratorLon, self).__init__()
        # 2 classes: braking or not.
        # BUG FIX: the original called nn.Linear(d_model, 2, tgt_lon_classes),
        # which hard-coded 2 output features and passed tgt_lon_classes as the
        # (boolean) bias argument — the requested class count was silently
        # ignored for any value other than 2.
        self.proj = nn.Linear(d_model, tgt_lon_classes)

    def forward(self, x):
        lon_pred = F.softmax(self.proj(x), dim=-1)  # [Batch, Ty, classes]
        lon_pred = lon_pred[:, -1, :]  # keep only the final decoding step
        lon_pred = torch.squeeze(lon_pred)
        return lon_pred  # [Batch 128, 2]
# ---------- FULL MODEL ----------
# This model does not use lon/lat features as inputs
# But predicts lon/lat maneuvers
# DEPRECATED
def make_model_cls(src_feats, tgt_feats, tgt_lon_classes=2, tgt_lat_classes=3,
                   N=6, d_model=512, d_ff=2048, h=8, dropout=0.1,
                   src_ngrid=0, src_grid=(13, 3), src_soc_emb_size=0):
    """Construct an encoder/decoder that predicts lon/lat maneuvers (DEPRECATED).

    This variant does not use lon/lat features as inputs; it attaches the
    GeneratorLat/GeneratorLon classification heads instead of a trajectory head.
    """
    c = copy.deepcopy
    attn = MultiHeadedAttention(h, d_model)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    position = PositionalEncoding(d_model, dropout)
    model = EncoderDecoder(
        Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
        Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),
        # BUG FIX: src_soc_emb_size was previously passed positionally and
        # landed in Embeddings' src_lon slot; pass it by keyword.
        nn.Sequential(Embeddings(d_model, src_feats, src_ngrid, src_grid,
                                 soc_emb_size=src_soc_emb_size), c(position)),
        nn.Sequential(Embeddings(d_model, tgt_feats), c(position)),
        # BUG FIX: the class counts were swapped — GeneratorLat received
        # tgt_lon_classes and GeneratorLon received tgt_lat_classes.
        generator_lat=GeneratorLat(d_model, tgt_lat_classes),
        generator_lon=GeneratorLon(d_model, tgt_lon_classes))
    # This was important from their code.
    # Initialize parameters with Glorot / fan_avg.
    for p in model.parameters():
        if p.dim() > 1:
            # xavier_uniform is a deprecated alias; use the in-place variant.
            nn.init.xavier_uniform_(p)
    return model
# This model uses lon/lat features as inputs
# And predicts traj
#def make_model(src_feats, tgt_feats, tgt_params=5, N=6, d_model=512, d_ff=2048, h=8, dropout=0.1,
#def make_model(src_feats, tgt_feats, tgt_params=5, N=1, d_model=256, d_ff=1024, h=1, dropout=0.1,
def make_model(src_feats, tgt_feats, tgt_params=5, N=1, d_model=256, d_ff=256, h=4, dropout=0.1,
               src_ngrid=0, src_grid=(13, 3),  # for 2D image like input features
               src_soc_emb_size=0,
               src_lon=0, src_lat=0):  # additional input features (TODO: list for genericity)
    """Construct a trajectory-predicting model from hyperparameters.

    This variant takes lon/lat maneuver features as inputs and predicts the
    trajectory through a single Generator head (tgt_params gaussian params).
    """
    c = copy.deepcopy
    attn = MultiHeadedAttention(h, d_model)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    position = PositionalEncoding(d_model, dropout)
    model = EncoderDecoder(
        Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
        Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),
        nn.Sequential(Embeddings(d_model, src_feats, src_ngrid, src_grid, src_lon, src_lat, src_soc_emb_size), c(position)),
        nn.Sequential(Embeddings(d_model, tgt_feats), c(position)),
        generator=Generator(d_model, tgt_params))
    # This was important from their code.
    # Initialize parameters with Glorot / fan_avg.
    for p in model.parameters():
        if p.dim() > 1:
            # FIX: nn.init.xavier_uniform is a deprecated alias; use the
            # in-place variant kept by current PyTorch releases.
            nn.init.xavier_uniform_(p)
    return model
# ---------- BATCH utility ----------
class Batch:
    "Object for holding a batch of data with mask during training."

    def __init__(self):
        # Encoder-side inputs and mask.
        self.src = None
        self.src_grid = None
        self.src_mask = None
        self.src_lon = None
        self.src_lat = None
        # Decoder input, causal mask and expected output (teacher forcing).
        self.trg = None
        self.trg_mask = None
        self.trg_y = None

    def transfo(self, source, target=None, source_grid=None, source_lon=None, source_lat=None):
        """Convert time-major inputs to batch-major tensors and build the masks."""
        # We want [Batch, Tx, Nx]
        src = copy.copy(source)
        src = src.permute(1, 0, 2)
        self.src = src
        m, Tx, _ = src.shape
        # [Batch, Tx, 13, 3] for grid or [Batch, Tx, 80] for grid_soc
        src_grid = copy.copy(source_grid)
        self.src_grid = src_grid
        # encoder has full visibility on all inputs
        src_mask = np.ones((1, Tx), dtype='uint8')
        #src_mask[:,0] = 0
        src_mask = np.repeat(src_mask[np.newaxis, :, :], m, axis=0)
        self.src_mask = torch.from_numpy(src_mask)
        if source_lon is not None:
            # Broadcast the per-sequence maneuver value along the time axis.
            src_lon = copy.copy(source_lon)
            src_lon = torch.unsqueeze(src_lon, dim=1)
            src_lon = torch.repeat_interleave(src_lon, Tx, dim=1)
            self.src_lon = src_lon
        else:
            self.src_lon = None
        if source_lat is not None:
            src_lat = copy.copy(source_lat)
            src_lat = torch.unsqueeze(src_lat, dim=1)
            src_lat = torch.repeat_interleave(src_lat, Tx, dim=1)
            self.src_lat = src_lat
        else:
            self.src_lat = None
        # Total number of input tokens in the batch (batch_size * Tx).
        self.ntokens = torch.from_numpy(np.array([m * Tx]))
        # We want [Batch, Ty, Ny]
        if target is not None:
            trg = copy.copy(target)
            trg = trg.permute(1, 0, 2)
            # Create a fake Transformer "start symbol/step" by repeating "end of input" in beginning of trg
            # The "start symbol" is pretty common for NMT tasks; do something similar here
            trg = torch.cat((src[:, -1, :].unsqueeze(1), trg), dim=1)
            my, Ty, ny = trg.shape
            assert m == my, "src and trg batch sizes do not match"
            # ensure sequentiality between input and output of decoder
            # y(n) depends on y(1)...y(n-1)
            self.trg = trg[:, :-1, :]   # input of DECODER
            self.trg_y = trg[:, 1:, :]  # expected output of DECODER
            # otherwise the decoder just "learns" to copy the input ...
            # with quickly a loss of 0 during training .....
            # decoder at step n, has visibility on y(1)..y(n-1)
            trg_mask = np.ones((Ty - 1, Ty - 1), dtype='uint8')
            trg_mask = np.tril(trg_mask, 0)
            trg_mask = np.repeat(trg_mask[np.newaxis, :, :], m, axis=0)
            self.trg_mask = torch.from_numpy(trg_mask)
            if torch.cuda.is_available():
                self.trg = self.trg.cuda()
                self.trg_y = self.trg_y.cuda()
                self.trg_mask = self.trg_mask.cuda()
        else:
            self.trg = None
            self.trg_y = None
            self.trg_mask = None
        #print("SRC:", self.src.shape)
        #if self.src_grid is not None:
        #    print("SRC_GRID:", self.src_grid.shape)
        #print("TRG:", self.trg.shape)
        #print("TRG_Y:", self.trg_y.shape)
        if torch.cuda.is_available():
            self.src = self.src.cuda()
            self.src_mask = self.src_mask.cuda()
            if self.src_lon is not None:
                self.src_lon = self.src_lon.cuda()
            if self.src_lat is not None:
                self.src_lat = self.src_lat.cuda()
            if self.src_grid is not None:
                self.src_grid = self.src_grid.cuda()
| [
"torch.nn.Dropout",
"torch.sin",
"math.sqrt",
"torch.from_numpy",
"utils.outputActivation",
"math.log",
"numpy.array",
"torch.cos",
"torch.cuda.is_available",
"torch.squeeze",
"copy.deepcopy",
"torch.nn.init.xavier_uniform",
"copy.copy",
"torch.repeat_interleave",
"torch.nn.functional.so... | [((4461, 4486), 'torch.nn.functional.softmax', 'F.softmax', (['scores'], {'dim': '(-1)'}), '(scores, dim=-1)\n', (4470, 4486), True, 'import torch.nn.functional as F\n'), ((735, 753), 'copy.copy', 'copy.copy', (['d_model'], {}), '(d_model)\n', (744, 753), False, 'import math, copy, time\n'), ((2080, 2115), 'torch.nn.Linear', 'torch.nn.Linear', (['src_feats', 'd_model'], {}), '(src_feats, d_model)\n', (2095, 2115), False, 'import torch\n'), ((3585, 3606), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (3595, 3606), True, 'import torch.nn as nn\n'), ((3673, 3702), 'torch.zeros', 'torch.zeros', (['max_len', 'd_model'], {}), '(max_len, d_model)\n', (3684, 3702), False, 'import torch\n'), ((3865, 3895), 'torch.sin', 'torch.sin', (['(position * div_term)'], {}), '(position * div_term)\n', (3874, 3895), False, 'import torch\n'), ((3912, 3942), 'torch.cos', 'torch.cos', (['(position * div_term)'], {}), '(position * div_term)\n', (3921, 3942), False, 'import torch\n'), ((4367, 4381), 'math.sqrt', 'math.sqrt', (['d_k'], {}), '(d_k)\n', (4376, 4381), False, 'import math, copy, time\n'), ((4549, 4576), 'torch.matmul', 'torch.matmul', (['p_attn', 'value'], {}), '(p_attn, value)\n', (4561, 4576), False, 'import torch\n'), ((4956, 4977), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (4966, 4977), True, 'import torch.nn as nn\n'), ((6435, 6454), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (6445, 6454), True, 'import torch.nn as nn\n'), ((6900, 6924), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_ff'], {}), '(d_model, d_ff)\n', (6909, 6924), True, 'import torch.nn as nn\n'), ((6938, 6962), 'torch.nn.Linear', 'nn.Linear', (['d_ff', 'd_model'], {}), '(d_ff, d_model)\n', (6947, 6962), True, 'import torch.nn as nn\n'), ((6980, 6999), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (6990, 6999), True, 'import torch.nn as nn\n'), ((12142, 12172), 
'torch.nn.Linear', 'nn.Linear', (['d_model', 'tgt_params'], {}), '(d_model, tgt_params)\n', (12151, 12172), True, 'import torch.nn as nn\n'), ((12384, 12410), 'utils.outputActivation', 'outputActivation', (['fut_pred'], {}), '(fut_pred)\n', (12400, 12410), False, 'from utils import outputActivation\n'), ((12808, 12843), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'tgt_lat_classes'], {}), '(d_model, tgt_lat_classes)\n', (12817, 12843), True, 'import torch.nn as nn\n'), ((12979, 13002), 'torch.squeeze', 'torch.squeeze', (['lat_pred'], {}), '(lat_pred)\n', (12992, 13002), False, 'import torch\n'), ((13253, 13291), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(2)', 'tgt_lon_classes'], {}), '(d_model, 2, tgt_lon_classes)\n', (13262, 13291), True, 'import torch.nn as nn\n'), ((13406, 13429), 'torch.squeeze', 'torch.squeeze', (['lon_pred'], {}), '(lon_pred)\n', (13419, 13429), False, 'import torch\n'), ((16390, 16407), 'copy.copy', 'copy.copy', (['source'], {}), '(source)\n', (16399, 16407), False, 'import math, copy, time\n'), ((16556, 16578), 'copy.copy', 'copy.copy', (['source_grid'], {}), '(source_grid)\n', (16565, 16578), False, 'import math, copy, time\n'), ((16666, 16697), 'numpy.ones', 'np.ones', (['(1, Tx)'], {'dtype': '"""uint8"""'}), "((1, Tx), dtype='uint8')\n", (16673, 16697), True, 'import numpy as np\n'), ((16732, 16780), 'numpy.repeat', 'np.repeat', (['src_mask[np.newaxis, :, :]', 'm'], {'axis': '(0)'}), '(src_mask[np.newaxis, :, :], m, axis=0)\n', (16741, 16780), True, 'import numpy as np\n'), ((16799, 16825), 'torch.from_numpy', 'torch.from_numpy', (['src_mask'], {}), '(src_mask)\n', (16815, 16825), False, 'import torch\n'), ((18742, 18767), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (18765, 18767), False, 'import torch\n'), ((1392, 1425), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['src_ngrid', '(64)', '(3)'], {}), '(src_ngrid, 64, 3)\n', (1407, 1425), False, 'import torch\n'), ((1459, 1490), 'torch.nn.Conv2d', 
'torch.nn.Conv2d', (['(64)', '(16)', '(3, 1)'], {}), '(64, 16, (3, 1))\n', (1474, 1490), False, 'import torch\n'), ((1525, 1567), 'torch.nn.MaxPool2d', 'torch.nn.MaxPool2d', (['(2, 1)'], {'padding': '(1, 0)'}), '((2, 1), padding=(1, 0))\n', (1543, 1567), False, 'import torch\n'), ((1604, 1627), 'torch.nn.LeakyReLU', 'torch.nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (1622, 1627), False, 'import torch\n'), ((1647, 1679), 'torch.nn.Linear', 'torch.nn.Linear', (['(5)', 'd_model_grid'], {}), '(5, d_model_grid)\n', (1662, 1679), False, 'import torch\n'), ((1893, 1930), 'torch.nn.Linear', 'torch.nn.Linear', (['src_lon', 'd_model_lon'], {}), '(src_lon, d_model_lon)\n', (1908, 1930), False, 'import torch\n'), ((2023, 2060), 'torch.nn.Linear', 'torch.nn.Linear', (['src_lat', 'd_model_lat'], {}), '(src_lat, d_model_lat)\n', (2038, 2060), False, 'import torch\n'), ((3089, 3122), 'torch.cat', 'torch.cat', (['(emb, lon_emb)'], {'dim': '(-1)'}), '((emb, lon_emb), dim=-1)\n', (3098, 3122), False, 'import torch\n'), ((3249, 3282), 'torch.cat', 'torch.cat', (['(emb, lat_emb)'], {'dim': '(-1)'}), '((emb, lat_emb), dim=-1)\n', (3258, 3282), False, 'import torch\n'), ((4888, 4915), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_model'], {}), '(d_model, d_model)\n', (4897, 4915), True, 'import torch.nn as nn\n'), ((5906, 5926), 'torch.ones', 'torch.ones', (['features'], {}), '(features)\n', (5916, 5926), False, 'import torch\n'), ((5954, 5975), 'torch.zeros', 'torch.zeros', (['features'], {}), '(features)\n', (5965, 5975), False, 'import torch\n'), ((7156, 7177), 'copy.deepcopy', 'copy.deepcopy', (['module'], {}), '(module)\n', (7169, 7177), False, 'import math, copy, time\n'), ((11093, 11112), 'numpy.tril', 'np.tril', (['ys_mask', '(0)'], {}), '(ys_mask, 0)\n', (11100, 11112), True, 'import numpy as np\n'), ((11126, 11173), 'numpy.repeat', 'np.repeat', (['ys_mask[np.newaxis, :, :]', 'm'], {'axis': '(0)'}), '(ys_mask[np.newaxis, :, :], m, axis=0)\n', (11135, 11173), True, 'import numpy 
as np\n'), ((11187, 11212), 'torch.from_numpy', 'torch.from_numpy', (['ys_mask'], {}), '(ys_mask)\n', (11203, 11212), False, 'import torch\n'), ((11219, 11244), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11242, 11244), False, 'import torch\n'), ((11768, 11798), 'torch.cat', 'torch.cat', (['(ys, next_y)'], {'dim': '(1)'}), '((ys, next_y), dim=1)\n', (11777, 11798), False, 'import torch\n'), ((14584, 14609), 'torch.nn.init.xavier_uniform', 'nn.init.xavier_uniform', (['p'], {}), '(p)\n', (14606, 14609), True, 'import torch.nn as nn\n'), ((15911, 15936), 'torch.nn.init.xavier_uniform', 'nn.init.xavier_uniform', (['p'], {}), '(p)\n', (15933, 15936), True, 'import torch.nn as nn\n'), ((16869, 16890), 'copy.copy', 'copy.copy', (['source_lon'], {}), '(source_lon)\n', (16878, 16890), False, 'import math, copy, time\n'), ((16904, 16935), 'torch.unsqueeze', 'torch.unsqueeze', (['src_lon'], {'dim': '(1)'}), '(src_lon, dim=1)\n', (16919, 16935), False, 'import torch\n'), ((16949, 16992), 'torch.repeat_interleave', 'torch.repeat_interleave', (['src_lon', 'Tx'], {'dim': '(1)'}), '(src_lon, Tx, dim=1)\n', (16972, 16992), False, 'import torch\n'), ((17093, 17114), 'copy.copy', 'copy.copy', (['source_lat'], {}), '(source_lat)\n', (17102, 17114), False, 'import math, copy, time\n'), ((17128, 17159), 'torch.unsqueeze', 'torch.unsqueeze', (['src_lat'], {'dim': '(1)'}), '(src_lat, dim=1)\n', (17143, 17159), False, 'import torch\n'), ((17173, 17216), 'torch.repeat_interleave', 'torch.repeat_interleave', (['src_lat', 'Tx'], {'dim': '(1)'}), '(src_lat, Tx, dim=1)\n', (17196, 17216), False, 'import torch\n'), ((17310, 17328), 'numpy.array', 'np.array', (['[m * Tx]'], {}), '([m * Tx])\n', (17318, 17328), True, 'import numpy as np\n'), ((17391, 17408), 'copy.copy', 'copy.copy', (['target'], {}), '(target)\n', (17400, 17408), False, 'import math, copy, time\n'), ((18162, 18202), 'numpy.ones', 'np.ones', (['(Ty - 1, Ty - 1)'], {'dtype': '"""uint8"""'}), "((Ty - 1, 
Ty - 1), dtype='uint8')\n", (18169, 18202), True, 'import numpy as np\n'), ((18212, 18232), 'numpy.tril', 'np.tril', (['trg_mask', '(0)'], {}), '(trg_mask, 0)\n', (18219, 18232), True, 'import numpy as np\n'), ((18247, 18295), 'numpy.repeat', 'np.repeat', (['trg_mask[np.newaxis, :, :]', 'm'], {'axis': '(0)'}), '(trg_mask[np.newaxis, :, :], m, axis=0)\n', (18256, 18295), True, 'import numpy as np\n'), ((18315, 18341), 'torch.from_numpy', 'torch.from_numpy', (['trg_mask'], {}), '(trg_mask)\n', (18331, 18341), False, 'import torch\n'), ((18349, 18374), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (18372, 18374), False, 'import torch\n'), ((1744, 1787), 'torch.nn.Linear', 'torch.nn.Linear', (['soc_emb_size', 'd_model_grid'], {}), '(soc_emb_size, d_model_grid)\n', (1759, 1787), False, 'import torch\n'), ((2580, 2613), 'torch.cat', 'torch.cat', (['(emb, soc_emb)'], {'dim': '(-1)'}), '((emb, soc_emb), dim=-1)\n', (2589, 2613), False, 'import torch\n'), ((2840, 2863), 'torch.squeeze', 'torch.squeeze', (['grid_enc'], {}), '(grid_enc)\n', (2853, 2863), False, 'import torch\n'), ((2928, 2962), 'torch.cat', 'torch.cat', (['(emb, grid_emb)'], {'dim': '(-1)'}), '((emb, grid_emb), dim=-1)\n', (2937, 2962), False, 'import torch\n'), ((3716, 3742), 'torch.arange', 'torch.arange', (['(0.0)', 'max_len'], {}), '(0.0, max_len)\n', (3728, 3742), False, 'import torch\n'), ((3778, 3807), 'torch.arange', 'torch.arange', (['(0.0)', 'd_model', '(2)'], {}), '(0.0, d_model, 2)\n', (3790, 3807), False, 'import torch\n'), ((3819, 3836), 'math.log', 'math.log', (['(10000.0)'], {}), '(10000.0)\n', (3827, 3836), False, 'import math, copy, time\n')] |
import numpy as np
import time
import ray
import ray.autoscaler.sdk
from ray._private.test_utils import Semaphore
import json
import os
from time import perf_counter
from tqdm import trange, tqdm
# Scale limits for the stress tests below.
MAX_ARGS = 10000                 # arguments passed to one task in test_many_args
MAX_RETURNS = 3000               # num_returns of one task in test_many_returns
MAX_RAY_GET_ARGS = 10000         # refs fetched by a single ray.get in test_ray_get_args
MAX_QUEUED_TASKS = 1_000_000     # tasks queued behind blocked workers
MAX_RAY_GET_SIZE = 100 * 2**30   # object size (bytes) for test_large_object
def assert_no_leaks():
    """Check that all cluster resources are available again (nothing leaked)."""
    expected = ray.cluster_resources()
    actual = ray.available_resources()
    # Drop the memory counters before comparing the remaining resources.
    for key in ("memory", "object_store_memory"):
        expected.pop(key)
        actual.pop(key)
    assert expected == actual, (expected, actual)
def test_many_args():
    """Pass MAX_ARGS list arguments to a single task and verify the total sum."""

    @ray.remote
    def sum_args(*args):
        return sum(sum(arg) for arg in args)

    payloads = [[1] * 10000 for _ in range(MAX_ARGS)]
    total = ray.get(sum_args.remote(*payloads))
    assert total == MAX_ARGS * 10000
def test_many_returns():
    """Return MAX_RETURNS separate objects from one task and verify each one."""

    @ray.remote(num_returns=MAX_RETURNS)
    def f():
        outputs = [list(range(10000)) for _ in range(MAX_RETURNS)]
        return tuple(outputs)

    returned_refs = f.remote()
    assert len(returned_refs) == MAX_RETURNS
    expected = list(range(10000))
    for ref in returned_refs:
        assert ray.get(ref) == expected
def test_ray_get_args():
    """Fetch MAX_RAY_GET_ARGS refs with one ray.get, for lists and numpy arrays."""

    def with_dese():
        # Python lists must be deserialized on get.
        print("Putting test objects:")
        refs = []
        for _ in trange(MAX_RAY_GET_ARGS):
            obj = list(range(10000))
            refs.append(ray.put(obj))
        print("Getting objects")
        results = ray.get(refs)
        assert len(results) == MAX_RAY_GET_ARGS
        print("Asserting correctness")
        for obj in tqdm(results):
            expected = list(range(10000))
            assert obj == expected

    def with_zero_copy():
        # Numpy arrays take the zero-copy read path (hence the function name).
        print("Putting test objects:")
        refs = []
        for _ in trange(MAX_RAY_GET_ARGS):
            obj = np.arange(10000)
            refs.append(ray.put(obj))
        print("Getting objects")
        results = ray.get(refs)
        assert len(results) == MAX_RAY_GET_ARGS
        print("Asserting correctness")
        for obj in tqdm(results):
            expected = np.arange(10000)
            assert (obj == expected).all()

    with_dese()
    print("Done with dese")
    with_zero_copy()
    print("Done with zero copy")
def test_many_queued_tasks():
    """Queue MAX_QUEUED_TASKS tasks behind blocked workers, then drain them all."""
    sema = Semaphore.remote(0)

    @ray.remote(num_cpus=1)
    def block():
        # Holds one CPU until the semaphore is released.
        ray.get(sema.acquire.remote())

    @ray.remote(num_cpus=1)
    def f():
        pass

    num_cpus = int(ray.cluster_resources()["CPU"])
    # Occupy every CPU so that all f() submissions stay queued.
    blocked_tasks = []
    for _ in range(num_cpus):
        blocked_tasks.append(block.remote())
    print("Submitting many tasks")
    pending_tasks = []
    for _ in trange(MAX_QUEUED_TASKS):
        pending_tasks.append(f.remote())
    # Make sure all the tasks can actually run.
    for _ in range(num_cpus):
        sema.release.remote()
    print("Unblocking tasks")
    for ref in tqdm(pending_tasks):
        assert ray.get(ref) is None
def test_large_object():
    """Round-trip a MAX_RAY_GET_SIZE-byte numpy array through the object store."""
    print("Generating object")
    payload = np.zeros(MAX_RAY_GET_SIZE, dtype=np.int8)
    print("Putting object")
    ref = ray.put(payload)
    # Drop the local copy so only the object-store copy remains.
    del payload
    print("Getting object")
    fetched = ray.get(ref)
    assert fetched[0] == 0
    assert fetched[-1] == 0
# ---------- main driver: run every stress test, time it, report results ----------
ray.init(address="auto")

args_start = perf_counter()
test_many_args()
args_end = perf_counter()

# Sleep between tests to let the cluster settle before checking for leaks.
time.sleep(5)
assert_no_leaks()
print("Finished many args")

returns_start = perf_counter()
test_many_returns()
returns_end = perf_counter()

time.sleep(5)
assert_no_leaks()
print("Finished many returns")

get_start = perf_counter()
test_ray_get_args()
get_end = perf_counter()

time.sleep(5)
assert_no_leaks()
print("Finished ray.get on many objects")

queued_start = perf_counter()
test_many_queued_tasks()
queued_end = perf_counter()

time.sleep(5)
assert_no_leaks()
print("Finished queueing many tasks")

large_object_start = perf_counter()
test_large_object()
large_object_end = perf_counter()

time.sleep(5)
assert_no_leaks()
print("Done")

args_time = args_end - args_start
returns_time = returns_end - returns_start
get_time = get_end - get_start
queued_time = queued_end - queued_start
large_object_time = large_object_end - large_object_start

print(f"Many args time: {args_time} ({MAX_ARGS} args)")
print(f"Many returns time: {returns_time} ({MAX_RETURNS} returns)")
print(f"Ray.get time: {get_time} ({MAX_RAY_GET_ARGS} args)")
print(f"Queued task time: {queued_time} ({MAX_QUEUED_TASKS} tasks)")
print(f"Ray.get large object time: {large_object_time} "
      f"({MAX_RAY_GET_SIZE} bytes)")

if "TEST_OUTPUT_JSON" in os.environ:
    results = {
        "args_time": args_time,
        "num_args": MAX_ARGS,
        "returns_time": returns_time,
        "num_returns": MAX_RETURNS,
        # BUG FIX: the original wrote MAX_RAY_GET_ARGS into "get_time",
        # discarding the measured duration; report both the time and count.
        "get_time": get_time,
        "num_get_args": MAX_RAY_GET_ARGS,
        "queued_time": queued_time,
        "num_queued": MAX_QUEUED_TASKS,
        "large_object_time": large_object_time,
        "large_object_size": MAX_RAY_GET_SIZE,
        "success": "1"
    }
    # FIX: use a context manager so the results file is flushed and closed.
    with open(os.environ["TEST_OUTPUT_JSON"], "w") as out_file:
        json.dump(results, out_file)
| [
"ray.init",
"ray.cluster_resources",
"ray.get",
"numpy.arange",
"tqdm.tqdm",
"time.perf_counter",
"time.sleep",
"ray._private.test_utils.Semaphore.remote",
"numpy.zeros",
"ray.put",
"ray.remote",
"ray.available_resources",
"tqdm.trange",
"json.dump"
] | [((3347, 3371), 'ray.init', 'ray.init', ([], {'address': '"""auto"""'}), "(address='auto')\n", (3355, 3371), False, 'import ray\n'), ((3386, 3400), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (3398, 3400), False, 'from time import perf_counter\n'), ((3429, 3443), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (3441, 3443), False, 'from time import perf_counter\n'), ((3445, 3458), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3455, 3458), False, 'import time\n'), ((3522, 3536), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (3534, 3536), False, 'from time import perf_counter\n'), ((3571, 3585), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (3583, 3585), False, 'from time import perf_counter\n'), ((3587, 3600), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3597, 3600), False, 'import time\n'), ((3663, 3677), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (3675, 3677), False, 'from time import perf_counter\n'), ((3708, 3722), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (3720, 3722), False, 'from time import perf_counter\n'), ((3724, 3737), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3734, 3737), False, 'import time\n'), ((3814, 3828), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (3826, 3828), False, 'from time import perf_counter\n'), ((3867, 3881), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (3879, 3881), False, 'from time import perf_counter\n'), ((3883, 3896), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3893, 3896), False, 'import time\n'), ((3975, 3989), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (3987, 3989), False, 'from time import perf_counter\n'), ((4029, 4043), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (4041, 4043), False, 'from time import perf_counter\n'), ((4045, 4058), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4055, 4058), False, 'import time\n'), ((356, 379), 'ray.cluster_resources', 'ray.cluster_resources', 
([], {}), '()\n', (377, 379), False, 'import ray\n'), ((394, 419), 'ray.available_resources', 'ray.available_resources', ([], {}), '()\n', (417, 419), False, 'import ray\n'), ((882, 917), 'ray.remote', 'ray.remote', ([], {'num_returns': 'MAX_RETURNS'}), '(num_returns=MAX_RETURNS)\n', (892, 917), False, 'import ray\n'), ((2400, 2419), 'ray._private.test_utils.Semaphore.remote', 'Semaphore.remote', (['(0)'], {}), '(0)\n', (2416, 2419), False, 'from ray._private.test_utils import Semaphore\n'), ((2426, 2448), 'ray.remote', 'ray.remote', ([], {'num_cpus': '(1)'}), '(num_cpus=1)\n', (2436, 2448), False, 'import ray\n'), ((2511, 2533), 'ray.remote', 'ray.remote', ([], {'num_cpus': '(1)'}), '(num_cpus=1)\n', (2521, 2533), False, 'import ray\n'), ((2782, 2806), 'tqdm.trange', 'trange', (['MAX_QUEUED_TASKS'], {}), '(MAX_QUEUED_TASKS)\n', (2788, 2806), False, 'from tqdm import trange, tqdm\n'), ((3004, 3023), 'tqdm.tqdm', 'tqdm', (['pending_tasks'], {}), '(pending_tasks)\n', (3008, 3023), False, 'from tqdm import trange, tqdm\n'), ((3129, 3170), 'numpy.zeros', 'np.zeros', (['MAX_RAY_GET_SIZE'], {'dtype': 'np.int8'}), '(MAX_RAY_GET_SIZE, dtype=np.int8)\n', (3137, 3170), True, 'import numpy as np\n'), ((3209, 3221), 'ray.put', 'ray.put', (['obj'], {}), '(obj)\n', (3216, 3221), False, 'import ray\n'), ((3276, 3288), 'ray.get', 'ray.get', (['ref'], {}), '(ref)\n', (3283, 3288), False, 'import ray\n'), ((5136, 5164), 'json.dump', 'json.dump', (['results', 'out_file'], {}), '(results, out_file)\n', (5145, 5164), False, 'import json\n'), ((1255, 1267), 'ray.get', 'ray.get', (['ref'], {}), '(ref)\n', (1262, 1267), False, 'import ray\n'), ((1421, 1445), 'tqdm.trange', 'trange', (['MAX_RAY_GET_ARGS'], {}), '(MAX_RAY_GET_ARGS)\n', (1427, 1445), False, 'from tqdm import trange, tqdm\n'), ((1574, 1587), 'ray.get', 'ray.get', (['refs'], {}), '(refs)\n', (1581, 1587), False, 'import ray\n'), ((1695, 1708), 'tqdm.tqdm', 'tqdm', (['results'], {}), '(results)\n', (1699, 1708), False, 'from 
tqdm import trange, tqdm\n'), ((1888, 1912), 'tqdm.trange', 'trange', (['MAX_RAY_GET_ARGS'], {}), '(MAX_RAY_GET_ARGS)\n', (1894, 1912), False, 'from tqdm import trange, tqdm\n'), ((2039, 2052), 'ray.get', 'ray.get', (['refs'], {}), '(refs)\n', (2046, 2052), False, 'import ray\n'), ((2160, 2173), 'tqdm.tqdm', 'tqdm', (['results'], {}), '(results)\n', (2164, 2173), False, 'from tqdm import trange, tqdm\n'), ((1932, 1948), 'numpy.arange', 'np.arange', (['(10000)'], {}), '(10000)\n', (1941, 1948), True, 'import numpy as np\n'), ((2198, 2214), 'numpy.arange', 'np.arange', (['(10000)'], {}), '(10000)\n', (2207, 2214), True, 'import numpy as np\n'), ((2580, 2603), 'ray.cluster_resources', 'ray.cluster_resources', ([], {}), '()\n', (2601, 2603), False, 'import ray\n'), ((3040, 3052), 'ray.get', 'ray.get', (['ref'], {}), '(ref)\n', (3047, 3052), False, 'import ray\n'), ((1508, 1520), 'ray.put', 'ray.put', (['obj'], {}), '(obj)\n', (1515, 1520), False, 'import ray\n'), ((1973, 1985), 'ray.put', 'ray.put', (['obj'], {}), '(obj)\n', (1980, 1985), False, 'import ray\n')] |
import cv2
import numpy as np
import matplotlib.pyplot as plt
IMAGE = "b&w2.jpg"
prototxt = "./Models/colorization_deploy_v2.prototxt.txt"
model = "./Models/colorization_release_v2.caffemodel"
points = "./Models/pts_in_hull.npy"
image = "./input_images/"+IMAGE
net = cv2.dnn.readNetFromCaffe(prototxt, model)
pts = np.load(points)
class8 = net.getLayerId("class8_ab")
conv8 = net.getLayerId("conv8_313_rh")
pts = pts.transpose().reshape(2, 313, 1, 1)
net.getLayer(class8).blobs = [pts.astype("float32")]
net.getLayer(conv8).blobs = [np.full([1, 313], 2.606, dtype="float32")]
image = cv2.imread(image)
image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
image = cv2.cvtColor(image,cv2.COLOR_GRAY2RGB)
scaled = image.astype("float32") / 255.0
lab = cv2.cvtColor(scaled, cv2.COLOR_RGB2LAB)
resized = cv2.resize(lab, (224, 224))
L = cv2.split(resized)[0]
L -= 50
net.setInput(cv2.dnn.blobFromImage(L))
ab = net.forward()[0, :, :, :].transpose((1, 2, 0))
ab = cv2.resize(ab, (image.shape[1], image.shape[0]))
L = cv2.split(lab)[0]
colorized = np.concatenate((L[:, :, np.newaxis], ab), axis=2)
colorized = cv2.cvtColor(colorized, cv2.COLOR_LAB2RGB)
colorized = np.clip(colorized, 0, 1)
colorized = (255 * colorized).astype("uint8")
plt.imshow(image)
plt.show()
plt.imshow(colorized)
plt.show()
plt.axis("off")
| [
"numpy.clip",
"matplotlib.pyplot.imshow",
"cv2.dnn.blobFromImage",
"numpy.full",
"cv2.dnn.readNetFromCaffe",
"cv2.cvtColor",
"numpy.concatenate",
"cv2.split",
"matplotlib.pyplot.axis",
"cv2.resize",
"numpy.load",
"cv2.imread",
"matplotlib.pyplot.show"
] | [((282, 323), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (['prototxt', 'model'], {}), '(prototxt, model)\n', (306, 323), False, 'import cv2\n'), ((331, 346), 'numpy.load', 'np.load', (['points'], {}), '(points)\n', (338, 346), True, 'import numpy as np\n'), ((610, 627), 'cv2.imread', 'cv2.imread', (['image'], {}), '(image)\n', (620, 627), False, 'import cv2\n'), ((637, 676), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (649, 676), False, 'import cv2\n'), ((685, 724), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_GRAY2RGB'], {}), '(image, cv2.COLOR_GRAY2RGB)\n', (697, 724), False, 'import cv2\n'), ((775, 814), 'cv2.cvtColor', 'cv2.cvtColor', (['scaled', 'cv2.COLOR_RGB2LAB'], {}), '(scaled, cv2.COLOR_RGB2LAB)\n', (787, 814), False, 'import cv2\n'), ((826, 853), 'cv2.resize', 'cv2.resize', (['lab', '(224, 224)'], {}), '(lab, (224, 224))\n', (836, 853), False, 'import cv2\n'), ((991, 1039), 'cv2.resize', 'cv2.resize', (['ab', '(image.shape[1], image.shape[0])'], {}), '(ab, (image.shape[1], image.shape[0]))\n', (1001, 1039), False, 'import cv2\n'), ((1078, 1127), 'numpy.concatenate', 'np.concatenate', (['(L[:, :, np.newaxis], ab)'], {'axis': '(2)'}), '((L[:, :, np.newaxis], ab), axis=2)\n', (1092, 1127), True, 'import numpy as np\n'), ((1143, 1185), 'cv2.cvtColor', 'cv2.cvtColor', (['colorized', 'cv2.COLOR_LAB2RGB'], {}), '(colorized, cv2.COLOR_LAB2RGB)\n', (1155, 1185), False, 'import cv2\n'), ((1199, 1223), 'numpy.clip', 'np.clip', (['colorized', '(0)', '(1)'], {}), '(colorized, 0, 1)\n', (1206, 1223), True, 'import numpy as np\n'), ((1274, 1291), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (1284, 1291), True, 'import matplotlib.pyplot as plt\n'), ((1293, 1303), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1301, 1303), True, 'import matplotlib.pyplot as plt\n'), ((1305, 1326), 'matplotlib.pyplot.imshow', 'plt.imshow', (['colorized'], {}), 
'(colorized)\n', (1315, 1326), True, 'import matplotlib.pyplot as plt\n'), ((1328, 1338), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1336, 1338), True, 'import matplotlib.pyplot as plt\n'), ((1340, 1355), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1348, 1355), True, 'import matplotlib.pyplot as plt\n'), ((556, 597), 'numpy.full', 'np.full', (['[1, 313]', '(2.606)'], {'dtype': '"""float32"""'}), "([1, 313], 2.606, dtype='float32')\n", (563, 597), True, 'import numpy as np\n'), ((859, 877), 'cv2.split', 'cv2.split', (['resized'], {}), '(resized)\n', (868, 877), False, 'import cv2\n'), ((906, 930), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['L'], {}), '(L)\n', (927, 930), False, 'import cv2\n'), ((1047, 1061), 'cv2.split', 'cv2.split', (['lab'], {}), '(lab)\n', (1056, 1061), False, 'import cv2\n')] |
import numpy as np
from netCDF4 import Dataset
from datetime import datetime
from datetime import timedelta
import os
import sys
import matplotlib.pyplot as plt
from matplotlib import gridspec
import matplotlib.colors as mcolors
import matplotlib.patches as patches
from matplotlib.colors import BoundaryNorm
from tools_LT import read_evar_only, setup_12p
quick = True
#quick = False
def read_vars( INFO, tlev=0, HIM8=True ):
# Read variables
if HIM8:
fn_Him8 = os.path.join( INFO["GTOP"], INFO["EXP"], INFO["time0"].strftime('%Y%m%d%H%M%S'), INFO["TYPE"], INFO["MEM"],
"Him8_" + INFO["time0"].strftime('%Y%m%d%H%M%S_') + INFO["MEM"] + ".nc")
print( fn_Him8 )
nc = Dataset(fn_Him8, 'r', format='NETCDF4')
tbb = nc.variables["tbb"][tlev,:,:,:]
nc.close()
else:
tbb = np.zeros(1)
fn_radar = os.path.join( INFO["GTOP"], INFO["EXP"], INFO["time0"].strftime('%Y%m%d%H%M%S'), INFO["TYPE"], INFO["MEM"],
"radar_" + INFO["time0"].strftime('%Y%m%d%H%M%S_') + INFO["MEM"] + ".nc")
print( fn_radar, tlev )
nc = Dataset(fn_radar, 'r', format='NETCDF4')
if INFO["TYPE"] is "fcst":
z = nc.variables["z"][tlev,:,:,:]
vr = nc.variables["vr"][tlev,:,:,:]
else:
z = nc.variables["z"][:,:,:]
vr = nc.variables["vr"][:,:,:]
nc.close()
return( tbb, z, vr )
def main( INFO, EXP1="2000m_DA_0306", EXP2="2000m_DA_0306", NEXP="2000m_NODA_0306",tlev=0, typ="anal", tit_l=[], vname1="QHYD", vname2="QCRG", zlev_show=1,
LOC=True ):
data_path = "../../dat4figs/Fig11"
os.makedirs( data_path, exist_ok=True )
print( tlev, INFO["DT"]*tlev )
#ctime = datetime(2001, 1, 1, 1, 0) + timedelta(seconds=INFO["DT"]*tlev )
ctime = INFO["time0"] + timedelta(seconds=INFO["DT"]*tlev )
if typ is not "fcst":
ctime = datetime(2001, 1, 1, 1, 0) + timedelta(seconds=INFO["DT"]*tlev )
INFO["EXP"] = EXP1
INFO["MEM"] = "mean"
INFO["TYPE"] = typ
if typ is not "fcst":
INFO["time0"] = ctime
print("CHECK", INFO["time0"] )
# tbb_exp1, z_exp1, vr_exp1 = read_vars( INFO, tlev=tlev, HIM8=False )
# evar_exp1 = read_evar_only( INFO, tlev=tlev, vname=vname2 )
# eqh_exp1 = read_evar_only( INFO, tlev=tlev, vname=vname1 )
# if vname1 != "U" and vname1 != "V" and vname1 != "W" and vname1 != "T":
# eqh_exp1 = eqh_exp1 * 1.e3
# efp_exp1 = read_evar_only( INFO, tlev=tlev, vname="FP" )
#
# INFO["EXP"] = EXP2
# tbb_exp2, z_exp2, vr_exp2 = read_vars( INFO, tlev=tlev, HIM8=False )
# evar_exp2 = read_evar_only( INFO, tlev=tlev, vname=vname2 )
# eqh_exp2 = read_evar_only( INFO, tlev=tlev, vname=vname1 )
# if vname2 != "U" and vname2 != "V" and vname2 != "W" and vname1 != "T":
# eqh_exp2 = eqh_exp2 * 1.e3
# efp_exp2 = read_evar_only( INFO, tlev=tlev, vname="FP" )
ft_sec = int( INFO["DT"]*tlev )
# nature run
# read variables
INFO["EXP"] = NEXP
INFO["MEM"] = "mean"
INFO["TYPE"] = "fcst"
INFO["time0"] = datetime(2001, 1, 1, 1, 0)
tlev_nat = int( ( ctime - datetime(2001, 1, 1, 1, 0) ).total_seconds() / INFO["DT"] )
print( "DEBUG", tlev_nat, ctime)
# tbb_nat, z_nat, vr_nat = read_vars( INFO, tlev=tlev_nat, HIM8=False )
# evar_nat = read_evar_only( INFO, tlev=tlev_nat, vname=vname2 )
# efp_nat = read_evar_only( INFO, tlev=tlev_nat, vname="FP" )
# ew_nat = read_evar_only( INFO, tlev=tlev_nat, vname="W" )
# qh_nat = read_evar_only( INFO, tlev=tlev_nat, vname=vname1 )
# if vname1 != "U" and vname1 != "V" and vname1 != "W":
# qh_nat = qh_nat * 1.e3
# print("evars: ", evar_nat.shape, evar_exp1.shape, evar_exp2.shape )
if typ is "fcst":
foot = "\n(fcst from mean)"
if ft_sec == 0:
foot = "\n(analysis)"
foot = "" # DEBUG
tit_l_ = [
tit_l[0] + foot,
tit_l[1] + foot,
tit_l[2],
tit_l[0] + foot,
tit_l[1] + foot,
tit_l[2],
"",
]
else:
foot = ""
tit_l_ = [
tit_l[0] + foot,
tit_l[1] + foot,
tit_l[2],
tit_l[0] + foot,
tit_l[1] + foot,
tit_l[2],
]
# print( z_nat.shape, z_exp1.shape, z_exp2.shape )
ax_l, crs_l, fig = setup_12p()
levs_dbz= np.array([15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65])
cmap_dbz = mcolors.ListedColormap(['cyan','dodgerblue',
'lime', 'limegreen','yellow',
'orange', 'red', 'firebrick', 'magenta',
'purple'])
cmap_dbz.set_under('w', alpha=1.0)
cmap_dbz.set_over('gray', alpha=1.0)
cmap_rb = plt.cm.get_cmap("RdBu_r")
cmap_rb.set_under('gray', alpha=1.0)
cmap_rb.set_over('gray', alpha=1.0)
cmap_dbz = mcolors.ListedColormap(['cyan','dodgerblue',
'lime', 'limegreen','yellow',
'orange', 'red', 'firebrick', 'magenta',
'purple'])
cmap_dbz.set_under('w', alpha=1.0)
cmap_dbz.set_over('gray', alpha=1.0)
unit_dbz = "(dBZ)"
unit_crg = r'(nC m$^{-3}$)'
if vname2 == "QCRG" or vname2 == "CR":
levs_rb_qcrg = np.array([-0.4, -0.3, -0.2, -0.1, -0.05, -0.01,
0.01, 0.05, 0.1, 0.2, 0.3, 0.4, ])
else:
levs_rb_qcrg = np.array([ -2.4, -2.0, -1.6, -1.2, -0.8, -0.4,
0.4, 0.8, 1.2, 1.6, 2, 2.4])
if vname1 == "QHYD":
levs_dbz = np.array([0.5, 1, 2, 4, 6, 8, 10, 12, 14, 16])
else:
levs_dbz = np.array([0.5, 1, 2, 3, 4, 5, 6, 7, 8, 10])
if vname2 == "U" or vname2 == "V" or vname2 == "W":
levs_rb_qcrg = np.array([ -36, -30, -24, -18, -12, -6,
6, 12, 18, 24, 30, 36])
if vname1 == "U" or vname1 == "V" or vname1 == "W":
levs_dbz = np.array([ -36, -30, -24, -18, -12, -6,
6, 12, 18, 24, 30, 36])
cmap_dbz = cmap_rb
if vname2 == "T":
levs_rb_qcrg = np.array([ -36, -30, -24, -18, -12, -6,
6, 12, 18, 24, 30, 36])
levs_rb_qcrg = np.arange(228, 280, 2)
unit_crg = "(K)"
levs_rb_qcrg = np.array([-0.6, -0.4, -0.2, -0.1, -0.05, -0.01,
0.01, 0.05, 0.1, 0.2, 0.4, 0.6])
levs_l = [ levs_dbz, levs_dbz, levs_dbz,
levs_rb_qcrg, levs_rb_qcrg, levs_rb_qcrg,
levs_dbz, levs_dbz, levs_dbz, levs_dbz, levs_dbz, levs_dbz,
levs_rb_qcrg, levs_rb_qcrg, levs_rb_qcrg, levs_rb_qcrg, levs_rb_qcrg, levs_rb_qcrg, ]
cmap_l = [ cmap_dbz, cmap_dbz, cmap_dbz,
cmap_rb, cmap_rb, cmap_rb,
cmap_dbz, cmap_dbz, cmap_dbz, cmap_dbz, cmap_dbz, cmap_dbz,
cmap_rb, cmap_rb, cmap_rb, cmap_rb, cmap_rb, cmap_rb ]
unit_l = [ unit_dbz, unit_dbz, unit_dbz,
unit_crg, unit_crg, unit_crg,
unit_dbz, unit_dbz, unit_dbz,
unit_crg, unit_crg, unit_crg ]
pnum_l = [
"(a)", "(b)", "(c)",
"(d)", "(e)", "(f)",
]
tvar = vname2
if vname2 is "QCRG":
levs = levs_rb_qcrg
cmap = cmap_rb
unit = unit_crg
tvar = "Total charge density"
bbox = { 'facecolor':'w', 'alpha':0.95, 'pad':1.5, 'edgecolor':'w' }
xmin = 120
xmax = 280
ymin = 120
ymax = 320
zmin = 0.0
zmax = 15.0
ft_sec_a = int( ( ctime - INFO["time00"] ).total_seconds() )
print( "ctime",ctime, tlev, INFO["DT"])
xlabel = "X (km)"
ylabel = "Y (km)"
zlabel = "Z (km)"
xaxis = INFO["X"][:] * 0.001
yaxis = INFO["Y"][:] * 0.001
x2d, y2d = np.meshgrid( yaxis, xaxis )
xdgrid = 20
ydgrid = 20
# cy, cx = np.unravel_index( np.argmax(z_nat[zlev_show,:,:]), ew_nat[0,0,:,:].shape)
# cx = 76
# cy = 89
# print("CX,CY:", cx, cy)
#cx = 98
#cy = 106
#cx = 100
#cy = 111
#cx = 84
#cy = 95
#cx = 90
#cy = 93
cx = 90
cy = 89
if typ is not "fcst":
info = 't={0:.0f}min\nZ={1:}km'.format( ft_sec_a/60, INFO["Z"][zlev_show]/1000)
else:
info = 't={0:.0f}min\n(FT={1:.0f}min)\nZ={2:}km'.format( ft_sec_a/60, ft_sec/60, INFO["Z"][zlev_show]/1000)
if typ != "fcst":
VAR_l = [ ]
# z_exp1[zlev_show,:,:],
# z_exp2[zlev_show,:,:],
# z_nat[zlev_show,:,:],
# evar_exp1[0,zlev_show,:,:],
# evar_exp2[0,zlev_show,:,:],
# evar_nat[0,zlev_show,:,:],
# np.transpose( z_exp1[:,:,cx] ),
# np.transpose( z_exp2[:,:,cx] ),
# np.transpose( z_nat[:,:,cx] ), ]
else:
VAR_l = [
# #z_exp1[zlev_show,:,:],
# #z_exp2[zlev_show,:,:],
# #z_nat[zlev_show,:,:],
# eqh_exp1[0,zlev_show,:,:],
# eqh_exp2[0,zlev_show,:,:],
# qh_nat[0,zlev_show,:,:],
# evar_exp1[0,zlev_show,:,:],
# evar_exp2[0,zlev_show,:,:],
# evar_nat[0,zlev_show,:,:],
# #np.transpose( z_exp1[:,:,cx] ),
# #z_exp1[:,cy,:],
# np.transpose( eqh_exp1[0,:,:,cx] ),
# eqh_exp1[0,:,cy,:],
# #np.transpose( z_exp2[:,:,cx] ),
# #z_exp2[:,cy,:],
# np.transpose( eqh_exp2[0,:,:,cx] ),
# eqh_exp2[0,:,cy,:],
# #np.transpose( z_nat[:,:,cx] ),
# #z_nat[:,cy,:],
# np.transpose( qh_nat[0,:,:,cx] ),
# qh_nat[0,:,cy,:],
# np.transpose( evar_exp1[0,:,:,cx]),
# evar_exp1[0,:,cy,:],
# np.transpose( evar_exp2[0,:,:,cx] ),
# evar_exp2[0,:,cy,:],
# np.transpose( evar_nat[0,:,:,cx] ),
# evar_nat[0,:,cy,:],
]
# FP_l = [ np.sum( efp_exp1[0,:,:,:], axis=0 ),
# np.sum( efp_exp2[0,:,:,:], axis=0 ),
# np.sum( efp_nat[0,:,:,:], axis=0 ),
# np.sum( efp_exp1[0,:,:,:], axis=0 ),
# np.sum( efp_exp2[0,:,:,:], axis=0 ),
# np.sum( efp_nat[0,:,:,:], axis=0 ),
# ]
# if LOC:
# inf = "/data_honda01/honda/SCALE-LETKF/scale-LT/OUTPUT/" + EXP2 + "/loc.txt"
# loc_data = np.loadtxt( inf, delimiter=",", dtype='float32')
for idx, ax in enumerate(ax_l):
print("DEBUG", idx, crs_l[idx])
fn = '{0:}/data{1:0=2}.npz'.format( data_path, idx )
print( fn )
xdgrid_ = xdgrid
ydgrid_ = ydgrid
xmin_ = xmin
ymin_ = ymin
xmax_ = xmax
ymax_ = ymax
if crs_l[idx] == "ZY":
xaxis = INFO["Z"][:] * 0.001
yaxis = INFO["Y"][:] * 0.001
x2d, y2d = np.meshgrid( yaxis, xaxis )
ymin_ = zmin
ymax_ = zmax
ax.hlines( y=INFO["Z"][zlev_show]*0.001, xmin=xmin_, xmax=xmax_,
colors="k",linestyles='dotted',linewidths=1.0 )
ax.vlines( x=INFO["X"][cx]*0.001, ymin=ymin_, ymax=ymax_,
colors="k",linestyles='dotted',linewidths=1.0 )
ydgrid_ = 2
xdgrid_ = 20
elif crs_l[idx] == "XZ":
xaxis = INFO["Y"][:] * 0.001
yaxis = INFO["Z"][:] * 0.001
x2d, y2d = np.meshgrid( yaxis, xaxis )
xmin_ = zmin
xmax_ = zmax
ax.hlines( y=INFO["Y"][cy]*0.001, xmin=xmin_, xmax=xmax_,
colors="k",linestyles='dotted',linewidths=1.0 )
ax.vlines( x=INFO["Z"][zlev_show]*0.001, ymin=ymin, ymax=ymax,
colors="k",linestyles='dotted',linewidths=1.0 )
xdgrid_ = 2
ydgrid_ = 20
elif crs_l[idx] == "XY":
ax.vlines( x=INFO["X"][cx]*0.001, ymin=ymin, ymax=ymax,
colors="k",linestyles='dotted',linewidths=1.0 )
ax.hlines( y=INFO["Y"][cy]*0.001, xmin=xmin, xmax=xmax,
colors="k",linestyles='dotted',linewidths=1.0 )
# print( VAR_l[idx].shape, x2d.shape, np.max(VAR_l[idx]))
norm = BoundaryNorm(levs_l[idx], ncolors=cmap_l[idx].N, clip=True)
# np.savez( fn, data=VAR_l[idx][:,:], locx=loc_data[:,0],
# locy=loc_data[:,1] )
data = np.load( fn )['data']
locx = np.load( fn )['locx']
locy = np.load( fn )['locy']
#SHADE = ax.pcolormesh(x2d, y2d,
SHADE = ax.contourf(x2d, y2d,
data,
#VAR_l[idx][:,:],
levels=levs_l[idx],
#vmin=np.min(levs),
#vmax=np.max(levs),
cmap=cmap_l[idx],
extend='both',
norm=norm,
)
if LOC:
if ( idx == 1 or idx == 4 ) and tlev == 0:
#ax.scatter( INFO["X"][loc_data[:,0]-1]*0.001,
# INFO["Y"][loc_data[:,1]-1]*0.001,
# ax.scatter( loc_data[:,0],
# loc_data[:,1],
ax.scatter( locx,
locy,
marker='s', s=5, linewidths=0.3,
edgecolors='k', facecolors="None", alpha=1.0,
)
if typ is "fcst" and ft_sec > 0 and idx <= 5:
ssize = 10.0
idx_ = idx
if idx > 2:
idx_ = idx - 3
fp2d = FP_l[idx_]
#fp2d[ fp2d < 1.0 ] = np.nan
#fp2d = fp2d / ssize
fp2d = np.where( fp2d >= 1.0, ssize, np.nan )
ax.scatter( x2d, y2d, s=fp2d,
c='k', marker='s',
edgecolors="w", linewidths=0.5 )
ax.set_xlim( xmin_, xmax_ )
ax.set_ylim( ymin_, ymax_ )
ax.xaxis.set_ticks( np.arange(xmin_, xmax_, xdgrid_) )
ax.yaxis.set_ticks( np.arange(ymin_, ymax_, ydgrid_) )
ax.tick_params(axis='both', which='minor', labelsize=6 )
ax.tick_params(axis='both', which='major', labelsize=6 )
if idx <= 5:
ax.text(0.5, 0.95, tit_l_[idx],
fontsize=12, transform=ax.transAxes,
horizontalalignment='center',
verticalalignment='top',
bbox=bbox )
ax.text(0.1, 0.95, pnum_l[idx],
fontsize=10, transform=ax.transAxes,
horizontalalignment='center',
verticalalignment='top',
bbox=bbox )
xlabel_ = xlabel
ylabel_ = ylabel
if crs_l[idx] == "XZ":
ylabel_ = ""
xlabel_ = zlabel
elif crs_l[idx] == "ZY":
xlabel_ = ""
ylabel_ = zlabel
ax.set_xlabel( xlabel_, fontsize=6 )
ax.set_ylabel( ylabel_, fontsize=6 )
if idx <= 5:
pos = ax.get_position()
#cb_h = pos.height
#cb_w = 0.01
cb_h = 0.01
cb_w = pos.width * 1.5
ax_cb = fig.add_axes( [pos.x0, pos.y0-0.06, cb_w, cb_h] )
cb = plt.colorbar( SHADE, cax=ax_cb, orientation = 'horizontal',
ticks=levs_l[idx], extend='both' )
cb.ax.tick_params( labelsize=6 )
ax.text( 1.15, -0.12, unit_l[idx],
fontsize=8, transform=ax.transAxes,
horizontalalignment='right',
verticalalignment='top', )
if idx == 2 or idx == 5:
ax.text( 1.1, 1.2, info,
fontsize=9, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='bottom', )
if idx == 9 or idx == 15:
tvar_ = tvar
if idx == 9:
if vname1 == "QHYD":
tvar_ = "Total hydrometeor"
else:
tvar_ = vname1
ax.text( 0.5, 1.15, tvar_,
fontsize=13, transform=ax.transAxes,
horizontalalignment='center',
verticalalignment='center', )
# fig_tit = tvar
# fig.suptitle( fig_tit, fontsize=16 )
#odir = 'png/18p_DA_var/{0:}/i{1:03}_j{2:03}'.format( EXP2, cx, cy )
odir = 'pdf/fig20210624/18p_DA_var/{0:}/i{1:03}_j{2:03}'.format( EXP2, cx, cy )
ofig = '18p_{0:}_{1:}_{2:}_fta{3:05}_ft{4:05}_z{5:0=2}_{6:}_{7:}.pdf'.format(typ, EXP1, EXP2, ft_sec_a, ft_sec, zlev_show, vname1, vname2)
print( ofig, odir )
if not quick:
os.makedirs(odir, exist_ok=True)
plt.savefig(os.path.join(odir,ofig),
bbox_inches="tight", pad_inches = 0.1)
plt.cla()
plt.clf()
plt.close('all')
else:
plt.show()
###################
DX = 2000.0
DY = 2000.0
XDIM = 192
YDIM = 192
TDIM = 13
ZDIM = 40
XDIM = 176
YDIM = 176
ZDIM = 45
DZ = 500.0
DT = 300
X = np.arange( DX*0.5, DX*XDIM, DX )
Y = np.arange( DY*0.5, DY*YDIM, DY )
T = np.arange( 0, DT*TDIM, DT )
BAND = np.arange( 7, 17, 1 )
Z = np.arange(DZ*0.5, DZ*ZDIM, DZ)
#EXP = "2000m_NODA_1022_FIR2km_N"
#time0 = datetime( 2001, 1, 1, 1, 0, 0 )
EXP = "2000m_DA_1022_FIR2km_N"
EXP = "2000m_DA_0302"
time0 = datetime( 2001, 1, 1, 1, 20, 0 )
time0 = datetime( 2001, 1, 1, 1, 30, 0 )
time0 = datetime( 2001, 1, 1, 2, 0, 0 )
#time0 = datetime( 2001, 1, 1, 1, 40, 0 )
GTOP = "/data_honda01/honda/SCALE-LETKF/scale-LT/OUTPUT"
TYPE = "fcst"
MEM = "mean"
MEM = "0025"
time00 = datetime( 2001, 1, 1, 0, 0, 0 )
INFO = {"XDIM":XDIM, "YDIM":YDIM, "NBAND":10, "TDIM":TDIM,
"X":X, "Y":Y , "BAND":BAND, "T":T, "GTOP":GTOP,
"ZDIM":ZDIM, "Z":Z, "DT":DT,
"TYPE":TYPE, "MEM":MEM, "EXP":EXP,
"time0": time0, "time00": time00 }
tmax = 13
tmax = 7
tmin = 0
#tmin = 1
tmax = 1
#tmin = 1
#tmax = 7
typ = "anal"
typ = "fcst"
vname = "QCRG"
tit_l = ["NODA", "DA", "Nature run"]
EXP1 = "2000m_DA_0723_NOFP_30min"
EXP2 = "2000m_DA_0723_FP_30min_LOC10km_VLOC30km"
EXP2 = "2000m_DA_0723_FP_30min_LOC30km"
NEXP = "2000m_NODA_0723"
tit_l = ["GUESS", "ANAL", "Nature run"]
#tit_l = ["GUESS", "ANAL(HT8)", "Nature run"]
tit_l = ["NO GLMDA", "GLMDA", "Nature run"]
vname1 = "QR"
vname2 = "CR"
#vname1 = "QG"
#vname2 = "CG"
#vname1 = "QS"
#vname2 = "CS"
#vname1 = "QHYD"
#vname2 = "QCRG"
vname1_l = [
"QHYD",
# "QV",
# "W",
# "QR",
# "QG",
# "QS",
]
vname2_l = [
"QCRG",
# "T",
# "V",
# "CR",
# "CG",
# "CS",
]
zlev_min = 6
zlev_max = 28
dz = 4
zlev_min = 14
dz = 1
zlev_max = zlev_min + dz
zlev_max = zlev_min + 6
zlev_max = zlev_min
if typ is not "fcst":
tmin = 1
LOC = False
LOC = True #False
for tlev in range( tmin, tmax ):
if tlev > 0:
tit_l = ["NO GLM DA\nforecast", "GLM DA\nforecast", "Nature run"]
for zlev_show in range( zlev_min, zlev_max+dz, dz):
for idx, vname1 in enumerate(vname1_l):
INFO["time0"] = time0
main( INFO, EXP1=EXP1, EXP2=EXP2, NEXP=NEXP, tlev=tlev, typ=typ, tit_l=tit_l, vname1=vname1, vname2=vname2_l[idx], zlev_show=zlev_show, LOC=LOC )
| [
"tools_LT.setup_12p",
"numpy.array",
"datetime.timedelta",
"numpy.arange",
"datetime.datetime",
"numpy.where",
"netCDF4.Dataset",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.close",
"numpy.meshgrid",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.show... | [((17338, 17372), 'numpy.arange', 'np.arange', (['(DX * 0.5)', '(DX * XDIM)', 'DX'], {}), '(DX * 0.5, DX * XDIM, DX)\n', (17347, 17372), True, 'import numpy as np\n'), ((17375, 17409), 'numpy.arange', 'np.arange', (['(DY * 0.5)', '(DY * YDIM)', 'DY'], {}), '(DY * 0.5, DY * YDIM, DY)\n', (17384, 17409), True, 'import numpy as np\n'), ((17412, 17439), 'numpy.arange', 'np.arange', (['(0)', '(DT * TDIM)', 'DT'], {}), '(0, DT * TDIM, DT)\n', (17421, 17439), True, 'import numpy as np\n'), ((17447, 17466), 'numpy.arange', 'np.arange', (['(7)', '(17)', '(1)'], {}), '(7, 17, 1)\n', (17456, 17466), True, 'import numpy as np\n'), ((17474, 17508), 'numpy.arange', 'np.arange', (['(DZ * 0.5)', '(DZ * ZDIM)', 'DZ'], {}), '(DZ * 0.5, DZ * ZDIM, DZ)\n', (17483, 17508), True, 'import numpy as np\n'), ((17645, 17675), 'datetime.datetime', 'datetime', (['(2001)', '(1)', '(1)', '(1)', '(20)', '(0)'], {}), '(2001, 1, 1, 1, 20, 0)\n', (17653, 17675), False, 'from datetime import datetime\n'), ((17687, 17717), 'datetime.datetime', 'datetime', (['(2001)', '(1)', '(1)', '(1)', '(30)', '(0)'], {}), '(2001, 1, 1, 1, 30, 0)\n', (17695, 17717), False, 'from datetime import datetime\n'), ((17729, 17758), 'datetime.datetime', 'datetime', (['(2001)', '(1)', '(1)', '(2)', '(0)', '(0)'], {}), '(2001, 1, 1, 2, 0, 0)\n', (17737, 17758), False, 'from datetime import datetime\n'), ((17912, 17941), 'datetime.datetime', 'datetime', (['(2001)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(2001, 1, 1, 0, 0, 0)\n', (17920, 17941), False, 'from datetime import datetime\n'), ((1132, 1172), 'netCDF4.Dataset', 'Dataset', (['fn_radar', '"""r"""'], {'format': '"""NETCDF4"""'}), "(fn_radar, 'r', format='NETCDF4')\n", (1139, 1172), False, 'from netCDF4 import Dataset\n'), ((1638, 1675), 'os.makedirs', 'os.makedirs', (['data_path'], {'exist_ok': '(True)'}), '(data_path, exist_ok=True)\n', (1649, 1675), False, 'import os\n'), ((3083, 3109), 'datetime.datetime', 'datetime', (['(2001)', '(1)', 
'(1)', '(1)', '(0)'], {}), '(2001, 1, 1, 1, 0)\n', (3091, 3109), False, 'from datetime import datetime\n'), ((4474, 4485), 'tools_LT.setup_12p', 'setup_12p', ([], {}), '()\n', (4483, 4485), False, 'from tools_LT import read_evar_only, setup_12p\n'), ((4502, 4556), 'numpy.array', 'np.array', (['[15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65]'], {}), '([15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65])\n', (4510, 4556), True, 'import numpy as np\n'), ((4572, 4704), 'matplotlib.colors.ListedColormap', 'mcolors.ListedColormap', (["['cyan', 'dodgerblue', 'lime', 'limegreen', 'yellow', 'orange', 'red',\n 'firebrick', 'magenta', 'purple']"], {}), "(['cyan', 'dodgerblue', 'lime', 'limegreen', 'yellow',\n 'orange', 'red', 'firebrick', 'magenta', 'purple'])\n", (4594, 4704), True, 'import matplotlib.colors as mcolors\n'), ((4912, 4937), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""RdBu_r"""'], {}), "('RdBu_r')\n", (4927, 4937), True, 'import matplotlib.pyplot as plt\n'), ((5035, 5167), 'matplotlib.colors.ListedColormap', 'mcolors.ListedColormap', (["['cyan', 'dodgerblue', 'lime', 'limegreen', 'yellow', 'orange', 'red',\n 'firebrick', 'magenta', 'purple']"], {}), "(['cyan', 'dodgerblue', 'lime', 'limegreen', 'yellow',\n 'orange', 'red', 'firebrick', 'magenta', 'purple'])\n", (5057, 5167), True, 'import matplotlib.colors as mcolors\n'), ((6520, 6605), 'numpy.array', 'np.array', (['[-0.6, -0.4, -0.2, -0.1, -0.05, -0.01, 0.01, 0.05, 0.1, 0.2, 0.4, 0.6]'], {}), '([-0.6, -0.4, -0.2, -0.1, -0.05, -0.01, 0.01, 0.05, 0.1, 0.2, 0.4, 0.6]\n )\n', (6528, 6605), True, 'import numpy as np\n'), ((8010, 8035), 'numpy.meshgrid', 'np.meshgrid', (['yaxis', 'xaxis'], {}), '(yaxis, xaxis)\n', (8021, 8035), True, 'import numpy as np\n'), ((734, 773), 'netCDF4.Dataset', 'Dataset', (['fn_Him8', '"""r"""'], {'format': '"""NETCDF4"""'}), "(fn_Him8, 'r', format='NETCDF4')\n", (741, 773), False, 'from netCDF4 import Dataset\n'), ((860, 871), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (868, 
871), True, 'import numpy as np\n'), ((1823, 1859), 'datetime.timedelta', 'timedelta', ([], {'seconds': "(INFO['DT'] * tlev)"}), "(seconds=INFO['DT'] * tlev)\n", (1832, 1859), False, 'from datetime import timedelta\n'), ((5482, 5567), 'numpy.array', 'np.array', (['[-0.4, -0.3, -0.2, -0.1, -0.05, -0.01, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4]'], {}), '([-0.4, -0.3, -0.2, -0.1, -0.05, -0.01, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4]\n )\n', (5490, 5567), True, 'import numpy as np\n'), ((5628, 5702), 'numpy.array', 'np.array', (['[-2.4, -2.0, -1.6, -1.2, -0.8, -0.4, 0.4, 0.8, 1.2, 1.6, 2, 2.4]'], {}), '([-2.4, -2.0, -1.6, -1.2, -0.8, -0.4, 0.4, 0.8, 1.2, 1.6, 2, 2.4])\n', (5636, 5702), True, 'import numpy as np\n'), ((5783, 5829), 'numpy.array', 'np.array', (['[0.5, 1, 2, 4, 6, 8, 10, 12, 14, 16]'], {}), '([0.5, 1, 2, 4, 6, 8, 10, 12, 14, 16])\n', (5791, 5829), True, 'import numpy as np\n'), ((5858, 5901), 'numpy.array', 'np.array', (['[0.5, 1, 2, 3, 4, 5, 6, 7, 8, 10]'], {}), '([0.5, 1, 2, 3, 4, 5, 6, 7, 8, 10])\n', (5866, 5901), True, 'import numpy as np\n'), ((5982, 6044), 'numpy.array', 'np.array', (['[-36, -30, -24, -18, -12, -6, 6, 12, 18, 24, 30, 36]'], {}), '([-36, -30, -24, -18, -12, -6, 6, 12, 18, 24, 30, 36])\n', (5990, 6044), True, 'import numpy as np\n'), ((6156, 6218), 'numpy.array', 'np.array', (['[-36, -30, -24, -18, -12, -6, 6, 12, 18, 24, 30, 36]'], {}), '([-36, -30, -24, -18, -12, -6, 6, 12, 18, 24, 30, 36])\n', (6164, 6218), True, 'import numpy as np\n'), ((6324, 6386), 'numpy.array', 'np.array', (['[-36, -30, -24, -18, -12, -6, 6, 12, 18, 24, 30, 36]'], {}), '([-36, -30, -24, -18, -12, -6, 6, 12, 18, 24, 30, 36])\n', (6332, 6386), True, 'import numpy as np\n'), ((6445, 6467), 'numpy.arange', 'np.arange', (['(228)', '(280)', '(2)'], {}), '(228, 280, 2)\n', (6454, 6467), True, 'import numpy as np\n'), ((12574, 12633), 'matplotlib.colors.BoundaryNorm', 'BoundaryNorm', (['levs_l[idx]'], {'ncolors': 'cmap_l[idx].N', 'clip': '(True)'}), '(levs_l[idx], 
ncolors=cmap_l[idx].N, clip=True)\n', (12586, 12633), False, 'from matplotlib.colors import BoundaryNorm\n'), ((16967, 16999), 'os.makedirs', 'os.makedirs', (['odir'], {'exist_ok': '(True)'}), '(odir, exist_ok=True)\n', (16978, 16999), False, 'import os\n'), ((17109, 17118), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (17116, 17118), True, 'import matplotlib.pyplot as plt\n'), ((17126, 17135), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (17133, 17135), True, 'import matplotlib.pyplot as plt\n'), ((17143, 17159), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (17152, 17159), True, 'import matplotlib.pyplot as plt\n'), ((17177, 17187), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17185, 17187), True, 'import matplotlib.pyplot as plt\n'), ((1901, 1927), 'datetime.datetime', 'datetime', (['(2001)', '(1)', '(1)', '(1)', '(0)'], {}), '(2001, 1, 1, 1, 0)\n', (1909, 1927), False, 'from datetime import datetime\n'), ((1930, 1966), 'datetime.timedelta', 'timedelta', ([], {'seconds': "(INFO['DT'] * tlev)"}), "(seconds=INFO['DT'] * tlev)\n", (1939, 1966), False, 'from datetime import timedelta\n'), ((11254, 11279), 'numpy.meshgrid', 'np.meshgrid', (['yaxis', 'xaxis'], {}), '(yaxis, xaxis)\n', (11265, 11279), True, 'import numpy as np\n'), ((12756, 12767), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (12763, 12767), True, 'import numpy as np\n'), ((12792, 12803), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (12799, 12803), True, 'import numpy as np\n'), ((12828, 12839), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (12835, 12839), True, 'import numpy as np\n'), ((14046, 14082), 'numpy.where', 'np.where', (['(fp2d >= 1.0)', 'ssize', 'np.nan'], {}), '(fp2d >= 1.0, ssize, np.nan)\n', (14054, 14082), True, 'import numpy as np\n'), ((14319, 14351), 'numpy.arange', 'np.arange', (['xmin_', 'xmax_', 'xdgrid_'], {}), '(xmin_, xmax_, xdgrid_)\n', (14328, 14351), True, 'import numpy as np\n'), ((14381, 14413), 
'numpy.arange', 'np.arange', (['ymin_', 'ymax_', 'ydgrid_'], {}), '(ymin_, ymax_, ydgrid_)\n', (14390, 14413), True, 'import numpy as np\n'), ((15561, 15655), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['SHADE'], {'cax': 'ax_cb', 'orientation': '"""horizontal"""', 'ticks': 'levs_l[idx]', 'extend': '"""both"""'}), "(SHADE, cax=ax_cb, orientation='horizontal', ticks=levs_l[idx],\n extend='both')\n", (15573, 15655), True, 'import matplotlib.pyplot as plt\n'), ((17019, 17043), 'os.path.join', 'os.path.join', (['odir', 'ofig'], {}), '(odir, ofig)\n', (17031, 17043), False, 'import os\n'), ((11789, 11814), 'numpy.meshgrid', 'np.meshgrid', (['yaxis', 'xaxis'], {}), '(yaxis, xaxis)\n', (11800, 11814), True, 'import numpy as np\n'), ((3140, 3166), 'datetime.datetime', 'datetime', (['(2001)', '(1)', '(1)', '(1)', '(0)'], {}), '(2001, 1, 1, 1, 0)\n', (3148, 3166), False, 'from datetime import datetime\n')] |
from abc import abstractmethod
import numpy as np
from pymoo.core.population import Population
# ---------------------------------------------------------------------------------------------------------
# Survival
# ---------------------------------------------------------------------------------------------------------
class Survival:
    """Base class for survival selection: reduce a population to the
    individuals that survive into the next generation."""

    def __init__(self, filter_infeasible=True):
        super().__init__()
        # when True, feasible solutions are always preferred over infeasible ones
        self.filter_infeasible = filter_infeasible

    def do(self,
           problem,
           pop,
           *args,
           n_survive=None,
           return_indices=False,
           **kwargs):
        """Select the survivors of `pop` (at most `n_survive` individuals).

        If `return_indices` is True, the positions of the survivors in the
        original population are returned instead of the individuals.
        """
        # an empty population has nothing to select from
        if len(pop) == 0:
            return pop

        # default to keeping everyone, but never more than the population size
        n_survive = len(pop) if n_survive is None else min(n_survive, len(pop))

        if self.filter_infeasible and problem.n_constr > 0:
            # feasible solutions are ranked first; infeasible ones only fill
            # up the remaining slots, ordered by constraint violation
            feas, infeas = split_by_feasibility(pop, eps=0.0, sort_infeasbible_by_cv=True)

            if len(feas) > 0:
                survivors = self._do(problem, pop[feas], *args,
                                     n_survive=min(len(feas), n_survive), **kwargs)
            else:
                survivors = Population()

            n_remaining = n_survive - len(survivors)
            if n_remaining > 0:
                survivors = Population.merge(survivors, pop[infeas[:n_remaining]])
        else:
            survivors = self._do(problem, pop, *args, n_survive=n_survive, **kwargs)

        if not return_indices:
            return survivors

        # map every individual back to its position in the original population
        position = {ind: k for k, ind in enumerate(pop)}
        return [position[ind] for ind in survivors]

    @abstractmethod
    def _do(self, problem, pop, *args, n_survive=None, **kwargs):
        pass
def split_by_feasibility(pop, eps=0.0, sort_infeasbible_by_cv=True):
    """Split a population into feasible and infeasible index arrays.

    A solution is feasible when its constraint violation CV is at most
    `eps`. If `sort_infeasbible_by_cv` is True, the infeasible indices are
    ordered by increasing constraint violation.
    """
    cv = pop.get("CV")
    feasible_mask = cv <= eps
    feasible = np.where(feasible_mask)[0]
    infeasible = np.where(~feasible_mask)[0]
    if sort_infeasbible_by_cv:
        order = np.argsort(cv[infeasible, 0])
        infeasible = infeasible[order]
    return feasible, infeasible
def calc_adapt_eps(pop):
    """Adaptive epsilon: median constraint violation scaled by the
    fraction of feasible (CV <= 0) solutions in the population."""
    violations = pop.get("CV")[:, 0]
    feasible_ratio = (violations <= 0).sum() / len(violations)
    return np.median(violations) * feasible_ratio
| [
"numpy.median",
"pymoo.core.population.Population",
"numpy.where",
"numpy.argsort",
"pymoo.core.population.Population.merge"
] | [((2429, 2442), 'numpy.median', 'np.median', (['cv'], {}), '(cv)\n', (2438, 2442), True, 'import numpy as np\n'), ((2183, 2194), 'numpy.where', 'np.where', (['b'], {}), '(b)\n', (2191, 2194), True, 'import numpy as np\n'), ((2215, 2227), 'numpy.where', 'np.where', (['(~b)'], {}), '(~b)\n', (2223, 2227), True, 'import numpy as np\n'), ((2295, 2324), 'numpy.argsort', 'np.argsort', (['CV[infeasible, 0]'], {}), '(CV[infeasible, 0])\n', (2305, 2324), True, 'import numpy as np\n'), ((1164, 1176), 'pymoo.core.population.Population', 'Population', ([], {}), '()\n', (1174, 1176), False, 'from pymoo.core.population import Population\n'), ((1579, 1633), 'pymoo.core.population.Population.merge', 'Population.merge', (['survivors', 'pop[infeas[:n_remaining]]'], {}), '(survivors, pop[infeas[:n_remaining]])\n', (1595, 1633), False, 'from pymoo.core.population import Population\n')] |
"""
Copyright (c) 2016, <NAME> .All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
<NAME>. April, 2018.
email: <EMAIL>
LIVIA Department, ETS, Montreal.
"""
import numpy as np
import pdb
# If you are not using nifti files you can comment this line
import nibabel as nib
import scipy.io as sio
from ImgOperations.imgOp import applyPadding
# ----- Loader for nifti files ------ #
def load_nii (imageFileName, printFileNames) :
    """Load a NIfTI file.

    Input:
        imageFileName:  path to the .nii / .nii.gz file
        printFileNames: when True, echo the file being loaded

    Returns:
        (imageData, img_proxy): the voxel data as a numpy array and the
        nibabel image object (release it later with release_nii_proxy).
    """
    if printFileNames == True:
        print (" ... Loading file: {}".format(imageFileName))
    img_proxy = nib.load(imageFileName)
    # img_proxy.get_data() was deprecated and later removed from nibabel;
    # np.asanyarray(img.dataobj) is the documented drop-in replacement.
    imageData = np.asanyarray(img_proxy.dataobj)
    return (imageData,img_proxy)
def release_nii_proxy(img_proxy) :
    # Release the image data cached inside the nibabel proxy to free memory.
    img_proxy.uncache()
# ----- Loader for matlab format ------- #
# Very important: All the volumes should have been saved as 'vol'.
# Otherwise, change its name here
def load_matlab (imageFileName, printFileNames) :
    """Load a volume stored in a MATLAB .mat file.

    Very important: the volume must have been saved under the key 'vol'.
    """
    if printFileNames == True:
        print (" ... Loading file: {}".format(imageFileName))
    return sio.loadmat(imageFileName)['vol']
""" It loads the images (CT/MRI + Ground Truth + ROI) for the patient image Idx"""
def load_imagesSinglePatient(imageIdx,
                             imageNames,
                             imageNames_Bottom,
                             groundTruthNames,
                             roiNames,
                             applyPaddingBool,
                             receptiveField,
                             sampleSizes,
                             imageType
                             ):
    """Load the images (CT/MRI top + bottom paths, ground truth, ROI) for
    the patient at index `imageIdx`.

    imageType: 0 for NIfTI files, anything else for MATLAB files.

    Returns:
        [imageData, imageData_Bottom, imageGtLabels, roiMask, paddingValues]
        where paddingValues is the padding applied to the last padded
        volume (all volumes are padded with the same parameters).
    """
    if imageIdx >= len(imageNames) :
        print (" ERROR!!!!! : The image index specified is greater than images array size....)")
        exit(1)

    printFileNames = False # Get this from config.ini
    noPadding = ((0,0),(0,0),(0,0))

    def _load_raw(fileName):
        # Load one volume from disk (nifti or matlab) and release the
        # nibabel proxy once the data array has been extracted.
        if imageType == 0:
            data, proxy = load_nii(fileName, printFileNames)
            release_nii_proxy(proxy)
        else:
            data = load_matlab(fileName, printFileNames)
        return data

    # --- Load image data (CT/MRI/...) ---
    imageData = _load_raw(imageNames[imageIdx])
    if applyPaddingBool == True :
        [imageData, paddingValues] = applyPadding(imageData, sampleSizes, receptiveField)
    else:
        paddingValues = noPadding
    if len(imageData.shape) > 3 :
        # keep only the first channel of 4D volumes
        imageData = imageData[:,:,:,0]

    # --- Load image data for bottom path (CT/MRI/...) ---
    imageData_Bottom = _load_raw(imageNames_Bottom[imageIdx])
    if applyPaddingBool == True :
        [imageData_Bottom, paddingValues] = applyPadding(imageData_Bottom, sampleSizes, receptiveField)
    else:
        paddingValues = noPadding
    if len(imageData_Bottom.shape) > 3 :
        imageData_Bottom = imageData_Bottom[:,:,:,0]

    # --- Load ground truth (i.e. labels) ---
    if len(groundTruthNames) > 0 :
        gtLabelsData = _load_raw(groundTruthNames[imageIdx])
        # Convert ground truth to int type.
        # Bug fixes vs. original: np.int was removed from numpy, so test
        # against np.integer; and the converted array was previously
        # discarded instead of being assigned back.
        if not np.issubdtype(gtLabelsData.dtype, np.integer):
            gtLabelsData = np.rint(gtLabelsData).astype("int32")
        imageGtLabels = gtLabelsData
        if applyPaddingBool == True :
            [imageGtLabels, paddingValues] = applyPadding(imageGtLabels, sampleSizes, receptiveField)
    else :
        imageGtLabels = np.empty(0)

    # --- Load roi ---
    if len(roiNames) > 0 :
        roiMask = _load_raw(roiNames[imageIdx])
        if applyPaddingBool == True :
            [roiMask, paddingValues] = applyPadding(roiMask, sampleSizes, receptiveField)
    else :
        # without an explicit ROI, every voxel is considered inside
        roiMask = np.ones(imageGtLabels.shape)

    return [imageData, imageData_Bottom, imageGtLabels, roiMask, paddingValues]
# -------------------------------------------------------- #
def getRandIndexes(total, maxNumberIdx) :
    """Return `maxNumberIdx` distinct indexes drawn at random from
    range(total).

    Bug fix: the original called np.random.shuffle on a `range` object,
    which raises TypeError in Python 3 (ranges are immutable); a random
    permutation achieves the same result.
    """
    idxs = np.random.permutation(total)
    return list(idxs[0:maxNumberIdx])
| [
"numpy.ones",
"nibabel.load",
"scipy.io.loadmat",
"numpy.issubdtype",
"ImgOperations.imgOp.applyPadding",
"numpy.empty",
"numpy.rint",
"numpy.random.shuffle"
] | [((1511, 1534), 'nibabel.load', 'nib.load', (['imageFileName'], {}), '(imageFileName)\n', (1519, 1534), True, 'import nibabel as nib\n'), ((1987, 2013), 'scipy.io.loadmat', 'sio.loadmat', (['imageFileName'], {}), '(imageFileName)\n', (1998, 2013), True, 'import scipy.io as sio\n'), ((5928, 5951), 'numpy.random.shuffle', 'np.random.shuffle', (['idxs'], {}), '(idxs)\n', (5945, 5951), True, 'import numpy as np\n'), ((3138, 3190), 'ImgOperations.imgOp.applyPadding', 'applyPadding', (['imageData', 'sampleSizes', 'receptiveField'], {}), '(imageData, sampleSizes, receptiveField)\n', (3150, 3190), False, 'from ImgOperations.imgOp import applyPadding\n'), ((3824, 3883), 'ImgOperations.imgOp.applyPadding', 'applyPadding', (['imageData_Bottom', 'sampleSizes', 'receptiveField'], {}), '(imageData_Bottom, sampleSizes, receptiveField)\n', (3836, 3883), False, 'from ImgOperations.imgOp import applyPadding\n'), ((4496, 4537), 'numpy.issubdtype', 'np.issubdtype', (['gtLabelsData.dtype', 'np.int'], {}), '(gtLabelsData.dtype, np.int)\n', (4509, 4537), True, 'import numpy as np\n'), ((4994, 5005), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (5002, 5005), True, 'import numpy as np\n'), ((5614, 5642), 'numpy.ones', 'np.ones', (['imageGtLabels.shape'], {}), '(imageGtLabels.shape)\n', (5621, 5642), True, 'import numpy as np\n'), ((4891, 4947), 'ImgOperations.imgOp.applyPadding', 'applyPadding', (['imageGtLabels', 'sampleSizes', 'receptiveField'], {}), '(imageGtLabels, sampleSizes, receptiveField)\n', (4903, 4947), False, 'from ImgOperations.imgOp import applyPadding\n'), ((5534, 5584), 'ImgOperations.imgOp.applyPadding', 'applyPadding', (['roiMask', 'sampleSizes', 'receptiveField'], {}), '(roiMask, sampleSizes, receptiveField)\n', (5546, 5584), False, 'from ImgOperations.imgOp import applyPadding\n'), ((4611, 4632), 'numpy.rint', 'np.rint', (['gtLabelsData'], {}), '(gtLabelsData)\n', (4618, 4632), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Collection of helper methods for rdm module
@author: baihan
"""
import numpy as np
from scipy.spatial.distance import squareform
def batch_to_vectors(x):
    """converts a *stack* of RDMs in vector or matrix form into vector form

    Args:
        x: stack of RDMs (1D single vector, 2D stack of vectors, or
            3D stack of square matrices)

    Returns:
        tuple: **v** (np.ndarray): 2D, vector form of the stack of RDMs

        **n_rdm** (int): number of rdms

        **n_cond** (int): number of conditions

    Raises:
        ValueError: if x is not 1, 2 or 3 dimensional
    """
    if x.ndim == 2:
        v = x
        n_rdm = x.shape[0]
        n_cond = _get_n_from_reduced_vectors(x)
    elif x.ndim == 3:
        n_rdm = x.shape[0]
        n_cond = x.shape[1]
        v = np.ndarray((n_rdm, int(n_cond * (n_cond - 1) / 2)))
        for idx in np.arange(n_rdm):
            v[idx, :] = squareform(x[idx, :, :], checks=False)
    elif x.ndim == 1:
        v = np.array([x])
        n_rdm = 1
        n_cond = _get_n_from_reduced_vectors(v)
    else:
        # previously any other dimensionality fell through to an opaque
        # NameError on the undefined `v`
        raise ValueError('x must be 1, 2 or 3 dimensional')
    return v, n_rdm, n_cond
def batch_to_matrices(x):
    """converts a *stack* of RDMs in vector or matrix form into matrix form

    Args:
        **x**: stack of RDMs (2D stack of vectors or 3D stack of matrices)

    Returns:
        tuple: **v** (np.ndarray): 3D, matrix form of the stack of RDMs

        **n_rdm** (int): number of rdms

        **n_cond** (int): number of conditions

    Raises:
        ValueError: if x is not 2 or 3 dimensional
    """
    if x.ndim == 2:
        v = x
        n_rdm = x.shape[0]
        n_cond = _get_n_from_reduced_vectors(x)
        m = np.ndarray((n_rdm, n_cond, n_cond))
        for idx in np.arange(n_rdm):
            m[idx, :, :] = squareform(v[idx, :])
    elif x.ndim == 3:
        m = x
        n_rdm = x.shape[0]
        n_cond = x.shape[1]
    else:
        # previously any other dimensionality fell through to an opaque
        # NameError on the undefined `m`
        raise ValueError('x must be 2 or 3 dimensional')
    return m, n_rdm, n_cond
def _get_n_from_reduced_vectors(x):
"""
calculates the size of the RDM from the vector representation
Args:
**x**(np.ndarray): stack of RDM vectors (2D)
Returns:
int: n: size of the RDM
"""
return int(np.ceil(np.sqrt(x.shape[1] * 2)))
def add_pattern_index(rdms, pattern_descriptor):
"""
adds index if pattern_descriptor is None
Args:
**rdms** (pyrsa.rdm.RDMs): rdms object to be parsed
Returns:
pattern_descriptor
pattern_select
"""
pattern_select = rdms.pattern_descriptors[pattern_descriptor]
pattern_select = np.unique(pattern_select)
return pattern_descriptor, pattern_select
| [
"scipy.spatial.distance.squareform",
"numpy.sqrt",
"numpy.unique",
"numpy.array",
"numpy.ndarray",
"numpy.arange"
] | [((2313, 2338), 'numpy.unique', 'np.unique', (['pattern_select'], {}), '(pattern_select)\n', (2322, 2338), True, 'import numpy as np\n'), ((1457, 1492), 'numpy.ndarray', 'np.ndarray', (['(n_rdm, n_cond, n_cond)'], {}), '((n_rdm, n_cond, n_cond))\n', (1467, 1492), True, 'import numpy as np\n'), ((1512, 1528), 'numpy.arange', 'np.arange', (['n_rdm'], {}), '(n_rdm)\n', (1521, 1528), True, 'import numpy as np\n'), ((786, 802), 'numpy.arange', 'np.arange', (['n_rdm'], {}), '(n_rdm)\n', (795, 802), True, 'import numpy as np\n'), ((1557, 1578), 'scipy.spatial.distance.squareform', 'squareform', (['v[idx, :]'], {}), '(v[idx, :])\n', (1567, 1578), False, 'from scipy.spatial.distance import squareform\n'), ((1952, 1975), 'numpy.sqrt', 'np.sqrt', (['(x.shape[1] * 2)'], {}), '(x.shape[1] * 2)\n', (1959, 1975), True, 'import numpy as np\n'), ((828, 866), 'scipy.spatial.distance.squareform', 'squareform', (['m[idx, :, :]'], {'checks': '(False)'}), '(m[idx, :, :], checks=False)\n', (838, 866), False, 'from scipy.spatial.distance import squareform\n'), ((901, 914), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (909, 914), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from catboost import CatBoostClassifier
import xgboost as xgb
import lightgbm as lgb
from sklearn.utils import class_weight
from abc import ABC, abstractmethod
class predict_model(ABC):
    """
    Abstract base class for working with classifiers.

    Subclasses (CatBoost, XGBoost, LightGBM wrappers) implement
    load_data/train/get_feature_importances, while this class holds the
    shared data caching, categorical encoding and prediction logic.
    """
    @abstractmethod
    def __init__(self, name='predict_model', categ_conv=True):
        # hyper-parameters forwarded to the underlying library's classifier
        self.params = {}
        # feature names to drop before fitting/predicting
        self.exclude_list = []
        self.name = name
        self.random = 1
        self.classifier = None
        # whether categorical columns are converted to integer codes
        self.categ_conv = categ_conv
        # cached frames under the keys 'train', 'y' and 'test'
        self.data_df = {}

    def set_params(self, params=None):
        """Replace the classifier hyper-parameters (empty dict if falsy)."""
        if not params:
            self.params = {}
        else:
            self.params = params

    def set_random_seed(self, random=1):
        """Store the random seed for later use by subclasses."""
        self.random = random

    @abstractmethod
    def load_data(self, data, balance=False):
        """Cache train/test frames from `data` and encode categoricals.

        NOTE(review): `data` is assumed to expose get_train(balance=...),
        get_test(), get_cat_list() and a `country` attribute -- confirm
        against the data-provider module.
        """
        self.data = data
        self.data_df['train'], self.data_df['y'] = self.data.get_train(
            balance=balance
        )
        self.data_df['test'] = self.data.get_test()
        self.category_cols = self.data.get_cat_list()
        # encode every categorical column as integer category codes,
        # consistently for both train and test
        for header in self.category_cols:
            self.data_df['train'].loc[:, header] = self.data_df['train'][header].astype('category').cat.codes
            self.data_df['test'].loc[:, header] = self.data_df['test'][header].astype('category').cat.codes
        return True

    def get_train(self):
        """Return the cached training features."""
        return self.data_df['train']

    def get_y(self):
        """Return the cached training labels."""
        return self.data_df['y']

    def get_test(self):
        """Return the cached test features."""
        return self.data_df['test']

    def set_exclude_list(self, exclude_list):
        """Set (a copy of) the feature names to exclude from fitting."""
        self.exclude_list = exclude_list.copy()

    @abstractmethod
    def get_feature_importances(self):
        pass

    @abstractmethod
    def train(self, x_train=None, y_train=None):
        pass

    def predict(self, test=None):
        """Predict the probability of the positive ('poor') class.

        Returns a DataFrame indexed like `test` with 'country' and 'poor'
        columns, or None when no classifier has been trained yet.
        """
        if self.classifier:
            if not isinstance(test, pd.DataFrame):
                test = self.get_test()
            elif self.categ_conv:
                # externally supplied frames get the same categorical encoding
                cols = [x for x in self.category_cols if x in test.columns]
                for header in cols:
                    test.loc[:, header] = test[header].astype('category').cat.codes
            test = test.drop(
                [x for x in self.exclude_list if x in test.columns], axis=1
            )
            res = pd.DataFrame(index=test.index)
            res['country'] = self.data.country
            res['poor'] = self.classifier.predict_proba(test)[:, 1]
            return res
        else:
            print('error: classifier not defined')
            return None
class CB_model(predict_model):
    """
    CatBoost classifier wrapper.
    """

    def __init__(self, name='cat_boost', categ_conv=True):
        super().__init__(name='cat_boost', categ_conv=categ_conv)
        self.name = name

    def load_data(self, data, balance=False):
        """Cache the data, then build a CatBoost classifier with balanced
        class weights."""
        if not super().load_data(data, balance):
            return False
        weights = class_weight.compute_class_weight(
            class_weight='balanced',
            classes=np.unique(self.data_df['y']),
            y=self.data_df['y']
        )
        self.classifier = CatBoostClassifier(**self.params,
                                             class_weights=weights)
        return True

    def train(self, x_train=None, y_train=None):
        """Fit the classifier, excluding blacklisted features and telling
        CatBoost which column positions are categorical."""
        if not isinstance(x_train, pd.DataFrame):
            x_train = self.get_train()
        elif self.categ_conv:
            # externally supplied frames get the same categorical encoding
            for col in [c for c in self.category_cols if c in x_train.columns]:
                x_train.loc[:, col] = x_train[col].astype('category').cat.codes
        if not isinstance(y_train, pd.Series):
            y_train = self.get_y()
        x_train = x_train.drop([c for c in self.exclude_list
                                if c in x_train.columns], axis=1)
        self.category_cols = [c for c in self.category_cols
                              if c not in self.exclude_list]
        cat_dims = [x_train.columns.get_loc(c) for c in self.category_cols]
        print(x_train.shape, y_train.shape, len(self.category_cols))
        self.classifier.fit(x_train, y_train, cat_features=cat_dims)
        return self.classifier

    def get_feature_importances(self):
        return self.classifier._feature_importance
class XGB_model(predict_model):
    """
    XGBoost classifier wrapper.
    """

    def __init__(self, name='xg_boost', categ_conv=True):
        super().__init__(name='xg_boost', categ_conv=categ_conv)
        self.name = name

    def load_data(self, data, balance=False):
        """Cache the data, then build an XGBoost classifier whose
        scale_pos_weight compensates for class imbalance."""
        if not super().load_data(data, balance):
            return False
        y = self.data_df['y']
        # balanced weighting: (#negatives) / (#positives)
        self.params['scale_pos_weight'] = (y.shape[0] - y.sum()) / y.sum()
        self.classifier = xgb.XGBClassifier(**self.params)
        return True

    def train(self, x_train=None, y_train=None):
        """Fit the classifier after dropping blacklisted features."""
        if not isinstance(x_train, pd.DataFrame):
            x_train = self.get_train()
        elif self.categ_conv:
            # externally supplied frames get the same categorical encoding
            for col in [c for c in self.category_cols if c in x_train.columns]:
                x_train.loc[:, col] = x_train[col].astype('category').cat.codes
        if not isinstance(y_train, pd.Series):
            y_train = self.get_y()
        x_train = x_train.drop([c for c in self.exclude_list
                                if c in x_train.columns], axis=1)
        print('x_train shape: ', x_train.shape)
        self.classifier.fit(x_train, y_train)
        return self.classifier

    def get_feature_importances(self):
        return self.classifier.feature_importances_
class LGBM_model(predict_model):
    """
    LightGBM classifier wrapper.
    """

    def __init__(self, name='lgbm', categ_conv=True):
        super().__init__(name='lgbm', categ_conv=categ_conv)
        self.name = name

    def load_data(self, data, balance=False):
        """Cache the data, then build a LightGBM classifier."""
        if not super().load_data(data, balance):
            return False
        self.classifier = lgb.LGBMClassifier(**self.params)
        return True

    def train(self, x_train=None, y_train=None):
        """Fit the classifier after dropping blacklisted features."""
        if not isinstance(x_train, pd.DataFrame):
            x_train = self.get_train()
        elif self.categ_conv:
            # externally supplied frames get the same categorical encoding
            for col in [c for c in self.category_cols if c in x_train.columns]:
                x_train.loc[:, col] = x_train[col].astype('category').cat.codes
        if not isinstance(y_train, pd.Series):
            y_train = self.get_y()
        x_train = x_train.drop([c for c in self.exclude_list
                                if c in x_train.columns], axis=1)
        print('x_train shape: ', x_train.shape)
        self.category_cols = [c for c in self.category_cols
                              if c not in self.exclude_list]
        self.classifier.fit(x_train, y_train, verbose=False)
        return self.classifier

    def get_feature_importances(self):
        return self.classifier.feature_importances_
| [
"numpy.unique",
"lightgbm.LGBMClassifier",
"pandas.DataFrame",
"catboost.CatBoostClassifier",
"xgboost.XGBClassifier"
] | [((2413, 2443), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'test.index'}), '(index=test.index)\n', (2425, 2443), True, 'import pandas as pd\n'), ((3233, 3285), 'catboost.CatBoostClassifier', 'CatBoostClassifier', ([], {'class_weights': 'c_w'}), '(**self.params, class_weights=c_w)\n', (3251, 3285), False, 'from catboost import CatBoostClassifier\n'), ((4968, 5000), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {}), '(**self.params)\n', (4985, 5000), True, 'import xgboost as xgb\n'), ((6210, 6243), 'lightgbm.LGBMClassifier', 'lgb.LGBMClassifier', ([], {}), '(**self.params)\n', (6228, 6243), True, 'import lightgbm as lgb\n'), ((3122, 3150), 'numpy.unique', 'np.unique', (["self.data_df['y']"], {}), "(self.data_df['y'])\n", (3131, 3150), True, 'import numpy as np\n')] |
"""
@authors:
# =============================================================================
Information:
The functions in this script are used to export and create array files
todo:
Something is wrong with: Store_temp_GLl
# =============================================================================
"""
# =============================================================================
# LIBRARIES
# =============================================================================
import numpy as np
#from numpy import pi, sin, cos
import matplotlib.pyplot as plt
#from time import gmtime, strftime
#import GH_import as imp
#import GH_convert as conv
#import GH_generate as gen
#import GH_solve as solv
#import GH_displayGeoid as dgeo
#import GH_displaySat as dsat
#import GH_export as exp
#import GH_displayTopo as dtopo
#import GH_terminal as term
#import GH_harmonics as harm
#import GH_geoMath as gmath
#import GH_earthMap as emap
# =============================================================================
# FUNCTIONS TO STORE FILES
# =============================================================================
def Store_Array(data, title, path="../Rendered"):
    """
    Stores a 2D array into a tab-separated text file that can later be
    imported again.

    Input:
        data:  the 2D array in question
        title: a string, the desired title for the file.
               Must include the extension, e.g. ".txt"
        path:  directory in which to store the array

    To import use:
        data = np.loadtxt(f"{path}/{title}")
    """
    # "with" guarantees the file handle is closed even if a write fails
    # (the original leaked the handle on exceptions)
    with open(f"{path}/{title}", "w") as file:
        for n in range(data.shape[0]):
            for m in range(data.shape[1]):
                file.write(str(data[n, m]))
                file.write("\t")
            file.write("\n")
def Store_temp_GLl(G_Grid, G_Long, G_Lat, detail=""):
    """
    Stores the grid / longitude / latitude arrays into text files for
    future import with imp.Load_GLl().
    If you want to keep the arrays, move them into the Rendered/grid
    directory, or they might get written over.
    """
    destination = "../Rendered/grid"
    for array, label in ((G_Grid, "G_Grid"),
                         (G_Long, "G_Long"),
                         (G_Lat, "G_Lat")):
        Store_Array(array, f"{detail} {label}", destination)
# =============================================================================
# FUNCTIONS FOR FIGURES
# =============================================================================
def Store_Figure(fignum, title, time="", path="../Rendered/images", dpi=500):
    """
    Stores a figure into a .png file.

    Input:
        fignum: matplotlib figure number
        title:  image name
        time:   optional timestamp prefix for the file name
        path:   image path location
        dpi:    pixels per inch density
    """
    plt.figure(fignum)
    plt.savefig(f"{path}/{time} {title}.png", dpi=dpi)
# =============================================================================
# TEST FUNCTIONS
# =============================================================================
def TEST_store_temp():
    """Smoke test: push three small constant arrays through Store_temp_GLl."""
    grid = np.ones((1, 5))
    longitudes = 2 * np.ones((2, 5))
    latitudes = 3 * np.ones((5, 2))
    Store_temp_GLl(grid, longitudes, latitudes)
# =============================================================================
# MAIN
# =============================================================================
if __name__ == '__main__':
    # Manual test entry points; the calls are left disabled on purpose.
#    TEST_store_temp()
    '''
    Store_temp_GLl(G_Grid, G_Long, G_Lat, "TESTrr0")
    '''
    print("\nGH_export done")
| [
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.ones"
] | [((2905, 2923), 'matplotlib.pyplot.figure', 'plt.figure', (['fignum'], {}), '(fignum)\n', (2915, 2923), True, 'import matplotlib.pyplot as plt\n'), ((3062, 3093), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {'dpi': 'dpi'}), '(file_name, dpi=dpi)\n', (3073, 3093), True, 'import matplotlib.pyplot as plt\n'), ((3303, 3318), 'numpy.ones', 'np.ones', (['(1, 5)'], {}), '((1, 5))\n', (3310, 3318), True, 'import numpy as np\n'), ((3326, 3341), 'numpy.ones', 'np.ones', (['(2, 5)'], {}), '((2, 5))\n', (3333, 3341), True, 'import numpy as np\n'), ((3351, 3366), 'numpy.ones', 'np.ones', (['(5, 2)'], {}), '((5, 2))\n', (3358, 3366), True, 'import numpy as np\n')] |
import numpy as np
from scipy import optimize
from sklearn.metrics import euclidean_distances
from ashic.utils import rotation, fill_array3d
def prepare_data(x, tab, alpha, beta, mask, loci, bias):
    """Select the loci of interest from both halves of the structure,
    center each half on its own centroid, and slice the count matrix,
    mask and biases accordingly.

    Returns (x1, x2, d, t, alpha, beta, mask, b1, b2) where d is the
    offset between the two centroids and x1/x2 are the centered
    coordinates of the selected loci.
    """
    half = int(x.shape[0] / 2)
    first = x[:half, :][loci, :]
    second = x[half:, :][loci, :]
    center1 = first.mean(axis=0)
    center2 = second.mean(axis=0)
    offset = center2 - center1
    sub_counts = tab[loci, :][:, loci]
    sub_mask = mask[loci, :][:, loci]
    return (first - center1, second - center2, offset, sub_counts,
            alpha, beta, sub_mask, bias[:half][loci], bias[half:][loci])
def poisson_complete_ll(x1, x2, t, alpha, beta, mask, b1, b2):
    """Negative Poisson complete-data log-likelihood of the counts `t`
    over the masked pairs, with rate beta * d**alpha scaled by the biases."""
    dist = euclidean_distances(x1, x2)
    rate = beta * np.power(dist, alpha)[mask] * np.outer(b1, b2)[mask]
    loglik = t[mask] * np.log(rate) - rate
    return -loglik.sum()
def eval_f(angles, data=None):
    """
    Objective for the optimizer: apply the two candidate rotations (and
    the centroid offset) to the structures, then score them with the
    negative Poisson complete log-likelihood.
    """
    x1, x2, d, t, alpha, beta, mask, b1, b2 = data
    ax1, ay1, az1, ax2, ay2, az2 = angles
    rot_m = rotation(ax1, ay1, az1)
    rot_p = rotation(ax2, ay2, az2)
    x1_rot = rot_m.dot(x1.T).T
    x2_rot = rot_p.dot(x2.T).T + d
    return poisson_complete_ll(x1_rot, x2_rot, t, alpha, beta, mask, b1, b2)
def estimate_rotation(x, tab, alpha, beta, mask, loci, bias, maxiter=1000):
    """Estimate the two rigid rotations (one per half of the structure)
    that minimise the negative Poisson log-likelihood of the counts, by
    L-BFGS-B over the 6 Euler angles, and return the rotated full
    structure (both halves concatenated)."""
    data = prepare_data(x, tab, alpha, beta, mask, loci, bias)
    # start from zero rotation for both halves
    ini = np.repeat(0., 6).astype(float)
    results = optimize.fmin_l_bfgs_b(
        eval_f, # function to minimize
        x0=ini.flatten(), # initial guess
        approx_grad=True,  # gradient estimated by finite differences
        # fprime=eval_grad_f, # gradient of function
        args=(data, ), # args to pass to function
        iprint=1,
        maxiter=maxiter)
    thetaxm, thetaym, thetazm, thetaxp, thetayp, thetazp = results[0]
    rm = rotation(thetaxm, thetaym, thetazm)
    rp = rotation(thetaxp, thetayp, thetazp)
    # presumably re-embeds the centered loci into full-length arrays padded
    # with zeros -- confirm in ashic.utils.fill_array3d
    x1 = fill_array3d(data[0], loci, 0.)
    x2 = fill_array3d(data[1], loci, 0.)
    # apply the fitted rotations; the second half is shifted back by the
    # centroid offset computed in prepare_data
    x1 = rm.dot(x1.T).T
    x2 = rp.dot(x2.T).T + data[2]
    return np.concatenate((x1, x2))
| [
"numpy.repeat",
"numpy.power",
"sklearn.metrics.euclidean_distances",
"numpy.log",
"ashic.utils.rotation",
"numpy.outer",
"numpy.concatenate",
"ashic.utils.fill_array3d"
] | [((658, 685), 'sklearn.metrics.euclidean_distances', 'euclidean_distances', (['x1', 'x2'], {}), '(x1, x2)\n', (677, 685), False, 'from sklearn.metrics import euclidean_distances\n'), ((1009, 1044), 'ashic.utils.rotation', 'rotation', (['thetaxm', 'thetaym', 'thetazm'], {}), '(thetaxm, thetaym, thetazm)\n', (1017, 1044), False, 'from ashic.utils import rotation, fill_array3d\n'), ((1054, 1089), 'ashic.utils.rotation', 'rotation', (['thetaxp', 'thetayp', 'thetazp'], {}), '(thetaxp, thetayp, thetazp)\n', (1062, 1089), False, 'from ashic.utils import rotation, fill_array3d\n'), ((1785, 1820), 'ashic.utils.rotation', 'rotation', (['thetaxm', 'thetaym', 'thetazm'], {}), '(thetaxm, thetaym, thetazm)\n', (1793, 1820), False, 'from ashic.utils import rotation, fill_array3d\n'), ((1830, 1865), 'ashic.utils.rotation', 'rotation', (['thetaxp', 'thetayp', 'thetazp'], {}), '(thetaxp, thetayp, thetazp)\n', (1838, 1865), False, 'from ashic.utils import rotation, fill_array3d\n'), ((1875, 1907), 'ashic.utils.fill_array3d', 'fill_array3d', (['data[0]', 'loci', '(0.0)'], {}), '(data[0], loci, 0.0)\n', (1887, 1907), False, 'from ashic.utils import rotation, fill_array3d\n'), ((1916, 1948), 'ashic.utils.fill_array3d', 'fill_array3d', (['data[1]', 'loci', '(0.0)'], {}), '(data[1], loci, 0.0)\n', (1928, 1948), False, 'from ashic.utils import rotation, fill_array3d\n'), ((2017, 2041), 'numpy.concatenate', 'np.concatenate', (['(x1, x2)'], {}), '((x1, x2))\n', (2031, 2041), True, 'import numpy as np\n'), ((729, 745), 'numpy.outer', 'np.outer', (['b1', 'b2'], {}), '(b1, b2)\n', (737, 745), True, 'import numpy as np\n'), ((771, 781), 'numpy.log', 'np.log', (['mu'], {}), '(mu)\n', (777, 781), True, 'import numpy as np\n'), ((1380, 1397), 'numpy.repeat', 'np.repeat', (['(0.0)', '(6)'], {}), '(0.0, 6)\n', (1389, 1397), True, 'import numpy as np\n'), ((702, 720), 'numpy.power', 'np.power', (['d', 'alpha'], {}), '(d, alpha)\n', (710, 720), True, 'import numpy as np\n')] |
#Schrodinger 1D equation solved in an infinite potential well by Numerov's algorithm and the shooting method
#Not generalized to any potential yet as I'm not sure about the boundary conditions.
from __future__ import division
import numpy as np
from matplotlib import pyplot as plt
hbar = 1
m = 1
xmin = -1
xmax = 1
N = 10000
Tolerance = 1e-3
def v(x):
    """Harmonic potential V(x) = x**2 / 2 (units with hbar = m = 1)."""
    return 0.5 * x ** 2
def f(x, E):
    """Numerov coefficient function: f = 2*m*(V(x) - E) / hbar**2, so that
    the Schrodinger equation reads Psi'' = f * Psi."""
    return (v(x) - E) * 2 * m / (hbar * hbar)
# Uniform spatial grid over [xmin, xmax]; retstep also returns the spacing dx
X, dx = np.linspace(xmin, xmax, N, retstep=True)
def Boundary(E):
    """Return the value of the Numerov-integrated wavefunction at the
    right edge of the domain, used as the shooting residual for energy E.

    Delegates to Wavefunction() so the Numerov recurrence lives in one
    place instead of being duplicated verbatim in two functions.
    """
    return Wavefunction(E)[-1]
def Wavefunction(E):
    """Integrate Psi'' = f(x, E) * Psi over the grid X with the Numerov
    method at trial energy E and return the (unnormalised) wavefunction.

    Boundary condition: Psi[0] = 0, Psi[1] = 1 (arbitrary scale).
    """
    Psi = np.zeros(N)
    Psi[1]=1
    F=f(X,E)
    for i in range(1,N-1):
        # Numerov three-point recurrence for Psi'' = F * Psi
        Psi[i+1]=(2*Psi[i]*(1-dx*dx*F[i]/12) - Psi[i-1]*(1-dx*dx*F[i-1]/12) +dx*dx*F[i]*Psi[i])/(1-dx*dx*F[i+1]/12)
    return Psi
# Shooting method: for each bracketing energy interval, bisect on the
# wavefunction's value at the right boundary (which must vanish for a
# true eigenstate of the infinite well).
Energies = [(10,20)]
for E in Energies:
    E1, E2 = E
    B1 = Boundary(E1)
    B2 = Boundary(E2)
    # convergence measure: the smaller of the two boundary residuals
    accuracy = min((abs(B1), abs(B2)))
    print(B1, B2, accuracy)
    # order the bracket so that the lower residual ends up on E1's side
    if(B1 > B2):
        Ebuffer = E1
        E1 = E2
        E2 = Ebuffer
    while(accuracy > Tolerance):
        # standard bisection on the sign of the boundary residual
        Emid = (E1 + E2)/2
        Bmid = Boundary(Emid)
        if(Bmid > 0):
            #print("Overshooting")
            E2 = Emid
        if(Bmid < 0):
            #print("Undershooting")
            E1 = Emid
        if(Bmid == 0):
            print("WHY IS THIS HAPPENING WITH FLOATS")
        accuracy = min((abs(Boundary(E1)), abs(Boundary(E2))))
        print(E1, E2, accuracy)
    print(Emid)
    # recompute and plot the converged eigenfunction
    Psi = Wavefunction(Emid)
    plt.plot(X,Psi, 'k-', label="$\Psi$, E=%.2f" % round(Emid,2))
plt.legend()
plt.grid()
plt.savefig("%.2fNumerovAlgorithm.png" % round(Emid,2),)
| [
"matplotlib.pyplot.legend",
"numpy.linspace",
"matplotlib.pyplot.grid",
"numpy.zeros"
] | [((437, 477), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'N'], {'retstep': '(True)'}), '(xmin, xmax, N, retstep=True)\n', (448, 477), True, 'import numpy as np\n'), ((506, 517), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (514, 517), True, 'import numpy as np\n'), ((739, 750), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (747, 750), True, 'import numpy as np\n'), ((1717, 1729), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1727, 1729), True, 'from matplotlib import pyplot as plt\n'), ((1734, 1744), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1742, 1744), True, 'from matplotlib import pyplot as plt\n')] |
#!/usr/bin/env python
# coding: utf-8
# import torch.nn.functional as F
# import torch.optim as optim
# from apex import amp
# from tqdm import tqdm
# from skimage import io as img
# import torchvision.models as models
# from torchsummaryX import summary as modelsummary
# import nni
import time
import os
import warnings
import shutil
import argparse
import logging
import math
import torch
import torch.nn as nn
import numpy as np
import scipy.misc
import matplotlib
import matplotlib.pyplot as plt
import colorama
from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec
from training_functions import organise_models, recon_loss, VGGPerceptualLoss
from configs import get_configs, LogbookFormatter
from progress_bar import create_progressbar
from evaluation import calculate_sifid_given_paths, calculate_cs
warnings.filterwarnings("ignore")
matplotlib.use('Agg')
# np.set_printoptions(threshold = np.inf)
# @profile
def main() -> None:
    """End-to-end training entry point for a single-image, multi-scale GAN.

    Parses CLI options, builds a timestamped run directory, prepares the real
    image / annotation pyramids, then trains one generator/discriminator pair
    per scale. ``--mode 'f'`` trains on the foreground composite
    (``reals[..][3]``) with style references; ``--mode 'b'`` trains on the
    background (``reals[..][1]``). After training it generates random samples
    and logs SIFID / diversity / GQI metrics.

    Side effects: creates directories, copies source files, writes images,
    ``.pth`` checkpoints, and a log file under ``opts.output_dir``; requires a
    CUDA device ("cuda:0").
    """
    # params = nni.get_next_parameter()
    # ---- CLI / configuration -------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str, required=True)
    parser.add_argument('--config_file', type=str, required=True)
    parser.add_argument('--log', type=bool, default=True)
    parser.add_argument('--logbook', type=str, default='log.txt')
    logger = get_logger()
    # get_configs attaches the remaining (file-driven) options to the parser.
    parser = get_configs(parser, logger)
    opts = parser.parse_args()
    # Terminal colour codes used only to decorate log messages.
    red = colorama.Fore.RED
    green = colorama.Fore.GREEN
    white = colorama.Fore.WHITE
    cyan = colorama.Fore.CYAN
    reset = colorama.Style.RESET_ALL
    bright = colorama.Style.BRIGHT
    dim = colorama.Style.DIM
    seed = opts.seed
    # seed = params['seed']
    # ---- determinism ---------------------------------------------------------
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    img_path = opts.img_input_dir
    anot_path = opts.anot_input_dir
    output_dir = opts.output_dir
    # Timestamped run directory, e.g. <output_dir>/0314_153045_seed0/
    local_time = time.strftime("%m%d_%H%M%S", time.localtime())
    dir2save = '%s/%s_seed%s/' % (output_dir, local_time, seed)
    gen_num = opts.gen_num
    dir2gen = '%sran_gen/' % (dir2save)
    try:
        os.makedirs(dir2save)
        os.makedirs(dir2gen)
        # Snapshot config and source files into the run dir for reproducibility.
        shutil.copyfile('%s/%s' % (os.getcwd(), opts.config_file), '%s/%s' % (dir2save, opts.config_file))
        shutil.copyfile('%s/my_models.py' % os.getcwd(), '%s/my_models.py' % dir2save)
        shutil.copyfile('%s/run.py' % os.getcwd(), '%s/run.py' % dir2save)
        shutil.copyfile('%s/training_functions.py' % os.getcwd(), '%s/training_functions.py' % dir2save)
        shutil.copyfile('%s/utils_functions.py' % os.getcwd(), '%s/utils_functions.py' % dir2save)
    except OSError:
        raise Exception("Files ERROR!")
    if opts.log:
        # Mirror log output to a file inside the run directory.
        logbook = opts.logbook
        logpath = dir2save + logbook
        loghandler = logging.FileHandler(filename=logpath, mode="a", encoding="utf-8")
        loghandler.setLevel(logging.INFO)
        logbook_formatter = LogbookFormatter(fmt="[%(asctime)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
        loghandler.setFormatter(logbook_formatter)
        logger.addHandler(loghandler)
    ### can't Loading Weights at present
    weights2load = 0
    G_weights2load = ''
    D_weights2load = ''
    ###
    # Per-scale accumulators: trained generators/discriminators, their fixed
    # reconstruction noise maps, pyramids of reals/masks, and loss histories.
    Gs = []
    Zs = []
    Ds = []
    reals = []
    masks = []
    noises = []
    NoiseWeight = []
    errD2plot = []
    errG2plot = []
    mode = opts.mode
    channels = opts.channels
    kernel_size = opts.kernel_size
    stride = opts.stride
    if_padding = opts.if_padding
    if_lazy = opts.if_lazy
    G_num_layer = opts.G_num_layer
    D_num_layer = opts.D_num_layer
    if mode == 'f':
        weight4style = opts.weight4style
    scale_base = opts.scale_base
    # scales = opts.scales
    # Number of pyramid levels is derived from the annotation size.
    scales = modify_scales(anot_path, scale_base)
    logger.info('-' * 80)
    logger.info(green + '[INFO]: scales are set to %s' % scales + reset)
    out_channels = opts.out_channels
    lr_g = opts.lr_g
    lr_d = opts.lr_d
    iters_list = [int(i) for i in opts.iters_list]
    D_steps = opts.D_steps
    G_steps = opts.G_steps
    lambda_grad = opts.lambda_grad
    n_segments = opts.n_segments
    compactness = opts.compactness
    sigma = opts.sigma
    start_label = opts.start_label
    device = torch.device("cuda:0")
    # Loss weights: reconstruction (L1), cosine-embedding, VGG perceptual.
    alpha4rec_ini = opts.alpha4rec
    alpha4cos_ini = opts.alpha4cos
    alpha4vgg_ini = opts.alpha4vgg
    # Only instantiate the (heavy) VGG loss when it is actually weighted.
    p_loss = VGGPerceptualLoss(resize=False, device=device) if alpha4vgg_ini != 0 else 0
    ###
    # factor4rec = calc_factor('rec', scales)
    # factor4cos = calc_factor('cos', scales)
    # factor4vgg = calc_factor('vgg', scales)
    ###
    noise_weight = opts.noise_weight
    noise_weight_ini = noise_weight
    p4flip = opts.p4flip
    torch.backends.cudnn.benchmark = True
    # amp.register_float_function(torch, 'sigmoid')
    # ---- data preparation ----------------------------------------------------
    # reals[i] = (original, background, foregrounds, foreground_a); pyramids
    # are returned coarse-last, so reverse to train coarse-to-fine.
    reals, masks = get_reals(mode, img_path, anot_path, scales, scale_base, reals, channels, masks)
    reals, masks = reals[::-1], masks[::-1]
    reals_b, reals_fa, masks_b, masks_f = [], [], [], []
    for _ in reals:
        reals_b.append(_[1])
        reals_fa.append(_[3])
    for _ in masks:
        masks_b.append(_[0])
        masks_f.append(_[1])
    for scale_num in range(scales):
        # One output directory per scale with the real-image variants saved
        # for visual inspection.
        outfile_dir = '%s%s/' % (dir2save, scale_num)
        try:
            os.makedirs(outfile_dir)
        except OSError:
            raise Exception("Files ERROR!")
        _, __, ___, ____, _____ = reals[scale_num][0], reals[scale_num][1], reals[scale_num][2], reals[scale_num][3], masks_f[scale_num]
        plt.imsave('%s/real_original.png' % (outfile_dir), convert_image_np(_), vmin=0, vmax=1)
        plt.imsave('%s/real_background.png' % (outfile_dir), convert_image_np(__), vmin=0, vmax=1)
        plt.imsave('%s/real_foregrounds.png' % (outfile_dir), convert_image_np(___), vmin=0, vmax=1)
        plt.imsave('%s/real_foreground_a.png' % (outfile_dir), convert_image_np(____), vmin=0, vmax=1)
        # NOTE(review): scipy.misc.toimage was removed in SciPy >= 1.2 — this
        # pins the runtime to an old SciPy; confirm the pinned version.
        scipy.misc.toimage(convert_image_np(_____[:, 0, :, :][None, :, :, :])).save('%s/mask_f.png' % (outfile_dir))
    torch.save(reals_fa, dir2save+'reals_f.pth')
    torch.save(reals_b, dir2save+'reals_b.pth')
    torch.save(masks_f, dir2save+'masks_f.pth')
    logger.info('-' * 80)
    logger.info(green + '[INFO]: data prepared!' + reset)
    logger.info('-' * 80)
    torch.cuda.synchronize()
    start_time = time.time()
    logger.info(green + '[INFO]: training starts at %s' % time.strftime("%H:%M:%S", time.localtime()) + reset)
    logger.info('-' * 80)
    # ---- per-scale training loop (coarse -> fine) ----------------------------
    for scale_num in range(scales):
        iters = iters_list[scale_num]
        outfile_dir = '%s%s/' % (dir2save, scale_num)
        real_curr = reals[scale_num]
        # x: random subset of iterations at which the real/fake D loss signs
        # are flipped — presumably a label-flipping regularizer; confirm intent.
        x = np.random.choice(iters, int(iters*p4flip), replace=False)
        # real_seg = get_seg(real_curr[3], n_segments=n_segments, compactness=compactness, sigma=sigma, start_label=start_label)
        # 'zeros' is actually a border mask: 1 on a 10%-wide frame around the
        # image, 0 inside; used to weight the reconstruction loss in mode 'f'.
        zeros = torch.zeros_like(real_curr[3]).to(device)
        edge_w, edge_h = math.ceil(0.1*real_curr[3].shape[3]), math.ceil(0.1*real_curr[3].shape[2])
        for i in range(edge_w):
            zeros[:,:,:,i] = 1.
        for i in range(real_curr[3].shape[3]-edge_w, real_curr[3].shape[3]):
            zeros[:,:,:,i] = 1.
        for i in range(edge_h):
            zeros[:,:,i,:] = 1.
        for i in range(real_curr[3].shape[2]-edge_h, real_curr[3].shape[2]):
            zeros[:,:,i,:] = 1.
        assert zeros[0,0,0,0] == 1
        if mode == 'f':
            alpha4cos = alpha4cos_ini
            # NOTE(review): scale_num ranges over range(scales), so
            # `scale_num >= scales` is never true and the *10 branch is dead.
            # The "# 4 5" comment suggests a threshold like `scales - 2` was
            # intended — confirm and fix separately.
            if scale_num >= scales: # 4 5
                alpha4rec = alpha4rec_ini * 10
            else: # 0 1 2 3
                alpha4rec = alpha4rec_ini
            real_curr[3] = real_curr[3].to(device)
            h, w = real_curr[3].shape[2], real_curr[3].shape[3]
            # 'factor' scales style weighting down as scale_num grows (finer scales).
            D, G, optimizerD, optimizerG, schedulerD, schedulerG = organise_models(
                mode, device, weights2load, lr_g, lr_d, channels, kernel_size, stride, if_padding,
                G_num_layer, D_num_layer, out_channels, factor=0.01+weight4style*(scales-scale_num-1)/scales
            )
        elif mode == 'b':
            # if scale_num <= 0:
            #     lr_g = 0.0001
            #     lr_d = 0.0001
            alpha4rec = alpha4rec_ini
            alpha4cos = alpha4cos_ini
            real_curr[1] = real_curr[1].to(device)
            h, w = real_curr[1].shape[2], real_curr[1].shape[3]
            D, G, optimizerD, optimizerG, schedulerD, schedulerG = organise_models(
                mode, device, weights2load, lr_g, lr_d, channels, kernel_size, stride, if_padding, G_num_layer, D_num_layer, out_channels
            )
        # [D, G], [optimizerD, optimizerG] = amp.initialize([D, G], [optimizerD, optimizerG], opt_level='O1', num_losses=14)
        # p_loss = 0
        #p_loss = p_loss.to(device)
        r_loss = recon_loss(False)
        r_loss = r_loss.to(device)
        # Padder compensates for the receptive-field shrinkage of the
        # unpadded convolutions when if_padding is off.
        if if_padding:
            padder = make_padder(0)
        else:
            padder = make_padder((G_num_layer-1)*1+2+1)
        # if opts.ani==True:
        #     fpadder = make_padder(0)
        #     h_f = h_f + (1+2+G_num_layer*2)*2
        #     w_f = w_f + (1+2+G_num_layer*2)*2
        # noise_1: fixed (zero) noise map used for the reconstruction path.
        noise_1 = padder(Generate_noise([channels, h, w], device=device, if_0=True, if_c_same=False))
        epoch_iterator = create_progressbar(
            iterable=range(iters),
            desc="Training scale [{}/{}]".format(scale_num, scales-1),
            offset=0, leave=True, logging_on_update=False, logging_on_close=True, postfix=True
        )
        for i in epoch_iterator:
            epoch_iterator.set_description('Scale [{}/{}], Iteration [{}/{}]'.format(scale_num+1, scales, i+1, iters))
            if mode == 'f':
                if i >= 1600 and scale_num > 0:
                    alpha4rec = alpha4rec_ini
                # Build per-layer style references: augmented crops of the
                # real foreground, each padded for its target layer depth.
                styles_ref = []
                _tmp = real_curr[3].squeeze(0).cpu()
                for cnt in range(G_num_layer*2+2):
                    if if_padding:
                        _padder = make_padder(0)
                    else:
                        _padder = make_padder(2+2*3+(G_num_layer-1-cnt)*1)
                    _augment = data_augmenter(_tmp, device=device)
                    _augment_ = _padder(_augment)
                    styles_ref.append(_augment_.detach())
                    del _augment, _augment_
            # Fresh random noise each iteration; single-channel at the first
            # (coarsest) scale, full-channel afterwards.
            if Gs == []:
                noise_1 = padder(Generate_noise([1, h, w], device=device, if_0=False, if_c_same=True))
                noise_2 = padder(Generate_noise([1, h, w], device=device, if_0=False, if_c_same=True))
                # noise_2_f = padder(get_slerp_interp([1, h_f, w_f], device=device, iters=iters, iter_curr=i, if_c_same=True, start=noise_2_f_s, end=noise_2_f_e))
            else:
                noise_2 = padder(Generate_noise([channels, h, w], device=device, if_0=False, if_c_same=False))
                # noise_2_f = padder(get_slerp_interp([channels, h_f, w_f], device=device, iters=iters, iter_curr=i, if_c_same=False, start=noise_2_f_s, end=noise_2_f_e))
            # ---- discriminator updates (WGAN-GP style) ----
            for j in range(D_steps):
                if (j == 0) & (i == 0):
                    # First iteration of the scale: build the upsampled
                    # output of all previous scales (prev) and the fixed
                    # reconstruction input (noise_3), and calibrate
                    # noise_weight from the reconstruction RMSE.
                    if Gs == []:
                        noise_3 = padder(Generate_noise([channels, h, w], device=device, if_0=True, if_c_same=False))
                        prev = torch.full([1, channels, h, w], 0, device=device)
                        _ = prev
                        prev = padder(prev)
                        noise_weight = 1
                    else:
                        criterion = nn.MSELoss()
                        if mode == 'f':
                            prev = padder(draw_concat(Gs, Zs, reals_fa, NoiseWeight, _, 'rand', kernel_size, channels, device, padder, G_num_layer, mode))
                            noise_3 = draw_concat(Gs, Zs, reals_fa, NoiseWeight, _, 'rec', kernel_size, channels, device, padder, G_num_layer, mode)
                            RMSE = torch.sqrt(criterion(real_curr[3], noise_3))
                        elif mode == 'b':
                            prev = padder(draw_concat(Gs, Zs, reals_b, NoiseWeight, _, 'rand', kernel_size, channels, device, padder, G_num_layer, mode))
                            noise_3 = draw_concat(Gs, Zs, reals_b, NoiseWeight, _, 'rec', kernel_size, channels, device, padder, G_num_layer, mode)
                            RMSE = torch.sqrt(criterion(real_curr[1], noise_3))
                        noise_weight = noise_weight_ini*RMSE
                        noise_3 = padder(noise_3)
                else:
                    if mode == 'f':
                        prev = padder(draw_concat(Gs, Zs, reals_fa, NoiseWeight, _, 'rand', kernel_size, channels, device, padder, G_num_layer, mode))
                    elif mode == 'b':
                        prev = padder(draw_concat(Gs, Zs, reals_b, NoiseWeight, _, 'rand', kernel_size, channels, device, padder, G_num_layer, mode))
                if Gs == []:
                    noise = noise_2
                else:
                    noise = noise_weight * noise_2 + prev
                D.zero_grad()
                # Real pass; loss sign flipped on the randomly chosen
                # iterations in x (then restored for logging below).
                if mode == 'f':
                    output = D(real_curr[3])
                elif mode == 'b':
                    output = D(real_curr[1])
                if i in x:
                    errD_real = output.mean()
                else:
                    errD_real = -output.mean()
                # with amp.scale_loss(errD_real, optimizerD, loss_id=0) as errD_real:
                #     errD_real.backward(retain_graph=True)
                errD_real.backward(retain_graph=True)
                if i in x:
                    errD_real = -errD_real
                if mode == 'f':
                    fake = G(noise.detach(), styles_ref, prev)
                elif mode == 'b':
                    fake = G(noise.detach(), prev)
                output = D(fake.detach())
                if i in x:
                    errD_fake = -output.mean()
                else:
                    errD_fake = output.mean()
                # with amp.scale_loss(errD_fake, optimizerD, loss_id=1) as errD_fake:
                #     errD_fake.backward(retain_graph=True)
                errD_fake.backward(retain_graph=True)
                if i in x:
                    errD_fake = -errD_fake
                if mode == 'f':
                    gradient_penalty = calc_gradient_penalty(D, real_curr[3], fake, lambda_grad, device)
                elif mode == 'b':
                    gradient_penalty = calc_gradient_penalty(D, real_curr[1], fake, lambda_grad, device)
                # with amp.scale_loss(gradient_penalty, optimizerD, loss_id=2) as gradient_penalty:
                #     gradient_penalty.backward()
                gradient_penalty.backward()
                optimizerD.step()
                D.zero_grad()
                optimizerD.zero_grad()
                # Detach scalars for logging, then free the graph-holding tensors.
                _errD_real = errD_real.item()
                _errD_fake = errD_fake.item()
                _gradient_penalty = gradient_penalty.item()
                del errD_real, errD_fake, gradient_penalty
            _errD = _errD_real + _errD_fake + _gradient_penalty
            errD2plot.append([_errD_real, _errD_fake, _gradient_penalty])
            schedulerD.step(_errD)
            # ---- generator updates: adversarial + rec + cos + perceptual ----
            for j in range(G_steps):
                G.zero_grad()
                ###
                output = D(fake)
                ###
                errG = -output.mean()
                # with amp.scale_loss(errG, optimizerG, loss_id=3) as errG:
                #     errG.backward(retain_graph=True)
                errG.backward(retain_graph=True)
                # Reconstruction path: fixed noise_1 plus the previous-scale
                # reconstruction noise_3.
                Z_opt = noise_weight * noise_1 + noise_3
                if mode == 'f':
                    _tmp = G(Z_opt.detach(), styles_ref, noise_3)
                elif mode == 'b':
                    _tmp = G(Z_opt.detach(), noise_3)
                if alpha4rec != 0:
                    # loss = r_loss
                    loss = nn.L1Loss()
                    Z_opt = noise_weight * noise_1 + noise_3
                    if mode == 'f':
                        # Border-masked L1: only penalize the image frame.
                        _loss = loss(_tmp*zeros, real_curr[3]*zeros)
                        # _loss = calc_local_rec(loss, _tmp, real_seg)
                    elif mode == 'b':
                        _loss = loss(_tmp, real_curr[1])
                    rec_loss = alpha4rec * _loss
                    del _loss
                    # with amp.scale_loss(rec_loss, optimizerG, loss_id=4) as rec_loss:
                    #     rec_loss.backward(retain_graph=True)
                    rec_loss.backward(retain_graph=True)
                    rec_loss = rec_loss.detach()
                else:
                    Z_opt = noise_1
                    rec_loss = torch.Tensor([0])
                if alpha4cos != 0:
                    loss = nn.CosineEmbeddingLoss()
                    Z_opt = noise_weight * noise_1 + noise_3
                    if mode == 'f':
                        _loss = loss(_tmp, real_curr[3], torch.ones_like(real_curr[3]))
                    elif mode == 'b':
                        _loss = loss(_tmp, real_curr[1], torch.ones_like(real_curr[1]))
                    cos_loss = alpha4cos * _loss
                    del _loss
                    # with amp.scale_loss(cos_loss, optimizerG, loss_id=5) as cos_loss:
                    #     cos_loss.backward(retain_graph=True)
                    cos_loss.backward(retain_graph=True)
                    cos_loss = cos_loss.detach()
                else:
                    Z_opt = noise_1
                    cos_loss = torch.Tensor([0])
                if alpha4vgg_ini != 0:
                    loss = p_loss
                    Z_opt = noise_weight * noise_1 + noise_3
                    # NOTE(review): in mode 'f' the alpha4vgg_ini weight is
                    # not applied (see commented line), while mode 'b'
                    # multiplies by it — confirm this asymmetry is intended.
                    if mode == 'f':
                        # _loss = alpha4vgg_ini * loss(_tmp, real_curr[3], device)
                        _loss = loss(_tmp, real_curr[3], device)
                    elif mode == 'b':
                        _loss = alpha4vgg_ini * loss(_tmp, real_curr[1], device)
                    perceptual_loss = _loss
                    # perceptual_loss1 = _loss1
                    # perceptual_loss2 = _loss2
                    del _loss
                    # perceptual_loss = factor4vgg[scale_num] * alpha4vgg * p_loss(G(Z_opt.detach(), styles_ref, noise_3), real_curr[3], device)
                    # perceptual_loss = factor4vgg[scale_num] * alpha4vgg * p_loss(G(Z_opt.detach(), noise_3), real_curr[1], device)
                    # with amp.scale_loss(perceptual_loss_f, optimizerG_f, loss_id=6) as perceptual_loss_f:
                    #     perceptual_loss_f.backward(retain_graph=True)
                    # with amp.scale_loss(perceptual_loss, optimizerG, loss_id=5) as perceptual_loss:
                    #     perceptual_loss.backward(retain_graph=True)
                    # perceptual_loss1.backward(retain_graph=True)
                    perceptual_loss.backward(retain_graph=True)
                    # perceptual_loss = perceptual_loss1.detach() + perceptual_loss2.detach()
                    perceptual_loss = perceptual_loss.detach()
                else:
                    Z_opt = noise_1
                    perceptual_loss = torch.Tensor([0])
                optimizerG.step()
                G.zero_grad()
                optimizerG.zero_grad()
                _errG = errG.item()
                _rec_loss = rec_loss.item()
                _cos_loss = cos_loss.item()
                _perceptual_loss = perceptual_loss.item()
                del errG, rec_loss, cos_loss, perceptual_loss
            errG2plot.append([_errG, _rec_loss, _cos_loss, _perceptual_loss])
            _errG = _errG + _rec_loss + _cos_loss + _perceptual_loss
            schedulerG.step(_errG)
            del noise_2
            # Periodic snapshots: fake samples every 200 iters, loss curves
            # every 500 iters (and always at the last iteration).
            if i % 200 == 0 or i == (iters-1):
                if mode == 'b':
                    # Composite the fake with the background mask before saving.
                    _fake = fake.cpu()
                    _fake = _fake * masks_b[scale_num]
                    _fake = _fake + masks_b[scale_num] - torch.ones_like(masks_b[scale_num])
                    plt.imsave('%s/fake_%s_%s.png' % (outfile_dir, mode, str(i)), convert_image_np(_fake.detach()), vmin=0, vmax=1)
                elif mode == 'f':
                    plt.imsave('%s/fake_%s_%s.png' % (outfile_dir, mode, str(i)), convert_image_np(fake.detach()), vmin=0, vmax=1)
            if i % 500 == 0 or i == (iters-1):
                plot_sinloss(errG2plot, errD2plot, scale_num, iters_list, outfile_dir, mode, i)
        epoch_iterator.close()
        # ---- freeze this scale and stash it for the next one ----
        torch.save(G.state_dict(), '%s/G_%s.pth' % (outfile_dir, mode))
        torch.save(D.state_dict(), '%s/D_%s.pth' % (outfile_dir, mode))
        G = reset_grads(G, False)
        G.eval()
        D = reset_grads(D, False)
        D.eval()
        Gs.append(G)
        Ds.append(D)
        NoiseWeight.append(noise_weight)
        Zs.append(noise_1)
        # torch.save(Gs, '%s/Gs.pth' % (dir2save))
        torch.save(Zs, '%s/Zs.pth' % (dir2save))
        torch.save(NoiseWeight, '%s/noiseweight_%s.pth' % (dir2save, mode))
        del D, G
    torch.cuda.synchronize()
    end_time = time.time()
    logger.info('-' * 80)
    logger.info(green + '[INFO]: training time cost : %s' % seconds2time(end_time - start_time) + reset)
    logger.info('-' * 80)
    logger.info(green + '[INFO]: randomly generating %s samples...' %(opts.gen_num) + reset)
    logger.info('-' * 80)
    # ---- sampling + evaluation ----------------------------------------------
    if mode == 'f':
        ran_gen(Gs, Zs, NoiseWeight, reals_fa, opts, dir2gen, padder)
    elif mode == 'b':
        ran_gen(Gs, Zs, NoiseWeight, reals_b, opts, dir2gen, padder)
    logger.info('-' * 80)
    logger.info(green + '[INFO]: calculating eval metrics...' + reset)
    logger.info('-' * 80)
    sifid = calculate_sifid_given_paths(dir2gen+'real.png', dir2gen, batch_size=1, dims=64, suffix='png')
    diversity = calculate_cs(dir2gen, suffix='png')
    # GQI = diversity / SIFID: higher is better.
    logger.info(green + '[INFO]: SIFID : %6f DIVERSITY : %6f GQI : %6f ' % (sifid, diversity, diversity/sifid)+ reset)
    # nni.report_final_result(diversity/sifid)
if __name__ == "__main__":
    main()
"configs.get_configs",
"torch.nn.L1Loss",
"utils_functions.data_augmenter",
"torch.cuda.synchronize",
"torch.nn.MSELoss",
"utils_functions.ran_gen",
"training_functions.organise_models",
"argparse.ArgumentParser",
"training_functions.VGGPerceptualLoss",
"evaluation.calculate_sifid_given_paths",
... | [((995, 1028), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1018, 1028), False, 'import warnings\n'), ((1029, 1050), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (1043, 1050), False, 'import matplotlib\n'), ((1171, 1196), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1194, 1196), False, 'import argparse\n'), ((1459, 1471), 'utils_functions.get_logger', 'get_logger', ([], {}), '()\n', (1469, 1471), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((1485, 1512), 'configs.get_configs', 'get_configs', (['parser', 'logger'], {}), '(parser, logger)\n', (1496, 1512), False, 'from configs import get_configs, LogbookFormatter\n'), ((1822, 1842), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1836, 1842), True, 'import numpy as np\n'), ((1847, 1870), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1864, 1870), False, 'import torch\n'), ((1875, 1907), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (1901, 1907), False, 'import torch\n'), ((3859, 3895), 'utils_functions.modify_scales', 'modify_scales', (['anot_path', 'scale_base'], {}), '(anot_path, scale_base)\n', (3872, 3895), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((4357, 4379), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (4369, 4379), False, 'import torch\n'), ((4944, 5029), 'utils_functions.get_reals', 'get_reals', (['mode', 'img_path', 'anot_path', 'scales', 'scale_base', 'reals', 'channels', 
'masks'], {}), '(mode, img_path, anot_path, scales, scale_base, reals, channels, masks\n )\n', (4953, 5029), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((6155, 6201), 'torch.save', 'torch.save', (['reals_fa', "(dir2save + 'reals_f.pth')"], {}), "(reals_fa, dir2save + 'reals_f.pth')\n", (6165, 6201), False, 'import torch\n'), ((6204, 6249), 'torch.save', 'torch.save', (['reals_b', "(dir2save + 'reals_b.pth')"], {}), "(reals_b, dir2save + 'reals_b.pth')\n", (6214, 6249), False, 'import torch\n'), ((6252, 6297), 'torch.save', 'torch.save', (['masks_f', "(dir2save + 'masks_f.pth')"], {}), "(masks_f, dir2save + 'masks_f.pth')\n", (6262, 6297), False, 'import torch\n'), ((6410, 6434), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (6432, 6434), False, 'import torch\n'), ((6452, 6463), 'time.time', 'time.time', ([], {}), '()\n', (6461, 6463), False, 'import time\n'), ((21093, 21117), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (21115, 21117), False, 'import torch\n'), ((21133, 21144), 'time.time', 'time.time', ([], {}), '()\n', (21142, 21144), False, 'import time\n'), ((21737, 21836), 'evaluation.calculate_sifid_given_paths', 'calculate_sifid_given_paths', (["(dir2gen + 'real.png')", 'dir2gen'], {'batch_size': '(1)', 'dims': '(64)', 'suffix': '"""png"""'}), "(dir2gen + 'real.png', dir2gen, batch_size=1,\n dims=64, suffix='png')\n", (21764, 21836), False, 'from evaluation import calculate_sifid_given_paths, calculate_cs\n'), ((21847, 21882), 'evaluation.calculate_cs', 'calculate_cs', (['dir2gen'], {'suffix': '"""png"""'}), "(dir2gen, suffix='png')\n", (21859, 21882), False, 'from evaluation import calculate_sifid_given_paths, calculate_cs\n'), ((2059, 2075), 'time.localtime', 'time.localtime', ([], {}), '()\n', (2073, 
2075), False, 'import time\n'), ((2226, 2247), 'os.makedirs', 'os.makedirs', (['dir2save'], {}), '(dir2save)\n', (2237, 2247), False, 'import os\n'), ((2256, 2276), 'os.makedirs', 'os.makedirs', (['dir2gen'], {}), '(dir2gen)\n', (2267, 2276), False, 'import os\n'), ((2916, 2981), 'logging.FileHandler', 'logging.FileHandler', ([], {'filename': 'logpath', 'mode': '"""a"""', 'encoding': '"""utf-8"""'}), "(filename=logpath, mode='a', encoding='utf-8')\n", (2935, 2981), False, 'import logging\n'), ((3052, 3130), 'configs.LogbookFormatter', 'LogbookFormatter', ([], {'fmt': '"""[%(asctime)s] %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(fmt='[%(asctime)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n", (3068, 3130), False, 'from configs import get_configs, LogbookFormatter\n'), ((4499, 4545), 'training_functions.VGGPerceptualLoss', 'VGGPerceptualLoss', ([], {'resize': '(False)', 'device': 'device'}), '(resize=False, device=device)\n', (4516, 4545), False, 'from training_functions import organise_models, recon_loss, VGGPerceptualLoss\n'), ((8909, 8926), 'training_functions.recon_loss', 'recon_loss', (['(False)'], {}), '(False)\n', (8919, 8926), False, 'from training_functions import organise_models, recon_loss, VGGPerceptualLoss\n'), ((20696, 20717), 'utils_functions.reset_grads', 'reset_grads', (['G', '(False)'], {}), '(G, False)\n', (20707, 20717), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((20747, 20768), 'utils_functions.reset_grads', 'reset_grads', (['D', '(False)'], {}), '(D, False)\n', (20758, 20768), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), 
((20955, 20993), 'torch.save', 'torch.save', (['Zs', "('%s/Zs.pth' % dir2save)"], {}), "(Zs, '%s/Zs.pth' % dir2save)\n", (20965, 20993), False, 'import torch\n'), ((21004, 21071), 'torch.save', 'torch.save', (['NoiseWeight', "('%s/noiseweight_%s.pth' % (dir2save, mode))"], {}), "(NoiseWeight, '%s/noiseweight_%s.pth' % (dir2save, mode))\n", (21014, 21071), False, 'import torch\n'), ((21449, 21510), 'utils_functions.ran_gen', 'ran_gen', (['Gs', 'Zs', 'NoiseWeight', 'reals_fa', 'opts', 'dir2gen', 'padder'], {}), '(Gs, Zs, NoiseWeight, reals_fa, opts, dir2gen, padder)\n', (21456, 21510), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((5400, 5424), 'os.makedirs', 'os.makedirs', (['outfile_dir'], {}), '(outfile_dir)\n', (5411, 5424), False, 'import os\n'), ((5690, 5709), 'utils_functions.convert_image_np', 'convert_image_np', (['_'], {}), '(_)\n', (5706, 5709), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((5789, 5809), 'utils_functions.convert_image_np', 'convert_image_np', (['__'], {}), '(__)\n', (5805, 5809), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((5890, 5911), 'utils_functions.convert_image_np', 'convert_image_np', (['___'], {}), '(___)\n', (5906, 5911), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, 
reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((5993, 6015), 'utils_functions.convert_image_np', 'convert_image_np', (['____'], {}), '(____)\n', (6009, 6015), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((7051, 7089), 'math.ceil', 'math.ceil', (['(0.1 * real_curr[3].shape[3])'], {}), '(0.1 * real_curr[3].shape[3])\n', (7060, 7089), False, 'import math\n'), ((7089, 7127), 'math.ceil', 'math.ceil', (['(0.1 * real_curr[3].shape[2])'], {}), '(0.1 * real_curr[3].shape[2])\n', (7098, 7127), False, 'import math\n'), ((7911, 8121), 'training_functions.organise_models', 'organise_models', (['mode', 'device', 'weights2load', 'lr_g', 'lr_d', 'channels', 'kernel_size', 'stride', 'if_padding', 'G_num_layer', 'D_num_layer', 'out_channels'], {'factor': '(0.01 + weight4style * (scales - scale_num - 1) / scales)'}), '(mode, device, weights2load, lr_g, lr_d, channels,\n kernel_size, stride, if_padding, G_num_layer, D_num_layer, out_channels,\n factor=0.01 + weight4style * (scales - scale_num - 1) / scales)\n', (7926, 8121), False, 'from training_functions import organise_models, recon_loss, VGGPerceptualLoss\n'), ((9007, 9021), 'utils_functions.make_padder', 'make_padder', (['(0)'], {}), '(0)\n', (9018, 9021), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((9057, 9099), 'utils_functions.make_padder', 'make_padder', (['((G_num_layer - 1) * 1 + 2 + 1)'], {}), '((G_num_layer - 1) * 1 + 2 + 1)\n', (9068, 9099), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, 
draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((9281, 9356), 'utils_functions.Generate_noise', 'Generate_noise', (['[channels, h, w]'], {'device': 'device', 'if_0': '(True)', 'if_c_same': '(False)'}), '([channels, h, w], device=device, if_0=True, if_c_same=False)\n', (9295, 9356), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((21541, 21601), 'utils_functions.ran_gen', 'ran_gen', (['Gs', 'Zs', 'NoiseWeight', 'reals_b', 'opts', 'dir2gen', 'padder'], {}), '(Gs, Zs, NoiseWeight, reals_b, opts, dir2gen, padder)\n', (21548, 21601), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((2428, 2439), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2437, 2439), False, 'import os\n'), ((2509, 2520), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2518, 2520), False, 'import os\n'), ((2599, 2610), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2608, 2610), False, 'import os\n'), ((2701, 2712), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2710, 2712), False, 'import os\n'), ((6984, 7014), 'torch.zeros_like', 'torch.zeros_like', (['real_curr[3]'], {}), '(real_curr[3])\n', (7000, 7014), False, 'import torch\n'), ((8535, 8677), 'training_functions.organise_models', 'organise_models', (['mode', 'device', 'weights2load', 'lr_g', 'lr_d', 'channels', 'kernel_size', 'stride', 'if_padding', 'G_num_layer', 'D_num_layer', 'out_channels'], {}), '(mode, device, weights2load, lr_g, lr_d, channels,\n kernel_size, stride, if_padding, G_num_layer, D_num_layer, out_channels)\n', (8550, 8677), False, 'from training_functions import organise_models, 
recon_loss, VGGPerceptualLoss\n'), ((20429, 20508), 'utils_functions.plot_sinloss', 'plot_sinloss', (['errG2plot', 'errD2plot', 'scale_num', 'iters_list', 'outfile_dir', 'mode', 'i'], {}), '(errG2plot, errD2plot, scale_num, iters_list, outfile_dir, mode, i)\n', (20441, 20508), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((2312, 2323), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2321, 2323), False, 'import os\n'), ((6060, 6110), 'utils_functions.convert_image_np', 'convert_image_np', (['_____[:, 0, :, :][None, :, :, :]'], {}), '(_____[:, 0, :, :][None, :, :, :])\n', (6076, 6110), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((10240, 10275), 'utils_functions.data_augmenter', 'data_augmenter', (['_tmp'], {'device': 'device'}), '(_tmp, device=device)\n', (10254, 10275), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((10483, 10551), 'utils_functions.Generate_noise', 'Generate_noise', (['[1, h, w]'], {'device': 'device', 'if_0': '(False)', 'if_c_same': '(True)'}), '([1, h, w], device=device, if_0=False, if_c_same=True)\n', (10497, 10551), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((10586, 10654), 'utils_functions.Generate_noise', 'Generate_noise', (['[1, 
h, w]'], {'device': 'device', 'if_0': '(False)', 'if_c_same': '(True)'}), '([1, h, w], device=device, if_0=False, if_c_same=True)\n', (10600, 10654), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((10870, 10946), 'utils_functions.Generate_noise', 'Generate_noise', (['[channels, h, w]'], {'device': 'device', 'if_0': '(False)', 'if_c_same': '(False)'}), '([channels, h, w], device=device, if_0=False, if_c_same=False)\n', (10884, 10946), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((14427, 14492), 'utils_functions.calc_gradient_penalty', 'calc_gradient_penalty', (['D', 'real_curr[3]', 'fake', 'lambda_grad', 'device'], {}), '(D, real_curr[3], fake, lambda_grad, device)\n', (14448, 14492), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((15999, 16010), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (16008, 16010), True, 'import torch.nn as nn\n'), ((16769, 16786), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (16781, 16786), False, 'import torch\n'), ((16849, 16873), 'torch.nn.CosineEmbeddingLoss', 'nn.CosineEmbeddingLoss', ([], {}), '()\n', (16871, 16873), True, 'import torch.nn as nn\n'), ((17610, 17627), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (17622, 17627), False, 'import torch\n'), ((19250, 19267), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (19262, 19267), False, 'import torch\n'), ((21231, 21266), 
'utils_functions.seconds2time', 'seconds2time', (['(end_time - start_time)'], {}), '(end_time - start_time)\n', (21243, 21266), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((6548, 6564), 'time.localtime', 'time.localtime', ([], {}), '()\n', (6562, 6564), False, 'import time\n'), ((10093, 10107), 'utils_functions.make_padder', 'make_padder', (['(0)'], {}), '(0)\n', (10104, 10107), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((10168, 10220), 'utils_functions.make_padder', 'make_padder', (['(2 + 2 * 3 + (G_num_layer - 1 - cnt) * 1)'], {}), '(2 + 2 * 3 + (G_num_layer - 1 - cnt) * 1)\n', (10179, 10220), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((11379, 11428), 'torch.full', 'torch.full', (['[1, channels, h, w]', '(0)'], {'device': 'device'}), '([1, channels, h, w], 0, device=device)\n', (11389, 11428), False, 'import torch\n'), ((11609, 11621), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (11619, 11621), True, 'import torch.nn as nn\n'), ((14566, 14631), 'utils_functions.calc_gradient_penalty', 'calc_gradient_penalty', (['D', 'real_curr[1]', 'fake', 'lambda_grad', 'device'], {}), '(D, real_curr[1], fake, lambda_grad, device)\n', (14587, 14631), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, 
modify_scales, get_seg, calc_local_rec\n'), ((20031, 20066), 'torch.ones_like', 'torch.ones_like', (['masks_b[scale_num]'], {}), '(masks_b[scale_num])\n', (20046, 20066), False, 'import torch\n'), ((11271, 11346), 'utils_functions.Generate_noise', 'Generate_noise', (['[channels, h, w]'], {'device': 'device', 'if_0': '(True)', 'if_c_same': '(False)'}), '([channels, h, w], device=device, if_0=True, if_c_same=False)\n', (11285, 11346), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((11855, 11969), 'utils_functions.draw_concat', 'draw_concat', (['Gs', 'Zs', 'reals_fa', 'NoiseWeight', '_', '"""rec"""', 'kernel_size', 'channels', 'device', 'padder', 'G_num_layer', 'mode'], {}), "(Gs, Zs, reals_fa, NoiseWeight, _, 'rec', kernel_size, channels,\n device, padder, G_num_layer, mode)\n", (11866, 11969), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((12677, 12792), 'utils_functions.draw_concat', 'draw_concat', (['Gs', 'Zs', 'reals_fa', 'NoiseWeight', '_', '"""rand"""', 'kernel_size', 'channels', 'device', 'padder', 'G_num_layer', 'mode'], {}), "(Gs, Zs, reals_fa, NoiseWeight, _, 'rand', kernel_size, channels,\n device, padder, G_num_layer, mode)\n", (12688, 12792), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((17028, 17057), 'torch.ones_like', 'torch.ones_like', (['real_curr[3]'], {}), '(real_curr[3])\n', (17043, 17057), False, 'import torch\n'), ((11704, 11819), 
'utils_functions.draw_concat', 'draw_concat', (['Gs', 'Zs', 'reals_fa', 'NoiseWeight', '_', '"""rand"""', 'kernel_size', 'channels', 'device', 'padder', 'G_num_layer', 'mode'], {}), "(Gs, Zs, reals_fa, NoiseWeight, _, 'rand', kernel_size, channels,\n device, padder, G_num_layer, mode)\n", (11715, 11819), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((12280, 12393), 'utils_functions.draw_concat', 'draw_concat', (['Gs', 'Zs', 'reals_b', 'NoiseWeight', '_', '"""rec"""', 'kernel_size', 'channels', 'device', 'padder', 'G_num_layer', 'mode'], {}), "(Gs, Zs, reals_b, NoiseWeight, _, 'rec', kernel_size, channels,\n device, padder, G_num_layer, mode)\n", (12291, 12393), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((12866, 12980), 'utils_functions.draw_concat', 'draw_concat', (['Gs', 'Zs', 'reals_b', 'NoiseWeight', '_', '"""rand"""', 'kernel_size', 'channels', 'device', 'padder', 'G_num_layer', 'mode'], {}), "(Gs, Zs, reals_b, NoiseWeight, _, 'rand', kernel_size, channels,\n device, padder, G_num_layer, mode)\n", (12877, 12980), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n'), ((17154, 17183), 'torch.ones_like', 'torch.ones_like', (['real_curr[1]'], {}), '(real_curr[1])\n', (17169, 17183), False, 'import torch\n'), ((12130, 12244), 'utils_functions.draw_concat', 'draw_concat', (['Gs', 'Zs', 'reals_b', 'NoiseWeight', '_', '"""rand"""', 'kernel_size', 'channels', 
'device', 'padder', 'G_num_layer', 'mode'], {}), "(Gs, Zs, reals_b, NoiseWeight, _, 'rand', kernel_size, channels,\n device, padder, G_num_layer, mode)\n", (12141, 12244), False, 'from utils_functions import ran_gen, seconds2time, get_logger, data_augmenter, plot_sinloss, make_padder, get_reals, Generate_noise, convert_image_np, draw_concat, calc_gradient_penalty, reset_grads, modify_scales, get_seg, calc_local_rec\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 30 14:22:15 2020
@author: paras
"""
import pandas as pd
import numpy as np
from sklearn.externals import joblib
from flask import Flask, jsonify, request
import json
import flask
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from io import BytesIO
import urllib
def loaderImage(URL):
    """Fetch the image at *URL* and return it as a Keras-style array.

    The image is resized to 125x125 on load. The raw bytes are read while
    the connection is open, then decoded after it is closed."""
    response = urllib.request.urlopen(URL)
    try:
        raw_bytes = response.read()
    finally:
        response.close()
    img = image.load_img(BytesIO(raw_bytes), target_size=(125, 125))
    return image.img_to_array(img)
# Flask application instance used by the route decorators below.
app = Flask(__name__)
# NOTE(review): only referenced by commented-out code in predict() —
# possibly dead; the live path loads images at 125x125 via loaderImage.
IMAGE_SIZE = 64
@app.route('/')
def index():
    """Serve the upload form (index.html)."""
    return flask.render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
    """Handle the upload-form POST: load the submitted image, run the
    cat/dog classifier on it, and re-render index.html with the result.

    Reads the ``image_path`` form field; the first five characters of the
    submitted path are stripped before loading (presumably a fixed prefix
    added by the form — TODO confirm against index.html)."""
    form_data = request.form.to_dict()
    print(form_data)
    print("Uploaded image path: "+str(form_data["image_path"]))
    # NOTE(review): the [5:] slice assumes a fixed-length prefix on the
    # submitted path — verify against the form markup.
    test_image = loaderImage(form_data["image_path"][5:])
    # The model expects a batch dimension: (1, H, W, C).
    test_image = np.expand_dims(test_image, axis = 0)
    # Deserialise the pickled model once and reuse it across requests
    # instead of loading it from disk on every POST.
    classifier = getattr(predict, "_classifier", None)
    if classifier is None:
        classifier = joblib.load('prediction_classifier.pkl')
        predict._classifier = classifier
    result = classifier.predict(test_image)
    # Binary output: 1 -> Dog, anything else -> Cat.
    prediction = 'Dog' if result[0][0] == 1 else 'Cat'
    return flask.render_template('index.html', predicted_value="Uploaded image was of:\n {}".format(prediction))
# Run the development server, listening on all interfaces on port 8080.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8080)
| [
"flask.render_template",
"keras.preprocessing.image.img_to_array",
"flask.Flask",
"sklearn.externals.joblib.load",
"flask.request.form.to_dict",
"numpy.expand_dims",
"urllib.request.urlopen"
] | [((552, 567), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (557, 567), False, 'from flask import Flask, jsonify, request\n'), ((520, 543), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (538, 543), False, 'from keras.preprocessing import image\n'), ((626, 661), 'flask.render_template', 'flask.render_template', (['"""index.html"""'], {}), "('index.html')\n", (647, 661), False, 'import flask\n'), ((736, 758), 'flask.request.form.to_dict', 'request.form.to_dict', ([], {}), '()\n', (756, 758), False, 'from flask import Flask, jsonify, request\n'), ((1077, 1111), 'numpy.expand_dims', 'np.expand_dims', (['test_image'], {'axis': '(0)'}), '(test_image, axis=0)\n', (1091, 1111), True, 'import numpy as np\n'), ((1159, 1199), 'sklearn.externals.joblib.load', 'joblib.load', (['"""prediction_classifier.pkl"""'], {}), "('prediction_classifier.pkl')\n", (1170, 1199), False, 'from sklearn.externals import joblib\n'), ((398, 425), 'urllib.request.urlopen', 'urllib.request.urlopen', (['URL'], {}), '(URL)\n', (420, 425), False, 'import urllib\n')] |
import numpy as np
import pygenome as pg
# fitness function: measures the sortness of a permutation
def sorted_permutation(vector):
    """Return the number of positions in *vector* that do not hold their
    own index — 0 means the permutation is fully sorted."""
    misplaced = 0
    for index, value in enumerate(vector):
        if value != index:
            misplaced += 1
    return misplaced
# Size of the permutations evolved by both GA runs below.
permutation_size = 10
# GA 1
def generational_no_elitism():
    """Run a generational GA (no elitism) on the sorting fitness and
    print the best individual found."""
    np.random.seed(42)  # fixed seed so both demo runs are reproducible
    population = pg.genetic_algorithm_permutation(
        sorted_permutation, permutation_size, total_generations=25)
    winner = pg.best_individual(population)
    print('fitness: %s\tgenotype: %s' % (winner.fitness.value, winner.genotype))
# GA 2
def generational_with_elitism():
    """Run a generational GA with elitism on the sorting fitness and
    print the best individual found."""
    np.random.seed(42)  # same seed as GA 1 for a like-for-like comparison
    population = pg.genetic_algorithm_permutation(
        sorted_permutation, permutation_size, total_generations=25, elitism=True)
    winner = pg.best_individual(population)
    print('fitness: %s\tgenotype: %s' % (winner.fitness.value, winner.genotype))
# entry point: run both GA variants back to back for comparison
if __name__ == "__main__":
    print('GA 1: generational, no elitism')
    generational_no_elitism()
    print('GA 2: generational, with elitism')
    generational_with_elitism()
| [
"pygenome.genetic_algorithm_permutation",
"numpy.random.seed",
"pygenome.best_individual"
] | [((337, 355), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (351, 355), True, 'import numpy as np\n'), ((366, 462), 'pygenome.genetic_algorithm_permutation', 'pg.genetic_algorithm_permutation', (['sorted_permutation', 'permutation_size'], {'total_generations': '(25)'}), '(sorted_permutation, permutation_size,\n total_generations=25)\n', (398, 462), True, 'import pygenome as pg\n'), ((479, 502), 'pygenome.best_individual', 'pg.best_individual', (['pop'], {}), '(pop)\n', (497, 502), True, 'import pygenome as pg\n'), ((627, 645), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (641, 645), True, 'import numpy as np\n'), ((656, 766), 'pygenome.genetic_algorithm_permutation', 'pg.genetic_algorithm_permutation', (['sorted_permutation', 'permutation_size'], {'total_generations': '(25)', 'elitism': '(True)'}), '(sorted_permutation, permutation_size,\n total_generations=25, elitism=True)\n', (688, 766), True, 'import pygenome as pg\n'), ((783, 806), 'pygenome.best_individual', 'pg.best_individual', (['pop'], {}), '(pop)\n', (801, 806), True, 'import pygenome as pg\n')] |
"""Structure classes."""
import numpy as np
import rmsd
import math
import warnings
from collections import Counter, OrderedDict, defaultdict
from .base import StructureClass, query, StructureSet
class AtomStructure:
    """A structure made of atoms. This contains various useful methods that rely
    on a ``atoms()`` method, which the inheriting object must supply itself. All
    atomic structures also have IDs and names.
    Two atomic structures are equal if every pairwise atom in their pairing
    are equal.
    The class would never be instantiated directly."""
    def __init__(self, id=None, name=None):
        self._id, self._name = id, name
    def __eq__(self, other):
        # Equal when every atom pairs with an equal atom in the other
        # structure. Any failure to pair (different atom counts, a
        # non-structure operand, etc.) is treated as "not equal".
        try:
            mapping = self.pairing_with(other)
            for atom1, atom2 in mapping.items():
                if not atom1 == atom2: return False
            return True
        except: return False
    def __hash__(self):
        # Identity-based hash: equal structures may hash differently, so
        # only the same object is guaranteed to collide with itself.
        return id(self)
    @property
    def id(self):
        """The structure's unique ID.
        :rtype: ``str``"""
        return self._id
    @property
    def name(self):
        """The structure's name.
        :rtype: ``str``"""
        return self._name
    @name.setter
    def name(self, name):
        self._name = name
    @property
    def mass(self):
        """The structure's mass - the sum of all its atoms' masses.
        :rtype: ``float``"""
        # Rounded to 12 d.p. to suppress floating point noise in the sum.
        return round(sum([atom.mass for atom in self.atoms()]), 12)
    @property
    def charge(self):
        """The structure's charge - the sum of all its atoms' charges.
        :rtype: ``float``"""
        return round(sum([atom.charge for atom in self.atoms()]), 12)
    @property
    def formula(self):
        """The structure's formula as a ``Counter`` dictionary - the count of
        all its atoms' elements.
        :rtype: ``Counter``"""
        return Counter([atom.element for atom in self.atoms()])
    @property
    def center_of_mass(self):
        """Returns the center of mass of the structure. This is the average of
        all the atom coordinates, weighted by the mass of each atom.
        :rtype: ``tuple``"""
        mass = self.mass
        # Mass-weighted sum of coordinates divided by the total mass.
        locations = np.array([a._location * a.mass for a in self.atoms()])
        return np.sum(locations, axis=0) / mass
    @property
    def radius_of_gyration(self):
        """The radius of gyration of a structure is a measure of how extended it
        is. It is the root mean square deviation of the atoms' distance from the
        structure's :py:meth:`.center_of_mass`.
        :rtype: ``float``"""
        center_of_mass = self.center_of_mass
        atoms = self.atoms()
        square_deviation = sum(
            [atom.distance_to(center_of_mass) ** 2 for atom in atoms]
        )
        mean_square_deviation = square_deviation / len(atoms)
        return np.sqrt(mean_square_deviation)
    def pairing_with(self, structure):
        """Takes another structure with the same number of atoms as this one,
        and attempts to find the nearest equivalent of every atom in this
        structure, in that structure.
        Atoms will be aligned first by ID (if equal), then element, then by
        name, and finally by memory address - this last metric is
        used to ensure that even when allocation is essentially random, it is at
        least the same every time two structures are aligned.
        :param AtomStructure structure: the structure to pair with.
        :raises ValueError: if the other structure has a different number of\
        atoms.
        :rtype: ``dict``"""
        atoms = self.atoms()
        other_atoms = structure.atoms()
        if len(atoms) != len(other_atoms):
            raise ValueError("{} and {} have different numbers of atoms".format(
                self, structure
            ))
        pair = {}
        # First pass: pair atoms whose IDs appear in both structures, and
        # remove them from the pools still to be matched.
        common_ids = set(a._id for a in atoms) & set(a._id for a in other_atoms)
        id_atoms = {a._id: a for a in atoms}
        id_other_atoms = {a._id: a for a in other_atoms}
        for id_ in common_ids:
            pair[id_atoms[id_]] = id_other_atoms[id_]
            atoms.remove(id_atoms[id_])
            other_atoms.remove(id_other_atoms[id_])
        # Second pass: sort both remainders by the same deterministic key
        # (ID, element, name, memory address) and pair them positionally.
        atoms, other_atoms = list(atoms), list(other_atoms)
        for l in atoms, other_atoms:
            l.sort(key=lambda a: (
                a._id, a._element, a._name, id(a)
            ))
        return {**pair, **{a1: a2 for a1, a2 in zip(atoms, other_atoms)}}
    def rmsd_with(self, structure):
        """Calculates the Root Mean Square Deviation between this structure and
        another.
        :param AtomStructure structure: the structure to check against.
        :raises ValueError: if the other structure has a different number of\
        atoms.
        :rtype: ``float``"""
        pairing = self.pairing_with(structure)
        coords1, coords2 = [[a.location for a in atoms]
            for atoms in zip(*pairing.items())]
        # Center both coordinate sets on their centers of mass before the
        # Kabsch superposition.
        c1, c2 = self.center_of_mass, structure.center_of_mass
        coords1 = [[x - c1[0], y - c1[1], z - c1[2]] for x, y, z in coords1]
        coords2 = [[x - c2[0], y - c2[1], z - c2[2]] for x, y, z in coords2]
        return round(rmsd.kabsch_rmsd(coords1, coords2), 12)
    def create_grid(self, size=1, margin=0):
        """A generator which models a grid around the structure and returns the
        coordinates of all the points in that grid. The origin is always one of
        those points, and the grid will be a box.
        :param int size: The spacing between grid points. The default is 1.
        :param int margin: How far to extend the grid beyond the structure\
        coordinates. The default is 0.
        :rtype: ``tuple``"""
        atom_locations = [atom.location for atom in self.atoms()]
        dimension_values = []
        for dimension in range(3):
            coordinates = [loc[dimension] for loc in atom_locations]
            min_, max_ = min(coordinates) - margin, max(coordinates) + margin
            # Grow outwards from 0 in steps of `size` so the origin is
            # always a grid point.
            values = [0]
            while values[0] > min_: values.insert(0, values[0] - size)
            while values[-1] < max_: values.append(values[-1] + size)
            dimension_values.append(values)
        for x in dimension_values[0]:
            for y in dimension_values[1]:
                for z in dimension_values[2]:
                    yield (x, y, z)
    def check_ids(self):
        """Looks through all the structure's sub-structures and raises a
        warning if they have duplicate ID."""
        # Not every structure type has all of these collections, hence the
        # AttributeError pass-through.
        for objects in ("chains", "ligands", "waters", "residues", "atoms"):
            try:
                ids = [obj.id for obj in getattr(self, objects)()]
                unique_ids = set(ids)
                if len(ids) != len(unique_ids):
                    warnings.warn(f"{objects} have duplicate IDs")
            except AttributeError: pass
    def save(self, path):
        """Saves the structure to file. The file extension given in the filename
        will be used to determine which file format to save in.
        If the structure you are saving has any duplicate IDs, a warning will be
        issued, as the file saved will likely be nonsensical.
        :param str path: the filename and location to save to."""
        from .utilities import save
        self.check_ids()
        # Dispatch on the filename extension; serialisers are imported
        # lazily to avoid circular imports.
        ext = path.split(".")[-1]
        if ext == "cif":
            from .mmcif import structure_to_mmcif_string
            string = structure_to_mmcif_string(self)
        elif ext == "mmtf":
            from .mmtf import structure_to_mmtf_string
            string = structure_to_mmtf_string(self)
        elif ext == "pdb":
            from .pdb import structure_to_pdb_string
            string = structure_to_pdb_string(self)
        else:
            raise ValueError("Unsupported file extension: " + ext)
        save(string, path)
    def atoms_in_sphere(self, location, radius, *args, **kwargs):
        """Returns all the atoms in a given sphere within this structure. This
        will be a lot faster if the structure is a :py:class:`.Model` and if
        :py:meth:`.optimise_distances` has been called, as it won't have to
        search all atoms.
        :param tuple location: the centre of the sphere.
        :param float radius: the radius of the sphere.
        :rtype: ``set``"""
        if "_internal_grid" in self.__dict__ and self._internal_grid:
            # The optimised path: atoms are bucketed into 10-unit grid
            # cells, so only cells overlapping the sphere are searched.
            r, atoms = math.ceil(radius / 10), set()
            x, y, z = [int(math.floor(n / 10)) * 10 for n in location]
            x_range, y_range, z_range = [
                [(val - (n * 10)) for n in range(1, r + 1)][::-1] + [val] + [
                    (val + n * 10) for n in range(1, r + 1)
                ] for val in (x, y, z)
            ]
            for x in x_range:
                for y in y_range:
                    for z in z_range:
                        atoms = atoms.union(self._internal_grid[x][y][z])
            atoms = StructureSet(*atoms)
            atoms = query(lambda self: atoms)(self, *args, **kwargs)
        else:
            atoms = self.atoms(*args, **kwargs)
        # Final exact distance check in either case.
        return {a for a in atoms if a.distance_to(location) <= radius}
    def pairwise_atoms(self, *args, **kwargs):
        """A generator which yeilds all the pairwise atom combinations of the
        structure. There will be no duplicates in the returned generator, and
        the number of returned pairs will be a triangle number.
        :rtype: ``tuple``"""
        atoms = list(self.atoms(*args, **kwargs))
        for a_index in range(len(atoms) - 1):
            for o_index in range(a_index + 1, len(atoms)):
                yield {atoms[a_index], atoms[o_index]}
    def nearby_atoms(self, *args, **kwargs):
        """Returns all atoms within a given distance of this structure,
        excluding the structure's own atoms.
        This will be a lot faster if the model's
        :py:meth:`.optimise_distances` has been called, as it won't have to
        search all atoms.
        :param float cutoff: the distance cutoff to use.
        :rtype: ``set``"""
        atoms = set()
        for atom in self.atoms():
            atoms.update(atom.nearby_atoms(*args, **kwargs))
        # Subtract this structure's own atoms from the result.
        return atoms - self.atoms()
    def nearby_hets(self, *args, **kwargs):
        """Returns all other het structures within a given distance of this
        structure, excluding itself.
        This will be a lot faster if the model's
        :py:meth:`.optimise_distances` has been called, as it won't have to
        search all atoms.
        :param float cutoff: the distance cutoff to use.
        :rtype: ``set``"""
        structures = set()
        hets = set()
        for atom in self.atoms():
            structures.update(atom.nearby_hets(*args, **kwargs))
            hets.add(atom.het)
        # Exclude the hets this structure's own atoms belong to.
        return structures - hets
    def nearby_chains(self, *args, **kwargs):
        """Returns all other chain structures within a given distance of this
        structure, excluding itself.
        :param float cutoff: the distance cutoff to use.
        :rtype: ``set``"""
        structures = set()
        chains = set()
        for atom in self.atoms():
            structures.update(atom.nearby_chains(*args, **kwargs))
            chains.add(atom.chain)
        return structures - chains
    def translate(self, dx=0, dy=0, dz=0, trim=12):
        """Translates the structure through space, updating all atom
        coordinates accordingly. You can provide three values, or a single
        vector.
        :param Number dx: The distance to move in the x direction.
        :param Number dy: The distance to move in the y direction.
        :param Number dz: The distance to move in the z direction.
        :param int trim: The amount of rounding to do to the atoms' coordinates\
        after translating - the default is 12 decimal places but this can be\
        set to ``None`` if no rounding is to be done."""
        # If dx unpacks into three values it is treated as a vector;
        # otherwise the three scalar components are used.
        try:
            _,_,_ = dx
            vector = dx
        except TypeError: vector = (dx, dy, dz)
        Atom.translate_atoms(vector, *self.atoms())
        self.trim(trim)
    def transform(self, matrix, trim=12):
        """Transforms the structure using a 3x3 matrix supplied. This is useful
        if the :py:meth:`.rotate` method isn't powerful enough for your needs.
        :param array matrix: A NumPy matrix representing the transformation.\
        You can supply a list of lists if you like and it will be converted to\
        a NumPy matrix.
        :param int trim: The amount of rounding to do to the atoms' coordinates\
        after transforming - the default is 12 decimal places but this can be\
        set to ``None`` if no rounding is to be done."""
        Atom.transform_atoms(matrix, *self.atoms())
        self.trim(trim)
    def rotate(self, angle, axis, trim=12):
        """Rotates the structure about an axis, updating all atom coordinates
        accordingly.
        :param Number angle: The angle in radians.
        :param str axis: The axis to rotate around. Can only be 'x', 'y' or 'z'.
        :param int trim: The amount of rounding to do to the atoms' coordinates\
        after translating - the default is 12 decimal places but this can be\
        set to ``None`` if no rounding is to be done."""
        Atom.rotate_atoms(angle, axis, *self.atoms())
        self.trim(trim)
    def trim(self, places):
        """Rounds the coordinate values to a given number of decimal places.
        Useful for removing floating point rounding errors after transformation.
        :param int places: The number of places to round the coordinates to. If\
        ``None``, no rounding will be done."""
        for atom in self.atoms():
            atom.trim(places)
class Molecule(AtomStructure):
    """Base class for the top-level constituents of a :py:class:`.Model` -
    chains, ligands and water molecules. In addition to the standard ID these
    objects can carry a separate internal ID."""
    def __init__(self, id, name, internal_id):
        AtomStructure.__init__(self, id, name)
        self._internal_id = internal_id
        self._model = None
    @property
    def internal_id(self):
        """The ID atomium itself uses to refer to the molecule. For .pdb files
        this matches the regular ID, but .cif and .mmtf files distinguish the
        two.
        :rtype: ``str``"""
        if self._internal_id: return self._internal_id
        return self._id
    @property
    def model(self):
        """The :py:class:`.Model` this molecule belongs to, if any.
        :rtype: ``Model``"""
        return self._model
class Het(AtomStructure):
    """A direct container of atoms, such as a residue or ligand. Though never
    instantiated directly, there is an initialiser method for setting up the
    atom dictionary."""
    # Class-level import; name-mangled to _Het__data so subclasses that
    # want the same table import it themselves (see Residue).
    from atomium import data as __data
    def __init__(self, id, name, full_name, *atoms):
        AtomStructure.__init__(self, id, name)
        self._full_name = full_name
        # Register this het as the owner of each of its atoms.
        for atom in atoms: atom._het = self
        self._atoms = StructureSet(*atoms)
    def __contains__(self, atom):
        return atom in self._atoms.structures
    @property
    def full_name(self):
        """Returns the residue's full name, based on its three letter name - or
        just the three letter name if it doesn't match anything. Or you can just
        supply a full name when you instantiate the Het.
        :rtype: ``str``"""
        if self._full_name: return self._full_name
        return self.__data.FULL_NAMES.get(self._name, self._name)
    @full_name.setter
    def full_name(self, full_name):
        self._full_name = full_name
    @property
    def chain(self):
        """Returns the :py:class:`.Chain` the structure is part of (if a
        residue) or associated with (if a ligand).
        :rtype: ``Chain``"""
        # NOTE(review): _chain is assigned by the subclasses (Residue,
        # Ligand), not by this class's __init__.
        return self._chain
    def atoms(self):
        """Returns the atoms in the ligand.
        :rtype: ``set``"""
        return self._atoms
class Model(AtomStructure, metaclass=StructureClass):
    """The universe in which all other molecules live, interact, and generally
    exist.
    It is a container of its molecules, residues, and atoms.
    :param \*molecules: The chains, ligands, and waters that will inhabit the\
    model."""
    def __init__(self, *molecules, file=None):
        AtomStructure.__init__(self, None, None)
        self._chains = set()
        self._ligands = set()
        self._waters = set()
        # Sort each molecule into the right bucket: Chain instances go to
        # _chains, water ligands to _waters, everything else to _ligands.
        for mol in molecules:
            mol._model = self
            d = (self._chains if isinstance(mol, Chain) else self._waters
                if mol._water else self._ligands)
            d.add(mol)
        self._chains = StructureSet(*self._chains)
        self._ligands = StructureSet(*self._ligands)
        self._waters = StructureSet(*self._waters)
        self._file = file
        # Populated by optimise_distances(); None means no spatial index.
        self._internal_grid = None
    def __repr__(self):
        # Drop the trailing 's' when a count is exactly one.
        chains = "{} chains".format(len(self._chains))
        if len(self._chains) == 1: chains = chains[:-1]
        ligands = "{} ligands".format(len(self._ligands))
        if len(self._ligands) == 1: ligands = ligands[:-1]
        return "<Model ({}, {})>".format(chains, ligands)
    def __contains__(self, obj):
        return (obj in self.molecules() or obj in self.residues()
            or obj in self.atoms())
    @property
    def file(self):
        """The :py:class:`.File` the model comes from."""
        return self._file
    def chains(self):
        """Returns the model's chains.
        :rtype: ``set``"""
        return self._chains
    def ligands(self):
        """Returns the model's ligands.
        :rtype: ``set``"""
        return self._ligands
    def waters(self):
        """Returns the model's water ligands.
        :rtype: ``set``"""
        return self._waters
    def molecules(self):
        """Returns all of the model's molecules (chains, ligands, waters).
        :rtype: ``set``"""
        return self._chains + self._ligands + self._waters
    def residues(self):
        """Returns all of the model's residues in all its chains.
        :rtype: ``set``"""
        res = []
        for chain in self._chains.structures:
            res += chain.residues()
        return StructureSet(*res)
    def atoms(self):
        """Returns all of the model's atoms in all its molecules.
        :rtype: ``set``"""
        atoms = set()
        for mol in self.molecules():
            # Hets hold atoms directly; chains hold them via residues.
            try:
                atoms.update(mol._atoms.structures)
            except:
                for res in mol._residues.structures:
                    atoms.update(res._atoms.structures)
        return StructureSet(*atoms)
    def dehydrate(self):
        """Removes all water ligands from the model."""
        self._waters = StructureSet()
    def optimise_distances(self):
        """Calling this method makes finding atoms within a sphere faster, and
        consequently makes all 'nearby' methods faster. It organises the atoms
        in the model into grids, so that only relevant atoms are checked for
        distances."""
        # Three-level dict keyed by the atom's coordinates rounded down to
        # multiples of 10 - see AtomStructure.atoms_in_sphere.
        self._internal_grid = defaultdict(
            lambda: defaultdict(lambda: defaultdict(set))
        )
        for atom in self.atoms():
            x, y, z = [int(math.floor(n / 10)) * 10 for n in atom.location]
            self._internal_grid[x][y][z].add(atom)
    #TODO copy
class Chain(Molecule, metaclass=StructureClass):
    """A sequence of residues. Unlike other structures, they are iterable, and
    have a length.
    Residues can also be accessed using indexing.
    :param \*residues: The residues that will make up the chain.
    :param str id: the chain's unique ID.
    :param str internal_id: the internal ID used for transformations.
    :param str sequence: the actual sequence the chain should have.
    :param list helices: the alpha helices within the chain.
    :param list strands: the beta strands within the chain."""
    def __init__(self, *residues, sequence="", helices=None, strands=None, **kwargs):
        Molecule.__init__(
            self, kwargs.get("id"), kwargs.get("name"), kwargs.get("internal_id")
        )
        self._sequence = sequence
        # Register this chain as the owner of each of its residues.
        for res in residues: res._chain = self
        self._residues = StructureSet(*residues)
        self._model = None
        self._helices = helices or []
        self._strands = strands or []
    def __repr__(self):
        return "<Chain {} ({} residues)>".format(self._id, len(self._residues))
    def __len__(self):
        return len(self._residues)
    def __iter__(self):
        return iter(self._residues.structures)
    def __getitem__(self, key):
        return self.residues()[key]
    def __contains__(self, obj):
        return obj in self._residues.structures or obj in self.atoms()
    @property
    def sequence(self):
        """Returns the sequence associated with the chain. Note that this is the
        sequence that the molecule actually has in real life - some may be
        missing from the actual sequence of residues in the structure.
        :rtype: ``str``"""
        return self._sequence
    @sequence.setter
    def sequence(self, sequence):
        self._sequence = sequence
    @property
    def helices(self):
        """The alpha helix residues in the chain
        :rtype: ``tuple``"""
        return tuple(self._helices)
    @property
    def strands(self):
        """The beta strand residues in the chain
        :rtype: ``tuple``"""
        return tuple(self._strands)
    @property
    def length(self):
        """Returns the number of residues in the chain.
        :rtype: ``int``"""
        return len(self)
    @property
    def present_sequence(self):
        """The sequence of residues actually present in the atoms present.
        :rtype: ``str``"""
        return "".join(r.code for r in self.residues())
    def copy(self, id=None, residue_ids=None, atom_ids=None):
        """Creates a copy of the chain, with new atoms and residues.
        :param str id: if given, the ID of the new chain.
        :param function residue_ids: a callable which, if given, will generate\
        new residue IDs.
        :param function atom_ids: a callable which, if given, will generate new\
        atom IDs.
        :rtype: ``Chain``"""
        residue_ids = residue_ids or (lambda i: i)
        # Copy every residue, then re-link the copies' next/previous chain
        # so the copies mirror the original connectivity.
        residues = {r: r.copy(
            id=residue_ids(r.id), atom_ids=atom_ids
        ) for r in self.residues()}
        for r in self.residues():
            residues[r].next = residues[r.next] if r.next else None
        return Chain(
            *residues.values(), id=id or self._id, internal_id=self._internal_id,
            name=self._name, sequence=self._sequence,
            helices=[tuple(residues[r] for r in h) for h in self._helices],
            strands=[tuple(residues[r] for r in s) for s in self._strands]
        )
    def residues(self):
        """Returns the residues in the chain.
        :rtype: ``tuple``"""
        return self._residues
    def ligands(self):
        """Returns all the ligands associated with the chain - but only if the
        chain is part of a model.
        :rtype: ``set``"""
        return StructureSet() if self._model is None else StructureSet(
            *[l for l in self._model._ligands.structures if l._chain is self]
        )
    def atoms(self):
        """Returns all the atoms in with the chain.
        :rtype: ``set``"""
        atoms = set()
        for res in self._residues.structures:
            atoms.update(res._atoms.structures)
        return StructureSet(*atoms)
class Ligand(Molecule, Het, metaclass=StructureClass):
    """A small molecule, usually associated with a polymer chain. Waters are
    modelled as ligands too, distinguished by the ``water`` flag.
    :param \*atoms: The atoms that will make up the ligand.
    :param str id: the ligand's unique ID.
    :param str name: the ligand's name.
    :param str internal_id: the internal ID used for transformations.
    :param Chain chain: the chain the ligand is associated with.
    :param bool water: if ``True``, the ligand will be treated as water."""
    def __init__(self, *atoms, chain=None, water=False, **kwargs):
        # Initialise both parents explicitly - Het wires up the atoms,
        # Molecule records the IDs and model slot.
        Het.__init__(
            self, kwargs.get("id"), kwargs.get("name"),
            kwargs.get("full_name"), *atoms)
        Molecule.__init__(self, kwargs.get("id"), kwargs.get("name"),
            kwargs.get("internal_id"))
        self._chain = chain
        self._water = water
    def __repr__(self):
        kind = "Water" if self._water else "Ligand"
        return "<{} {} ({})>".format(kind, self._name, self._id)
    @property
    def is_water(self):
        """Whether this ligand represents a water molecule.
        :rtype: ``bool``"""
        return self._water
    def copy(self, id=None, atom_ids=None):
        """Creates a copy of the ligand, with new atoms.
        :param str id: if given, the ID of the new ligand.
        :param function atom_ids: a callable which, if given, will generate new\
        atom IDs.
        :rtype: ``Ligand``"""
        originals = list(self.atoms())
        if atom_ids:
            # Generate all the replacement IDs first, then copy each atom
            # with its corresponding new ID.
            fresh_ids = [atom_ids(a.id) for a in originals]
            copies = [a.copy(id=new) for a, new in zip(originals, fresh_ids)]
        else:
            copies = [a.copy() for a in self.atoms()]
        return self.__class__(*copies, id=id or self._id,
            name=self._name, internal_id=self._internal_id, water=self._water)
class Residue(Het, metaclass=StructureClass):
    """A single subunit of a polymer chain.
    :param \*atoms: the atoms the residue is to be made of.
    :param str id: the residue's ID.
    :param str name: the residue's name."""
    from atomium import data as __data
    def __init__(self, *atoms, **kwargs):
        Het.__init__(self, kwargs.get("id"), kwargs.get("name"),
         kwargs.get("full_name"), *atoms)
        self._next = None
        self._previous = None
        self._chain = None
    def __repr__(self):
        return "<Residue {} ({})>".format(self._name, self._id)
    @property
    def next(self):
        """The :py:class:`.Residue` immediately downstream of this one in the
        chain. Assigning to this property links the two residues both ways:
        the given residue becomes this one's 'next', and this residue becomes
        its 'previous'. This linkage is distinct from chemical bonds.
        :raises ValueError: if you try to connect a residue to itself.
        :rtype: ``Residue``"""
        return self._next
    @next.setter
    def next(self, next):
        if next is None:
            # Unlink: clear the back-reference on the old neighbour too.
            if self._next: self._next._previous = None
            self._next = None
        elif next is self:
            raise ValueError("Cannot link {} to itself".format(self))
        else:
            self._next = next
            next._previous = self
    @property
    def previous(self):
        """The :py:class:`.Residue` immediately upstream of this one in the
        chain. Assigning to this property links the two residues both ways:
        the given residue becomes this one's 'previous', and this residue
        becomes its 'next'.
        :raises ValueError: if you try to connect a residue to itself.
        :rtype: ``Residue``"""
        return self._previous
    @previous.setter
    def previous(self, previous):
        if previous is None:
            # Unlink: clear the forward-reference on the old neighbour too.
            if self._previous: self._previous._next = None
            self._previous = None
        elif previous is self:
            raise ValueError("Cannot link {} to itself".format(self))
        else:
            self._previous = previous
            previous._next = self
    @property
    def code(self):
        """The residue's single letter code derived from its three letter
        name, or 'X' when the name is not recognised.
        :rtype: ``str``"""
        return self.__data.CODES.get(self._name, "X")
    @property
    def helix(self):
        """Whether this residue is part of an alpha helix of its chain.
        :rtype: ``bool``"""
        if self.chain:
            return any(self in helix for helix in self.chain.helices)
        return False
    @property
    def strand(self):
        """Whether this residue is part of a beta strand of its chain.
        :rtype: ``bool``"""
        if self.chain:
            return any(self in strand for strand in self.chain.strands)
        return False
    def copy(self, id=None, atom_ids=None):
        """Makes a duplicate of this residue with freshly copied atoms.
        :param str id: if given, the ID of the new residue.
        :param function atom_ids: a callable which, if given, will\
        generate new atom IDs.
        :rtype: ``Residue``"""
        originals = list(self.atoms())
        if atom_ids:
            fresh_ids = [atom_ids(a.id) for a in originals]
            copied = [a.copy(id=new) for a, new in zip(originals, fresh_ids)]
        else:
            copied = [a.copy() for a in self.atoms()]
        return self.__class__(*copied, id=id or self._id, name=self._name)
    @property
    def model(self):
        """The :py:class:`.Model` this residue belongs to (via its chain),
        or ``None`` when it has no chain.
        :rtype: ``Model``"""
        try:
            return self._chain._model
        except AttributeError:
            return None
class Atom:
    """An atom in space - a point particle with a location, element, charge etc.
    Atoms are the building blocks of all structures in atomium.
    Two atoms are equal if they have the same properties (not including ID).
    :param str element: The atom's elemental symbol.
    :param number x: The atom's x coordinate.
    :param number y: The atom's y coordinate.
    :param number z: The atom's z coordinate.
    :param int id: An integer ID for the atom.
    :param str name: The atom's name.
    :param number charge: The charge of the atom.
    :param number bvalue: The B-value of the atom (its uncertainty).
    :param list anisotropy: The directional uncertainty of the atom.
    :param bool is_hetatm: whether the atom came from a HETATM record."""
    from atomium import data as __data
    __slots__ = [
        "_element", "_location", "_id", "_name", "_charge",
        "_bvalue", "_anisotropy", "_het", "_bonded_atoms", "_is_hetatm"
    ]
    def __init__(self, element, x, y, z, id, name, charge, bvalue, anisotropy, is_hetatm=False):
        self._location = np.array([x, y, z])
        self._element = element
        self._id, self._name, self._charge = id, name, charge
        self._bvalue, self._anisotropy = bvalue, anisotropy
        self._het, self._bonded_atoms, self._is_hetatm = None, set(), is_hetatm
    def __repr__(self):
        return "<Atom {} ({})>".format(self._id, self._name)
    def __iter__(self):
        return iter(self._location)
    def __eq__(self, other):
        # Compare every slot except ID, het membership and bonds; the
        # location array is compared element-wise below.
        if not isinstance(other, Atom): return False
        for attr in self.__slots__:
            if attr not in ("_id", "_het", "_bonded_atoms", "_location"):
                if getattr(self, attr) != getattr(other, attr): return False
        if list(self._location) != list(other._location): return False
        return True
    def __hash__(self):
        # Hash by identity: atoms are mutable, so value-hashing is unsafe.
        return id(self)
    @staticmethod
    def translate_atoms(vector, *atoms):
        """Translates multiple atoms using some vector.
        :param vector: the three values representing the delta position.
        :param \*atoms: the atoms to translate."""
        for atom in atoms:
            atom._location += np.array(vector)
    @staticmethod
    def transform_atoms(matrix, *atoms):
        """Transforms multiple atoms using some matrix.
        :param matrix: the transformation matrix.
        :param \*atoms: the atoms to transform."""
        locations = [list(a) for a in atoms]
        output = np.dot(np.array(matrix), np.array(locations).transpose())
        for atom, location in zip(atoms, output.transpose()):
            atom._location = location
    @staticmethod
    def rotate_atoms(angle, axis, *atoms, **kwargs):
        """Rotates multiple atoms using an axis and an angle.
        :param float angle: the angle to rotate by in radians.
        :param str axis: the axis to rotate around (x, y, or z).
        :param \*atoms: the atoms to rotate.
        :raises ValueError: if the axis is not 'x', 'y' or 'z'."""
        try:
            axis = [1 if i == "xyz".index(axis) else 0 for i in range(3)]
        except ValueError:
            raise ValueError("'{}' is not a valid axis".format(axis))
        axis = np.asarray(axis)
        axis = axis / np.sqrt(np.dot(axis, axis))
        # Build the rotation matrix from the quaternion (a, b, c, d).
        a = np.cos(angle / 2)
        b, c, d = -axis * np.sin(angle / 2)
        aa, bb, cc, dd = a * a, b * b, c * c, d * d
        bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
        Atom.transform_atoms(np.array([
            [aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
            [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
            [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]
        ]), *atoms, **kwargs)
    @property
    def element(self):
        """The atom's element symbol. This is used to calculate its mass using
        a Periodic Table.
        :rtype: ``str``"""
        return self._element
    @property
    def location(self):
        """The atom's location.
        :rtype: ``tuple``"""
        return tuple(self._location)
    @property
    def id(self):
        """The atom's unique integer ID. It cannot be updated - the ID the
        atom is created with is its ID forever.
        :rtype: ``int``"""
        return self._id
    @property
    def name(self):
        """The atom's name. This is often used to determine what 'kind' of
        atom it is.
        :rtype: ``str``"""
        return self._name
    @name.setter
    def name(self, name):
        self._name = name
    @property
    def charge(self):
        """The atom's charge - usually just zero, or 'neutral'.
        :rtype: ``float``"""
        return self._charge
    @charge.setter
    def charge(self, charge):
        self._charge = charge
    @property
    def bvalue(self):
        """The atom's B-value - the uncertainty in its position in all
        directions.
        :rtype: ``float``"""
        return self._bvalue
    @bvalue.setter
    def bvalue(self, bvalue):
        self._bvalue = bvalue
    @property
    def anisotropy(self):
        """The atom's directional uncertainty, represented by a list of six
        numbers.
        :rtype: ``list``"""
        return self._anisotropy
    @property
    def bonded_atoms(self):
        """Returns the atoms this atom is bonded to.
        :rtype: ``set``"""
        return self._bonded_atoms
    @property
    def mass(self):
        """The atom's molar mass according to the Periodic Table, based on the
        atom's :py:meth:`element`. If the element doesn't match any symbol on
        the Periodic Table, a mass of 0 will be returned.
        The element lookup is case-insensitive.
        :rtype: ``float``"""
        return self.__data.PERIODIC_TABLE.get(self._element.upper(), 0)
    @property
    def covalent_radius(self):
        """The atom's covalent radius, based on the atom's :py:meth:`element`.
        If the element doesn't match any symbol on the Periodic Table, a
        radius of 0 will be returned.
        The element lookup is case-insensitive.
        :rtype: ``float``"""
        return self.__data.COVALENT_RADII.get(self._element.upper(), 0)
    @property
    def is_metal(self):
        """Checks whether the atom's element matches a metal element.
        The element lookup is case-insensitive.
        :rtype: ``bool``"""
        return self._element.upper() in self.__data.METALS
    @property
    def is_backbone(self):
        """Returns ``True`` if the atom has a backbone atom name.
        :rtype: ``bool``"""
        return isinstance(self._het, Residue) and \
         self._name in ["CA", "C", "N", "O"]
    @property
    def is_side_chain(self):
        """Returns ``True`` if the atom has a side chain atom name.
        :rtype: ``bool``"""
        return isinstance(self._het, Residue) and not self.is_backbone
    def distance_to(self, other):
        """Returns the distance (in whatever units the coordinates are defined
        in) between this atom and another. You can also give a (x, y, z) tuple
        instead of another atom if you so wish.
        :param Atom other: The other atom (or location tuple).
        :rtype: ``float``"""
        return np.linalg.norm(self._location - np.array(list(other)))
    def angle(self, atom1, atom2):
        """Gets the angle between two atom vectors with this atom as the
        origin.
        :param Atom atom1: The first atom.
        :param Atom atom2: The second atom."""
        vectors = [
         [v1 - v2 for v1, v2 in zip(atom.location, self.location)
         ] for atom in (atom1, atom2)]
        norms = [np.linalg.norm(v) for v in vectors]
        # A zero-length vector has no defined angle - return 0 rather than
        # dividing by zero below.
        if 0 in norms: return 0
        vectors = [v / n for v, n in zip(vectors, norms)]
        # Clip guards against arccos domain errors from float rounding.
        return np.arccos(np.clip(np.dot(vectors[0], vectors[1]), -1.0, 1.0))
    def copy(self, id=None):
        """Returns a copy of the atom. The new atom will have the same
        element, location, name, charge, ID, bvalue etc. as the original, but
        will not be part of any model or other molecule.
        :param int id: if given, the ID of the new atom.
        :rtype: ``Atom``"""
        # Fix: propagate the HETATM flag. Previously it was silently reset
        # to False, so a copy of a HETATM atom compared unequal to its
        # original under __eq__ (which checks _is_hetatm).
        return Atom(
            self._element, *self._location, id or self._id, self._name,
            self._charge, self._bvalue, self._anisotropy,
            is_hetatm=self._is_hetatm
        )
    @property
    def het(self):
        """Returns the :py:class:`.Residue` or :py:class:`.Ligand` the atom
        is part of, or ``None`` if it is not part of one.
        :rtype: ``Het``"""
        return self._het
    @property
    def chain(self):
        """Returns the :py:class:`.Chain` the atom is part of, or ``None`` if
        it is not part of one.
        :rtype: ``Chain``"""
        if self._het: return self._het.chain
    @property
    def model(self):
        """Returns the :py:class:`.Model` the atom is part of, or ``None`` if
        it is not part of one.
        :rtype: ``Model``"""
        if self.chain: return self.chain.model
    def nearby_atoms(self, cutoff, *args, **kwargs):
        """Returns all atoms in the associated :py:class:`.Model` that are
        within a given distance (in the units of the atom coordinates) of this
        atom. If the atom is not part of a model, no atoms will be returned.
        :param float cutoff: The radius to search within.
        :rtype: ``set``"""
        if self.model:
            atoms = self.model.atoms_in_sphere(
             self.location, cutoff, *args, **kwargs
            )
            try:
                atoms.remove(self)
            # Was a bare except; only the absence of self in the container
            # (set -> KeyError, list -> ValueError) should be tolerated.
            except (KeyError, ValueError): pass
            return atoms
        return set()
    def nearby_hets(self, *args, residues=True, ligands=True, **kwargs):
        """Returns all residues and ligands in the associated
        :py:class:`.Model` that are within a given distance (in the units of
        the atom coordinates) of this atom. If the atom is not part of a
        model, no residues will be returned.
        :param float cutoff: the distance cutoff to use.
        :param bool residues: if ``False``, residues will not be returned.
        :param bool ligands: if ``False``, ligands will not be returned.
        :rtype: ``set``"""
        atoms = self.nearby_atoms(*args, **kwargs)
        structures = set()
        for atom in atoms:
            if atom.het is not None: structures.add(atom.het)
        try:
            structures.remove(self.het)
        # Was a bare except; self.het may not be in the set (e.g. None).
        except KeyError: pass
        if not residues:
            structures = {s for s in structures if not isinstance(s, Residue)}
        if not ligands:
            structures = {s for s in structures if not (isinstance(s, Ligand))}
        return structures
    def nearby_chains(self, *args, **kwargs):
        """Returns all chain structures in the associated :py:class:`.Model`
        that are within a given distance (in the units of the atom
        coordinates) of this atom. If the atom is not part of a model, no
        chains will be returned.
        :param float cutoff: the distance cutoff to use.
        :rtype: ``set``"""
        atoms = self.nearby_atoms(*args, **kwargs)
        chains = set()
        for atom in atoms:
            if atom.chain is not None: chains.add(atom.chain)
        try:
            chains.remove(self.chain)
        # Was a bare except; self.chain may not be in the set (e.g. None).
        except KeyError: pass
        return chains
    def translate(self, dx=0, dy=0, dz=0, trim=12):
        """Translates an atom in 3D space. You can provide three values, or a
        single vector.
        :param float dx: The distance to move in the x direction.
        :param float dy: The distance to move in the y direction.
        :param float dz: The distance to move in the z direction.
        :param int trim: The amount of rounding to do to the atom's\
        coordinates after translating - the default is 12 decimal places but\
        this can be set to ``None`` if no rounding is to be done."""
        try:
            # If dx unpacks into three values it is already a vector.
            _,_,_ = dx
            vector = dx
        except TypeError: vector = (dx, dy, dz)
        Atom.translate_atoms(vector, self)
        self.trim(trim)
    def transform(self, matrix, trim=12):
        """Transforms the atom using a 3x3 matrix supplied. This is useful if
        the :py:meth:`.rotate` method isn't powerful enough for your needs.
        :param array matrix: A NumPy matrix representing the transformation.\
        You can supply a list of lists if you like and it will be converted\
        to a NumPy matrix.
        :param int trim: The amount of rounding to do to the atom's\
        coordinates after transforming - the default is 12 decimal places but\
        this can be set to ``None`` if no rounding is to be done."""
        Atom.transform_atoms(matrix, self)
        self.trim(trim)
    def rotate(self, angle, axis, trim=12):
        """Rotates the atom by an angle in radians, around one of the the
        three axes.
        :param float angle: The angle to rotate by in radians.
        :param str axis: the axis to rotate around.
        :param int trim: The amount of rounding to do to the atom's\
        coordinates after rotating - the default is 12 decimal places but\
        this can be set to ``None`` if no rounding is to be done."""
        Atom.rotate_atoms(angle, axis, self)
        self.trim(trim)
    def move_to(self, x, y, z):
        """Moves the atom to the coordinates given.
        :param number x: The atom's new x coordinate.
        :param number y: The atom's new y coordinate.
        :param number z: The atom's new z coordinate."""
        self._location[0], self._location[1], self._location[2] = x, y, z
    def trim(self, places):
        """Rounds the coordinate values to a given number of decimal places.
        Useful for removing floating point rounding errors after
        transformation.
        :param int places: The number of places to round the coordinates to.\
        If ``None``, no rounding will be done."""
        if places is not None:
            self._location = np.round(self._location, places)
    def bond(self, other):
        """Bonds the atom to some other atom.
        :param Atom other: the other atom to bond to."""
        self._bonded_atoms.add(other)
        other._bonded_atoms.add(self)
| [
"numpy.sqrt",
"math.ceil",
"math.floor",
"numpy.asarray",
"numpy.sum",
"numpy.array",
"numpy.dot",
"collections.defaultdict",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"rmsd.kabsch_rmsd",
"warnings.warn",
"numpy.round"
] | [((2860, 2890), 'numpy.sqrt', 'np.sqrt', (['mean_square_deviation'], {}), '(mean_square_deviation)\n', (2867, 2890), True, 'import numpy as np\n'), ((30474, 30493), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (30482, 30493), True, 'import numpy as np\n'), ((32562, 32578), 'numpy.asarray', 'np.asarray', (['axis'], {}), '(axis)\n', (32572, 32578), True, 'import numpy as np\n'), ((32641, 32658), 'numpy.cos', 'np.cos', (['(angle / 2)'], {}), '(angle / 2)\n', (32647, 32658), True, 'import numpy as np\n'), ((2276, 2301), 'numpy.sum', 'np.sum', (['locations'], {'axis': '(0)'}), '(locations, axis=0)\n', (2282, 2301), True, 'import numpy as np\n'), ((5199, 5233), 'rmsd.kabsch_rmsd', 'rmsd.kabsch_rmsd', (['coords1', 'coords2'], {}), '(coords1, coords2)\n', (5215, 5233), False, 'import rmsd\n'), ((31593, 31609), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (31601, 31609), True, 'import numpy as np\n'), ((31899, 31915), 'numpy.array', 'np.array', (['matrix'], {}), '(matrix)\n', (31907, 31915), True, 'import numpy as np\n'), ((32685, 32702), 'numpy.sin', 'np.sin', (['(angle / 2)'], {}), '(angle / 2)\n', (32691, 32702), True, 'import numpy as np\n'), ((32858, 33029), 'numpy.array', 'np.array', (['[[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], [2 * (bc - ad), aa + cc -\n bb - dd, 2 * (cd + ab)], [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]]'], {}), '([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], [2 * (bc - ad),\n aa + cc - bb - dd, 2 * (cd + ab)], [2 * (bd + ac), 2 * (cd - ab), aa +\n dd - bb - cc]])\n', (32866, 33029), True, 'import numpy as np\n'), ((37013, 37030), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (37027, 37030), True, 'import numpy as np\n'), ((43306, 43338), 'numpy.round', 'np.round', (['self._location', 'places'], {}), '(self._location, places)\n', (43314, 43338), True, 'import numpy as np\n'), ((8418, 8440), 'math.ceil', 'math.ceil', (['(radius / 10)'], {}), '(radius / 10)\n', (8427, 8440), 
False, 'import math\n'), ((32609, 32627), 'numpy.dot', 'np.dot', (['axis', 'axis'], {}), '(axis, axis)\n', (32615, 32627), True, 'import numpy as np\n'), ((37182, 37212), 'numpy.dot', 'np.dot', (['vectors[0]', 'vectors[1]'], {}), '(vectors[0], vectors[1])\n', (37188, 37212), True, 'import numpy as np\n'), ((6782, 6828), 'warnings.warn', 'warnings.warn', (['f"""{objects} have duplicate IDs"""'], {}), "(f'{objects} have duplicate IDs')\n", (6795, 6828), False, 'import warnings\n'), ((31917, 31936), 'numpy.array', 'np.array', (['locations'], {}), '(locations)\n', (31925, 31936), True, 'import numpy as np\n'), ((8475, 8493), 'math.floor', 'math.floor', (['(n / 10)'], {}), '(n / 10)\n', (8485, 8493), False, 'import math\n'), ((19255, 19271), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (19266, 19271), False, 'from collections import Counter, OrderedDict, defaultdict\n'), ((19344, 19362), 'math.floor', 'math.floor', (['(n / 10)'], {}), '(n / 10)\n', (19354, 19362), False, 'import math\n')] |
# import the necessary packages
from imutils.video import VideoStream
import numpy as np
import argparse
import imutils
import time
import cv2
import os
import torch
from PIL import Image
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.autograd import Variable
import matplotlib.pyplot as plt
class ClassificationCNN(nn.Module):
    """Two-block convolutional classifier for face crops.

    Architecture: [Conv5x5 -> BatchNorm -> ReLU -> MaxPool2] x 2, then a
    dropout + two fully connected layers mapping to ``num_classes`` scores.

    :param tuple input_dim: (channels, height, width) of the input images.
    :param int num_filters: channel count of the first conv block.
    :param int hidden_dim: width of the hidden fully connected layer.
    :param int num_classes: number of output classes.
    :param float dropout: dropout probability before the first FC layer.

    ``kernel_size``, ``stride_conv``, ``weight_scale``, ``pool`` and
    ``stride_pool`` are accepted for backward compatibility but unused,
    as in the original implementation.
    """
    def __init__(self, input_dim=(3, 500, 500), num_filters=16, kernel_size=5,
            stride_conv=1, weight_scale=0.001, pool=2, stride_pool=2, hidden_dim=200,
            num_classes=4, dropout=0.4):
        super(ClassificationCNN, self).__init__()
        channels, height, width = input_dim
        self.dropout = dropout
        self.layer1 = nn.Sequential(
            nn.Conv2d(channels, num_filters, kernel_size=5, padding=2),
            nn.BatchNorm2d(num_filters),
            nn.ReLU(),
            nn.MaxPool2d(2, 2))
        self.layer2 = nn.Sequential(
            # Fixed: in_channels was hard-coded to 16, silently breaking any
            # num_filters != 16. Identical to before at the default value.
            nn.Conv2d(num_filters, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2, 2))
        # Two MaxPool(2) layers quarter each spatial dimension, so the
        # flattened feature size is 32 * (H//4) * (W//4). For the default
        # 500x500 input this is 32*125*125 = 500000, as hard-coded before.
        flat_features = 32 * (height // 4) * (width // 4)
        self.fc = nn.Linear(flat_features, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, num_classes)

    def forward(self, x):
        """Return raw class scores of shape (batch, num_classes)."""
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)
        # Fixed: dropout was forced on (training=True) even in eval() mode,
        # making inference stochastic. Now it follows the module's mode.
        out = F.dropout(out, p=self.dropout, training=self.training)
        out = F.relu(self.fc(out))
        out = self.fc2(out)
        return out

    @property
    def is_cuda(self):
        """``True`` when the model parameters live on a CUDA device."""
        return next(self.parameters()).is_cuda

    def save(self, path):
        """Pickle the whole module (architecture + weights) to ``path``."""
        print('Saving model... %s' % path)
        torch.save(self, path)
def image_loader(image_name):
    """Load an image from disk and return it as a 4D float tensor.

    The image is resized to 500px (shortest side), converted to a tensor,
    wrapped in a Variable with gradients enabled and given a leading batch
    dimension.

    :param str image_name: path of the image file to load.
    :returns: a (1, C, H, W) tensor, on the GPU when one is available."""
    imsize = 500
    # NOTE(review): transforms.Scale is deprecated in newer torchvision in
    # favour of transforms.Resize - kept here to match the installed version.
    loader = transforms.Compose([transforms.Scale(imsize), transforms.ToTensor()])
    image = Image.open(image_name)
    image = loader(image).float()
    image = Variable(image, requires_grad=True)
    image = image.unsqueeze(0)  # add batch dimension
    # Fixed: the original unconditionally called .cuda() and crashed on
    # CPU-only machines. On GPU hosts the behaviour is unchanged.
    return image.cuda() if torch.cuda.is_available() else image
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", default="deploy.prototxt.txt",
    help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", default="res10_300x300_ssd_iter_140000.caffemodel",
    help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
    help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
# load our serialized face detector (Caffe) and face classifier (PyTorch)
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
model = torch.load('./classification_cnn.model')
# sanity-check the classifier on one known image per person.
# Fixed: the first path was ".test_data/..." (missing "/"), which pointed
# to a non-existent directory - all sibling paths use "./test_data".
input_test1 = image_loader("./test_data/Dominik/domi_12.png")
input_test2 = image_loader("./test_data/Nathaniel/nath_12.png")
input_test3 = image_loader("./test_data/Maren/maren_12.png")
input_test4 = image_loader("./test_data/Alex/alex12.png")
output_test = model(input_test1)
_, pred_test = torch.max(output_test, 1)
print("Domi is number:", pred_test.data.cpu().numpy()[0])
output_test = model(input_test2)
_, pred_test = torch.max(output_test, 1)
print("Nath is number:", pred_test.data.cpu().numpy()[0])
output_test = model(input_test3)
_, pred_test = torch.max(output_test, 1)
print("Maren is number:", pred_test.data.cpu().numpy()[0])
output_test = model(input_test4)
_, pred_test = torch.max(output_test, 1)
print("Alex is number:", pred_test.data.cpu().numpy()[0])
# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
# display size for the final annotated frame
width = 1280
height = 720
# loop over the frames from the video stream
# NOTE(review): counter is never incremented - the increment lives inside
# the commented-out save-image block below - so the `counter >= 500` exit
# condition at the bottom of the loop can never fire.
counter = 0
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=400)
    # grab the frame dimensions and convert it to a blob
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
        (300, 300), (104.0, 177.0, 123.0))
    # pass the blob through the network and obtain the detections and
    # predictions
    net.setInput(blob)
    detections = net.forward()
    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with the
        # prediction
        confidence = detections[0, 0, i, 2]
        # filter out weak detections by ensuring the `confidence` is
        # greater than the minimum confidence
        if confidence < args["confidence"]:
            continue
        # compute the (x, y)-coordinates of the bounding box for the
        # object (the box is returned normalised, so scale by frame size)
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        try:
            # crop the detected face (with margin) and prepare it for the
            # PyTorch classifier; any slicing/conversion error (e.g. a box
            # partly outside the frame) skips this detection
            head = frame[startY-40:endY+30, startX-40:endX+40]
            cv2.imshow("Head", head)
            neural_head = cv2.resize(head,(500, 500))
            neural_head = Image.fromarray(neural_head)
            loader = transforms.Compose([transforms.Scale(500), transforms.ToTensor()])
            input = loader(neural_head).float()
            #test = np.transpose(neural_head, (1,2,0))
            #cv2.imwrite("test.png", test)
        except Exception as err:
            print(err)
            continue
        '''
        print(head.shape)
        channels = head[2]
        head_width = head[0]
        print(channels.shape)
        head_height = [1]
        print(head_height.shape)
        neural_head = np.array([channels, head_width, head_height], dtype=np.uint8)
        '''
        #input = image_loader("test.png")
        #input = torch.tensor(neural_head).cuda()
        # classify the face crop; NOTE(review): `input` shadows the builtin
        # and .cuda() assumes a GPU is present
        input = Variable(input, requires_grad=True)
        input = input.unsqueeze(0).cuda()
        output = model(input.float())
        _, pred = torch.max(output, 1)
        # map the predicted class index to a person's name;
        # NOTE(review): if the model ever predicted a class outside 0-3,
        # `text` would keep its value from a previous detection
        if pred[0] == 1:
            text = "Domi"
        elif pred[0] == 2:
            text = "Maren"
        elif pred[0] == 3:
            text = "Nath"
        elif pred[0] == 0:
            text = "Alex"
        '''
        #save img
        show_head = cv2.resize(head,(500,500))
        cv2.imshow("Head", show_head)
        path = "/home/dwinter/Dokumente/opencv/val_data/Nathaniel/"
        save_name = "nath_" + str(counter) + ".png"
        cv2.imwrite(os.path.join(path, save_name), head)
        counter += 1
        '''
        # draw the bounding box of the face along with the associated
        # name label (keep label above the box when there is room)
        #text = "{:.2f}%".format(confidence * 100)
        y = startY - 10 if startY - 10 > 10 else startY + 10
        cv2.rectangle(frame, (startX, startY), (endX, endY),
            (0, 0, 255), 2)
        cv2.putText(frame, text, (startX, y),
            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
    frame = cv2.resize(frame,(width,height)) # upscale for display
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
    if counter >= 500:
        break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
| [
"cv2.rectangle",
"torch.nn.ReLU",
"torch.max",
"time.sleep",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"torch.nn.BatchNorm2d",
"imutils.video.VideoStream",
"argparse.ArgumentParser",
"cv2.dnn.readNetFromCaffe",
"torchvision.transforms.ToTensor",
"torch.autograd.Variable",
"cv2.... | [((2149, 2174), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2172, 2174), False, 'import argparse\n'), ((2660, 2717), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (["args['prototxt']", "args['model']"], {}), "(args['prototxt'], args['model'])\n", (2684, 2717), False, 'import cv2\n'), ((2727, 2767), 'torch.load', 'torch.load', (['"""./classification_cnn.model"""'], {}), "('./classification_cnn.model')\n", (2737, 2767), False, 'import torch\n'), ((3061, 3086), 'torch.max', 'torch.max', (['output_test', '(1)'], {}), '(output_test, 1)\n', (3070, 3086), False, 'import torch\n'), ((3193, 3218), 'torch.max', 'torch.max', (['output_test', '(1)'], {}), '(output_test, 1)\n', (3202, 3218), False, 'import torch\n'), ((3325, 3350), 'torch.max', 'torch.max', (['output_test', '(1)'], {}), '(output_test, 1)\n', (3334, 3350), False, 'import torch\n'), ((3458, 3483), 'torch.max', 'torch.max', (['output_test', '(1)'], {}), '(output_test, 1)\n', (3467, 3483), False, 'import torch\n'), ((3685, 3700), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (3695, 3700), False, 'import time\n'), ((7850, 7873), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7871, 7873), False, 'import cv2\n'), ((1847, 1869), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (1857, 1869), False, 'from PIL import Image\n'), ((1917, 1952), 'torch.autograd.Variable', 'Variable', (['image'], {'requires_grad': '(True)'}), '(image, requires_grad=True)\n', (1925, 1952), False, 'from torch.autograd import Variable\n'), ((3953, 3985), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(400)'}), '(frame, width=400)\n', (3967, 3985), False, 'import imutils\n'), ((7543, 7577), 'cv2.resize', 'cv2.resize', (['frame', '(width, height)'], {}), '(frame, (width, height))\n', (7553, 7577), False, 'import cv2\n'), ((7611, 7637), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (7621, 7637), 
False, 'import cv2\n'), ((1131, 1160), 'torch.nn.Linear', 'nn.Linear', (['(500000)', 'hidden_dim'], {}), '(500000, hidden_dim)\n', (1140, 1160), True, 'import torch.nn as nn\n'), ((1180, 1214), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'num_classes'], {}), '(hidden_dim, num_classes)\n', (1189, 1214), True, 'import torch.nn as nn\n'), ((1357, 1391), 'torch.nn.functional.dropout', 'F.dropout', (['out', 'self.dropout', '(True)'], {}), '(out, self.dropout, True)\n', (1366, 1391), True, 'import torch.nn.functional as F\n'), ((1637, 1659), 'torch.save', 'torch.save', (['self', 'path'], {}), '(self, path)\n', (1647, 1659), False, 'import torch\n'), ((3658, 3676), 'imutils.video.VideoStream', 'VideoStream', ([], {'src': '(0)'}), '(src=0)\n', (3669, 3676), False, 'from imutils.video import VideoStream\n'), ((4118, 4147), 'cv2.resize', 'cv2.resize', (['frame', '(300, 300)'], {}), '(frame, (300, 300))\n', (4128, 4147), False, 'import cv2\n'), ((6202, 6237), 'torch.autograd.Variable', 'Variable', (['input'], {'requires_grad': '(True)'}), '(input, requires_grad=True)\n', (6210, 6237), False, 'from torch.autograd import Variable\n'), ((6360, 6380), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (6369, 6380), False, 'import torch\n'), ((7306, 7374), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(startX, startY)', '(endX, endY)', '(0, 0, 255)', '(2)'], {}), '(frame, (startX, startY), (endX, endY), (0, 0, 255), 2)\n', (7319, 7374), False, 'import cv2\n'), ((7415, 7504), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(startX, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.45)', '(0, 0, 255)', '(2)'], {}), '(frame, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0,\n 255), 2)\n', (7426, 7504), False, 'import cv2\n'), ((7652, 7666), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7663, 7666), False, 'import cv2\n'), ((774, 836), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_dim[0]', 'num_filters'], {'kernel_size': '(5)', 'padding': '(2)'}), '(input_dim[0], 
num_filters, kernel_size=5, padding=2)\n', (783, 836), True, 'import torch.nn as nn\n'), ((850, 877), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['num_filters'], {}), '(num_filters)\n', (864, 877), True, 'import torch.nn as nn\n'), ((891, 900), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (898, 900), True, 'import torch.nn as nn\n'), ((914, 932), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (926, 932), True, 'import torch.nn as nn\n'), ((982, 1025), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(32)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(16, 32, kernel_size=5, padding=2)\n', (991, 1025), True, 'import torch.nn as nn\n'), ((1039, 1057), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (1053, 1057), True, 'import torch.nn as nn\n'), ((1071, 1080), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1078, 1080), True, 'import torch.nn as nn\n'), ((1094, 1112), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (1106, 1112), True, 'import torch.nn as nn\n'), ((1785, 1809), 'torchvision.transforms.Scale', 'transforms.Scale', (['imsize'], {}), '(imsize)\n', (1801, 1809), False, 'from torchvision import transforms\n'), ((1811, 1832), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1830, 1832), False, 'from torchvision import transforms\n'), ((4967, 4989), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (4975, 4989), True, 'import numpy as np\n'), ((5201, 5225), 'cv2.imshow', 'cv2.imshow', (['"""Head"""', 'head'], {}), "('Head', head)\n", (5211, 5225), False, 'import cv2\n'), ((5260, 5288), 'cv2.resize', 'cv2.resize', (['head', '(500, 500)'], {}), '(head, (500, 500))\n', (5270, 5288), False, 'import cv2\n'), ((5322, 5350), 'PIL.Image.fromarray', 'Image.fromarray', (['neural_head'], {}), '(neural_head)\n', (5337, 5350), False, 'from PIL import Image\n'), ((5400, 5421), 'torchvision.transforms.Scale', 'transforms.Scale', (['(500)'], {}), '(500)\n', (5416, 
5421), False, 'from torchvision import transforms\n'), ((5423, 5444), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5442, 5444), False, 'from torchvision import transforms\n')] |
# based on tut_mission_B737.py and Vehicle.py from Regional Jet Optimization
#
# Created: Aug 2014, SUAVE Team
# Modified: Aug 2017, SUAVE Team
# Modified: Jul 2018, geo
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
# Python Imports
import numpy as np
import pylab as plt
# SUAVE Imports
import SUAVE
from SUAVE.Core import Data, Units
from SUAVE.Methods.Propulsion.turbofan_sizing import turbofan_sizing
from SUAVE.Methods.Geometry.Two_Dimensional.Cross_Section.Propulsion import compute_turbofan_geometry
from SUAVE.Input_Output.Results import print_parasite_drag, \
print_compress_drag, \
print_engine_data, \
print_mission_breakdown, \
print_weight_breakdown
# ----------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------
def main():
    """Size the vehicle, fly the mission, and write result files and plots.

    Builds the vehicle configurations and analyses, evaluates weights and
    the aircraft CG, runs the mission, prints the breakdown files
    (weight / engine / parasite drag / compressibility drag / mission),
    and plots the results.
    """
    configs, analyses = full_setup()

    simple_sizing(configs)
    configs.finalize()
    analyses.finalize()

    # weight analysis; evaluate() populates the breakdown in place, so the
    # return value does not need to be kept (the original bound it to an
    # unused local, along with five unused stability coefficients)
    weights = analyses.configs.base.weights
    weights.evaluate()
    weights.vehicle.mass_properties.center_of_gravity = SUAVE.Methods.Center_of_Gravity.compute_aircraft_center_of_gravity(weights.vehicle, nose_load_fraction=.06)

    # mission analysis
    mission = analyses.missions.base
    results = mission.evaluate()

    # print weight breakdown
    print_weight_breakdown(configs.base, filename='E170_weight_breakdown.dat')

    # print engine data into file
    print_engine_data(configs.base, filename='E170_engine_data.dat')

    # print parasite drag data into file
    # define reference condition for parasite drag
    ref_condition = Data()
    ref_condition.mach_number = 0.3
    ref_condition.reynolds_number = 12e6
    print_parasite_drag(ref_condition, configs.cruise, analyses, 'E170_parasite_drag.dat')

    # print compressibility drag data into file
    print_compress_drag(configs.cruise, analyses, filename='E170_compress_drag.dat')

    # print mission breakdown
    print_mission_breakdown(results, filename='E170_mission_breakdown.dat')

    # plot the results
    plot_mission(results)
    return
# ----------------------------------------------------------------------
# Analysis Setup
# ----------------------------------------------------------------------
def full_setup():
    """Assemble the vehicle configurations and their analyses.

    Returns:
        (configs, analyses): the configuration container and an analysis
        container bundling per-config vehicle analyses and mission analyses.
    """
    # vehicle data and derived configurations
    base_vehicle = vehicle_setup()
    configs = configs_setup(base_vehicle)

    # one analysis stack per configuration
    vehicle_analyses = analyses_setup(configs)

    # mission built on top of those analyses
    base_mission = mission_setup(vehicle_analyses)
    all_missions = missions_setup(base_mission)

    analyses = SUAVE.Analyses.Analysis.Container()
    analyses.configs = vehicle_analyses
    analyses.missions = all_missions
    return configs, analyses
# ----------------------------------------------------------------------
# Define the Vehicle Analyses
# ----------------------------------------------------------------------
def analyses_setup(configs):
    """Build one base analysis stack per configuration.

    Args:
        configs: container of vehicle configurations.

    Returns:
        SUAVE.Analyses.Analysis.Container keyed like ``configs``.
    """
    analyses = SUAVE.Analyses.Analysis.Container()
    # same base analysis recipe for every configuration
    for tag in configs.keys():
        analyses[tag] = base_analysis(configs[tag])
    return analyses
def base_analysis(vehicle):
    """Create the standard analysis stack for one vehicle configuration.

    Appends, in order: sizing, weights (tube-and-wing), fidelity-zero
    aerodynamics, fidelity-zero stability, planet, and the 1976 US
    standard atmosphere tied to that planet.
    """
    analyses = SUAVE.Analyses.Vehicle()

    # basic geometry relations
    sizing = SUAVE.Analyses.Sizing.Sizing()
    sizing.features.vehicle = vehicle
    analyses.append(sizing)

    # weights
    weights = SUAVE.Analyses.Weights.Weights_Tube_Wing()
    weights.vehicle = vehicle
    analyses.append(weights)

    # aerodynamics
    aero = SUAVE.Analyses.Aerodynamics.Fidelity_Zero()
    aero.geometry = vehicle
    analyses.append(aero)

    # stability
    stab = SUAVE.Analyses.Stability.Fidelity_Zero()
    stab.geometry = vehicle
    analyses.append(stab)

    # planet
    planet = SUAVE.Analyses.Planets.Planet()
    analyses.append(planet)

    # atmosphere, referencing the planet's features
    atmo = SUAVE.Analyses.Atmospheric.US_Standard_1976()
    atmo.features.planet = planet.features
    analyses.append(atmo)

    return analyses
# ----------------------------------------------------------------------
# Define the Vehicle
# ----------------------------------------------------------------------
def vehicle_setup():
    """Define the baseline vehicle: geometry, masses, and propulsion.

    Builds wings, fuselage, and a two-spool turbofan network, sizes the
    turbofan at the design point (M 0.78 / 35,000 ft), and attaches the
    physical components used by the weights analysis.

    NOTE(review): the tag is 'Embraer_E190' but main() writes E170_* files
    and several comments reference E170/E190 data interchangeably --
    confirm which aircraft this model is meant to represent.
    """
    # ------------------------------------------------------------------
    #   Initialize the Vehicle
    # ------------------------------------------------------------------
    vehicle = SUAVE.Vehicle()
    vehicle.tag = 'Embraer_E190'
    # ------------------------------------------------------------------
    #   Vehicle-level Properties
    # ------------------------------------------------------------------
    # mass properties
    vehicle.mass_properties.max_takeoff               = 38600. * Units.kg
    vehicle.mass_properties.operating_empty           = 21157. * Units.kg
    vehicle.mass_properties.takeoff                   = 38600. * Units.kg
    vehicle.mass_properties.max_zero_fuel             = 30900. * Units.kg
    vehicle.mass_properties.cargo                     = 0.0 * Units.kg
    vehicle.mass_properties.max_payload               = 9743.0 * Units.kg
    vehicle.mass_properties.max_fuel                  = 9335.0 * Units.kg
    # CG in meters from the nose (x, y, z)
    vehicle.mass_properties.center_of_gravity         = [14.85, 0, 0]
    # envelope properties (structural load factors)
    vehicle.envelope.ultimate_load = 3.75
    vehicle.envelope.limit_load    = 2.50
    # basic parameters
    vehicle.reference_area         = 72.72 * Units['meters**2']
    vehicle.passengers             = 72
    vehicle.systems.control        = "fully powered"
    vehicle.systems.accessories    = "medium range"
    # ------------------------------------------------------------------
    #   Main Wing
    # ------------------------------------------------------------------
    wing = SUAVE.Components.Wings.Main_Wing()
    wing.tag = 'main_wing'
    wing.aspect_ratio            = 8.6
    wing.sweeps.quarter_chord    = 23.0 * Units.deg # 22.5
    wing.thickness_to_chord      = 0.11
    wing.taper                   = 0.28
    wing.span_efficiency         = 1.0
    wing.spans.projected         = 26.0 * Units.meter
    wing.chords.root             = 5.428 * Units.meter # 5.203
    wing.chords.tip              = 1.380 * Units.meter # 1.460
    wing.chords.mean_aerodynamic = 3.806 * Units.meter
    wing.areas.reference         = 72.72 * Units['meters**2']
    # derived areas: wetted = 2x reference; exposed = 80% of wetted
    wing.areas.wetted            = 2.0 * wing.areas.reference
    wing.areas.exposed           = 0.8 * wing.areas.wetted
    wing.areas.affected          = 0.6 * wing.areas.reference
    wing.twists.root             = 2.0 * Units.degrees
    wing.twists.tip              = 0.0 * Units.degrees
    wing.origin                  = [10.36122,0,0]  # meters from the nose
    wing.vertical                = False
    wing.symmetric               = True
    wing.high_lift               = True
    wing.flaps.type              = "double_slotted"
    wing.flaps.chord             = 0.280 * Units.meter
    wing.dynamic_pressure_ratio  = 1.0
    # add to vehicle
    vehicle.append_component(wing)
    # ------------------------------------------------------------------
    #  Horizontal Stabilizer
    # ------------------------------------------------------------------
    wing = SUAVE.Components.Wings.Wing()
    wing.tag = 'horizontal_stabilizer'
    wing.aspect_ratio            = 4.3 #5.5
    wing.sweeps.quarter_chord    = 30.0 * Units.deg #34.5
    wing.thickness_to_chord      = 0.3707 #0.11
    wing.taper                   = 0.11
    wing.span_efficiency         = 0.9
    wing.spans.projected         = 10.000 * Units.meter
    wing.chords.root             = 3.394 * Units.meter
    wing.chords.tip              = 1.258 * Units.meter
    wing.chords.mean_aerodynamic = 2.4895 * Units.meter
    wing.areas.reference         = 23.25 * Units['meters**2']
    wing.areas.wetted            = 2.0 * wing.areas.reference
    wing.areas.exposed           = 0.8 * wing.areas.wetted
    wing.areas.affected          = 0.6 * wing.areas.reference
    wing.twists.root             = 2.0 * Units.degrees
    wing.twists.tip              = 2.0 * Units.degrees
    wing.origin                  = [24.6,0,0]
    wing.vertical                = False
    wing.symmetric               = True
    wing.dynamic_pressure_ratio  = 0.9
    # add to vehicle
    vehicle.append_component(wing)
    # ------------------------------------------------------------------
    #   Vertical Stabilizer
    # ------------------------------------------------------------------
    wing = SUAVE.Components.Wings.Wing()
    wing.tag = 'vertical_stabilizer'
    # equal to E190 data
    wing.aspect_ratio            = 1.7
    wing.sweeps.quarter_chord    = 35 * Units.deg
    wing.thickness_to_chord      = 0.11
    wing.taper                   = 0.31
    wing.span_efficiency         = 0.9
    wing.spans.projected         = 5.270 * Units.meter
    wing.chords.root             = 4.70 * Units.meter
    wing.chords.tip              = 1.45 * Units.meter
    wing.chords.mean_aerodynamic = 3.36 * Units.meter
    wing.areas.reference         = 16.0 * Units['meters**2']
    wing.areas.wetted            = 2.0 * wing.areas.reference
    wing.areas.exposed           = 0.8 * wing.areas.wetted
    wing.areas.affected          = 0.6 * wing.areas.reference
    wing.twists.root             = 0.0 * Units.degrees
    wing.twists.tip              = 0.0 * Units.degrees
    wing.origin                  = [23.9,0,0]
    wing.vertical                = True
    wing.symmetric               = False
    wing.dynamic_pressure_ratio  = 1.0
    # add to vehicle
    vehicle.append_component(wing)
    # ------------------------------------------------------------------
    #  Fuselage
    # ------------------------------------------------------------------
    fuselage = SUAVE.Components.Fuselages.Fuselage()
    fuselage.tag = 'fuselage'
    fuselage.number_coach_seats    = vehicle.passengers
    fuselage.seats_abreast         = 4
    fuselage.seat_pitch            = 0.7455  # meters, presumably -- confirm units
    fuselage.fineness.nose         = 2.0
    fuselage.fineness.tail         = 3.0
    fuselage.lengths.nose          = 6.82 * Units.meter
    fuselage.lengths.tail          = 10.67 * Units.meter
    fuselage.lengths.cabin         = 18.23 * Units.meter
    fuselage.lengths.total         = 29.90 * Units.meter
    fuselage.lengths.fore_space    = 0. * Units.meter
    fuselage.lengths.aft_space     = 0. * Units.meter
    fuselage.width                 = 2.955 * Units.meter
    fuselage.heights.maximum       = 3.361 * Units.meter
    fuselage.areas.side_projected  = 203.32 * Units['meters**2']
    fuselage.areas.wetted          = 277.96 * Units['meters**2']
    fuselage.areas.front_projected = 31.2 * Units['meters**2']    # 8.0110
    fuselage.effective_diameter    = 3.18
    # cabin pressurization differential
    fuselage.differential_pressure = 10**5 * Units.pascal
    fuselage.heights.at_quarter_length          = 3.35 * Units.meter
    fuselage.heights.at_three_quarters_length   = 3.35 * Units.meter
    fuselage.heights.at_wing_root_quarter_chord = 3.50 * Units.meter
    # add to vehicle
    vehicle.append_component(fuselage)
    # ------------------------------------------------------------------
    #   Turbofan Network
    # ------------------------------------------------------------------
    # instantiate the gas turbine network
    turbofan = SUAVE.Components.Energy.Networks.Turbofan()
    turbofan.tag = 'turbofan'
    # setup
    turbofan.number_of_engines = 2
    turbofan.bypass_ratio      = 5.0
    turbofan.engine_length     = 3.1 * Units.meter
    turbofan.nacelle_diameter  = 1.64395 * Units.meter
    # one origin per engine (left/right), meters
    turbofan.origin            = [[9.721, 3.984,-1],[9.721,-3.984,-1]] # meters
    # compute engine areas: cylinder lateral area with a 1.1 fudge factor
    turbofan.areas.wetted      = 1.1*np.pi*turbofan.nacelle_diameter*turbofan.engine_length
    # working fluid
    turbofan.working_fluid = SUAVE.Attributes.Gases.Air()
    # ------------------------------------------------------------------
    #   Component 1 - Ram
    # converts freestream static to stagnation quantities
    ram = SUAVE.Components.Energy.Converters.Ram()
    ram.tag = 'ram'
    # add to the network
    turbofan.append(ram)
    # ------------------------------------------------------------------
    #  Component 2 - Inlet Nozzle
    inlet_nozzle = SUAVE.Components.Energy.Converters.Compression_Nozzle()
    inlet_nozzle.tag = 'inlet_nozzle'
    # setup
    inlet_nozzle.polytropic_efficiency = 0.98
    inlet_nozzle.pressure_ratio        = 0.98
    # add to network
    turbofan.append(inlet_nozzle)
    # ------------------------------------------------------------------
    #  Component 3 - Low Pressure Compressor
    compressor = SUAVE.Components.Energy.Converters.Compressor()
    compressor.tag = 'low_pressure_compressor'
    # setup
    compressor.polytropic_efficiency = 0.91
    compressor.pressure_ratio        = 1.9
    # add to network
    turbofan.append(compressor)
    # ------------------------------------------------------------------
    #  Component 4 - High Pressure Compressor
    compressor = SUAVE.Components.Energy.Converters.Compressor()
    compressor.tag = 'high_pressure_compressor'
    # setup
    compressor.polytropic_efficiency = 0.91
    compressor.pressure_ratio        = 10.0
    # add to network
    turbofan.append(compressor)
    # ------------------------------------------------------------------
    #  Component 5 - Low Pressure Turbine
    turbine = SUAVE.Components.Energy.Converters.Turbine()
    turbine.tag='low_pressure_turbine'
    # setup
    turbine.mechanical_efficiency = 0.99
    turbine.polytropic_efficiency = 0.93
    # add to network
    turbofan.append(turbine)
    # ------------------------------------------------------------------
    #  Component 6 - High Pressure Turbine
    turbine = SUAVE.Components.Energy.Converters.Turbine()
    turbine.tag='high_pressure_turbine'
    # setup
    turbine.mechanical_efficiency = 0.99
    turbine.polytropic_efficiency = 0.93
    # add to network
    turbofan.append(turbine)
    # ------------------------------------------------------------------
    #  Component 7 - Combustor
    combustor = SUAVE.Components.Energy.Converters.Combustor()
    combustor.tag = 'combustor'
    # setup
    combustor.efficiency                = 0.99
    combustor.alphac                    = 1.0
    combustor.turbine_inlet_temperature = 1500 # K
    combustor.pressure_ratio            = 0.95
    combustor.fuel_data                 = SUAVE.Attributes.Propellants.Jet_A()
    # add to network
    turbofan.append(combustor)
    # ------------------------------------------------------------------
    #  Component 8 - Core Nozzle
    nozzle = SUAVE.Components.Energy.Converters.Expansion_Nozzle()
    nozzle.tag = 'core_nozzle'
    # setup
    nozzle.polytropic_efficiency = 0.95
    nozzle.pressure_ratio        = 0.99
    # add to network
    turbofan.append(nozzle)
    # ------------------------------------------------------------------
    #  Component 9 - Fan Nozzle
    nozzle = SUAVE.Components.Energy.Converters.Expansion_Nozzle()
    nozzle.tag = 'fan_nozzle'
    # setup
    nozzle.polytropic_efficiency = 0.95
    nozzle.pressure_ratio        = 0.99
    # add to network
    turbofan.append(nozzle)
    # ------------------------------------------------------------------
    #  Component 10 - Fan
    fan = SUAVE.Components.Energy.Converters.Fan()
    fan.tag = 'fan'
    # setup
    fan.polytropic_efficiency = 0.93
    fan.pressure_ratio        = 1.7
    # add to network
    turbofan.append(fan)
    # ------------------------------------------------------------------
    # Component 10 : thrust (to compute the thrust)
    thrust = SUAVE.Components.Energy.Processes.Thrust()
    thrust.tag ='compute_thrust'
    # total design thrust (includes all the engines)
    thrust.total_design             = 52700.0 * Units.N #Newtons
    # design sizing conditions
    altitude      = 35000.0*Units.ft
    mach_number   = 0.78
    # NOTE(review): isa_deviation is never used; turbofan_sizing is called
    # without it, so the default ISA deviation applies
    isa_deviation = 0.
    # add to network
    turbofan.thrust = thrust
    # size the turbofan at the design point
    turbofan_sizing(turbofan,mach_number,altitude)
    # add gas turbine network turbofan to the vehicle
    vehicle.append_component(turbofan)
    # ------------------------------------------------------------------
    # now add weights objects (placeholders consumed by the weights analysis)
    vehicle.landing_gear       = SUAVE.Components.Landing_Gear.Landing_Gear()
    vehicle.control_systems    = SUAVE.Components.Physical_Component()
    vehicle.electrical_systems = SUAVE.Components.Physical_Component()
    vehicle.avionics           = SUAVE.Components.Energy.Peripherals.Avionics()
    vehicle.passenger_weights  = SUAVE.Components.Physical_Component()
    vehicle.furnishings        = SUAVE.Components.Physical_Component()
    vehicle.air_conditioner    = SUAVE.Components.Physical_Component()
    vehicle.fuel               = SUAVE.Components.Physical_Component()
    vehicle.apu                = SUAVE.Components.Physical_Component()
    vehicle.hydraulics         = SUAVE.Components.Physical_Component()
    vehicle.optionals          = SUAVE.Components.Physical_Component()
    vehicle.wings['vertical_stabilizer'].rudder = SUAVE.Components.Physical_Component()
    # ------------------------------------------------------------------
    #  Vehicle Definition Complete
    # ------------------------------------------------------------------
    return vehicle
# ----------------------------------------------------------------------
# Define the Configurations
# ---------------------------------------------------------------------
def configs_setup(vehicle):
    """Create the flight configurations derived from the base vehicle.

    Configurations: base, cruise, cruise_spoilers, takeoff, landing,
    and short_field_takeoff, each a diff on top of the base config.

    NOTE(review): the landing config sets ``flaps_angle``/``slats_angle``
    (flat attributes) while takeoff sets ``flaps.angle``/``slats.angle``
    (nested attributes). The two spellings are not the same attribute --
    confirm which one the aerodynamics analysis actually reads.
    """
    # ------------------------------------------------------------------
    #   Initialize Configurations
    # ------------------------------------------------------------------
    configs = SUAVE.Components.Configs.Config.Container()
    base_config = SUAVE.Components.Configs.Config(vehicle)
    base_config.tag = 'base'
    configs.append(base_config)
    # ------------------------------------------------------------------
    #   Cruise Configuration
    # ------------------------------------------------------------------
    config = SUAVE.Components.Configs.Config(base_config)
    config.tag = 'cruise'
    configs.append(config)
    config.maximum_lift_coefficient = 1.2
    # ------------------------------------------------------------------
    #   Cruise with Spoilers Configuration
    # ------------------------------------------------------------------
    config = SUAVE.Components.Configs.Config(base_config)
    config.tag = 'cruise_spoilers'
    configs.append(config)
    config.maximum_lift_coefficient = 1.2
    # ------------------------------------------------------------------
    #   Takeoff Configuration
    # ------------------------------------------------------------------
    config = SUAVE.Components.Configs.Config(base_config)
    config.tag = 'takeoff'
    config.wings['main_wing'].flaps.angle = 20. * Units.deg
    config.wings['main_wing'].slats.angle = 25. * Units.deg
    config.V2_VS_ratio = 1.21
    config.maximum_lift_coefficient = 2.
    configs.append(config)
    # ------------------------------------------------------------------
    #   Landing Configuration
    # ------------------------------------------------------------------
    config = SUAVE.Components.Configs.Config(base_config)
    config.tag = 'landing'
    # NOTE(review): flat attributes here, nested ones in takeoff -- see docstring
    config.wings['main_wing'].flaps_angle = 30. * Units.deg
    config.wings['main_wing'].slats_angle = 25. * Units.deg
    config.Vref_VS_ratio = 1.23
    config.maximum_lift_coefficient = 2.
    configs.append(config)
    # ------------------------------------------------------------------
    #   Short Field Takeoff Configuration
    # ------------------------------------------------------------------
    config = SUAVE.Components.Configs.Config(base_config)
    config.tag = 'short_field_takeoff'
    config.wings['main_wing'].flaps.angle = 20. * Units.deg
    config.wings['main_wing'].slats.angle = 25. * Units.deg
    # config.V2_VS_ratio = 1.21
    # config.maximum_lift_coefficient = 2.
    configs.append(config)
    return configs
def simple_sizing(configs):
    """Apply simple sizing relations to the base and landing configs.

    Sets max zero-fuel weight (90% of MTOW), recomputes derived wing
    areas, and sets the landing weight (85% of takeoff weight), storing
    the diffs on each configuration.
    """
    base = configs.base
    base.pull_base()

    # zero fuel weight: 90% of max takeoff weight
    base.mass_properties.max_zero_fuel = 0.9 * base.mass_properties.max_takeoff

    # derived wing areas
    # NOTE(review): affected area scales from the wetted area here, while
    # vehicle_setup() scales it from the reference area -- confirm intent.
    for surface in base.wings:
        surface.areas.wetted   = 2.0 * surface.areas.reference
        surface.areas.exposed  = 0.8 * surface.areas.wetted
        surface.areas.affected = 0.6 * surface.areas.wetted

    # store the new base data
    base.store_diff()

    # ------------------------------------------------------------------
    #   Landing Configuration
    # ------------------------------------------------------------------
    landing = configs.landing
    landing.pull_base()  # make sure base data is current

    # landing weight: 85% of takeoff weight
    landing.mass_properties.landing = 0.85 * base.mass_properties.takeoff
    landing.store_diff()
    return
# ----------------------------------------------------------------------
# Define the Mission
# ----------------------------------------------------------------------
def mission_setup(analyses):
    """Define the design mission plus a reserve mission appended to it.

    Main mission: five climb segments, cruise (450 kts / 2050 nmi), three
    descent segments. Reserve: climb to 15,000 ft, 140 nmi cruise at M 0.5,
    30 min loiter, and a final linear-Mach descent.

    NOTE(review): the main-mission segments are built without
    ``base_segment`` while the reserve segments pass it to the segment
    constructors -- presumably both styles are accepted by this SUAVE
    version; confirm.
    """
    # ------------------------------------------------------------------
    #   Initialize the Mission
    # ------------------------------------------------------------------
    mission = SUAVE.Analyses.Mission.Sequential_Segments()
    mission.tag = 'the_mission'
    # airport at sea level, standard day
    airport = SUAVE.Attributes.Airports.Airport()
    airport.altitude   =  0.0  * Units.ft
    airport.delta_isa  =  0.0
    airport.atmosphere = SUAVE.Analyses.Atmospheric.US_Standard_1976()
    mission.airport = airport
    # unpack Segments module
    Segments = SUAVE.Analyses.Mission.Segments
    # base segment
    base_segment = Segments.Segment()
    # atmosphere/planet objects shared by all segments below
    atmosphere=SUAVE.Attributes.Atmospheres.Earth.US_Standard_1976()
    planet = SUAVE.Attributes.Planets.Earth()
    # ------------------------------------------------------------------
    #   First Climb Segment: Constant Speed, Constant Rate
    # ------------------------------------------------------------------
    segment = Segments.Climb.Constant_Speed_Constant_Rate()
    segment.tag = "climb_1"
    # connect vehicle configuration
    segment.analyses.extend( analyses.base )
    # define segment attributes
    segment.atmosphere     = atmosphere
    segment.planet         = planet
    segment.altitude_start = 0.0   * Units.km
    segment.altitude_end   = 3.048 * Units.km
    segment.air_speed      = 138.0 * Units['m/s']
    segment.climb_rate     = 3000. * Units['ft/min']
    # add to misison
    mission.append_segment(segment)
    # ------------------------------------------------------------------
    #   Second Climb Segment: Constant Speed, Constant Rate
    # ------------------------------------------------------------------
    segment = Segments.Climb.Constant_Speed_Constant_Rate()
    segment.tag = "climb_2"
    # connect vehicle configuration
    segment.analyses.extend( analyses.cruise )
    # segment attributes
    segment.atmosphere   = atmosphere
    segment.planet       = planet
    segment.altitude_end = 3.657 * Units.km
    segment.air_speed    = 168.0 * Units['m/s']
    segment.climb_rate   = 2500. * Units['ft/min']
    # add to mission
    mission.append_segment(segment)
    # ------------------------------------------------------------------
    #   Third Climb Segment: Constant Speed, Constant Climb Rate
    # ------------------------------------------------------------------
    segment = Segments.Climb.Constant_Speed_Constant_Rate()
    segment.tag = "climb_3"
    # connect vehicle configuration
    segment.analyses.extend( analyses.cruise )
    # segment attributes
    segment.atmosphere   = atmosphere
    segment.planet       = planet
    segment.altitude_end = 25000. * Units.ft
    segment.air_speed    = 200.0  * Units['m/s']
    segment.climb_rate   = 1800.  * Units['ft/min']
    # add to mission
    mission.append_segment(segment)
    # ------------------------------------------------------------------
    #   Fourth Climb Segment: Constant Speed, Constant Rate
    # ------------------------------------------------------------------
    segment = Segments.Climb.Constant_Speed_Constant_Rate()
    segment.tag = "climb_4"
    # connect vehicle configuration
    segment.analyses.extend( analyses.cruise )
    # segment attributes
    segment.atmosphere   = atmosphere
    segment.planet       = planet
    segment.altitude_end = 32000. * Units.ft
    segment.air_speed    = 230.0* Units['m/s']
    segment.climb_rate   = 900. * Units['ft/min']
    # add to mission
    mission.append_segment(segment)
    # ------------------------------------------------------------------
    #   Fifth Climb Segment: Constant Speed, Constant Rate
    # ------------------------------------------------------------------
    segment = Segments.Climb.Constant_Speed_Constant_Rate()
    segment.tag = "climb_5"
    # connect vehicle configuration
    segment.analyses.extend( analyses.cruise )
    # segment attributes
    segment.atmosphere   = atmosphere
    segment.planet       = planet
    segment.altitude_end = 37000. * Units.ft
    segment.air_speed    = 230.0  * Units['m/s']
    segment.climb_rate   = 300.   * Units['ft/min']
    # add to mission
    mission.append_segment(segment)
    # ------------------------------------------------------------------
    #   Cruise Segment: Constant Speed, Constant Altitude
    # ------------------------------------------------------------------
    segment = Segments.Cruise.Constant_Speed_Constant_Altitude()
    segment.tag = "cruise"
    # connect vehicle configuration
    segment.analyses.extend( analyses.cruise )
    # segment attributes
    segment.atmosphere = atmosphere
    segment.planet     = planet
    segment.air_speed  = 450. * Units.knots
    segment.distance   = 2050. * Units.nmi
    # add to mission
    mission.append_segment(segment)
    # ------------------------------------------------------------------
    #   First Descent Segment: Constant Speed, Constant Rate
    # ------------------------------------------------------------------
    segment = Segments.Descent.Constant_Speed_Constant_Rate()
    segment.tag = "descent_1"
    # connect vehicle configuration
    segment.analyses.extend( analyses.cruise )
    # segment attributes
    segment.atmosphere   = atmosphere
    segment.planet       = planet
    segment.altitude_end = 9.31  * Units.km
    segment.air_speed    = 440.0 * Units.knots
    segment.descent_rate = 2600. * Units['ft/min']
    # add to mission
    mission.append_segment(segment)
    # ------------------------------------------------------------------
    #   Second Descent Segment: Constant Speed, Constant Rate
    # ------------------------------------------------------------------
    segment = Segments.Descent.Constant_Speed_Constant_Rate()
    segment.tag = "descent_2"
    # connect vehicle configuration (spoilers deployed)
    segment.analyses.extend( analyses.cruise_spoilers )
    # segment attributes
    segment.atmosphere   = atmosphere
    segment.planet       = planet
    segment.altitude_end = 3.657 * Units.km
    segment.air_speed    = 365.0 * Units.knots
    segment.descent_rate = 2300. * Units['ft/min']
    # append to mission
    mission.append_segment(segment)
    # ------------------------------------------------------------------
    #   Third Descent Segment: Constant Speed, Constant Rate
    # ------------------------------------------------------------------
    segment = Segments.Descent.Constant_Speed_Constant_Rate()
    segment.tag = "descent_3"
    # connect vehicle configuration
    segment.analyses.extend( analyses.cruise )
    # segment attributes
    segment.atmosphere   = atmosphere
    segment.planet       = planet
    segment.altitude_end = 0.0   * Units.km
    segment.air_speed    = 250.0 * Units.knots
    segment.descent_rate = 1500. * Units['ft/min']
    # append to mission
    mission.append_segment(segment)
    # ------------------------------------------------------------------
    #   Mission definition complete
    # ------------------------------------------------------------------
    #------------------------------------------------------------------
    ###         Reserve mission
    #------------------------------------------------------------------
    # ------------------------------------------------------------------
    #   First Climb Segment: Constant Speed, Constant Throttle
    # ------------------------------------------------------------------
    segment = Segments.Climb.Constant_Speed_Constant_Rate()
    segment.tag = "reserve_climb"
    # connect vehicle configuration
    segment.analyses.extend( analyses.base )
    # define segment attributes
    segment.atmosphere     = atmosphere
    segment.planet         = planet
    segment.altitude_start = 0.0    * Units.km
    segment.altitude_end   = 15000. * Units.ft
    segment.air_speed      = 138.0  * Units['m/s']
    segment.climb_rate     = 3000.  * Units['ft/min']
    # add to misison
    mission.append_segment(segment)
    # ------------------------------------------------------------------
    #   Cruise Segment: constant speed, constant altitude
    # ------------------------------------------------------------------
    segment = Segments.Cruise.Constant_Mach_Constant_Altitude(base_segment)
    segment.tag = "reserve_cruise"
    segment.analyses.extend( analyses.cruise )
    segment.mach      = 0.5
    segment.distance  = 140.0 * Units.nautical_mile
    mission.append_segment(segment)
    # ------------------------------------------------------------------
    #   Loiter Segment: constant mach, constant time
    # ------------------------------------------------------------------
    segment = Segments.Cruise.Constant_Mach_Constant_Altitude_Loiter(base_segment)
    segment.tag = "reserve_loiter"
    segment.analyses.extend( analyses.cruise )
    segment.mach = 0.5
    segment.time = 30.0 * Units.minutes
    mission.append_segment(segment)
    # ------------------------------------------------------------------
    #   Final Descent Segment: consant speed, constant segment rate
    # ------------------------------------------------------------------
    segment = Segments.Descent.Linear_Mach_Constant_Rate(base_segment)
    segment.tag = "reserve_descent_1"
    segment.analyses.extend( analyses.landing )
    segment.altitude_end = 0.0 * Units.km
    segment.descent_rate = 3.0 * Units['m/s']
    segment.mach_end     = 0.24
    segment.mach_start   = 0.3
    # append to mission
    mission.append_segment(segment)
    #------------------------------------------------------------------
    ###         Reserve mission completed
    #------------------------------------------------------------------
    return mission
def missions_setup(base_mission):
    """Wrap the base mission in a mission container.

    Args:
        base_mission: the mission produced by mission_setup().

    Returns:
        SUAVE.Analyses.Mission.Mission.Container with ``base`` set.
    """
    missions = SUAVE.Analyses.Mission.Mission.Container()
    # only one mission in this study: the base mission
    missions.base = base_mission
    return missions
# ----------------------------------------------------------------------
# Plot Mission
# ----------------------------------------------------------------------
def plot_mission(results,line_style='bo-'):
    """Plot mission results: forces, aero coefficients, drag breakdown,
    altitude/SFC/weight, and velocities; saves B737_*.pdf/.png figures.

    Args:
        results: evaluated mission results (iterates results.segments).
        line_style: matplotlib format string for most curves.
    """
    axis_font = {'fontname':'Arial', 'size':'14'}
    # ------------------------------------------------------------------
    #   Aerodynamics
    # ------------------------------------------------------------------
    fig = plt.figure("Aerodynamic Forces",figsize=(8,6))
    for segment in results.segments.values():
        time   = segment.conditions.frames.inertial.time[:,0] / Units.min
        Thrust = segment.conditions.frames.body.thrust_force_vector[:,0] / Units.lbf
        eta    = segment.conditions.propulsion.throttle[:,0]
        axes = fig.add_subplot(2,1,1)
        axes.plot( time , Thrust , line_style )
        axes.set_ylabel('Thrust (lbf)',axis_font)
        axes.grid(True)
        axes = fig.add_subplot(2,1,2)
        axes.plot( time , eta , line_style )
        axes.set_xlabel('Time (min)',axis_font)
        axes.set_ylabel('Throttle',axis_font)
        axes.grid(True)
    plt.savefig("B737_engine.pdf")
    plt.savefig("B737_engine.png")
    # ------------------------------------------------------------------
    #   Aerodynamics 2
    # ------------------------------------------------------------------
    fig = plt.figure("Aerodynamic Coefficients",figsize=(8,10))
    for segment in results.segments.values():
        time  = segment.conditions.frames.inertial.time[:,0] / Units.min
        CLift = segment.conditions.aerodynamics.lift_coefficient[:,0]
        CDrag = segment.conditions.aerodynamics.drag_coefficient[:,0]
        aoa   = segment.conditions.aerodynamics.angle_of_attack[:,0] / Units.deg
        l_d   = CLift/CDrag
        axes = fig.add_subplot(3,1,1)
        axes.plot( time , CLift , line_style )
        axes.set_ylabel('Lift Coefficient',axis_font)
        axes.grid(True)
        axes = fig.add_subplot(3,1,2)
        axes.plot( time , l_d , line_style )
        axes.set_ylabel('L/D',axis_font)
        axes.grid(True)
        axes = fig.add_subplot(3,1,3)
        # AOA is always plotted red regardless of line_style
        axes.plot( time , aoa , 'ro-' )
        axes.set_xlabel('Time (min)',axis_font)
        axes.set_ylabel('AOA (deg)',axis_font)
        axes.grid(True)
    plt.savefig("B737_aero.pdf")
    plt.savefig("B737_aero.png")
    # ------------------------------------------------------------------
    #   Drag Components
    # ------------------------------------------------------------------
    fig = plt.figure("Drag Components",figsize=(8,10))
    axes = plt.gca()
    for i, segment in enumerate(results.segments.values()):
        time = segment.conditions.frames.inertial.time[:,0] / Units.min
        drag_breakdown = segment.conditions.aerodynamics.drag_breakdown
        cdp = drag_breakdown.parasite.total[:,0]
        cdi = drag_breakdown.induced.total[:,0]
        cdc = drag_breakdown.compressible.total[:,0]
        cdm = drag_breakdown.miscellaneous.total[:,0]
        cd  = drag_breakdown.total[:,0]
        # labelled per-component colors only in the default style; the
        # legend is drawn once, for the first segment
        if line_style == 'bo-':
            axes.plot( time , cdp , 'ko-', label='CD parasite' )
            axes.plot( time , cdi , 'bo-', label='CD induced' )
            axes.plot( time , cdc , 'go-', label='CD compressibility' )
            axes.plot( time , cdm , 'yo-', label='CD miscellaneous' )
            axes.plot( time , cd , 'ro-', label='CD total'   )
            if i == 0:
                axes.legend(loc='upper center')
        else:
            axes.plot( time , cdp , line_style )
            axes.plot( time , cdi , line_style )
            axes.plot( time , cdc , line_style )
            axes.plot( time , cdm , line_style )
            axes.plot( time , cd , line_style )
    axes.set_xlabel('Time (min)')
    axes.set_ylabel('CD')
    axes.grid(True)
    plt.savefig("B737_drag.pdf")
    plt.savefig("B737_drag.png")
    # ------------------------------------------------------------------
    #   Altitude, sfc, vehicle weight
    # ------------------------------------------------------------------
    fig = plt.figure("Altitude_sfc_weight",figsize=(8,10))
    for segment in results.segments.values():
        time     = segment.conditions.frames.inertial.time[:,0] / Units.min
        aoa      = segment.conditions.aerodynamics.angle_of_attack[:,0] / Units.deg
        mass     = segment.conditions.weights.total_mass[:,0] / Units.lb
        altitude = segment.conditions.freestream.altitude[:,0] / Units.ft
        mdot     = segment.conditions.weights.vehicle_mass_rate[:,0]
        thrust   = segment.conditions.frames.body.thrust_force_vector[:,0]
        # specific fuel consumption in lb/lbf-hr
        sfc      = (mdot / Units.lb) / (thrust /Units.lbf) * Units.hr
        axes = fig.add_subplot(3,1,1)
        axes.plot( time , altitude , line_style )
        axes.set_ylabel('Altitude (ft)',axis_font)
        axes.grid(True)
        axes = fig.add_subplot(3,1,3)
        axes.plot( time , sfc , line_style )
        axes.set_xlabel('Time (min)',axis_font)
        axes.set_ylabel('sfc (lb/lbf-hr)',axis_font)
        axes.grid(True)
        axes = fig.add_subplot(3,1,2)
        axes.plot( time , mass , 'ro-' )
        axes.set_ylabel('Weight (lb)',axis_font)
        axes.grid(True)
    plt.savefig("B737_mission.pdf")
    plt.savefig("B737_mission.png")
    # ------------------------------------------------------------------
    #   Velocities
    # ------------------------------------------------------------------
    fig = plt.figure("Velocities",figsize=(8,10))
    for segment in results.segments.values():
        time     = segment.conditions.frames.inertial.time[:,0] / Units.min
        # NOTE(review): Lift, Drag, Thrust and pressure below are computed
        # but never plotted in this section; also Thrust is divided by
        # Units.lb (mass) where Units.lbf (force) looks intended -- confirm
        Lift     = -segment.conditions.frames.wind.lift_force_vector[:,2]
        Drag     = -segment.conditions.frames.wind.drag_force_vector[:,0] / Units.lbf
        Thrust   = segment.conditions.frames.body.thrust_force_vector[:,0] / Units.lb
        velocity = segment.conditions.freestream.velocity[:,0]
        pressure = segment.conditions.freestream.pressure[:,0]
        density  = segment.conditions.freestream.density[:,0]
        # equivalent airspeed from sea-level standard density 1.225 kg/m^3
        EAS      = velocity * np.sqrt(density/1.225)
        mach     = segment.conditions.freestream.mach_number[:,0]
        axes = fig.add_subplot(3,1,1)
        axes.plot( time , velocity / Units.kts, line_style )
        axes.set_ylabel('velocity (kts)',axis_font)
        axes.grid(True)
        axes = fig.add_subplot(3,1,2)
        axes.plot( time , EAS / Units.kts, line_style )
        axes.set_xlabel('Time (min)',axis_font)
        axes.set_ylabel('Equivalent Airspeed',axis_font)
        axes.grid(True)
        axes = fig.add_subplot(3,1,3)
        axes.plot( time , mach , line_style )
        axes.set_xlabel('Time (min)',axis_font)
        axes.set_ylabel('Mach',axis_font)
        axes.grid(True)
    return
if __name__ == '__main__':
main()
plt.show() | [
"SUAVE.Input_Output.Results.print_parasite_drag",
"numpy.sqrt",
"pylab.savefig",
"SUAVE.Components.Energy.Converters.Compression_Nozzle",
"SUAVE.Components.Landing_Gear.Landing_Gear",
"SUAVE.Analyses.Vehicle",
"SUAVE.Components.Energy.Converters.Compressor",
"SUAVE.Input_Output.Results.print_mission_b... | [((1246, 1359), 'SUAVE.Methods.Center_of_Gravity.compute_aircraft_center_of_gravity', 'SUAVE.Methods.Center_of_Gravity.compute_aircraft_center_of_gravity', (['weights.vehicle'], {'nose_load_fraction': '(0.06)'}), '(weights.\n vehicle, nose_load_fraction=0.06)\n', (1312, 1359), False, 'import SUAVE\n'), ((1834, 1908), 'SUAVE.Input_Output.Results.print_weight_breakdown', 'print_weight_breakdown', (['configs.base'], {'filename': '"""E170_weight_breakdown.dat"""'}), "(configs.base, filename='E170_weight_breakdown.dat')\n", (1856, 1908), False, 'from SUAVE.Input_Output.Results import print_parasite_drag, print_compress_drag, print_engine_data, print_mission_breakdown, print_weight_breakdown\n'), ((1949, 2013), 'SUAVE.Input_Output.Results.print_engine_data', 'print_engine_data', (['configs.base'], {'filename': '"""E170_engine_data.dat"""'}), "(configs.base, filename='E170_engine_data.dat')\n", (1966, 2013), False, 'from SUAVE.Input_Output.Results import print_parasite_drag, print_compress_drag, print_engine_data, print_mission_breakdown, print_weight_breakdown\n'), ((2128, 2134), 'SUAVE.Core.Data', 'Data', ([], {}), '()\n', (2132, 2134), False, 'from SUAVE.Core import Data, Units\n'), ((2221, 2311), 'SUAVE.Input_Output.Results.print_parasite_drag', 'print_parasite_drag', (['ref_condition', 'configs.cruise', 'analyses', '"""E170_parasite_drag.dat"""'], {}), "(ref_condition, configs.cruise, analyses,\n 'E170_parasite_drag.dat')\n", (2240, 2311), False, 'from SUAVE.Input_Output.Results import print_parasite_drag, print_compress_drag, print_engine_data, print_mission_breakdown, print_weight_breakdown\n'), ((2358, 2443), 'SUAVE.Input_Output.Results.print_compress_drag', 'print_compress_drag', (['configs.cruise', 'analyses'], {'filename': '"""E170_compress_drag.dat"""'}), "(configs.cruise, analyses, filename='E170_compress_drag.dat'\n )\n", (2377, 2443), False, 'from SUAVE.Input_Output.Results import print_parasite_drag, 
print_compress_drag, print_engine_data, print_mission_breakdown, print_weight_breakdown\n'), ((2474, 2545), 'SUAVE.Input_Output.Results.print_mission_breakdown', 'print_mission_breakdown', (['results'], {'filename': '"""E170_mission_breakdown.dat"""'}), "(results, filename='E170_mission_breakdown.dat')\n", (2497, 2545), False, 'from SUAVE.Input_Output.Results import print_parasite_drag, print_compress_drag, print_engine_data, print_mission_breakdown, print_weight_breakdown\n'), ((3090, 3125), 'SUAVE.Analyses.Analysis.Container', 'SUAVE.Analyses.Analysis.Container', ([], {}), '()\n', (3123, 3125), False, 'import SUAVE\n'), ((3464, 3499), 'SUAVE.Analyses.Analysis.Container', 'SUAVE.Analyses.Analysis.Container', ([], {}), '()\n', (3497, 3499), False, 'import SUAVE\n'), ((3907, 3931), 'SUAVE.Analyses.Vehicle', 'SUAVE.Analyses.Vehicle', ([], {}), '()\n', (3929, 3931), False, 'import SUAVE\n'), ((4051, 4081), 'SUAVE.Analyses.Sizing.Sizing', 'SUAVE.Analyses.Sizing.Sizing', ([], {}), '()\n', (4079, 4081), False, 'import SUAVE\n'), ((4251, 4293), 'SUAVE.Analyses.Weights.Weights_Tube_Wing', 'SUAVE.Analyses.Weights.Weights_Tube_Wing', ([], {}), '()\n', (4291, 4293), False, 'import SUAVE\n'), ((4475, 4518), 'SUAVE.Analyses.Aerodynamics.Fidelity_Zero', 'SUAVE.Analyses.Aerodynamics.Fidelity_Zero', ([], {}), '()\n', (4516, 4518), False, 'import SUAVE\n'), ((4705, 4745), 'SUAVE.Analyses.Stability.Fidelity_Zero', 'SUAVE.Analyses.Stability.Fidelity_Zero', ([], {}), '()\n', (4743, 4745), False, 'import SUAVE\n'), ((4921, 4952), 'SUAVE.Analyses.Planets.Planet', 'SUAVE.Analyses.Planets.Planet', ([], {}), '()\n', (4950, 4952), False, 'import SUAVE\n'), ((5099, 5144), 'SUAVE.Analyses.Atmospheric.US_Standard_1976', 'SUAVE.Analyses.Atmospheric.US_Standard_1976', ([], {}), '()\n', (5142, 5144), False, 'import SUAVE\n'), ((5643, 5658), 'SUAVE.Vehicle', 'SUAVE.Vehicle', ([], {}), '()\n', (5656, 5658), False, 'import SUAVE\n'), ((7007, 7041), 'SUAVE.Components.Wings.Main_Wing', 
'SUAVE.Components.Wings.Main_Wing', ([], {}), '()\n', (7039, 7041), False, 'import SUAVE\n'), ((8458, 8487), 'SUAVE.Components.Wings.Wing', 'SUAVE.Components.Wings.Wing', ([], {}), '()\n', (8485, 8487), False, 'import SUAVE\n'), ((9767, 9796), 'SUAVE.Components.Wings.Wing', 'SUAVE.Components.Wings.Wing', ([], {}), '()\n', (9794, 9796), False, 'import SUAVE\n'), ((11058, 11095), 'SUAVE.Components.Fuselages.Fuselage', 'SUAVE.Components.Fuselages.Fuselage', ([], {}), '()\n', (11093, 11095), False, 'import SUAVE\n'), ((12639, 12682), 'SUAVE.Components.Energy.Networks.Turbofan', 'SUAVE.Components.Energy.Networks.Turbofan', ([], {}), '()\n', (12680, 12682), False, 'import SUAVE\n'), ((13165, 13193), 'SUAVE.Attributes.Gases.Air', 'SUAVE.Attributes.Gases.Air', ([], {}), '()\n', (13191, 13193), False, 'import SUAVE\n'), ((13391, 13431), 'SUAVE.Components.Energy.Converters.Ram', 'SUAVE.Components.Energy.Converters.Ram', ([], {}), '()\n', (13429, 13431), False, 'import SUAVE\n'), ((13657, 13712), 'SUAVE.Components.Energy.Converters.Compression_Nozzle', 'SUAVE.Components.Energy.Converters.Compression_Nozzle', ([], {}), '()\n', (13710, 13712), False, 'import SUAVE\n'), ((14084, 14131), 'SUAVE.Components.Energy.Converters.Compressor', 'SUAVE.Components.Energy.Converters.Compressor', ([], {}), '()\n', (14129, 14131), False, 'import SUAVE\n'), ((14509, 14556), 'SUAVE.Components.Energy.Converters.Compressor', 'SUAVE.Components.Energy.Converters.Compressor', ([], {}), '()\n', (14554, 14556), False, 'import SUAVE\n'), ((14929, 14973), 'SUAVE.Components.Energy.Converters.Turbine', 'SUAVE.Components.Energy.Converters.Turbine', ([], {}), '()\n', (14971, 14973), False, 'import SUAVE\n'), ((15335, 15379), 'SUAVE.Components.Energy.Converters.Turbine', 'SUAVE.Components.Energy.Converters.Turbine', ([], {}), '()\n', (15377, 15379), False, 'import SUAVE\n'), ((15732, 15778), 'SUAVE.Components.Energy.Converters.Combustor', 'SUAVE.Components.Energy.Converters.Combustor', ([], {}), '()\n', 
(15776, 15778), False, 'import SUAVE\n'), ((16070, 16106), 'SUAVE.Attributes.Propellants.Jet_A', 'SUAVE.Attributes.Propellants.Jet_A', ([], {}), '()\n', (16104, 16106), False, 'import SUAVE\n'), ((16311, 16364), 'SUAVE.Components.Energy.Converters.Expansion_Nozzle', 'SUAVE.Components.Energy.Converters.Expansion_Nozzle', ([], {}), '()\n', (16362, 16364), False, 'import SUAVE\n'), ((16696, 16749), 'SUAVE.Components.Energy.Converters.Expansion_Nozzle', 'SUAVE.Components.Energy.Converters.Expansion_Nozzle', ([], {}), '()\n', (16747, 16749), False, 'import SUAVE\n'), ((17071, 17111), 'SUAVE.Components.Energy.Converters.Fan', 'SUAVE.Components.Energy.Converters.Fan', ([], {}), '()\n', (17109, 17111), False, 'import SUAVE\n'), ((17418, 17460), 'SUAVE.Components.Energy.Processes.Thrust', 'SUAVE.Components.Energy.Processes.Thrust', ([], {}), '()\n', (17458, 17460), False, 'import SUAVE\n'), ((17860, 17908), 'SUAVE.Methods.Propulsion.turbofan_sizing.turbofan_sizing', 'turbofan_sizing', (['turbofan', 'mach_number', 'altitude'], {}), '(turbofan, mach_number, altitude)\n', (17875, 17908), False, 'from SUAVE.Methods.Propulsion.turbofan_sizing import turbofan_sizing\n'), ((18234, 18278), 'SUAVE.Components.Landing_Gear.Landing_Gear', 'SUAVE.Components.Landing_Gear.Landing_Gear', ([], {}), '()\n', (18276, 18278), False, 'import SUAVE\n'), ((18312, 18349), 'SUAVE.Components.Physical_Component', 'SUAVE.Components.Physical_Component', ([], {}), '()\n', (18347, 18349), False, 'import SUAVE\n'), ((18383, 18420), 'SUAVE.Components.Physical_Component', 'SUAVE.Components.Physical_Component', ([], {}), '()\n', (18418, 18420), False, 'import SUAVE\n'), ((18454, 18500), 'SUAVE.Components.Energy.Peripherals.Avionics', 'SUAVE.Components.Energy.Peripherals.Avionics', ([], {}), '()\n', (18498, 18500), False, 'import SUAVE\n'), ((18534, 18571), 'SUAVE.Components.Physical_Component', 'SUAVE.Components.Physical_Component', ([], {}), '()\n', (18569, 18571), False, 'import SUAVE\n'), ((18605, 18642), 
'SUAVE.Components.Physical_Component', 'SUAVE.Components.Physical_Component', ([], {}), '()\n', (18640, 18642), False, 'import SUAVE\n'), ((18676, 18713), 'SUAVE.Components.Physical_Component', 'SUAVE.Components.Physical_Component', ([], {}), '()\n', (18711, 18713), False, 'import SUAVE\n'), ((18747, 18784), 'SUAVE.Components.Physical_Component', 'SUAVE.Components.Physical_Component', ([], {}), '()\n', (18782, 18784), False, 'import SUAVE\n'), ((18818, 18855), 'SUAVE.Components.Physical_Component', 'SUAVE.Components.Physical_Component', ([], {}), '()\n', (18853, 18855), False, 'import SUAVE\n'), ((18889, 18926), 'SUAVE.Components.Physical_Component', 'SUAVE.Components.Physical_Component', ([], {}), '()\n', (18924, 18926), False, 'import SUAVE\n'), ((18960, 18997), 'SUAVE.Components.Physical_Component', 'SUAVE.Components.Physical_Component', ([], {}), '()\n', (18995, 18997), False, 'import SUAVE\n'), ((19049, 19086), 'SUAVE.Components.Physical_Component', 'SUAVE.Components.Physical_Component', ([], {}), '()\n', (19084, 19086), False, 'import SUAVE\n'), ((19698, 19741), 'SUAVE.Components.Configs.Config.Container', 'SUAVE.Components.Configs.Config.Container', ([], {}), '()\n', (19739, 19741), False, 'import SUAVE\n'), ((19761, 19801), 'SUAVE.Components.Configs.Config', 'SUAVE.Components.Configs.Config', (['vehicle'], {}), '(vehicle)\n', (19792, 19801), False, 'import SUAVE\n'), ((20053, 20097), 'SUAVE.Components.Configs.Config', 'SUAVE.Components.Configs.Config', (['base_config'], {}), '(base_config)\n', (20084, 20097), False, 'import SUAVE\n'), ((20407, 20451), 'SUAVE.Components.Configs.Config', 'SUAVE.Components.Configs.Config', (['base_config'], {}), '(base_config)\n', (20438, 20451), False, 'import SUAVE\n'), ((20754, 20798), 'SUAVE.Components.Configs.Config', 'SUAVE.Components.Configs.Config', (['base_config'], {}), '(base_config)\n', (20785, 20798), False, 'import SUAVE\n'), ((21238, 21282), 'SUAVE.Components.Configs.Config', 'SUAVE.Components.Configs.Config', 
(['base_config'], {}), '(base_config)\n', (21269, 21282), False, 'import SUAVE\n'), ((21741, 21785), 'SUAVE.Components.Configs.Config', 'SUAVE.Components.Configs.Config', (['base_config'], {}), '(base_config)\n', (21772, 21785), False, 'import SUAVE\n'), ((23341, 23385), 'SUAVE.Analyses.Mission.Sequential_Segments', 'SUAVE.Analyses.Mission.Sequential_Segments', ([], {}), '()\n', (23383, 23385), False, 'import SUAVE\n'), ((23446, 23481), 'SUAVE.Attributes.Airports.Airport', 'SUAVE.Attributes.Airports.Airport', ([], {}), '()\n', (23479, 23481), False, 'import SUAVE\n'), ((23580, 23625), 'SUAVE.Analyses.Atmospheric.US_Standard_1976', 'SUAVE.Analyses.Atmospheric.US_Standard_1976', ([], {}), '()\n', (23623, 23625), False, 'import SUAVE\n'), ((23811, 23864), 'SUAVE.Attributes.Atmospheres.Earth.US_Standard_1976', 'SUAVE.Attributes.Atmospheres.Earth.US_Standard_1976', ([], {}), '()\n', (23862, 23864), False, 'import SUAVE\n'), ((23878, 23910), 'SUAVE.Attributes.Planets.Earth', 'SUAVE.Attributes.Planets.Earth', ([], {}), '()\n', (23908, 23910), False, 'import SUAVE\n'), ((33135, 33177), 'SUAVE.Analyses.Mission.Mission.Container', 'SUAVE.Analyses.Mission.Mission.Container', ([], {}), '()\n', (33175, 33177), False, 'import SUAVE\n'), ((33847, 33895), 'pylab.figure', 'plt.figure', (['"""Aerodynamic Forces"""'], {'figsize': '(8, 6)'}), "('Aerodynamic Forces', figsize=(8, 6))\n", (33857, 33895), True, 'import pylab as plt\n'), ((34784, 34839), 'pylab.figure', 'plt.figure', (['"""Aerodynamic Coefficients"""'], {'figsize': '(8, 10)'}), "('Aerodynamic Coefficients', figsize=(8, 10))\n", (34794, 34839), True, 'import pylab as plt\n'), ((35972, 36018), 'pylab.figure', 'plt.figure', (['"""Drag Components"""'], {'figsize': '(8, 10)'}), "('Drag Components', figsize=(8, 10))\n", (35982, 36018), True, 'import pylab as plt\n'), ((36028, 36037), 'pylab.gca', 'plt.gca', ([], {}), '()\n', (36035, 36037), True, 'import pylab as plt\n'), ((37296, 37324), 'pylab.savefig', 'plt.savefig', 
(['"""B737_drag.pdf"""'], {}), "('B737_drag.pdf')\n", (37307, 37324), True, 'import pylab as plt\n'), ((37329, 37357), 'pylab.savefig', 'plt.savefig', (['"""B737_drag.png"""'], {}), "('B737_drag.png')\n", (37340, 37357), True, 'import pylab as plt\n'), ((37554, 37604), 'pylab.figure', 'plt.figure', (['"""Altitude_sfc_weight"""'], {'figsize': '(8, 10)'}), "('Altitude_sfc_weight', figsize=(8, 10))\n", (37564, 37604), True, 'import pylab as plt\n'), ((38963, 39004), 'pylab.figure', 'plt.figure', (['"""Velocities"""'], {'figsize': '(8, 10)'}), "('Velocities', figsize=(8, 10))\n", (38973, 39004), True, 'import pylab as plt\n'), ((40369, 40379), 'pylab.show', 'plt.show', ([], {}), '()\n', (40377, 40379), True, 'import pylab as plt\n'), ((34534, 34564), 'pylab.savefig', 'plt.savefig', (['"""B737_engine.pdf"""'], {}), "('B737_engine.pdf')\n", (34545, 34564), True, 'import pylab as plt\n'), ((34573, 34603), 'pylab.savefig', 'plt.savefig', (['"""B737_engine.png"""'], {}), "('B737_engine.png')\n", (34584, 34603), True, 'import pylab as plt\n'), ((35726, 35754), 'pylab.savefig', 'plt.savefig', (['"""B737_aero.pdf"""'], {}), "('B737_aero.pdf')\n", (35737, 35754), True, 'import pylab as plt\n'), ((35763, 35791), 'pylab.savefig', 'plt.savefig', (['"""B737_aero.png"""'], {}), "('B737_aero.png')\n", (35774, 35791), True, 'import pylab as plt\n'), ((38707, 38738), 'pylab.savefig', 'plt.savefig', (['"""B737_mission.pdf"""'], {}), "('B737_mission.pdf')\n", (38718, 38738), True, 'import pylab as plt\n'), ((38747, 38778), 'pylab.savefig', 'plt.savefig', (['"""B737_mission.png"""'], {}), "('B737_mission.png')\n", (38758, 38778), True, 'import pylab as plt\n'), ((39590, 39614), 'numpy.sqrt', 'np.sqrt', (['(density / 1.225)'], {}), '(density / 1.225)\n', (39597, 39614), True, 'import numpy as np\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contain the speech perturbation augmentation model."""
import numpy as np
from deepspeech.frontend.augmentor.base import AugmentorBase
class SpeedPerturbAugmentor(AugmentorBase):
    """Augmentation model for adding speed perturbation."""

    def __init__(self, rng, min_speed_rate=0.9, max_speed_rate=1.1,
                 num_rates=3):
        """Speed perturbation.
        The speed perturbation in kaldi uses sox-speed instead of sox-tempo,
        and sox-speed just resamples the input,
        i.e. pitch and tempo are changed both.
        "Why use speed option instead of tempo -s in SoX for speed perturbation"
        https://groups.google.com/forum/#!topic/kaldi-help/8OOG7eE4sZ8
        Sox speed:
        https://pysox.readthedocs.io/en/latest/api.html#sox.transform.Transformer
        See reference paper here:
        http://www.danielpovey.com/files/2015_interspeech_augmentation.pdf
        Espnet:
        https://espnet.github.io/espnet/_modules/espnet/transform/perturb.html
        Nemo:
        https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/asr/parts/perturb.py#L92
        Args:
            rng (random.Random): Random generator object.
            min_speed_rate (float): Lower bound of new speed rate to sample and should
                not be smaller than 0.9.
            max_speed_rate (float): Upper bound of new speed rate to sample and should
                not be larger than 1.1.
            num_rates (int, optional): Number of discrete rates to allow.
                Can be a positive or negative integer. Defaults to 3.
                If a positive integer greater than 0 is provided, the range of
                speed rates will be discretized into `num_rates` values.
                If a negative integer or 0 is provided, the full range of speed rates
                will be sampled uniformly.
                Note: If a positive integer is provided and the resultant discretized
                range of rates contains the value '1.0', then those samples with rate=1.0,
                will not be augmented at all and simply skipped. This is to avoid
                unnecessary augmentation and increase of computation time. Effective
                augmentation chance in such a case is
                `prob * (num_rates - 1) / num_rates * 100`% chance
                where `prob` is the global probability of a sample being augmented.
        Raises:
            ValueError: when speed_rate error
        """
        if min_speed_rate < 0.9:
            raise ValueError(
                "Sampling speed below 0.9 can cause unnatural effects")
        if max_speed_rate > 1.1:
            raise ValueError(
                "Sampling speed above 1.1 can cause unnatural effects")
        self._min_rate = min_speed_rate
        self._max_rate = max_speed_rate
        self._rng = rng
        self._num_rates = num_rates
        # Discrete rate grid is only built for a positive rate count; for
        # num_rates <= 0 the rate is drawn uniformly at transform time.
        if num_rates > 0:
            self._rates = np.linspace(
                self._min_rate, self._max_rate, self._num_rates, endpoint=True)

    def __call__(self, x, uttid=None, train=True):
        """Apply the perturbation in-place; no-op unless `train` is True."""
        if not train:
            return
        self.transform_audio(x)

    def transform_audio(self, audio_segment):
        """Sample a new speed rate from the given range and
        changes the speed of the given audio clip.
        Note that this is an in-place transformation.
        :param audio_segment: Audio segment to add effects to.
        :type audio_segment: AudioSegment|SpeechSegment
        """
        # BUG FIX: was `self._num_rates < 0`, which sent num_rates == 0 into
        # the `choice(self._rates)` branch even though `self._rates` is only
        # created for num_rates > 0 (AttributeError).  The documented contract
        # is uniform sampling for zero *or* negative, so use `<= 0` to match
        # the `num_rates > 0` condition in __init__.
        if self._num_rates <= 0:
            speed_rate = self._rng.uniform(self._min_rate, self._max_rate)
        else:
            speed_rate = self._rng.choice(self._rates)
        # Skip perturbation in case of identity speed rate
        if speed_rate == 1.0:
            return
        audio_segment.change_speed(speed_rate)
| [
"numpy.linspace"
] | [((3580, 3655), 'numpy.linspace', 'np.linspace', (['self._min_rate', 'self._max_rate', 'self._num_rates'], {'endpoint': '(True)'}), '(self._min_rate, self._max_rate, self._num_rates, endpoint=True)\n', (3591, 3655), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Remade on Sun 30 May 2021 22:32
@author: <NAME> - <EMAIL>
"""
#%%
# =============================================================================
# Dependencies
# =============================================================================
## Importing modules
import xarray as xr
import numpy as np
# import regionmask
# import geopandas as gpd
import datetime
import pandas as pd
# import matplotlib.pyplot as plt
# import os.path
# Select the years to run
# Weather years to process: the continuous ERA5 period 1979-2020.
# (The 1950-1978 back-extension years were deliberately excluded from
# this run, as in the original hand-written list.)
years = np.array([str(y) for y in range(1979, 2021)])
# Versions of the TYNDP
# TYNDP capacity-distribution scenarios to apply:
# DE = Distributed Energy, GA = Global Ambition, NT = National Trends,
# each suffixed with its target year.
CD_TYNDP_input = np.array(
    ['DE_2030', 'DE_2040',
     'GA_2030', 'GA_2040',
     'NT_2025', 'NT_2030', 'NT_2040'])
# Set the path for the data
PATH_TO_TYNDP = '/media/DataStager1/Other/CapacityDistribution/TYNDP/Originv3/'
# Read NetCDF
FOLDER_ERA5_CF_NUTS0 = '/media/DataStager2/ERA5-EU_CF/MarketZones/'
FOLDER_EV_NUTS0 = '/media/DataStager2/ERA5-EU_EV/MarketZones/'
print('NOTIFY: Initialization is complete, Skynet active')
#%%
# =============================================================================
# Load in the MarketZone data
# =============================================================================
# Set the year to run over
# One pass per weather year: load that year's capacity-factor files once,
# then apply every TYNDP capacity scenario to them.
for year in years:
    print('NOTIFY: Working on year '+year+'!')
    # Load the per-technology capacity-factor NetCDFs (one file per year,
    # variables are named after the market zones)
    ds_cf_solar = xr.open_dataset(FOLDER_ERA5_CF_NUTS0+'ERA5-EU_CF-MarketZones_solar_'+str(year)+'.nc') #, chunks = {'time': 8760})
    ds_cf_windoff = xr.open_dataset(FOLDER_ERA5_CF_NUTS0+'ERA5-EU_CF-MarketZones_windoff_'+str(year)+'.nc') #, chunks = {'time': 8760})
    ds_cf_windon = xr.open_dataset(FOLDER_ERA5_CF_NUTS0+'ERA5-EU_CF-MarketZones_windon_'+str(year)+'.nc') #, chunks = {'time': 8760})
    #%%
    # =============================================================================
    # Load in the datafiles with the capacity distributions
    # =============================================================================
    for capacity_distribution in CD_TYNDP_input:
        print('NOTIFY: Working on Distribution '+capacity_distribution+'!')
        # Read in the Capacity Distribution from the TYNDP
        df_cd_tyndp = pd.read_csv(PATH_TO_TYNDP+'TYNDP-'+capacity_distribution+'.csv' )
        # Index by country code ...
        df_cd_tyndp = df_cd_tyndp.set_index('Country')
        # ... and transpose so rows = market zones, columns = technologies
        df_cd_tyndp = df_cd_tyndp.transpose()
        #%%
        # =============================================================================
        # Multiply the capacity distribution with the capacity factor
        # =============================================================================
        # Fresh output datasets: energy = installed capacity * capacity factor
        ds_ev_solar = xr.Dataset()
        ds_ev_windoff = xr.Dataset()
        ds_ev_windon = xr.Dataset()
        # Loop over the market zones present in this scenario
        for country in df_cd_tyndp.index:
            #%% working on solar
            # Define the capacity installed
            country_cap_distr_solar = df_cd_tyndp.loc[country].loc['Solar PV']
            # If this capacity is not defined, do not calculate
            if country_cap_distr_solar.size == 0 or country_cap_distr_solar == 0:
                print('There is no solar capacity for '+country)
            # Fix the Greek names: TYNDP uses 'EL', the CF data uses ISO 'GR'
            elif country == 'EL00':
                ds_ev_solar[country] = country_cap_distr_solar * ds_cf_solar['GR00']
            elif country == 'EL03':
                ds_ev_solar[country] = country_cap_distr_solar * ds_cf_solar['GR03']
            # Fix Luxembourg: TYNDP labels it LUG1 (also LUF/LUB exist), CF data uses LU00
            elif country == 'LUG1':
                ds_ev_solar['LU00'] = df_cd_tyndp.loc['LUG1'].loc['Solar PV'] * ds_cf_solar['LU00']
            # Regular zones: apply the solar capacity distribution directly
            else:
                ds_ev_solar[country] = country_cap_distr_solar * ds_cf_solar[country]
            #%% working on onshore wind
            # Define the capacity installed
            country_cap_distr_windon = df_cd_tyndp.loc[country].loc['Onshore Wind']
            # If this capacity is not defined, do not calculate
            if country_cap_distr_windon.size == 0 or country_cap_distr_windon == 0:
                print('There is no onshore wind capacity for '+country)
            # Fix the Greek names to international standard
            elif country == 'EL00':
                ds_ev_windon[country] = country_cap_distr_windon * ds_cf_windon['GR00']
            elif country == 'EL03':
                ds_ev_windon[country] = country_cap_distr_windon * ds_cf_windon['GR03']
            # Fix Luxembourg (see solar branch above)
            elif country == 'LUG1':
                ds_ev_windon['LU00'] = df_cd_tyndp.loc['LUG1'].loc['Onshore Wind'] * ds_cf_windon['LU00']
            # Regular zones: apply the onshore-wind capacity distribution
            else:
                ds_ev_windon[country] = country_cap_distr_windon * ds_cf_windon[country]
            #%% working on offshore wind
            # Define the capacity installed
            country_cap_distr_windoff = df_cd_tyndp.loc[country].loc['Offshore Wind']
            # If this capacity is not defined, do not calculate
            if country_cap_distr_windoff.size == 0 or country_cap_distr_windoff == 0:
                print('There is no offshore capacity for '+country)
            # Small eastern Danish region maps onto the German offshore zone
            elif country == 'DEKF':
                ds_ev_windoff[country] = country_cap_distr_windoff * ds_cf_windoff['DE00_OFF']
            # Fix the Greek names to international standard
            elif country == 'EL00':
                ds_ev_windoff[country] = country_cap_distr_windoff * ds_cf_windoff['GR00_OFF']
            elif country == 'EL03':
                # NOTE(review): EL03 offshore maps to 'GR00_OFF' (not GR03_OFF) --
                # presumably only one Greek offshore CF zone exists; confirm.
                ds_ev_windoff[country] = country_cap_distr_windoff * ds_cf_windoff['GR00_OFF']
            # Regular zones use the '<zone>_OFF' naming in the CF file
            else:
                ds_ev_windoff[country] = country_cap_distr_windoff * ds_cf_windoff[country+'_OFF']
        #%%
        # =============================================================================
        # Time to save the data
        # =============================================================================
        # Setting the general dataset attributes
        ds_ev_windoff.attrs.update(
            author = '<NAME>/KNMI/TenneT',
            variables = 'Wind offshore electricity generation',
            units = 'MWh',
            created = datetime.datetime.today().strftime('%d-%m-%Y'),
            region_definition = 'ENTSO-E MarketZones',
            CapacityDistribution = 'TYNDP-'+capacity_distribution,
            data_source = 'Energy production variables based on TYNDP scenarios and ERA5 reanalysis data, contains modified Copernicus Climate Change Service information [31-05-2021]'
        )
        # Copy most attributes and update the 'variables' entry per dataset.
        # NOTE(review): this relies on the xarray attrs setter copying the
        # dict; if it aliased instead, the updates below would clobber the
        # windoff 'variables' entry too -- confirm for the xarray version in use.
        ds_ev_windon.attrs = ds_ev_windoff.attrs
        ds_ev_windon.attrs.update(
            variables = 'Wind onshore electricity generation',
        )
        ds_ev_solar.attrs = ds_ev_windoff.attrs
        ds_ev_solar.attrs.update(
            variables = 'Solar PV electricity generation',
        )
        # NetCDF output kept for reference but disabled; CSV is written instead.
        # ds_ev_windoff.to_netcdf(FOLDER_EV_NUTS0+capacity_distribution+'/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_WOF_'+str(year)+'.nc', encoding={'time':{'units':'days since 1900-01-01'}})
        # ds_ev_windon.to_netcdf(FOLDER_EV_NUTS0+capacity_distribution+'/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_WON_'+str(year)+'.nc', encoding={'time':{'units':'days since 1900-01-01'}})
        # ds_ev_solar.to_netcdf(FOLDER_EV_NUTS0+capacity_distribution+'/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_SPV_'+str(year)+'.nc', encoding={'time':{'units':'days since 1900-01-01'}})
        # Converting to pandas (rows = time, columns = market zones)
        df_windoff = ds_ev_windoff.to_pandas()
        df_windon = ds_ev_windon.to_pandas()
        df_solar = ds_ev_solar.to_pandas()
        # Saving as CSV, one file per technology / scenario / year
        df_windoff.to_csv(FOLDER_EV_NUTS0+capacity_distribution+'/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_WOF_'+str(year)+'.csv')
        df_windon.to_csv(FOLDER_EV_NUTS0+capacity_distribution+'/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_WON_'+str(year)+'.csv')
        df_solar.to_csv(FOLDER_EV_NUTS0+capacity_distribution+'/ERA5-EU_EV_TYNDP-'+capacity_distribution+'_SPV_'+str(year)+'.csv')
| [
"datetime.datetime.today",
"numpy.array",
"pandas.read_csv",
"xarray.Dataset"
] | [((527, 889), 'numpy.array', 'np.array', (["['1979', '1980', '1981', '1982', '1983', '1984', '1985', '1986', '1987',\n '1988', '1989', '1990', '1991', '1992', '1993', '1994', '1995', '1996',\n '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004', '2005',\n '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014',\n '2015', '2016', '2017', '2018', '2019', '2020']"], {}), "(['1979', '1980', '1981', '1982', '1983', '1984', '1985', '1986',\n '1987', '1988', '1989', '1990', '1991', '1992', '1993', '1994', '1995',\n '1996', '1997', '1998', '1999', '2000', '2001', '2002', '2003', '2004',\n '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013',\n '2014', '2015', '2016', '2017', '2018', '2019', '2020'])\n", (535, 889), True, 'import numpy as np\n'), ((1506, 1597), 'numpy.array', 'np.array', (["['DE_2030', 'DE_2040', 'GA_2030', 'GA_2040', 'NT_2025', 'NT_2030', 'NT_2040']"], {}), "(['DE_2030', 'DE_2040', 'GA_2030', 'GA_2040', 'NT_2025', 'NT_2030',\n 'NT_2040'])\n", (1514, 1597), True, 'import numpy as np\n'), ((3315, 3385), 'pandas.read_csv', 'pd.read_csv', (["(PATH_TO_TYNDP + 'TYNDP-' + capacity_distribution + '.csv')"], {}), "(PATH_TO_TYNDP + 'TYNDP-' + capacity_distribution + '.csv')\n", (3326, 3385), True, 'import pandas as pd\n'), ((4005, 4017), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (4015, 4017), True, 'import xarray as xr\n'), ((4042, 4054), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (4052, 4054), True, 'import xarray as xr\n'), ((4078, 4090), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (4088, 4090), True, 'import xarray as xr\n'), ((8268, 8293), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (8291, 8293), False, 'import datetime\n')] |
"""
Mask R-CNN
Copyright (c) 2018 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
------------------------------------------------------------
Minor Changes applied from <NAME>
Applied for:
IEEE Intelligent Vehicles Symposium - IV2020:
"Vehicle Position Estimation with Aerial Imagery from Unmanned Aerial Vehicles"
USAGE: Run from Matlab File or directly via:
python vehDetection_evaluate.py evaluation --dataset=C:\Mask_RCNN\datasets\vehDetection --subset=evaluation --weights=C:\Mask_RCNN\logs\mask_rcnn_car_0300_onlyA4_196img_191222.h5
"""
# Set matplotlib backend
# This has to be done before other importa that might
# set it, but only if we're running in script mode
# rather than being imported.
if __name__ == '__main__':
import matplotlib
# Agg backend runs without a display
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
from imgaug import augmenters as iaa
from pdb import set_trace as bp # bp() -->BreakPoint
from scipy.io import savemat
import math
import cv2
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
from mrcnn import visualize
# Import MISC
from numba import jit
import os.path
import matplotlib.patches as patches
from skimage import io
from sklearn.utils.linear_assignment_ import linear_assignment
import argparse
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
# Results directory
# Save submission files here
RESULTS_DIR = os.path.join(ROOT_DIR, "results/")
############################################################
# Configurations
############################################################
class vehDetectionConfig(Config):
    """Configuration for training on the vehDetection dataset.
    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name
    NAME = "car"

    # We use a GPU with 12GB memory, which can fit two images.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 2

    # Number of classes (including background)
    NUM_CLASSES = 1 + 1  # Background + car

    # Number of training steps per epoch
    STEPS_PER_EPOCH = 188  # 377 without satellite images labeled images on 27nd Aug 2019

    # use small validation steps since the epoch is small
    VALIDATION_STEPS = 7  # 15 labeled images without Satellite on 27nd Aug 2019

    # Length of square anchor side in pixels
    RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)

    # Ratios of anchors at each cell (width/height)
    # A value of 1 represents a square anchor, and 0.5 is a wide anchor
    RPN_ANCHOR_RATIOS = [0.5, 1, 2]

    # If enabled, resizes instance masks to a smaller size to reduce
    # memory load. Recommended when using high-resolution images.
    USE_MINI_MASK = True  # False
    MINI_MASK_SHAPE = (224, 224)  # (height, width) of the mini-mask

    # Input image resizing: pad/resize towards the 1920x1080 source resolution
    IMAGE_RESIZE_MODE = "square"
    IMAGE_MIN_DIM = 1080
    IMAGE_MAX_DIM = 1920

    TRAIN_ROIS_PER_IMAGE = 200  # 300

    # NOTE(review): the mrcnn base Config defines MAX_GT_INSTANCES, not
    # MAX_GT_IMAGE, so this attribute looks like a typo and is currently a
    # no-op.  Not renamed here because doing so would change the effective
    # GT-instance cap from the library default -- confirm intent first.
    MAX_GT_IMAGE = 50  # parking lot around 125

    # ROIs kept after tf.nn.top_k and before non-maximum suppression
    PRE_NMS_LIMIT = 1000  # high impact on detection time

    # Anchor stride
    # If 1 then anchors are created for each cell in the backbone feature map.
    # If 2, then anchors are created for every other cell, and so on.
    RPN_ANCHOR_STRIDE = 1

    # ROIs kept after non-maximum suppression (training and inference)
    POST_NMS_ROIS_TRAINING = 3000
    POST_NMS_ROIS_INFERENCE = 300

    # Max number of final detections
    DETECTION_MAX_INSTANCES = 50

    # Minimum probability value to accept a detected instance;
    # ROIs below this threshold are skipped.
    # BUG FIX: this attribute was previously assigned twice (0.9 near the
    # top of the class, then 0.5 here).  Only the last assignment took
    # effect, so the duplicate was removed and the effective value 0.5 kept.
    DETECTION_MIN_CONFIDENCE = 0.5
############################################################
# Dataset
############################################################
class vehDetectionDataset(utils.Dataset):
    """Dataset wrapper for the vehicle-detection data annotated with the
    VGG Image Annotator (VIA). Exposes loaders for detection-only subsets,
    annotated train/val subsets, and annotated evaluation subsets."""

    def load_detect(self, dataset_dir, subset):
        """Register images of a subset for detection (no annotations needed).
        dataset_dir: Root directory of the dataset
        subset: Subset to load. The folder name is defined in the Matlab Script.
        """
        # Exactly one foreground class: "car"
        self.add_class("car", 1, "car")
        # Which subset?
        dataset_dir = os.path.join(dataset_dir, subset)
        # Image ids are the file names inside the subset directory
        image_ids = next(os.walk(dataset_dir))[2]
        image_ids.sort()  # sort so successive frames are processed in order
        for image_id in image_ids:
            self.add_image(
                "car",
                image_id=image_id,
                path=os.path.join(dataset_dir, image_id))

    def _load_via_annotations(self, dataset_dir, subset):
        """Shared loader for subsets annotated with the VGG Image Annotator.

        VIA (up to version 1.6) stores each image as
        { 'filename': ..., 'regions': {...} (1.x) or [...] (2.x), 'size': ... };
        only the polygon x/y coordinates of each region are needed here.
        This helper deduplicates the former copy-pasted bodies of
        load_vehDetection and load_evaluation.
        """
        self.add_class("car", 1, "car")
        dataset_dir = os.path.join(dataset_dir, subset)
        # BUGFIX: close the annotation file deterministically (it used to be
        # opened inline and never closed).
        with open(os.path.join(dataset_dir, "via_region_data.json")) as f:
            annotations = json.load(f)
        annotations = list(annotations.values())  # don't need the dict keys
        # VIA saves entries even for images without any annotations; skip those.
        annotations = [a for a in annotations if a['regions']]
        for a in annotations:
            # 'regions' is a dict in VIA 1.x and a list in VIA 2.x.
            if type(a['regions']) is dict:
                polygons = [r['shape_attributes'] for r in a['regions'].values()]
            else:
                polygons = [r['shape_attributes'] for r in a['regions']]
            # VIA does not store the image size, so read the image to get it.
            # This is only manageable because the dataset is tiny.
            image_path = os.path.join(dataset_dir, a['filename'])
            image = skimage.io.imread(image_path)
            height, width = image.shape[:2]
            self.add_image(
                "car",
                image_id=a['filename'],  # file name doubles as a unique image id
                path=image_path,
                width=width, height=height,
                polygons=polygons)

    def load_vehDetection(self, dataset_dir, subset):
        """Load an annotated training or validation subset."""
        # Train or validation dataset?
        assert subset in ["train", "val"]
        self._load_via_annotations(dataset_dir, subset)

    def load_evaluation(self, dataset_dir, subset):
        """Load an annotated evaluation subset (same VIA format as training)."""
        self._load_via_annotations(dataset_dir, subset)

    def load_mask(self, image_id):
        """Generate instance masks for an image.
        Returns:
         masks: A bool array of shape [height, width, instance count] with
             one mask per instance.
         class_ids: a 1D array of class IDs of the instance masks.
        """
        # If not a vehDetection dataset image, delegate to parent class.
        image_info = self.image_info[image_id]
        if image_info["source"] != "car":
            # Name the class explicitly: super(self.__class__, ...) recurses
            # forever if this class is ever subclassed.
            return super(vehDetectionDataset, self).load_mask(image_id)
        # Convert polygons to a bitmap mask of shape [height, width, instance_count]
        info = self.image_info[image_id]
        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
                        dtype=np.uint8)
        for i, p in enumerate(info["polygons"]):
            # Get indexes of pixels inside the polygon and set them to 1
            rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
            mask[rr, cc, i] = 1
        # One class only, so the class-id array is all ones.
        # BUGFIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the documented replacement.
        return mask.astype(bool), np.ones([mask.shape[-1]], dtype=np.int32)

    def image_reference(self, image_id):
        """Return the path of the image."""
        info = self.image_info[image_id]
        if info["source"] == "car":
            return info["path"]
        else:
            # BUGFIX: the parent's reference was computed but never returned.
            return super(vehDetectionDataset, self).image_reference(image_id)
############################################################
# Evaluation
############################################################
def evaluation(model, dataset_dir, subset):
    """Run detection on an evaluation subset and report AP metrics.

    For every image in the subset this computes AP at IoU 0.5 and the AP
    averaged over IoU 0.5:0.05:0.95, saves a visualization of the predicted
    instances, and finally dumps all metrics to a .mat file.

    :param model: a Mask R-CNN model created in inference mode
    :param dataset_dir: root directory of the dataset
    :param subset: name of the subset folder to evaluate on
    """
    # Create a timestamped output directory under RESULTS_DIR
    if not os.path.exists(RESULTS_DIR):
        os.makedirs(RESULTS_DIR)
    submit_dir = "submit_{:%Y%m%dT%H%M%S}".format(datetime.datetime.now())
    submit_dir = os.path.join(RESULTS_DIR, submit_dir)
    os.makedirs(submit_dir)
    # Read dataset
    dataset_val = vehDetectionDataset()
    dataset_val.load_evaluation(dataset_dir, subset)
    dataset_val.prepare()
    numImages_eval = dataset_val.image_ids.size
    # Per-image accumulators, later saved into one Matlab file
    images_eval = []
    mAP_all = []
    mAP_all_range = []
    precisions_all = []
    recalls_all = []
    overlaps_all = []
    for image_id in range(numImages_eval):
        print(image_id)
        source_id = dataset_val.image_info[image_id]["id"]
        print(source_id)
        # Load image and ground truth data
        # NOTE: `config` is a module-level global set up in __main__
        image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset_val, config,
                                   image_id, use_mini_mask=False)
        molded_images = np.expand_dims(modellib.mold_image(image, config), 0)
        # Run object detection
        results = model.detect([image], verbose=0)
        r = results[0]
        # Compute AP at IoU 0.5
        mAP, precisions, recalls, overlaps = \
            utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                             r["rois"], r["class_ids"], r["scores"], r['masks'])
        # Compute AP averaged over the IoU range 0.5:0.05:0.95
        mAP_range = \
            utils.compute_ap_range(gt_bbox, gt_class_id, gt_mask,
                                   r["rois"], r["class_ids"], r["scores"], r['masks'], iou_thresholds=None, verbose=1)
        # Append results from this image
        mAP_all.append(mAP)
        mAP_all_range.append(mAP_range)
        images_eval.append(source_id)
        precisions_all.append(precisions)
        recalls_all.append(recalls)
        overlaps_all.append(overlaps)
        # Save image with shape polygon around vehicles; bbox and mask
        # rendering are disabled below and can be re-enabled if needed.
        visualize.display_instances(
            image, r['rois'], r['masks'], r['class_ids'],
            dataset_val.class_names, scores=None,  # r['scores'],
            show_bbox=False, show_mask=False,
            colors=None,
            figsize=(19.20, 10.80))  # can also add title="Predictions"
        plt.box(on=None)  # plt.box(False)
        plt.savefig(os.path.join(submit_dir, dataset_val.image_info[image_id]["id"]))
        plt.close()  # plt.clf()
    print("Evaluation Process with Mask-RCNN Done. Files saved to ", submit_dir)
    print("mAP: ", np.mean(mAP_all))
    # BUGFIX: the output path used to be built with string concatenation and a
    # literal backslash ("\e..."), which produces a broken filename on POSIX
    # and a SyntaxWarning (invalid escape sequence) on Python >= 3.12.
    saveMatFileName = os.path.join(submit_dir, "evaluation_maskrcnn_output.mat")
    savemat(saveMatFileName,
            {"mAP_all": mAP_all, "mAP_all_range": mAP_all_range, "images_eval": images_eval, "precisions_all": precisions_all, "recalls_all": recalls_all, "overlaps_all": overlaps_all})
############################################################
# Training
############################################################
if __name__ == '__main__':
    import argparse
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN to detect vehicles.')
    # BUGFIX: the CLI actually supports three commands ('train', 'detect',
    # 'evaluation'); the help/error strings previously omitted 'evaluation'.
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train', 'detect' or 'evaluation'")
    parser.add_argument('--dataset', required=False,
                        metavar="/path/to/dataset/",
                        help='Directory of the dataset')
    parser.add_argument('--weights', required=True,
                        metavar="/path/to/weights.h5",
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--subset', required=False,
                        metavar="Dataset sub-directory",
                        help="Subset of dataset to run prediction on")
    args = parser.parse_args()
    # Validate arguments
    if args.command == "train":
        assert args.dataset, "Argument --dataset is required for training"
    print("Weights: ", args.weights)
    print("Dataset: ", args.dataset)
    print("Logs: ", args.logs)
    # Configurations: full config for training, batch-size-1 variant otherwise
    if args.command == "train":
        config = vehDetectionConfig()
    else:
        class InferenceConfig(vehDetectionConfig):
            # Set batch size to 1 since we'll be running inference on
            # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
        config = InferenceConfig()
    config.display()
    # Create model
    if args.command == "train":
        model = modellib.MaskRCNN(mode="training", config=config,
                                  model_dir=args.logs)
    else:
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.logs)
    # Select weights file to load
    if args.weights.lower() == "coco":
        weights_path = COCO_WEIGHTS_PATH
        # Download weights file if it is not present locally
        if not os.path.exists(weights_path):
            utils.download_trained_weights(weights_path)
    elif args.weights.lower() == "last":
        # Find last trained weights
        weights_path = model.find_last()
    elif args.weights.lower() == "imagenet":
        # Start from ImageNet trained weights
        weights_path = model.get_imagenet_weights()
    else:
        weights_path = args.weights
    # Load weights
    print("Loading weights ", weights_path)
    if args.weights.lower() == "coco":
        # Exclude the last layers because they require a matching
        # number of classes
        model.load_weights(weights_path, by_name=True, exclude=[
            "mrcnn_class_logits", "mrcnn_bbox_fc",
            "mrcnn_bbox", "mrcnn_mask"])
    else:
        model.load_weights(weights_path, by_name=True)
    # Dispatch on the requested command
    if args.command == "train":
        train(model)
    elif args.command == "detect":
        detect(model, args.dataset, args.subset)
    elif args.command == "evaluation":
        evaluation(model, args.dataset, args.subset)
    else:
        print("'{}' is not recognized. "
              "Use 'train', 'detect' or 'evaluation'".format(args.command))
"mrcnn.model.MaskRCNN",
"scipy.io.savemat",
"mrcnn.utils.download_trained_weights",
"mrcnn.visualize.display_instances",
"sys.path.append",
"os.walk",
"os.path.exists",
"numpy.mean",
"mrcnn.model.mold_image",
"argparse.ArgumentParser",
"matplotlib.pyplot.close",
"mrcnn.utils.compute_ap_range",... | [((1186, 1211), 'os.path.abspath', 'os.path.abspath', (['"""../../"""'], {}), "('../../')\n", (1201, 1211), False, 'import os\n'), ((1232, 1257), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (1247, 1257), False, 'import sys\n'), ((1644, 1687), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""mask_rcnn_coco.h5"""'], {}), "(ROOT_DIR, 'mask_rcnn_coco.h5')\n", (1656, 1687), False, 'import os\n'), ((1815, 1845), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""logs"""'], {}), "(ROOT_DIR, 'logs')\n", (1827, 1845), False, 'import os\n'), ((1911, 1945), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""results/"""'], {}), "(ROOT_DIR, 'results/')\n", (1923, 1945), False, 'import os\n'), ((851, 872), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (865, 872), False, 'import matplotlib\n'), ((11575, 11612), 'os.path.join', 'os.path.join', (['RESULTS_DIR', 'submit_dir'], {}), '(RESULTS_DIR, submit_dir)\n', (11587, 11612), False, 'import os\n'), ((11617, 11640), 'os.makedirs', 'os.makedirs', (['submit_dir'], {}), '(submit_dir)\n', (11628, 11640), False, 'import os\n'), ((14066, 14272), 'scipy.io.savemat', 'savemat', (['saveMatFileName', "{'mAP_all': mAP_all, 'mAP_all_range': mAP_all_range, 'images_eval':\n images_eval, 'precisions_all': precisions_all, 'recalls_all':\n recalls_all, 'overlaps_all': overlaps_all}"], {}), "(saveMatFileName, {'mAP_all': mAP_all, 'mAP_all_range':\n mAP_all_range, 'images_eval': images_eval, 'precisions_all':\n precisions_all, 'recalls_all': recalls_all, 'overlaps_all': overlaps_all})\n", (14073, 14272), False, 'from scipy.io import savemat\n'), ((14510, 14585), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train Mask R-CNN to detect vehicles."""'}), "(description='Train Mask R-CNN to detect vehicles.')\n", (14533, 14585), False, 'import argparse\n'), ((4961, 4994), 'os.path.join', 'os.path.join', (['dataset_dir', 'subset'], {}), 
'(dataset_dir, subset)\n', (4973, 4994), False, 'import os\n'), ((5721, 5754), 'os.path.join', 'os.path.join', (['dataset_dir', 'subset'], {}), '(dataset_dir, subset)\n', (5733, 5754), False, 'import os\n'), ((8133, 8166), 'os.path.join', 'os.path.join', (['dataset_dir', 'subset'], {}), '(dataset_dir, subset)\n', (8145, 8166), False, 'import os\n'), ((11421, 11448), 'os.path.exists', 'os.path.exists', (['RESULTS_DIR'], {}), '(RESULTS_DIR)\n', (11435, 11448), False, 'import os\n'), ((11458, 11482), 'os.makedirs', 'os.makedirs', (['RESULTS_DIR'], {}), '(RESULTS_DIR)\n', (11469, 11482), False, 'import os\n'), ((11533, 11556), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11554, 11556), False, 'import datetime\n'), ((12267, 12341), 'mrcnn.model.load_image_gt', 'modellib.load_image_gt', (['dataset_val', 'config', 'image_id'], {'use_mini_mask': '(False)'}), '(dataset_val, config, image_id, use_mini_mask=False)\n', (12289, 12341), True, 'from mrcnn import model as modellib, utils\n'), ((12640, 12743), 'mrcnn.utils.compute_ap', 'utils.compute_ap', (['gt_bbox', 'gt_class_id', 'gt_mask', "r['rois']", "r['class_ids']", "r['scores']", "r['masks']"], {}), "(gt_bbox, gt_class_id, gt_mask, r['rois'], r['class_ids'],\n r['scores'], r['masks'])\n", (12656, 12743), False, 'from mrcnn import model as modellib, utils\n'), ((12847, 12989), 'mrcnn.utils.compute_ap_range', 'utils.compute_ap_range', (['gt_bbox', 'gt_class_id', 'gt_mask', "r['rois']", "r['class_ids']", "r['scores']", "r['masks']"], {'iou_thresholds': 'None', 'verbose': '(1)'}), "(gt_bbox, gt_class_id, gt_mask, r['rois'], r[\n 'class_ids'], r['scores'], r['masks'], iou_thresholds=None, verbose=1)\n", (12869, 12989), False, 'from mrcnn import model as modellib, utils\n'), ((13385, 13573), 'mrcnn.visualize.display_instances', 'visualize.display_instances', (['image', "r['rois']", "r['masks']", "r['class_ids']", 'dataset_val.class_names'], {'scores': 'None', 'show_bbox': '(False)', 'show_mask': 
'(False)', 'colors': 'None', 'figsize': '(19.2, 10.8)'}), "(image, r['rois'], r['masks'], r['class_ids'],\n dataset_val.class_names, scores=None, show_bbox=False, show_mask=False,\n colors=None, figsize=(19.2, 10.8))\n", (13412, 13573), False, 'from mrcnn import visualize\n'), ((13688, 13704), 'matplotlib.pyplot.box', 'plt.box', ([], {'on': 'None'}), '(on=None)\n', (13695, 13704), True, 'import matplotlib.pyplot as plt\n'), ((13816, 13827), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13825, 13827), True, 'import matplotlib.pyplot as plt\n'), ((13942, 13958), 'numpy.mean', 'np.mean', (['mAP_all'], {}), '(mAP_all)\n', (13949, 13958), True, 'import numpy as np\n'), ((16222, 16292), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""training"""', 'config': 'config', 'model_dir': 'args.logs'}), "(mode='training', config=config, model_dir=args.logs)\n", (16239, 16292), True, 'from mrcnn import model as modellib, utils\n'), ((16353, 16424), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'config': 'config', 'model_dir': 'args.logs'}), "(mode='inference', config=config, model_dir=args.logs)\n", (16370, 16424), True, 'from mrcnn import model as modellib, utils\n'), ((7586, 7626), 'os.path.join', 'os.path.join', (['dataset_dir', "a['filename']"], {}), "(dataset_dir, a['filename'])\n", (7598, 7626), False, 'import os\n'), ((9331, 9371), 'os.path.join', 'os.path.join', (['dataset_dir', "a['filename']"], {}), "(dataset_dir, a['filename'])\n", (9343, 9371), False, 'import os\n'), ((10886, 10927), 'numpy.ones', 'np.ones', (['[mask.shape[-1]]'], {'dtype': 'np.int32'}), '([mask.shape[-1]], dtype=np.int32)\n', (10893, 10927), True, 'import numpy as np\n'), ((12416, 12450), 'mrcnn.model.mold_image', 'modellib.mold_image', (['image', 'config'], {}), '(image, config)\n', (12435, 12450), True, 'from mrcnn import model as modellib, utils\n'), ((13742, 13806), 'os.path.join', 'os.path.join', (['submit_dir', 
"dataset_val.image_info[image_id]['id']"], {}), "(submit_dir, dataset_val.image_info[image_id]['id'])\n", (13754, 13806), False, 'import os\n'), ((16621, 16649), 'os.path.exists', 'os.path.exists', (['weights_path'], {}), '(weights_path)\n', (16635, 16649), False, 'import os\n'), ((16663, 16707), 'mrcnn.utils.download_trained_weights', 'utils.download_trained_weights', (['weights_path'], {}), '(weights_path)\n', (16693, 16707), False, 'from mrcnn import model as modellib, utils\n'), ((5065, 5085), 'os.walk', 'os.walk', (['dataset_dir'], {}), '(dataset_dir)\n', (5072, 5085), False, 'import os\n'), ((6459, 6508), 'os.path.join', 'os.path.join', (['dataset_dir', '"""via_region_data.json"""'], {}), "(dataset_dir, 'via_region_data.json')\n", (6471, 6508), False, 'import os\n'), ((8205, 8254), 'os.path.join', 'os.path.join', (['dataset_dir', '"""via_region_data.json"""'], {}), "(dataset_dir, 'via_region_data.json')\n", (8217, 8254), False, 'import os\n'), ((5415, 5450), 'os.path.join', 'os.path.join', (['dataset_dir', 'image_id'], {}), '(dataset_dir, image_id)\n', (5427, 5450), False, 'import os\n')] |
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.nn as nn
from torch.quantization import QuantStub, DeQuantStub
import torch.optim as optim
from torchvision import models, datasets, transforms
import torch.utils.data.distributed as distributed
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from PIL import Image
import numpy as np
import yaml
import argparse
import os
import shutil
from custom_modules.custom_modules import ConvBNReLU, LinearReLU, ConvReLU, ConvBN
import pruning.pruning as custom_pruning
from custom_modules.resnet import ResNet, imagenet_resnet50, BasicBlock, BottleneckBlock, conv1x1BN, conv3x3BN
from utils.meters import ClassificationMeter, TimeMeter
from experiment.experiment import experimentBase, globalActivationDict, globalWeightDict, hook_activation, load_state_dict_mod
from tracer.tracer import TraceDNN as Tracer
import horovod.torch as hvd
class experimentImagenetResNet50(experimentBase):
"""
Train script is based on https://github.com/horovod/horovod/blob/master/examples/pytorch_imagenet_resnet50.py
"""
    def __init__(self, configFile, multiprocessing=False):
        """
        Set up the ResNet-50 ImageNet experiment: model, train/val transforms,
        datasets, (optionally distributed) samplers, data loaders, and the
        TensorBoard writer plus train/validation meters.
        :param configFile: path to the experiment configuration file
        :param multiprocessing: True when running distributed under Horovod
        """
        super().__init__(configFile, multiprocessing)
        self.model = imagenet_resnet50()
        datasetTrainDir = self.config.dataTrainDir
        datasetValDir = self.config.dataValDir
        """
        Original training data augmentation for ResNet-50 training on ImageNet
        See Section 3.4 of the original ResNet paper.
        "The image is resized with its shorter side randomly sampled in [256;480] for scale augmentation [41].
        A 224x224 crop is randomly sampled from an image or its horizontal flip,
        with the per-pixel mean subtracted [21]. The standard color augmentation in [21] is used."
        A note on torchvision.transforms.RandomSizedCrop
        - A crop of random size (default: of 0.08 to 1.0) of the original size and a random aspect ratio
        (default: of 3/4 to 4/3) of the original aspect ratio is made.
        This crop is finally resized to given size.
        This is popularly used to train the Inception networks.
        """
        # Might've accidentally used ImageNet's settings....
        self.train_transform = transforms.Compose([
            transforms.RandomResizedCrop(size=224, scale=(0.5, 1.0)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        # Standard ImageNet eval pipeline: resize short side, center crop
        self.val_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        # Both branches currently build the same ImageFolder dataset; the split
        # exists to allow switching to an LMDB-backed loader (commented out).
        if multiprocessing is False:
            self.trainDataSet = datasets.ImageFolder(datasetTrainDir,
                                        transform=self.train_transform)
            # print("Using ImageFolderLMDB")
            # self.trainDataSet = ImageFolderLMDB(datasetTrainDir, transform=self.train_transform)
        else:
            self.trainDataSet = datasets.ImageFolder(datasetTrainDir,
                                                 transform=self.train_transform)
        # Shard the training set across Horovod workers when distributed
        self.trainDataSampler = distributed.DistributedSampler(
            self.trainDataSet, num_replicas=hvd.size(), rank=hvd.rank()
        ) if multiprocessing is True \
            else None
        # TODO: Check whether having multiple workers actually speed up data loading
        dataLoaderKwargs = {'num_workers': self.config.numThreadsPerWorker}
        # Shuffle only when no distributed sampler controls the ordering
        self.trainDataLoader = DataLoader(
            self.trainDataSet,
            batch_size=self.config.batchSizePerWorker,
            sampler=self.trainDataSampler,
            shuffle=True if self.trainDataSampler is None else False,
            **dataLoaderKwargs
        )
        if multiprocessing is False:
            self.valDataSet = datasets.ImageFolder(datasetValDir,
                                          transform=self.val_transform)
        else:
            self.valDataSet = datasets.ImageFolder(datasetValDir,
                                              transform=self.val_transform)
        self.valDataSampler = distributed.DistributedSampler(
            self.valDataSet, num_replicas=hvd.size(), rank=hvd.rank()
        ) if multiprocessing is True \
            else None
        self.valDataLoader = DataLoader(
            self.valDataSet,
            batch_size=self.config.batchSizePerWorker,
            sampler=self.valDataSampler,
            shuffle=True if self.valDataSampler is None else False,
            **dataLoaderKwargs
        )
        # Only the Horovod root rank (or single-process mode) writes TensorBoard logs
        if (multiprocessing is True and hvd.rank() == 0) or multiprocessing is False:
            if not os.path.exists(self.config.logDir):
                os.makedirs(self.config.logDir)
            self.logWriter = SummaryWriter(self.config.logDir)
        # NOTE(review): on non-root ranks self.logWriter is never assigned above,
        # yet the meters below reference it -- presumably the experimentBase
        # constructor provides a default; confirm, otherwise this raises on rank != 0.
        self.trainMeter = ClassificationMeter(
            multiprocessing,
            self.logWriter,
            logPrefix='Train'
        )
        self.valMeter = ClassificationMeter(
            multiprocessing,
            self.logWriter,
            logPrefix='Validation'
        )
        self.trainTimeMeter = TimeMeter(
            multiprocessing,
            self.logWriter,
            logPrefix='Train'
        )
        # End of __init__
def initialize_from_pre_trained_model_helper(self) -> None:
'''
Download the pre-trained ResNet-50 model from Torch Vision,
and use the pre-trained parameters to initialize our custom ResNet-50 model
:return: None
'''
print('Downloading the pretrained ResNet-50 from TorchVision')
pretrainedModel = models.resnet50(pretrained=True, progress=True)
'''
Strategy:
- Match each residual block in the pre-trained model to each residual block in our custom model
- Match the first Conv + BN in the pre-trained model to the first Conv + BN in the custom model
- Match the last FC layer
- For the definition of Torch Vision's ResNet-50,
See https://pytorch.org/vision/stable/_modules/torchvision/models/resnet.html#resnet50
'''
# Match the input convolution layer
self.model.inputConvBNReLU[0].load_state_dict(pretrainedModel.conv1.state_dict())
self.model.inputConvBNReLU[1].load_state_dict(pretrainedModel.bn1.state_dict())
# Match the residual blocks
destStages = [self.model.stage1,
self.model.stage2,
self.model.stage3,
self.model.stage4]
sourceStages = [pretrainedModel.layer1,
pretrainedModel.layer2,
pretrainedModel.layer3,
pretrainedModel.layer4]
# Iterate through the stages
for idx, destStage in enumerate(destStages):
# Iterate through the blocks
idxBlock = 0
for block in destStage.children():
# Load the parameter of each layer
block.convBN1[0].load_state_dict(sourceStages[idx][idxBlock].conv1.state_dict())
block.convBN1[1].load_state_dict(sourceStages[idx][idxBlock].bn1.state_dict())
block.convBN2[0].load_state_dict(sourceStages[idx][idxBlock].conv2.state_dict())
block.convBN2[1].load_state_dict(sourceStages[idx][idxBlock].bn2.state_dict())
block.convBN3[0].load_state_dict(sourceStages[idx][idxBlock].conv3.state_dict())
block.convBN3[1].load_state_dict(sourceStages[idx][idxBlock].bn3.state_dict())
idxBlock = idxBlock + 1
self.model.fc.load_state_dict(pretrainedModel.fc.state_dict())
print('Finished loading parameters from the pre-trained ResNet-50 from TorchVision')
def evaluate_loss(self, output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
return F.cross_entropy(input=output, target=target)
def apply_hook_activation(self, module: torch.nn.Module, prefix=None) -> dict:
myDict = {}
return myDict
def extract_weight(self, module: torch.nn.Module) -> None:
"""
Helper function that specifies which layers' weight tensors should be extracted.
This is NOT called recursively (different from the base class implementation
Prunes the first convolution layer, and the convolution layers in the residual blocks
Don't prune the last fully-connected layer
:param module: Not used
:return: None
"""
#global globalWeightDict
globalWeightDict['inputConvBNReLU'] = self.model.inputConvBNReLU[0].weight.clone()
blockId = 0
for m in self.model.modules():
if isinstance(m, BottleneckBlock):
name = 'block_{}_layer0'.format(blockId)
globalWeightDict[name] = m.convBN1[0].weight.clone()
name = 'block_{}_layer1'.format(blockId)
globalWeightDict[name] = m.convBN2[0].weight.clone()
name = 'block_{}_layer2'.format(blockId)
globalWeightDict[name] = m.convBN3[0].weight.clone()
if isinstance(m.shortcut, ConvBN):
name = 'block_{}_shortcut'.format(blockId)
globalWeightDict[name] = m.shortcut[0].weight.clone()
blockId += 1
    # Override the evaluate_sparsity function
    def evaluate_sparsity(self, numBatches=None) -> (list, list, list):
        """
        Evaluate the activation and weight sparsity of the model on the entire validation set
        :param numBatches: optional cap on the number of validation batches to run;
            None runs the whole validation loader
        :return: Three lists. List one for the average activation sparsity per layer
                List two for the weight sparsity per layer
                List three the relevant layer name list
        """
        # List of activation intercept layers
        activationList = []
        # Forward hook: stash each observed layer output for later analysis
        def intercept_activation(module, input, output):
            activationList.append(output)
        # Change this
        # Registers the activation hook on every layer of interest, optionally
        # applies cluster pruning, and collects weight tensors + layer names.
        def evaluate_setup (model : ResNet, needPrune=False, prefix=None):
            interceptHandleList = []
            weightList = []
            layerNameList = []
            targetList = [model.inputConvBNReLU, model.maxpoolrelu,
                          model.stage1, model.stage2, model.stage3, model.stage4,
                          model.averagePool, model.fc]
            blockId = 0
            for target in targetList:
                if isinstance(target, ConvBNReLU):
                    # Input convolution layer
                    if needPrune is True:
                        custom_pruning.applyClusterPruning(
                            target[0], name='weight',
                            clusterSize=self.config.pruneCluster,
                            threshold=self.config.pruneThreshold
                        )
                    interceptHandle = target.register_forward_hook(intercept_activation)
                    interceptHandleList.append(interceptHandle)
                    weightList.append(target[0].weight.detach().clone())
                    layerNameList.append('Input ConvBNReLU')
                elif isinstance(target, nn.Sequential):
                    # A residual stage: walk its bottleneck blocks
                    for m in target.modules():
                        if isinstance(m, BottleneckBlock):
                            for layerId, layer in enumerate([m.convBN1, m.convBN2, m.convBN3]):
                                name = 'block_{}_layer{}'.format(blockId, layerId)
                                if needPrune is True:
                                    custom_pruning.applyClusterPruning(
                                        layer[0], name='weight',
                                        clusterSize=self.config.pruneCluster,
                                        threshold=self.config.pruneThreshold
                                    )
                                interceptHandle = layer.register_forward_hook(intercept_activation)
                                interceptHandleList.append(interceptHandle)
                                weightList.append(layer[0].weight.detach().clone())
                                layerNameList.append(name)
                            # Projection shortcuts carry weights of their own
                            if isinstance(m.shortcut, ConvBN):
                                name = 'block_{}_shortcut'.format(blockId)
                                if needPrune is True:
                                    custom_pruning.applyClusterPruning(
                                        m.shortcut[0], name='weight',
                                        clusterSize=self.config.pruneCluster,
                                        threshold=self.config.pruneThreshold
                                    )
                                interceptHandle = m.shortcut.register_forward_hook(intercept_activation)
                                interceptHandleList.append(interceptHandle)
                                weightList.append(m.shortcut[0].weight.detach().clone())
                                layerNameList.append(name)
                            # Also record the block's combined output (no weight tensor)
                            interceptHandle = m.register_forward_hook(intercept_activation)
                            interceptHandleList.append(interceptHandle)
                            weightList.append(None)
                            layerNameList.append('block_{}_output'.format(blockId))
                            blockId += 1
                elif isinstance(target, nn.MaxPool2d):
                    interceptHandle = target.register_forward_hook(intercept_activation)
                    interceptHandleList.append(interceptHandle)
                    weightList.append(None)
                    layerNameList.append('input_max_pool')
                elif isinstance(target, nn.AvgPool2d):
                    interceptHandle = target.register_forward_hook(intercept_activation)
                    interceptHandleList.append(interceptHandle)
                    weightList.append(None)
                    layerNameList.append('average_pool')
                elif isinstance(target, nn.Linear):
                    if needPrune is True:
                        custom_pruning.applyClusterPruning(
                            target, name='weight',
                            clusterSize=self.config.pruneCluster,
                            threshold=self.config.pruneThreshold
                        )
                    interceptHandle = target.register_forward_hook(intercept_activation)
                    interceptHandleList.append(interceptHandle)
                    weightList.append(target.weight.detach().clone())
                    layerNameList.append('classification')
            return interceptHandleList, weightList, layerNameList
        #End of helper function evaluate_setup
        # NOTE(review): the value computed below is the fraction of elements
        # *kept* by the group-lasso mask (mask == 1), i.e. density rather than
        # sparsity in the strict sense -- confirm the intended convention.
        def generate_sparsity_list(tensorList : list):
            sparsityList = []
            for idx, tensor in enumerate(tensorList):
                if tensor is not None:
                    mask = custom_pruning.compute_group_lasso_mask(tensor, self.config.pruneCluster, self.config.pruneThreshold)
                    mask = mask.byte()
                    reference = torch.ones_like(mask)
                    comparison = torch.eq(mask, reference)
                    numNz = torch.sum(comparison.float())
                    sparsity = numNz.item() / comparison.numel()
                else:
                    sparsity = None
                sparsityList.append(sparsity)
            return sparsityList
        if self.multiprocessing is True:
            assert hvd.size() == 1, "Sparsity evaluation cannot be done in multi-processing mode!"
        # Fuse and quantized the model if this is haven't been done so
        #evaluatedModel = copy.deepcopy(self.model)
        if self.experimentStatus.flagFusedQuantized is False:
            self.quantize_model()
            self.experimentStatus.flagFusedQuantized = True
        # Bake in the sparsity
        if self.experimentStatus.flagPruned is True:
            custom_pruning.unPruneNetwork(self.model)
        evaluatedModel = self.model
        # Apply pruning mask, and activation interception, extract weight
        interceptHandleList, weightList, layerNameList = \
            evaluate_setup(evaluatedModel, True)
        # Compute weight sparsity
        weightSparsityList = generate_sparsity_list(weightList)
        activationSparsityList = None
        numBatchesToRun = len(self.valDataLoader) if numBatches is None else numBatches
        iterBatch = 0
        with torch.no_grad():
            for batchIdx, (data, target) in enumerate(self.valDataLoader):
                print("Runing batch {}".format(iterBatch))
                activationList.clear()
                output = evaluatedModel(data)
                # Per-batch activation values, accumulated across batches
                batchActivationSparsityList = np.array(generate_sparsity_list(activationList))
                if activationSparsityList is None:
                    activationSparsityList = np.zeros_like(batchActivationSparsityList)
                activationSparsityList = np.add(batchActivationSparsityList, activationSparsityList)
                iterBatch += 1
                if iterBatch == numBatchesToRun:
                    break
        # End of iteration of all validation data
        # Average the accumulated per-batch values over the batches actually run
        activationSparsityList = activationSparsityList / float(numBatchesToRun)
        return activationSparsityList, weightSparsityList, layerNameList
    # End of evaluate sparsity
def prune_network_method(cls, model, sparsityTarget, config):
    """Apply balanced pruning to every prunable conv inside the residual
    blocks, then to the final FC layer, and return the (mutated) model.

    Convolutions that change the channel count are capped at 50% sparsity;
    every other pruned layer uses sparsityTarget directly.
    """
    for module in model.modules():
        if not isinstance(module, BottleneckBlock):
            continue
        candidates = [module.convBN1[0], module.convBN2[0], module.convBN3[0], module.shortcut]
        for candidate in candidates:
            # An Identity shortcut has no weights to prune.
            if isinstance(candidate, nn.Identity):
                continue
            # The shortcut may still be a ConvBN/ConvBNReLU wrapper; unwrap
            # it to reach the underlying conv layer.
            if isinstance(candidate, (ConvBN, ConvBNReLU)):
                candidate = candidate[0]
            # Layers that see a change in the number of channels are capped
            # at 50% sparsity.
            if candidate.in_channels != candidate.out_channels:
                effective_sparsity = min(0.5, sparsityTarget)
            else:
                effective_sparsity = sparsityTarget
            custom_pruning.applyBalancedPruning(
                candidate,
                'weight',
                clusterSize=config.pruneCluster,
                pruneRangeInCluster=config.pruneRangeInCluster,
                sparsity=effective_sparsity
            )
    # The classifier head is pruned at the full target sparsity.
    custom_pruning.applyBalancedPruning(
        model.fc,
        'weight',
        clusterSize=config.pruneCluster,
        pruneRangeInCluster=config.pruneRangeInCluster,
        sparsity=sparsityTarget
    )
    return model
def trace_model(self, dirnameOverride=None, numMemoryRegions: int = 3, modelName: str = 'model',
                foldBN: bool = True, outputLayerID: int = -1, custom_image_path=None) -> None:
    """
    Trace the model after pruning and quantization, and save the trace and parameters.

    :param dirnameOverride: output directory; defaults to self.config.checkpointSaveDir.
    :param numMemoryRegions: number of memory regions used when annotating the trace.
    :param modelName: base name for the dumped trace / reference-blob files.
    :param foldBN: whether the tracer folds batch-norm layers into the convs.
    :param outputLayerID: layer whose output is captured (-1 presumably means the
        final output — confirm against Tracer.getOutput).
    :param custom_image_path: optional image file used as the reference input
        instead of the first validation sample.
    :return: None
    """
    dirname = self.config.checkpointSaveDir if dirnameOverride is None else dirnameOverride
    # Prune and quantize the model
    self.eval_prep()
    # Deepcopy doesn't work, do the following instead:
    # See https://discuss.pytorch.org/t/deep-copying-pytorch-modules/13514/2
    # Rebuild a fresh network, re-apply quantization + pruning so its module
    # structure matches self.model, then copy the trained weights over.
    module = imagenet_resnet50()
    module = self.quantize_model_method(module, self.qatConfig)
    module = self.prune_network_method(module, self.experimentStatus.targetSparsity, self.config)
    # module.load_state_dict(self.model.state_dict())
    load_state_dict_mod(module, self.model.state_dict())
    with torch.no_grad():
        # Hack
        # module.inputConvBNReLU._modules['0'].running_mean.zero_()
        # module.inputConvBNReLU._modules['0'].beta.zero_()
        # end of hack
        module.eval()
        trace = Tracer(module, _foldBN=foldBN, _defaultPruneCluster=self.config.pruneCluster,
                       _defaultPruneRangeInCluster=self.config.pruneRangeInCluster)
        """
        Run inference and save a reference input-output pair
        """
        blobPath = os.path.join(dirname, modelName + '_inout.yaml')
        # NOTE(review): blobFile is never closed; consider a with-block.
        blobFile = open(blobPath, 'w')
        blobDict: dict = {}
        output = None
        sampleIn = None
        if custom_image_path is None:
            # Use the first sample of the first validation batch as input.
            for (data, target) in self.valDataLoader:
                sampleIn = data[0].unsqueeze(0)
                print(sampleIn.shape)
                output = trace.getOutput(sampleIn, outputLayerID)
                break
        else:
            print('Using custom image for inference tracing: {}'.format(custom_image_path))
            img = Image.open(custom_image_path)
            img = img.convert('RGB')
            # val_transform = transforms.Compose([
            #     transforms.Resize(256),
            #     transforms.CenterCrop(224),
            #     transforms.ToTensor(),
            #     transforms.Normalize(mean=[0.000, 0.000, 0.000],
            #                          std=[0.229, 0.224, 0.225])
            # ])
            sampleIn = self.val_transform(img)
            sampleIn = sampleIn.unsqueeze(0)
            print(sampleIn.shape)
            output = trace.getOutput(sampleIn, outputLayerID)
        # Flatten the reference input/output pair into plain lists for YAML.
        inputArray = sampleIn.view(sampleIn.numel()).tolist()
        blobDict['input'] = inputArray
        outputArray = output.view(output.numel()).tolist()
        blobDict['output'] = outputArray
        # We want list to be dumped as in-line format, hence the choice of the default_flow_style
        # See https://stackoverflow.com/questions/56937691/making-yaml-ruamel-yaml-always-dump-lists-inline
        yaml.dump(blobDict, blobFile, default_flow_style=None)
        trace.traceModel(sampleIn)
        trace.annotate(numMemRegions=numMemoryRegions)
        trace.dump(dirname, fileNameBase=modelName)
if __name__ == '__main__':
    # Command-line front-end: parse arguments, build the experiment, then
    # dispatch on --mode.
    parser = argparse.ArgumentParser(description="Imagenet_ResNet50 experiment")
    parser.add_argument('--mode', type=str, choices=['train', 'evaluate_sparsity', 'print_model', 'trace_model', 'validate'],
                        default='train',
                        help='Mode. Valid choices are train, evaluate_sparsity, print model, trace_model, and validate')
    parser.add_argument('--config_file', type=str, required=True,
                        help='Path to the experiment configuration file. Required')
    # Fix: the original implicit string concatenation was missing a separator
    # between "2: load model only" and "3: ...", rendering the help text as
    # "...load model only3: initialize...".
    parser.add_argument('--load_checkpoint', type=int, choices=[0, 1, 2, 3], default=0,
                        help='Load experiment from checkpoint. '
                             'Default: 0. 0: start from scratch; '
                             '1: load full experiment; '
                             '2: load model only; '
                             '3: initialize model from pre-trained ResNet-50 from Torch Vision')
    parser.add_argument('--multiprocessing', action='store_true',
                        help='Enable multiprocessing (using Horovod as backend). Default: False')
    parser.add_argument('--checkpoint_path', type=str,
                        help='Path to the checkpoint to be loaded. Required if --load_checkpoint is set as 1 or 2')
    parser.add_argument('--override_cluster_size', type=int,
                        help='Override the cluster size in the experiment config when performing sparsity evaluation')
    parser.add_argument('--output_layer_id', type=int, default=-1,
                        help='ID of the layer to intercept the output during model tracing. Default: -1')
    parser.add_argument('--custom_image_path', type=str, default=None, help='Path to the image to run inference on during tracing')
    parser.add_argument('--custom_sparsity', type=float, default=None, help='Override the sparsity target with a custom value')
    args = parser.parse_args()
    # Horovod must be initialized before the experiment object is constructed.
    if args.multiprocessing is True:
        hvd.init()
    experiment = experimentImagenetResNet50(configFile=args.config_file,
                                            multiprocessing=args.multiprocessing)
    if args.load_checkpoint == 1 or args.load_checkpoint == 2:
        assert args.checkpoint_path is not None, 'Experiment is required to load from an existing checkpoint, but no path to checkpoint is provided!'
        loadModelOnly = True if args.load_checkpoint == 2 else False
        experiment.restore_experiment_from_checkpoint(checkpoint=args.checkpoint_path,
                                                      loadModelOnly=loadModelOnly)
    elif args.load_checkpoint == 3:
        experiment.initialize_from_pre_trained_model()
    if args.mode == 'train':
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        cudnn.benchmark = True
        experiment.train(device=device)
        # Copy the config file into the log directory
        logPath = experiment.config.checkpointSaveDir
        configFileName = os.path.basename(args.config_file)
        newConfigFilePath = os.path.join(logPath, configFileName)
        shutil.copy(args.config_file, newConfigFilePath)
    elif args.mode == 'evaluate_sparsity':
        if args.custom_sparsity is not None:
            experiment.experimentStatus.targetSparsity = args.custom_sparsity
        experiment.save_sparsity_stats(args.override_cluster_size, numBatches=20)
    elif args.mode == 'print_model':
        experiment.print_model()
    elif args.mode == 'trace_model':
        if args.custom_sparsity is not None:
            experiment.experimentStatus.targetSparsity = args.custom_sparsity
        if args.override_cluster_size is not None:
            experiment.experimentStatus.pruneCluster = args.override_cluster_size
            experiment.config.pruneCluster = args.override_cluster_size
        experiment.trace_model(dirnameOverride=os.getcwd(), numMemoryRegions=3, modelName='resnet50_imagenet',
                               foldBN=True, outputLayerID=args.output_layer_id, custom_image_path=args.custom_image_path)
    elif args.mode == 'validate':
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        cudnn.benchmark = True
        if args.custom_sparsity is not None:
            experiment.experimentStatus.targetSparsity = args.custom_sparsity
        # Only rank 0 prints in multi-processing mode.
        if experiment.multiprocessing is False or (experiment.multiprocessing is True and hvd.rank() == 0):
            print('Running inference on the entire validation set. Target sparsity = {level:.4f}'
                  .format(level=experiment.experimentStatus.targetSparsity))
        experiment.eval_prep()
experiment.validate(epoch=0, device=device) | [
"custom_modules.resnet.imagenet_resnet50",
"torch.eq",
"horovod.torch.size",
"torch.cuda.is_available",
"torch.utils.tensorboard.SummaryWriter",
"os.path.exists",
"utils.meters.TimeMeter",
"argparse.ArgumentParser",
"horovod.torch.rank",
"torchvision.datasets.ImageFolder",
"pruning.pruning.apply... | [((22867, 22934), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Imagenet_ResNet50 experiment"""'}), "(description='Imagenet_ResNet50 experiment')\n", (22890, 22934), False, 'import argparse\n'), ((1319, 1338), 'custom_modules.resnet.imagenet_resnet50', 'imagenet_resnet50', ([], {}), '()\n', (1336, 1338), False, 'from custom_modules.resnet import ResNet, imagenet_resnet50, BasicBlock, BottleneckBlock, conv1x1BN, conv3x3BN\n'), ((4017, 4206), 'torch.utils.data.DataLoader', 'DataLoader', (['self.trainDataSet'], {'batch_size': 'self.config.batchSizePerWorker', 'sampler': 'self.trainDataSampler', 'shuffle': '(True if self.trainDataSampler is None else False)'}), '(self.trainDataSet, batch_size=self.config.batchSizePerWorker,\n sampler=self.trainDataSampler, shuffle=True if self.trainDataSampler is\n None else False, **dataLoaderKwargs)\n', (4027, 4206), False, 'from torch.utils.data import DataLoader\n'), ((4834, 5017), 'torch.utils.data.DataLoader', 'DataLoader', (['self.valDataSet'], {'batch_size': 'self.config.batchSizePerWorker', 'sampler': 'self.valDataSampler', 'shuffle': '(True if self.valDataSampler is None else False)'}), '(self.valDataSet, batch_size=self.config.batchSizePerWorker,\n sampler=self.valDataSampler, shuffle=True if self.valDataSampler is\n None else False, **dataLoaderKwargs)\n', (4844, 5017), False, 'from torch.utils.data import DataLoader\n'), ((5360, 5431), 'utils.meters.ClassificationMeter', 'ClassificationMeter', (['multiprocessing', 'self.logWriter'], {'logPrefix': '"""Train"""'}), "(multiprocessing, self.logWriter, logPrefix='Train')\n", (5379, 5431), False, 'from utils.meters import ClassificationMeter, TimeMeter\n'), ((5503, 5579), 'utils.meters.ClassificationMeter', 'ClassificationMeter', (['multiprocessing', 'self.logWriter'], {'logPrefix': '"""Validation"""'}), "(multiprocessing, self.logWriter, logPrefix='Validation')\n", (5522, 5579), False, 'from utils.meters import 
ClassificationMeter, TimeMeter\n'), ((5657, 5718), 'utils.meters.TimeMeter', 'TimeMeter', (['multiprocessing', 'self.logWriter'], {'logPrefix': '"""Train"""'}), "(multiprocessing, self.logWriter, logPrefix='Train')\n", (5666, 5718), False, 'from utils.meters import ClassificationMeter, TimeMeter\n'), ((6152, 6199), 'torchvision.models.resnet50', 'models.resnet50', ([], {'pretrained': '(True)', 'progress': '(True)'}), '(pretrained=True, progress=True)\n', (6167, 6199), False, 'from torchvision import models, datasets, transforms\n'), ((8409, 8453), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', ([], {'input': 'output', 'target': 'target'}), '(input=output, target=target)\n', (8424, 8453), True, 'import torch.nn.functional as F\n'), ((19173, 19344), 'pruning.pruning.applyBalancedPruning', 'custom_pruning.applyBalancedPruning', (['model.fc', '"""weight"""'], {'clusterSize': 'config.pruneCluster', 'pruneRangeInCluster': 'config.pruneRangeInCluster', 'sparsity': 'sparsityTarget'}), "(model.fc, 'weight', clusterSize=config.\n pruneCluster, pruneRangeInCluster=config.pruneRangeInCluster, sparsity=\n sparsityTarget)\n", (19208, 19344), True, 'import pruning.pruning as custom_pruning\n'), ((20082, 20101), 'custom_modules.resnet.imagenet_resnet50', 'imagenet_resnet50', ([], {}), '()\n', (20099, 20101), False, 'from custom_modules.resnet import ResNet, imagenet_resnet50, BasicBlock, BottleneckBlock, conv1x1BN, conv3x3BN\n'), ((24822, 24832), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (24830, 24832), True, 'import horovod.torch as hvd\n'), ((25834, 25868), 'os.path.basename', 'os.path.basename', (['args.config_file'], {}), '(args.config_file)\n', (25850, 25868), False, 'import os\n'), ((25897, 25934), 'os.path.join', 'os.path.join', (['logPath', 'configFileName'], {}), '(logPath, configFileName)\n', (25909, 25934), False, 'import os\n'), ((25943, 25991), 'shutil.copy', 'shutil.copy', (['args.config_file', 'newConfigFilePath'], {}), '(args.config_file, 
newConfigFilePath)\n', (25954, 25991), False, 'import shutil\n'), ((3196, 3265), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['datasetTrainDir'], {'transform': 'self.train_transform'}), '(datasetTrainDir, transform=self.train_transform)\n', (3216, 3265), False, 'from torchvision import models, datasets, transforms\n'), ((3503, 3572), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['datasetTrainDir'], {'transform': 'self.train_transform'}), '(datasetTrainDir, transform=self.train_transform)\n', (3523, 3572), False, 'from torchvision import models, datasets, transforms\n'), ((4336, 4401), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['datasetValDir'], {'transform': 'self.val_transform'}), '(datasetValDir, transform=self.val_transform)\n', (4356, 4401), False, 'from torchvision import models, datasets, transforms\n'), ((4493, 4558), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['datasetValDir'], {'transform': 'self.val_transform'}), '(datasetValDir, transform=self.val_transform)\n', (4513, 4558), False, 'from torchvision import models, datasets, transforms\n'), ((5299, 5332), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['self.config.logDir'], {}), '(self.config.logDir)\n', (5312, 5332), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((16499, 16540), 'pruning.pruning.unPruneNetwork', 'custom_pruning.unPruneNetwork', (['self.model'], {}), '(self.model)\n', (16528, 16540), True, 'import pruning.pruning as custom_pruning\n'), ((17022, 17037), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17035, 17037), False, 'import torch\n'), ((20404, 20419), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (20417, 20419), False, 'import torch\n'), ((20648, 20791), 'tracer.tracer.TraceDNN', 'Tracer', (['module'], {'_foldBN': 'foldBN', '_defaultPruneCluster': 'self.config.pruneCluster', '_defaultPruneRangeInCluster': 'self.config.pruneRangeInCluster'}), '(module, _foldBN=foldBN, 
_defaultPruneCluster=self.config.\n pruneCluster, _defaultPruneRangeInCluster=self.config.pruneRangeInCluster)\n', (20654, 20791), True, 'from tracer.tracer import TraceDNN as Tracer\n'), ((20934, 20982), 'os.path.join', 'os.path.join', (['dirname', "(modelName + '_inout.yaml')"], {}), "(dirname, modelName + '_inout.yaml')\n", (20946, 20982), False, 'import os\n'), ((22615, 22669), 'yaml.dump', 'yaml.dump', (['blobDict', 'blobFile'], {'default_flow_style': 'None'}), '(blobDict, blobFile, default_flow_style=None)\n', (22624, 22669), False, 'import yaml\n'), ((2418, 2474), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', ([], {'size': '(224)', 'scale': '(0.5, 1.0)'}), '(size=224, scale=(0.5, 1.0))\n', (2446, 2474), False, 'from torchvision import models, datasets, transforms\n'), ((2516, 2549), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2547, 2549), False, 'from torchvision import models, datasets, transforms\n'), ((2591, 2612), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2610, 2612), False, 'from torchvision import models, datasets, transforms\n'), ((2654, 2729), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (2674, 2729), False, 'from torchvision import models, datasets, transforms\n'), ((2896, 2918), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (2913, 2918), False, 'from torchvision import models, datasets, transforms\n'), ((2932, 2958), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (2953, 2958), False, 'from torchvision import models, datasets, transforms\n'), ((2972, 2993), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2991, 2993), False, 'from torchvision import models, datasets, 
transforms\n'), ((3007, 3082), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (3027, 3082), False, 'from torchvision import models, datasets, transforms\n'), ((5186, 5220), 'os.path.exists', 'os.path.exists', (['self.config.logDir'], {}), '(self.config.logDir)\n', (5200, 5220), False, 'import os\n'), ((5238, 5269), 'os.makedirs', 'os.makedirs', (['self.config.logDir'], {}), '(self.config.logDir)\n', (5249, 5269), False, 'import os\n'), ((16042, 16052), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (16050, 16052), True, 'import horovod.torch as hvd\n'), ((17534, 17593), 'numpy.add', 'np.add', (['batchActivationSparsityList', 'activationSparsityList'], {}), '(batchActivationSparsityList, activationSparsityList)\n', (17540, 17593), True, 'import numpy as np\n'), ((21538, 21567), 'PIL.Image.open', 'Image.open', (['custom_image_path'], {}), '(custom_image_path)\n', (21548, 21567), False, 'from PIL import Image\n'), ((25592, 25617), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (25615, 25617), False, 'import torch\n'), ((3734, 3744), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (3742, 3744), True, 'import horovod.torch as hvd\n'), ((3751, 3761), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (3759, 3761), True, 'import horovod.torch as hvd\n'), ((4715, 4725), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (4723, 4725), True, 'import horovod.torch as hvd\n'), ((4732, 4742), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (4740, 4742), True, 'import horovod.torch as hvd\n'), ((5121, 5131), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (5129, 5131), True, 'import horovod.torch as hvd\n'), ((15466, 15571), 'pruning.pruning.compute_group_lasso_mask', 'custom_pruning.compute_group_lasso_mask', (['tensor', 'self.config.pruneCluster', 'self.config.pruneThreshold'], {}), 
'(tensor, self.config.pruneCluster,\n self.config.pruneThreshold)\n', (15505, 15571), True, 'import pruning.pruning as custom_pruning\n'), ((15639, 15660), 'torch.ones_like', 'torch.ones_like', (['mask'], {}), '(mask)\n', (15654, 15660), False, 'import torch\n'), ((15694, 15719), 'torch.eq', 'torch.eq', (['mask', 'reference'], {}), '(mask, reference)\n', (15702, 15719), False, 'import torch\n'), ((17449, 17491), 'numpy.zeros_like', 'np.zeros_like', (['batchActivationSparsityList'], {}), '(batchActivationSparsityList)\n', (17462, 17491), True, 'import numpy as np\n'), ((11103, 11244), 'pruning.pruning.applyClusterPruning', 'custom_pruning.applyClusterPruning', (['target[0]'], {'name': '"""weight"""', 'clusterSize': 'self.config.pruneCluster', 'threshold': 'self.config.pruneThreshold'}), "(target[0], name='weight', clusterSize=\n self.config.pruneCluster, threshold=self.config.pruneThreshold)\n", (11137, 11244), True, 'import pruning.pruning as custom_pruning\n'), ((18805, 18967), 'pruning.pruning.applyBalancedPruning', 'custom_pruning.applyBalancedPruning', (['layer', '"""weight"""'], {'clusterSize': 'config.pruneCluster', 'pruneRangeInCluster': 'config.pruneRangeInCluster', 'sparsity': 'sparsity'}), "(layer, 'weight', clusterSize=config.\n pruneCluster, pruneRangeInCluster=config.pruneRangeInCluster, sparsity=\n sparsity)\n", (18840, 18967), True, 'import pruning.pruning as custom_pruning\n'), ((26722, 26733), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (26731, 26733), False, 'import os\n'), ((26984, 27009), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (27007, 27009), False, 'import torch\n'), ((27266, 27276), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (27274, 27276), True, 'import horovod.torch as hvd\n'), ((12068, 12208), 'pruning.pruning.applyClusterPruning', 'custom_pruning.applyClusterPruning', (['layer[0]'], {'name': '"""weight"""', 'clusterSize': 'self.config.pruneCluster', 'threshold': 'self.config.pruneThreshold'}), 
"(layer[0], name='weight', clusterSize=\n self.config.pruneCluster, threshold=self.config.pruneThreshold)\n", (12102, 12208), True, 'import pruning.pruning as custom_pruning\n'), ((12909, 13053), 'pruning.pruning.applyClusterPruning', 'custom_pruning.applyClusterPruning', (['m.shortcut[0]'], {'name': '"""weight"""', 'clusterSize': 'self.config.pruneCluster', 'threshold': 'self.config.pruneThreshold'}), "(m.shortcut[0], name='weight',\n clusterSize=self.config.pruneCluster, threshold=self.config.pruneThreshold)\n", (12943, 13053), True, 'import pruning.pruning as custom_pruning\n'), ((14620, 14758), 'pruning.pruning.applyClusterPruning', 'custom_pruning.applyClusterPruning', (['target'], {'name': '"""weight"""', 'clusterSize': 'self.config.pruneCluster', 'threshold': 'self.config.pruneThreshold'}), "(target, name='weight', clusterSize=self.\n config.pruneCluster, threshold=self.config.pruneThreshold)\n", (14654, 14758), True, 'import pruning.pruning as custom_pruning\n')] |
import ctypes
import numpy
import pygame
import time
from OpenGL.GL import *
from OpenGL.GL.shaders import *
from pygame.locals import *
width = 1024
height = 768
def getFileContents(filename):
    """Return the entire contents of *filename* as a string.

    Uses a context manager so the file handle is closed promptly; the
    original left the handle open until garbage collection.
    """
    with open(filename, 'r') as f:
        return f.read()
def init():
    """Compile and link the triangle shader program; set the clear color.

    Loads the vertex/fragment shader sources from data/shaders/, attaches
    and links them into a GL program, and configures the background clear
    color. Requires a current OpenGL context (draw() creates one via
    pygame.display.set_mode with the OPENGL flag before calling this).

    Returns the linked shader program handle.
    """
    vertexShader = compileShader(getFileContents("data/shaders/triangle.vert"), GL_VERTEX_SHADER)
    fragmentShader = compileShader(getFileContents("data/shaders/triangle.frag"), GL_FRAGMENT_SHADER)
    program = glCreateProgram()
    glAttachShader(program, vertexShader)
    glAttachShader(program, fragmentShader)
    glLinkProgram(program)
    # Set Clear Color
    glClearColor(0.84705, 0.541176, 0.439215, 1.0)
    return program
def drawImage(program, images, x_offsets, y_offsets):
    """Upload the background quad plus all image polygons and draw one frame.

    Each entry of `images` is a list of polygons; within a polygon, element 0
    is its RGB color and elements 1.. are XYZ vertex positions (see the
    image[i][0][k] / image[i][j][k] indexing below). x_offsets/y_offsets give
    a per-image translation applied to the x and y coordinates.

    Vertex layout is interleaved: 3 position floats + 3 color floats,
    i.e. a 24-byte stride. index_list stores (first_vertex, vertex_count)
    pairs consumed pairwise by the glDrawArrays loop at the end.

    NOTE(review): a fresh VBO/VAO is generated on every call and never
    deleted — looks like a GL object leak over time; confirm.
    """
    # Define Vertice List
    # X Y Z R G B
    # Bind Attribute
    glBindAttribLocation(program, 0, "vPosition")
    glBindAttribLocation(program, 1, "color")
    # Generate Buffers and Bind Buffers
    VBO = glGenBuffers(1)
    VAO = glGenVertexArrays(1)
    index_list = []
    vertex_list = []
    temp = 0  # running first-vertex index for the next polygon
    glBindVertexArray(VAO)
    # Background
    index_list.append(temp)
    vertex_list.extend([-1, 0, 0, 0.84705, 0.541176, 0.439215,
                        -1, 1, 0, 0.91372, 0.89804, 0.80784,
                        1, 1, 0, 0.91372, 0.89804, 0.80784,
                        1, 0, 0, 0.84705, 0.541176, 0.439215])
    index_list.append(4)
    temp += 4
    for id, image in enumerate(images):
        for i in range(len(image)):
            index_list.append(temp)
            for j in range(1,len(image[i])):
                for k in range(0,3):
                    # Position for Image foreground
                    if k == 0:
                        vertex_list.append(image[i][j][k] + x_offsets[id])
                    elif k == 1:
                        vertex_list.append(image[i][j][k] + y_offsets[id])
                    else:
                        vertex_list.append(image[i][j][k])
                for k in range(0,3):
                    # Color
                    vertex_list.append(image[i][0][k])
            # j is the last inner-loop value = number of vertices appended.
            # NOTE(review): if a polygon has fewer than 2 entries, j is unbound here.
            index_list.append(j) #element count
            temp = temp + j
    vertices = numpy.array(vertex_list, numpy.float32)
    glBindBuffer(GL_ARRAY_BUFFER, VBO)
    glBufferData(GL_ARRAY_BUFFER, vertices, GL_STATIC_DRAW) # Copy data to buffer
    # Attribute 0: position (first 12 bytes); attribute 1: color (next 12).
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_TRUE, 24, ctypes.c_void_p(0))
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(12))
    glEnableVertexAttribArray(0)
    glEnableVertexAttribArray(1)
    # Draw and Run
    glViewport(0, 0, width, height)
    glClear(GL_COLOR_BUFFER_BIT)
    glUseProgram(program)
    glBindVertexArray(VAO)
    for i in range(0,len(index_list),2):
        glDrawArrays(GL_POLYGON, index_list[i], index_list[i+1] )
    pygame.display.flip()
def draw(images):
    """Run the animation loop: scroll the scene layers and handle input.

    Layer indices (fixed size 6, see the per-branch comments below):
    0 cloud, 1 mountain, 2 back buildings, 3 front buildings, 4 road,
    5 the "Regalia" (player-controlled via WASD / arrow keys; Esc quits).
    Each layer has an (x, y) offset and velocity; scrolling layers wrap
    around when they pass their right-hand bound.
    """
    pygame.init()
    pygame.display.set_mode((width, height), HWSURFACE|OPENGL|DOUBLEBUF)
    program = init()
    x_offsets = []
    y_offsets = []
    x_vels = []
    y_vels = []
    # Fill initial value
    for i in range(6):
        x_offsets.append(0)
        y_offsets.append(0)
        x_vels.append(0)
        y_vels.append(0)
    # Starting positions / scroll speeds for each layer.
    x_offsets[0] = -1
    x_offsets[1] = -0.6
    x_offsets[2] = -0.5
    x_offsets[3] = -4
    y_offsets[5] = -0.6
    x_vels[0] = 0.000075
    x_vels[1] = 0.000025
    x_vels[2] = 0.0008
    x_vels[3] = 0.002
    x_vels[4] = 0.008
    running = True
    while running:
        # Advance every layer, wrapping the scrolling ones.
        for i in range(6):
            x_offsets[i] += x_vels[i]
            y_offsets[i] += y_vels[i]
            if i == 0:
                # Cloud
                if x_offsets[i] > 3:
                    x_offsets[i] = -2.7
            # elif i == 1:
            #     # Mountain
            if i == 2:
                # Building - Back
                if x_offsets[i] > 2:
                    x_offsets[i] = -0.8
            elif i == 3:
                # Building - Front
                if x_offsets[i] > 0.8:
                    x_offsets[i] = -3
            elif i == 4:
                # Road
                if x_offsets[i] > 0.8:
                    x_offsets[i] = -0.2
            elif i == 5:
                # Regalia
                # Clamp vertical position to [-0.9, -0.6] and stop movement.
                if y_offsets[i] > -0.6:
                    y_offsets[i] = -0.6
                    y_vels[i] = 0
                if y_offsets[i] < -0.9:
                    y_offsets[i] = -0.9
                    y_vels[i] = 0
        drawImage(program, images, x_offsets, y_offsets)
        events = pygame.event.get()
        # wait for exit
        for event in events:
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    pygame.quit()
                    running = False
                # WASD / arrows nudge the Regalia's velocity, not position.
                if event.key == K_a or event.key == K_LEFT:
                    x_vels[5] -= 0.0002
                if event.key == K_d or event.key == K_RIGHT:
                    x_vels[5] += 0.0002
                if event.key == K_w or event.key == K_UP:
                    y_vels[5] += 0.0001
                if event.key == K_s or event.key == K_DOWN:
                    y_vels[5] -= 0.0001
if __name__ == '__main__':
    # Running this module directly only prints a greeting; draw() is the
    # real entry point and is not invoked here.
    print("Hi from car.py")
| [
"pygame.init",
"pygame.quit",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.display.flip",
"numpy.array",
"ctypes.c_void_p"
] | [((2208, 2247), 'numpy.array', 'numpy.array', (['vertex_list', 'numpy.float32'], {}), '(vertex_list, numpy.float32)\n', (2219, 2247), False, 'import numpy\n'), ((2844, 2865), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (2863, 2865), False, 'import pygame\n'), ((2889, 2902), 'pygame.init', 'pygame.init', ([], {}), '()\n', (2900, 2902), False, 'import pygame\n'), ((2907, 2979), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(width, height)', '(HWSURFACE | OPENGL | DOUBLEBUF)'], {}), '((width, height), HWSURFACE | OPENGL | DOUBLEBUF)\n', (2930, 2979), False, 'import pygame\n'), ((2426, 2444), 'ctypes.c_void_p', 'ctypes.c_void_p', (['(0)'], {}), '(0)\n', (2441, 2444), False, 'import ctypes\n'), ((2502, 2521), 'ctypes.c_void_p', 'ctypes.c_void_p', (['(12)'], {}), '(12)\n', (2517, 2521), False, 'import ctypes\n'), ((4535, 4553), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (4551, 4553), False, 'import pygame\n'), ((4708, 4721), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (4719, 4721), False, 'import pygame\n')] |
#!/usr/bin/python
#coding:utf8
import numpy as np
def Cosine(x, y):
    """Cosine similarity between vectors x and y."""
    dot_xy = np.dot(x, y)
    dot_xx = np.dot(x, x)
    dot_yy = np.dot(y, y)
    return 1.0 * dot_xy / np.sqrt(dot_xx) / np.sqrt(dot_yy)
def Sigmoid(x):
    """Elementwise logistic function 1 / (1 + e^-x)."""
    neg_exp = np.exp(-1.0 * x)
    return 1.0 / (1.0 + neg_exp)
def SigmoidGradient(x):
    """Derivative of the logistic sigmoid: s(x) * (1 - s(x)).

    The original evaluated Sigmoid(x) twice; the value is computed once
    here (the inline formula mirrors Sigmoid above).
    """
    s = 1.0 / (1.0 + np.exp(-1.0 * x))
    return s * (1 - s)
# Squared error with an L2 (2-norm) regularization term.
def SquareErrorFunction2F(target_matrix_y, output_matrix_y, weights, lamda):
    """Mean squared error over m samples plus L2 weight penalty."""
    m = len(target_matrix_y)
    residual = target_matrix_y - output_matrix_y
    data_term = 1.0 / (2.0 * m) * np.sum(residual * residual)
    reg_term = lamda * 1.0 / (2.0 * m) * np.sum(weights * weights)
    return data_term + reg_term
def LogisticErrorFunction2F(target_matrix_y, output_matrix_y, weights, lamda):
    """Cross-entropy (logistic) cost over m samples plus L2 weight penalty."""
    m = len(target_matrix_y)
    cross_entropy = target_matrix_y * np.log(output_matrix_y) + (1.0 - target_matrix_y) * np.log(1.0 - output_matrix_y)
    data_cost = -1.0 / m * np.sum(cross_entropy)
    reg_cost = lamda / (2.0 * m) * np.sum(weights * weights)
    return data_cost + reg_cost
def RandomSample(size, a, b):
    """Uniform samples from [a, b), rescaled into (-epsilon, epsilon).

    With a=0, b=1 the output lies in [-0.12, 0.12), the usual small range
    for neural-network weight initialization.
    """
    epsilon_init = 0.12
    uniform = (b - a) * np.random.random_sample(size) + a
    return uniform * epsilon_init * 2 - epsilon_init
import math
def GaussFunc(x, dimension, mu, sigma):
    """Multivariate normal density at x.

    For a 1-D x, returns the scalar density; for a 2-D x (one point per
    row), returns the per-row densities (the diagonal of the quadratic
    form matrix).
    """
    sigma_inv = np.linalg.inv(sigma)
    exponent = -0.5 * np.dot(x - mu, sigma_inv)
    exponent = np.dot(exponent, (x - mu).T)
    density = 1./np.sqrt((2.*math.pi)**dimension) * 1./np.sqrt(np.linalg.det(sigma)) * np.exp(exponent)
    if x.ndim <= 1:
        return density
    return np.diag(density)
if __name__ == '__main__':
    # Manual smoke tests: exercise each helper with small numpy inputs
    # and print the results for visual inspection.
    print( Cosine(np.array([-1,-2]), np.array([1,2])))
    print( SigmoidGradient(np.array([-1,-2])))
    print( SquareErrorFunction2F(np.array([[0,1], [1,0]]), np.array([[0.5,0.5], [0.5,0.5]]), np.array([1,2,3]), 1))
    print( LogisticErrorFunction2F(np.array([[0,1], [1,0]]), np.array([[0.5,0.5], [0.5,0.5]]), np.array([1,2,3]), 1))
    print( RandomSample((2,3), 0, 1))
    print( GaussFunc(np.array([0,0]), 2, [0, 0], np.array([[1.,0],[0.,1]])))
    print( GaussFunc(np.array([[0,0.],[1,2.]]), 2, [0, 0], np.array([[1.,0],[0.,1]])))
| [
"numpy.random.random_sample",
"numpy.sqrt",
"numpy.log",
"numpy.diag",
"numpy.exp",
"numpy.sum",
"numpy.dot",
"numpy.linalg.inv",
"numpy.array",
"numpy.linalg.det"
] | [((1051, 1071), 'numpy.linalg.inv', 'np.linalg.inv', (['sigma'], {}), '(sigma)\n', (1064, 1071), True, 'import numpy as np\n'), ((1111, 1132), 'numpy.dot', 'np.dot', (['e', '(x - mu).T'], {}), '(e, (x - mu).T)\n', (1117, 1132), True, 'import numpy as np\n'), ((1085, 1104), 'numpy.dot', 'np.dot', (['(x - mu)', 'inv'], {}), '(x - mu, inv)\n', (1091, 1104), True, 'import numpy as np\n'), ((1211, 1220), 'numpy.exp', 'np.exp', (['e'], {}), '(e)\n', (1217, 1220), True, 'import numpy as np\n'), ((1262, 1273), 'numpy.diag', 'np.diag', (['re'], {}), '(re)\n', (1269, 1273), True, 'import numpy as np\n'), ((123, 135), 'numpy.dot', 'np.dot', (['y', 'y'], {}), '(y, y)\n', (129, 135), True, 'import numpy as np\n'), ((174, 190), 'numpy.exp', 'np.exp', (['(-1.0 * x)'], {}), '(-1.0 * x)\n', (180, 190), True, 'import numpy as np\n'), ((442, 459), 'numpy.sum', 'np.sum', (['(err * err)'], {}), '(err * err)\n', (448, 459), True, 'import numpy as np\n'), ((480, 505), 'numpy.sum', 'np.sum', (['(weights * weights)'], {}), '(weights * weights)\n', (486, 505), True, 'import numpy as np\n'), ((660, 683), 'numpy.log', 'np.log', (['output_matrix_y'], {}), '(output_matrix_y)\n', (666, 683), True, 'import numpy as np\n'), ((712, 741), 'numpy.log', 'np.log', (['(1.0 - output_matrix_y)'], {}), '(1.0 - output_matrix_y)\n', (718, 741), True, 'import numpy as np\n'), ((760, 776), 'numpy.sum', 'np.sum', (['vector_j'], {}), '(vector_j)\n', (766, 776), True, 'import numpy as np\n'), ((795, 820), 'numpy.sum', 'np.sum', (['(weights * weights)'], {}), '(weights * weights)\n', (801, 820), True, 'import numpy as np\n'), ((909, 938), 'numpy.random.random_sample', 'np.random.random_sample', (['size'], {}), '(size)\n', (932, 938), True, 'import numpy as np\n'), ((1318, 1336), 'numpy.array', 'np.array', (['[-1, -2]'], {}), '([-1, -2])\n', (1326, 1336), True, 'import numpy as np\n'), ((1337, 1353), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (1345, 1353), True, 'import numpy as np\n'), ((1380, 
1398), 'numpy.array', 'np.array', (['[-1, -2]'], {}), '([-1, -2])\n', (1388, 1398), True, 'import numpy as np\n'), ((1431, 1457), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (1439, 1457), True, 'import numpy as np\n'), ((1457, 1491), 'numpy.array', 'np.array', (['[[0.5, 0.5], [0.5, 0.5]]'], {}), '([[0.5, 0.5], [0.5, 0.5]])\n', (1465, 1491), True, 'import numpy as np\n'), ((1491, 1510), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1499, 1510), True, 'import numpy as np\n'), ((1547, 1573), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (1555, 1573), True, 'import numpy as np\n'), ((1573, 1607), 'numpy.array', 'np.array', (['[[0.5, 0.5], [0.5, 0.5]]'], {}), '([[0.5, 0.5], [0.5, 0.5]])\n', (1581, 1607), True, 'import numpy as np\n'), ((1607, 1626), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1615, 1626), True, 'import numpy as np\n'), ((1685, 1701), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1693, 1701), True, 'import numpy as np\n'), ((1713, 1743), 'numpy.array', 'np.array', (['[[1.0, 0], [0.0, 1]]'], {}), '([[1.0, 0], [0.0, 1]])\n', (1721, 1743), True, 'import numpy as np\n'), ((1760, 1790), 'numpy.array', 'np.array', (['[[0, 0.0], [1, 2.0]]'], {}), '([[0, 0.0], [1, 2.0]])\n', (1768, 1790), True, 'import numpy as np\n'), ((1798, 1828), 'numpy.array', 'np.array', (['[[1.0, 0], [0.0, 1]]'], {}), '([[1.0, 0], [0.0, 1]])\n', (1806, 1828), True, 'import numpy as np\n'), ((81, 93), 'numpy.dot', 'np.dot', (['x', 'y'], {}), '(x, y)\n', (87, 93), True, 'import numpy as np\n'), ((102, 114), 'numpy.dot', 'np.dot', (['x', 'x'], {}), '(x, x)\n', (108, 114), True, 'import numpy as np\n'), ((1187, 1207), 'numpy.linalg.det', 'np.linalg.det', (['sigma'], {}), '(sigma)\n', (1200, 1207), True, 'import numpy as np\n'), ((1141, 1178), 'numpy.sqrt', 'np.sqrt', (['((2.0 * math.pi) ** dimension)'], {}), '((2.0 * math.pi) ** dimension)\n', (1148, 1178), True, 'import 
numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy
from PIL import Image
import sys
import cv2
import time
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
import glob
import os
# Default configuration; each value can be overridden on the command line.
method = 'threshold'
#method = 'threshold_adp'
#method = 'backSub'
#method = 'kmeans'
dataset = 'beach'
save_frame = -1 #98, 130
min_cluster = 10
# Minimal hand-rolled flag parsing: scan argv for "--flag value" pairs.
# NOTE(review): the scan starts at index 0 (the script path), which is
# harmless since it never matches a flag name.
for i in range(len(sys.argv)-1):
    if sys.argv[i]=='--method':
        method = sys.argv[i+1]
    elif sys.argv[i]=='--dataset':
        dataset = sys.argv[i+1]
    elif sys.argv[i]=='--save_frame':
        save_frame = int(sys.argv[i+1])
    elif sys.argv[i]=='--min_cluster':
        min_cluster = int(sys.argv[i+1])
backSub = cv2.createBackgroundSubtractorMOG2()
#backSub = cv2.createBackgroundSubtractorKNN()
image_id = 1
fig = plt.figure(figsize=(20,30))
# params.txt holds either 4 values (x, y, width, height) or 3 values
# (x, y, scale) — the ValueError fallback handles the square-crop form.
try:
    xbound, ybound, imwidth, imheight = [int(t) for t in open('dataset/%s/params.txt'%dataset).readline().split()]
except ValueError:
    xbound, ybound, imscale = [int(t) for t in open('dataset/%s/params.txt'%dataset).readline().split()]
    imwidth = imheight = imscale
# 80/20 train/test split over the labeled frames; testing starts at test_idx.
num_samples = len(glob.glob('dataset/%s/label*.png'%dataset))
num_test = num_samples - int(num_samples*0.8)
test_idx = num_samples - num_test + 1
# Pixel-level and object-level true/false positive/negative counters.
tp = 0
fp = 0
fn = 0
obj_tp = 0
obj_fp = 0
obj_fn = 0
viz = '--viz' in sys.argv
zoomed_in = True
comp_time = []
while True:
if method!='backSub' and image_id < test_idx:
image_id += 1
continue
image_filename = 'dataset/%s/%d.png' % (dataset,image_id)
label_filename = 'dataset/%s/label%d.png'%(dataset,image_id)
if os.path.exists(image_filename) and os.path.exists(label_filename):
I = numpy.array(Image.open(image_filename))
if len(I.shape)>2:
I = numpy.mean(I, axis=2)
else:
break
gt = numpy.array(Image.open(label_filename))
gt = gt > 0
dt = numpy.zeros(I.shape, dtype=bool)
image_np = I[ybound:ybound+imheight, xbound:xbound+imwidth]
t1 = time.time()
if method=='threshold':
Isub = image_np.astype(numpy.uint8)
val, mask = cv2.threshold(Isub,75 if dataset=='beach' else 85 if dataset=='shore' else 120,255,cv2.THRESH_BINARY)
elif method=='threshold_adp':
Isub = image_np.astype(numpy.uint8)
blur = cv2.medianBlur(Isub,5)
if dataset=='beach':
mask = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,15,-5)
elif dataset=='shore':
mask = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,15,-8)
else:
mask = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,15,-10)
elif method=='backSub':
if dataset=='combined' and image_id in [97, 225, 249]:
backSub = cv2.createBackgroundSubtractorMOG2()
mask = backSub.apply(image_np)
Image.fromarray(mask.astype(numpy.uint8), mode='L').save('dataset/%s/backSub/%d.png'%(dataset,image_id))
elif method=='kmeans':
window_size = 15 if dataset=='beach' or dataset=='shore' else 100
margin = 10 if dataset=='beach' or dataset=='shore' else 100
Isub = image_np.copy()
#start with mean shift
centerX = 0
centerY = 0
centerVal = Isub[centerY, centerX]
peaks = []
peakVal = []
while True:
while True:
x1 = max(0,centerX-window_size)
x2 = min(Isub.shape[1],centerX+window_size)
y1 = max(0,centerY-window_size)
y2 = min(Isub.shape[0],centerY+window_size)
Itmp = Isub[y1:y2,x1:x2]
maxVal = Itmp.max()
# print(centerX,centerY,centerVal,maxVal)
if maxVal > centerVal:
dy, dx = numpy.unravel_index(numpy.argmax(Itmp), Itmp.shape)
centerY = y1+dy
centerX = x1+dx
centerVal = maxVal
Isub[y1:y2,x1:x2] = 0
else:
peaks.append([centerX,centerY])
peakVal.append(centerVal)
Isub[y1:y2,x1:x2] = 0
# print('Found peak (%d,%d) at %d'%(centerX,centerY,centerVal))
break
valid_idx = numpy.array(numpy.nonzero(Isub)).T
if len(valid_idx) > 0:
centerY, centerX = valid_idx[0]
centerVal = Isub[centerY, centerX]
else:
break
kmeans = KMeans(n_clusters=2).fit(numpy.array(peakVal).reshape(-1,1))
# print(kmeans.cluster_centers_, numpy.sum(kmeans.labels_==0), numpy.sum(kmeans.labels_==1))
target_label = numpy.argmax(kmeans.cluster_centers_)
if dataset=='beach':
peaks = numpy.array(peaks)[numpy.array(peakVal)>100]
elif dataset=='shore':
peaks = numpy.array(peaks)[numpy.array(peakVal)>85]
else:
peaks = numpy.array(peaks)[kmeans.labels_ == target_label]
Isub = image_np.copy()
mask = numpy.zeros(Isub.shape, dtype=bool)
for x,y in peaks:
xl = max(0,x-margin)
xr = min(Isub.shape[1],x+margin)
yl = max(0,y-margin)
yr = min(Isub.shape[0],y+margin)
cropped = Isub[yl:yr, xl:xr]
kmeans = KMeans(n_clusters=2).fit(cropped.reshape(-1,1))
# print('kmeans %.2f (%d) %.2f (%d)'%(kmeans.cluster_centers_[0], numpy.sum(kmeans.labels_==0), kmeans.cluster_centers_[1], numpy.sum(kmeans.labels_==1)))
target_label = numpy.argmax(kmeans.cluster_centers_)
M = kmeans.labels_.reshape(cropped.shape)==target_label
ym, xm = numpy.nonzero(M)
ym += yl
xm += xl
mask[ym,xm] = True
t2 = time.time()
dt[ybound:ybound+imheight,xbound:xbound+imwidth] = mask
err_viz = numpy.zeros((image_np.shape[0], image_np.shape[1], 3), dtype=numpy.uint8)
if image_id < test_idx:
image_id += 1
continue
gt_sub = gt[ybound:ybound+imheight, xbound:xbound+imwidth] > 0
dt_sub = dt[ybound:ybound+imheight, xbound:xbound+imwidth]
current_tp = numpy.logical_and(gt_sub,dt_sub)
current_fp = numpy.logical_and(numpy.logical_not(gt_sub),dt_sub)
current_fn = numpy.logical_and(gt_sub,numpy.logical_not(dt_sub))
err_viz[current_tp] = [0,255,0]
err_viz[current_fp] = [0,0,255]
err_viz[current_fn] = [255,0,0]
current_tp = numpy.sum(current_tp)
current_fp = numpy.sum(current_fp)
current_fn = numpy.sum(current_fn)
prc = 1.0*current_tp/(current_tp+current_fp+1)
rcl = 1.0*current_tp/(current_tp+current_fn+1)
tp += current_tp
fp += current_fp
fn += current_fn
ret, gt_com = cv2.connectedComponents(gt_sub.astype(numpy.uint8))
ret, dt_com = cv2.connectedComponents(dt_sub.astype(numpy.uint8))
num_gt = 0
num_dt = 0
for i in range(1, gt_com.max()+1):
if numpy.sum(gt_com==i) > min_cluster:
num_gt += 1
gt_com[gt_com==i] = num_gt
else:
gt_com[gt_com==i] = 0
for i in range(1, dt_com.max()+1):
if numpy.sum(dt_com==i) > min_cluster:
num_dt += 1
dt_com[dt_com==i] = num_dt
else:
dt_com[dt_com==i] = 0
current_tp = 0
dt_matched = numpy.zeros(num_dt, dtype=bool)
for i in range(1, gt_com.max()+1):
for j in range(1, dt_com.max()+1):
if dt_matched[j-1]:
continue
m1 = gt_com==i
m2 = dt_com==j
iou = 1.0 * numpy.sum(numpy.logical_and(m1, m2)) / numpy.sum(numpy.logical_or(m1, m2))
if iou > 0:
current_tp += 1
dt_matched[j-1] = True
break
current_fp = numpy.sum(dt_matched==0)
current_fn = num_gt - current_tp
obj_tp += current_tp
obj_fp += current_fp
obj_fn += current_fn
obj_prc = 1.0 * current_tp / (current_tp + current_fp) if current_tp > 0 else 0
obj_rcl = 1.0 * current_tp / (current_tp + current_fn) if current_tp > 0 else 0
gt_viz = numpy.zeros((gt_sub.shape[0], gt_sub.shape[1], 3), dtype=numpy.uint8)
for i in range(1, gt_com.max()+1):
c = numpy.random.randint(0,255,3)
gt_viz[gt_com==i] = c
my, mx = numpy.nonzero(gt_com==i)
x1 = max(mx.min() - 5, 0)
x2 = min(mx.max() + 5, gt_viz.shape[1] - 1)
y1 = max(my.min() - 5, 0)
y2 = min(my.max() + 5, gt_viz.shape[0] - 1)
gt_viz[y1, x1:x2, :] = [255,255,0]
gt_viz[y2, x1:x2, :] = [255,255,0]
gt_viz[y1:y2, x1, :] = [255,255,0]
gt_viz[y1:y2, x2, :] = [255,255,0]
dt_viz = numpy.zeros((dt_sub.shape[0], dt_sub.shape[1], 3), dtype=numpy.uint8)
for i in range(1, dt_com.max()+1):
c = numpy.random.randint(0,255,3)
dt_viz[dt_com==i] = c
my, mx = numpy.nonzero(dt_com==i)
x1 = max(mx.min() - 5, 0)
x2 = min(mx.max() + 5, dt_viz.shape[1] - 1)
y1 = max(my.min() - 5, 0)
y2 = min(my.max() + 5, dt_viz.shape[0] - 1)
dt_viz[y1, x1:x2, :] = [255,255,0]
dt_viz[y2, x1:x2, :] = [255,255,0]
dt_viz[y1:y2, x1, :] = [255,255,0]
dt_viz[y1:y2, x2, :] = [255,255,0]
comp_time.append(t2 - t1)
print('Image #%d Precision:%.2f/%.2f Recall:%.2f/%.2f (%.2fs)'%(image_id, prc,obj_prc,rcl,obj_rcl, t2-t1))
if image_id == save_frame:
Image.fromarray(image_np.astype(numpy.uint8), mode='L').save('results/original_%d.png'%save_frame)
Image.fromarray(dt_viz, mode='RGB').save('results/detected_%s_%d.png'%(method, save_frame))
Image.fromarray(gt_viz, mode='RGB').save('results/ground_truth_%d.png'%save_frame)
print('save_frame',save_frame)
sys.exit(1)
if viz:
plt.clf()
plt.subplot(2,2,1)
plt.imshow(image_np if zoomed_in else I, cmap='gray')
plt.title('Image #%d'%image_id)
plt.subplot(2,2,2)
plt.imshow(gt_sub if zoomed_in else gt, cmap='gray')
plt.subplot(2,2,3)
plt.imshow(dt_viz if zoomed_in else dt, cmap='gray')
plt.subplot(2,2,4)
plt.imshow(gt_viz, cmap='gray')
plt.pause(0.5)
image_id += 1
P = 1.0 * tp / (tp + fp)
R = 1.0 * tp / (tp + fn)
F = 2.0 * P * R / (P + R)
oP = 1.0 * obj_tp / (obj_tp + obj_fp)
oR = 1.0 * obj_tp / (obj_tp + obj_fn)
oF = 2.0 * oP * oR / (oP + oR)
print('Overall Precision:%.3f/%.3f Recall:%.3f/%.3f Fscore:%.3f/%.3f (t=%.6fs)'%(P, oP, R, oR, F, oF, numpy.mean(comp_time)))
| [
"cv2.createBackgroundSubtractorMOG2",
"numpy.logical_not",
"numpy.array",
"sys.exit",
"matplotlib.pyplot.imshow",
"os.path.exists",
"numpy.mean",
"cv2.threshold",
"cv2.medianBlur",
"glob.glob",
"numpy.argmax",
"numpy.nonzero",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.title",
"time.t... | [((646, 682), 'cv2.createBackgroundSubtractorMOG2', 'cv2.createBackgroundSubtractorMOG2', ([], {}), '()\n', (680, 682), False, 'import cv2\n'), ((749, 777), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 30)'}), '(figsize=(20, 30))\n', (759, 777), True, 'import matplotlib.pyplot as plt\n'), ((1072, 1116), 'glob.glob', 'glob.glob', (["('dataset/%s/label*.png' % dataset)"], {}), "('dataset/%s/label*.png' % dataset)\n", (1081, 1116), False, 'import glob\n'), ((1767, 1799), 'numpy.zeros', 'numpy.zeros', (['I.shape'], {'dtype': 'bool'}), '(I.shape, dtype=bool)\n', (1778, 1799), False, 'import numpy\n'), ((1867, 1878), 'time.time', 'time.time', ([], {}), '()\n', (1876, 1878), False, 'import time\n'), ((5017, 5028), 'time.time', 'time.time', ([], {}), '()\n', (5026, 5028), False, 'import time\n'), ((5097, 5170), 'numpy.zeros', 'numpy.zeros', (['(image_np.shape[0], image_np.shape[1], 3)'], {'dtype': 'numpy.uint8'}), '((image_np.shape[0], image_np.shape[1], 3), dtype=numpy.uint8)\n', (5108, 5170), False, 'import numpy\n'), ((5363, 5396), 'numpy.logical_and', 'numpy.logical_and', (['gt_sub', 'dt_sub'], {}), '(gt_sub, dt_sub)\n', (5380, 5396), False, 'import numpy\n'), ((5641, 5662), 'numpy.sum', 'numpy.sum', (['current_tp'], {}), '(current_tp)\n', (5650, 5662), False, 'import numpy\n'), ((5677, 5698), 'numpy.sum', 'numpy.sum', (['current_fp'], {}), '(current_fp)\n', (5686, 5698), False, 'import numpy\n'), ((5713, 5734), 'numpy.sum', 'numpy.sum', (['current_fn'], {}), '(current_fn)\n', (5722, 5734), False, 'import numpy\n'), ((6384, 6415), 'numpy.zeros', 'numpy.zeros', (['num_dt'], {'dtype': 'bool'}), '(num_dt, dtype=bool)\n', (6395, 6415), False, 'import numpy\n'), ((6737, 6763), 'numpy.sum', 'numpy.sum', (['(dt_matched == 0)'], {}), '(dt_matched == 0)\n', (6746, 6763), False, 'import numpy\n'), ((7035, 7104), 'numpy.zeros', 'numpy.zeros', (['(gt_sub.shape[0], gt_sub.shape[1], 3)'], {'dtype': 'numpy.uint8'}), '((gt_sub.shape[0], 
gt_sub.shape[1], 3), dtype=numpy.uint8)\n', (7046, 7104), False, 'import numpy\n'), ((7543, 7612), 'numpy.zeros', 'numpy.zeros', (['(dt_sub.shape[0], dt_sub.shape[1], 3)'], {'dtype': 'numpy.uint8'}), '((dt_sub.shape[0], dt_sub.shape[1], 3), dtype=numpy.uint8)\n', (7554, 7612), False, 'import numpy\n'), ((1524, 1554), 'os.path.exists', 'os.path.exists', (['image_filename'], {}), '(image_filename)\n', (1538, 1554), False, 'import os\n'), ((1559, 1589), 'os.path.exists', 'os.path.exists', (['label_filename'], {}), '(label_filename)\n', (1573, 1589), False, 'import os\n'), ((1720, 1746), 'PIL.Image.open', 'Image.open', (['label_filename'], {}), '(label_filename)\n', (1730, 1746), False, 'from PIL import Image\n'), ((1956, 2068), 'cv2.threshold', 'cv2.threshold', (['Isub', "(75 if dataset == 'beach' else 85 if dataset == 'shore' else 120)", '(255)', 'cv2.THRESH_BINARY'], {}), "(Isub, 75 if dataset == 'beach' else 85 if dataset == 'shore' else\n 120, 255, cv2.THRESH_BINARY)\n", (1969, 2068), False, 'import cv2\n'), ((5428, 5453), 'numpy.logical_not', 'numpy.logical_not', (['gt_sub'], {}), '(gt_sub)\n', (5445, 5453), False, 'import numpy\n'), ((5501, 5526), 'numpy.logical_not', 'numpy.logical_not', (['dt_sub'], {}), '(dt_sub)\n', (5518, 5526), False, 'import numpy\n'), ((7147, 7178), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(255)', '(3)'], {}), '(0, 255, 3)\n', (7167, 7178), False, 'import numpy\n'), ((7212, 7238), 'numpy.nonzero', 'numpy.nonzero', (['(gt_com == i)'], {}), '(gt_com == i)\n', (7225, 7238), False, 'import numpy\n'), ((7655, 7686), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(255)', '(3)'], {}), '(0, 255, 3)\n', (7675, 7686), False, 'import numpy\n'), ((7720, 7746), 'numpy.nonzero', 'numpy.nonzero', (['(dt_com == i)'], {}), '(dt_com == i)\n', (7733, 7746), False, 'import numpy\n'), ((8521, 8532), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8529, 8532), False, 'import sys\n'), ((8544, 8553), 'matplotlib.pyplot.clf', 
'plt.clf', ([], {}), '()\n', (8551, 8553), True, 'import matplotlib.pyplot as plt\n'), ((8556, 8576), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (8567, 8576), True, 'import matplotlib.pyplot as plt\n'), ((8577, 8630), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(image_np if zoomed_in else I)'], {'cmap': '"""gray"""'}), "(image_np if zoomed_in else I, cmap='gray')\n", (8587, 8630), True, 'import matplotlib.pyplot as plt\n'), ((8633, 8666), 'matplotlib.pyplot.title', 'plt.title', (["('Image #%d' % image_id)"], {}), "('Image #%d' % image_id)\n", (8642, 8666), True, 'import matplotlib.pyplot as plt\n'), ((8667, 8687), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (8678, 8687), True, 'import matplotlib.pyplot as plt\n'), ((8688, 8740), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(gt_sub if zoomed_in else gt)'], {'cmap': '"""gray"""'}), "(gt_sub if zoomed_in else gt, cmap='gray')\n", (8698, 8740), True, 'import matplotlib.pyplot as plt\n'), ((8743, 8763), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (8754, 8763), True, 'import matplotlib.pyplot as plt\n'), ((8764, 8816), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(dt_viz if zoomed_in else dt)'], {'cmap': '"""gray"""'}), "(dt_viz if zoomed_in else dt, cmap='gray')\n", (8774, 8816), True, 'import matplotlib.pyplot as plt\n'), ((8819, 8839), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (8830, 8839), True, 'import matplotlib.pyplot as plt\n'), ((8840, 8871), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gt_viz'], {'cmap': '"""gray"""'}), "(gt_viz, cmap='gray')\n", (8850, 8871), True, 'import matplotlib.pyplot as plt\n'), ((8874, 8888), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.5)'], {}), '(0.5)\n', (8883, 8888), True, 'import matplotlib.pyplot as plt\n'), ((1609, 1635), 'PIL.Image.open', 'Image.open', (['image_filename'], {}), '(image_filename)\n', 
(1619, 1635), False, 'from PIL import Image\n'), ((1665, 1686), 'numpy.mean', 'numpy.mean', (['I'], {'axis': '(2)'}), '(I, axis=2)\n', (1675, 1686), False, 'import numpy\n'), ((2136, 2159), 'cv2.medianBlur', 'cv2.medianBlur', (['Isub', '(5)'], {}), '(Isub, 5)\n', (2150, 2159), False, 'import cv2\n'), ((6085, 6107), 'numpy.sum', 'numpy.sum', (['(gt_com == i)'], {}), '(gt_com == i)\n', (6094, 6107), False, 'import numpy\n'), ((6240, 6262), 'numpy.sum', 'numpy.sum', (['(dt_com == i)'], {}), '(dt_com == i)\n', (6249, 6262), False, 'import numpy\n'), ((9190, 9211), 'numpy.mean', 'numpy.mean', (['comp_time'], {}), '(comp_time)\n', (9200, 9211), False, 'import numpy\n'), ((2192, 2288), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['blur', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(15)', '(-5)'], {}), '(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY, 15, -5)\n', (2213, 2288), False, 'import cv2\n'), ((8309, 8344), 'PIL.Image.fromarray', 'Image.fromarray', (['dt_viz'], {'mode': '"""RGB"""'}), "(dt_viz, mode='RGB')\n", (8324, 8344), False, 'from PIL import Image\n'), ((8403, 8438), 'PIL.Image.fromarray', 'Image.fromarray', (['gt_viz'], {'mode': '"""RGB"""'}), "(gt_viz, mode='RGB')\n", (8418, 8438), False, 'from PIL import Image\n'), ((2314, 2410), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['blur', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(15)', '(-8)'], {}), '(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY, 15, -8)\n', (2335, 2410), False, 'import cv2\n'), ((2419, 2516), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['blur', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(15)', '(-10)'], {}), '(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY, 15, -10)\n', (2440, 2516), False, 'import cv2\n'), ((2602, 2638), 'cv2.createBackgroundSubtractorMOG2', 'cv2.createBackgroundSubtractorMOG2', ([], {}), '()\n', (2636, 2638), False, 'import cv2\n'), 
((4095, 4132), 'numpy.argmax', 'numpy.argmax', (['kmeans.cluster_centers_'], {}), '(kmeans.cluster_centers_)\n', (4107, 4132), False, 'import numpy\n'), ((4396, 4431), 'numpy.zeros', 'numpy.zeros', (['Isub.shape'], {'dtype': 'bool'}), '(Isub.shape, dtype=bool)\n', (4407, 4431), False, 'import numpy\n'), ((6625, 6649), 'numpy.logical_or', 'numpy.logical_or', (['m1', 'm2'], {}), '(m1, m2)\n', (6641, 6649), False, 'import numpy\n'), ((4839, 4876), 'numpy.argmax', 'numpy.argmax', (['kmeans.cluster_centers_'], {}), '(kmeans.cluster_centers_)\n', (4851, 4876), False, 'import numpy\n'), ((4948, 4964), 'numpy.nonzero', 'numpy.nonzero', (['M'], {}), '(M)\n', (4961, 4964), False, 'import numpy\n'), ((6586, 6611), 'numpy.logical_and', 'numpy.logical_and', (['m1', 'm2'], {}), '(m1, m2)\n', (6603, 6611), False, 'import numpy\n'), ((3923, 3943), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(2)'}), '(n_clusters=2)\n', (3929, 3943), False, 'from sklearn.cluster import KMeans\n'), ((4167, 4185), 'numpy.array', 'numpy.array', (['peaks'], {}), '(peaks)\n', (4178, 4185), False, 'import numpy\n'), ((3769, 3788), 'numpy.nonzero', 'numpy.nonzero', (['Isub'], {}), '(Isub)\n', (3782, 3788), False, 'import numpy\n'), ((3948, 3968), 'numpy.array', 'numpy.array', (['peakVal'], {}), '(peakVal)\n', (3959, 3968), False, 'import numpy\n'), ((4186, 4206), 'numpy.array', 'numpy.array', (['peakVal'], {}), '(peakVal)\n', (4197, 4206), False, 'import numpy\n'), ((4248, 4266), 'numpy.array', 'numpy.array', (['peaks'], {}), '(peaks)\n', (4259, 4266), False, 'import numpy\n'), ((4311, 4329), 'numpy.array', 'numpy.array', (['peaks'], {}), '(peaks)\n', (4322, 4329), False, 'import numpy\n'), ((4616, 4636), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(2)'}), '(n_clusters=2)\n', (4622, 4636), False, 'from sklearn.cluster import KMeans\n'), ((3433, 3451), 'numpy.argmax', 'numpy.argmax', (['Itmp'], {}), '(Itmp)\n', (3445, 3451), False, 'import numpy\n'), ((4267, 4287), 'numpy.array', 
'numpy.array', (['peakVal'], {}), '(peakVal)\n', (4278, 4287), False, 'import numpy\n')] |
import csv
import os
import random
import numpy as np
import sys
from sklearn import svm
from keras.models import Sequential, model_from_yaml
from keras.layers import Dropout, Dense
from keras.callbacks import EarlyStopping
def open_csv(file_path):
# Input read as f_wh, f_wmt, f_posh, f_posmt, f_len, y
assert os.path.isfile(file_path)
raw_data = []
with open(file_path, 'r') as fid:
csv_reader = csv.reader(fid)
for row in csv_reader:
raw_data.append(row)
raw_data = raw_data[1:]
#random.shuffle(raw_data)
raw_data = np.array(raw_data).astype('float32')
features = raw_data[:, :-1]
tags = raw_data[:, -1].astype('int32')
return features, tags
def normalize(a):
mean = a.mean(1, keepdims=True)
std = a.std(1, keepdims=True)
b = np.subtract(a, mean)
c = np.divide(b, std)
return c
def evaluate_model(tags, predictions):
t_p = 0
t_n = 0
f_p = 0
f_n = 0
for idx in range(len(tags)):
# print("Tags: {}, Pred: {}".format(tags[idx], predictions[idx]))
if(tags[idx] == 1 and predictions[idx] == 1):
t_p += 1
elif(tags[idx] == 0 and predictions[idx] == 0):
t_n += 1
elif(tags[idx] == 0 and predictions[idx] == 1):
f_p += 1
else:
f_n += 1
precision = 0
if (t_p + f_p) > 0:
precision = float(t_p)/(t_p + f_p)
accuracy = 0
if (t_p + f_p + t_n + f_n) > 0:
accuracy = float((t_p + t_n))/(t_p + t_n + f_p + f_n)
recall = 0
if (t_p + f_n) > 0:
recall = float(t_p)/(t_p + f_n)
print("Precision: {}".format(precision))
print("Accuracy: {}".format(accuracy))
print("Recall: {}".format(recall))
def evaluate_svm_model(tags, predictions):
t_p = 0
t_n = 0
f_p = 0
f_n = 0
for idx in range(len(tags)):
# print("Tags: {}, Pred: {}".format(tags[idx], predictions[idx]))
if(tags[idx] == 1 and predictions[idx] == 1):
t_p += 1
elif(tags[idx] == 0 and predictions[idx] == 0):
t_n += 1
elif(tags[idx] == 0 and predictions[idx] == 1):
f_p += 1
else:
f_n += 1
precision = 0.
if (t_p + f_p) > 0:
precision = float(t_p)/(t_p + f_p)
accuracy = 0.
if (t_p + f_p + t_n + f_n) > 0:
accuracy = float((t_p + t_n))/(t_p + t_n + f_p + f_n)
recall = 0.
if (t_p + f_n) > 0:
recall = float(t_p)/(t_p + f_n)
print("Precision: {}".format(precision))
print("Accuracy: {}".format(accuracy))
print("Recall: {}".format(recall))
# PREDICTIONS
def mlp_predict(X, bsize=5):
'''
:param X: numpy array [n_samples, n_features] (input features)
:param model: path to yaml file containing model
:param weights: path to h5 file containing model weights
:return: prediction: numpy array with predictions
'''
model = model_from_yaml(open('models/mlp_architecture.yaml').read())
model.load_weights('models/mlp_model_weights.h5')
predictions = model.predict_classes(X, batch_size=bsize, verbose=1)
return predictions
def svm_predict(X):
from sklearn.externals import joblib
classifier = joblib.load('models/svm-model.pkl')
predictions = classifier.predict(X)
return predictions
path = sys.argv[1]
features, tags = open_csv(path)
features = normalize(features)
print("Predicting with svm")
predictions = svm_predict(features)
print(predictions)
real_tags = []
with open("test_tags.csv",'r') as f:
i = 0
csv_reader = csv.reader(f)
for line in csv_reader:
print(line)
i += 1
real_tags.append(line)
#print(real_tags)
real_tags = np.array(real_tags).astype('int32')
print(real_tags)
evaluate_svm_model(real_tags, predictions)
| [
"sklearn.externals.joblib.load",
"numpy.subtract",
"os.path.isfile",
"numpy.array",
"csv.reader",
"numpy.divide"
] | [((322, 347), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (336, 347), False, 'import os\n'), ((818, 838), 'numpy.subtract', 'np.subtract', (['a', 'mean'], {}), '(a, mean)\n', (829, 838), True, 'import numpy as np\n'), ((847, 864), 'numpy.divide', 'np.divide', (['b', 'std'], {}), '(b, std)\n', (856, 864), True, 'import numpy as np\n'), ((3254, 3289), 'sklearn.externals.joblib.load', 'joblib.load', (['"""models/svm-model.pkl"""'], {}), "('models/svm-model.pkl')\n", (3265, 3289), False, 'from sklearn.externals import joblib\n'), ((3604, 3617), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (3614, 3617), False, 'import csv\n'), ((426, 441), 'csv.reader', 'csv.reader', (['fid'], {}), '(fid)\n', (436, 441), False, 'import csv\n'), ((3743, 3762), 'numpy.array', 'np.array', (['real_tags'], {}), '(real_tags)\n', (3751, 3762), True, 'import numpy as np\n'), ((580, 598), 'numpy.array', 'np.array', (['raw_data'], {}), '(raw_data)\n', (588, 598), True, 'import numpy as np\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_transformer_fingerprints.ipynb (unless otherwise specified).
__all__ = ['RXNBERTFingerprintGenerator', 'RXNBERTMinhashFingerprintGenerator', 'get_default_model_and_tokenizer',
'generate_fingerprints']
# Cell
import torch
import pkg_resources
import numpy as np
from typing import List
from tqdm import tqdm
from itertools import islice
from transformers import BertModel
from .core import (
FingerprintGenerator
)
from .tokenization import (
SmilesTokenizer
)
# Cell
class RXNBERTFingerprintGenerator(FingerprintGenerator):
"""
Generate RXNBERT fingerprints from reaction SMILES
"""
def __init__(self, model: BertModel, tokenizer: SmilesTokenizer, force_no_cuda=False):
super(RXNBERTFingerprintGenerator).__init__()
self.model = model
self.model.eval()
self.tokenizer = tokenizer
self.device = torch.device("cuda" if (torch.cuda.is_available() and not force_no_cuda) else "cpu")
def convert(self, rxn_smiles: str):
"""
Convert rxn_smiles to fingerprint
Args:
rxn_smiles (str): precursors>>products
"""
bert_inputs = self.tokenizer.encode_plus(rxn_smiles,
max_length=self.model.config.max_position_embeddings,
padding=True, truncation=True, return_tensors='pt').to(self.device)
with torch.no_grad():
output = self.model(
**bert_inputs
)
embeddings = output['last_hidden_state'].squeeze()[0].cpu().numpy().tolist()
return embeddings
def convert_batch(self, rxn_smiles_list: List[str]):
bert_inputs = self.tokenizer.batch_encode_plus(rxn_smiles_list,
max_length=self.model.config.max_position_embeddings,
padding=True, truncation=True, return_tensors='pt').to(self.device)
with torch.no_grad():
output = self.model(
**bert_inputs
)
# [CLS] token embeddings in position 0
embeddings = output['last_hidden_state'][:, 0, :].cpu().numpy().tolist()
return embeddings
class RXNBERTMinhashFingerprintGenerator(FingerprintGenerator):
"""
Generate RXNBERT fingerprints from reaction SMILES
"""
def __init__(
self, model: BertModel, tokenizer: SmilesTokenizer, permutations=256, seed=42, force_no_cuda=False
):
super(RXNBERTFingerprintGenerator).__init__()
import tmap as tm
self.model = model
self.tokenizer = tokenizer
self.minhash = tm.Minhash(model.config.hidden_size, seed, permutations)
self.generator = RXNBERTFingerprintGenerator(model, tokenizer)
self.device = torch.device("cuda" if (torch.cuda.is_available() and not force_no_cuda) else "cpu")
def convert(self, rxn_smiles: str):
"""
Convert rxn_smiles to fingerprint
Args:
rxn_smiles (str): precursors>>products
"""
float_fingerprint = self.generator.convert(rxn_smiles)
minhash_fingerprint = self.minhash.from_weight_array(
float_fingerprint, method="I2CWS"
)
return minhash_fingerprint
def convert_batch(self, rxn_smiles_list: List[str]):
float_fingerprints = self.generator.convert_batch(rxn_smiles_list)
minhash_fingerprints = [
self.minhash.from_weight_array(fp, method="I2CWS")
for fp in float_fingerprints
]
return minhash_fingerprints
def get_default_model_and_tokenizer(model='bert_ft', force_no_cuda=False):
model_path = pkg_resources.resource_filename(
"rxnfp",
f"models/transformers/{model}"
)
tokenizer_vocab_path = (
pkg_resources.resource_filename(
"rxnfp",
f"models/transformers/{model}/vocab.txt"
)
)
device = torch.device("cuda" if (torch.cuda.is_available() and not force_no_cuda) else "cpu")
model = BertModel.from_pretrained(model_path)
model = model.eval()
model.to(device)
tokenizer = SmilesTokenizer(
tokenizer_vocab_path
)
return model, tokenizer
def generate_fingerprints(rxns: List[str], fingerprint_generator:FingerprintGenerator, batch_size=1) -> np.array:
fps = []
n_batches = len(rxns) // batch_size
emb_iter = iter(rxns)
for i in tqdm(range(n_batches)):
batch = list(islice(emb_iter, batch_size))
fps_batch = fingerprint_generator.convert_batch(batch)
fps += fps_batch
return np.array(fps) | [
"itertools.islice",
"pkg_resources.resource_filename",
"transformers.BertModel.from_pretrained",
"numpy.array",
"torch.cuda.is_available",
"torch.no_grad",
"tmap.Minhash"
] | [((3781, 3853), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""rxnfp"""', 'f"""models/transformers/{model}"""'], {}), "('rxnfp', f'models/transformers/{model}')\n", (3812, 3853), False, 'import pkg_resources\n'), ((3938, 4024), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""rxnfp"""', 'f"""models/transformers/{model}/vocab.txt"""'], {}), "('rxnfp',\n f'models/transformers/{model}/vocab.txt')\n", (3969, 4024), False, 'import pkg_resources\n'), ((4196, 4233), 'transformers.BertModel.from_pretrained', 'BertModel.from_pretrained', (['model_path'], {}), '(model_path)\n', (4221, 4233), False, 'from transformers import BertModel\n'), ((4761, 4774), 'numpy.array', 'np.array', (['fps'], {}), '(fps)\n', (4769, 4774), True, 'import numpy as np\n'), ((2746, 2802), 'tmap.Minhash', 'tm.Minhash', (['model.config.hidden_size', 'seed', 'permutations'], {}), '(model.config.hidden_size, seed, permutations)\n', (2756, 2802), True, 'import tmap as tm\n'), ((1478, 1493), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1491, 1493), False, 'import torch\n'), ((2060, 2075), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2073, 2075), False, 'import torch\n'), ((4630, 4658), 'itertools.islice', 'islice', (['emb_iter', 'batch_size'], {}), '(emb_iter, batch_size)\n', (4636, 4658), False, 'from itertools import islice\n'), ((4122, 4147), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4145, 4147), False, 'import torch\n'), ((951, 976), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (974, 976), False, 'import torch\n'), ((2920, 2945), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2943, 2945), False, 'import torch\n')] |
import numpy as np
import cv2 as cv
class opencv_camera():
def __init__(self, render, name, frame_interval):
self.frame_int = frame_interval
self.render = render
window_size = (self.render.win.getXSize(), self.render.win.getYSize())
self.buffer = self.render.win.makeTextureBuffer(name, *window_size, None, True)
self.cam = self.render.makeCamera(self.buffer)
self.cam.setName(name)
self.cam.node().getLens().setFilmSize(36, 24)
self.cam.node().getLens().setFocalLength(45)
self.name = name
self.render.taskMgr.add(self.set_active, name)
self.render.taskMgr.add(self.set_active, name)
self.buffer.setActive(0)
def get_image(self, target_frame=True):
tex = self.buffer.getTexture()
img = tex.getRamImage()
image = np.frombuffer(img, np.uint8)
if len(image) > 0:
image = np.reshape(image, (tex.getYSize(), tex.getXSize(), 4))
image = cv.resize(image, (0,0), fx=0.5, fy=0.5)
image = cv.flip(image, 0)
return True, image
else:
return False, None
def set_active(self, task):
if task.frame % 10 == 0:
self.buffer.setActive(1)
return task.cont
def set_inactive(self, task):
if task.frame % 10 == 1:
self.buffer.setActive(0)
return task.cont | [
"numpy.frombuffer",
"cv2.resize",
"cv2.flip"
] | [((867, 895), 'numpy.frombuffer', 'np.frombuffer', (['img', 'np.uint8'], {}), '(img, np.uint8)\n', (880, 895), True, 'import numpy as np\n'), ((1018, 1058), 'cv2.resize', 'cv.resize', (['image', '(0, 0)'], {'fx': '(0.5)', 'fy': '(0.5)'}), '(image, (0, 0), fx=0.5, fy=0.5)\n', (1027, 1058), True, 'import cv2 as cv\n'), ((1078, 1095), 'cv2.flip', 'cv.flip', (['image', '(0)'], {}), '(image, 0)\n', (1085, 1095), True, 'import cv2 as cv\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jieba
import numpy as np
def convert_small_example(example,
task_name,
vocab,
is_tokenized=False,
max_seq_length=128,
is_test=False):
input_ids = []
if task_name == 'senta':
for i, token in enumerate(jieba.cut(example[0])):
if i == max_seq_length:
break
token_id = vocab[token]
input_ids.append(token_id)
else:
if is_tokenized:
tokens = example[0][:max_seq_length]
else:
tokens = vocab(example[0])[:max_seq_length]
input_ids = vocab.convert_tokens_to_ids(tokens)
valid_length = np.array(len(input_ids), dtype='int64')
if not is_test:
label = np.array(example[-1], dtype="int64")
return input_ids, valid_length, label
else:
return input_ids, valid_length
def convert_pair_example(example,
task_name,
vocab,
is_tokenized=True,
max_seq_length=128,
is_test=False):
is_tokenized &= (task_name != 'senta')
seq1 = convert_small_example([example[0], example[2]], task_name, vocab,
is_tokenized, max_seq_length, is_test)[:2]
seq2 = convert_small_example([example[1], example[2]], task_name, vocab,
is_tokenized, max_seq_length, is_test)
pair_features = seq1 + seq2
return pair_features
def convert_two_example(example,
                        task_name,
                        tokenizer,
                        label_list,
                        max_seq_length,
                        vocab,
                        is_tokenized=True,
                        is_test=False):
    """Build features for both teacher (BERT) and student models.

    The BERT features come from `convert_example`; the student features come
    from `convert_pair_example` for 'qqp' (sentence pairs) or
    `convert_small_example` otherwise. Only the first two BERT features
    (input ids, segment ids) are kept so the label is not duplicated.
    """
    is_tokenized &= (task_name != 'senta')  # senta always re-segments with jieba
    bert_features = convert_example(
        example,
        tokenizer=tokenizer,
        label_list=label_list,
        is_tokenized=is_tokenized,
        max_seq_length=max_seq_length,
        is_test=is_test)
    if task_name == 'qqp':
        small_features = convert_pair_example(example, task_name, vocab,
                                              is_tokenized, max_seq_length,
                                              is_test)
    else:
        small_features = convert_small_example(example, task_name, vocab,
                                               is_tokenized, max_seq_length,
                                               is_test)
    return bert_features[:2] + small_features
def convert_example(example,
                    tokenizer,
                    label_list,
                    is_tokenized=False,
                    max_seq_length=512,
                    is_test=False):
    """Convert a GLUE example into BERT input features.

    Args:
        example: List of one or two texts (or token lists when
            `is_tokenized`), with the label appended as the last element
            unless `is_test` is True.
        tokenizer: Callable tokenizer exposing `cls_token`, `sep_token` and
            `convert_tokens_to_ids`.
        label_list: Possible labels for classification; None/empty for
            regression (label kept as float32).
        is_tokenized (bool): Whether `example` is already tokenized.
        max_seq_length (int): Maximum total length including special tokens.
        is_test (bool): If True, no label is expected or returned.

    Returns:
        (input_ids, segment_ids, valid_length) and, when not `is_test`, a
        trailing numpy label array.
    """

    def _truncate_seqs(seqs, max_seq_length):
        # Trim token sequences so the total (plus special tokens) fits.
        if len(seqs) == 1:  # single sentence
            # Account for [CLS] and [SEP] with "- 2"
            seqs[0] = seqs[0][0:(max_seq_length - 2)]
        else:  # sentence pair
            # Account for [CLS], [SEP], [SEP] with "- 3"
            tokens_a, tokens_b = seqs
            max_seq_length -= 3
            while True:  # truncate with longest_first strategy
                total_length = len(tokens_a) + len(tokens_b)
                if total_length <= max_seq_length:
                    break
                # Pop from whichever sequence is currently longer.
                if len(tokens_a) > len(tokens_b):
                    tokens_a.pop()
                else:
                    tokens_b.pop()
        return seqs

    def _concat_seqs(seqs, separators, seq_mask=0, separator_mask=1):
        # Concatenate sequences with separators; also build per-token segment
        # ids and a special-token mask (p_mask).
        concat = sum((seq + sep for sep, seq in zip(separators, seqs)), [])
        segment_ids = sum(
            ([i] * (len(seq) + len(sep))
             for i, (sep, seq) in enumerate(zip(separators, seqs))), [])
        if isinstance(seq_mask, int):
            seq_mask = [[seq_mask] * len(seq) for seq in seqs]
        if isinstance(separator_mask, int):
            separator_mask = [[separator_mask] * len(sep) for sep in separators]
        p_mask = sum((s_mask + mask
                      for sep, seq, s_mask, mask in zip(
                          separators, seqs, seq_mask, separator_mask)), [])
        return concat, segment_ids, p_mask

    if not is_test:
        # `label_list == None` is for regression tasks.
        label_dtype = "int64" if label_list else "float32"
        # Split off the label; map it to an index for classification.
        label = example[-1]
        example = example[:-1]
        if label_list:
            label_map = {l: i for i, l in enumerate(label_list)}
            label = label_map[label]
        label = np.array([label], dtype=label_dtype)
    if is_tokenized:
        tokens_raw = example
    else:
        # Tokenize raw text
        tokens_raw = [tokenizer(l) for l in example]
    # Truncate to the truncate_length,
    tokens_trun = _truncate_seqs(tokens_raw, max_seq_length)
    # Concate the sequences with special tokens
    tokens_trun[0] = [tokenizer.cls_token] + tokens_trun[0]
    tokens, segment_ids, _ = _concat_seqs(tokens_trun, [[tokenizer.sep_token]] *
                                          len(tokens_trun))
    # Convert the token to ids
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    valid_length = len(input_ids)
    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    # input_mask = [1] * len(input_ids)
    # NOTE: a dead `return output_list` (undefined name) used to follow the
    # branches below; it was unreachable and has been removed.
    if not is_test:
        return input_ids, segment_ids, valid_length, label
    else:
        return input_ids, segment_ids, valid_length
| [
"numpy.array",
"jieba.cut"
] | [((1430, 1466), 'numpy.array', 'np.array', (['example[-1]'], {'dtype': '"""int64"""'}), "(example[-1], dtype='int64')\n", (1438, 1466), True, 'import numpy as np\n'), ((5254, 5290), 'numpy.array', 'np.array', (['[label]'], {'dtype': 'label_dtype'}), '([label], dtype=label_dtype)\n', (5262, 5290), True, 'import numpy as np\n'), ((966, 987), 'jieba.cut', 'jieba.cut', (['example[0]'], {}), '(example[0])\n', (975, 987), False, 'import jieba\n')] |
import collections
import contextlib
import sys
import wave
import webrtcvad
import librosa
def read_wave(path):
    """Read a mono 16-bit PCM WAV file.

    Asserts mono, 16-bit samples and a sample rate supported by WebRTC VAD
    (8/16/32/48 kHz).

    Returns:
        (pcm_bytes, sample_rate)
    """
    with contextlib.closing(wave.open(path, 'rb')) as wav:
        assert wav.getnchannels() == 1
        assert wav.getsampwidth() == 2
        rate = wav.getframerate()
        assert rate in (8000, 16000, 32000, 48000)
        frames = wav.readframes(wav.getnframes())
    return frames, rate
def write_wave(path, audio, sample_rate):
    """Write raw 16-bit mono PCM bytes to a WAV file at `sample_rate`."""
    with contextlib.closing(wave.open(path, 'wb')) as wav:
        wav.setnchannels(1)      # mono
        wav.setsampwidth(2)      # 16-bit samples
        wav.setframerate(sample_rate)
        wav.writeframes(audio)
class Frame(object):
    """A fixed-duration chunk of PCM audio.

    Attributes:
        bytes: Raw PCM bytes for this frame.
        timestamp: Start time of the frame in seconds.
        duration: Length of the frame in seconds.
    """
    def __init__(self, bytes, timestamp, duration):
        self.duration = duration
        self.timestamp = timestamp
        self.bytes = bytes
def frame_generator(frame_duration_ms, audio, sample_rate):
    """Yield successive `Frame`s of `frame_duration_ms` ms from 16-bit PCM.

    The factor of 2 accounts for two bytes per sample; any trailing partial
    frame is dropped.
    """
    bytes_per_frame = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
    frame_seconds = (float(bytes_per_frame) / sample_rate) / 2.0
    start = 0
    stamp = 0.0
    while start + bytes_per_frame < len(audio):
        yield Frame(audio[start:start + bytes_per_frame], stamp, frame_seconds)
        stamp += frame_seconds
        start += bytes_per_frame
def vad_collector(sample_rate, frame_duration_ms,
                  padding_duration_ms, vad, frames):
    """Filter out non-voiced audio frames.

    Runs a padded sliding-window state machine over `frames` and yields each
    contiguous voiced region as a single bytes object of concatenated PCM
    data.  Also writes a per-frame '1'/'0' trace (plus '+'/'-' segment
    markers with timestamps) to stdout.

    Args:
        sample_rate: Audio sample rate in Hz.
        frame_duration_ms: Duration of each frame in milliseconds.
        padding_duration_ms: Length of the sliding window/ring buffer in ms.
        vad: A VAD object exposing `is_speech(bytes, sample_rate)`
            (e.g. webrtcvad.Vad).
        frames: Iterable of Frame objects (see `frame_generator`).

    Yields:
        bytes: PCM audio data for each detected voiced segment.
    """
    num_padding_frames = int(padding_duration_ms / frame_duration_ms)
    # We use a deque for our sliding window/ring buffer.
    ring_buffer = collections.deque(maxlen=num_padding_frames)
    # We have two states: TRIGGERED and NOTTRIGGERED. We start in the
    # NOTTRIGGERED state.
    triggered = False
    voiced_frames = []
    for frame in frames:
        is_speech = vad.is_speech(frame.bytes, sample_rate)
        sys.stdout.write('1' if is_speech else '0')
        if not triggered:
            ring_buffer.append((frame, is_speech))
            num_voiced = len([f for f, speech in ring_buffer if speech])
            # If we're NOTTRIGGERED and more than 90% of the frames in
            # the ring buffer are voiced frames, then enter the
            # TRIGGERED state.
            if num_voiced > 0.9 * ring_buffer.maxlen:
                triggered = True
                sys.stdout.write('+(%s)' % (ring_buffer[0][0].timestamp,))
                # We want to yield all the audio we see from now until
                # we are NOTTRIGGERED, but we have to start with the
                # audio that's already in the ring buffer.
                for f, s in ring_buffer:
                    voiced_frames.append(f)
                ring_buffer.clear()
        else:
            # We're in the TRIGGERED state, so collect the audio data
            # and add it to the ring buffer.
            voiced_frames.append(frame)
            ring_buffer.append((frame, is_speech))
            num_unvoiced = len([f for f, speech in ring_buffer if not speech])
            # If more than 90% of the frames in the ring buffer are
            # unvoiced, then enter NOTTRIGGERED and yield whatever
            # audio we've collected.
            if num_unvoiced > 0.9 * ring_buffer.maxlen:
                sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration))
                triggered = False
                yield b''.join([f.bytes for f in voiced_frames])
                ring_buffer.clear()
                voiced_frames = []
    if triggered:
        sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration))
    sys.stdout.write('\n')
    # If we have any leftover voiced audio when we run out of input,
    # yield it.
    if voiced_frames:
        yield b''.join([f.bytes for f in voiced_frames])
########################### IMPLEMENTATION ###########################
from sklearn import preprocessing
import numpy as np
from sklearn.mixture import GaussianMixture
from copy import deepcopy
from sklearn.cluster import SpectralClustering
# Split the input recording into voiced chunks with WebRTC VAD
# (aggressiveness level 2) and write each chunk to chunk-XX.wav.
audio, sample_rate = read_wave('test.wav')
vad = webrtcvad.Vad(2)
frames = frame_generator(30, audio, sample_rate)
frames = list(frames)
segments = vad_collector(sample_rate, 30, 300, vad, frames)
c = 0
for i, segment in enumerate(segments):
    path = 'chunk-%002d.wav' % (i,)
    print(' Writing %s' % (path,))
    write_wave(path, segment, sample_rate)
    c +=1
#count of chunks
# c = 14
# Feature-extraction hyper-parameters (n_fft/hop_length are in seconds and
# are converted to samples at the loaded sample rate below).
sampling_rate = 8000
n_mfcc = 13
n_fft = 0.032
hop_length = 0.010
components = 16
cov_type = 'full'
########################### Global GMM i.e UBM ###########################
# Train the Universal Background Model on MFCC + delta + delta-delta
# features of the whole recording (stacked, then transposed to frames x dims).
test_file_path = "test.wav"
y,sr = librosa.load(test_file_path)
print(np.shape(y))
mfcc = librosa.feature.mfcc(np.array(y),sr,hop_length=int(hop_length * sr),n_fft=int(n_fft*sr),n_mfcc=n_mfcc,dct_type=2)
mfcc_delta = librosa.feature.delta(mfcc)
mfcc_delta_second_order = librosa.feature.delta(mfcc,order=2)
temp = librosa.feature.delta(mfcc_delta)
inter = np.vstack((mfcc,mfcc_delta,mfcc_delta_second_order))
ubm_feature = inter.T
#ubm_feature = preprocessing.scale(ubm_feature)
# ubm_feature -= np.mean(ubm_feature)
# ubm_feature /= np.std(ubm_feature)
ubm_model = GaussianMixture(n_components = components, covariance_type = cov_type)
ubm_model.fit(ubm_feature)
print(ubm_model.score(ubm_feature))
print(ubm_model.means_)
def MAP_Estimation(model, data, m_iterations, relevance_factor=16):
    """MAP-adapt the means of a trained GMM (UBM) towards `data`.

    Classic relevance-MAP adaptation of the means only: each component mean
    is moved towards the data's responsibility-weighted mean with an
    adaptation coefficient n_k / (n_k + relevance_factor).

    Args:
        model: Fitted GMM exposing `n_components`, `means_`,
            `predict_proba(data)` and `score(data)` (e.g.
            sklearn.mixture.GaussianMixture); its `means_` are updated
            in place.
        data: (N, D) feature array.
        m_iterations: Maximum number of adaptation iterations.
        relevance_factor: MAP relevance factor (default 16; previously read
            from a module-level global of the same name and value).

    Returns:
        The adapted model (same object as `model`).
    """
    new_likelihood = 0
    iterations = 0
    while iterations < m_iterations:
        iterations += 1
        old_likelihood = new_likelihood
        z_n_k = model.predict_proba(data)            # responsibilities, (N, K)
        n_k = np.sum(z_n_k, axis=0).reshape(-1, 1)  # soft counts, (K, 1)
        mu_new = np.dot(z_n_k.T, data)             # weighted sums, (K, D)
        n_k[n_k == 0] = 1e-20                        # guard against division by zero
        mu_new = mu_new / n_k                        # responsibility-weighted means
        adaptation_coefficient = n_k / (n_k + relevance_factor)
        model.means_ = (adaptation_coefficient * mu_new
                        + (1.0 - adaptation_coefficient) * model.means_)
        new_likelihood = model.score(data)
        # Stop when the likelihood has effectively converged.
        if abs(old_likelihood - new_likelihood) < 1e-20:
            break
        print(new_likelihood)
    return model
# For each voiced chunk: extract the same MFCC(+deltas) features, MAP-adapt
# a copy of the UBM to the chunk, and keep the flattened adapted means as
# the chunk's "supervector" for clustering.
Total = []
relevance_factor = 16
for i in range(c):
    fname='chunk-%002d.wav' % (i,)
    print('MAP adaptation for {0}'.format(fname))
    temp_y,sr_temp = librosa.load(fname,sr=None)
    temp_mfcc = librosa.feature.mfcc(np.array(temp_y),sr_temp,hop_length=int(hop_length * sr_temp),n_fft=int(n_fft*sr_temp),n_mfcc=n_mfcc,dct_type=2)
    temp_mfcc_delta = librosa.feature.delta(temp_mfcc)
    temp_mfcc_delta_second_order = librosa.feature.delta(temp_mfcc,order=2)
    temp_inter = np.vstack((temp_mfcc,temp_mfcc_delta,temp_mfcc_delta_second_order))
    temp_gmm_feature = temp_inter.T
    #data = preprocessing.scale(temp_gmm_feature)
    gmm = deepcopy(ubm_model)
    gmm = MAP_Estimation(gmm,temp_gmm_feature,m_iterations =1)
    sv = gmm.means_.flatten()
    #sv = preprocessing.scale(sv)
    Total.append(sv)
# Expected number of speakers (call-centre agent + customer).
N_CLUSTERS = 2
def rearrange(labels, n):
    """Relabel cluster ids in order of first appearance (0, 1, ..., n-1).

    Only the first `n` distinct labels are remapped; any further labels are
    passed through unchanged.
    """
    order = {}
    for lab in labels:
        if lab not in order and len(order) < n:
            order[lab] = len(order)
    return [order.get(lab, lab) for lab in labels]
# Cluster the chunk supervectors by cosine similarity so chunks from the
# same speaker end up in the same cluster.
sc = SpectralClustering(n_clusters=N_CLUSTERS, affinity='cosine')
#Labels help us identify between chunks of customer and call center agent
labels = sc.fit_predict(Total)
labels = rearrange(labels, N_CLUSTERS)
print(labels)
#Since there is no way to identify the voice of a customer just from the audio
#we have assumed that customer is the one who speaks 2nd
#Normally the call center agent is the first one to speak and then the customer
#If that is not the case for a specific audio, change the condition from 'x==1' to 'x==0'
print([i for i, x in enumerate(labels) if x == 1])
| [
"sklearn.cluster.SpectralClustering",
"wave.open",
"sklearn.mixture.GaussianMixture",
"collections.deque",
"copy.deepcopy",
"librosa.feature.delta",
"sys.stdout.write",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.dot",
"numpy.vstack",
"webrtcvad.Vad",
"numpy.shape",
"librosa.load"
... | [((3983, 3999), 'webrtcvad.Vad', 'webrtcvad.Vad', (['(2)'], {}), '(2)\n', (3996, 3999), False, 'import webrtcvad\n'), ((4540, 4568), 'librosa.load', 'librosa.load', (['test_file_path'], {}), '(test_file_path)\n', (4552, 4568), False, 'import librosa\n'), ((4723, 4750), 'librosa.feature.delta', 'librosa.feature.delta', (['mfcc'], {}), '(mfcc)\n', (4744, 4750), False, 'import librosa\n'), ((4777, 4813), 'librosa.feature.delta', 'librosa.feature.delta', (['mfcc'], {'order': '(2)'}), '(mfcc, order=2)\n', (4798, 4813), False, 'import librosa\n'), ((4820, 4853), 'librosa.feature.delta', 'librosa.feature.delta', (['mfcc_delta'], {}), '(mfcc_delta)\n', (4841, 4853), False, 'import librosa\n'), ((4862, 4916), 'numpy.vstack', 'np.vstack', (['(mfcc, mfcc_delta, mfcc_delta_second_order)'], {}), '((mfcc, mfcc_delta, mfcc_delta_second_order))\n', (4871, 4916), True, 'import numpy as np\n'), ((5074, 5140), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'components', 'covariance_type': 'cov_type'}), '(n_components=components, covariance_type=cov_type)\n', (5089, 5140), False, 'from sklearn.mixture import GaussianMixture\n'), ((7550, 7610), 'sklearn.cluster.SpectralClustering', 'SpectralClustering', ([], {'n_clusters': 'N_CLUSTERS', 'affinity': '"""cosine"""'}), "(n_clusters=N_CLUSTERS, affinity='cosine')\n", (7568, 7610), False, 'from sklearn.cluster import SpectralClustering\n'), ((1511, 1555), 'collections.deque', 'collections.deque', ([], {'maxlen': 'num_padding_frames'}), '(maxlen=num_padding_frames)\n', (1528, 1555), False, 'import collections\n'), ((3503, 3525), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (3519, 3525), False, 'import sys\n'), ((4575, 4586), 'numpy.shape', 'np.shape', (['y'], {}), '(y)\n', (4583, 4586), True, 'import numpy as np\n'), ((4617, 4628), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4625, 4628), True, 'import numpy as np\n'), ((5367, 5383), 'numpy.zeros', 'np.zeros', (['(K, D)'], 
{}), '((K, D))\n', (5375, 5383), True, 'import numpy as np\n'), ((5393, 5409), 'numpy.zeros', 'np.zeros', (['(K, 1)'], {}), '((K, 1))\n', (5401, 5409), True, 'import numpy as np\n'), ((6601, 6629), 'librosa.load', 'librosa.load', (['fname'], {'sr': 'None'}), '(fname, sr=None)\n', (6613, 6629), False, 'import librosa\n'), ((6806, 6838), 'librosa.feature.delta', 'librosa.feature.delta', (['temp_mfcc'], {}), '(temp_mfcc)\n', (6827, 6838), False, 'import librosa\n'), ((6874, 6915), 'librosa.feature.delta', 'librosa.feature.delta', (['temp_mfcc'], {'order': '(2)'}), '(temp_mfcc, order=2)\n', (6895, 6915), False, 'import librosa\n'), ((6932, 7001), 'numpy.vstack', 'np.vstack', (['(temp_mfcc, temp_mfcc_delta, temp_mfcc_delta_second_order)'], {}), '((temp_mfcc, temp_mfcc_delta, temp_mfcc_delta_second_order))\n', (6941, 7001), True, 'import numpy as np\n'), ((7098, 7117), 'copy.deepcopy', 'deepcopy', (['ubm_model'], {}), '(ubm_model)\n', (7106, 7117), False, 'from copy import deepcopy\n'), ((1792, 1835), 'sys.stdout.write', 'sys.stdout.write', (["('1' if is_speech else '0')"], {}), "('1' if is_speech else '0')\n", (1808, 1835), False, 'import sys\n'), ((3436, 3498), 'sys.stdout.write', 'sys.stdout.write', (["('-(%s)' % (frame.timestamp + frame.duration))"], {}), "('-(%s)' % (frame.timestamp + frame.duration))\n", (3452, 3498), False, 'import sys\n'), ((5705, 5726), 'numpy.sum', 'np.sum', (['z_n_k'], {'axis': '(0)'}), '(z_n_k, axis=0)\n', (5711, 5726), True, 'import numpy as np\n'), ((5792, 5813), 'numpy.dot', 'np.dot', (['z_n_k.T', 'data'], {}), '(z_n_k.T, data)\n', (5798, 5813), True, 'import numpy as np\n'), ((6671, 6687), 'numpy.array', 'np.array', (['temp_y'], {}), '(temp_y)\n', (6679, 6687), True, 'import numpy as np\n'), ((145, 166), 'wave.open', 'wave.open', (['path', '"""rb"""'], {}), "(path, 'rb')\n", (154, 166), False, 'import wave\n'), ((581, 602), 'wave.open', 'wave.open', (['path', '"""wb"""'], {}), "(path, 'wb')\n", (590, 602), False, 'import wave\n'), ((2255, 
2313), 'sys.stdout.write', 'sys.stdout.write', (["('+(%s)' % (ring_buffer[0][0].timestamp,))"], {}), "('+(%s)' % (ring_buffer[0][0].timestamp,))\n", (2271, 2313), False, 'import sys\n'), ((3177, 3239), 'sys.stdout.write', 'sys.stdout.write', (["('-(%s)' % (frame.timestamp + frame.duration))"], {}), "('-(%s)' % (frame.timestamp + frame.duration))\n", (3193, 3239), False, 'import sys\n'), ((5754, 5767), 'numpy.shape', 'np.shape', (['n_k'], {}), '(n_k)\n', (5762, 5767), True, 'import numpy as np\n'), ((5962, 5975), 'numpy.shape', 'np.shape', (['n_k'], {}), '(n_k)\n', (5970, 5975), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import logging
from dsbox.ml.neural_networks.keras_factory.text_models import LSTMFactory
from dsbox.ml.neural_networks.processing.workflow import TextNeuralNetPipeline, ImageNeuralNetPipeline
# Silence TensorFlow's INFO logs and fix the RNG so tests are reproducible.
logging.getLogger("tensorflow").setLevel(logging.WARNING)
np.random.seed(42)
class TestPipeline(unittest.TestCase):
    """Smoke tests for the text and image neural-net pipeline wrappers."""
    def test_fit_predict_text_nn_pipeline_should_return_some_result(self):
        """Fitting an LSTM text pipeline and predicting should yield a result."""
        # given
        x_train = np.array(['this is really really awesome !',
                            'it is so awesome !',
                            'that sucks']
                           )
        y_train = np.array([1, 1, 0])
        # when
        model = TextNeuralNetPipeline(factory_class=LSTMFactory, num_labels=2)
        model.fit(x_train, y_train, verbose=0)
        x_test = np.array(['it is really awesome !'])
        y_pred = model.predict(x_test)
        # then
        self.assertIsNotNone(y_pred)
    def test_fit_predict_proba_text_nn_pipeline_should_return_some_result(self):
        """predict_proba on a fitted text pipeline should yield probabilities."""
        # given
        x_train = np.array(['this is really really awesome !',
                            'it is so awesome !',
                            'that sucks']
                           )
        y_train = np.array([1, 1, 0])
        # when
        model = TextNeuralNetPipeline(factory_class=LSTMFactory, num_labels=2)
        model.fit(x_train, y_train, verbose=0)
        x_test = np.array(['it is really awesome !'])
        y_pred = model.predict_proba(x_test)[0]
        # then
        self.assertIsNotNone(y_pred)
    def test_fit_image_nn_workflow_should_set_params_automatically(self):
        """fit() on the imagenet workflow should infer Xception-specific params."""
        # given
        workflow = ImageNeuralNetPipeline(weights="imagenet")
        # when
        workflow.fit()
        # then
        self.assertTupleEqual((299, 299), workflow.img_size_)
        self.assertEqual("block14_sepconv2_act", workflow.last_conv_layer_name_)
        self.assertListEqual(["avg_pool", "predictions"], workflow.classifier_layer_names_)
| [
"logging.getLogger",
"dsbox.ml.neural_networks.processing.workflow.ImageNeuralNetPipeline",
"numpy.array",
"dsbox.ml.neural_networks.processing.workflow.TextNeuralNetPipeline",
"numpy.random.seed"
] | [((292, 310), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (306, 310), True, 'import numpy as np\n'), ((233, 264), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (250, 264), False, 'import logging\n'), ((461, 546), 'numpy.array', 'np.array', (["['this is really really awesome !', 'it is so awesome !', 'that sucks']"], {}), "(['this is really really awesome !', 'it is so awesome !',\n 'that sucks'])\n", (469, 546), True, 'import numpy as np\n'), ((645, 664), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (653, 664), True, 'import numpy as np\n'), ((697, 759), 'dsbox.ml.neural_networks.processing.workflow.TextNeuralNetPipeline', 'TextNeuralNetPipeline', ([], {'factory_class': 'LSTMFactory', 'num_labels': '(2)'}), '(factory_class=LSTMFactory, num_labels=2)\n', (718, 759), False, 'from dsbox.ml.neural_networks.processing.workflow import TextNeuralNetPipeline, ImageNeuralNetPipeline\n'), ((825, 861), 'numpy.array', 'np.array', (["['it is really awesome !']"], {}), "(['it is really awesome !'])\n", (833, 861), True, 'import numpy as np\n'), ((1070, 1155), 'numpy.array', 'np.array', (["['this is really really awesome !', 'it is so awesome !', 'that sucks']"], {}), "(['this is really really awesome !', 'it is so awesome !',\n 'that sucks'])\n", (1078, 1155), True, 'import numpy as np\n'), ((1254, 1273), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (1262, 1273), True, 'import numpy as np\n'), ((1306, 1368), 'dsbox.ml.neural_networks.processing.workflow.TextNeuralNetPipeline', 'TextNeuralNetPipeline', ([], {'factory_class': 'LSTMFactory', 'num_labels': '(2)'}), '(factory_class=LSTMFactory, num_labels=2)\n', (1327, 1368), False, 'from dsbox.ml.neural_networks.processing.workflow import TextNeuralNetPipeline, ImageNeuralNetPipeline\n'), ((1434, 1470), 'numpy.array', 'np.array', (["['it is really awesome !']"], {}), "(['it is really awesome !'])\n", (1442, 1470), True, 
'import numpy as np\n'), ((1682, 1724), 'dsbox.ml.neural_networks.processing.workflow.ImageNeuralNetPipeline', 'ImageNeuralNetPipeline', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (1704, 1724), False, 'from dsbox.ml.neural_networks.processing.workflow import TextNeuralNetPipeline, ImageNeuralNetPipeline\n')] |
import numpy as np
from pipeline.input_provider.base_input_provider import BaseInputProvider
class NormalizedOneHotInputProvider(BaseInputProvider):
    """Input provider that turns a game state + screenshot into normalized
    numeric features: one-hot encodings for categorical fields, min-max
    scaling for continuous ones, and pixel values scaled to [0, 1]."""
    def __init__(self):
        BaseInputProvider.__init__(self)
        # Cached raw inputs; set by store() and consumed by retrieve().
        self.__game_state = None
        self.__screen_shot = None
    def store(self, game_state, screen_shot):
        """Cache the latest game state and screenshot."""
        self.__game_state = game_state
        self.__screen_shot = screen_shot
    def min_max_scaling(self, X, X_min, X_max, range_min = 0, range_max = 1):
        """Scale X from [X_min, X_max] into [range_min, range_max]."""
        X_std = (X - X_min) / (X_max - X_min)
        X_scaled = X_std * (range_max - range_min) + range_min
        return X_scaled
    def one_hot(self, X, length):
        """Return a float vector of size `length` with index X set to 1."""
        encoded = np.zeros(length)
        encoded[X] = 1
        return encoded
    def pre_processing(self):
        # Normalizes the input and converts to numpy
        processed_state = self.normalize_state()
        # Scale pixel values from [0, 255] to [0, 1].
        processed_screen_shot = self.__screen_shot / 255.0
        return processed_state, processed_screen_shot
    def normalize_state(self):
        """Build the flat feature vector for both players.

        Per player: one-hot player id (12), scaled health/x/y (3), one-hot
        jumping/crouching/in-move flags (2 each), action buttons (10).
        Scaling bounds (health 176, x 500, y 192) are taken from the game's
        value ranges — presumably Street Fighter; confirm against the env.
        """
        game_state = list()
        game_state.extend(self.one_hot(self.__game_state.player1.player_id, 12))
        game_state.append(self.min_max_scaling(self.__game_state.player1.health, 0, 176.0)) # Min Max Scaling
        game_state.append(self.min_max_scaling(self.__game_state.player1.x_coord, 0, 500.0))
        game_state.append(self.min_max_scaling(self.__game_state.player1.y_coord, 0, 192.0))
        game_state.extend(self.one_hot(self.__game_state.player1.is_jumping, 2))
        game_state.extend(self.one_hot(self.__game_state.player1.is_crouching, 2))
        game_state.extend(self.one_hot(self.__game_state.player1.is_player_in_move, 2))
        # game_state.append(self.__game_state.player1.move_id)
        game_state.extend(self.__game_state.player1.get_action_buttons()) # adding 10 more values
        game_state.extend(self.one_hot(self.__game_state.player2.player_id, 12))
        game_state.append(self.min_max_scaling(self.__game_state.player2.health, 0, 176.0)) # Min Max Scaling
        game_state.append(self.min_max_scaling(self.__game_state.player2.x_coord, 0, 500.0))
        game_state.append(self.min_max_scaling(self.__game_state.player2.y_coord, 0, 192.0))
        game_state.extend(self.one_hot(self.__game_state.player2.is_jumping, 2))
        game_state.extend(self.one_hot(self.__game_state.player2.is_crouching, 2))
        game_state.extend(self.one_hot(self.__game_state.player2.is_player_in_move, 2))
        # game_state.append(self.__game_state.player2.move_id)
        game_state.extend(self.__game_state.player2.get_action_buttons()) # adding 10 more values
        return np.array(game_state)
    def retrieve(self):
        """Return the pre-processed (state, screenshot) pair."""
        return self.pre_processing()
    def store_and_retrieve(self, game_state, screen_shot):
        """Store new inputs and immediately return their processed form."""
        self.store(game_state, screen_shot)
        return self.retrieve()
| [
"numpy.array",
"numpy.zeros",
"pipeline.input_provider.base_input_provider.BaseInputProvider.__init__"
] | [((184, 216), 'pipeline.input_provider.base_input_provider.BaseInputProvider.__init__', 'BaseInputProvider.__init__', (['self'], {}), '(self)\n', (210, 216), False, 'from pipeline.input_provider.base_input_provider import BaseInputProvider\n'), ((676, 692), 'numpy.zeros', 'np.zeros', (['length'], {}), '(length)\n', (684, 692), True, 'import numpy as np\n'), ((2646, 2666), 'numpy.array', 'np.array', (['game_state'], {}), '(game_state)\n', (2654, 2666), True, 'import numpy as np\n')] |
# Copyright 2017-2018 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Test for the EpisodeTimeMs callback."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import numpy as np
import six
import deepmind_lab
class EpisodeTimeTest(unittest.TestCase):
  """Checks EPISODE_TIME_SECONDS advances one second after `fps` steps."""
  def run_at_frame_rate(self, fps):
    """Step one in-game second of no-op actions at `fps` and check the clock."""
    env = deepmind_lab.Lab(
        'tests/episode_time_test', ['EPISODE_TIME_SECONDS'],
        config={
            'fps': str(fps),
            'width': '32',
            'height': '32'
        })
    env.reset()
    # A 7-element zero vector is the no-op action.
    nop = np.zeros((7,), dtype=np.intc)
    for _ in six.moves.range(0, fps):
      env.step(nop, 1)
    obs = env.observations()
    self.assertEqual(obs['EPISODE_TIME_SECONDS'][0], 1.0)
  def test_at_60(self):
    self.run_at_frame_rate(60)
  def test_at_30(self):
    self.run_at_frame_rate(30)
if __name__ == '__main__':
  # Under Bazel, runfiles are resolved relative to TEST_SRCDIR.
  if os.environ.get('TEST_SRCDIR'):
    deepmind_lab.set_runfiles_path(
        os.path.join(os.environ['TEST_SRCDIR'],
                     'org_deepmind_lab'))
  unittest.main()
| [
"six.moves.range",
"os.environ.get",
"os.path.join",
"numpy.zeros",
"unittest.main"
] | [((1604, 1633), 'os.environ.get', 'os.environ.get', (['"""TEST_SRCDIR"""'], {}), "('TEST_SRCDIR')\n", (1618, 1633), False, 'import os\n'), ((1763, 1778), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1776, 1778), False, 'import unittest\n'), ((1279, 1308), 'numpy.zeros', 'np.zeros', (['(7,)'], {'dtype': 'np.intc'}), '((7,), dtype=np.intc)\n', (1287, 1308), True, 'import numpy as np\n'), ((1323, 1346), 'six.moves.range', 'six.moves.range', (['(0)', 'fps'], {}), '(0, fps)\n', (1338, 1346), False, 'import six\n'), ((1679, 1738), 'os.path.join', 'os.path.join', (["os.environ['TEST_SRCDIR']", '"""org_deepmind_lab"""'], {}), "(os.environ['TEST_SRCDIR'], 'org_deepmind_lab')\n", (1691, 1738), False, 'import os\n')] |
from itertools import tee
import numpy as np
import scipy.interpolate as intp
from scipy.signal import savgol_filter
def get_edge_bin(array):
    """Detect the edge indices of a binary 1-D array.

    Args:
        array (:class:`numpy.ndarray`): A list or Numpy 1d array, with binary
            (0/1) or boolean (True/False) values.
    Returns:
        list: A list of (start, end) index pairs of the non-zero blocks,
        with `end` exclusive.
    Examples:
        .. code-block:: python

            >>> a = [0,1,1,0,0,0,1,0,1]
            >>> get_edge_bin(a)
            [(1, 3), (6, 7), (8, 9)]
            >>> b = [True, False, True, True, False, False]
            >>> get_edge_bin(b)
            [(0, 1), (2, 4)]
    """
    # Pad with zeros on both sides so edges at the array boundary are found.
    padded = np.concatenate(([0], np.int64(array), [0]))
    # Differences against the previous element: +1 marks a rising edge,
    # -1 a falling edge.
    step = padded - np.roll(padded, 1)
    starts = np.nonzero(step == 1)[0] - 1
    ends = np.nonzero(step == -1)[0] - 1
    return list(zip(starts, ends))
def get_local_minima(x, window=None):
    """Get the local minima of a 1-d array, optionally validated in a window.

    Args:
        x (:class:`numpy.ndarray`): A list or Numpy 1d array.
        window (*int* or :class:`numpy.ndarray`): An odd integer or an array
            of odd integers giving the length of the searching window per
            element (optional).
    Returns:
        tuple: A tuple containing:
            * **index** (:class:`numpy.ndarray`) – Indices of local minima.
            * **x[index]** (:class:`numpy.ndarray`) – Values of local minima.
    Raises:
        ValueError: If `window` is neither an int nor an integer array, or
            if any window length is even.
    """
    x = np.array(x)
    dif = np.diff(x)
    ind = dif > 0
    # A local minimum is where the sign of the difference flips from
    # non-positive to positive.
    tmp = np.logical_xor(ind, np.roll(ind, 1))
    idx = np.logical_and(tmp, ind)
    index = np.where(idx)[0]
    if window is None:
        # window is not given
        return index, x[index]
    if isinstance(window, int):
        # scalar window: use the same length everywhere
        window = np.repeat(window, len(x))
    elif isinstance(window, np.ndarray):
        # accept any integer dtype (the previous explicit int16/32/64 check
        # wrongly rejected int8/uint arrays)
        if not np.issubdtype(window.dtype, np.integer):
            raise ValueError('window array are not integers')
    else:
        raise ValueError('window must be an int or an integer array')
    if 0 in window % 2:
        # not all of the windows are odd
        raise ValueError('window lengths must be odd')
    halfwin_lst = (window - 1)//2
    index_lst = []
    for i in index:
        # keep a candidate only if it is the minimum of its clipped window
        halfwin = halfwin_lst[i]
        i1 = max(0, i - halfwin)
        i2 = min(i + halfwin + 1, len(x))
        if i == x[i1:i2].argmin() + i1:
            index_lst.append(i)
    if len(index_lst) > 0:
        index_lst = np.array(index_lst)
        return index_lst, x[index_lst]
    return np.array([]), np.array([])
def implete_none(lst):
    """Replace the None elements at the beginning and the end of a list by
    auto increment integers.

    The leading/trailing `None`s are replaced by integers counting down/up
    from the first/last non-None value.  `None`s between two values remain.

    Note: the previous implementation located the boundaries by *value*
    equality, which corrupted the output when boundary values were
    duplicated; boundaries are now located by index.

    Args:
        lst (list): A list containing None values (must contain at least one
            non-None value).
    Returns:
        list: A list with the end `None`s replaced by integers.
    Raises:
        IndexError: If `lst` contains no non-None element.
    Examples:
        .. code-block:: python

            >>> a = [None,None,3,4,None,5,6,None,None]
            >>> implete_none(a)
            [1, 2, 3, 4, None, 5, 6, 7, 8]
    """
    # Indices of all non-None elements, in order.
    notnone_indices = [i for i, v in enumerate(lst) if v is not None]
    first = notnone_indices[0]
    last = notnone_indices[-1]
    value1 = lst[first]
    value2 = lst[last]
    newlst = []
    for i, v in enumerate(lst):
        if i < first:
            # count down from the first non-None value
            newlst.append(value1 - (first - i))
        elif i > last:
            # count up from the last non-None value
            newlst.append(value2 + (i - last))
        else:
            newlst.append(v)
    return newlst
def derivative(*args, **kwargs):
    """Get the first derivative of data arrays (*x*, *y*).

    If **x** is not given, the single argument is taken as **y** and the
    derivative is computed against its indices.

    Args:
        x (list or :class:`numpy.ndarray`): X-values (optional).
        y (list or :class:`numpy.ndarray`): Y-values.
        points (int): Number of points used to calculate the derivative
            (optional, default is 3; only 3 is supported).
    Returns:
        :class:`numpy.ndarray`: Derivative of the input array.
    Raises:
        ValueError: On a wrong number of arguments or unsupported `points`.
    """
    if len(args) == 1:
        y = np.array(args[0], dtype=np.float64)
        x = np.arange(y.size)
    elif len(args) == 2:
        x = np.array(args[0], dtype=np.float64)
        y = np.array(args[1], dtype=np.float64)
    else:
        raise ValueError
    points = kwargs.pop('points', 3)
    if points != 3:
        raise ValueError
    # Central differences in the interior ...
    der = (np.roll(y, -1) - np.roll(y, 1)) / (np.roll(x, -1) - np.roll(x, 1))
    # ... and one-sided 3-point formulas at the two boundaries.
    coeff = np.array([-3., 4., -1.])
    der[0] = (coeff * y[0:3]).sum() / (coeff * x[0:3]).sum()
    der[-1] = (-coeff[::-1] * y[-3:]).sum() / (-coeff[::-1] * x[-3:]).sum()
    return der
def pairwise(array):
    """Iterate over consecutive overlapping pairs of an iterable.

    Args:
        array (list or :class:`numpy.ndarray`): The input iterable.
    Returns:
        :class:`zip`: Iterator of (array[i], array[i+1]) tuples.
    """
    # Duplicate the iterator and advance the second copy by one element,
    # then zip them back together (the classic itertools recipe).
    left, right = tee(array)
    next(right, None)
    return zip(left, right)
def smooth(array, points, deg):
    """Smooth an array with fixed 5-point Savitzky-Golay-style weights.

    Only ``points=5`` with ``deg`` 2 or 3 is supported; the boundary rows use
    asymmetric weights and the interior uses the symmetric kernel.

    Args:
        array (:class:`numpy.ndarray`): Input 1-d array (size >= 5).
        points (int): Points of smoothing (must be 5).
        deg (int): Degree of smoothing (2 or 3).
    Returns:
        :class:`numpy.ndarray`: Smoothed 1-d array.
    Raises:
        ValueError: For unsupported `points`/`deg` combinations (previously
            this fell through to a NameError).
    """
    n = array.size
    if points != 5 or deg not in (2, 3):
        raise ValueError('only points=5 with deg in (2, 3) is supported')
    if deg == 2:
        w_2 = np.array([31., 9., -3., -5., 3.])/35.
        w_1 = np.array([ 9., 13., 12., 6., -5.])/35.
        w_0 = np.array([-3., 12., 17., 12., -3.])/35.
    else:
        w_2 = np.array([69., 4., -6., 4., -1.])/70.
        w_1 = np.array([ 2., 27., 12., -8., 2.])/35.
        w_0 = np.array([-3., 12., 17., 12., -3.])/35.
    # Build the banded smoothing matrix: asymmetric rows at both edges,
    # the symmetric kernel along the diagonal elsewhere.
    a = np.zeros((n, n))
    a[0, 0:5] = w_2
    a[1, 0:5] = w_1
    for i in np.arange(2, n-2):
        a[i, i-2:i+3] = w_0
    a[-2, -5:] = w_1[::-1]
    a[-1, -5:] = w_2[::-1]
    # Plain matrix-vector product instead of the deprecated np.matrix.
    return a.dot(array)
def iterative_savgol_filter(y, winlen=5, order=3, maxiter=10,
        upper_clip=None, lower_clip=None):
    """Smooth the input array with a Savitzky-Golay filter with lower and/or
    upper sigma clipping.

    On each iteration, masked-out samples are replaced by a cubic-spline
    interpolation over the surviving samples before the filter is applied;
    residual outliers beyond the clip thresholds are then removed from the
    mask.  Iteration stops when the mask no longer changes or `maxiter` is
    reached.

    Args:
        y (:class:`numpy.ndarray`): Input array.
        winlen (int): Window length of the Savitzky-Golay filter (odd).
        order (int): Order of the Savitzky-Golay filter.
        maxiter (int): Maximum number of iterations.
        upper_clip (float): Upper sigma-clipping value.
        lower_clip (float): Lower sigma-clipping value.
    Returns:
        tuple: A tuple containing:
            * **ysmooth** (:class:`numpy.ndarray`) – Smoothed y values.
            * **yres** (:class:`numpy.ndarray`) – Residuals of y values.
            * **mask** (:class:`numpy.ndarray`) – Boolean mask of kept values.
            * **std** (float) – Standard deviation of masked residuals.
    """
    x = np.arange(y.size)
    # np.bool was removed in NumPy >= 1.24; plain bool is equivalent.
    mask = np.ones_like(y, dtype=bool)
    for _ in range(maxiter):
        # Fill masked values in y using spline interpolation.
        f = intp.InterpolatedUnivariateSpline(x[mask], y[mask], k=3)
        ysmooth = savgol_filter(f(x), window_length=winlen, polyorder=order)
        yres = y - ysmooth
        std = yres[mask].std()
        # Start from a copy of the current mask and apply the clippings.
        new_mask = mask.copy()
        if lower_clip is not None:
            new_mask &= yres > -lower_clip * std
        if upper_clip is not None:
            new_mask &= yres < upper_clip * std
        if new_mask.sum() == mask.sum():
            # Converged: no sample was newly rejected.
            break
        mask = new_mask
    return ysmooth, yres, mask, std
| [
"numpy.insert",
"numpy.ones_like",
"numpy.int64",
"numpy.roll",
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.logical_and",
"numpy.where",
"numpy.diff",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.nonzero",
"itertools.tee",
"numpy.matrix",
"numpy.arange"
] | [((745, 760), 'numpy.int64', 'np.int64', (['array'], {}), '(array)\n', (753, 760), True, 'import numpy as np\n'), ((774, 797), 'numpy.insert', 'np.insert', (['array1', '(0)', '(0)'], {}), '(array1, 0, 0)\n', (783, 797), True, 'import numpy as np\n'), ((811, 831), 'numpy.append', 'np.append', (['array1', '(0)'], {}), '(array1, 0)\n', (820, 831), True, 'import numpy as np\n'), ((1607, 1618), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1615, 1618), True, 'import numpy as np\n'), ((1629, 1639), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (1636, 1639), True, 'import numpy as np\n'), ((1714, 1738), 'numpy.logical_and', 'np.logical_and', (['tmp', 'ind'], {}), '(tmp, ind)\n', (1728, 1738), True, 'import numpy as np\n'), ((5746, 5756), 'itertools.tee', 'tee', (['array'], {}), '(array)\n', (5749, 5756), False, 'from itertools import tee\n'), ((7674, 7691), 'numpy.arange', 'np.arange', (['y.size'], {}), '(y.size)\n', (7683, 7691), True, 'import numpy as np\n'), ((7703, 7733), 'numpy.ones_like', 'np.ones_like', (['y'], {'dtype': 'np.bool'}), '(y, dtype=np.bool)\n', (7715, 7733), True, 'import numpy as np\n'), ((851, 869), 'numpy.roll', 'np.roll', (['array1', '(1)'], {}), '(array1, 1)\n', (858, 869), True, 'import numpy as np\n'), ((1688, 1703), 'numpy.roll', 'np.roll', (['ind', '(1)'], {}), '(ind, 1)\n', (1695, 1703), True, 'import numpy as np\n'), ((1750, 1763), 'numpy.where', 'np.where', (['idx'], {}), '(idx)\n', (1758, 1763), True, 'import numpy as np\n'), ((4939, 4974), 'numpy.array', 'np.array', (['args[0]'], {'dtype': 'np.float64'}), '(args[0], dtype=np.float64)\n', (4947, 4974), True, 'import numpy as np\n'), ((4987, 5004), 'numpy.arange', 'np.arange', (['y.size'], {}), '(y.size)\n', (4996, 5004), True, 'import numpy as np\n'), ((5324, 5351), 'numpy.array', 'np.array', (['[-3.0, 4.0, -1.0]'], {}), '([-3.0, 4.0, -1.0])\n', (5332, 5351), True, 'import numpy as np\n'), ((6514, 6530), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (6522, 6530), 
True, 'import numpy as np\n'), ((6596, 6615), 'numpy.arange', 'np.arange', (['(2)', '(n - 2)'], {}), '(2, n - 2)\n', (6605, 6615), True, 'import numpy as np\n'), ((6723, 6735), 'numpy.matrix', 'np.matrix', (['a'], {}), '(a)\n', (6732, 6735), True, 'import numpy as np\n'), ((6778, 6794), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (6786, 6794), True, 'import numpy as np\n'), ((7833, 7889), 'scipy.interpolate.InterpolatedUnivariateSpline', 'intp.InterpolatedUnivariateSpline', (['x[mask]', 'y[mask]'], {'k': '(3)'}), '(x[mask], y[mask], k=3)\n', (7866, 7889), True, 'import scipy.interpolate as intp\n'), ((883, 903), 'numpy.nonzero', 'np.nonzero', (['(tmp == 1)'], {}), '(tmp == 1)\n', (893, 903), True, 'import numpy as np\n'), ((924, 945), 'numpy.nonzero', 'np.nonzero', (['(tmp == -1)'], {}), '(tmp == -1)\n', (934, 945), True, 'import numpy as np\n'), ((2850, 2869), 'numpy.array', 'np.array', (['index_lst'], {}), '(index_lst)\n', (2858, 2869), True, 'import numpy as np\n'), ((5042, 5077), 'numpy.array', 'np.array', (['args[0]'], {'dtype': 'np.float64'}), '(args[0], dtype=np.float64)\n', (5050, 5077), True, 'import numpy as np\n'), ((5090, 5125), 'numpy.array', 'np.array', (['args[1]'], {'dtype': 'np.float64'}), '(args[1], dtype=np.float64)\n', (5098, 5125), True, 'import numpy as np\n'), ((8119, 8152), 'numpy.ones_like', 'np.ones_like', (['mask'], {'dtype': 'np.bool'}), '(mask, dtype=np.bool)\n', (8131, 8152), True, 'import numpy as np\n'), ((2946, 2958), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2954, 2958), True, 'import numpy as np\n'), ((2960, 2972), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2968, 2972), True, 'import numpy as np\n'), ((5251, 5265), 'numpy.roll', 'np.roll', (['y', '(-1)'], {}), '(y, -1)\n', (5258, 5265), True, 'import numpy as np\n'), ((5267, 5280), 'numpy.roll', 'np.roll', (['y', '(1)'], {}), '(y, 1)\n', (5274, 5280), True, 'import numpy as np\n'), ((5282, 5296), 'numpy.roll', 'np.roll', (['x', '(-1)'], {}), '(x, 
-1)\n', (5289, 5296), True, 'import numpy as np\n'), ((5298, 5311), 'numpy.roll', 'np.roll', (['x', '(1)'], {}), '(x, 1)\n', (5305, 5311), True, 'import numpy as np\n'), ((6148, 6186), 'numpy.array', 'np.array', (['[31.0, 9.0, -3.0, -5.0, 3.0]'], {}), '([31.0, 9.0, -3.0, -5.0, 3.0])\n', (6156, 6186), True, 'import numpy as np\n'), ((6206, 6244), 'numpy.array', 'np.array', (['[9.0, 13.0, 12.0, 6.0, -5.0]'], {}), '([9.0, 13.0, 12.0, 6.0, -5.0])\n', (6214, 6244), True, 'import numpy as np\n'), ((6264, 6304), 'numpy.array', 'np.array', (['[-3.0, 12.0, 17.0, 12.0, -3.0]'], {}), '([-3.0, 12.0, 17.0, 12.0, -3.0])\n', (6272, 6304), True, 'import numpy as np\n'), ((6345, 6383), 'numpy.array', 'np.array', (['[69.0, 4.0, -6.0, 4.0, -1.0]'], {}), '([69.0, 4.0, -6.0, 4.0, -1.0])\n', (6353, 6383), True, 'import numpy as np\n'), ((6403, 6441), 'numpy.array', 'np.array', (['[2.0, 27.0, 12.0, -8.0, 2.0]'], {}), '([2.0, 27.0, 12.0, -8.0, 2.0])\n', (6411, 6441), True, 'import numpy as np\n'), ((6461, 6501), 'numpy.array', 'np.array', (['[-3.0, 12.0, 17.0, 12.0, -3.0]'], {}), '([-3.0, 12.0, 17.0, 12.0, -3.0])\n', (6469, 6501), True, 'import numpy as np\n')] |
"""Option helper functions"""
__docformat__ = "numpy"
import argparse
from typing import List
import pandas as pd
import numpy as np
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
check_non_negative,
)
# pylint: disable=R1710
def load(other_args: List[str]) -> str:
    """Load ticker into object

    Parameters
    ----------
    other_args: List[str]
        Argparse arguments

    Returns
    -------
    str:
        Ticker ("" when parsing failed or was aborted)
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="opload",
        description="Load a ticker into option menu",
    )
    parser.add_argument(
        "-t",
        "--ticker",
        action="store",
        dest="ticker",
        required="-h" not in other_args,
        help="Stock ticker",
    )
    try:
        # Allow the ticker to be given positionally by prepending the flag
        if other_args and "-t" not in other_args and "-h" not in other_args:
            other_args.insert(0, "-t")
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if ns_parser:
            print("")
            return ns_parser.ticker
        return ""
    except SystemExit:
        print("")
        return ""
    except Exception as e:
        print(e, "\n")
        return ""
# pylint: disable=no-else-return
def select_option_date(avalaiable_dates: List[str], other_args: List[str]) -> str:
    """Select an option expiration date out of a supplied list

    Parameters
    ----------
    avalaiable_dates: List[str]
        Possible date options
    other_args: List[str]
        Argparse arguments

    Returns
    -------
    expiry_date: str
        Selected expiry date ("" when nothing valid was chosen)
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="exp",
        description="See and set expiration date",
    )
    parser.add_argument(
        "-d",
        "--date",
        dest="n_date",
        action="store",
        type=int,
        default=-1,
        choices=range(len(avalaiable_dates)),
        help="Select index for expiry date.",
    )
    parser.add_argument(
        "-D",
        dest="date",
        type=str,
        choices=avalaiable_dates + [""],
        help="Select date (YYYY-MM-DD)",
        default="",
    )
    try:
        # A bare leading value is treated as an index selection
        if other_args and "-" not in other_args[0]:
            other_args.insert(0, "-d")
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return ""
        # Nothing selected: list the valid expiry dates and bail out
        if ns_parser.n_date == -1 and not ns_parser.date:
            print("\nAvailable expiry dates:")
            for i, d in enumerate(avalaiable_dates):
                print(f"  {(2 - len(str(i))) * ' '}{i}. {d}")
            print("")
            return ""
        # An explicit date string takes precedence over the index
        if ns_parser.date:
            if ns_parser.date not in avalaiable_dates:
                print("Expiration not an option")
                return ""
            print(f"Expiraration set to {ns_parser.date} \n")
            return ns_parser.date
        expiry_date = avalaiable_dates[ns_parser.n_date]
        print(f"Expiraration set to {expiry_date} \n")
        return expiry_date
    except Exception as e:
        print(e, "\n")
        return ""
def get_loss_at_strike(strike: float, chain: pd.DataFrame) -> float:
    """Function to get the total option-holder loss at the given strike

    Parameters
    ----------
    strike: Union[int,float]
        Value to calculate total loss at
    chain: Dataframe:
        Dataframe indexed by strike, containing at least OI_call and OI_put
    Returns
    -------
    loss: Union[float,int]
        Total loss
    """
    # Work on explicit copies: the original chained indexing
    # (chain[cond][["OI_call"]]) returns a slice, and assigning a new column
    # to it raises pandas' SettingWithCopyWarning.
    itm_calls = chain[chain.index < strike][["OI_call"]].copy()
    itm_calls["loss"] = (strike - itm_calls.index) * itm_calls["OI_call"]
    call_loss = itm_calls["loss"].sum()

    itm_puts = chain[chain.index > strike][["OI_put"]].copy()
    itm_puts["loss"] = (itm_puts.index - strike) * itm_puts["OI_put"]
    put_loss = itm_puts["loss"].sum()

    return call_loss + put_loss
def calculate_max_pain(chain: pd.DataFrame) -> int:
    """Returns the max pain for a given call/put dataframe

    Max pain is the strike at which the total loss across the open call and
    put contracts is smallest.

    Parameters
    ----------
    chain: DataFrame
        Dataframe to calculate value from (needs OI_call and OI_put columns)

    Returns
    -------
    max_pain : int
        Max pain value (np.nan when the required columns are missing)
    """
    if ("OI_call" not in chain.columns) or ("OI_put" not in chain.columns):
        print("Incorrect columns. Unable to parse max pain")
        return np.nan
    strikes = np.array(chain.index)
    # Total loss at expiry for every candidate strike price
    chain["loss"] = [get_loss_at_strike(price_at_exp, chain) for price_at_exp in strikes]
    return chain["loss"].idxmin()
def vol(other_args: List[str]):
    """Parse volume argparse

    Parameters
    ----------
    other_args: List[str]
        Argparse arguments

    Returns
    -------
    ns_parser: argparse.Namespace
        Parsed namespace, or None on error/invalid input
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="vol",
        description="Plot volume. Volume refers to the number of contracts traded today.",
    )
    parser.add_argument(
        "-m", "--min", dest="min", type=check_non_negative, default=-1,
        help="Min strike to plot",
    )
    parser.add_argument(
        "-M", "--max", dest="max", type=check_non_negative, default=-1,
        help="Max strike to plot",
    )
    parser.add_argument(
        "--calls", dest="calls", action="store_true", default=False,
        help="Flag to plot call options only",
    )
    parser.add_argument(
        "--puts", dest="puts", action="store_true", default=False,
        help="Flag to plot put options only",
    )
    parser.add_argument(
        "--source", dest="source", type=str, default="tr", choices=["tr", "yf"],
        help="Source to get data from",
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
    except Exception as e:
        print(e, "\n")
        return None
    return ns_parser if ns_parser else None
def voi(other_args: List[str]):
    """Parse Volume + open interest argparse

    Parameters
    ----------
    other_args: List[str]
        Argparse arguments

    Returns
    -------
    ns_parser: argparse.Namespace
        Parsed namespace, or None on error/invalid input
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="voi",
        description="""
            Plots Volume + Open Interest of calls vs puts.
        """,
    )
    parser.add_argument(
        "-v", "--minv", dest="min_vol", type=check_non_negative, default=-1,
        help="minimum volume (considering open interest) threshold of the plot.",
    )
    parser.add_argument(
        "-m", "--min", dest="min_sp", type=check_non_negative, default=-1,
        help="minimum strike price to consider in the plot.",
    )
    parser.add_argument(
        "-M", "--max", dest="max_sp", type=check_non_negative, default=-1,
        help="maximum strike price to consider in the plot.",
    )
    parser.add_argument(
        "--source", dest="source", type=str, default="tr", choices=["tr", "yf"],
        help="Source to get data from",
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
    except Exception as e:
        print(e, "\n")
        return None
    return ns_parser if ns_parser else None
def oi(other_args: List[str]):
    """Parse Open Interest argparse

    Parameters
    ----------
    other_args: List[str]
        Argparse arguments

    Returns
    -------
    ns_parser: argparse.Namespace
        Parsed namespace, or None on error/invalid input
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="oi",
        description="Plot open interest. Open interest represents the number of contracts that exist.",
    )
    parser.add_argument(
        "-m", "--min", dest="min", type=check_non_negative, default=-1,
        help="Min strike to plot",
    )
    parser.add_argument(
        "-M", "--max", dest="max", type=check_non_negative, default=-1,
        help="Max strike to plot",
    )
    parser.add_argument(
        "--calls", dest="calls", action="store_true", default=False,
        help="Flag to plot call options only",
    )
    parser.add_argument(
        "--puts", dest="puts", action="store_true", default=False,
        help="Flag to plot put options only",
    )
    parser.add_argument(
        "--source", dest="source", type=str, default="tr", choices=["tr", "yf"],
        help="Source to get data from",
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
    except Exception as e:
        print(e, "\n")
        return None
    return ns_parser if ns_parser else None
| [
"numpy.array",
"gamestonk_terminal.helper_funcs.parse_known_args_and_warn",
"argparse.ArgumentParser"
] | [((491, 657), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'prog': '"""opload"""', 'description': '"""Load a ticker into option menu"""'}), "(add_help=False, formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, prog='opload', description=\n 'Load a ticker into option menu')\n", (514, 657), False, 'import argparse\n'), ((1716, 1876), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'prog': '"""exp"""', 'description': '"""See and set expiration date"""'}), "(add_help=False, formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, prog='exp', description=\n 'See and set expiration date')\n", (1739, 1876), False, 'import argparse\n'), ((4562, 4583), 'numpy.array', 'np.array', (['chain.index'], {}), '(chain.index)\n', (4570, 4583), True, 'import numpy as np\n'), ((5191, 5392), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'prog': '"""vol"""', 'description': '"""Plot volume. Volume refers to the number of contracts traded today."""'}), "(add_help=False, formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, prog='vol', description=\n 'Plot volume. 
Volume refers to the number of contracts traded today.')\n", (5214, 5392), False, 'import argparse\n'), ((6755, 6973), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'prog': '"""voi"""', 'description': '"""\n Plots Volume + Open Interest of calls vs puts.\n """'}), '(add_help=False, formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, prog=\'voi\', description=\n """\n Plots Volume + Open Interest of calls vs puts.\n """\n )\n', (6778, 6973), False, 'import argparse\n'), ((8284, 8502), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'prog': '"""oi"""', 'description': '"""Plot open interest. Open interest represents the number of contracts that exist."""'}), "(add_help=False, formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, prog='oi', description=\n 'Plot open interest. Open interest represents the number of contracts that exist.'\n )\n", (8307, 8502), False, 'import argparse\n'), ((1033, 1078), 'gamestonk_terminal.helper_funcs.parse_known_args_and_warn', 'parse_known_args_and_warn', (['parser', 'other_args'], {}), '(parser, other_args)\n', (1058, 1078), False, 'from gamestonk_terminal.helper_funcs import parse_known_args_and_warn, check_non_negative\n'), ((2471, 2516), 'gamestonk_terminal.helper_funcs.parse_known_args_and_warn', 'parse_known_args_and_warn', (['parser', 'other_args'], {}), '(parser, other_args)\n', (2496, 2516), False, 'from gamestonk_terminal.helper_funcs import parse_known_args_and_warn, check_non_negative\n'), ((6319, 6364), 'gamestonk_terminal.helper_funcs.parse_known_args_and_warn', 'parse_known_args_and_warn', (['parser', 'other_args'], {}), '(parser, other_args)\n', (6344, 6364), False, 'from gamestonk_terminal.helper_funcs import parse_known_args_and_warn, check_non_negative\n'), ((7833, 7878), 
'gamestonk_terminal.helper_funcs.parse_known_args_and_warn', 'parse_known_args_and_warn', (['parser', 'other_args'], {}), '(parser, other_args)\n', (7858, 7878), False, 'from gamestonk_terminal.helper_funcs import parse_known_args_and_warn, check_non_negative\n'), ((9424, 9469), 'gamestonk_terminal.helper_funcs.parse_known_args_and_warn', 'parse_known_args_and_warn', (['parser', 'other_args'], {}), '(parser, other_args)\n', (9449, 9469), False, 'from gamestonk_terminal.helper_funcs import parse_known_args_and_warn, check_non_negative\n')] |
"""
Our modification of the OpenAI Gym Continuous Mountain Car by <NAME>:
https://github.com/openai/gym/blob/master/gym/envs/classic_control/continuous_mountain_car.py
which was (ultimately) based on Sutton's implementation:
http://incompleteideas.net/sutton/MountainCar/MountainCar1.cp
"""
from pilco.errors import EnvironmentError
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
class MountainCar(gym.Env):
    """Continuous mountain-car environment (modified from the OpenAI Gym one).

    State is ``np.array([position, velocity])``; the single continuous action
    is the force per mass applied to the car, scaled by ``self.power``.
    Unlike the stock Gym environment, :meth:`step` returns ``None`` as the
    reward and never flags the episode done — presumably reward and
    termination are handled by the surrounding library (TODO confirm).
    """

    metadata = {'render.modes': ['human', 'rgb_array'],
                'video.frames_per_second': 30}

    def __init__(self):

        # State and action bounds
        self.min_action = -1.0
        self.max_action = 1.0
        self.min_position = - 3.0
        self.max_position = 3.0
        self.max_speed = 0.07
        self.goal_position = 0.5

        # Force per mass the car can output
        self.power = 0.0015

        # Lower/upper corners of the (position, velocity) state box
        self.low_state = np.array([self.min_position, -self.max_speed],
                                  dtype=np.float32)
        self.high_state = np.array([self.max_position, self.max_speed],
                                   dtype=np.float32)

        # Rendering viewer is created lazily on the first render() call
        self.viewer = None

        # Allowed action space
        self.action_space = spaces.Box(low=self.min_action,
                                       high=self.max_action,
                                       shape=(1,),
                                       dtype=np.float32)

        self.seed()

        # Temporary hack to work with rest of library
        self.env = self

    def seed(self, seed=None):
        """Seed the environment's random number generator.

        Parameters
        ----------
        seed : int or None
            Seed forwarded to ``gym.utils.seeding.np_random``.

        Returns
        -------
        list
            Single-element list containing the seed actually used.
        """
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def step(self, action):
        """Advance the simulation one Euler step.

        Parameters
        ----------
        action : array-like of shape (1,)
            Force per mass in ``[-1, 1]``; validated against ``action_space``.

        Returns
        -------
        tuple
            ``(state, None, False, {})`` — reward is always ``None`` and the
            episode is never flagged done here.

        Raises
        ------
        EnvironmentError
            If ``action`` lies outside the permissible action space.
        """

        # Check if action is in permissible space
        if not self.action_space.contains(action):
            raise EnvironmentError(f'Expected action in the range of [-1., 1.] '
                                   f'got action {action}.')

        # Unpack position and velocity
        position, velocity = self.state

        # Increment position by velocity
        position_ = position + velocity

        # Increment velocity by Euler rule and clip
        velocity_ = velocity + action * self.power - 0.0025 * np.cos(3 * position)
        velocity_ = np.clip(velocity_, - self.max_speed, self.max_speed)

        self.state = np.array([position_, velocity_])

        return self.state, None, False, {}

    def reset(self):
        """Reset the car to the deterministic start state (position -0.5, velocity 0).

        Returns
        -------
        numpy.ndarray
            Copy of the initial state.
        """
        self.state = np.array([-0.5, 0.])
        return np.array(self.state)

    def _height(self, xs):
        """Height of the mountain profile at position(s) *xs* (used for rendering)."""
        return 0.55 + 0.45 * np.sin(3 * xs)

    def render(self, mode='human'):
        """Draw the current scene.

        Parameters
        ----------
        mode : str
            ``'human'`` renders to a window; ``'rgb_array'`` returns the
            frame as an array.
        """

        # Set picture size
        screen_width = 600
        screen_height = 400

        world_width = self.max_position - self.min_position
        scale = screen_width/world_width

        # Set car size
        carwidth = 40
        carheight = 20

        if self.viewer is None:
            from gym.envs.classic_control import rendering

            # Car constants
            clearance = 10

            # Overall viewer
            self.viewer = rendering.Viewer(screen_width, screen_height)

            # Track on which the car moves
            xs = np.linspace(self.min_position, self.max_position, 200)
            ys = self._height(xs)
            xys = list(zip((xs - self.min_position) * scale, ys * scale))

            # Add car
            self.track = rendering.make_polyline(xys)
            self.track.set_linewidth(4)
            self.viewer.add_geom(self.track)
            self.cartrans = rendering.Transform()

            # Car chassis
            l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0
            car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
            car.add_attr(rendering.Transform(translation=(0, clearance)))
            car.add_attr(self.cartrans)
            self.viewer.add_geom(car)

            # Front wheel
            frontwheel = rendering.make_circle(carheight / 2.5)
            frontwheel.set_color(.5, .5, .5)
            frontwheel.add_attr(rendering.Transform(translation=(carwidth / 4, clearance)))
            frontwheel.add_attr(self.cartrans)
            self.viewer.add_geom(frontwheel)

            # Back wheel
            backwheel = rendering.make_circle(carheight / 2.5)
            backwheel.add_attr(rendering.Transform(translation=(-carwidth / 4, clearance)))
            backwheel.add_attr(self.cartrans)
            backwheel.set_color(.5, .5, .5)
            self.viewer.add_geom(backwheel)

            # Flagpole on mountain peak
            flagx = scale * (0.5 - self.min_position)
            flagy1 = scale * self._height(self.goal_position)
            flagy2 = flagy1 + 50
            flagpole = rendering.Line((flagx, flagy1),
                                    (flagx, flagy2))
            self.viewer.add_geom(flagpole)

            # Flag on flagpole
            flag = rendering.FilledPolygon([(flagx, flagy2),
                                      (flagx, flagy2 - 10),
                                      (flagx + 25, flagy2 - 5)])
            flag.set_color(.8, .8, 0)
            self.viewer.add_geom(flag)

        # Translate and rotate car
        self.cartrans.set_translation(scale * (self.state[0] - self.min_position),
                                      scale * self._height(self.state[0]))
        self.cartrans.set_rotation(np.cos(3 * self.state[0]))

        return self.viewer.render(return_rgb_array=mode=='rgb_array')

    def close(self):
        """Close the rendering viewer, if one was opened."""
        if self.viewer:
            self.viewer.close()
            self.viewer = None
| [
"numpy.clip",
"gym.envs.classic_control.rendering.make_circle",
"gym.envs.classic_control.rendering.Line",
"gym.spaces.Box",
"numpy.array",
"gym.envs.classic_control.rendering.Viewer",
"numpy.linspace",
"gym.envs.classic_control.rendering.Transform",
"numpy.cos",
"pilco.errors.EnvironmentError",
... | [((903, 967), 'numpy.array', 'np.array', (['[self.min_position, -self.max_speed]'], {'dtype': 'np.float32'}), '([self.min_position, -self.max_speed], dtype=np.float32)\n', (911, 967), True, 'import numpy as np\n'), ((1029, 1092), 'numpy.array', 'np.array', (['[self.max_position, self.max_speed]'], {'dtype': 'np.float32'}), '([self.max_position, self.max_speed], dtype=np.float32)\n', (1037, 1092), True, 'import numpy as np\n'), ((1216, 1304), 'gym.spaces.Box', 'spaces.Box', ([], {'low': 'self.min_action', 'high': 'self.max_action', 'shape': '(1,)', 'dtype': 'np.float32'}), '(low=self.min_action, high=self.max_action, shape=(1,), dtype=np.\n float32)\n', (1226, 1304), False, 'from gym import spaces\n'), ((1581, 1604), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (1598, 1604), False, 'from gym.utils import seeding\n'), ((2219, 2270), 'numpy.clip', 'np.clip', (['velocity_', '(-self.max_speed)', 'self.max_speed'], {}), '(velocity_, -self.max_speed, self.max_speed)\n', (2226, 2270), True, 'import numpy as np\n'), ((2294, 2326), 'numpy.array', 'np.array', (['[position_, velocity_]'], {}), '([position_, velocity_])\n', (2302, 2326), True, 'import numpy as np\n'), ((2415, 2436), 'numpy.array', 'np.array', (['[-0.5, 0.0]'], {}), '([-0.5, 0.0])\n', (2423, 2436), True, 'import numpy as np\n'), ((2451, 2471), 'numpy.array', 'np.array', (['self.state'], {}), '(self.state)\n', (2459, 2471), True, 'import numpy as np\n'), ((1777, 1865), 'pilco.errors.EnvironmentError', 'EnvironmentError', (['f"""Expected action in the range of [-1., 1.] got action {action}."""'], {}), "(\n f'Expected action in the range of [-1., 1.] 
got action {action}.')\n", (1793, 1865), False, 'from pilco.errors import EnvironmentError\n'), ((3041, 3086), 'gym.envs.classic_control.rendering.Viewer', 'rendering.Viewer', (['screen_width', 'screen_height'], {}), '(screen_width, screen_height)\n', (3057, 3086), False, 'from gym.envs.classic_control import rendering\n'), ((3148, 3202), 'numpy.linspace', 'np.linspace', (['self.min_position', 'self.max_position', '(200)'], {}), '(self.min_position, self.max_position, 200)\n', (3159, 3202), True, 'import numpy as np\n'), ((3359, 3387), 'gym.envs.classic_control.rendering.make_polyline', 'rendering.make_polyline', (['xys'], {}), '(xys)\n', (3382, 3387), False, 'from gym.envs.classic_control import rendering\n'), ((3501, 3522), 'gym.envs.classic_control.rendering.Transform', 'rendering.Transform', ([], {}), '()\n', (3520, 3522), False, 'from gym.envs.classic_control import rendering\n'), ((3634, 3691), 'gym.envs.classic_control.rendering.FilledPolygon', 'rendering.FilledPolygon', (['[(l, b), (l, t), (r, t), (r, b)]'], {}), '([(l, b), (l, t), (r, t), (r, b)])\n', (3657, 3691), False, 'from gym.envs.classic_control import rendering\n'), ((3896, 3934), 'gym.envs.classic_control.rendering.make_circle', 'rendering.make_circle', (['(carheight / 2.5)'], {}), '(carheight / 2.5)\n', (3917, 3934), False, 'from gym.envs.classic_control import rendering\n'), ((4214, 4252), 'gym.envs.classic_control.rendering.make_circle', 'rendering.make_circle', (['(carheight / 2.5)'], {}), '(carheight / 2.5)\n', (4235, 4252), False, 'from gym.envs.classic_control import rendering\n'), ((4692, 4740), 'gym.envs.classic_control.rendering.Line', 'rendering.Line', (['(flagx, flagy1)', '(flagx, flagy2)'], {}), '((flagx, flagy1), (flagx, flagy2))\n', (4706, 4740), False, 'from gym.envs.classic_control import rendering\n'), ((4873, 4967), 'gym.envs.classic_control.rendering.FilledPolygon', 'rendering.FilledPolygon', (['[(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)]'], {}), 
'([(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25,\n flagy2 - 5)])\n', (4896, 4967), False, 'from gym.envs.classic_control import rendering\n'), ((5359, 5384), 'numpy.cos', 'np.cos', (['(3 * self.state[0])'], {}), '(3 * self.state[0])\n', (5365, 5384), True, 'import numpy as np\n'), ((2178, 2198), 'numpy.cos', 'np.cos', (['(3 * position)'], {}), '(3 * position)\n', (2184, 2198), True, 'import numpy as np\n'), ((2530, 2544), 'numpy.sin', 'np.sin', (['(3 * xs)'], {}), '(3 * xs)\n', (2536, 2544), True, 'import numpy as np\n'), ((3717, 3764), 'gym.envs.classic_control.rendering.Transform', 'rendering.Transform', ([], {'translation': '(0, clearance)'}), '(translation=(0, clearance))\n', (3736, 3764), False, 'from gym.envs.classic_control import rendering\n'), ((4012, 4070), 'gym.envs.classic_control.rendering.Transform', 'rendering.Transform', ([], {'translation': '(carwidth / 4, clearance)'}), '(translation=(carwidth / 4, clearance))\n', (4031, 4070), False, 'from gym.envs.classic_control import rendering\n'), ((4284, 4343), 'gym.envs.classic_control.rendering.Transform', 'rendering.Transform', ([], {'translation': '(-carwidth / 4, clearance)'}), '(translation=(-carwidth / 4, clearance))\n', (4303, 4343), False, 'from gym.envs.classic_control import rendering\n')] |
import chainer
import numpy as np
from test.util import generate_kernel_test_case, wrap_template
from webdnn.graph.placeholder import Placeholder
from webdnn.frontend.chainer.converter import ChainerConverter
from webdnn.frontend.chainer.placeholder_variable import PlaceholderVariable
@wrap_template
def template(n=2, c_in=4, h_in=6, w_in=8, c_out=10, ksize=3, stride=1, pad=0, nobias=True, description=""):
    """Build a Convolution2D link, run it on random input and emit a kernel test case."""
    conv = chainer.links.Convolution2D(c_in, c_out, ksize=ksize, stride=stride, pad=pad, nobias=nobias)

    data = np.random.rand(n, c_in, h_in, w_in).astype(np.float32)
    vx = chainer.Variable(data)
    vy = conv(vx)

    graph = ChainerConverter().convert([vx], [vy])

    generate_kernel_test_case(
        description=f"[chainer] L.Convolution2D {description}",
        graph=graph,
        inputs={graph.inputs[0]: vx.data},
        expected={graph.outputs[0]: vy.data},
        EPS=1e-2
    )
def test():
    """Default convolution settings."""
    template()
def test_nobias():
    """Convolution with nobias=True (no bias parameter)."""
    template(nobias=True)
def test_nopadding():
    """Convolution with pad=0 (no padding)."""
    template(pad=0)
def test_irregular_kernel_size():
    """Non-square kernel size."""
    template(ksize=(3, 4))
def test_irregular_stride_size():
    """Non-square stride."""
    template(stride=(2, 3))
def test_irregular_padding_size1():
    """Non-square padding."""
    template(pad=(1, 2))
def test_irregular_padding_size2():
    """Uniform padding of 2."""
    template(pad=2)
def test_irregular_padding_size3():
    """Padding 2 combined with kernel size 5."""
    template(pad=2, ksize=5)
def test_irregular_padding_size4():
    """Padding in one dimension only."""
    template(pad=(1, 0))
def test_irregular_size():
    """Non-square kernel, stride and padding combined."""
    template(ksize=(3, 5), stride=(2, 3), pad=(1, 3))
def test_special_size():
    """Regression test for very large position index spaces.

    https://github.com/mil-tokyo/webdnn/issues/525
    In case that the max position index (=n*c_in*h_in*w_in*ksize*ksize) > 1<<23
    """
    template(n=1, c_in=1 << 6, h_in=1 << 7, w_in=1 << 7, c_out=3, ksize=(1 << 2) + 1, pad=1 << 1)
def test_with_placeholder():
    """Convert with placeholder (dynamic) batch/spatial sizes, then bind concrete values."""
    conv = chainer.links.Convolution2D(None, 16, ksize=3, stride=1, pad=1)
    vx = chainer.Variable(np.random.rand(1, 3, 16, 16).astype(np.float32))
    vy = conv(vx)

    N = Placeholder(label="N")
    H = Placeholder(label="H")
    W = Placeholder(label="W")
    px = PlaceholderVariable([N, 3, H, W])
    py = conv(px)
    graph = ChainerConverter().convert([px], [py])

    # Resolve the placeholders to the concrete sizes used for vx
    N.value = 1
    H.value = 16
    W.value = 16
    generate_kernel_test_case(
        description=f"[chainer] L.Convolution2D with placeholder",
        graph=graph,
        backend=["webgpu", "webassembly"],
        inputs={graph.inputs[0]: vx.data},
        expected={graph.outputs[0]: vy.data},
        EPS=1e-2
    )
| [
"numpy.random.rand",
"test.util.generate_kernel_test_case",
"webdnn.graph.placeholder.Placeholder",
"chainer.links.Convolution2D",
"webdnn.frontend.chainer.converter.ChainerConverter",
"webdnn.frontend.chainer.placeholder_variable.PlaceholderVariable"
] | [((423, 520), 'chainer.links.Convolution2D', 'chainer.links.Convolution2D', (['c_in', 'c_out'], {'ksize': 'ksize', 'stride': 'stride', 'pad': 'pad', 'nobias': 'nobias'}), '(c_in, c_out, ksize=ksize, stride=stride, pad=\n pad, nobias=nobias)\n', (450, 520), False, 'import chainer\n'), ((723, 881), 'test.util.generate_kernel_test_case', 'generate_kernel_test_case', ([], {'description': 'f"""[chainer] L.Convolution2D {description}"""', 'graph': 'graph', 'inputs': '{x: vx.data}', 'expected': '{y: vy.data}', 'EPS': '(0.01)'}), "(description=\n f'[chainer] L.Convolution2D {description}', graph=graph, inputs={x: vx.\n data}, expected={y: vy.data}, EPS=0.01)\n", (748, 881), False, 'from test.util import generate_kernel_test_case, wrap_template\n'), ((1803, 1866), 'chainer.links.Convolution2D', 'chainer.links.Convolution2D', (['None', '(16)'], {'ksize': '(3)', 'stride': '(1)', 'pad': '(1)'}), '(None, 16, ksize=3, stride=1, pad=1)\n', (1830, 1866), False, 'import chainer\n'), ((1969, 1991), 'webdnn.graph.placeholder.Placeholder', 'Placeholder', ([], {'label': '"""N"""'}), "(label='N')\n", (1980, 1991), False, 'from webdnn.graph.placeholder import Placeholder\n'), ((2000, 2022), 'webdnn.graph.placeholder.Placeholder', 'Placeholder', ([], {'label': '"""H"""'}), "(label='H')\n", (2011, 2022), False, 'from webdnn.graph.placeholder import Placeholder\n'), ((2031, 2053), 'webdnn.graph.placeholder.Placeholder', 'Placeholder', ([], {'label': '"""W"""'}), "(label='W')\n", (2042, 2053), False, 'from webdnn.graph.placeholder import Placeholder\n'), ((2063, 2096), 'webdnn.frontend.chainer.placeholder_variable.PlaceholderVariable', 'PlaceholderVariable', (['[N, 3, H, W]'], {}), '([N, 3, H, W])\n', (2082, 2096), False, 'from webdnn.frontend.chainer.placeholder_variable import PlaceholderVariable\n'), ((2272, 2472), 'test.util.generate_kernel_test_case', 'generate_kernel_test_case', ([], {'description': 'f"""[chainer] L.Convolution2D with placeholder"""', 'graph': 'graph', 'backend': 
"['webgpu', 'webassembly']", 'inputs': '{x: vx.data}', 'expected': '{y: vy.data}', 'EPS': '(0.01)'}), "(description=\n f'[chainer] L.Convolution2D with placeholder', graph=graph, backend=[\n 'webgpu', 'webassembly'], inputs={x: vx.data}, expected={y: vy.data},\n EPS=0.01)\n", (2297, 2472), False, 'from test.util import generate_kernel_test_case, wrap_template\n'), ((629, 647), 'webdnn.frontend.chainer.converter.ChainerConverter', 'ChainerConverter', ([], {}), '()\n', (645, 647), False, 'from webdnn.frontend.chainer.converter import ChainerConverter\n'), ((2128, 2146), 'webdnn.frontend.chainer.converter.ChainerConverter', 'ChainerConverter', ([], {}), '()\n', (2144, 2146), False, 'from webdnn.frontend.chainer.converter import ChainerConverter\n'), ((542, 577), 'numpy.random.rand', 'np.random.rand', (['n', 'c_in', 'h_in', 'w_in'], {}), '(n, c_in, h_in, w_in)\n', (556, 577), True, 'import numpy as np\n'), ((1893, 1921), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)', '(16)', '(16)'], {}), '(1, 3, 16, 16)\n', (1907, 1921), True, 'import numpy as np\n')] |
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
import time
import argparse
import cv2
from tensorflow.keras.models import load_model
import numpy as np
import csv
import sys
from matplotlib import pyplot as plt
from PIL import Image
from keras.preprocessing.image import img_to_array
# NOTE(review): cv2.imread expects an IMREAD_* flag as its second argument,
# not a colour-conversion code; cv2.COLOR_BGR2RGB is silently interpreted as
# an imread flag here — confirm this is intentional.
img = cv2.imread('pokemonimages/Groudon.jpg',cv2.COLOR_BGR2RGB)
print (img.shape)
# Load the same image again with PIL and shrink it to 200x200
im = Image.open("pokemonimages/Groudon.jpg")
im1 = im.resize((200,200))
#im1= img_to_array(im1, dtype='uint8')
print(im1)
def remove_transparency(im, bg_colour=(255, 255, 255)):
# Only process if image has transparency (http://stackoverflow.com/a/1963146)
if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
# Need to convert to RGBA if LA format due to a bug in PIL (http://stackoverflow.com/a/1963146)
alpha = im.convert('RGBA').split()[-1]
# Create a new background image of our matt color.
# Must be RGBA because paste requires both images have the same format
# (http://stackoverflow.com/a/8720632 and http://stackoverflow.com/a/9459208)
bg = Image.new("RGBA", im.size, bg_colour + (255,))
bg.paste(im, mask=alpha)
return bg
else:
return im
y=remove_transparency(im1)
y=y.convert("RGB")
print("rgb")
y.show()
y= img_to_array(y, dtype='uint8')
print(y.shape)
#img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
mask = np.zeros(img.shape[:2],np.uint8)
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
height, width = img.shape[:2]
rect = (0,0,width-10,height-10)
cv2.grabCut(img,mask,rect,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_RECT)
mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
imgnew= img*mask2[:,:,np.newaxis]
background=img-imgnew
background[np.where((background>[0,0,0]).all(axis=2))]=[255,255,255]
final=background+imgnew
#print mask2
#plt.imshow(final)
#plt.show() | [
"keras.preprocessing.image.img_to_array",
"PIL.Image.open",
"numpy.where",
"PIL.Image.new",
"cv2.grabCut",
"numpy.zeros",
"cv2.imread"
] | [((306, 364), 'cv2.imread', 'cv2.imread', (['"""pokemonimages/Groudon.jpg"""', 'cv2.COLOR_BGR2RGB'], {}), "('pokemonimages/Groudon.jpg', cv2.COLOR_BGR2RGB)\n", (316, 364), False, 'import cv2\n'), ((387, 426), 'PIL.Image.open', 'Image.open', (['"""pokemonimages/Groudon.jpg"""'], {}), "('pokemonimages/Groudon.jpg')\n", (397, 426), False, 'from PIL import Image\n'), ((1322, 1352), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['y'], {'dtype': '"""uint8"""'}), "(y, dtype='uint8')\n", (1334, 1352), False, 'from keras.preprocessing.image import img_to_array\n'), ((1423, 1456), 'numpy.zeros', 'np.zeros', (['img.shape[:2]', 'np.uint8'], {}), '(img.shape[:2], np.uint8)\n', (1431, 1456), True, 'import numpy as np\n'), ((1468, 1497), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (1476, 1497), True, 'import numpy as np\n'), ((1508, 1537), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (1516, 1537), True, 'import numpy as np\n'), ((1599, 1673), 'cv2.grabCut', 'cv2.grabCut', (['img', 'mask', 'rect', 'bgdModel', 'fgdModel', '(5)', 'cv2.GC_INIT_WITH_RECT'], {}), '(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)\n', (1610, 1673), False, 'import cv2\n'), ((1122, 1168), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'im.size', '(bg_colour + (255,))'], {}), "('RGBA', im.size, bg_colour + (255,))\n", (1131, 1168), False, 'from PIL import Image\n'), ((1677, 1718), 'numpy.where', 'np.where', (['((mask == 2) | (mask == 0))', '(0)', '(1)'], {}), '((mask == 2) | (mask == 0), 0, 1)\n', (1685, 1718), True, 'import numpy as np\n')] |
import os
import json
import torch
import sys
import time
import random
import numpy as np
from tqdm import tqdm, trange
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
from apex.parallel import DistributedDataParallel as DDP
from apex import amp
sys.path.append('..')
from models_gqa.model import LCGNwrapper
from models_gqa.config import build_cfg_from_argparse
from util.gqa_train.data_reader import DataReader
#from util.gqa_train.data_reader import gqa_convert_examples_to_features
# Load config
# cmd = '--cfg /home/xdjf/lcgn-pytorch/exp_gqa/cfgs/lcgn_spatial.yaml train True'.split()
# sys.argv.extend(cmd)
# Start session
#os.environ["CUDA_VISIBLE_DEVICES"] = cfg.GPUS
# if len(cfg.GPUS.split(',')) > 1:
# print('PyTorch implementation currently only supports single GPU')
import wandb
def load_train_data(cfg, rank, gpu, max_num=0, num_replicas=1):
imdb_file = cfg.IMDB_FILE % cfg.TRAIN.SPLIT_VQA
scene_graph_file = cfg.SCENE_GRAPH_FILE % \
cfg.TRAIN.SPLIT_VQA.replace('_balanced', '').replace('_all', '')
#a = gqa_convert_examples_to_features(imdb_file, scene_graph_file, cfg)
data_reader = DataReader(
imdb_file, rank, gpu, num_replicas, shuffle=True, max_num=max_num,
batch_size=cfg.TRAIN.BATCH_SIZE,
vocab_question_file=cfg.VOCAB_QUESTION_FILE,
T_encoder=cfg.T_ENCODER,
N_encoder=cfg.N_ENCODER,
O_encoder = cfg.O_ENCODER,
vocab_answer_file=cfg.VOCAB_ANSWER_FILE,
feature_type=cfg.FEAT_TYPE,
spatial_feature_dir=cfg.SPATIAL_FEATURE_DIR,
objects_feature_dir=cfg.OBJECTS_FEATURE_DIR,
objects_max_num=cfg.W_FEAT,
scene_graph_file=scene_graph_file,
vocab_name_file=cfg.VOCAB_NAME_FILE,
vocab_attr_file=cfg.VOCAB_ATTR_FILE,
add_pos_enc=cfg.ADD_POS_ENC,
pos_enc_dim=cfg.PE_DIM,
pos_enc_scale=cfg.PE_SCALE)
num_vocab = data_reader.batch_loader.vocab_dict.num_vocab
num_choices = data_reader.batch_loader.answer_dict.num_vocab
return data_reader, num_vocab, num_choices
def batch_to_data(batch):
questionIndices = torch.from_numpy(
batch['input_seq_batch'].astype(np.int64)).cuda() # 128 * 30
questionLengths = torch.from_numpy(
batch['seq_length_batch'].astype(np.int64)).cuda() # 128
semanIndices = torch.from_numpy(
batch['input_seman_batch'].astype(np.int64)).cuda() # 128 * 30
semanLengths = torch.from_numpy(
batch['seman_length_batch'].astype(np.int64)).cuda() # 128
answerIndices = torch.from_numpy(
batch['answer_label_batch'].astype(np.int64)).cuda() # 128
nameIndices = torch.from_numpy(
batch['input_name_batch'].astype(np.int64)).cuda()
nameLengths = torch.from_numpy(
batch['name_length_batch'].astype(np.int64)).cuda()
images = torch.from_numpy(
batch['image_feat_batch'].astype(np.float32)).cuda() # 128 * 49 * 2112
imagesObjectNum = torch.from_numpy(
np.sum(batch['image_valid_batch'].astype(np.int64), axis=1)).cuda() # 128
return (questionIndices, questionLengths, semanIndices, semanLengths, answerIndices, nameIndices, nameLengths, images, imagesObjectNum)
def run_train_on_data(model, data_reader_train, cfg, rank, gpu, run_eval=False,
data_reader_eval=None):
model.train()
global_step = 1
lr = cfg.TRAIN.SOLVER.LR
correct, total, loss_sum, batch_num = 0, 0, 0., 0
tr_loss, logging_loss = 0.0, 0.0
# if rank in [-1, 0]:
# tb_writer = SummaryWriter()
for batch, n_sample, e in data_reader_train.batches(one_pass=False):
n_epoch = cfg.TRAIN.START_EPOCH + e
if n_sample == 0 and n_epoch > cfg.TRAIN.START_EPOCH and rank in [-1, 0]:
print('')
# save snapshot
snapshot_file = cfg.SNAPSHOT_FILE % (cfg.EXP_NAME, n_epoch)
torch.save(model.state_dict(), snapshot_file)
# run evaluation
if run_eval:
batch_eval = run_eval_on_data(cfg, model, data_reader_eval)
#tb_writer.add_scalar("eval_loss", batch_eval['loss'], global_step)
model.train()
if cfg.DEBUG == False:
wandb.log({"eval_loss": batch_eval['loss'], "eval_correct": batch_eval['accuracy']})
# clear stats
correct, total, loss_sum, batch_num = 0, 0, 0., 0
if n_epoch >= cfg.TRAIN.MAX_EPOCH:
break
batch_list = batch_to_data(batch)
# if first and rank in [-1, 0]:
# tb_writer.add_graph(model.model, (batch_list, ))
# first = False
batch_res = model.run_batch(batch_list, train=True, lr=lr)
correct += batch_res['num_correct']
total += batch_res['batch_size']
loss_sum += batch_res['loss'].item()
tr_loss += loss_sum
batch_num += 1
global_step += 1
lr = batch_res['lr']
if rank in [-1, 0] and cfg.logging_steps > 0 and global_step % cfg.logging_steps == 0 and cfg.DEBUG == False:
wandb.log({"lr": batch_res['lr'], "train_loss": loss_sum/batch_num, "train_correct": correct/total})
# tb_writer.add_scalar("lr", batch_res['lr'], global_step)
# tb_writer.add_scalar("loss", (tr_loss - logging_loss) / cfg.logging_steps, global_step)
if rank in [-1, 0]:
print('\rTrain E %d S %d: avgL=%.4f, avgA=%.4f, lr=%.1e' % (
n_epoch+1, total, loss_sum/batch_num, correct/total, lr),
end='')
# if rank in [-1, 0]:
# tb_writer.close()
def load_eval_data(cfg, rank, gpu, max_num=0):
imdb_file = cfg.IMDB_FILE % cfg.TEST.SPLIT_VQA
scene_graph_file = cfg.SCENE_GRAPH_FILE % \
cfg.TEST.SPLIT_VQA.replace('_balanced', '').replace('_all', '')
data_reader = DataReader(
imdb_file, rank, gpu, 1, shuffle=False, max_num=max_num,
batch_size=cfg.TEST.BATCH_SIZE,
vocab_question_file=cfg.VOCAB_QUESTION_FILE,
T_encoder=cfg.T_ENCODER,
N_encoder=cfg.N_ENCODER,
O_encoder = cfg.O_ENCODER,
vocab_answer_file=cfg.VOCAB_ANSWER_FILE,
feature_type=cfg.FEAT_TYPE,
spatial_feature_dir=cfg.SPATIAL_FEATURE_DIR,
objects_feature_dir=cfg.OBJECTS_FEATURE_DIR,
objects_max_num=cfg.W_FEAT,
scene_graph_file=scene_graph_file,
vocab_name_file=cfg.VOCAB_NAME_FILE,
vocab_attr_file=cfg.VOCAB_ATTR_FILE,
add_pos_enc=cfg.ADD_POS_ENC,
pos_enc_dim=cfg.PE_DIM, pos_enc_scale=cfg.PE_SCALE)
num_vocab = data_reader.batch_loader.vocab_dict.num_vocab
num_choices = data_reader.batch_loader.answer_dict.num_vocab
return data_reader, num_vocab, num_choices
def run_eval_on_data(cfg, model, data_reader_eval, pred=False):
model.eval()
predictions = []
answer_tokens = data_reader_eval.batch_loader.answer_dict.word_list
correct, total, loss_sum, batch_num = 0, 0, 0., 0
for batch, _, _ in data_reader_eval.batches(one_pass=True):
batch_list = batch_to_data(batch)
batch_res = model.run_batch(batch_list, train=False)
if pred:
predictions.extend([
{'questionId': q, 'prediction': answer_tokens[p]}
for q, p in zip(batch['qid_list'], batch_res['predictions'])])
correct += batch_res['num_correct']
total += batch_res['batch_size']
loss_sum += batch_res['loss'].item()
batch_num += 1
print('\rEval S %d: avgL=%.4f, avgA=%.4f' % (
total, loss_sum/batch_num, correct/total), end='')
print('')
eval_res = {
'correct': correct,
'total': total,
'accuracy': correct/total,
'loss': loss_sum/batch_num,
'predictions': predictions}
return eval_res
def dump_prediction_to_file(cfg, predictions, res_dir):
pred_file = os.path.join(res_dir, 'pred_%s_%04d_%s.json' % (
cfg.EXP_NAME, cfg.TEST.EPOCH, cfg.TEST.SPLIT_VQA))
with open(pred_file, 'w') as f:
json.dump(predictions, f, indent=2)
print('predictions written to %s' % pred_file)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpus > 0:
torch.cuda.manual_seed_all(args.seed)
def train(gpu, cfg):
rank = -1
if gpu != -1:
rank = cfg.nr * cfg.n_gpus + gpu
dist.init_process_group(
backend='nccl',
init_method='env://',
world_size=cfg.world_size,
rank=rank
)
if rank in [-1, 0, 1]:
gpu = 0
elif rank in [2, 3]:
gpu = 1
set_seed(cfg)
print(f'rank: {rank} pid: {os.getpid()} is running...')
num_replicas = cfg.world_size if rank != -1 else 1
data_reader_train, num_vocab, num_choices = load_train_data(cfg, rank, gpu, num_replicas=num_replicas)
data_reader_eval, _, _ = load_eval_data(cfg, rank, gpu, max_num=cfg.TRAIN.EVAL_MAX_NUM)
# Load model
model = LCGNwrapper(num_vocab, num_choices, cfg=cfg, rank=rank, gpu=gpu)
# Save snapshot
if rank in [-1, 0]:
if cfg.DEBUG == False:
name = time.strftime('%Y%m%d-%H%M%S')
wandb.init(project="gtp", notes="graph tensor propa", name=name)
wandb.watch(model.model, log="all")
wandb.config.update(cfg)
snapshot_dir = os.path.dirname(cfg.SNAPSHOT_FILE % (cfg.EXP_NAME, 0))
os.makedirs(snapshot_dir, exist_ok=True)
with open(os.path.join(snapshot_dir, 'cfg.json'), 'w') as f:
json.dump(cfg, f, indent=2)
if cfg.TRAIN.START_EPOCH > 0 and rank in [-1, 0]:
print('resuming from epoch %d' % cfg.TRAIN.START_EPOCH)
model.load_state_dict(torch.load(
cfg.SNAPSHOT_FILE % (cfg.EXP_NAME, cfg.TRAIN.START_EPOCH)))
if rank in [-1, 0]:
print('%s - train for %d epochs' % (cfg.EXP_NAME, cfg.TRAIN.MAX_EPOCH))
run_train_on_data(
model, data_reader_train, cfg, rank, gpu, run_eval=cfg.TRAIN.RUN_EVAL,
data_reader_eval=data_reader_eval)
if rank in [-1, 0]:
print('%s - train (done)' % cfg.EXP_NAME)
def test(cfg):
data_reader_eval, num_vocab, num_choices = load_eval_data(cfg, -1, 0)
# Load model
model = LCGNwrapper(num_vocab, num_choices, cfg)
# Load test snapshot
snapshot_file = cfg.SNAPSHOT_FILE % (cfg.EXP_NAME, cfg.TEST.EPOCH)
model.load_state_dict(torch.load(snapshot_file))
res_dir = cfg.TEST.RESULT_DIR % (cfg.EXP_NAME, cfg.TEST.EPOCH)
vis_dir = os.path.join(
res_dir, '%s_%s' % (cfg.TEST.VIS_DIR_PREFIX, cfg.TEST.SPLIT_VQA))
os.makedirs(res_dir, exist_ok=True)
os.makedirs(vis_dir, exist_ok=True)
pred = cfg.TEST.DUMP_PRED
if not pred:
print('NOT writing predictions (set TEST.DUMP_PRED True to write)')
print('%s - test epoch %d' % (cfg.EXP_NAME, cfg.TEST.EPOCH))
eval_res = run_eval_on_data(cfg, model, data_reader_eval, pred=pred)
print('%s - test epoch %d: accuracy = %.4f' % (
cfg.EXP_NAME, cfg.TEST.EPOCH, eval_res['accuracy']))
# write results
if pred:
dump_prediction_to_file(cfg, eval_res['predictions'], res_dir)
eval_res.pop('predictions')
res_file = os.path.join(res_dir, 'res_%s_%04d_%s.json' % (
cfg.EXP_NAME, cfg.TEST.EPOCH, cfg.TEST.SPLIT_VQA))
with open(res_file, 'w') as f:
json.dump(eval_res, f)
if __name__ == '__main__':
cfg = build_cfg_from_argparse()
start = time.time()
print(f'pid: {os.getpid()} is running...')
if cfg.train:
if cfg.n_gpus > 1:
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '12801'
cfg.world_size = cfg.n_gpus * cfg.nodes
mp.spawn(train, nprocs=cfg.n_gpus, args=(cfg,))
else:
os.environ["CUDA_VISIBLE_DEVICES"] = cfg.GPUS
train(-1, cfg)
end_ = time.time()
if cfg.DEBUG == False:
wandb.log({"training time": int((end_ - start) / 60)})
print(f'time has cost : {end_ - start}')
else:
test(cfg)
| [
"wandb.log",
"wandb.init",
"sys.path.append",
"wandb.config.update",
"numpy.random.seed",
"os.getpid",
"os.path.dirname",
"util.gqa_train.data_reader.DataReader",
"time.time",
"models_gqa.model.LCGNwrapper",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"os.makedirs",
"torch.multiproc... | [((319, 340), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (334, 340), False, 'import sys\n'), ((1205, 1846), 'util.gqa_train.data_reader.DataReader', 'DataReader', (['imdb_file', 'rank', 'gpu', 'num_replicas'], {'shuffle': '(True)', 'max_num': 'max_num', 'batch_size': 'cfg.TRAIN.BATCH_SIZE', 'vocab_question_file': 'cfg.VOCAB_QUESTION_FILE', 'T_encoder': 'cfg.T_ENCODER', 'N_encoder': 'cfg.N_ENCODER', 'O_encoder': 'cfg.O_ENCODER', 'vocab_answer_file': 'cfg.VOCAB_ANSWER_FILE', 'feature_type': 'cfg.FEAT_TYPE', 'spatial_feature_dir': 'cfg.SPATIAL_FEATURE_DIR', 'objects_feature_dir': 'cfg.OBJECTS_FEATURE_DIR', 'objects_max_num': 'cfg.W_FEAT', 'scene_graph_file': 'scene_graph_file', 'vocab_name_file': 'cfg.VOCAB_NAME_FILE', 'vocab_attr_file': 'cfg.VOCAB_ATTR_FILE', 'add_pos_enc': 'cfg.ADD_POS_ENC', 'pos_enc_dim': 'cfg.PE_DIM', 'pos_enc_scale': 'cfg.PE_SCALE'}), '(imdb_file, rank, gpu, num_replicas, shuffle=True, max_num=\n max_num, batch_size=cfg.TRAIN.BATCH_SIZE, vocab_question_file=cfg.\n VOCAB_QUESTION_FILE, T_encoder=cfg.T_ENCODER, N_encoder=cfg.N_ENCODER,\n O_encoder=cfg.O_ENCODER, vocab_answer_file=cfg.VOCAB_ANSWER_FILE,\n feature_type=cfg.FEAT_TYPE, spatial_feature_dir=cfg.SPATIAL_FEATURE_DIR,\n objects_feature_dir=cfg.OBJECTS_FEATURE_DIR, objects_max_num=cfg.W_FEAT,\n scene_graph_file=scene_graph_file, vocab_name_file=cfg.VOCAB_NAME_FILE,\n vocab_attr_file=cfg.VOCAB_ATTR_FILE, add_pos_enc=cfg.ADD_POS_ENC,\n pos_enc_dim=cfg.PE_DIM, pos_enc_scale=cfg.PE_SCALE)\n', (1215, 1846), False, 'from util.gqa_train.data_reader import DataReader\n'), ((5914, 6543), 'util.gqa_train.data_reader.DataReader', 'DataReader', (['imdb_file', 'rank', 'gpu', '(1)'], {'shuffle': '(False)', 'max_num': 'max_num', 'batch_size': 'cfg.TEST.BATCH_SIZE', 'vocab_question_file': 'cfg.VOCAB_QUESTION_FILE', 'T_encoder': 'cfg.T_ENCODER', 'N_encoder': 'cfg.N_ENCODER', 'O_encoder': 'cfg.O_ENCODER', 'vocab_answer_file': 'cfg.VOCAB_ANSWER_FILE', 
'feature_type': 'cfg.FEAT_TYPE', 'spatial_feature_dir': 'cfg.SPATIAL_FEATURE_DIR', 'objects_feature_dir': 'cfg.OBJECTS_FEATURE_DIR', 'objects_max_num': 'cfg.W_FEAT', 'scene_graph_file': 'scene_graph_file', 'vocab_name_file': 'cfg.VOCAB_NAME_FILE', 'vocab_attr_file': 'cfg.VOCAB_ATTR_FILE', 'add_pos_enc': 'cfg.ADD_POS_ENC', 'pos_enc_dim': 'cfg.PE_DIM', 'pos_enc_scale': 'cfg.PE_SCALE'}), '(imdb_file, rank, gpu, 1, shuffle=False, max_num=max_num,\n batch_size=cfg.TEST.BATCH_SIZE, vocab_question_file=cfg.\n VOCAB_QUESTION_FILE, T_encoder=cfg.T_ENCODER, N_encoder=cfg.N_ENCODER,\n O_encoder=cfg.O_ENCODER, vocab_answer_file=cfg.VOCAB_ANSWER_FILE,\n feature_type=cfg.FEAT_TYPE, spatial_feature_dir=cfg.SPATIAL_FEATURE_DIR,\n objects_feature_dir=cfg.OBJECTS_FEATURE_DIR, objects_max_num=cfg.W_FEAT,\n scene_graph_file=scene_graph_file, vocab_name_file=cfg.VOCAB_NAME_FILE,\n vocab_attr_file=cfg.VOCAB_ATTR_FILE, add_pos_enc=cfg.ADD_POS_ENC,\n pos_enc_dim=cfg.PE_DIM, pos_enc_scale=cfg.PE_SCALE)\n', (5924, 6543), False, 'from util.gqa_train.data_reader import DataReader\n'), ((7962, 8065), 'os.path.join', 'os.path.join', (['res_dir', "('pred_%s_%04d_%s.json' % (cfg.EXP_NAME, cfg.TEST.EPOCH, cfg.TEST.SPLIT_VQA))"], {}), "(res_dir, 'pred_%s_%04d_%s.json' % (cfg.EXP_NAME, cfg.TEST.\n EPOCH, cfg.TEST.SPLIT_VQA))\n", (7974, 8065), False, 'import os\n'), ((8227, 8249), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (8238, 8249), False, 'import random\n'), ((8254, 8279), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (8268, 8279), True, 'import numpy as np\n'), ((8284, 8312), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (8301, 8312), False, 'import torch\n'), ((9263, 9327), 'models_gqa.model.LCGNwrapper', 'LCGNwrapper', (['num_vocab', 'num_choices'], {'cfg': 'cfg', 'rank': 'rank', 'gpu': 'gpu'}), '(num_vocab, num_choices, cfg=cfg, rank=rank, gpu=gpu)\n', (9274, 9327), False, 'from models_gqa.model import 
LCGNwrapper\n'), ((10528, 10568), 'models_gqa.model.LCGNwrapper', 'LCGNwrapper', (['num_vocab', 'num_choices', 'cfg'], {}), '(num_vocab, num_choices, cfg)\n', (10539, 10568), False, 'from models_gqa.model import LCGNwrapper\n'), ((10801, 10879), 'os.path.join', 'os.path.join', (['res_dir', "('%s_%s' % (cfg.TEST.VIS_DIR_PREFIX, cfg.TEST.SPLIT_VQA))"], {}), "(res_dir, '%s_%s' % (cfg.TEST.VIS_DIR_PREFIX, cfg.TEST.SPLIT_VQA))\n", (10813, 10879), False, 'import os\n'), ((10893, 10928), 'os.makedirs', 'os.makedirs', (['res_dir'], {'exist_ok': '(True)'}), '(res_dir, exist_ok=True)\n', (10904, 10928), False, 'import os\n'), ((10933, 10968), 'os.makedirs', 'os.makedirs', (['vis_dir'], {'exist_ok': '(True)'}), '(vis_dir, exist_ok=True)\n', (10944, 10968), False, 'import os\n'), ((11496, 11597), 'os.path.join', 'os.path.join', (['res_dir', "('res_%s_%04d_%s.json' % (cfg.EXP_NAME, cfg.TEST.EPOCH, cfg.TEST.SPLIT_VQA))"], {}), "(res_dir, 'res_%s_%04d_%s.json' % (cfg.EXP_NAME, cfg.TEST.EPOCH,\n cfg.TEST.SPLIT_VQA))\n", (11508, 11597), False, 'import os\n'), ((11709, 11734), 'models_gqa.config.build_cfg_from_argparse', 'build_cfg_from_argparse', ([], {}), '()\n', (11732, 11734), False, 'from models_gqa.config import build_cfg_from_argparse\n'), ((11747, 11758), 'time.time', 'time.time', ([], {}), '()\n', (11756, 11758), False, 'import time\n'), ((8114, 8149), 'json.dump', 'json.dump', (['predictions', 'f'], {'indent': '(2)'}), '(predictions, f, indent=2)\n', (8123, 8149), False, 'import json\n'), ((8345, 8382), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (8371, 8382), False, 'import torch\n'), ((8491, 8595), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': '"""nccl"""', 'init_method': '"""env://"""', 'world_size': 'cfg.world_size', 'rank': 'rank'}), "(backend='nccl', init_method='env://', world_size=\n cfg.world_size, rank=rank)\n", (8514, 8595), True, 'import torch.distributed as dist\n'), 
((9638, 9692), 'os.path.dirname', 'os.path.dirname', (['(cfg.SNAPSHOT_FILE % (cfg.EXP_NAME, 0))'], {}), '(cfg.SNAPSHOT_FILE % (cfg.EXP_NAME, 0))\n', (9653, 9692), False, 'import os\n'), ((9701, 9741), 'os.makedirs', 'os.makedirs', (['snapshot_dir'], {'exist_ok': '(True)'}), '(snapshot_dir, exist_ok=True)\n', (9712, 9741), False, 'import os\n'), ((10692, 10717), 'torch.load', 'torch.load', (['snapshot_file'], {}), '(snapshot_file)\n', (10702, 10717), False, 'import torch\n'), ((11646, 11668), 'json.dump', 'json.dump', (['eval_res', 'f'], {}), '(eval_res, f)\n', (11655, 11668), False, 'import json\n'), ((12183, 12194), 'time.time', 'time.time', ([], {}), '()\n', (12192, 12194), False, 'import time\n'), ((5143, 5251), 'wandb.log', 'wandb.log', (["{'lr': batch_res['lr'], 'train_loss': loss_sum / batch_num, 'train_correct':\n correct / total}"], {}), "({'lr': batch_res['lr'], 'train_loss': loss_sum / batch_num,\n 'train_correct': correct / total})\n", (5152, 5251), False, 'import wandb\n'), ((9422, 9452), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (9435, 9452), False, 'import time\n'), ((9465, 9529), 'wandb.init', 'wandb.init', ([], {'project': '"""gtp"""', 'notes': '"""graph tensor propa"""', 'name': 'name'}), "(project='gtp', notes='graph tensor propa', name=name)\n", (9475, 9529), False, 'import wandb\n'), ((9542, 9577), 'wandb.watch', 'wandb.watch', (['model.model'], {'log': '"""all"""'}), "(model.model, log='all')\n", (9553, 9577), False, 'import wandb\n'), ((9590, 9614), 'wandb.config.update', 'wandb.config.update', (['cfg'], {}), '(cfg)\n', (9609, 9614), False, 'import wandb\n'), ((9823, 9850), 'json.dump', 'json.dump', (['cfg', 'f'], {'indent': '(2)'}), '(cfg, f, indent=2)\n', (9832, 9850), False, 'import json\n'), ((9999, 10068), 'torch.load', 'torch.load', (['(cfg.SNAPSHOT_FILE % (cfg.EXP_NAME, cfg.TRAIN.START_EPOCH))'], {}), '(cfg.SNAPSHOT_FILE % (cfg.EXP_NAME, cfg.TRAIN.START_EPOCH))\n', (10009, 10068), False, 
'import torch\n'), ((12021, 12068), 'torch.multiprocessing.spawn', 'mp.spawn', (['train'], {'nprocs': 'cfg.n_gpus', 'args': '(cfg,)'}), '(train, nprocs=cfg.n_gpus, args=(cfg,))\n', (12029, 12068), True, 'import torch.multiprocessing as mp\n'), ((4285, 4374), 'wandb.log', 'wandb.log', (["{'eval_loss': batch_eval['loss'], 'eval_correct': batch_eval['accuracy']}"], {}), "({'eval_loss': batch_eval['loss'], 'eval_correct': batch_eval[\n 'accuracy']})\n", (4294, 4374), False, 'import wandb\n'), ((8950, 8961), 'os.getpid', 'os.getpid', ([], {}), '()\n', (8959, 8961), False, 'import os\n'), ((9760, 9798), 'os.path.join', 'os.path.join', (['snapshot_dir', '"""cfg.json"""'], {}), "(snapshot_dir, 'cfg.json')\n", (9772, 9798), False, 'import os\n'), ((11777, 11788), 'os.getpid', 'os.getpid', ([], {}), '()\n', (11786, 11788), False, 'import os\n')] |
from sys import platform
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy
ext_modules = [
Extension(
"src.libs.cutils",
["src/libs/cutils.pyx"],
extra_compile_args=['/openmp' if platform == "win32" else '-fopenmp']
)
]
setup(
ext_modules=cythonize(ext_modules),
include_dirs=[numpy.get_include()],
) | [
"Cython.Build.cythonize",
"distutils.extension.Extension",
"numpy.get_include"
] | [((170, 299), 'distutils.extension.Extension', 'Extension', (['"""src.libs.cutils"""', "['src/libs/cutils.pyx']"], {'extra_compile_args': "['/openmp' if platform == 'win32' else '-fopenmp']"}), "('src.libs.cutils', ['src/libs/cutils.pyx'], extra_compile_args=[\n '/openmp' if platform == 'win32' else '-fopenmp'])\n", (179, 299), False, 'from distutils.extension import Extension\n'), ((351, 373), 'Cython.Build.cythonize', 'cythonize', (['ext_modules'], {}), '(ext_modules)\n', (360, 373), False, 'from Cython.Build import cythonize\n'), ((393, 412), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (410, 412), False, 'import numpy\n')] |
import numpy as np
from nnfs.layers import Linear
from nnfs.optimizers import SGD
class Model:
def __init__(self, layers, loss, optimizer=SGD(lr=0.01)):
self.layers = layers
self.loss = loss
self.optimizer = optimizer
def save_weights(self, filename):
weights = []
for layer in self.layers:
for param in layer.get_parameters():
weights.append(param.value)
np.savez(filename, *weights)
def load_weights(self, filename):
weights = np.load(filename)
param_index = 0
for layer in self.layers:
for param in layer.get_parameters():
param.value = weights[f'arr_{param_index}']
param_index += 1
def predict(self, inputs):
outputs = inputs
for layer in self.layers:
outputs = layer.forward(outputs)
return outputs
def train(self, X, y, epochs=20, batch_size=32, validation_data=None, metrics=None, verbose=1):
history = {'train_loss': [0.0] * epochs}
if validation_data:
history['valid_loss'] = [0.0] * epochs
if metrics:
for name, _ in metrics.items():
history[f'train_{name}'] = [0.0] * epochs
if validation_data:
history[f'valid_{name}'] = [0.0] * epochs
n_batches = (len(X) + batch_size - 1) // batch_size
for epoch in range(epochs):
train_loss = 0.0
for batch_index in range(n_batches):
batch_start = batch_index * batch_size
batch_end = min((batch_index + 1) * batch_size, X.shape[0])
X_batch = X[batch_start:batch_end, ...]
y_batch = y[batch_start:batch_end, ...]
y_pred = self.predict(X_batch)
batch_loss = self.loss(y_pred, y_batch)
batch_loss += np.sum([layer.get_loss() for layer in self.layers])
train_loss += batch_loss / n_batches
parameters = []
grad_in = self.loss.get_grad_in(y_pred, y_batch)
for layer in reversed(self.layers):
grad_in = layer.backward(grad_in)
for param in layer.get_parameters():
parameters.append(param)
self.optimizer.apply_gradients(parameters)
if metrics:
for name, metric in metrics.items():
history[f'train_{name}'][epoch] += metric(y_pred, y_batch) / n_batches
history['train_loss'][epoch] = train_loss
if validation_data:
valid_loss = 0.0
n_valid_batches = (len(validation_data[0]) + batch_size - 1) // batch_size
for batch_index in range(n_valid_batches):
batch_start = batch_index * batch_size
batch_end = min((batch_index + 1) * batch_size, validation_data[0].shape[0])
X_batch = validation_data[0][batch_start:batch_end, ...]
y_batch = validation_data[1][batch_start:batch_end, ...]
y_pred = self.predict(X_batch)
batch_loss = self.loss(y_pred, y_batch)
batch_loss += np.sum([layer.get_loss() for layer in self.layers])
valid_loss += batch_loss / n_valid_batches
if metrics:
for name, metric in metrics.items():
history[f'valid_{name}'][epoch] += metric(y_pred, y_batch) / n_valid_batches
history['valid_loss'][epoch] = valid_loss
if not verbose:
continue
log_str = f"epoch: {epoch+1}/{epochs} - train_loss: {train_loss:.8f}"
if metrics:
for name, metric in metrics.items():
value = history[f'train_{name}'][epoch]
log_str += f" - train_{name}: {value:.8f}"
if validation_data:
log_str += f" - valid_loss: {valid_loss:.8f}"
if metrics:
for name, metric in metrics.items():
value = history[f'valid_{name}'][epoch]
log_str += f" - valid_{name}: {value:.8f}"
print(log_str)
return history
| [
"numpy.savez",
"numpy.load",
"nnfs.optimizers.SGD"
] | [((144, 156), 'nnfs.optimizers.SGD', 'SGD', ([], {'lr': '(0.01)'}), '(lr=0.01)\n', (147, 156), False, 'from nnfs.optimizers import SGD\n'), ((443, 471), 'numpy.savez', 'np.savez', (['filename', '*weights'], {}), '(filename, *weights)\n', (451, 471), True, 'import numpy as np\n'), ((529, 546), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (536, 546), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os, sys, pdb
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torch.utils.data as data
from torchvision import datasets, transforms
import numpy as np
import cv2, copy, time
import matplotlib.pyplot as plt
from scipy.ndimage import binary_fill_holes, binary_closing, binary_dilation
from skimage import transform, morphology, filters
from skimage.morphology import remove_small_objects
import loader
def refine_prediction(pred, thresh, min_size):
binary = pred > thresh # Threshold
binary = binary_dilation(binary, structure=np.ones((5,5))) # dilation to connect
binary = binary_fill_holes(binary) # Fill holes
# Remove outliers
mask = remove_small_objects(binary, min_size=min_size, connectivity=8)
return mask
def pred_patches(cls_model, patches, args):
preds = []
start_time = time.time()
slide_dset = loader.PatchDataset(patches)
dset_loader = data.DataLoader(slide_dset, batch_size=args.batch_size, shuffle=False, num_workers=4)
with torch.no_grad():
for ind, inputs in enumerate(dset_loader):
inputs = inputs.type(torch.FloatTensor)
inputs = Variable(inputs.cuda())
outputs = cls_model(inputs)
_, batch_preds = outputs.max(1)
preds.extend(batch_preds.cpu().tolist())
elapsed_time = time.time() - start_time
print("{} seconds for {} patches.".format(elapsed_time, patches.shape[0]))
return preds
def slide_pred(cls_model, split_arr, patches, wsi_dim, args):
# Save prediction results
RAW_SIZE = 299
SIZE1, SIZE2, SIZE4 = int(RAW_SIZE/4), int(RAW_SIZE/2), RAW_SIZE
class_num = 3
result_map = np.zeros((wsi_dim[0], wsi_dim[1], class_num), dtype=np.uint8)
# Prediction
if patches.shape[0] > 0: # exist
preds = pred_patches(cls_model, patches, args)
for coor, pred in zip(split_arr, preds):
result_map[coor[0]+SIZE1:coor[0]+SIZE1+SIZE2, coor[1]+SIZE1:coor[1]+SIZE1+SIZE2, pred] = 255
# Resize results
args.img_cnt_ratio = 2**(args.cnt_level - args.img_level)
s_height, s_width = wsi_dim[0] / args.img_cnt_ratio, wsi_dim[1] / args.img_cnt_ratio
result_img = transform.resize(result_map, (s_height, s_width))
MINIMUM_REGION_SIZE = (np.floor(SIZE2 / args.img_cnt_ratio))**2
# refine unsure
unsure_min_size = MINIMUM_REGION_SIZE * args.unsure_grid_num
result_img[:,:,1] = refine_prediction(result_img[:,:,1], thresh=args.unsure_prob, min_size=unsure_min_size)
unsure_img = (result_img[:,:,1] * 255).astype(np.uint8)
_, unsure_cnts, _ = cv2.findContours(unsure_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
max_unsure = 0
if len(unsure_cnts) != 0:
max_unsure_cnt = max(unsure_cnts, key = cv2.contourArea)
max_unsure = cv2.contourArea(max_unsure_cnt)
unsure_num_grid = int(max_unsure / MINIMUM_REGION_SIZE)
# refine malignant
yes_min_size = MINIMUM_REGION_SIZE * args.malignant_num_min
result_img[:,:,2] = refine_prediction(result_img[:,:,2], thresh=args.malignant_prob, min_size=yes_min_size)
yes_img = (result_img[:,:,2] * 255).astype(np.uint8)
_, yes_cnts, _ = cv2.findContours(yes_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
max_yes = 0
if len(yes_cnts) != 0:
max_yes_cnt = max(yes_cnts, key = cv2.contourArea)
max_yes = cv2.contourArea(max_yes_cnt)
yes_num_grid = int(max_yes / MINIMUM_REGION_SIZE)
# Rule-based diagnosis
diag_flag = thyroid_diagnosis_rule(unsure_num_grid, yes_num_grid, args)
return result_img, diag_flag
def thyroid_diagnosis_rule(unsure_num, yes_num, args):
diag_flag = "Benign"
# if there are unsure regions, take it unsure
if unsure_num != 0:
diag_flag = "Unsure"
else:
# if malignant regions large than 16, take it as malignant
if yes_num >= args.malignant_num_max:
diag_flag = "Malignant"
# if malignant regions num between 2-16, take is as Unsure
elif yes_num >= args.malignant_num_min and yes_num < args.malignant_num_max:
diag_flag = "Unsure"
else:
diag_flag = "Benign"
return diag_flag
def pred_feas(cls_model, patches, args):
probs, logits, vecs = [], [], []
def fea_hook(module, input, output):
t_fea2048 = input[0].cpu().tolist()
cur_vecs = copy.deepcopy(t_fea2048)
t_logit3 = output.cpu().tolist()
cur_logits = copy.deepcopy(t_logit3)
t_fea3 = F.softmax(output, dim=-1)
cur_fea3 = t_fea3.cpu().tolist()
cur_probs = copy.deepcopy(cur_fea3)
vecs.extend(cur_vecs)
logits.extend(cur_logits)
probs.extend(cur_probs)
cls_model.fc.register_forward_hook(fea_hook)
slide_dset = loader.PatchDataset(patches)
dset_loader = data.DataLoader(slide_dset, batch_size=args.batch_size, shuffle=False, num_workers=4)
with torch.no_grad():
for ind, inputs in enumerate(dset_loader):
inputs = inputs.type(torch.FloatTensor)
inputs = Variable(inputs.cuda())
outputs = cls_model(inputs)
return probs, logits, vecs
def sort_by_prob(BBoxes, ClsProbs, ClsLogits, FeaVecs):
    """Reorder boxes/probs/logits/features by ascending first-class probability.

    Returns a dict with keys "bbox", "prob", "logit", "feaVec", each a list
    permuted by the same order (argsort of ClsProbs[i][0]).
    """
    order = np.argsort([p[0] for p in ClsProbs])

    def _permute(seq):
        # Apply the shared sorting permutation to one of the parallel lists.
        return [seq[i] for i in order]

    return {
        "bbox": _permute(BBoxes),
        "prob": _permute(ClsProbs),
        "logit": _permute(ClsLogits),
        "feaVec": _permute(FeaVecs),
    }
def gen_slide_feas(cls_model, split_arr, patches, wsi_dim, args):
    """Classify slide patches and bundle boxes/probs/logits/features.

    Parameters
    ----------
    cls_model : classification network passed through to ``pred_feas``.
    split_arr : iterable of (row, col) patch origins in slide coordinates.
    patches : array of patch images; when empty, no prediction is run and
        the returned lists are empty.
    wsi_dim : slide dimensions (unused here; kept for interface parity).
    args : namespace providing ``batch_size`` (used by ``pred_feas``).

    Returns
    -------
    dict with keys "bbox", "prob", "logit", "feaVec", all permuted by
    ascending first-class probability (see ``sort_by_prob``).
    """
    RAW_SIZE = 299
    # Half-size box centered inside each RAW_SIZE patch.
    SIZE1, SIZE2 = int(RAW_SIZE / 4), int(RAW_SIZE / 2)
    ClsProbs, ClsLogits, FeaVecs = [], [], []
    # Only run the network when at least one patch was extracted.
    if patches.shape[0] > 0:
        ClsProbs, ClsLogits, FeaVecs = pred_feas(cls_model, patches, args)
    BBoxes = []
    for coor in split_arr:
        # coor is (row, col); bbox is (x, y, w, h) in slide coordinates.
        cur_x, cur_y = coor[1] + SIZE1, coor[0] + SIZE1
        BBoxes.append([cur_x, cur_y, SIZE2, SIZE2])
    fea_dict = sort_by_prob(BBoxes, ClsProbs, ClsLogits, FeaVecs)
    return fea_dict
| [
"skimage.morphology.remove_small_objects",
"copy.deepcopy",
"numpy.ones",
"loader.PatchDataset",
"scipy.ndimage.binary_fill_holes",
"numpy.floor",
"cv2.contourArea",
"numpy.argsort",
"numpy.zeros",
"torch.utils.data.DataLoader",
"cv2.findContours",
"torch.no_grad",
"skimage.transform.resize"... | [((661, 686), 'scipy.ndimage.binary_fill_holes', 'binary_fill_holes', (['binary'], {}), '(binary)\n', (678, 686), False, 'from scipy.ndimage import binary_fill_holes, binary_closing, binary_dilation\n'), ((734, 797), 'skimage.morphology.remove_small_objects', 'remove_small_objects', (['binary'], {'min_size': 'min_size', 'connectivity': '(8)'}), '(binary, min_size=min_size, connectivity=8)\n', (754, 797), False, 'from skimage.morphology import remove_small_objects\n'), ((894, 905), 'time.time', 'time.time', ([], {}), '()\n', (903, 905), False, 'import cv2, copy, time\n'), ((923, 951), 'loader.PatchDataset', 'loader.PatchDataset', (['patches'], {}), '(patches)\n', (942, 951), False, 'import loader\n'), ((970, 1059), 'torch.utils.data.DataLoader', 'data.DataLoader', (['slide_dset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(4)'}), '(slide_dset, batch_size=args.batch_size, shuffle=False,\n num_workers=4)\n', (985, 1059), True, 'import torch.utils.data as data\n'), ((1730, 1791), 'numpy.zeros', 'np.zeros', (['(wsi_dim[0], wsi_dim[1], class_num)'], {'dtype': 'np.uint8'}), '((wsi_dim[0], wsi_dim[1], class_num), dtype=np.uint8)\n', (1738, 1791), True, 'import numpy as np\n'), ((2246, 2295), 'skimage.transform.resize', 'transform.resize', (['result_map', '(s_height, s_width)'], {}), '(result_map, (s_height, s_width))\n', (2262, 2295), False, 'from skimage import transform, morphology, filters\n'), ((2646, 2714), 'cv2.findContours', 'cv2.findContours', (['unsure_img', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(unsure_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (2662, 2714), False, 'import cv2, copy, time\n'), ((3219, 3284), 'cv2.findContours', 'cv2.findContours', (['yes_img', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(yes_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (3235, 3284), False, 'import cv2, copy, time\n'), ((4811, 4839), 'loader.PatchDataset', 'loader.PatchDataset', 
(['patches'], {}), '(patches)\n', (4830, 4839), False, 'import loader\n'), ((4858, 4947), 'torch.utils.data.DataLoader', 'data.DataLoader', (['slide_dset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(4)'}), '(slide_dset, batch_size=args.batch_size, shuffle=False,\n num_workers=4)\n', (4873, 4947), True, 'import torch.utils.data as data\n'), ((5336, 5362), 'numpy.argsort', 'np.argsort', (['norm_prob_list'], {}), '(norm_prob_list)\n', (5346, 5362), True, 'import numpy as np\n'), ((1065, 1080), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1078, 1080), False, 'import torch\n'), ((1387, 1398), 'time.time', 'time.time', ([], {}), '()\n', (1396, 1398), False, 'import cv2, copy, time\n'), ((2324, 2360), 'numpy.floor', 'np.floor', (['(SIZE2 / args.img_cnt_ratio)'], {}), '(SIZE2 / args.img_cnt_ratio)\n', (2332, 2360), True, 'import numpy as np\n'), ((2850, 2881), 'cv2.contourArea', 'cv2.contourArea', (['max_unsure_cnt'], {}), '(max_unsure_cnt)\n', (2865, 2881), False, 'import cv2, copy, time\n'), ((3405, 3433), 'cv2.contourArea', 'cv2.contourArea', (['max_yes_cnt'], {}), '(max_yes_cnt)\n', (3420, 3433), False, 'import cv2, copy, time\n'), ((4408, 4432), 'copy.deepcopy', 'copy.deepcopy', (['t_fea2048'], {}), '(t_fea2048)\n', (4421, 4432), False, 'import cv2, copy, time\n'), ((4495, 4518), 'copy.deepcopy', 'copy.deepcopy', (['t_logit3'], {}), '(t_logit3)\n', (4508, 4518), False, 'import cv2, copy, time\n'), ((4536, 4561), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(-1)'}), '(output, dim=-1)\n', (4545, 4561), True, 'import torch.nn.functional as F\n'), ((4623, 4646), 'copy.deepcopy', 'copy.deepcopy', (['cur_fea3'], {}), '(cur_fea3)\n', (4636, 4646), False, 'import cv2, copy, time\n'), ((4953, 4968), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4966, 4968), False, 'import torch\n'), ((610, 625), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (617, 625), True, 'import numpy as np\n')] |
import os
import subprocess
from inspect import isclass
import configargparse
import numpy as np
import sqlalchemy
import yaml
from IPython import embed
from angular_solver import solve
from database import Config, ConfigHolder, Graph, Task, get_session, DatabaseGraphGenome
from genetic_algorithm import (GeneticAlgorithm, Genome,
IterationTerminationConditionMet, SaveCallback,
k_point_crossover, linear_rank_selection,
one_point_crossover, uniform_crossover,
uniform_wheel_selection)
from instance_generation import (create_circle, create_circle_n_k,
create_random_circle)
from solver import MscColoringSolver, AngularMinSumGreedySolver
from solver.min_sum_simple_solver import solve_min_sum_simple_n_gon
from solver.mip import (AngularGraphScanMakespanAbsolute,
AngularGraphScanMakespanAbsoluteReduced,
AngularGraphScanMakespanHamilton,
AngularGraphScanMinSumHamilton,
AngularDependencySolver,
AngularDependencyLocalMinSumSolver,
AngularGraphScanLocalMinSumHamilton)
from solver.cp import (ConstraintAbsSolver,
ConstraintDependencySolver)
from utils import (Multidict, visualize_graph_2d, visualize_min_sum_sol_2d,
visualize_solution_2d)
from angular_evolver import (AngularSolverFitness, CompleteGraphGenome, GraphGenome, GraphGenomeCreator,
CompleteGraphGenomeCreator, mutate_2d_points, mutate_vertex_edge_genomes)
from solver import ALL_SOLVER
class GroupedAction(configargparse.Action):
    """Argparse action that stores a ``group.dest`` option on a nested namespace.

    An option whose dest is e.g. ``fitness.tol`` is stored as
    ``namespace.fitness.tol`` instead of a flat attribute, creating the
    ``fitness`` sub-namespace on demand.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # maxsplit=1 (was 2): only the first dot separates group from dest,
        # so dests containing further dots no longer break the 2-tuple unpack.
        group, dest = self.dest.split('.', 1)
        groupspace = getattr(namespace, group, configargparse.Namespace())
        setattr(groupspace, dest, values)
        setattr(namespace, group, groupspace)
def string_to_callable(function_name):
    """Resolve *function_name* to a callable, preferring the ALL_SOLVER registry.

    Falls back to a module-level ``globals()`` lookup when the name is not a
    registered solver.  'eval' is rejected outright.
    """
    assert function_name != 'eval', "Eval is not allowed!"
    # Shares the one-shot warning flag with StringToCallableAction.
    if not getattr(StringToCallableAction, "warning_displayed", False):
        print("WARNING: Do not use StringToCallableAction in production code! This is just a hack for faster development!")
        setattr(StringToCallableAction, "warning_displayed", True)
    if function_name in ALL_SOLVER:
        return ALL_SOLVER[function_name]
    return globals()[function_name]
class StringToCallableAction(configargparse.Action):
    """Argparse action resolving an option value to a module-level callable.

    Development-only shortcut: the raw string is looked up in ``globals()``
    and must resolve to something callable, otherwise TypeError is raised.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # Emit the development warning only once per process.
        if not getattr(StringToCallableAction, "warning_displayed", False):
            print("WARNING: Do not use StringToCallableAction in production code! This is just a hack for faster development!")
            setattr(StringToCallableAction, "warning_displayed", True)
        resolved = globals()[values]
        if not callable(resolved):
            raise TypeError(f"{values} is not callable")
        setattr(namespace, self.dest, resolved)
def _instantiate_callables(func_name, obj_args):
    """Resolve *func_name*; classes are instantiated with *obj_args* kwargs.

    Plain functions are returned as-is; classes are constructed with
    ``**obj_args`` (an empty dict when obj_args is falsy).
    """
    target = string_to_callable(func_name)
    if not target:
        raise AttributeError(f"{func_name} function is not set.".capitalize())
    if not isclass(target):
        return target
    return target(**(obj_args or {}))
def _get_task_and_config(session, arg_config):
    """Resolve the (task, config) pair for this run.

    With a database URL and an existing task id: continue that task, either
    overriding its stored configs with the CLI namespace (after interactive
    confirmation) or loading the stored config.  Without a URL: interactively
    create a new task and persist the CLI namespace as its config, or fall
    back to using the raw CLI namespace with no task.

    Returns (task, config).

    NOTE(review): when url_path is set but arg_config has no task id, both
    task and config remain None — confirm callers handle that case.
    """
    task = None
    config = None
    if arg_config.url_path:
        if hasattr(arg_config, "task") and arg_config.task is not None:
            # Continue a previously created task from the database.
            task = session.query(Task).filter(Task.id == arg_config.task).one()
            if arg_config.override_config and \
                input(f"Are you sure to override the configs for {task.id}? (y/N)").lower() in ["y", "yes"]:
                print(f"Override config from task {task.id})")
                # Drop the stored configuration rows before replacing them.
                for task_config in task.configs:
                    session.delete(task_config)
                arg_config.override_config = False
                config = ConfigHolder.fromNamespace(arg_config, task, ["override_config", "url_path", "PreEvolveInteractive", "create_only"])
                session.add(config)
                session.commit()
            else:
                print("Using config from database")
                config = ConfigHolder(task)
    else:
        if input("New Task will be created (Y/n)?").lower() in ["", "yes", "y"]:
            print("Will create a new task.")
            task = Task(task_type="instance_evolver", status=Task.STATUS_OPTIONS.CREATED, name=arg_config.name)
            session.add(task)
            session.commit()
            # Task id only exists after the first commit.
            arg_config.task = task.id
            config = ConfigHolder.fromNamespace(arg_config, task, ignored_attributes=["url_path", "create_only", "name", "override_config"])
            session.add_all(config.database_configs)
            session.commit()
            savepath = input(f"Task ID is {task.id}. Type a filepath to save the ID in a config file (default: Skip save): ")
            if savepath:
                _save_task_file(savepath, config, task)
        else:
            # User declined: run with the CLI namespace only, no persistence.
            config = arg_config
    return task, config
def _save_task_file(savepath, config, task):
    """Write a small config file recording the task id and database path.

    Builds a throwaway parser that knows --task and --database so
    ``write_config_file`` can serialize them to *savepath*.
    """
    n_s = configargparse.Namespace()
    n_s.task = task.id
    parser = configargparse.Parser()
    parser.add_argument("--task")
    parser.add_argument("--database")
    # Parsing populates the parser's internal state used by write_config_file;
    # the returned namespace itself is not needed afterwards.
    parsed = parser.parse_args(args=[f"--task={task.id}", f"--database={config.url_path}"])
    parser.write_config_file(n_s, [savepath])
def _evolve_instances(arg_config):
    """Open the database, resolve the task/config pair, and run the task.

    With --create-only the task is persisted but not processed.
    """
    db_session = get_session(arg_config.url_path)
    task, config = _get_task_and_config(db_session, arg_config)
    if arg_config.create_only:
        return
    process_task(config, task, db_session)
def process_task(config, task, session):
    """Run one instance-evolution task: build the GA from *config* and evolve.

    Task status is mirrored into the database: PROCESSING while running,
    FINISHED on success, INTERRUPTED on InterruptedError (swallowed), ERROR
    with the message recorded on any other exception (re-raised).
    """
    # First init all callable classes
    try:
        mutation = _instantiate_callables(config.mutation_func, None)
        selection = _instantiate_callables(config.selection_func, None)
        crossover = _instantiate_callables(config.crossover_func, None)
        fitness = _instantiate_callables(config.fitness_func, config.fitness_func_initargs)
        # Standard termination/callback choices get sensible defaults so the
        # user does not have to spell out their init arguments.
        if config.term_condition == 'IterationTerminationConditionMet' and not config.term_condition_initargs:
            term_con = IterationTerminationConditionMet(max_iter=config.generations)
        else:
            term_con = _instantiate_callables(config.term_condition, config.term_condition_initargs)
        if config.callback == 'SaveCallback' and config.callback_initargs is None:
            callback = SaveCallback(config.generations, config.population_amount, task, session)
        else:
            callback = _instantiate_callables(config.callback, config.callback_initargs)
        task.status = Task.STATUS_OPTIONS.PROCESSING
        if session:
            session.commit()
        # Now load population if provided, else generate it
        starting_generation, population = _load_population(config, task, session)
        if config.PreEvolveInteractive:
            print("Config set up. To change the population just change the 'population' variable.")
            print("For other variables just refer to the locals.")
            embed()
        gen_algo = GeneticAlgorithm(
            genomes=population,
            selection=selection,
            mutation=mutation,
            fitness=fitness,
            crossover=crossover,
            callback=callback,
            termCon=term_con,
            elitism=config.elitism,
            mutationChance=config.mutation_chance_genome,
            mutationChanceGene=config.mutation_chance_gene
        )
        gen_algo.evolve(generation=starting_generation)
        task.status = Task.STATUS_OPTIONS.FINISHED
        if session:
            session.commit()
    except InterruptedError:
        # Deliberate stop: record it but do not propagate.
        task.status = task.STATUS_OPTIONS.INTERRUPTED
        if session:
            session.commit()
    except Exception as e:
        if session:
            task.status = Task.STATUS_OPTIONS.ERROR
            task.error_message = str(e)
            session.commit()
        print(e)
        # Bare raise preserves the original traceback ('raise e' would reset it).
        raise
def _load_population(config, task, session: 'Session'):
    """Load the latest generation for *task* from the DB, topping up with fresh genomes.

    Queries the most recent generation's genomes; if fewer than
    ``config.population_amount`` are found, new instances are created via the
    configured instance-creation callable and persisted.  Excess genomes are
    sliced off.

    Returns (current_generation, population) where population is a numpy
    object array / sequence of Genome instances.

    NOTE(review): the fill branch calls session.add_all even though session
    may be None when no genomes were loaded — confirm callers always pass a
    session when the population must be generated.
    """
    population = []
    curr_generation = 0
    if session is not None:
        try:
            # Newest generation number for this task.
            last_gen = session.query(DatabaseGraphGenome)\
                        .filter(DatabaseGraphGenome.task_id == task.id)\
                        .order_by(DatabaseGraphGenome.generation.desc())\
                        .limit(1)\
                        .one()
            curr_generation = last_gen.generation
            # All genomes belonging to that newest generation.
            queue = session.query(DatabaseGraphGenome)\
                    .filter(DatabaseGraphGenome.task_id == task.id, DatabaseGraphGenome.generation == curr_generation)\
                    .order_by(DatabaseGraphGenome.generation.desc())\
                    .limit(config.population_amount)
            # Object array so heterogeneous genome instances can be stored.
            population = np.zeros(config.population_amount, dtype=object)
            population[:] = [genome for genome in queue]
            assert isinstance(population[0], Genome), "Loaded data does not contain valid genomes"
        except sqlalchemy.orm.exc.NoResultFound as e:
            # No prior generations: fall through with an empty population.
            pass
    if len(population) < config.population_amount:
        if population:
            print("Given population smaller than wanted. Fill with random instances")
        temp_pop = np.zeros(config.population_amount - len(population), dtype=object)
        create_instances = _instantiate_callables(config.instance_creation_func, config.instance_creation_initargs)
        temp_pop[:] = [
            create_instances(task, generation=curr_generation)
            for i in range(config.population_amount - len(population))
        ]
        session.add_all(temp_pop.tolist())
        session.commit()
        population = np.hstack([population[:len(population)],
                                temp_pop]) # ToDo: This call needs to be reworked
    elif len(population) > config.population_amount:
        print("Given population too large. Will slice off the end")
        population = population[:config.population_amount]
    return curr_generation, population
def _argument_parser():
    """Build and run the CLI/config-file argument parser for the instance evolver.

    Returns the parsed configargparse namespace.  Callable-valued options
    (mutation/selection/crossover/fitness/...) are passed as strings and
    resolved later by ``string_to_callable``; *-initargs options accept YAML
    dicts.
    """
    parser = configargparse.ArgumentParser(description="Parser for the instance evolver")
    parser.add_argument(
        '--config',
        type=str,
        help='Path to config file (default: inst_evo_settings.yaml)',
        default="inst_evo_settings.yaml",
        is_config_file_arg=True)
    parser.add_argument(
        '--PreEvolveInteractive',
        action='store_true',
        help='Ipython interactive for instance creation (default: False)',
        default=False)
    parser.add_argument('--override-config', action="store_true", default=False, help="Set this flag to override configuration with passed arguments")
    parser.add_argument('--url-path', type=str, default="angular.db", help="Path to database. Creates Database if it does not exist (Default: angular.db)")
    parser.add_argument('--task', type=int, help="Id of the task that shall be continued")
    parser.add_argument('--generations', type=int, default=200, help="Amount of generations evolved. If a save is loaded, it will only evolve the difference for the generations (default: 200)")
    parser.add_argument('--elitism', type=float, default=0.01, help="Elitism rate (default: 0.01)")
    #parser.add_argument('--genome-creator',
    parser.add_argument('--instance-creation-func', type=str, help="Function for initial creation of instances")
    parser.add_argument('--instance-creation-initargs', type=yaml.safe_load, help="Parameter for instance creation")
    parser.add_argument('--population-amount', type=int, default=200, help="Amont of genomes per generation (default: 200)")
    parser.add_argument('--mutation-chance-genome', type=float, default=0.03, help="Chance a genome will be selected for mutation (default: 0.03)")
    parser.add_argument('--mutation-chance-gene', type=float, default=0.03, help="Chance a gene is changed (default: 0.03)")
    parser.add_argument('--mutation-func', type=str, help="Mutation callable used. Required if no safefile config is used")
    parser.add_argument('--selection-func', type=str, help="Selection callable used. Required if no safefile is used")
    parser.add_argument('--crossover-func', type=str, help="Crossover callable used. Required if no safefile is used")
    parser.add_argument('--fitness-func', type=str, help="Fitness callable used. Required if no safefile is used")
    parser.add_argument('--fitness-func-initargs', type=yaml.safe_load, default=None, help="Fitness callable init keyword arguments. Omitted when emtpy.")
    parser.add_argument('--term-condition', type=str, default='IterationTerminationConditionMet', help="Termination callable used. (default: IterationTerminationConditionMet)")
    parser.add_argument('--term-condition-initargs', type=yaml.safe_load, default=None, help="Keyword arguments dict for termination condition callable init. Not needed for standard term-condition.")
    parser.add_argument('--callback', type=str, default='SaveCallback', help="Callback used in genetic_algorithm (default: SaveCallback)")
    parser.add_argument('--callback-initargs', type=yaml.safe_load, default=None, help="Callback keyword arguments dict for init. Not needed for standard SaveCallback else omitted if not provided")
    parser.add_argument('--create-only', action="store_true", help="Only create task instead of also solving it")
    parser.add_argument('--name', type=str, default="", help="Optional name description of the task")
    parsed = parser.parse_args()
    #parser.write_config_file()
    #print(vars(parsed))
    return parsed
if __name__ == "__main__":
    # Parse CLI/config-file arguments, then create and (unless --create-only)
    # process the instance-evolution task.
    CONFIG = _argument_parser()
    _evolve_instances(CONFIG)
| [
"database.ConfigHolder.fromNamespace",
"database.Task",
"genetic_algorithm.SaveCallback",
"database.DatabaseGraphGenome.generation.desc",
"configargparse.Parser",
"database.get_session",
"genetic_algorithm.GeneticAlgorithm",
"IPython.embed",
"database.ConfigHolder",
"numpy.zeros",
"configargpars... | [((5499, 5525), 'configargparse.Namespace', 'configargparse.Namespace', ([], {}), '()\n', (5523, 5525), False, 'import configargparse\n'), ((5562, 5585), 'configargparse.Parser', 'configargparse.Parser', ([], {}), '()\n', (5583, 5585), False, 'import configargparse\n'), ((5846, 5878), 'database.get_session', 'get_session', (['arg_config.url_path'], {}), '(arg_config.url_path)\n', (5857, 5878), False, 'from database import Config, ConfigHolder, Graph, Task, get_session, DatabaseGraphGenome\n'), ((10453, 10529), 'configargparse.ArgumentParser', 'configargparse.ArgumentParser', ([], {'description': '"""Parser for the instance evolver"""'}), "(description='Parser for the instance evolver')\n", (10482, 10529), False, 'import configargparse\n'), ((3450, 3471), 'inspect.isclass', 'isclass', (['callable_obj'], {}), '(callable_obj)\n', (3457, 3471), False, 'from inspect import isclass\n'), ((7501, 7784), 'genetic_algorithm.GeneticAlgorithm', 'GeneticAlgorithm', ([], {'genomes': 'population', 'selection': 'selection', 'mutation': 'mutation', 'fitness': 'fitness', 'crossover': 'crossover', 'callback': 'callback', 'termCon': 'term_con', 'elitism': 'config.elitism', 'mutationChance': 'config.mutation_chance_genome', 'mutationChanceGene': 'config.mutation_chance_gene'}), '(genomes=population, selection=selection, mutation=mutation,\n fitness=fitness, crossover=crossover, callback=callback, termCon=\n term_con, elitism=config.elitism, mutationChance=config.\n mutation_chance_genome, mutationChanceGene=config.mutation_chance_gene)\n', (7517, 7784), False, 'from genetic_algorithm import GeneticAlgorithm, Genome, IterationTerminationConditionMet, SaveCallback, k_point_crossover, linear_rank_selection, one_point_crossover, uniform_crossover, uniform_wheel_selection\n'), ((1935, 1961), 'configargparse.Namespace', 'configargparse.Namespace', ([], {}), '()\n', (1959, 1961), False, 'import configargparse\n'), ((6549, 6610), 
'genetic_algorithm.IterationTerminationConditionMet', 'IterationTerminationConditionMet', ([], {'max_iter': 'config.generations'}), '(max_iter=config.generations)\n', (6581, 6610), False, 'from genetic_algorithm import GeneticAlgorithm, Genome, IterationTerminationConditionMet, SaveCallback, k_point_crossover, linear_rank_selection, one_point_crossover, uniform_crossover, uniform_wheel_selection\n'), ((6832, 6905), 'genetic_algorithm.SaveCallback', 'SaveCallback', (['config.generations', 'config.population_amount', 'task', 'session'], {}), '(config.generations, config.population_amount, task, session)\n', (6844, 6905), False, 'from genetic_algorithm import GeneticAlgorithm, Genome, IterationTerminationConditionMet, SaveCallback, k_point_crossover, linear_rank_selection, one_point_crossover, uniform_crossover, uniform_wheel_selection\n'), ((7473, 7480), 'IPython.embed', 'embed', ([], {}), '()\n', (7478, 7480), False, 'from IPython import embed\n'), ((9139, 9187), 'numpy.zeros', 'np.zeros', (['config.population_amount'], {'dtype': 'object'}), '(config.population_amount, dtype=object)\n', (9147, 9187), True, 'import numpy as np\n'), ((4234, 4354), 'database.ConfigHolder.fromNamespace', 'ConfigHolder.fromNamespace', (['arg_config', 'task', "['override_config', 'url_path', 'PreEvolveInteractive', 'create_only']"], {}), "(arg_config, task, ['override_config', 'url_path',\n 'PreEvolveInteractive', 'create_only'])\n", (4260, 4354), False, 'from database import Config, ConfigHolder, Graph, Task, get_session, DatabaseGraphGenome\n'), ((4515, 4533), 'database.ConfigHolder', 'ConfigHolder', (['task'], {}), '(task)\n', (4527, 4533), False, 'from database import Config, ConfigHolder, Graph, Task, get_session, DatabaseGraphGenome\n'), ((4705, 4802), 'database.Task', 'Task', ([], {'task_type': '"""instance_evolver"""', 'status': 'Task.STATUS_OPTIONS.CREATED', 'name': 'arg_config.name'}), "(task_type='instance_evolver', status=Task.STATUS_OPTIONS.CREATED, name\n 
=arg_config.name)\n", (4709, 4802), False, 'from database import Config, ConfigHolder, Graph, Task, get_session, DatabaseGraphGenome\n'), ((4932, 5055), 'database.ConfigHolder.fromNamespace', 'ConfigHolder.fromNamespace', (['arg_config', 'task'], {'ignored_attributes': "['url_path', 'create_only', 'name', 'override_config']"}), "(arg_config, task, ignored_attributes=['url_path',\n 'create_only', 'name', 'override_config'])\n", (4958, 5055), False, 'from database import Config, ConfigHolder, Graph, Task, get_session, DatabaseGraphGenome\n'), ((9025, 9062), 'database.DatabaseGraphGenome.generation.desc', 'DatabaseGraphGenome.generation.desc', ([], {}), '()\n', (9060, 9062), False, 'from database import Config, ConfigHolder, Graph, Task, get_session, DatabaseGraphGenome\n'), ((8687, 8724), 'database.DatabaseGraphGenome.generation.desc', 'DatabaseGraphGenome.generation.desc', ([], {}), '()\n', (8722, 8724), False, 'from database import Config, ConfigHolder, Graph, Task, get_session, DatabaseGraphGenome\n')] |
from __future__ import print_function, division
import os
import sys
import pytest
import warnings
import numpy
from galpy.util import galpyWarning
from test_actionAngle import reset_warning_registry
# True when running under Travis CI (read from the TRAVIS env variable).
_TRAVIS= bool(os.getenv('TRAVIS'))
# True on Python 2 interpreters.
PY2= sys.version < '3'
# Print all galpyWarnings always for tests of warnings
warnings.simplefilter("always",galpyWarning)
#Basic sanity checking: circular orbit should have constant R, zero vR, vT=vc
def _check_circular_orbit_torus(aAT, pot, jr, jz, jphi, angler, anglephi, anglez, tol):
    """Assert that a (near-)zero-action torus in *pot* reproduces the circular orbit.

    Checks R == rl(pot, jphi), vR == 0, vT == vcirc at rl, z == 0 and vz == 0
    to within 10**tol at every supplied angle.
    """
    from galpy.potential import rl, vcirc
    RvR = aAT(jr, jphi, jz, angler, anglephi, anglez).T
    assert numpy.all(numpy.fabs(RvR[0]-rl(pot, jphi)) < 10.**tol), \
        'circular orbit does not have constant radius for actionAngleTorus'
    assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \
        'circular orbit does not have zero radial velocity for actionAngleTorus'
    assert numpy.all(numpy.fabs(RvR[2]-vcirc(pot, rl(pot, jphi))) < 10.**tol), \
        'circular orbit does not have constant vT=vc for actionAngleTorus'
    assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \
        'circular orbit does not have zero vertical height for actionAngleTorus'
    assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \
        'circular orbit does not have zero vertical velocity for actionAngleTorus'

#Basic sanity checking: circular orbit should have constant R, zero vR, vT=vc
def test_actionAngleTorus_basic():
    from galpy.actionAngle import actionAngleTorus
    from galpy.potential import MWPotential, \
        FlattenedPowerPotential, PlummerPotential
    jr = 10.**-10.
    jz = 10.**-10.
    angler = numpy.linspace(0., 2.*numpy.pi, 101)
    anglephi = numpy.linspace(0., 2.*numpy.pi, 101)+1.
    anglez = numpy.linspace(0., 2.*numpy.pi, 101)+2.
    # at R=1, Lz=1 in MWPotential
    _check_circular_orbit_torus(actionAngleTorus(pot=MWPotential), MWPotential,
                                jr, jz, 1., angler, anglephi, anglez, tol=-4.)
    # at Lz=1.5, using Plummer (slightly looser tolerance)
    pp = PlummerPotential(normalize=1.)
    _check_circular_orbit_torus(actionAngleTorus(pot=pp), pp,
                                jr, jz, 1.5, angler, anglephi, anglez, tol=-3.25)
    # at Lz=0.5, using FlattenedPowerPotential
    fp = FlattenedPowerPotential(normalize=1.)
    _check_circular_orbit_torus(actionAngleTorus(pot=fp), fp,
                                jr, jz, 0.5, angler, anglephi, anglez, tol=-4.)
    return None
#Basic sanity checking: close-to-circular orbit should have freq. = epicycle freq.
def _check_epicycle_freqs_torus(aAT, pot, jr, jz, jphi, tol):
    """Assert that torus frequencies at tiny jr, jz match the epicycle frequencies.

    Compares Or, Ophi, Oz against epifreq, omegac and verticalfreq evaluated
    at rl(pot, jphi), to a relative tolerance of 10**tol.
    """
    from galpy.potential import epifreq, omegac, verticalfreq, rl
    om = aAT.Freqs(jr, jphi, jz)
    assert numpy.fabs((om[0]-epifreq(pot, rl(pot, jphi)))/om[0]) < 10.**tol, \
        'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'
    assert numpy.fabs((om[1]-omegac(pot, rl(pot, jphi)))/om[1]) < 10.**tol, \
        'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'
    assert numpy.fabs((om[2]-verticalfreq(pot, rl(pot, jphi)))/om[2]) < 10.**tol, \
        'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'

#Basic sanity checking: close-to-circular orbit should have freq. = epicycle freq.
def test_actionAngleTorus_basic_freqs():
    from galpy.actionAngle import actionAngleTorus
    from galpy.potential import JaffePotential, PowerSphericalPotential, \
        HernquistPotential
    jr = 10.**-6.
    jz = 10.**-6.
    # at Lz=1 in a Jaffe potential
    jp = JaffePotential(normalize=1.)
    _check_epicycle_freqs_torus(actionAngleTorus(pot=jp), jp, jr, jz, 1., tol=-3.)
    # at Lz=1.5, w/ different potential
    pp = PowerSphericalPotential(normalize=1.)
    _check_epicycle_freqs_torus(actionAngleTorus(pot=pp), pp, jr, jz, 1.5, tol=-3.)
    # at Lz=0.5, w/ different potential; appears more difficult
    hp = HernquistPotential(normalize=1.)
    _check_epicycle_freqs_torus(actionAngleTorus(pot=hp), hp, jr, jz, 0.5, tol=-2.5)
    return None
#Test that orbit from actionAngleTorus is the same as an integrated orbit
#Test that orbit from actionAngleTorus is the same as an integrated orbit
def test_actionAngleTorus_orbit():
    """Torus-mapped orbit must agree with direct orbit integration in MWPotential2014.

    The same orbit is generated two ways — evaluating the torus at angles
    advanced by the torus frequencies, and numerically integrating the
    initial phase-space point — then compared coordinate by coordinate.
    """
    from galpy.actionAngle import actionAngleTorus
    from galpy.potential import MWPotential2014
    from galpy.orbit import Orbit
    # Set up instance
    aAT= actionAngleTorus(pot=MWPotential2014,tol=10.**-5.)
    jr,jphi,jz= 0.05,1.1,0.025
    # First calculate frequencies and the initial RvR
    RvRom= aAT.xvFreqs(jr,jphi,jz,
                       numpy.array([0.]),
                       numpy.array([1.]),
                       numpy.array([2.]))
    om= RvRom[1:]
    # Angles along an orbit
    ts= numpy.linspace(0.,100.,1001)
    angler= ts*om[0]
    anglephi= 1.+ts*om[1]
    anglez= 2.+ts*om[2]
    # Calculate the orbit using actionAngleTorus
    RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
    # Calculate the orbit using orbit integration
    orb= Orbit([RvRom[0][0,0],RvRom[0][0,1],RvRom[0][0,2],
                RvRom[0][0,3],RvRom[0][0,4],RvRom[0][0,5]])
    orb.integrate(ts,MWPotential2014)
    # Compare
    tol= -3.
    assert numpy.all(numpy.fabs(orb.R(ts)-RvR[0]) < 10.**tol), \
        'Integrated orbit does not agree with torus orbit in R'
    assert numpy.all(numpy.fabs(orb.vR(ts)-RvR[1]) < 10.**tol), \
        'Integrated orbit does not agree with torus orbit in vR'
    assert numpy.all(numpy.fabs(orb.vT(ts)-RvR[2]) < 10.**tol), \
        'Integrated orbit does not agree with torus orbit in vT'
    assert numpy.all(numpy.fabs(orb.z(ts)-RvR[3]) < 10.**tol), \
        'Integrated orbit does not agree with torus orbit in z'
    assert numpy.all(numpy.fabs(orb.vz(ts)-RvR[4]) < 10.**tol), \
        'Integrated orbit does not agree with torus orbit in vz'
    # phi is compared modulo 2*pi, recentered to (-pi, pi].
    assert numpy.all(numpy.fabs((orb.phi(ts)-RvR[5]+numpy.pi) % (2.*numpy.pi) -numpy.pi) < 10.**tol), \
        'Integrated orbit does not agree with torus orbit in phi'
    return None
# Test that actionAngleTorus w/ interp pot gives same freqs as regular pot
# Doesn't work well: TM aborts because our interpolated forces aren't
# consistent enough with the potential for TM's taste, but we test that it at
# at least works somewhat
def test_actionAngleTorus_interppot_freqs():
    """Torus frequencies from an interpolated potential roughly match the exact ones.

    TM aborts for tightly-converged tori on interpolated forces, so only
    loose relative tolerances are checked here.
    """
    from galpy.actionAngle import actionAngleTorus
    from galpy.potential import LogarithmicHaloPotential, interpRZPotential
    lp = LogarithmicHaloPotential(normalize=1.)
    ip = interpRZPotential(RZPot=lp,
                           interpPot=True,
                           interpDens=True, interpRforce=True, interpzforce=True,
                           enable_c=True)
    jr, jphi, jz = 0.05, 1.1, 0.02
    om_exact = actionAngleTorus(pot=lp).Freqs(jr, jphi, jz)
    om_interp = actionAngleTorus(pot=ip).Freqs(jr, jphi, jz)
    # (index, relative tolerance, label); the vertical frequency is the
    # hardest to reproduce and gets the loosest bound.
    checks = ((0, 0.2, 'Radial'),
              (1, 0.2, 'Azimuthal'),
              (2, 0.8, 'Vertical'))
    for idx, thresh, label in checks:
        reldiff = numpy.fabs((om_exact[idx]-om_interp[idx])/om_exact[idx])
        assert reldiff < thresh, label+' frequency computed using the torus machine does not agree between potential and interpolated potential'
    return None
#Test the actionAngleTorus against an isochrone potential: actions
#Test the actionAngleTorus against an isochrone potential: actions
def test_actionAngleTorus_Isochrone_actions():
    """Round-trip check: actions -> torus point -> actions in an isochrone potential.

    The analytic isochrone solution must recover the input (Jr, Lz, Jz) from
    the phase-space point produced by the torus machinery.
    """
    from galpy.potential import IsochronePotential
    from galpy.actionAngle import actionAngleTorus, \
        actionAngleIsochrone
    ip = IsochronePotential(normalize=1., b=1.2)
    aAI = actionAngleIsochrone(ip=ip)
    tol = -6.
    aAT = actionAngleTorus(pot=ip, tol=tol)
    jr, jphi, jz = 0.075, 1.1, 0.05
    angler = numpy.array([0.])
    anglephi = numpy.array([numpy.pi])
    anglez = numpy.array([numpy.pi/2.])
    # Map (J, angle) to a phase-space point with the torus machinery ...
    RvR = aAT(jr, jphi, jz, angler, anglephi, anglez).T
    # ... and recover the actions with the analytic isochrone solution.
    ji = aAI(*RvR)
    for recovered, expected in ((ji[0], jr), (ji[1], jphi), (ji[2], jz)):
        relerr = numpy.fabs((recovered-expected)/expected)
        assert relerr < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jr at %f%%' % (relerr*100.)
    return None
#Test the actionAngleTorus against an isochrone potential: frequencies and angles
def test_actionAngleTorus_Isochrone_freqsAngles():
    """Compare torus-machine frequencies and angles with the analytic
    isochrone actionAngle solution along a grid of input angles."""
    from galpy.potential import IsochronePotential
    from galpy.actionAngle import actionAngleTorus, \
        actionAngleIsochrone
    ip= IsochronePotential(normalize=1.,b=1.2)
    aAI= actionAngleIsochrone(ip=ip)
    tol= -6.
    aAT= actionAngleTorus(pot=ip,tol=tol)
    jr,jphi,jz= 0.075,1.1,0.05
    # Grids of input angles, each offset differently and wrapped to [0,2pi)
    base= numpy.linspace(0.,numpy.pi,101)
    angler= (numpy.array([0.1])+base) % (2.*numpy.pi)
    anglephi= (numpy.array([numpy.pi])+base) % (2.*numpy.pi)
    anglez= (numpy.array([numpy.pi/2.])+base) % (2.*numpy.pi)
    # Phase-space points and frequencies from the torus machine
    RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)
    # Actions, frequencies, and angles from the analytic solution
    ws= aAI.actionsFreqsAngles(*RvRom[0].T)
    dOr= numpy.fabs(ws[3]-RvRom[1])
    dOp= numpy.fabs(ws[4]-RvRom[2])
    dOz= numpy.fabs(ws[5]-RvRom[3])
    def _wrapped_angle_diff(found,expected):
        # absolute angle difference, folded back into the principal branch
        diff= numpy.fabs(found-expected)
        diff[diff > numpy.pi]-= 2.*numpy.pi
        diff[diff < -numpy.pi]+= 2.*numpy.pi
        return diff
    dar= _wrapped_angle_diff(ws[6],angler)
    dap= _wrapped_angle_diff(ws[7],anglephi)
    daz= _wrapped_angle_diff(ws[8],anglez)
    assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)
    assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Ophi at %f%%' % (numpy.nanmax(dOp)*100.)
    assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)
    assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for ar at %f' % (numpy.nanmax(dar))
    assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for aphi at %f' % (numpy.nanmax(dap))
    assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for az at %f' % (numpy.nanmax(daz))
    return None
#Test the actionAngleTorus against a Staeckel potential: actions
def test_actionAngleTorus_Staeckel_actions():
    """Round-trip actions through the torus machine for a Staeckel potential.

    Map (jr,jphi,jz) plus a set of angles to a phase-space point with
    actionAngleTorus, then recover the actions with actionAngleStaeckel
    and check the relative disagreement against 10**tol.
    """
    from galpy.potential import KuzminKutuzovStaeckelPotential
    from galpy.actionAngle import actionAngleTorus, \
        actionAngleStaeckel
    delta= 1.2
    kp= KuzminKutuzovStaeckelPotential(normalize=1.,Delta=delta)
    aAS= actionAngleStaeckel(pot=kp,delta=delta,c=True)
    tol= -3.
    aAT= actionAngleTorus(pot=kp,tol=tol)
    jr,jphi,jz= 0.075,1.1,0.05
    angler= numpy.array([0.])
    anglephi= numpy.array([numpy.pi])
    anglez= numpy.array([numpy.pi/2.])
    # Calculate position from aAT
    RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
    # Calculate actions from aAS
    ji= aAS(*RvR)
    djr= numpy.fabs((ji[0]-jr)/jr)
    dlz= numpy.fabs((ji[1]-jphi)/jphi)
    djz= numpy.fabs((ji[2]-jz)/jz)
    # BUG FIX: the dlz and djz messages used to say 'Jr' (copy-paste error);
    # name the action that actually disagrees
    assert djr < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Jr at %f%%' % (djr*100.)
    assert dlz < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Lz at %f%%' % (dlz*100.)
    assert djz < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Jz at %f%%' % (djz*100.)
    return None
#Test the actionAngleTorus against an isochrone potential: frequencies and angles
def test_actionAngleTorus_Staeckel_freqsAngles():
    """Compare torus-machine frequencies and angles with the Staeckel
    actionAngle solution along a grid of input angles."""
    from galpy.potential import KuzminKutuzovStaeckelPotential
    from galpy.actionAngle import actionAngleTorus, \
        actionAngleStaeckel
    delta= 1.2
    kp= KuzminKutuzovStaeckelPotential(normalize=1.,Delta=delta)
    aAS= actionAngleStaeckel(pot=kp,delta=delta,c=True)
    tol= -3.
    aAT= actionAngleTorus(pot=kp,tol=tol)
    jr,jphi,jz= 0.075,1.1,0.05
    # Grids of input angles, each offset differently and wrapped to [0,2pi)
    base= numpy.linspace(0.,numpy.pi,101)
    angler= (numpy.array([0.1])+base) % (2.*numpy.pi)
    anglephi= (numpy.array([numpy.pi])+base) % (2.*numpy.pi)
    anglez= (numpy.array([numpy.pi/2.])+base) % (2.*numpy.pi)
    # Phase-space points and frequencies from the torus machine
    RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)
    # Actions, frequencies, and angles from the Staeckel approximation
    ws= aAS.actionsFreqsAngles(*RvRom[0].T)
    dOr= numpy.fabs(ws[3]-RvRom[1])
    dOp= numpy.fabs(ws[4]-RvRom[2])
    dOz= numpy.fabs(ws[5]-RvRom[3])
    def _wrapped_angle_diff(found,expected):
        # absolute angle difference, folded back into the principal branch
        diff= numpy.fabs(found-expected)
        diff[diff > numpy.pi]-= 2.*numpy.pi
        diff[diff < -numpy.pi]+= 2.*numpy.pi
        return diff
    dar= _wrapped_angle_diff(ws[6],angler)
    dap= _wrapped_angle_diff(ws[7],anglephi)
    daz= _wrapped_angle_diff(ws[8],anglez)
    assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)
    assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Ophi at %f%%' % (numpy.nanmax(dOp)*100.)
    assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)
    assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for ar at %f' % (numpy.nanmax(dar))
    assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for aphi at %f' % (numpy.nanmax(dap))
    assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for az at %f' % (numpy.nanmax(daz))
    return None
#Test the actionAngleTorus against a general potential w/ actionAngleIsochroneApprox: actions
def test_actionAngleTorus_isochroneApprox_actions():
    """Round-trip actions through the torus machine for MWPotential2014.

    Map (jr,jphi,jz) plus a set of angles to a phase-space point with
    actionAngleTorus, then recover the actions with
    actionAngleIsochroneApprox and check the relative disagreement
    against 10**tol.
    """
    from galpy.potential import MWPotential2014
    from galpy.actionAngle import actionAngleTorus, \
        actionAngleIsochroneApprox
    aAIA= actionAngleIsochroneApprox(pot=MWPotential2014,b=0.8)
    tol= -2.5
    aAT= actionAngleTorus(pot=MWPotential2014,tol=tol)
    jr,jphi,jz= 0.075,1.1,0.05
    angler= numpy.array([0.])
    anglephi= numpy.array([numpy.pi])
    anglez= numpy.array([numpy.pi/2.])
    # Calculate position from aAT
    RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
    # Calculate actions from aAIA
    ji= aAIA(*RvR)
    djr= numpy.fabs((ji[0]-jr)/jr)
    dlz= numpy.fabs((ji[1]-jphi)/jphi)
    djz= numpy.fabs((ji[2]-jz)/jz)
    # BUG FIX: the dlz/djz messages used to say 'Jr', and the djz message
    # named a non-existent 'actionAngleMWPotential2014' (copy-paste errors)
    assert djr < 10.**tol, 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Jr at %f%%' % (djr*100.)
    assert dlz < 10.**tol, 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Lz at %f%%' % (dlz*100.)
    assert djz < 10.**tol, 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Jz at %f%%' % (djz*100.)
    return None
#Test the actionAngleTorus against a general potential w/ actionAngleIsochrone: frequencies and angles
def test_actionAngleTorus_isochroneApprox_freqsAngles():
    """Compare torus-machine frequencies and angles with the
    actionAngleIsochroneApprox solution along a grid of input angles."""
    from galpy.potential import MWPotential2014
    from galpy.actionAngle import actionAngleTorus, \
        actionAngleIsochroneApprox
    aAIA= actionAngleIsochroneApprox(pot=MWPotential2014,b=0.8)
    tol= -3.5
    aAT= actionAngleTorus(pot=MWPotential2014,tol=tol)
    jr,jphi,jz= 0.075,1.1,0.05
    # Grids of input angles, each offset differently and wrapped to [0,2pi)
    base= numpy.linspace(0.,numpy.pi,21)
    angler= (numpy.array([0.1])+base) % (2.*numpy.pi)
    anglephi= (numpy.array([numpy.pi])+base) % (2.*numpy.pi)
    anglez= (numpy.array([numpy.pi/2.])+base) % (2.*numpy.pi)
    # Phase-space points and frequencies from the torus machine
    RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)
    # Actions, frequencies, and angles from the isochrone approximation
    ws= aAIA.actionsFreqsAngles(*RvRom[0].T)
    dOr= numpy.fabs(ws[3]-RvRom[1])
    dOp= numpy.fabs(ws[4]-RvRom[2])
    dOz= numpy.fabs(ws[5]-RvRom[3])
    def _wrapped_angle_diff(found,expected):
        # absolute angle difference, folded back into the principal branch
        diff= numpy.fabs(found-expected)
        diff[diff > numpy.pi]-= 2.*numpy.pi
        diff[diff < -numpy.pi]+= 2.*numpy.pi
        return diff
    dar= _wrapped_angle_diff(ws[6],angler)
    dap= _wrapped_angle_diff(ws[7],anglephi)
    daz= _wrapped_angle_diff(ws[8],anglez)
    assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)
    assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Ophi at %f%%' % (numpy.nanmax(dOp)*100.)
    assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)
    assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for ar at %f' % (numpy.nanmax(dar))
    assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for aphi at %f' % (numpy.nanmax(dap))
    assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for az at %f' % (numpy.nanmax(daz))
    return None
# Test that the frequencies returned by hessianFreqs are the same as those returned by Freqs
def test_actionAngleTorus_hessian_freqs():
    """hessianFreqs must report the same frequencies as Freqs."""
    from galpy.potential import MWPotential2014
    from galpy.actionAngle import actionAngleTorus
    aAT= actionAngleTorus(pot=MWPotential2014)
    jr,jphi,jz= 0.075,1.1,0.05
    freqs_direct= numpy.array(aAT.Freqs(jr,jphi,jz)[:3])
    freqs_hessian= numpy.array(aAT.hessianFreqs(jr,jphi,jz)[1:4])
    assert numpy.all(numpy.fabs(freqs_direct-freqs_hessian) < 10.**-8.), 'actionAngleTorus methods Freqs and hessianFreqs return different frequencies'
    return None
# Test that the Hessian is approximately symmetric
def test_actionAngleTorus_hessian_symm():
    """The non-symmetrized Hessian dO/dJ should be approximately symmetric."""
    from galpy.potential import MWPotential2014
    from galpy.actionAngle import actionAngleTorus
    aAT= actionAngleTorus(pot=MWPotential2014)
    jr,jphi,jz= 0.075,1.1,0.05
    hess= aAT.hessianFreqs(jr,jphi,jz,tol=0.0001,nosym=True)[0]
    # relative asymmetry between the Hessian and its transpose
    rel_asym= numpy.fabs((hess-hess.T)/hess)
    assert numpy.all(rel_asym < 0.03), 'actionAngleTorus Hessian is not symmetric'
    return None
# Test that the Hessian is approximately correct
def test_actionAngleTorus_hessian_linear():
    """The Hessian should linearly predict frequency changes: dO ~ H . dJ."""
    from galpy.potential import MWPotential2014
    from galpy.actionAngle import actionAngleTorus
    aAT= actionAngleTorus(pot=MWPotential2014)
    jr,jphi,jz= 0.075,1.1,0.05
    hess= aAT.hessianFreqs(jr,jphi,jz,tol=0.0001,nosym=True)[0]
    dj= numpy.array([0.02,0.005,-0.01])
    predicted_dO= numpy.dot(hess,dj)
    freqs0= numpy.array(aAT.Freqs(jr,jphi,jz)[:3])
    actual_dO= numpy.array(aAT.Freqs(jr+dj[0],jphi+dj[1],jz+dj[2])[:3])-freqs0
    assert numpy.all(numpy.fabs((predicted_dO-actual_dO)/freqs0)< 0.001), 'actionAngleTorus Hessian does not return good approximation to dO/dJ'
    return None
# Test that the frequencies returned by xvJacobianFreqs are the same as those returned by Freqs
def test_actionAngleTorus_jacobian_freqs():
    """xvJacobianFreqs must report the same frequencies as Freqs."""
    from galpy.potential import MWPotential2014
    from galpy.actionAngle import actionAngleTorus
    aAT= actionAngleTorus(pot=MWPotential2014)
    jr,jphi,jz= 0.075,1.1,0.05
    freqs_direct= numpy.array(aAT.Freqs(jr,jphi,jz)[:3])
    freqs_jac= numpy.array(aAT.xvJacobianFreqs(jr,jphi,jz,
                                           numpy.array([0.]),numpy.array([1.]),
                                           numpy.array([2.]))[3:6])
    assert numpy.all(numpy.fabs(freqs_direct-freqs_jac) < 10.**-8.), 'actionAngleTorus methods Freqs and xvJacobianFreqs return different frequencies'
    return None
# Test that the Hessian returned by xvJacobianFreqs are the same as those returned by hessianFreqs
def test_actionAngleTorus_jacobian_hessian():
    """xvJacobianFreqs must report the same Hessian as hessianFreqs."""
    from galpy.potential import MWPotential2014
    from galpy.actionAngle import actionAngleTorus
    aAT= actionAngleTorus(pot=MWPotential2014)
    jr,jphi,jz= 0.075,1.1,0.05
    hess_direct= numpy.array(aAT.hessianFreqs(jr,jphi,jz)[0])
    hess_jac= numpy.array(aAT.xvJacobianFreqs(jr,jphi,jz,
                                          numpy.array([0.]),numpy.array([1.]),
                                          numpy.array([2.]))[2])
    assert numpy.all(numpy.fabs(hess_direct-hess_jac) < 10.**-8.), 'actionAngleTorus methods hessianFreqs and xvJacobianFreqs return different Hessians'
    return None
# Test that the xv returned by xvJacobianFreqs are the same as those returned by __call__
def test_actionAngleTorus_jacobian_xv():
    """xvJacobianFreqs must return the same phase-space points as __call__."""
    from galpy.potential import MWPotential2014
    from galpy.actionAngle import actionAngleTorus
    aAT= actionAngleTorus(pot=MWPotential2014)
    jr,jphi,jz= 0.075,1.1,0.05
    angler= numpy.array([0.,1.])
    anglephi= numpy.array([1.,2.])
    anglez= numpy.array([2.,3.])
    xv_call= numpy.array(aAT(jr,jphi,jz,angler,anglephi,anglez))
    xv_jac= numpy.array(aAT.xvJacobianFreqs(jr,jphi,jz,angler,anglephi,anglez)[0])
    assert numpy.all(numpy.fabs(xv_call-xv_jac) < 10.**-8.), 'actionAngleTorus methods __call__ and xvJacobianFreqs return different xv'
    return None
# Test that the determinant of the Jacobian returned by xvJacobianFreqs is close to 1/R (should be 1 for rectangular coordinates, 1/R for cylindrical
def test_actionAngleTorus_jacobian_detone():
    """det(Jacobian) from xvJacobianFreqs should be ~1/R (it would be 1 in
    rectangular coordinates, 1/R in cylindrical)."""
    from galpy.potential import MWPotential2014
    from galpy.actionAngle import actionAngleTorus
    aAT= actionAngleTorus(pot=MWPotential2014,dJ=0.0001)
    jr,jphi,jz= 0.075,1.1,0.05
    angler= numpy.array([0.,1.])
    anglephi= numpy.array([1.,2.])
    anglez= numpy.array([2.,3.])
    jf= aAT.xvJacobianFreqs(jr,jphi,jz,angler,anglephi,anglez)
    for ii in range(2):
        # R * |det(J)| should be close to 1 at each of the two sample points
        scaled_det= jf[0][ii,0]*numpy.fabs(numpy.linalg.det(jf[1][ii]))
        assert numpy.fabs(scaled_det-1) < 0.01, 'Jacobian returned by actionAngleTorus method xvJacobianFreqs does not have the expected determinant'
    return None
# Test that Jacobian returned by xvJacobianFreqs is approximately correct
def test_actionAngleTorus_jacobian_linear():
    """The Jacobian should linearly predict xv changes for a small step in
    (J, angle) space: xv(J+dJ,a+da) ~ xv(J,a) + Jac . (dJ,da)."""
    from galpy.potential import MWPotential2014
    from galpy.actionAngle import actionAngleTorus
    aAT= actionAngleTorus(pot=MWPotential2014)
    jr,jphi,jz= 0.075,1.1,0.05
    angler= numpy.array([0.5])
    anglephi= numpy.array([1.])
    anglez= numpy.array([2.])
    jf= aAT.xvJacobianFreqs(jr,jphi,jz,angler,anglephi,anglez)
    xv0= aAT(jr,jphi,jz,angler,anglephi,anglez)
    # small displacement in (Jr,Jphi,Jz,ar,aphi,az)
    step= 2.*numpy.array([0.001,0.002,0.003,-0.002,0.004,0.002])
    xv_direct= aAT(jr+step[0],jphi+step[1],jz+step[2],
                   angler+step[3],anglephi+step[4],anglez+step[5])
    xv_linear= xv0+numpy.dot(jf[1],step)
    assert numpy.all(numpy.fabs((xv_linear-xv_direct)/xv_direct) < 0.01), 'Jacobian returned by actionAngleTorus method xvJacobianFreqs does not appear to be correct'
    return None
#Test error when potential is not implemented in C
def test_actionAngleTorus_nocerr():
    """actionAngleTorus must raise RuntimeError for a potential without a C
    implementation."""
    from galpy.actionAngle import actionAngleTorus
    from test_potential import BurkertPotentialNoC
    pot_noc= BurkertPotentialNoC()
    try:
        actionAngleTorus(pot=pot_noc)
    except RuntimeError:
        pass
    else:
        raise AssertionError("actionAngleTorus initialization with potential w/o C should have given a RuntimeError, but didn't")
    return None
#Test error when potential is not axisymmetric
def test_actionAngleTorus_nonaxierr():
    """actionAngleTorus must raise RuntimeError for a non-axisymmetric
    potential."""
    from galpy.actionAngle import actionAngleTorus
    from galpy.potential import TriaxialNFWPotential
    triax_pot= TriaxialNFWPotential(normalize=1.,b=0.9)
    try:
        actionAngleTorus(pot=triax_pot)
    except RuntimeError:
        pass
    else:
        raise AssertionError("actionAngleTorus initialization with non-axisymmetric potential should have given a RuntimeError, but didn't")
    return None
# Test the Autofit torus warnings
def test_actionAngleTorus_AutoFitWarning():
    """Every actionAngleTorus evaluation method should emit the AutoFit
    warning for an orbit where the torus fit fails its goal.

    The original code repeated the same warning-capture block five times;
    it is factored into a local helper (behavior unchanged).
    """
    from galpy.potential import LogarithmicHaloPotential
    from galpy.actionAngle import actionAngleTorus
    import warnings
    lp= LogarithmicHaloPotential(normalize=1.,q=0.9)
    aAT= actionAngleTorus(pot=lp,tol=10.**-8.)
    # These should give warnings
    jr, jp, jz= 0.27209033, 1.80253892, 0.6078445
    ar, ap, az= numpy.array([1.95732492]), numpy.array([6.16753224]), \
        numpy.array([4.08233059])
    # Exact message AutoFit emits when the fit misses the goal badly
    autofitmsg= "actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2"
    def _assert_autofit_warning(func,args,reset=False):
        # Run func(*args) while recording warnings and check that the
        # AutoFit warning is among them (other warnings may also be raised)
        with warnings.catch_warnings(record=True) as w:
            # clear the warning registry once so the first capture is clean (PY2 only)
            if reset and PY2: reset_warning_registry('galpy')
            warnings.simplefilter("always",galpyWarning)
            func(*args)
            raisedWarning= any(str(wa.message) == autofitmsg for wa in w)
        assert raisedWarning, "actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't"
    # Exercise every public evaluation method
    _assert_autofit_warning(aAT,(jr,jp,jz,ar,ap,az),reset=True)
    _assert_autofit_warning(aAT.xvFreqs,(jr,jp,jz,ar,ap,az))
    _assert_autofit_warning(aAT.Freqs,(jr,jp,jz))
    _assert_autofit_warning(aAT.hessianFreqs,(jr,jp,jz))
    _assert_autofit_warning(aAT.xvJacobianFreqs,(jr,jp,jz,ar,ap,az))
    return None
def test_MWPotential_warning_torus():
    """Using MWPotential with actionAngleTorus must emit a galpyWarning
    (see #229).

    BUG FIX: the bare ``except: pass`` swallowed every exception (including
    KeyboardInterrupt) and would mask unrelated failures; catch only
    galpyWarning, which is what ``simplefilter("error", galpyWarning)``
    turns the warning into.  The filter is now also restored in ``finally``
    so a failing assertion cannot leave warnings-as-errors active.
    """
    from galpy.actionAngle import actionAngleTorus
    from galpy.potential import MWPotential
    if PY2: reset_warning_registry('galpy')
    warnings.simplefilter("error",galpyWarning)
    try:
        actionAngleTorus(pot=MWPotential)
    except galpyWarning:
        # expected: the MWPotential warning was raised as an error
        pass
    else:
        raise AssertionError("actionAngleTorus with MWPotential should have thrown a warning, but didn't")
    finally:
        # Turn warnings back into warnings even if the assertion fired
        warnings.simplefilter("always",galpyWarning)
    return None
| [
"galpy.actionAngle.actionAngleTorus",
"numpy.array",
"galpy.potential.IsochronePotential",
"galpy.potential.interpRZPotential",
"galpy.potential.PowerSphericalPotential",
"test_potential.BurkertPotentialNoC",
"galpy.potential.JaffePotential",
"galpy.actionAngle.actionAngleStaeckel",
"numpy.linspace"... | [((313, 358), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""', 'galpyWarning'], {}), "('always', galpyWarning)\n", (334, 358), False, 'import warnings\n'), ((214, 233), 'os.getenv', 'os.getenv', (['"""TRAVIS"""'], {}), "('TRAVIS')\n", (223, 233), False, 'import os\n'), ((689, 722), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'MWPotential'}), '(pot=MWPotential)\n', (705, 722), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((767, 807), 'numpy.linspace', 'numpy.linspace', (['(0.0)', '(2.0 * numpy.pi)', '(101)'], {}), '(0.0, 2.0 * numpy.pi, 101)\n', (781, 807), False, 'import numpy\n'), ((1740, 1771), 'galpy.potential.PlummerPotential', 'PlummerPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (1756, 1771), False, 'from galpy.potential import MWPotential, rl, vcirc, FlattenedPowerPotential, PlummerPotential\n'), ((1780, 1804), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'pp'}), '(pot=pp)\n', (1796, 1804), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((2642, 2680), 'galpy.potential.FlattenedPowerPotential', 'FlattenedPowerPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (2665, 2680), False, 'from galpy.potential import MWPotential, rl, vcirc, FlattenedPowerPotential, PlummerPotential\n'), ((2689, 2713), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'fp'}), '(pot=fp)\n', (2705, 2713), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((3867, 3896), 'galpy.potential.JaffePotential', 'JaffePotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (3881, 3896), False, 'from galpy.potential import epifreq, omegac, verticalfreq, rl, JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((3905, 3929), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'jp'}), '(pot=jp)\n', (3921, 3929), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((4497, 
4535), 'galpy.potential.PowerSphericalPotential', 'PowerSphericalPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (4520, 4535), False, 'from galpy.potential import epifreq, omegac, verticalfreq, rl, JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((4544, 4568), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'pp'}), '(pot=pp)\n', (4560, 4568), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((5162, 5195), 'galpy.potential.HernquistPotential', 'HernquistPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (5180, 5195), False, 'from galpy.potential import epifreq, omegac, verticalfreq, rl, JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((5204, 5228), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'hp'}), '(pot=hp)\n', (5220, 5228), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((6025, 6080), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'MWPotential2014', 'tol': '(10.0 ** -5.0)'}), '(pot=MWPotential2014, tol=10.0 ** -5.0)\n', (6041, 6080), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((6376, 6408), 'numpy.linspace', 'numpy.linspace', (['(0.0)', '(100.0)', '(1001)'], {}), '(0.0, 100.0, 1001)\n', (6390, 6408), False, 'import numpy\n'), ((6634, 6741), 'galpy.orbit.Orbit', 'Orbit', (['[RvRom[0][0, 0], RvRom[0][0, 1], RvRom[0][0, 2], RvRom[0][0, 3], RvRom[0][0,\n 4], RvRom[0][0, 5]]'], {}), '([RvRom[0][0, 0], RvRom[0][0, 1], RvRom[0][0, 2], RvRom[0][0, 3],\n RvRom[0][0, 4], RvRom[0][0, 5]])\n', (6639, 6741), False, 'from galpy.orbit import Orbit\n'), ((8076, 8115), 'galpy.potential.LogarithmicHaloPotential', 'LogarithmicHaloPotential', ([], {'normalize': '(1.0)'}), '(normalize=1.0)\n', (8100, 8115), False, 'from galpy.potential import LogarithmicHaloPotential\n'), ((8123, 8241), 'galpy.potential.interpRZPotential', 'interpRZPotential', ([], {'RZPot': 'lp', 'interpPot': '(True)', 'interpDens': '(True)', 
'interpRforce': '(True)', 'interpzforce': '(True)', 'enable_c': '(True)'}), '(RZPot=lp, interpPot=True, interpDens=True, interpRforce=\n True, interpzforce=True, enable_c=True)\n', (8140, 8241), False, 'from galpy.potential import LogarithmicHaloPotential, interpRZPotential\n'), ((8322, 8346), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'lp'}), '(pot=lp)\n', (8338, 8346), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((8357, 8381), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'ip'}), '(pot=ip)\n', (8373, 8381), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((9241, 9281), 'galpy.potential.IsochronePotential', 'IsochronePotential', ([], {'normalize': '(1.0)', 'b': '(1.2)'}), '(normalize=1.0, b=1.2)\n', (9259, 9281), False, 'from galpy.potential import IsochronePotential\n'), ((9289, 9316), 'galpy.actionAngle.actionAngleIsochrone', 'actionAngleIsochrone', ([], {'ip': 'ip'}), '(ip=ip)\n', (9309, 9316), False, 'from galpy.actionAngle import actionAngleTorus, actionAngleIsochrone\n'), ((9339, 9372), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'ip', 'tol': 'tol'}), '(pot=ip, tol=tol)\n', (9355, 9372), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((9415, 9433), 'numpy.array', 'numpy.array', (['[0.0]'], {}), '([0.0])\n', (9426, 9433), False, 'import numpy\n'), ((9447, 9470), 'numpy.array', 'numpy.array', (['[numpy.pi]'], {}), '([numpy.pi])\n', (9458, 9470), False, 'import numpy\n'), ((9483, 9512), 'numpy.array', 'numpy.array', (['[numpy.pi / 2.0]'], {}), '([numpy.pi / 2.0])\n', (9494, 9512), False, 'import numpy\n'), ((9654, 9683), 'numpy.fabs', 'numpy.fabs', (['((ji[0] - jr) / jr)'], {}), '((ji[0] - jr) / jr)\n', (9664, 9683), False, 'import numpy\n'), ((9689, 9722), 'numpy.fabs', 'numpy.fabs', (['((ji[1] - jphi) / jphi)'], {}), '((ji[1] - jphi) / jphi)\n', (9699, 9722), False, 'import numpy\n'), ((9728, 9757), 'numpy.fabs', 'numpy.fabs', (['((ji[2] - 
jz) / jz)'], {}), '((ji[2] - jz) / jz)\n', (9738, 9757), False, 'import numpy\n'), ((10464, 10504), 'galpy.potential.IsochronePotential', 'IsochronePotential', ([], {'normalize': '(1.0)', 'b': '(1.2)'}), '(normalize=1.0, b=1.2)\n', (10482, 10504), False, 'from galpy.potential import IsochronePotential\n'), ((10512, 10539), 'galpy.actionAngle.actionAngleIsochrone', 'actionAngleIsochrone', ([], {'ip': 'ip'}), '(ip=ip)\n', (10532, 10539), False, 'from galpy.actionAngle import actionAngleTorus, actionAngleIsochrone\n'), ((10562, 10595), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'ip', 'tol': 'tol'}), '(pot=ip, tol=tol)\n', (10578, 10595), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((11142, 11170), 'numpy.fabs', 'numpy.fabs', (['(ws[3] - RvRom[1])'], {}), '(ws[3] - RvRom[1])\n', (11152, 11170), False, 'import numpy\n'), ((11180, 11208), 'numpy.fabs', 'numpy.fabs', (['(ws[4] - RvRom[2])'], {}), '(ws[4] - RvRom[2])\n', (11190, 11208), False, 'import numpy\n'), ((11218, 11246), 'numpy.fabs', 'numpy.fabs', (['(ws[5] - RvRom[3])'], {}), '(ws[5] - RvRom[3])\n', (11228, 11246), False, 'import numpy\n'), ((11256, 11282), 'numpy.fabs', 'numpy.fabs', (['(ws[6] - angler)'], {}), '(ws[6] - angler)\n', (11266, 11282), False, 'import numpy\n'), ((11292, 11320), 'numpy.fabs', 'numpy.fabs', (['(ws[7] - anglephi)'], {}), '(ws[7] - anglephi)\n', (11302, 11320), False, 'import numpy\n'), ((11330, 11356), 'numpy.fabs', 'numpy.fabs', (['(ws[8] - anglez)'], {}), '(ws[8] - anglez)\n', (11340, 11356), False, 'import numpy\n'), ((11599, 11627), 'numpy.all', 'numpy.all', (['(dOr < 10.0 ** tol)'], {}), '(dOr < 10.0 ** tol)\n', (11608, 11627), False, 'import numpy\n'), ((11763, 11791), 'numpy.all', 'numpy.all', (['(dOp < 10.0 ** tol)'], {}), '(dOp < 10.0 ** tol)\n', (11772, 11791), False, 'import numpy\n'), ((11930, 11958), 'numpy.all', 'numpy.all', (['(dOz < 10.0 ** tol)'], {}), '(dOz < 10.0 ** tol)\n', (11939, 11958), False, 'import numpy\n'), 
((12094, 12122), 'numpy.all', 'numpy.all', (['(dar < 10.0 ** tol)'], {}), '(dar < 10.0 ** tol)\n', (12103, 12122), False, 'import numpy\n'), ((12251, 12279), 'numpy.all', 'numpy.all', (['(dap < 10.0 ** tol)'], {}), '(dap < 10.0 ** tol)\n', (12260, 12279), False, 'import numpy\n'), ((12410, 12438), 'numpy.all', 'numpy.all', (['(daz < 10.0 ** tol)'], {}), '(daz < 10.0 ** tol)\n', (12419, 12438), False, 'import numpy\n'), ((12852, 12910), 'galpy.potential.KuzminKutuzovStaeckelPotential', 'KuzminKutuzovStaeckelPotential', ([], {'normalize': '(1.0)', 'Delta': 'delta'}), '(normalize=1.0, Delta=delta)\n', (12882, 12910), False, 'from galpy.potential import KuzminKutuzovStaeckelPotential\n'), ((12918, 12966), 'galpy.actionAngle.actionAngleStaeckel', 'actionAngleStaeckel', ([], {'pot': 'kp', 'delta': 'delta', 'c': '(True)'}), '(pot=kp, delta=delta, c=True)\n', (12937, 12966), False, 'from galpy.actionAngle import actionAngleTorus, actionAngleStaeckel\n'), ((12987, 13020), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'kp', 'tol': 'tol'}), '(pot=kp, tol=tol)\n', (13003, 13020), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((13063, 13081), 'numpy.array', 'numpy.array', (['[0.0]'], {}), '([0.0])\n', (13074, 13081), False, 'import numpy\n'), ((13095, 13118), 'numpy.array', 'numpy.array', (['[numpy.pi]'], {}), '([numpy.pi])\n', (13106, 13118), False, 'import numpy\n'), ((13131, 13160), 'numpy.array', 'numpy.array', (['[numpy.pi / 2.0]'], {}), '([numpy.pi / 2.0])\n', (13142, 13160), False, 'import numpy\n'), ((13302, 13331), 'numpy.fabs', 'numpy.fabs', (['((ji[0] - jr) / jr)'], {}), '((ji[0] - jr) / jr)\n', (13312, 13331), False, 'import numpy\n'), ((13337, 13370), 'numpy.fabs', 'numpy.fabs', (['((ji[1] - jphi) / jphi)'], {}), '((ji[1] - jphi) / jphi)\n', (13347, 13370), False, 'import numpy\n'), ((13376, 13405), 'numpy.fabs', 'numpy.fabs', (['((ji[2] - jz) / jz)'], {}), '((ji[2] - jz) / jz)\n', (13386, 13405), False, 'import numpy\n'), 
((14131, 14189), 'galpy.potential.KuzminKutuzovStaeckelPotential', 'KuzminKutuzovStaeckelPotential', ([], {'normalize': '(1.0)', 'Delta': 'delta'}), '(normalize=1.0, Delta=delta)\n', (14161, 14189), False, 'from galpy.potential import KuzminKutuzovStaeckelPotential\n'), ((14197, 14245), 'galpy.actionAngle.actionAngleStaeckel', 'actionAngleStaeckel', ([], {'pot': 'kp', 'delta': 'delta', 'c': '(True)'}), '(pot=kp, delta=delta, c=True)\n', (14216, 14245), False, 'from galpy.actionAngle import actionAngleTorus, actionAngleStaeckel\n'), ((14266, 14299), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'kp', 'tol': 'tol'}), '(pot=kp, tol=tol)\n', (14282, 14299), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((14846, 14874), 'numpy.fabs', 'numpy.fabs', (['(ws[3] - RvRom[1])'], {}), '(ws[3] - RvRom[1])\n', (14856, 14874), False, 'import numpy\n'), ((14884, 14912), 'numpy.fabs', 'numpy.fabs', (['(ws[4] - RvRom[2])'], {}), '(ws[4] - RvRom[2])\n', (14894, 14912), False, 'import numpy\n'), ((14922, 14950), 'numpy.fabs', 'numpy.fabs', (['(ws[5] - RvRom[3])'], {}), '(ws[5] - RvRom[3])\n', (14932, 14950), False, 'import numpy\n'), ((14960, 14986), 'numpy.fabs', 'numpy.fabs', (['(ws[6] - angler)'], {}), '(ws[6] - angler)\n', (14970, 14986), False, 'import numpy\n'), ((14996, 15024), 'numpy.fabs', 'numpy.fabs', (['(ws[7] - anglephi)'], {}), '(ws[7] - anglephi)\n', (15006, 15024), False, 'import numpy\n'), ((15034, 15060), 'numpy.fabs', 'numpy.fabs', (['(ws[8] - anglez)'], {}), '(ws[8] - anglez)\n', (15044, 15060), False, 'import numpy\n'), ((15303, 15331), 'numpy.all', 'numpy.all', (['(dOr < 10.0 ** tol)'], {}), '(dOr < 10.0 ** tol)\n', (15312, 15331), False, 'import numpy\n'), ((15465, 15493), 'numpy.all', 'numpy.all', (['(dOp < 10.0 ** tol)'], {}), '(dOp < 10.0 ** tol)\n', (15474, 15493), False, 'import numpy\n'), ((15630, 15658), 'numpy.all', 'numpy.all', (['(dOz < 10.0 ** tol)'], {}), '(dOz < 10.0 ** tol)\n', (15639, 15658), False, 'import 
numpy\n'), ((15792, 15820), 'numpy.all', 'numpy.all', (['(dar < 10.0 ** tol)'], {}), '(dar < 10.0 ** tol)\n', (15801, 15820), False, 'import numpy\n'), ((15947, 15975), 'numpy.all', 'numpy.all', (['(dap < 10.0 ** tol)'], {}), '(dap < 10.0 ** tol)\n', (15956, 15975), False, 'import numpy\n'), ((16104, 16132), 'numpy.all', 'numpy.all', (['(daz < 10.0 ** tol)'], {}), '(daz < 10.0 ** tol)\n', (16113, 16132), False, 'import numpy\n'), ((16559, 16613), 'galpy.actionAngle.actionAngleIsochroneApprox', 'actionAngleIsochroneApprox', ([], {'pot': 'MWPotential2014', 'b': '(0.8)'}), '(pot=MWPotential2014, b=0.8)\n', (16585, 16613), False, 'from galpy.actionAngle import actionAngleTorus, actionAngleIsochroneApprox\n'), ((16636, 16682), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'MWPotential2014', 'tol': 'tol'}), '(pot=MWPotential2014, tol=tol)\n', (16652, 16682), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((16725, 16743), 'numpy.array', 'numpy.array', (['[0.0]'], {}), '([0.0])\n', (16736, 16743), False, 'import numpy\n'), ((16757, 16780), 'numpy.array', 'numpy.array', (['[numpy.pi]'], {}), '([numpy.pi])\n', (16768, 16780), False, 'import numpy\n'), ((16793, 16822), 'numpy.array', 'numpy.array', (['[numpy.pi / 2.0]'], {}), '([numpy.pi / 2.0])\n', (16804, 16822), False, 'import numpy\n'), ((16966, 16995), 'numpy.fabs', 'numpy.fabs', (['((ji[0] - jr) / jr)'], {}), '((ji[0] - jr) / jr)\n', (16976, 16995), False, 'import numpy\n'), ((17001, 17034), 'numpy.fabs', 'numpy.fabs', (['((ji[1] - jphi) / jphi)'], {}), '((ji[1] - jphi) / jphi)\n', (17011, 17034), False, 'import numpy\n'), ((17040, 17069), 'numpy.fabs', 'numpy.fabs', (['((ji[2] - jz) / jz)'], {}), '((ji[2] - jz) / jz)\n', (17050, 17069), False, 'import numpy\n'), ((17844, 17898), 'galpy.actionAngle.actionAngleIsochroneApprox', 'actionAngleIsochroneApprox', ([], {'pot': 'MWPotential2014', 'b': '(0.8)'}), '(pot=MWPotential2014, b=0.8)\n', (17870, 17898), False, 'from galpy.actionAngle 
import actionAngleTorus, actionAngleIsochroneApprox\n'), ((17921, 17967), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'MWPotential2014', 'tol': 'tol'}), '(pot=MWPotential2014, tol=tol)\n', (17937, 17967), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((18512, 18540), 'numpy.fabs', 'numpy.fabs', (['(ws[3] - RvRom[1])'], {}), '(ws[3] - RvRom[1])\n', (18522, 18540), False, 'import numpy\n'), ((18550, 18578), 'numpy.fabs', 'numpy.fabs', (['(ws[4] - RvRom[2])'], {}), '(ws[4] - RvRom[2])\n', (18560, 18578), False, 'import numpy\n'), ((18588, 18616), 'numpy.fabs', 'numpy.fabs', (['(ws[5] - RvRom[3])'], {}), '(ws[5] - RvRom[3])\n', (18598, 18616), False, 'import numpy\n'), ((18626, 18652), 'numpy.fabs', 'numpy.fabs', (['(ws[6] - angler)'], {}), '(ws[6] - angler)\n', (18636, 18652), False, 'import numpy\n'), ((18662, 18690), 'numpy.fabs', 'numpy.fabs', (['(ws[7] - anglephi)'], {}), '(ws[7] - anglephi)\n', (18672, 18690), False, 'import numpy\n'), ((18700, 18726), 'numpy.fabs', 'numpy.fabs', (['(ws[8] - anglez)'], {}), '(ws[8] - anglez)\n', (18710, 18726), False, 'import numpy\n'), ((18969, 18997), 'numpy.all', 'numpy.all', (['(dOr < 10.0 ** tol)'], {}), '(dOr < 10.0 ** tol)\n', (18978, 18997), False, 'import numpy\n'), ((19145, 19173), 'numpy.all', 'numpy.all', (['(dOp < 10.0 ** tol)'], {}), '(dOp < 10.0 ** tol)\n', (19154, 19173), False, 'import numpy\n'), ((19324, 19352), 'numpy.all', 'numpy.all', (['(dOz < 10.0 ** tol)'], {}), '(dOz < 10.0 ** tol)\n', (19333, 19352), False, 'import numpy\n'), ((19500, 19528), 'numpy.all', 'numpy.all', (['(dar < 10.0 ** tol)'], {}), '(dar < 10.0 ** tol)\n', (19509, 19528), False, 'import numpy\n'), ((19669, 19697), 'numpy.all', 'numpy.all', (['(dap < 10.0 ** tol)'], {}), '(dap < 10.0 ** tol)\n', (19678, 19697), False, 'import numpy\n'), ((19840, 19868), 'numpy.all', 'numpy.all', (['(daz < 10.0 ** tol)'], {}), '(daz < 10.0 ** tol)\n', (19849, 19868), False, 'import numpy\n'), ((20259, 20296), 
'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'MWPotential2014'}), '(pot=MWPotential2014)\n', (20275, 20296), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((20779, 20816), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'MWPotential2014'}), '(pot=MWPotential2014)\n', (20795, 20816), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((21223, 21260), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'MWPotential2014'}), '(pot=MWPotential2014)\n', (21239, 21260), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((21361, 21394), 'numpy.array', 'numpy.array', (['[0.02, 0.005, -0.01]'], {}), '([0.02, 0.005, -0.01])\n', (21372, 21394), False, 'import numpy\n'), ((21413, 21429), 'numpy.dot', 'numpy.dot', (['h', 'dj'], {}), '(h, dj)\n', (21422, 21429), False, 'import numpy\n'), ((21942, 21979), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'MWPotential2014'}), '(pot=MWPotential2014)\n', (21958, 21979), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((22632, 22669), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'MWPotential2014'}), '(pot=MWPotential2014)\n', (22648, 22669), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((23316, 23353), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'MWPotential2014'}), '(pot=MWPotential2014)\n', (23332, 23353), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((23397, 23420), 'numpy.array', 'numpy.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (23408, 23420), False, 'import numpy\n'), ((23432, 23455), 'numpy.array', 'numpy.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (23443, 23455), False, 'import numpy\n'), ((23465, 23488), 'numpy.array', 'numpy.array', (['[2.0, 3.0]'], {}), '([2.0, 3.0])\n', (23476, 23488), False, 'import numpy\n'), ((24073, 24121), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 
'MWPotential2014', 'dJ': '(0.0001)'}), '(pot=MWPotential2014, dJ=0.0001)\n', (24089, 24121), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((24164, 24187), 'numpy.array', 'numpy.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (24175, 24187), False, 'import numpy\n'), ((24199, 24222), 'numpy.array', 'numpy.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (24210, 24222), False, 'import numpy\n'), ((24232, 24255), 'numpy.array', 'numpy.array', (['[2.0, 3.0]'], {}), '([2.0, 3.0])\n', (24243, 24255), False, 'import numpy\n'), ((24930, 24967), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'MWPotential2014'}), '(pot=MWPotential2014)\n', (24946, 24967), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((25011, 25029), 'numpy.array', 'numpy.array', (['[0.5]'], {}), '([0.5])\n', (25022, 25029), False, 'import numpy\n'), ((25044, 25062), 'numpy.array', 'numpy.array', (['[1.0]'], {}), '([1.0])\n', (25055, 25062), False, 'import numpy\n'), ((25074, 25092), 'numpy.array', 'numpy.array', (['[2.0]'], {}), '([2.0])\n', (25085, 25092), False, 'import numpy\n'), ((25804, 25825), 'test_potential.BurkertPotentialNoC', 'BurkertPotentialNoC', ([], {}), '()\n', (25823, 25825), False, 'from test_potential import BurkertPotentialNoC\n'), ((26258, 26300), 'galpy.potential.TriaxialNFWPotential', 'TriaxialNFWPotential', ([], {'normalize': '(1.0)', 'b': '(0.9)'}), '(normalize=1.0, b=0.9)\n', (26278, 26300), False, 'from galpy.potential import TriaxialNFWPotential\n'), ((26738, 26784), 'galpy.potential.LogarithmicHaloPotential', 'LogarithmicHaloPotential', ([], {'normalize': '(1.0)', 'q': '(0.9)'}), '(normalize=1.0, q=0.9)\n', (26762, 26784), False, 'from galpy.potential import LogarithmicHaloPotential\n'), ((26792, 26834), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'lp', 'tol': '(10.0 ** -8.0)'}), '(pot=lp, tol=10.0 ** -8.0)\n', (26808, 26834), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((30369, 
30413), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""', 'galpyWarning'], {}), "('error', galpyWarning)\n", (30390, 30413), False, 'import warnings\n'), ((30645, 30690), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""', 'galpyWarning'], {}), "('always', galpyWarning)\n", (30666, 30690), False, 'import warnings\n'), ((816, 856), 'numpy.linspace', 'numpy.linspace', (['(0.0)', '(2.0 * numpy.pi)', '(101)'], {}), '(0.0, 2.0 * numpy.pi, 101)\n', (830, 856), False, 'import numpy\n'), ((866, 906), 'numpy.linspace', 'numpy.linspace', (['(0.0)', '(2.0 * numpy.pi)', '(101)'], {}), '(0.0, 2.0 * numpy.pi, 101)\n', (880, 906), False, 'import numpy\n'), ((6219, 6237), 'numpy.array', 'numpy.array', (['[0.0]'], {}), '([0.0])\n', (6230, 6237), False, 'import numpy\n'), ((6261, 6279), 'numpy.array', 'numpy.array', (['[1.0]'], {}), '([1.0])\n', (6272, 6279), False, 'import numpy\n'), ((6303, 6321), 'numpy.array', 'numpy.array', (['[2.0]'], {}), '([2.0])\n', (6314, 6321), False, 'import numpy\n'), ((8485, 8521), 'numpy.fabs', 'numpy.fabs', (['((om[0] - omi[0]) / om[0])'], {}), '((om[0] - omi[0]) / om[0])\n', (8495, 8521), False, 'import numpy\n'), ((8648, 8684), 'numpy.fabs', 'numpy.fabs', (['((om[1] - omi[1]) / om[1])'], {}), '((om[1] - omi[1]) / om[1])\n', (8658, 8684), False, 'import numpy\n'), ((8814, 8850), 'numpy.fabs', 'numpy.fabs', (['((om[2] - omi[2]) / om[2])'], {}), '((om[2] - omi[2]) / om[2])\n', (8824, 8850), False, 'import numpy\n'), ((10638, 10656), 'numpy.array', 'numpy.array', (['[0.1]'], {}), '([0.1])\n', (10649, 10656), False, 'import numpy\n'), ((10657, 10691), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'numpy.pi', '(101)'], {}), '(0.0, numpy.pi, 101)\n', (10671, 10691), False, 'import numpy\n'), ((10738, 10761), 'numpy.array', 'numpy.array', (['[numpy.pi]'], {}), '([numpy.pi])\n', (10749, 10761), False, 'import numpy\n'), ((10762, 10796), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'numpy.pi', '(101)'], {}), '(0.0, 
numpy.pi, 101)\n', (10776, 10796), False, 'import numpy\n'), ((10845, 10874), 'numpy.array', 'numpy.array', (['[numpy.pi / 2.0]'], {}), '([numpy.pi / 2.0])\n', (10856, 10874), False, 'import numpy\n'), ((10872, 10906), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'numpy.pi', '(101)'], {}), '(0.0, numpy.pi, 101)\n', (10886, 10906), False, 'import numpy\n'), ((12221, 12238), 'numpy.nanmax', 'numpy.nanmax', (['dar'], {}), '(dar)\n', (12233, 12238), False, 'import numpy\n'), ((12380, 12397), 'numpy.nanmax', 'numpy.nanmax', (['dap'], {}), '(dap)\n', (12392, 12397), False, 'import numpy\n'), ((12537, 12554), 'numpy.nanmax', 'numpy.nanmax', (['daz'], {}), '(daz)\n', (12549, 12554), False, 'import numpy\n'), ((14342, 14360), 'numpy.array', 'numpy.array', (['[0.1]'], {}), '([0.1])\n', (14353, 14360), False, 'import numpy\n'), ((14361, 14395), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'numpy.pi', '(101)'], {}), '(0.0, numpy.pi, 101)\n', (14375, 14395), False, 'import numpy\n'), ((14442, 14465), 'numpy.array', 'numpy.array', (['[numpy.pi]'], {}), '([numpy.pi])\n', (14453, 14465), False, 'import numpy\n'), ((14466, 14500), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'numpy.pi', '(101)'], {}), '(0.0, numpy.pi, 101)\n', (14480, 14500), False, 'import numpy\n'), ((14549, 14578), 'numpy.array', 'numpy.array', (['[numpy.pi / 2.0]'], {}), '([numpy.pi / 2.0])\n', (14560, 14578), False, 'import numpy\n'), ((14576, 14610), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'numpy.pi', '(101)'], {}), '(0.0, numpy.pi, 101)\n', (14590, 14610), False, 'import numpy\n'), ((15917, 15934), 'numpy.nanmax', 'numpy.nanmax', (['dar'], {}), '(dar)\n', (15929, 15934), False, 'import numpy\n'), ((16074, 16091), 'numpy.nanmax', 'numpy.nanmax', (['dap'], {}), '(dap)\n', (16086, 16091), False, 'import numpy\n'), ((16229, 16246), 'numpy.nanmax', 'numpy.nanmax', (['daz'], {}), '(daz)\n', (16241, 16246), False, 'import numpy\n'), ((18010, 18028), 'numpy.array', 'numpy.array', (['[0.1]'], {}), 
'([0.1])\n', (18021, 18028), False, 'import numpy\n'), ((18029, 18062), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'numpy.pi', '(21)'], {}), '(0.0, numpy.pi, 21)\n', (18043, 18062), False, 'import numpy\n'), ((18109, 18132), 'numpy.array', 'numpy.array', (['[numpy.pi]'], {}), '([numpy.pi])\n', (18120, 18132), False, 'import numpy\n'), ((18133, 18166), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'numpy.pi', '(21)'], {}), '(0.0, numpy.pi, 21)\n', (18147, 18166), False, 'import numpy\n'), ((18215, 18244), 'numpy.array', 'numpy.array', (['[numpy.pi / 2.0]'], {}), '([numpy.pi / 2.0])\n', (18226, 18244), False, 'import numpy\n'), ((18242, 18275), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'numpy.pi', '(21)'], {}), '(0.0, numpy.pi, 21)\n', (18256, 18275), False, 'import numpy\n'), ((19639, 19656), 'numpy.nanmax', 'numpy.nanmax', (['dar'], {}), '(dar)\n', (19651, 19656), False, 'import numpy\n'), ((19810, 19827), 'numpy.nanmax', 'numpy.nanmax', (['dap'], {}), '(dap)\n', (19822, 19827), False, 'import numpy\n'), ((19979, 19996), 'numpy.nanmax', 'numpy.nanmax', (['daz'], {}), '(daz)\n', (19991, 19996), False, 'import numpy\n'), ((25214, 25270), 'numpy.array', 'numpy.array', (['[0.001, 0.002, 0.003, -0.002, 0.004, 0.002]'], {}), '([0.001, 0.002, 0.003, -0.002, 0.004, 0.002])\n', (25225, 25270), False, 'import numpy\n'), ((25401, 25422), 'numpy.dot', 'numpy.dot', (['jf[1]', 'dja'], {}), '(jf[1], dja)\n', (25410, 25422), False, 'import numpy\n'), ((25848, 25872), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'bp'}), '(pot=bp)\n', (25864, 25872), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((26321, 26345), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'np'}), '(pot=np)\n', (26337, 26345), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((26929, 26954), 'numpy.array', 'numpy.array', (['[1.95732492]'], {}), '([1.95732492])\n', (26940, 26954), False, 'import numpy\n'), ((26956, 26981), 
'numpy.array', 'numpy.array', (['[6.16753224]'], {}), '([6.16753224])\n', (26967, 26981), False, 'import numpy\n'), ((26993, 27018), 'numpy.array', 'numpy.array', (['[4.08233059]'], {}), '([4.08233059])\n', (27004, 27018), False, 'import numpy\n'), ((27096, 27132), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (27119, 27132), False, 'import warnings\n'), ((27195, 27240), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""', 'galpyWarning'], {}), "('always', galpyWarning)\n", (27216, 27240), False, 'import warnings\n'), ((27734, 27770), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (27757, 27770), False, 'import warnings\n'), ((27785, 27830), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""', 'galpyWarning'], {}), "('always', galpyWarning)\n", (27806, 27830), False, 'import warnings\n'), ((28332, 28368), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (28355, 28368), False, 'import warnings\n'), ((28383, 28428), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""', 'galpyWarning'], {}), "('always', galpyWarning)\n", (28404, 28428), False, 'import warnings\n'), ((28919, 28955), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (28942, 28955), False, 'import warnings\n'), ((28970, 29015), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""', 'galpyWarning'], {}), "('always', galpyWarning)\n", (28991, 29015), False, 'import warnings\n'), ((29513, 29549), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (29536, 29549), False, 'import warnings\n'), ((29564, 29609), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""', 'galpyWarning'], {}), "('always', galpyWarning)\n", (29585, 29609), False, 'import warnings\n'), ((30333, 30364), 
'test_actionAngle.reset_warning_registry', 'reset_warning_registry', (['"""galpy"""'], {}), "('galpy')\n", (30355, 30364), False, 'from test_actionAngle import reset_warning_registry\n'), ((30435, 30468), 'galpy.actionAngle.actionAngleTorus', 'actionAngleTorus', ([], {'pot': 'MWPotential'}), '(pot=MWPotential)\n', (30451, 30468), False, 'from galpy.actionAngle import actionAngleTorus\n'), ((1127, 1145), 'numpy.fabs', 'numpy.fabs', (['RvR[1]'], {}), '(RvR[1])\n', (1137, 1145), False, 'import numpy\n'), ((1433, 1451), 'numpy.fabs', 'numpy.fabs', (['RvR[3]'], {}), '(RvR[3])\n', (1443, 1451), False, 'import numpy\n'), ((1569, 1587), 'numpy.fabs', 'numpy.fabs', (['RvR[4]'], {}), '(RvR[4])\n', (1579, 1587), False, 'import numpy\n'), ((2033, 2051), 'numpy.fabs', 'numpy.fabs', (['RvR[1]'], {}), '(RvR[1])\n', (2043, 2051), False, 'import numpy\n'), ((2321, 2339), 'numpy.fabs', 'numpy.fabs', (['RvR[3]'], {}), '(RvR[3])\n', (2331, 2339), False, 'import numpy\n'), ((2457, 2475), 'numpy.fabs', 'numpy.fabs', (['RvR[4]'], {}), '(RvR[4])\n', (2467, 2475), False, 'import numpy\n'), ((2942, 2960), 'numpy.fabs', 'numpy.fabs', (['RvR[1]'], {}), '(RvR[1])\n', (2952, 2960), False, 'import numpy\n'), ((3230, 3248), 'numpy.fabs', 'numpy.fabs', (['RvR[3]'], {}), '(RvR[3])\n', (3240, 3248), False, 'import numpy\n'), ((3366, 3384), 'numpy.fabs', 'numpy.fabs', (['RvR[4]'], {}), '(RvR[4])\n', (3376, 3384), False, 'import numpy\n'), ((11728, 11745), 'numpy.nanmax', 'numpy.nanmax', (['dOr'], {}), '(dOr)\n', (11740, 11745), False, 'import numpy\n'), ((11894, 11911), 'numpy.nanmax', 'numpy.nanmax', (['dOp'], {}), '(dOp)\n', (11906, 11911), False, 'import numpy\n'), ((12059, 12076), 'numpy.nanmax', 'numpy.nanmax', (['dOz'], {}), '(dOz)\n', (12071, 12076), False, 'import numpy\n'), ((15430, 15447), 'numpy.nanmax', 'numpy.nanmax', (['dOr'], {}), '(dOr)\n', (15442, 15447), False, 'import numpy\n'), ((15594, 15611), 'numpy.nanmax', 'numpy.nanmax', (['dOp'], {}), '(dOp)\n', (15606, 15611), False, 
'import numpy\n'), ((15757, 15774), 'numpy.nanmax', 'numpy.nanmax', (['dOz'], {}), '(dOz)\n', (15769, 15774), False, 'import numpy\n'), ((19110, 19127), 'numpy.nanmax', 'numpy.nanmax', (['dOr'], {}), '(dOr)\n', (19122, 19127), False, 'import numpy\n'), ((19288, 19305), 'numpy.nanmax', 'numpy.nanmax', (['dOp'], {}), '(dOp)\n', (19300, 19305), False, 'import numpy\n'), ((19465, 19482), 'numpy.nanmax', 'numpy.nanmax', (['dOz'], {}), '(dOz)\n', (19477, 19482), False, 'import numpy\n'), ((20930, 20955), 'numpy.fabs', 'numpy.fabs', (['((h - h.T) / h)'], {}), '((h - h.T) / h)\n', (20940, 20955), False, 'import numpy\n'), ((21563, 21600), 'numpy.fabs', 'numpy.fabs', (['((do_fromhessian - do) / O)'], {}), '((do_fromhessian - do) / O)\n', (21573, 21600), False, 'import numpy\n'), ((22113, 22131), 'numpy.array', 'numpy.array', (['[0.0]'], {}), '([0.0])\n', (22124, 22131), False, 'import numpy\n'), ((22131, 22149), 'numpy.array', 'numpy.array', (['[1.0]'], {}), '([1.0])\n', (22142, 22149), False, 'import numpy\n'), ((22178, 22196), 'numpy.array', 'numpy.array', (['[2.0]'], {}), '([2.0])\n', (22189, 22196), False, 'import numpy\n'), ((22809, 22827), 'numpy.array', 'numpy.array', (['[0.0]'], {}), '([0.0])\n', (22820, 22827), False, 'import numpy\n'), ((22827, 22845), 'numpy.array', 'numpy.array', (['[1.0]'], {}), '([1.0])\n', (22838, 22845), False, 'import numpy\n'), ((22874, 22892), 'numpy.array', 'numpy.array', (['[2.0]'], {}), '([2.0])\n', (22885, 22892), False, 'import numpy\n'), ((25443, 25491), 'numpy.fabs', 'numpy.fabs', (['((xv_fromjac - xv_direct) / xv_direct)'], {}), '((xv_fromjac - xv_direct) / xv_direct)\n', (25453, 25491), False, 'import numpy\n'), ((27155, 27186), 'test_actionAngle.reset_warning_registry', 'reset_warning_registry', (['"""galpy"""'], {}), "('galpy')\n", (27177, 27186), False, 'from test_actionAngle import reset_warning_registry\n'), ((993, 1014), 'galpy.potential.rl', 'rl', (['MWPotential', 'jphi'], {}), '(MWPotential, jphi)\n', (995, 1014), False, 
'from galpy.potential import epifreq, omegac, verticalfreq, rl, JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((1908, 1920), 'galpy.potential.rl', 'rl', (['pp', 'jphi'], {}), '(pp, jphi)\n', (1910, 1920), False, 'from galpy.potential import epifreq, omegac, verticalfreq, rl, JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((2817, 2829), 'galpy.potential.rl', 'rl', (['fp', 'jphi'], {}), '(fp, jphi)\n', (2819, 2829), False, 'from galpy.potential import epifreq, omegac, verticalfreq, rl, JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((20436, 20451), 'numpy.array', 'numpy.array', (['fO'], {}), '(fO)\n', (20447, 20451), False, 'import numpy\n'), ((20452, 20467), 'numpy.array', 'numpy.array', (['hO'], {}), '(hO)\n', (20463, 20467), False, 'import numpy\n'), ((22234, 22249), 'numpy.array', 'numpy.array', (['fO'], {}), '(fO)\n', (22245, 22249), False, 'import numpy\n'), ((22250, 22265), 'numpy.array', 'numpy.array', (['hO'], {}), '(hO)\n', (22261, 22265), False, 'import numpy\n'), ((22928, 22943), 'numpy.array', 'numpy.array', (['fO'], {}), '(fO)\n', (22939, 22943), False, 'import numpy\n'), ((22944, 22959), 'numpy.array', 'numpy.array', (['hO'], {}), '(hO)\n', (22955, 22959), False, 'import numpy\n'), ((23631, 23646), 'numpy.array', 'numpy.array', (['fO'], {}), '(fO)\n', (23642, 23646), False, 'import numpy\n'), ((23647, 23662), 'numpy.array', 'numpy.array', (['hO'], {}), '(hO)\n', (23658, 23662), False, 'import numpy\n'), ((1299, 1320), 'galpy.potential.rl', 'rl', (['MWPotential', 'jphi'], {}), '(MWPotential, jphi)\n', (1301, 1320), False, 'from galpy.potential import epifreq, omegac, verticalfreq, rl, JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((2196, 2208), 'galpy.potential.rl', 'rl', (['pp', 'jphi'], {}), '(pp, jphi)\n', (2198, 2208), False, 'from galpy.potential import epifreq, omegac, verticalfreq, rl, JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((3105, 3117), 
'galpy.potential.rl', 'rl', (['fp', 'jphi'], {}), '(fp, jphi)\n', (3107, 3117), False, 'from galpy.potential import epifreq, omegac, verticalfreq, rl, JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((4027, 4039), 'galpy.potential.rl', 'rl', (['jp', 'jphi'], {}), '(jp, jphi)\n', (4029, 4039), False, 'from galpy.potential import epifreq, omegac, verticalfreq, rl, JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((4179, 4191), 'galpy.potential.rl', 'rl', (['jp', 'jphi'], {}), '(jp, jphi)\n', (4181, 4191), False, 'from galpy.potential import epifreq, omegac, verticalfreq, rl, JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((4339, 4351), 'galpy.potential.rl', 'rl', (['jp', 'jphi'], {}), '(jp, jphi)\n', (4341, 4351), False, 'from galpy.potential import epifreq, omegac, verticalfreq, rl, JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((4653, 4665), 'galpy.potential.rl', 'rl', (['pp', 'jphi'], {}), '(pp, jphi)\n', (4655, 4665), False, 'from galpy.potential import epifreq, omegac, verticalfreq, rl, JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((4805, 4817), 'galpy.potential.rl', 'rl', (['pp', 'jphi'], {}), '(pp, jphi)\n', (4807, 4817), False, 'from galpy.potential import epifreq, omegac, verticalfreq, rl, JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((4965, 4977), 'galpy.potential.rl', 'rl', (['pp', 'jphi'], {}), '(pp, jphi)\n', (4967, 4977), False, 'from galpy.potential import epifreq, omegac, verticalfreq, rl, JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((5313, 5325), 'galpy.potential.rl', 'rl', (['hp', 'jphi'], {}), '(hp, jphi)\n', (5315, 5325), False, 'from galpy.potential import epifreq, omegac, verticalfreq, rl, JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((5465, 5477), 'galpy.potential.rl', 'rl', (['hp', 'jphi'], {}), '(hp, jphi)\n', (5467, 5477), False, 'from galpy.potential import epifreq, omegac, verticalfreq, rl, 
JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((5625, 5637), 'galpy.potential.rl', 'rl', (['hp', 'jphi'], {}), '(hp, jphi)\n', (5627, 5637), False, 'from galpy.potential import epifreq, omegac, verticalfreq, rl, JaffePotential, PowerSphericalPotential, HernquistPotential\n'), ((24360, 24386), 'numpy.linalg.det', 'numpy.linalg.det', (['jf[1][0]'], {}), '(jf[1][0])\n', (24376, 24386), False, 'import numpy\n'), ((24545, 24571), 'numpy.linalg.det', 'numpy.linalg.det', (['jf[1][1]'], {}), '(jf[1][1])\n', (24561, 24571), False, 'import numpy\n')] |
import numpy as np
from scipy import ndimage
__all__ = ['gabor_kernel', 'gabor_filter']
def _sigma_prefactor(bandwidth):
b = bandwidth
# See http://www.cs.rug.nl/~imaging/simplecell.html
return 1.0 / np.pi * np.sqrt(np.log(2)/2.0) * (2.0**b + 1) / (2.0**b - 1)
def gabor_kernel(frequency, theta=0, bandwidth=1, sigma_x=None, sigma_y=None,
                 offset=0):
    """Return complex 2D Gabor filter kernel.

    Frequency and orientation representations of the Gabor filter are similar
    to those of the human visual system. It is especially suitable for texture
    classification using Gabor filter banks.

    Parameters
    ----------
    frequency : float
        Frequency of the harmonic function.
    theta : float
        Orientation in radians. If 0, the harmonic is in the x-direction.
    bandwidth : float
        The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
        and `sigma_y` will decrease with increasing frequency. This value is
        ignored if `sigma_x` and `sigma_y` are set by the user.
    sigma_x, sigma_y : float
        Standard deviation in x- and y-directions. These directions apply to
        the kernel *before* rotation. If `theta = pi/2`, then the kernel is
        rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
    offset : float, optional
        Phase offset of harmonic function in radians.

    Returns
    -------
    g : complex array
        Complex filter kernel.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Gabor_filter
    .. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf
    """
    if sigma_x is None:
        sigma_x = _sigma_prefactor(bandwidth) / frequency
    if sigma_y is None:
        sigma_y = _sigma_prefactor(bandwidth) / frequency

    # half-extent of the kernel: cover 3 standard deviations of the rotated
    # Gaussian envelope in each direction (at least 1 pixel)
    n_stds = 3
    x0 = np.ceil(max(np.abs(n_stds * sigma_x * np.cos(theta)),
                     np.abs(n_stds * sigma_y * np.sin(theta)), 1))
    y0 = np.ceil(max(np.abs(n_stds * sigma_y * np.cos(theta)),
                     np.abs(n_stds * sigma_x * np.sin(theta)), 1))
    y, x = np.mgrid[-y0:y0 + 1, -x0:x0 + 1]

    # rotate the coordinate frame by theta
    rotx = x * np.cos(theta) + y * np.sin(theta)
    roty = -x * np.sin(theta) + y * np.cos(theta)

    # NOTE: the `np.complex` alias was removed in NumPy 1.24; the builtin
    # `complex` (i.e. np.complex128) is the supported spelling.
    g = np.zeros(y.shape, dtype=complex)
    g[:] = np.exp(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2))
    g /= 2 * np.pi * sigma_x * sigma_y
    g *= np.exp(1j * (2 * np.pi * frequency * rotx + offset))
    return g
def gabor_filter(image, frequency, theta=0, bandwidth=1, sigma_x=None,
                 sigma_y=None, offset=0, mode='reflect', cval=0):
    """Return real and imaginary responses to Gabor filter.

    The real and imaginary parts of the Gabor filter kernel are convolved
    with the image, and the two responses are returned as a pair of arrays.

    Frequency and orientation representations of the Gabor filter are similar
    to those of the human visual system. It is especially suitable for texture
    classification using Gabor filter banks.

    Parameters
    ----------
    image : array
        Input image.
    frequency : float
        Frequency of the harmonic function.
    theta : float
        Orientation in radians. If 0, the harmonic is in the x-direction.
    bandwidth : float
        The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
        and `sigma_y` will decrease with increasing frequency. This value is
        ignored if `sigma_x` and `sigma_y` are set by the user.
    sigma_x, sigma_y : float
        Standard deviation in x- and y-directions. These directions apply to
        the kernel *before* rotation. If `theta = pi/2`, then the kernel is
        rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
    offset : float, optional
        Phase offset of harmonic function in radians.
    mode : str, optional
        Boundary mode forwarded to ``scipy.ndimage.convolve``.
    cval : scalar, optional
        Fill value used when ``mode='constant'``.

    Returns
    -------
    real, imag : arrays
        Filtered images using the real and imaginary parts of the Gabor filter
        kernel.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Gabor_filter
    .. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf
    """
    kernel = gabor_kernel(frequency, theta, bandwidth, sigma_x, sigma_y,
                          offset)
    response_real = ndimage.convolve(image, np.real(kernel), mode=mode,
                                    cval=cval)
    response_imag = ndimage.convolve(image, np.imag(kernel), mode=mode,
                                    cval=cval)
    return response_real, response_imag
| [
"numpy.log",
"numpy.exp",
"numpy.real",
"numpy.zeros",
"numpy.cos",
"numpy.sin",
"numpy.imag"
] | [((2221, 2256), 'numpy.zeros', 'np.zeros', (['y.shape'], {'dtype': 'np.complex'}), '(y.shape, dtype=np.complex)\n', (2229, 2256), True, 'import numpy as np\n'), ((2268, 2336), 'numpy.exp', 'np.exp', (['(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2))'], {}), '(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2))\n', (2274, 2336), True, 'import numpy as np\n'), ((2377, 2431), 'numpy.exp', 'np.exp', (['(1.0j * (2 * np.pi * frequency * rotx + offset))'], {}), '(1.0j * (2 * np.pi * frequency * rotx + offset))\n', (2383, 2431), True, 'import numpy as np\n'), ((4211, 4221), 'numpy.real', 'np.real', (['g'], {}), '(g)\n', (4218, 4221), True, 'import numpy as np\n'), ((4289, 4299), 'numpy.imag', 'np.imag', (['g'], {}), '(g)\n', (4296, 4299), True, 'import numpy as np\n'), ((2128, 2141), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2134, 2141), True, 'import numpy as np\n'), ((2148, 2161), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2154, 2161), True, 'import numpy as np\n'), ((2178, 2191), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2184, 2191), True, 'import numpy as np\n'), ((2198, 2211), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2204, 2211), True, 'import numpy as np\n'), ((1859, 1872), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1865, 1872), True, 'import numpy as np\n'), ((1922, 1935), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1928, 1935), True, 'import numpy as np\n'), ((1989, 2002), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1995, 2002), True, 'import numpy as np\n'), ((2052, 2065), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2058, 2065), True, 'import numpy as np\n'), ((232, 241), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (238, 241), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# connectivity.py
# definitions of connectivity characters
import math
import warnings
import networkx as nx
import numpy as np
from tqdm import tqdm
__all__ = [
"node_degree",
"meshedness",
"mean_node_dist",
"cds_length",
"mean_node_degree",
"proportion",
"cyclomatic",
"edge_node_ratio",
"gamma",
"clustering",
"local_closeness_centrality",
"closeness_centrality",
"betweenness_centrality",
"local_betweenness_centrality",
"local_straightness_centrality",
"straightness_centrality",
"subgraph",
"mean_nodes",
]
def node_degree(graph, name="degree"):
    """
    Store the degree of every node as a node attribute.

    Thin wrapper around ``networkx.degree()``; the input graph is copied and
    the copy is returned with the attribute attached.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    name : str (default 'degree')
        calculated attribute name

    Returns
    -------
    Graph
        networkx.Graph

    Examples
    --------
    >>> network_graph = mm.node_degree(network_graph)
    """
    result = graph.copy()
    nx.set_node_attributes(result, dict(nx.degree(result)), name)
    return result
def _meshedness(graph):
"""
Calculates meshedness of a graph.
"""
e = graph.number_of_edges()
v = graph.number_of_nodes()
return (e - v + 1) / (2 * v - 5)
def meshedness(graph, radius=5, name="meshedness", distance=None, verbose=True):
    """
    Calculate meshedness, either per node (on a subgraph within ``radius``) or
    for the whole graph when ``radius=None``.

    When ``radius`` is set, an ego subgraph is built around every node. With
    ``distance=None`` the radius is a topological distance (number of steps);
    otherwise the edge attribute named by ``distance`` is used as the metric.

    .. math::
        \\alpha=\\frac{e-v+1}{2 v-5}

    where :math:`e` is the number of edges in subgraph and :math:`v` is the number of nodes in subgraph.

    Adapted from :cite:`feliciotti2018`.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    radius: int, optional
        Include all neighbors of distance <= radius from n
    name : str, optional
        calculated attribute name
    distance : str, optional
        Use specified edge data key as distance.
        For example, setting ``distance=’weight’`` will use the edge ``weight`` to
        measure the distance from the node n.
    verbose : bool (default True)
        if True, shows progress bars in loops and indication of steps

    Returns
    -------
    Graph
        networkx.Graph if radius is set
    float
        meshedness for graph if ``radius=None``

    Examples
    --------
    >>> network_graph = mm.meshedness(network_graph, radius=800, distance='edge_length')
    """
    netx = graph.copy()
    if not radius:
        return _meshedness(netx)

    for node in tqdm(netx, total=len(netx), disable=not verbose):
        # subgraph of everything reachable within ``radius`` from ``node``
        ego = nx.ego_graph(netx, node, radius=radius, distance=distance)
        # store the value computed on the subgraph as a node attribute
        netx.nodes[node][name] = _meshedness(ego)
    return netx
def mean_node_dist(graph, name="meanlen", length="mm_len", verbose=True):
    """
    Store, for every node, the mean length of its incident edges.

    The mean is taken over the values of the ``length`` edge attribute of all
    edges touching the node, and saved as the ``name`` node attribute.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    name : str, optional
        calculated attribute name
    length : str, optional
        name of attribute of segment length (geographical)
    verbose : bool (default True)
        if True, shows progress bars in loops and indication of steps

    Returns
    -------
    Graph
        networkx.Graph

    Examples
    --------
    >>> network_graph = mm.mean_node_dist(network_graph)
    """
    netx = graph.copy()
    for node, neighbours in tqdm(netx.adj.items(), total=len(netx),
                                 disable=not verbose):
        # collect the length of every (multi)edge incident to ``node``
        edge_lengths = [
            eattr[length]
            for keydict in neighbours.values()
            for eattr in keydict.values()
        ]
        netx.nodes[node][name] = np.mean(edge_lengths)
    return netx
def _cds_length(graph, mode, length):
"""
Calculates cul-de-sac length in a graph.
"""
lens = []
for u, v, k, cds in graph.edges.data("cdsbool", keys=True):
if cds:
lens.append(graph[u][v][k][length])
if mode == "sum":
return sum(lens)
if mode == "mean":
return np.mean(lens)
raise ValueError("Mode {} is not supported. Use 'sum' or 'mean'.".format(mode))
def cds_length(
    graph,
    radius=5,
    mode="sum",
    name="cds_len",
    degree="degree",
    length="mm_len",
    distance=None,
    verbose=True,
):
    """
    Calculates length of cul-de-sacs for subgraph around each node if radius is
    set, or for whole graph, if ``radius=None``.

    Subgraph is generated around each node within set radius. If ``distance=None``,
    radius defines topological distance, otherwise it uses values in ``distance``
    attribute. Node degrees (:py:func:`momepy.node_degree`) are required beforehand.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    radius : int
        Include all neighbors of distance <= radius from n
    mode : str (default 'sum')
        if ``'sum'``, calculate total length, if ``'mean'`` calculate mean length
    name : str, optional
        calculated attribute name
    degree : str
        name of attribute of node degree (:py:func:`momepy.node_degree`)
    length : str, optional
        name of attribute of segment length (geographical)
    distance : str, optional
        Use specified edge data key as distance.
    verbose : bool (default True)
        if True, shows progress bars in loops and indication of steps

    Returns
    -------
    Graph
        networkx.Graph if radius is set
    float
        length of cul-de-sacs for graph if ``radius=None``

    Examples
    --------
    >>> network_graph = mm.cds_length(network_graph, radius=9, mode='mean')
    """
    netx = graph.copy()
    # flag each edge touching a degree-1 node as a cul-de-sac
    for u, v, k in netx.edges(keys=True):
        netx[u][v][k]["cdsbool"] = (
            netx.nodes[u][degree] == 1 or netx.nodes[v][degree] == 1
        )
    if not radius:
        return _cds_length(netx, mode=mode, length=length)
    for node in tqdm(netx, total=len(netx), disable=not verbose):
        # neighbourhood of ``node`` within radius steps (or ``distance`` weight)
        ego = nx.ego_graph(netx, node, radius=radius, distance=distance)
        netx.nodes[node][name] = _cds_length(ego, mode=mode, length=length)
    return netx
def _mean_node_degree(graph, degree):
"""
Calculates mean node degree in a graph.
"""
return np.mean(list(dict(graph.nodes(degree)).values()))
def mean_node_degree(
    graph, radius=5, name="mean_nd", degree="degree", distance=None, verbose=True
):
    """
    Calculates mean node degree for subgraph around each node if radius is set,
    or for whole graph, if ``radius=None``.

    Subgraph is generated around each node within set radius. If ``distance=None``,
    radius will define topological distance, otherwise it uses values in
    ``distance`` attribute.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    radius: int
        radius defining the extent of subgraph
    name : str, optional
        calculated attribute name
    degree : str
        name of attribute of node degree (:py:func:`momepy.node_degree`)
    distance : str, optional
        Use specified edge data key as distance.
    verbose : bool (default True)
        if True, shows progress bars in loops and indication of steps

    Returns
    -------
    Graph
        networkx.Graph if radius is set
    float
        mean node degree for graph if ``radius=None``

    Examples
    --------
    >>> network_graph = mm.mean_node_degree(network_graph, radius=3)
    """
    netx = graph.copy()
    if not radius:
        return _mean_node_degree(netx, degree=degree)
    for node in tqdm(netx, total=len(netx), disable=not verbose):
        # neighbourhood of ``node`` within radius steps (or ``distance`` weight)
        ego = nx.ego_graph(netx, node, radius=radius, distance=distance)
        netx.nodes[node][name] = _mean_node_degree(ego, degree=degree)
    return netx
def _proportion(graph, degree):
"""
Calculates the proportion of intersection types in a graph.
"""
import collections
values = list(dict(graph.nodes(degree)).values())
counts = collections.Counter(values)
return counts
def proportion(
    graph,
    radius=5,
    three=None,
    four=None,
    dead=None,
    degree="degree",
    distance=None,
    verbose=True,
):
    """
    Calculates the proportion of intersection types for subgraph around each node
    if radius is set, or for whole graph, if ``radius=None``.

    Subgraph is generated around each node within set radius. If ``distance=None``,
    radius will define topological distance, otherwise it uses values in
    ``distance`` attribute.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    radius: int
        Include all neighbors of distance <= radius from n
    three : str, optional
        attribute name for 3-way intersections proportion
    four : str, optional
        attribute name for 4-way intersections proportion
    dead : str, optional
        attribute name for deadends proportion
    degree : str
        name of attribute of node degree (:py:func:`momepy.node_degree`)
    distance : str, optional
        Use specified edge data key as distance.
    verbose : bool (default True)
        if True, shows progress bars in loops and indication of steps

    Returns
    -------
    Graph
        networkx.Graph if radius is set
    dict
        dict with proportions for graph if ``radius=None``

    Examples
    --------
    >>> network_graph = mm.proportion(network_graph, three='threeway', four='fourway', dead='deadends')
    """
    if not three and not four and not dead:
        raise ValueError(
            "Nothing to calculate. Define names for at least one proportion to be calculated: three, four, dead."
        )
    netx = graph.copy()
    # each requested attribute name maps to the node degree it counts
    # (deadends are nodes of degree 1)
    targets = [(three, 3), (four, 4), (dead, 1)]
    if radius:
        for node in tqdm(netx, total=len(netx), disable=not verbose):
            ego = nx.ego_graph(netx, node, radius=radius, distance=distance)
            counts = _proportion(ego, degree=degree)
            for attr, degree_value in targets:
                if attr:
                    netx.nodes[node][attr] = counts[degree_value] / len(ego)
        return netx
    # TODO: add example to docs explaining keys
    counts = _proportion(netx, degree=degree)
    return {
        attr: counts[degree_value] / len(netx)
        for attr, degree_value in targets
        if attr
    }
def _cyclomatic(graph):
"""
Calculates the cyclomatic complexity of a graph.
"""
e = graph.number_of_edges()
v = graph.number_of_nodes()
return e - v + 1
def cyclomatic(graph, radius=5, name="cyclomatic", distance=None, verbose=True):
    """
    Calculates cyclomatic complexity for subgraph around each node if radius is
    set, or for whole graph, if ``radius=None``.

    Subgraph is generated around each node within set radius. If ``distance=None``,
    radius will define topological distance, otherwise it uses values in
    ``distance`` attribute.

    .. math::
        \\alpha=e-v+1

    where :math:`e` is the number of edges in subgraph and :math:`v` is the
    number of nodes in subgraph.

    Adapted from :cite:`bourdic2012`.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    radius: int
        Include all neighbors of distance <= radius from n
    name : str, optional
        calculated attribute name
    distance : str, optional
        Use specified edge data key as distance.
    verbose : bool (default True)
        if True, shows progress bars in loops and indication of steps

    Returns
    -------
    Graph
        networkx.Graph if radius is set
    float
        cyclomatic complexity for graph if ``radius=None``

    Examples
    --------
    >>> network_graph = mm.cyclomatic(network_graph, radius=3)
    """
    netx = graph.copy()
    if not radius:
        return _cyclomatic(netx)
    for node in tqdm(netx, total=len(netx), disable=not verbose):
        # neighbourhood of ``node`` within radius steps (or ``distance`` weight)
        ego = nx.ego_graph(netx, node, radius=radius, distance=distance)
        netx.nodes[node][name] = _cyclomatic(ego)
    return netx
def _edge_node_ratio(graph):
"""
Calculates edge / node ratio of a graph.
"""
e = graph.number_of_edges()
v = graph.number_of_nodes()
return e / v
def edge_node_ratio(
    graph, radius=5, name="edge_node_ratio", distance=None, verbose=True
):
    """
    Calculates edge / node ratio for subgraph around each node if radius is set,
    or for whole graph, if ``radius=None``.

    Subgraph is generated around each node within set radius. If ``distance=None``,
    radius will define topological distance, otherwise it uses values in
    ``distance`` attribute.

    .. math::
        \\alpha=e/v

    where :math:`e` is the number of edges in subgraph and :math:`v` is the
    number of nodes in subgraph.

    Adapted from :cite:`dibble2017`.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    radius: int
        Include all neighbors of distance <= radius from n
    name : str, optional
        calculated attribute name
    distance : str, optional
        Use specified edge data key as distance.
    verbose : bool (default True)
        if True, shows progress bars in loops and indication of steps

    Returns
    -------
    Graph
        networkx.Graph if radius is set
    float
        edge / node ratio for graph if ``radius=None``

    Examples
    --------
    >>> network_graph = mm.edge_node_ratio(network_graph, radius=3)
    """
    netx = graph.copy()
    if not radius:
        return _edge_node_ratio(netx)
    for node in tqdm(netx, total=len(netx), disable=not verbose):
        # neighbourhood of ``node`` within radius steps (or ``distance`` weight)
        ego = nx.ego_graph(netx, node, radius=radius, distance=distance)
        netx.nodes[node][name] = _edge_node_ratio(ego)
    return netx
def _gamma(graph):
"""
Calculates gamma index of a graph.
"""
e = graph.number_of_edges()
v = graph.number_of_nodes()
if v == 2:
return np.nan
return e / (3 * (v - 2)) # save value calulated for subgraph to node
def gamma(graph, radius=5, name="gamma", distance=None, verbose=True):
    """
    Calculates connectivity gamma index for subgraph around each node if radius
    is set, or for whole graph, if ``radius=None``.

    Subgraph is generated around each node within set radius. If ``distance=None``,
    radius will define topological distance, otherwise it uses values in
    ``distance`` attribute.

    .. math::
        \\alpha=\\frac{e}{3(v-2)}

    where :math:`e` is the number of edges in subgraph and :math:`v` is the
    number of nodes in subgraph.

    Adapted from :cite:`dibble2017`.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    radius: int
        Include all neighbors of distance <= radius from n
    name : str, optional
        calculated attribute name
    distance : str, optional
        Use specified edge data key as distance.
    verbose : bool (default True)
        if True, shows progress bars in loops and indication of steps

    Returns
    -------
    Graph
        networkx.Graph if radius is set
    float
        gamma index for graph if ``radius=None``

    Examples
    --------
    >>> network_graph = mm.gamma(network_graph, radius=3)
    """
    netx = graph.copy()
    if not radius:
        return _gamma(netx)
    for node in tqdm(netx, total=len(netx), disable=not verbose):
        # neighbourhood of ``node`` within radius steps (or ``distance`` weight)
        ego = nx.ego_graph(netx, node, radius=radius, distance=distance)
        netx.nodes[node][name] = _gamma(ego)
    return netx
def clustering(graph, name="cluster"):
    """
    Calculates the squares clustering coefficient for nodes.

    Wrapper around ``networkx.square_clustering``.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    name : str, optional
        calculated attribute name

    Returns
    -------
    Graph
        networkx.Graph

    Examples
    --------
    >>> network_graph = mm.clustering(network_graph)
    """
    result = graph.copy()
    nx.set_node_attributes(result, nx.square_clustering(result), name)
    return result
def _closeness_centrality(G, u=None, length=None, wf_improved=True, len_graph=None):
    r"""Compute closeness centrality for nodes. Slight adaptation of networkx
    `closeness_centrality` to allow normalisation for local closeness.
    Adapted script used in networkx.
    Closeness centrality [1]_ of a node `u` is the reciprocal of the
    average shortest path distance to `u` over all `n-1` reachable nodes.
    .. math::
        C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},
    where `d(v, u)` is the shortest-path distance between `v` and `u`,
    and `n` is the number of nodes that can reach `u`. Notice that the
    closeness distance function computes the incoming distance to `u`
    for directed graphs. To use outward distance, act on `G.reverse()`.
    Notice that higher values of closeness indicate higher centrality.
    Wasserman and Faust propose an improved formula for graphs with
    more than one connected component. The result is "a ratio of the
    fraction of actors in the group who are reachable, to the average
    distance" from the reachable actors [2]_. You might think this
    scale factor is inverted but it is not. As is, nodes from small
    components receive a smaller closeness value. Letting `N` denote
    the number of nodes in the graph,
    .. math::
        C_{WF}(u) = \frac{n-1}{N-1} \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},
    Parameters
    ----------
    G : graph
        A NetworkX graph
    u : node, optional
        Return only the value for node u
    distance : edge attribute key, optional (default=None)
        Use the specified edge attribute as the edge distance in shortest
        path calculations
    len_graph : int
        length of complete graph
    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with closeness centrality as the value.
    References
    ----------
    .. [1] <NAME>: Centrality in networks: I.
       Conceptual clarification. Social Networks 1:215-239, 1979.
       http://leonidzhukov.ru/hse/2013/socialnetworks/papers/freeman79-centrality.pdf
    .. [2] pg. 201 of <NAME>. and <NAME>.,
       Social Network Analysis: Methods and Applications, 1994,
       Cambridge University Press.
    """
    # NOTE(review): ``wf_improved`` is accepted for signature parity with
    # networkx but is never read in this adaptation — confirm before relying
    # on it.
    if length is not None:
        import functools

        # use Dijkstra's algorithm with specified attribute as edge weight
        path_length = functools.partial(
            nx.single_source_dijkstra_path_length, weight=length
        )
    else:
        path_length = nx.single_source_shortest_path_length
    # unlike the networkx original, only the single requested node ``u`` is
    # evaluated here (the loop structure is kept from the upstream code)
    nodes = [u]
    closeness_centrality = {}
    for n in nodes:
        sp = dict(path_length(G, n))
        totsp = sum(sp.values())
        if totsp > 0.0 and len(G) > 1:
            closeness_centrality[n] = (len(sp) - 1.0) / totsp
            # normalize to number of nodes-1 in connected part; ``len_graph``
            # is the size of the FULL graph, enabling local-closeness scaling
            s = (len(sp) - 1.0) / (len_graph - 1)
            closeness_centrality[n] *= s
        else:
            # isolated node or trivial graph: define closeness as zero
            closeness_centrality[n] = 0.0
    return closeness_centrality[u]
def local_closeness_centrality(
    graph, radius=5, name="closeness", distance=None, weight=None
):
    """
    Calculates local closeness for each node based on the defined distance.

    .. deprecated::
        Deprecated alias of :func:`closeness_centrality`; will be removed in
        momepy 0.4.0. All arguments are forwarded unchanged.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    radius: int
        Include all neighbors of distance <= radius from n
    name : str, optional
        calculated attribute name
    distance : str, optional
        Use specified edge data key as distance for ego_graph generation.
    weight : str, optional
        Use the specified edge attribute as the edge distance in shortest
        path calculations in closeness centrality algorithm

    Returns
    -------
    Graph
        networkx.Graph

    Examples
    --------
    >>> network_graph = mm.local_closeness_centrality(network_graph, radius=400, distance='edge_length')
    """
    deprecation_message = (
        "local_closeness_centrality() is deprecated and will be removed in momepy 0.4.0. "
        "Use closeness_centrality() instead."
    )
    warnings.warn(deprecation_message, FutureWarning)
    return closeness_centrality(
        graph=graph, radius=radius, name=name, distance=distance, weight=weight
    )
def closeness_centrality(
    graph,
    name="closeness",
    weight="mm_len",
    radius=None,
    distance=None,
    verbose=True,
    **kwargs
):
    """
    Calculates the closeness centrality for nodes.

    Wrapper around ``networkx.closeness_centrality``. When ``radius`` is set,
    centrality is computed locally on the ego subgraph of each node and
    normalised against the size of the full graph.

    .. math::
        C(u) = \\frac{n - 1}{\\sum_{v=1}^{n-1} d(v, u)},

    where :math:`d(v, u)` is the shortest-path distance between :math:`v` and
    :math:`u`, and :math:`n` is the number of nodes that can reach :math:`u`.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    name : str, optional
        calculated attribute name
    weight : str (default 'mm_len')
        attribute holding the weight of edge (e.g. length, angle)
    radius: int
        Include all neighbors of distance <= radius from n
    distance : str, optional
        Use specified edge data key as distance during ego_graph generation.
    verbose : bool (default True)
        if True, shows progress bars in loops and indication of steps
    **kwargs
        kwargs for ``networkx.closeness_centrality``

    Returns
    -------
    Graph
        networkx.Graph

    Examples
    --------
    >>> network_graph = mm.closeness_centrality(network_graph)
    """
    result = graph.copy()
    if not radius:
        # global closeness straight from networkx
        values = nx.closeness_centrality(result, distance=weight, **kwargs)
        nx.set_node_attributes(result, values, name)
        return result
    total_nodes = len(result)
    for node in tqdm(result, total=total_nodes, disable=not verbose):
        # ego subgraph within radius steps (or ``distance`` weight) of node
        ego = nx.ego_graph(result, node, radius=radius, distance=distance)
        result.nodes[node][name] = _closeness_centrality(
            ego, node, length=weight, len_graph=total_nodes
        )
    return result
def betweenness_centrality(
    graph,
    name="betweenness",
    mode="nodes",
    weight="mm_len",
    endpoints=True,
    radius=None,
    distance=None,
    normalized=False,
    verbose=True,
    **kwargs
):
    """
    Calculates the shortest-path betweenness centrality for nodes.
    Wrapper around ``networkx.betweenness_centrality`` or ``networkx.edge_betweenness_centrality``.
    Betweenness centrality of a node `v` is the sum of the
    fraction of all-pairs shortest paths that pass through `v`
    .. math::
        c_B(v) =\\sum_{s,t \\in V} \\frac{\\sigma(s, t|v)}{\\sigma(s, t)}
    where `V` is the set of nodes, :math:`\\sigma(s, t)` is the number of
    shortest :math:`(s, t)`-paths, and :math:`\\sigma(s, t|v)` is the number of
    those paths passing through some node `v` other than `s, t`.
    If `s = t`, :math:`\\sigma(s, t) = 1`, and if `v` in `{s, t}``,
    :math:`\\sigma(s, t|v) = 0`.
    Betweenness centrality of an edge `e` is the sum of the
    fraction of all-pairs shortest paths that pass through `e`
    .. math::
        c_B(e) =\\sum_{s,t \\in V} \\frac{\\sigma(s, t|e)}{\\sigma(s, t)}
    where `V` is the set of nodes, :math:`\\sigma(s, t)` is the number of
    shortest :math:`(s, t)`-paths, and :math:`\\sigma(s, t|e)` is the number of
    those paths passing through edge `e`.
    Adapted from :cite:`porta2006`.
    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    name : str, optional
        calculated attribute name
    mode : str, default 'nodes'
        mode of betweenness calculation. 'node' for node-based, 'edges' for edge-based
    weight : str (default 'mm_len')
        attribute holding the weight of edge (e.g. length, angle)
    radius: int
        Include all neighbors of distance <= radius from n
    distance : str, optional
        Use specified edge data key as distance.
        For example, setting ``distance=’weight’`` will use the edge ``weight`` to
        measure the distance from the node n during ego_graph generation.
    normalized : bool, optional
        If True the betweenness values are normalized by `2/((n-1)(n-2))`,
        where n is the number of nodes in subgraph.
    verbose : bool (default True)
        if True, shows progress bars in loops and indication of steps
    **kwargs
        kwargs for ``networkx.betweenness_centrality`` or ``networkx.edge_betweenness_centrality``
    Returns
    -------
    Graph
        networkx.Graph
    Examples
    --------
    >>> network_graph = mm.betweenness_centrality(network_graph)
    Notes
    -----
    In case of angular betweenness, implementation is based on "Tasos Implementation".
    """
    netx = graph.copy()
    # has to be Graph not MultiGraph as MG is not supported by networkx2.4
    # Collapse parallel edges: when two nodes are linked by several edges,
    # keep the attribute dict of the one with the smallest ``weight`` value.
    G = nx.Graph()
    for u, v, k, data in netx.edges(data=True, keys=True):
        if G.has_edge(u, v):
            if G[u][v][weight] > netx[u][v][k][weight]:
                nx.set_edge_attributes(G, {(u, v): data})
        else:
            G.add_edge(u, v, **data)
    if radius:
        # local betweenness: compute on the ego subgraph of each node and
        # keep only that node's value
        for n in tqdm(G, total=len(G), disable=not verbose):
            sub = nx.ego_graph(
                G, n, radius=radius, distance=distance
            )  # define subgraph of steps=radius
            netx.nodes[n][name] = nx.betweenness_centrality(
                sub, weight=weight, normalized=normalized, **kwargs
            )[n]
    elif mode == "nodes":
        vals = nx.betweenness_centrality(
            G, weight=weight, endpoints=endpoints, **kwargs
        )
        nx.set_node_attributes(netx, vals, name)
    elif mode == "edges":
        vals = nx.edge_betweenness_centrality(G, weight=weight, **kwargs)
        # write values back onto every parallel edge of the original multigraph
        for u, v, k in netx.edges(keys=True):
            try:
                val = vals[u, v]
            except KeyError:
                # undirected result dict may key the edge as (v, u)
                val = vals[v, u]
            netx[u][v][k][name] = val
    else:
        raise ValueError(
            "Mode {} is not supported. Use 'nodes' or 'edges'.".format(mode)
        )
    return netx
def local_betweenness_centrality(
    graph,
    radius=5,
    name="betweenness",
    distance=None,
    weight=None,
    normalized=False,
    **kwargs
):
    """
    Calculates the shortest-path betweenness centrality for nodes within subgraph.

    .. deprecated::
        Deprecated alias of :func:`betweenness_centrality`; will be removed in
        momepy 0.4.0. All arguments are forwarded unchanged.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    radius: int
        Include all neighbors of distance <= radius from n
    name : str, optional
        calculated attribute name
    distance : str, optional
        Use specified edge data key as distance for ego_graph generation.
    weight : str, optional
        Use the specified edge attribute as the edge distance in shortest
        path calculations
    normalized : bool, optional
        If True the betweenness values are normalized by `2/((n-1)(n-2))`,
        where n is the number of nodes in subgraph.
    **kwargs
        kwargs for ``networkx.betweenness_centrality_subset``

    Returns
    -------
    Graph
        networkx.Graph

    Examples
    --------
    >>> network_graph = mm.local_betweenness_centrality(network_graph, radius=800, distance='edge_length')
    """
    deprecation_message = (
        "local_betweenness_centrality() is deprecated and will be removed in momepy 0.4.0. "
        "Use betweenness_centrality() instead."
    )
    warnings.warn(deprecation_message, FutureWarning)
    return betweenness_centrality(
        graph,
        radius=radius,
        name=name,
        distance=distance,
        weight=weight,
        normalized=normalized,
        **kwargs
    )
def _euclidean(n, m):
"""helper for straightness"""
return math.sqrt((n[0] - m[0]) ** 2 + (n[1] - m[1]) ** 2)
def _straightness_centrality(G, weight, normalized=True):
    """
    Calculates straightness centrality.

    For each node, sums the ratio of Euclidean to network distance over all
    reachable targets, averaged over ``len(G) - 1`` nodes. Returns a dict
    keyed by node.
    """
    straightness_centrality = {}
    for n in G.nodes():
        straightness = 0
        # weighted shortest-path (network) distances from n to reachable nodes
        sp = nx.single_source_dijkstra_path_length(G, n, weight=weight)
        if len(sp) > 0 and len(G) > 1:
            for target in sp:
                if n != target:
                    network_dist = sp[target]
                    # NOTE(review): nodes appear to be (x, y) coordinate
                    # tuples, since _euclidean indexes them positionally —
                    # confirm against graph construction.
                    euclidean_dist = _euclidean(n, target)
                    straightness = straightness + (euclidean_dist / network_dist)
            straightness_centrality[n] = straightness * (1.0 / (len(G) - 1.0))
            # normalize to number of nodes-1 in connected part
            if normalized:
                if len(sp) > 1:
                    s = (len(G) - 1.0) / (len(sp) - 1.0)
                    straightness_centrality[n] *= s
                else:
                    # node reaches nothing but itself: no meaningful value
                    straightness_centrality[n] = 0
        else:
            straightness_centrality[n] = 0.0
    return straightness_centrality
def straightness_centrality(
    graph,
    weight="mm_len",
    normalized=True,
    name="straightness",
    radius=None,
    distance=None,
    verbose=True,
):
    """
    Calculates the straightness centrality for nodes.

    .. math::
        C_{S}(i)=\\frac{1}{n-1} \\sum_{j \\in V, j \\neq i} \\frac{d_{i j}^{E u}}{d_{i j}}

    where :math:`\\mathrm{d}^{\\mathrm{E} \\mathrm{u}}_{\\mathrm{ij}}` is the
    Euclidean distance between nodes `i` and `j` along a straight line.

    Adapted from :cite:`porta2006`.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    weight : str (default 'mm_len')
        attribute holding length of edge
    normalized : bool
        normalize to number of nodes-1 in connected part (for local
        straightness it is recommended to set normalized to False)
    name : str, optional
        calculated attribute name
    radius: int
        Include all neighbors of distance <= radius from n
    distance : str, optional
        Use specified edge data key as distance during ego_graph generation.
    verbose : bool (default True)
        if True, shows progress bars in loops and indication of steps

    Returns
    -------
    Graph
        networkx.Graph

    Examples
    --------
    >>> network_graph = mm.straightness_centrality(network_graph)
    """
    result = graph.copy()
    if not radius:
        values = _straightness_centrality(result, weight=weight, normalized=normalized)
        nx.set_node_attributes(result, values, name)
        return result
    for node in tqdm(result, total=len(result), disable=not verbose):
        # ego subgraph within radius steps (or ``distance`` weight) of node
        ego = nx.ego_graph(result, node, radius=radius, distance=distance)
        result.nodes[node][name] = _straightness_centrality(
            ego, weight=weight, normalized=normalized
        )[node]
    return result
def local_straightness_centrality(
    graph, radius=5, name="straightness", distance=None, weight="mm_len"
):
    """
    Calculates local straightness for each node based on the defined distance.

    .. deprecated::
        Deprecated alias of :func:`straightness_centrality`; will be removed in
        momepy 0.4.0. All arguments are forwarded unchanged.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    radius: int
        Include all neighbors of distance <= radius from n
    name : str, optional
        calculated attribute name
    distance : str, optional
        Use specified edge data key as distance for ego_graph generation.
    weight : str, optional
        Use the specified edge attribute as the edge distance in shortest
        path calculations

    Returns
    -------
    Graph
        networkx.Graph

    Examples
    --------
    >>> network_graph = mm.local_straightness_centrality(network_graph, radius=400, distance='edge_length')
    """
    deprecation_message = (
        "local_straightness_centrality() is deprecated and will be removed in momepy 0.4.0. "
        "Use straightness_centrality() instead."
    )
    warnings.warn(deprecation_message, FutureWarning)
    return straightness_centrality(
        graph=graph, radius=radius, name=name, distance=distance, weight=weight
    )
def subgraph(
    graph,
    radius=5,
    distance=None,
    meshedness=True,
    cds_length=True,
    mode="sum",
    degree="degree",
    length="mm_len",
    mean_node_degree=True,
    proportion={3: True, 4: True, 0: True},
    cyclomatic=True,
    edge_node_ratio=True,
    gamma=True,
    local_closeness=True,
    closeness_weight=None,
    verbose=True,
):
    """
    Calculates all subgraph-based characters.

    Generating subgraph might be a time consuming activity. If we want to use
    the same subgraph for more characters, ``subgraph`` allows this by
    generating subgraph and then analysing it using selected options.

    Parameters
    ----------
    graph : networkx.Graph
        Graph representing street network.
        Ideally generated from GeoDataFrame using :func:`momepy.gdf_to_nx`
    radius: int
        radius defining the extent of subgraph
    distance : str, optional
        Use specified edge data key as distance.
        For example, setting ``distance=’weight’`` will use the edge ``weight`` to
        measure the distance from the node n.
    meshedness : bool, default True
        Calculate meshedness (True/False)
    cds_length : bool, default True
        Calculate cul-de-sac length (True/False)
    mode : str (defualt 'sum')
        if ``'sum'``, calculate total cds_length, if ``'mean'`` calculate mean cds_length
    degree : str
        name of attribute of node degree (:py:func:`momepy.node_degree`)
    length : str, default `mm_len`
        name of attribute of segment length (geographical)
    mean_node_degree : bool, default True
        Calculate mean node degree (True/False)
    proportion : dict, default {3: True, 4: True, 0: True}
        Calculate proportion {3: True/False, 4: True/False, 0: True/False}
    cyclomatic : bool, default True
        Calculate cyclomatic complexity (True/False)
    edge_node_ratio : bool, default True
        Calculate edge node ratio (True/False)
    gamma : bool, default True
        Calculate gamma index (True/False)
    local_closeness : bool, default True
        Calculate local closeness centrality (True/False)
    closeness_weight : str, optional
        Use the specified edge attribute as the edge distance in shortest
        path calculations in closeness centrality algorithm
    verbose : bool (default True)
        if True, shows progress bars in loops and indication of steps

    Returns
    -------
    Graph
        networkx.Graph

    Examples
    --------
    >>> network_graph = mm.subgraph(network_graph)
    """
    # NOTE: the mutable default for ``proportion`` is kept for backward
    # compatibility; it is only read, never mutated.
    netx = graph.copy()

    # Label cul-de-sac edges ONCE, before any ego subgraph is taken. The
    # previous implementation relabelled every edge of the whole graph inside
    # the per-node loop (O(nodes * edges)); worse, each ego subgraph was
    # created *before* that labelling ran, so (ego_graph copying edge dicts)
    # the first node's cds_length was computed on edges lacking "cdsbool".
    if cds_length:
        for u, v, k in netx.edges(keys=True):
            netx[u][v][k]["cdsbool"] = (
                netx.nodes[u][degree] == 1 or netx.nodes[v][degree] == 1
            )

    lengraph = len(netx)  # loop-invariant; used for closeness normalisation
    for n in tqdm(netx, total=lengraph, disable=not verbose):
        sub = nx.ego_graph(
            netx, n, radius=radius, distance=distance
        )  # define subgraph of steps=radius
        if meshedness:
            netx.nodes[n]["meshedness"] = _meshedness(sub)
        if cds_length:
            netx.nodes[n]["cds_length"] = _cds_length(sub, mode=mode, length=length)
        if mean_node_degree:
            netx.nodes[n]["mean_node_degree"] = _mean_node_degree(sub, degree=degree)
        if proportion:
            counts = _proportion(sub, degree=degree)
            if proportion[3]:
                netx.nodes[n]["proportion_3"] = counts[3] / len(sub)
            if proportion[4]:
                netx.nodes[n]["proportion_4"] = counts[4] / len(sub)
            if proportion[0]:
                # deadends are nodes of degree 1, hence counts[1]
                netx.nodes[n]["proportion_0"] = counts[1] / len(sub)
        if cyclomatic:
            netx.nodes[n]["cyclomatic"] = _cyclomatic(sub)
        if edge_node_ratio:
            netx.nodes[n]["edge_node_ratio"] = _edge_node_ratio(sub)
        if gamma:
            netx.nodes[n]["gamma"] = _gamma(sub)
        if local_closeness:
            netx.nodes[n]["local_closeness"] = _closeness_centrality(
                sub, n, length=closeness_weight, len_graph=lengraph
            )
    return netx
def mean_nodes(G, attr):
    """Set each edge's ``attr`` to the mean of its two endpoint nodes' ``attr``."""
    for start, end, key in G.edges(keys=True):
        midpoint = (G.nodes[start][attr] + G.nodes[end][attr]) / 2
        G[start][end][key][attr] = midpoint
| [
"networkx.degree",
"numpy.mean",
"math.sqrt",
"networkx.Graph",
"networkx.ego_graph",
"collections.Counter",
"networkx.square_clustering",
"networkx.closeness_centrality",
"networkx.set_edge_attributes",
"functools.partial",
"networkx.set_node_attributes",
"networkx.betweenness_centrality",
... | [((1223, 1265), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['netx', 'degree', 'name'], {}), '(netx, degree, name)\n', (1245, 1265), True, 'import networkx as nx\n'), ((9377, 9404), 'collections.Counter', 'collections.Counter', (['values'], {}), '(values)\n', (9396, 9404), False, 'import collections\n'), ((18870, 18896), 'networkx.square_clustering', 'nx.square_clustering', (['netx'], {}), '(netx)\n', (18890, 18896), True, 'import networkx as nx\n'), ((18901, 18941), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['netx', 'vals', 'name'], {}), '(netx, vals, name)\n', (18923, 18941), True, 'import networkx as nx\n'), ((23720, 23877), 'warnings.warn', 'warnings.warn', (['"""local_closeness_centrality() is deprecated and will be removed in momepy 0.4.0. Use closeness_centrality() instead."""', 'FutureWarning'], {}), "(\n 'local_closeness_centrality() is deprecated and will be removed in momepy 0.4.0. Use closeness_centrality() instead.'\n , FutureWarning)\n", (23733, 23877), False, 'import warnings\n'), ((29086, 29096), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (29094, 29096), True, 'import networkx as nx\n'), ((32497, 32658), 'warnings.warn', 'warnings.warn', (['"""local_betweenness_centrality() is deprecated and will be removed in momepy 0.4.0. Use betweenness_centrality() instead."""', 'FutureWarning'], {}), "(\n 'local_betweenness_centrality() is deprecated and will be removed in momepy 0.4.0. Use betweenness_centrality() instead.'\n , FutureWarning)\n", (32510, 32658), False, 'import warnings\n'), ((32949, 32999), 'math.sqrt', 'math.sqrt', (['((n[0] - m[0]) ** 2 + (n[1] - m[1]) ** 2)'], {}), '((n[0] - m[0]) ** 2 + (n[1] - m[1]) ** 2)\n', (32958, 32999), False, 'import math\n'), ((37692, 37855), 'warnings.warn', 'warnings.warn', (['"""local_straightness_centrality() is deprecated and will be removed in momepy 0.4.0. 
Use straightness_centrality() instead."""', 'FutureWarning'], {}), "(\n 'local_straightness_centrality() is deprecated and will be removed in momepy 0.4.0. Use straightness_centrality() instead.'\n , FutureWarning)\n", (37705, 37855), False, 'import warnings\n'), ((1202, 1217), 'networkx.degree', 'nx.degree', (['netx'], {}), '(netx)\n', (1211, 1217), True, 'import networkx as nx\n'), ((4416, 4432), 'numpy.mean', 'np.mean', (['lengths'], {}), '(lengths)\n', (4423, 4432), True, 'import numpy as np\n'), ((4778, 4791), 'numpy.mean', 'np.mean', (['lens'], {}), '(lens)\n', (4785, 4791), True, 'import numpy as np\n'), ((21331, 21402), 'functools.partial', 'functools.partial', (['nx.single_source_dijkstra_path_length'], {'weight': 'length'}), '(nx.single_source_dijkstra_path_length, weight=length)\n', (21348, 21402), False, 'import functools\n'), ((26073, 26129), 'networkx.closeness_centrality', 'nx.closeness_centrality', (['netx'], {'distance': 'weight'}), '(netx, distance=weight, **kwargs)\n', (26096, 26129), True, 'import networkx as nx\n'), ((26138, 26178), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['netx', 'vals', 'name'], {}), '(netx, vals, name)\n', (26160, 26178), True, 'import networkx as nx\n'), ((33212, 33270), 'networkx.single_source_dijkstra_path_length', 'nx.single_source_dijkstra_path_length', (['G', 'n'], {'weight': 'weight'}), '(G, n, weight=weight)\n', (33249, 33270), True, 'import networkx as nx\n'), ((36081, 36121), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['netx', 'vals', 'name'], {}), '(netx, vals, name)\n', (36103, 36121), True, 'import networkx as nx\n'), ((40653, 40708), 'networkx.ego_graph', 'nx.ego_graph', (['netx', 'n'], {'radius': 'radius', 'distance': 'distance'}), '(netx, n, radius=radius, distance=distance)\n', (40665, 40708), True, 'import networkx as nx\n'), ((3069, 3124), 'networkx.ego_graph', 'nx.ego_graph', (['netx', 'n'], {'radius': 'radius', 'distance': 'distance'}), '(netx, n, radius=radius, 
distance=distance)\n', (3081, 3124), True, 'import networkx as nx\n'), ((6900, 6955), 'networkx.ego_graph', 'nx.ego_graph', (['netx', 'n'], {'radius': 'radius', 'distance': 'distance'}), '(netx, n, radius=radius, distance=distance)\n', (6912, 6955), True, 'import networkx as nx\n'), ((8907, 8962), 'networkx.ego_graph', 'nx.ego_graph', (['netx', 'n'], {'radius': 'radius', 'distance': 'distance'}), '(netx, n, radius=radius, distance=distance)\n', (8919, 8962), True, 'import networkx as nx\n'), ((11403, 11458), 'networkx.ego_graph', 'nx.ego_graph', (['netx', 'n'], {'radius': 'radius', 'distance': 'distance'}), '(netx, n, radius=radius, distance=distance)\n', (11415, 11458), True, 'import networkx as nx\n'), ((13895, 13950), 'networkx.ego_graph', 'nx.ego_graph', (['netx', 'n'], {'radius': 'radius', 'distance': 'distance'}), '(netx, n, radius=radius, distance=distance)\n', (13907, 13950), True, 'import networkx as nx\n'), ((15952, 16007), 'networkx.ego_graph', 'nx.ego_graph', (['netx', 'n'], {'radius': 'radius', 'distance': 'distance'}), '(netx, n, radius=radius, distance=distance)\n', (15964, 16007), True, 'import networkx as nx\n'), ((18077, 18132), 'networkx.ego_graph', 'nx.ego_graph', (['netx', 'n'], {'radius': 'radius', 'distance': 'distance'}), '(netx, n, radius=radius, distance=distance)\n', (18089, 18132), True, 'import networkx as nx\n'), ((25798, 25853), 'networkx.ego_graph', 'nx.ego_graph', (['netx', 'n'], {'radius': 'radius', 'distance': 'distance'}), '(netx, n, radius=radius, distance=distance)\n', (25810, 25853), True, 'import networkx as nx\n'), ((29445, 29497), 'networkx.ego_graph', 'nx.ego_graph', (['G', 'n'], {'radius': 'radius', 'distance': 'distance'}), '(G, n, radius=radius, distance=distance)\n', (29457, 29497), True, 'import networkx as nx\n'), ((29751, 29825), 'networkx.betweenness_centrality', 'nx.betweenness_centrality', (['G'], {'weight': 'weight', 'endpoints': 'endpoints'}), '(G, weight=weight, endpoints=endpoints, **kwargs)\n', (29776, 
29825), True, 'import networkx as nx\n'), ((29856, 29896), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['netx', 'vals', 'name'], {}), '(netx, vals, name)\n', (29878, 29896), True, 'import networkx as nx\n'), ((35723, 35778), 'networkx.ego_graph', 'nx.ego_graph', (['netx', 'n'], {'radius': 'radius', 'distance': 'distance'}), '(netx, n, radius=radius, distance=distance)\n', (35735, 35778), True, 'import networkx as nx\n'), ((29257, 29298), 'networkx.set_edge_attributes', 'nx.set_edge_attributes', (['G', '{(u, v): data}'], {}), '(G, {(u, v): data})\n', (29279, 29298), True, 'import networkx as nx\n'), ((29597, 29675), 'networkx.betweenness_centrality', 'nx.betweenness_centrality', (['sub'], {'weight': 'weight', 'normalized': 'normalized'}), '(sub, weight=weight, normalized=normalized, **kwargs)\n', (29622, 29675), True, 'import networkx as nx\n'), ((29938, 29996), 'networkx.edge_betweenness_centrality', 'nx.edge_betweenness_centrality', (['G'], {'weight': 'weight'}), '(G, weight=weight, **kwargs)\n', (29968, 29996), True, 'import networkx as nx\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
from .log import logger
def static_params_to_dygraph(model, static_tensor_dict):
    """Map static-graph parameter tensors onto dygraph parameter names.

    **NOTE** The model must both support static graph and dygraph mode.

    Args:
        model (nn.Layer): the model of a neural network.
        static_tensor_dict (dict): tensors keyed by their static-graph names,
            usually loaded via `paddle.static.load_program_state`.

    Returns:
        dict: a state dict keyed the same way as the dygraph state dict.
    """
    converted = dict()
    for dygraph_name, param in model.state_dict().items():
        # A parameter keeps its static-graph name in ``param.name``.
        if param.name not in static_tensor_dict:
            logger.info("%s paramter is missing from you state dict." %
                        dygraph_name)
            continue
        converted[dygraph_name] = static_tensor_dict[param.name]
    return converted
def dygraph_params_to_static(model, dygraph_tensor_dict, topo=None):
    """Map dygraph parameter tensors onto static-graph parameter names.

    **NOTE** The model must both support static graph and dygraph mode.

    Args:
        model (nn.Layer): the model of a neural network.
        dygraph_tensor_dict (dict): tensors keyed by their dygraph names.
        topo: model-parallel topology; required when any parameter is
            distributed, in which case only this rank's shard is kept.

    Returns:
        dict: a state dict keyed by the static-graph parameter names.
    """
    converted = dict()
    for dygraph_name, param in model.state_dict().items():
        if dygraph_name not in dygraph_tensor_dict:
            logger.info("%s paramter is missing from you state dict." %
                        dygraph_name)
            continue
        tensor = dygraph_tensor_dict[dygraph_name]
        if not param.is_distributed:
            converted[param.name] = tensor
            continue
        # Distributed parameter: locate the partitioned axis (the first one
        # whose size differs from the full tensor; defaults to the last axis
        # when all match) and keep only this rank's shard.
        assert topo is not None
        for axis, size in enumerate(tensor.shape):
            if param.shape[axis] != size:
                break
        shards = np.split(tensor, topo.mp_info.size, axis=axis)
        converted[param.name] = shards[topo.mp_info.rank]
    return converted
class TimeCostAverage(object):
    """Accumulates per-step time costs and reports their running average
    during training or inference.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated state: step count and total elapsed time."""
        self.cnt = 0
        self.total_time = 0

    def record(self, usetime):
        """Fold one step's elapsed time into the running totals."""
        self.cnt += 1
        self.total_time += usetime

    def get_average(self):
        """Return the mean recorded time, or 0 when nothing has been recorded."""
        return self.total_time / self.cnt if self.cnt else 0
def get_env_device():
    """Return the device name of the running environment
    ('gpu', 'npu', 'rocm', 'xpu' or 'cpu').
    """
    # Probe in the same priority order as the original elif chain.
    checks = (
        (paddle.is_compiled_with_cuda, 'gpu'),
        (paddle.is_compiled_with_npu, 'npu'),
        (paddle.is_compiled_with_rocm, 'rocm'),
        (paddle.is_compiled_with_xpu, 'xpu'),
    )
    for is_compiled, device in checks:
        if is_compiled():
            return device
    return 'cpu'
def compare_version(version, pair_version):
    """Compare two dotted version strings.

    Args:
        version (str): First version string, formatted like "xxx.yyy.zzz".
        pair_version (str): Second version string, same format.

    Returns:
        int: 1 if version > pair_version; 0 if equal; -1 if version <
            pair_version. A non-numeric component (e.g. "0-rc0") makes the
        side containing it compare as smaller.

    Examples:
        >>> compare_version("2.2.1", "2.2.0")
        >>> 1
        >>> compare_version("2.2.0", "2.2.0")
        >>> 0
        >>> compare_version("2.2.0-rc0", "2.2.0")
        >>> -1
        >>> compare_version("2.3.0-rc0", "2.2.0")
        >>> 1
    """
    lhs = version.strip()
    rhs = pair_version.strip()
    if lhs == rhs:
        return 0
    for lhs_code, rhs_code in zip(lhs.split("."), rhs.split(".")):
        # Non-numeric segments (pre-releases) lose; check lhs first, as before.
        if not lhs_code.isnumeric():
            return -1
        if not rhs_code.isnumeric():
            return 1
        diff = int(lhs_code) - int(rhs_code)
        if diff > 0:
            return 1
        if diff < 0:
            return -1
    return 0
def get_bool_ids_greater_than(probs, limit=0.5, return_prob=False):
    """Collect indices along the last dimension whose probability exceeds a limit.

    Args:
        probs (List[List[float]]): The input probability arrays.
        limit (float): The probability threshold (exclusive).
        return_prob (bool): If True, return (index, probability) pairs
            instead of bare indices.

    Returns:
        List[List[int]]: Indices (or pairs) along the last dimension that
        exceed the limit, mirroring the nesting of the input.
    """
    probs = np.array(probs)
    # Recurse over leading dimensions so only 1-D rows are scanned directly.
    if probs.ndim > 1:
        return [get_bool_ids_greater_than(row, limit, return_prob)
                for row in probs]
    if return_prob:
        return [(idx, prob) for idx, prob in enumerate(probs) if prob > limit]
    return [idx for idx, prob in enumerate(probs) if prob > limit]
def get_span(start_ids, end_ids, with_prob=False):
    """Match start and end positions into non-overlapping spans.

    Args:
        start_ids (List[int]/List[tuple]): The start index list.
        end_ids (List[int]/List[tuple]): The end index list.
        with_prob (bool): If True, each element of both lists is a
            (index, probability) tuple.

    Returns:
        set: Set of (start, end) pairs; every id is used at most once, and a
        later start at or before an end replaces an earlier pairing for it.
    """
    sort_key = (lambda item: item[0]) if with_prob else None
    starts = sorted(start_ids, key=sort_key)
    ends = sorted(end_ids, key=sort_key)

    couples = {}  # end element -> start element currently paired with it
    s_idx = e_idx = 0
    while s_idx < len(starts) and e_idx < len(ends):
        if with_prob:
            s_pos, e_pos = starts[s_idx][0], ends[e_idx][0]
        else:
            s_pos, e_pos = starts[s_idx], ends[e_idx]
        if s_pos <= e_pos:
            # Pair (and possibly re-pair) this end with the current start.
            couples[ends[e_idx]] = starts[s_idx]
            s_idx += 1
            if s_pos == e_pos:
                e_idx += 1
        else:
            # End lies before every remaining start; it can never be paired.
            e_idx += 1
    return {(start, end) for end, start in couples.items()}
| [
"paddle.is_compiled_with_xpu",
"paddle.is_compiled_with_rocm",
"numpy.array",
"numpy.split",
"paddle.is_compiled_with_cuda",
"paddle.is_compiled_with_npu"
] | [((3596, 3626), 'paddle.is_compiled_with_cuda', 'paddle.is_compiled_with_cuda', ([], {}), '()\n', (3624, 3626), False, 'import paddle\n'), ((5717, 5732), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (5725, 5732), True, 'import numpy as np\n'), ((3658, 3687), 'paddle.is_compiled_with_npu', 'paddle.is_compiled_with_npu', ([], {}), '()\n', (3685, 3687), False, 'import paddle\n'), ((3719, 3749), 'paddle.is_compiled_with_rocm', 'paddle.is_compiled_with_rocm', ([], {}), '()\n', (3747, 3749), False, 'import paddle\n'), ((2541, 2586), 'numpy.split', 'np.split', (['tensor', 'topo.mp_info.size'], {'axis': 'dim'}), '(tensor, topo.mp_info.size, axis=dim)\n', (2549, 2586), True, 'import numpy as np\n'), ((3782, 3811), 'paddle.is_compiled_with_xpu', 'paddle.is_compiled_with_xpu', ([], {}), '()\n', (3809, 3811), False, 'import paddle\n')] |
# Offline inference script: run an instance-segmentation model (mmdetection)
# over the ICDAR2019 ArT test set, write one JSON prediction file per image,
# then merge all per-image files into a single submission JSON.
import os
from tqdm import tqdm
from mmdet.apis import init_detector, inference_detector
import numpy as np
import torch
import mmcv
import cv2
import json
import PIL
# Input/output locations and model artifacts (hard-coded for this experiment).
testset_dir = '/home/xiekaiyu/ocr/dataset/ICDAR2019ArT/test_task13'
output_dir = '/home/xiekaiyu/ocr/dataset/ICDAR2019ArT/output/preds'
model_name = 'solo_r50_fpn_1x_coco'
config_file = 'work_dirs/'+model_name+'/'+model_name+'.py'
checkpoint_file = 'work_dirs/'+model_name+'/latest.pth'
print(f'inferencing using model: {checkpoint_file}')
# Build the detector once, on GPU 0.
model = init_detector(config_file, checkpoint_file, device='cuda:0')
# Minimum bbox confidence for a mask to be turned into a polygon.
score_thr = 0.3
print('start inference')
for image in tqdm(os.listdir(testset_dir)):
    image_path = os.path.join(testset_dir, image)
    # Open/close with PIL purely as a guard: skip images that trip PIL's
    # decompression-bomb protection.
    try:
        im = PIL.Image.open(image_path)
        im.close()
    except PIL.Image.DecompressionBombError:
        print(f'skip: {image_path}')
        continue
    # Assumes file names shaped like "<prefix>_<index>.<ext>" — TODO confirm.
    image_index = image.split('.')[0].split('_')[1]
    result = inference_detector(model, image_path)
    torch.cuda.empty_cache()
    # mmdetection returns either (bbox_result, segm_result) or bboxes only.
    if isinstance(result, tuple):
        bbox_result, segm_result = result
        if isinstance(segm_result, tuple):
            segm_result = segm_result[0]  # ms rcnn
    else:
        bbox_result, segm_result = result, None
    # Flatten the per-class results into one bbox array plus parallel labels.
    bboxes = np.vstack(bbox_result)
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ]
    labels = np.concatenate(labels)
    preds = []
    if segm_result is not None and len(labels) > 0:  # non empty
        segms = mmcv.concat_list(segm_result)
        # Keep detections above the score threshold (last bbox column).
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        np.random.seed(42)
        for i in inds:
            i = int(i)
            sg = segms[i]
            if isinstance(sg, torch.Tensor):
                sg = sg.detach().cpu().numpy()
            # Binarize the mask to 0/255 so findContours can trace it.
            mask = sg.astype(np.uint8)
            mask *= 255
            contours, _ = cv2.findContours(
                mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            if len(contours) > 0:
                # Only the first external contour is used as the polygon.
                points = [[float(point[0][0]), float(point[0][1])]
                          for point in contours[0]]
                # A valid polygon needs at least three vertices.
                if len(points) < 3:
                    continue
                points.reverse()  # convert to clock-wise
                confidence = bboxes[i][-1]
                preds.append({
                    'points': points,
                    'confidence': float(confidence)
                })
    output_file = os.path.join(output_dir, image_index+'.json')
    with open(output_file, 'w')as f:
        json.dump(preds, f)
print('collecting results')
# Merge the per-image prediction files into a single submission dict keyed
# "res_<index>".
submit = dict()
submit_file = '/home/xiekaiyu/ocr/dataset/ICDAR2019ArT/output/submit.json'
for pred in tqdm(os.listdir(output_dir)):
    pred_path = os.path.join(output_dir, pred)
    image_index = pred.split('.')[0]
    with open(pred_path, 'r')as f:
        result = json.load(f)
    submit['res_'+image_index] = result
# skip image
# Placeholder for an image with no prediction file — presumably the one
# skipped by the decompression-bomb guard above; verify against the dataset.
submit['res_3102'] = [{
    'points': [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
    'confidence':0.0
}]
with open(submit_file, 'w')as f:
    json.dump(submit, f)
| [
"os.listdir",
"PIL.Image.open",
"numpy.where",
"mmdet.apis.init_detector",
"os.path.join",
"cv2.findContours",
"mmcv.concat_list",
"numpy.vstack",
"numpy.concatenate",
"numpy.random.seed",
"json.load",
"numpy.full",
"mmdet.apis.inference_detector",
"torch.cuda.empty_cache",
"json.dump"
] | [((518, 578), 'mmdet.apis.init_detector', 'init_detector', (['config_file', 'checkpoint_file'], {'device': '"""cuda:0"""'}), "(config_file, checkpoint_file, device='cuda:0')\n", (531, 578), False, 'from mmdet.apis import init_detector, inference_detector\n'), ((639, 662), 'os.listdir', 'os.listdir', (['testset_dir'], {}), '(testset_dir)\n', (649, 662), False, 'import os\n'), ((682, 714), 'os.path.join', 'os.path.join', (['testset_dir', 'image'], {}), '(testset_dir, image)\n', (694, 714), False, 'import os\n'), ((950, 987), 'mmdet.apis.inference_detector', 'inference_detector', (['model', 'image_path'], {}), '(model, image_path)\n', (968, 987), False, 'from mmdet.apis import init_detector, inference_detector\n'), ((992, 1016), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1014, 1016), False, 'import torch\n'), ((1260, 1282), 'numpy.vstack', 'np.vstack', (['bbox_result'], {}), '(bbox_result)\n', (1269, 1282), True, 'import numpy as np\n'), ((1413, 1435), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (1427, 1435), True, 'import numpy as np\n'), ((2458, 2505), 'os.path.join', 'os.path.join', (['output_dir', "(image_index + '.json')"], {}), "(output_dir, image_index + '.json')\n", (2470, 2505), False, 'import os\n'), ((2706, 2728), 'os.listdir', 'os.listdir', (['output_dir'], {}), '(output_dir)\n', (2716, 2728), False, 'import os\n'), ((2747, 2777), 'os.path.join', 'os.path.join', (['output_dir', 'pred'], {}), '(output_dir, pred)\n', (2759, 2777), False, 'import os\n'), ((3088, 3108), 'json.dump', 'json.dump', (['submit', 'f'], {}), '(submit, f)\n', (3097, 3108), False, 'import json\n'), ((738, 764), 'PIL.Image.open', 'PIL.Image.open', (['image_path'], {}), '(image_path)\n', (752, 764), False, 'import PIL\n'), ((1306, 1347), 'numpy.full', 'np.full', (['bbox.shape[0]', 'i'], {'dtype': 'np.int32'}), '(bbox.shape[0], i, dtype=np.int32)\n', (1313, 1347), True, 'import numpy as np\n'), ((1532, 1561), 'mmcv.concat_list', 
'mmcv.concat_list', (['segm_result'], {}), '(segm_result)\n', (1548, 1561), False, 'import mmcv\n'), ((1624, 1642), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1638, 1642), True, 'import numpy as np\n'), ((2549, 2568), 'json.dump', 'json.dump', (['preds', 'f'], {}), '(preds, f)\n', (2558, 2568), False, 'import json\n'), ((2867, 2879), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2876, 2879), False, 'import json\n'), ((1577, 1612), 'numpy.where', 'np.where', (['(bboxes[:, -1] > score_thr)'], {}), '(bboxes[:, -1] > score_thr)\n', (1585, 1612), True, 'import numpy as np\n'), ((1896, 1962), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (1912, 1962), False, 'import cv2\n')] |
#!/usr/bin/env python
import rospy
import cv2
import numpy as np
from std_msgs.msg import String
from sensor_msgs.msg import Image, CompressedImage
from cv_bridge import CvBridge, CvBridgeError
# Shared converter between OpenCV arrays and ROS Image messages.
bridge = CvBridge()
class ImageAverager:
    """ROS node logic: maintain a running average of incoming images.

    Subscribes to ``~topic_in`` (CompressedImage), folds every frame into an
    incremental mean, and republishes the current average on ``~topic_out``
    as an uncompressed Image.
    """

    def __init__(self):
        self.publisher = rospy.Publisher("~topic_out", Image, queue_size=1)
        self.subscriber = rospy.Subscriber("~topic_in", CompressedImage, self.callback)
        self.image_avg = None  # running mean as a float image; None before the first frame
        self.num_frames = 0    # number of frames folded into image_avg

    def callback(self, msg):
        """Decode one compressed frame, update the mean, publish the result."""
        # np.frombuffer replaces the long-deprecated np.fromstring for raw
        # byte buffers (identical result, no DeprecationWarning).
        np_arr = np.frombuffer(msg.data, np.uint8)
        # NOTE(review): cv2.CV_LOAD_IMAGE_COLOR is the OpenCV-2-era constant;
        # OpenCV 3+ spells it cv2.IMREAD_COLOR — confirm the targeted version.
        image_np = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
        # Average in float to avoid 8-bit quantization drift.
        image_np_float = image_np.astype('float')
        if self.image_avg is None:
            # First frame: the average is the frame itself.
            self.image_avg = image_np_float
        else:
            # Incremental mean: avg_{n+1} = avg_n * n/(n+1) + frame * 1/(n+1).
            prev_weight = float(self.num_frames)/float(self.num_frames+1)
            new_weight = 1.0 - prev_weight
            self.image_avg = cv2.addWeighted(self.image_avg, prev_weight, image_np_float, new_weight, 0.0)
        self.num_frames = self.num_frames+1
        # Distinct name so the incoming message parameter is not shadowed.
        out_msg = bridge.cv2_to_imgmsg(self.image_avg.astype('uint8'), "bgr8")
        self.publisher.publish(out_msg)
def init_image_averager_node():
    """Initialise the ROS node, create the averager, and block until shutdown."""
    rospy.init_node('image_average_node', anonymous=True)
    node = ImageAverager()  # keep a reference so the subscription stays alive
    rospy.spin()  # keeps the script from exiting
# Script entry point: run the averaging node when executed directly.
if __name__ == '__main__':
    init_image_averager_node()
| [
"rospy.Subscriber",
"rospy.init_node",
"numpy.fromstring",
"cv_bridge.CvBridge",
"cv2.addWeighted",
"rospy.spin",
"cv2.imdecode",
"rospy.Publisher"
] | [((204, 214), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (212, 214), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((1372, 1425), 'rospy.init_node', 'rospy.init_node', (['"""image_average_node"""'], {'anonymous': '(True)'}), "('image_average_node', anonymous=True)\n", (1387, 1425), False, 'import rospy\n'), ((1466, 1478), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1476, 1478), False, 'import rospy\n'), ((286, 336), 'rospy.Publisher', 'rospy.Publisher', (['"""~topic_out"""', 'Image'], {'queue_size': '(1)'}), "('~topic_out', Image, queue_size=1)\n", (301, 336), False, 'import rospy\n'), ((363, 424), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~topic_in"""', 'CompressedImage', 'self.callback'], {}), "('~topic_in', CompressedImage, self.callback)\n", (379, 424), False, 'import rospy\n'), ((555, 588), 'numpy.fromstring', 'np.fromstring', (['msg.data', 'np.uint8'], {}), '(msg.data, np.uint8)\n', (568, 588), True, 'import numpy as np\n'), ((608, 653), 'cv2.imdecode', 'cv2.imdecode', (['np_arr', 'cv2.CV_LOAD_IMAGE_COLOR'], {}), '(np_arr, cv2.CV_LOAD_IMAGE_COLOR)\n', (620, 653), False, 'import cv2\n'), ((1088, 1165), 'cv2.addWeighted', 'cv2.addWeighted', (['self.image_avg', 'prev_weight', 'image_np_float', 'new_weight', '(0.0)'], {}), '(self.image_avg, prev_weight, image_np_float, new_weight, 0.0)\n', (1103, 1165), False, 'import cv2\n')] |
from collections import defaultdict
import numpy as np
class MetricsAccumulator:
    """Collects named metric samples and prints their running averages.

    ``update_metric`` appends one observation per call;
    ``print_average_metric`` prints "<name> - <mean>" for every metric seen
    so far and then clears all accumulated state.
    """

    def __init__(self) -> None:
        # defaultdict(list) is the idiomatic spelling of defaultdict(lambda: []).
        self.accumulator = defaultdict(list)

    def update_metric(self, metric_name, metric_value):
        """Append one observation for ``metric_name``."""
        self.accumulator[metric_name].append(metric_value)

    def print_average_metric(self):
        """Print the mean of every accumulated metric, then reset."""
        for k, v in self.accumulator.items():
            average_v = np.array(v).mean()
            print(f"{k} - {average_v:.2f}")
        # Reset explicitly instead of re-invoking __init__ on a live instance.
        self.accumulator = defaultdict(list)
| [
"numpy.array",
"collections.defaultdict"
] | [((143, 167), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (154, 167), False, 'from collections import defaultdict\n'), ((390, 401), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (398, 401), True, 'import numpy as np\n')] |
#! -*- coding:utf-8 -*-
'''
@Author: ZM
@Date and Time: 2020/12/15 20:27
@File: ToOneHot.py
'''
import numpy as np
class ToOneHot:
    """Callable converting integer class labels into one-hot float32 arrays."""

    def __init__(self, num_classes):
        # Width of every one-hot vector produced by this encoder.
        self.num_classes = num_classes

    def __call__(self, data):
        n = data.size
        if n > 1:
            # Batch of labels: one row per label, shape (n, num_classes).
            encoded = np.zeros((n, self.num_classes), dtype='float32')
            encoded[np.arange(n), data] = 1.
        else:
            # Single label (or empty input): flat vector of length num_classes.
            encoded = np.zeros(self.num_classes, dtype='float32')
            encoded[data] = 1.
        return encoded
return one_hot | [
"numpy.zeros",
"numpy.arange"
] | [((335, 391), 'numpy.zeros', 'np.zeros', (['(data_size, self.num_classes)'], {'dtype': '"""float32"""'}), "((data_size, self.num_classes), dtype='float32')\n", (343, 391), True, 'import numpy as np\n'), ((481, 524), 'numpy.zeros', 'np.zeros', (['self.num_classes'], {'dtype': '"""float32"""'}), "(self.num_classes, dtype='float32')\n", (489, 524), True, 'import numpy as np\n'), ((412, 432), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (421, 432), True, 'import numpy as np\n')] |
import contextlib
import numbers
import typing
import numpy as np
import pandas as pd
from river import base
from river import optim
from river import utils
# Public API of this module.
__all__ = [
    'LinearRegression',
    'LogisticRegression',
    'Perceptron'
]
class GLM:
    """Generalized Linear Model.
    This serves as a base class for linear and logistic regression.
    """
    def __init__(self, optimizer, loss, l2, intercept, intercept_lr, clip_gradient, initializer):
        self.optimizer = optimizer
        self.loss = loss
        self.l2 = l2
        self.intercept = intercept
        # A plain number is wrapped in a constant scheduler so the intercept
        # learning rate can always be queried per iteration.
        self.intercept_lr = (
            optim.schedulers.Constant(intercept_lr)
            if isinstance(intercept_lr, numbers.Number) else
            intercept_lr
        )
        self.clip_gradient = clip_gradient
        self.initializer = initializer
        self._weights = utils.VectorDict(None)
        # The predict_many functions are going to return pandas.Series. We can name the series with
        # the name given to the y series seen during the last learn_many call.
        self._y_name = None
    @property
    def weights(self):
        """Return the current weights as a plain dict."""
        return self._weights.to_dict()
    @contextlib.contextmanager
    def _learn_mode(self, mask=None):
        """Temporarily swap the weights for a masked, initializing view.

        Inside the context, unseen features are materialized through the
        initializer; the original weight vector is restored on exit.
        """
        weights = self._weights
        try:
            # enable the initializer and set a mask
            self._weights = utils.VectorDict(weights, self.initializer, mask)
            yield
        finally:
            self._weights = weights
    def _fit(self, x, y, w, get_grad):
        """Run one optimization step using the supplied gradient function."""
        # Some optimizers need to do something before a prediction is made
        self.optimizer.update_before_pred(w=self._weights)
        # Calculate the gradient
        gradient, loss_gradient = get_grad(x, y, w)
        # Update the intercept
        self.intercept -= self.intercept_lr.get(self.optimizer.n_iterations) * loss_gradient
        # Update the weights
        self.optimizer.update_after_pred(w=self._weights, g=gradient)
        return self
    # Single instance methods
    def _raw_dot_one(self, x: dict) -> float:
        """Return the raw (pre-link-function) model output for one sample."""
        return self._weights @ utils.VectorDict(x) + self.intercept
    def _eval_gradient_one(self, x: dict, y: float, w: float) -> typing.Tuple[dict, float]:
        """Compute (weight gradient, loss gradient) for a single sample."""
        loss_gradient = self.loss.gradient(y_true=y, y_pred=self._raw_dot_one(x))
        loss_gradient *= w
        # Clamp so one outlier cannot blow up the update.
        loss_gradient = float(utils.math.clamp(loss_gradient, -self.clip_gradient, self.clip_gradient))
        return loss_gradient * utils.VectorDict(x) + 2. * self.l2 * self._weights, loss_gradient
    def learn_one(self, x, y, w=1.):
        """Update the model with one (x, y) pair; returns self."""
        with self._learn_mode(x):
            return self._fit(x, y, w, get_grad=self._eval_gradient_one)
    # Mini-batch methods
    def _raw_dot_many(self, X: pd.DataFrame) -> np.ndarray:
        """Return the raw (pre-link-function) outputs for a mini-batch."""
        return X.values @ self._weights.to_numpy(X.columns) + self.intercept
    def _eval_gradient_many(self,
                            X: pd.DataFrame,
                            y: pd.Series,
                            w: typing.Union[float, pd.Series]) -> typing.Tuple[dict, float]:
        """Compute the batch-averaged (weight gradient, loss gradient)."""
        loss_gradient = self.loss.gradient(y_true=y.values, y_pred=self._raw_dot_many(X))
        loss_gradient *= w
        loss_gradient = np.clip(loss_gradient, -self.clip_gradient, self.clip_gradient)
        # At this point we have a feature matrix X of shape (n, p). The loss gradient is a vector
        # of length n. We want to multiply each of X's rows by the corresponding value in the loss
        # gradient. When this is all done, we collapse X by computing the average of each column,
        # thereby obtaining the mean gradient of the batch. From thereon, the code reduces to the
        # single instance case.
        gradient = np.einsum('ij,i->ij', X.values, loss_gradient).mean(axis=0)
        return dict(zip(X.columns, gradient)), loss_gradient.mean()
    def learn_many(self, X: pd.DataFrame, y: pd.Series, w: typing.Union[float, pd.Series] = 1):
        """Update the model with a mini-batch; returns self."""
        self._y_name = y.name
        with self._learn_mode(set(X)):
            return self._fit(X, y, w, get_grad=self._eval_gradient_many)
class LinearRegression(GLM, base.MiniBatchRegressor):
    """Linear regression.
    This estimator supports learning with mini-batches. On top of the single instance methods, it
    provides the following methods: `learn_many`, `predict_many`, `predict_proba_many`. Each method
    takes as input a `pandas.DataFrame` where each column represents a feature.
    It is generally a good idea to scale the data beforehand in order for the optimizer to
    converge. You can do this online with a `preprocessing.StandardScaler`.
    Parameters
    ----------
    optimizer
        The sequential optimizer used for updating the weights. Note that the intercept updates are
        handled separately.
    loss
        The loss function to optimize for.
    l2
        Amount of L2 regularization used to push weights towards 0.
    intercept
        Initial intercept value.
    intercept_lr
        Learning rate scheduler used for updating the intercept. A `optim.schedulers.Constant` is
        used if a `float` is provided. The intercept is not updated when this is set to 0.
    clip_gradient
        Clips the absolute value of each gradient value.
    initializer
        Weights initialization scheme.
    Attributes
    ----------
    weights : dict
        The current weights.
    Examples
    --------
    >>> from river import datasets
    >>> from river import evaluate
    >>> from river import linear_model
    >>> from river import metrics
    >>> from river import preprocessing
    >>> dataset = datasets.TrumpApproval()
    >>> model = (
    ...     preprocessing.StandardScaler() |
    ...     linear_model.LinearRegression(intercept_lr=.1)
    ... )
    >>> metric = metrics.MAE()
    >>> evaluate.progressive_val_score(dataset, model, metric)
    MAE: 0.555971
    >>> model['LinearRegression'].intercept
    35.617670
    You can call the `debug_one` method to break down a prediction. This works even if the
    linear regression is part of a pipeline.
    >>> x, y = next(iter(dataset))
    >>> report = model.debug_one(x)
    >>> print(report)
    0. Input
    --------
    gallup: 43.84321 (float)
    ipsos: 46.19925 (float)
    morning_consult: 48.31875 (float)
    ordinal_date: 736389 (int)
    rasmussen: 44.10469 (float)
    you_gov: 43.63691 (float)
    <BLANKLINE>
    1. StandardScaler
    -----------------
    gallup: 1.18810 (float)
    ipsos: 2.10348 (float)
    morning_consult: 2.73545 (float)
    ordinal_date: -1.73032 (float)
    rasmussen: 1.26872 (float)
    you_gov: 1.48391 (float)
    <BLANKLINE>
    2. LinearRegression
    -------------------
    Name                 Value      Weight     Contribution
          Intercept    1.00000   35.61767         35.61767
              ipsos    2.10348    0.62689          1.31866
    morning_consult    2.73545    0.24180          0.66144
             gallup    1.18810    0.43568          0.51764
          rasmussen    1.26872    0.28118          0.35674
            you_gov    1.48391    0.03123          0.04634
       ordinal_date   -1.73032    3.45162         -5.97242
    <BLANKLINE>
    Prediction: 32.54607
    """
    def __init__(self, optimizer: optim.Optimizer = None, loss: optim.losses.RegressionLoss = None,
                 l2=.0, intercept=0.,
                 intercept_lr: typing.Union[optim.schedulers.Scheduler, float] = .01,
                 clip_gradient=1e+12, initializer: optim.initializers.Initializer = None):
        # Defaults: SGD(0.01) optimizer, squared loss, zero-initialized weights.
        super().__init__(
            optimizer=optim.SGD(.01) if optimizer is None else optimizer,
            loss=optim.losses.Squared() if loss is None else loss,
            intercept=intercept,
            intercept_lr=intercept_lr,
            l2=l2,
            clip_gradient=clip_gradient,
            initializer=initializer if initializer else optim.initializers.Zeros()
        )
    def predict_one(self, x):
        """Predict the target of a single sample ``x`` (dict of features)."""
        return self.loss.mean_func(self._raw_dot_one(x))
    def predict_many(self, X):
        """Predict the targets of a mini-batch ``X`` as a pandas.Series."""
        return pd.Series(
            self.loss.mean_func(self._raw_dot_many(X)),
            index=X.index,
            # Named after the y series seen during the last learn_many call.
            name=self._y_name,
            copy=False
        )
    def debug_one(self, x: dict, decimals=5) -> str:
        """Debugs the output of the linear regression.

        Parameters
        ----------
        x
            A dictionary of features.
        decimals
            The number of decimals use for printing each numeric value.

        Returns
        -------
        A table which explains the output.
        """
        def fmt_float(x):
            return '{: ,.{prec}f}'.format(x, prec=decimals)
        names = list(map(str, x.keys())) + ['Intercept']
        values = list(map(fmt_float, list(x.values()) + [1]))
        weights = list(map(fmt_float, [self._weights.get(i, 0) for i in x] + [self.intercept]))
        contributions = [xi * self._weights.get(i, 0) for i, xi in x.items()] + [self.intercept]
        # Rows are shown by decreasing contribution (argsort is ascending).
        order = reversed(np.argsort(contributions))
        contributions = list(map(fmt_float, contributions))
        table = utils.pretty.print_table(
            headers=['Name', 'Value', 'Weight', 'Contribution'],
            columns=[names, values, weights, contributions],
            order=order
        )
        return table
class LogisticRegression(GLM, base.MiniBatchClassifier):
"""Logistic regression.
This estimator supports learning with mini-batches. On top of the single instance methods, it
provides the following methods: `learn_many`, `predict_many`, `predict_proba_many`. Each method
takes as input a `pandas.DataFrame` where each column represents a feature.
It is generally a good idea to scale the data beforehand in order for the optimizer to
converge. You can do this online with a `preprocessing.StandardScaler`.
Parameters
----------
optimizer
The sequential optimizer used for updating the weights. Note that the intercept is handled
separately.
loss
The loss function to optimize for. Defaults to `optim.losses.Log`.
l2
Amount of L2 regularization used to push weights towards 0.
intercept
Initial intercept value.
intercept_lr
Learning rate scheduler used for updating the intercept. A `optim.schedulers.Constant` is
used if a `float` is provided. The intercept is not updated when this is set to 0.
clip_gradient
Clips the absolute value of each gradient value.
initializer
Weights initialization scheme.
Attributes
----------
weights
The current weights.
Examples
--------
>>> from river import datasets
>>> from river import evaluate
>>> from river import linear_model
>>> from river import metrics
>>> from river import optim
>>> from river import preprocessing
>>> dataset = datasets.Phishing()
>>> model = (
... preprocessing.StandardScaler() |
... linear_model.LogisticRegression(optimizer=optim.SGD(.1))
... )
>>> metric = metrics.Accuracy()
>>> evaluate.progressive_val_score(dataset, model, metric)
Accuracy: 88.96%
"""
def __init__(self, optimizer: optim.Optimizer = None, loss: optim.losses.BinaryLoss = None,
l2=.0, intercept=0.,
intercept_lr: typing.Union[float, optim.schedulers.Scheduler] = .01,
clip_gradient=1e12, initializer: optim.initializers.Initializer = None):
super().__init__(
optimizer=optim.SGD(.01) if optimizer is None else optimizer,
loss=optim.losses.Log() if loss is None else loss,
intercept=intercept,
intercept_lr=intercept_lr,
l2=l2,
clip_gradient=clip_gradient,
initializer=initializer if initializer else optim.initializers.Zeros()
)
def predict_proba_one(self, x):
p = self.loss.mean_func(self._raw_dot_one(x)) # Convert logit to probability
return {False: 1. - p, True: p}
def predict_proba_many(self, X: pd.DataFrame) -> pd.DataFrame:
p = self.loss.mean_func(self._raw_dot_many(X)) # Convert logits to probabilities
return pd.DataFrame({False: 1. - p, True: p}, index=X.index, copy=False)
class Perceptron(LogisticRegression):
"""Perceptron classifier.
In this implementation, the Perceptron is viewed as a special case of the logistic regression.
The loss function that is used is the Hinge loss with a threshold set to 0, whilst the learning
rate of the stochastic gradient descent procedure is set to 1 for both the weights and the
intercept.
Parameters
----------
l2
Amount of L2 regularization used to push weights towards 0.
clip_gradient
Clips the absolute value of each gradient value.
initializer
Weights initialization scheme.
Attributes
----------
weights
The current weights.
Examples
--------
>>> from river import datasets
>>> from river import evaluate
>>> from river import linear_model as lm
>>> from river import metrics
>>> from river import preprocessing as pp
>>> dataset = datasets.Phishing()
>>> model = pp.StandardScaler() | lm.Perceptron()
>>> metric = metrics.Accuracy()
>>> evaluate.progressive_val_score(dataset, model, metric)
Accuracy: 85.84%
"""
def __init__(self, l2=.0, clip_gradient=1e12,
initializer: optim.initializers.Initializer = None):
super().__init__(
optimizer=optim.SGD(1),
intercept_lr=1,
loss=optim.losses.Hinge(threshold=0.),
l2=l2,
clip_gradient=clip_gradient,
initializer=initializer
)
| [
"numpy.clip",
"river.utils.pretty.print_table",
"river.optim.schedulers.Constant",
"river.optim.SGD",
"river.utils.VectorDict",
"river.optim.losses.Log",
"river.utils.math.clamp",
"numpy.argsort",
"river.optim.losses.Squared",
"numpy.einsum",
"river.optim.initializers.Zeros",
"pandas.DataFrame... | [((865, 887), 'river.utils.VectorDict', 'utils.VectorDict', (['None'], {}), '(None)\n', (881, 887), False, 'from river import utils\n'), ((3188, 3251), 'numpy.clip', 'np.clip', (['loss_gradient', '(-self.clip_gradient)', 'self.clip_gradient'], {}), '(loss_gradient, -self.clip_gradient, self.clip_gradient)\n', (3195, 3251), True, 'import numpy as np\n'), ((9090, 9234), 'river.utils.pretty.print_table', 'utils.pretty.print_table', ([], {'headers': "['Name', 'Value', 'Weight', 'Contribution']", 'columns': '[names, values, weights, contributions]', 'order': 'order'}), "(headers=['Name', 'Value', 'Weight', 'Contribution'\n ], columns=[names, values, weights, contributions], order=order)\n", (9114, 9234), False, 'from river import utils\n'), ((12202, 12272), 'pandas.DataFrame', 'pd.DataFrame', (['{(False): 1.0 - p, (True): p}'], {'index': 'X.index', 'copy': '(False)'}), '({(False): 1.0 - p, (True): p}, index=X.index, copy=False)\n', (12214, 12272), True, 'import pandas as pd\n'), ((623, 662), 'river.optim.schedulers.Constant', 'optim.schedulers.Constant', (['intercept_lr'], {}), '(intercept_lr)\n', (648, 662), False, 'from river import optim\n'), ((1368, 1417), 'river.utils.VectorDict', 'utils.VectorDict', (['weights', 'self.initializer', 'mask'], {}), '(weights, self.initializer, mask)\n', (1384, 1417), False, 'from river import utils\n'), ((2363, 2435), 'river.utils.math.clamp', 'utils.math.clamp', (['loss_gradient', '(-self.clip_gradient)', 'self.clip_gradient'], {}), '(loss_gradient, -self.clip_gradient, self.clip_gradient)\n', (2379, 2435), False, 'from river import utils\n'), ((8986, 9011), 'numpy.argsort', 'np.argsort', (['contributions'], {}), '(contributions)\n', (8996, 9011), True, 'import numpy as np\n'), ((2105, 2124), 'river.utils.VectorDict', 'utils.VectorDict', (['x'], {}), '(x)\n', (2121, 2124), False, 'from river import utils\n'), ((3697, 3743), 'numpy.einsum', 'np.einsum', (['"""ij,i->ij"""', 'X.values', 'loss_gradient'], {}), 
"('ij,i->ij', X.values, loss_gradient)\n", (3706, 3743), True, 'import numpy as np\n'), ((13573, 13585), 'river.optim.SGD', 'optim.SGD', (['(1)'], {}), '(1)\n', (13582, 13585), False, 'from river import optim\n'), ((13632, 13665), 'river.optim.losses.Hinge', 'optim.losses.Hinge', ([], {'threshold': '(0.0)'}), '(threshold=0.0)\n', (13650, 13665), False, 'from river import optim\n'), ((2469, 2488), 'river.utils.VectorDict', 'utils.VectorDict', (['x'], {}), '(x)\n', (2485, 2488), False, 'from river import utils\n'), ((7550, 7565), 'river.optim.SGD', 'optim.SGD', (['(0.01)'], {}), '(0.01)\n', (7559, 7565), False, 'from river import optim\n'), ((7619, 7641), 'river.optim.losses.Squared', 'optim.losses.Squared', ([], {}), '()\n', (7639, 7641), False, 'from river import optim\n'), ((7857, 7883), 'river.optim.initializers.Zeros', 'optim.initializers.Zeros', ([], {}), '()\n', (7881, 7883), False, 'from river import optim\n'), ((11526, 11541), 'river.optim.SGD', 'optim.SGD', (['(0.01)'], {}), '(0.01)\n', (11535, 11541), False, 'from river import optim\n'), ((11595, 11613), 'river.optim.losses.Log', 'optim.losses.Log', ([], {}), '()\n', (11611, 11613), False, 'from river import optim\n'), ((11829, 11855), 'river.optim.initializers.Zeros', 'optim.initializers.Zeros', ([], {}), '()\n', (11853, 11855), False, 'from river import optim\n')] |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified by: <NAME>, <NAME>
"""An example of code submission for the AutoDL challenge.
It implements 3 compulsory methods ('__init__', 'train' and 'test') and
an attribute 'done_training' for indicating if the model will not proceed more
training due to convergence or limited time budget.
To create a valid submission, zip model.py together with other necessary files
such as Python modules/packages, pre-trained weights, etc. The final zip file
should not exceed 300MB.
"""
import logging
import numpy as np
import os
import sys
import tensorflow as tf
class Model(object):
"""Trivial example of valid model. Returns all-zero predictions."""
def __init__(self, metadata):
"""
Args:
metadata: an AutoDLMetadata object. Its definition can be found in
AutoDL_ingestion_program/dataset.py
"""
self.done_training = False
self.metadata = metadata
def train(self, dataset, remaining_time_budget=None):
"""Train this algorithm on the tensorflow |dataset|.
This method will be called REPEATEDLY during the whole training/predicting
process. So your `train` method should be able to handle repeated calls and
hopefully improve your model performance after each call.
****************************************************************************
****************************************************************************
IMPORTANT: the loop of calling `train` and `test` will only run if
self.done_training = False
(the corresponding code can be found in ingestion.py, search
'M.done_training')
Otherwise, the loop will go on until the time budget is used up. Please
pay attention to set self.done_training = True when you think the model is
converged or when there is not enough time for next round of training.
****************************************************************************
****************************************************************************
Args:
dataset: a `tf.data.Dataset` object. Each of its examples is of the form
(example, labels)
where `example` is a dense 4-D Tensor of shape
(sequence_size, row_count, col_count, num_channels)
and `labels` is a 1-D Tensor of shape
(output_dim,).
Here `output_dim` represents number of classes of this
multilabel classification task.
IMPORTANT: some of the dimensions of `example` might be `None`,
which means the shape on this dimension might be variable. In this
case, some preprocessing technique should be applied in order to
feed the training of a neural network. For example, if an image
dataset has `example` of shape
(1, None, None, 3)
then the images in this datasets may have different sizes. On could
apply resizing, cropping or padding in order to have a fixed size
input tensor.
remaining_time_budget: a float, time remaining to execute train(). The method
should keep track of its execution time to avoid exceeding its time
budget. If remaining_time_budget is None, no time budget is imposed.
"""
logger.info("This basic sample code doesn't do any training, " +
"but will retrieve some information on the dataset:")
iterator = dataset.make_one_shot_iterator()
example, labels = iterator.get_next()
sample_count = 0
with tf.Session() as sess:
while True:
try:
sess.run(labels)
sample_count += 1
except tf.errors.OutOfRangeError:
break
logger.info("Number of training examples: {}".format(sample_count))
logger.info("Shape of example: {}".format(example.shape))
logger.info("Number of classes: {}".format(labels.shape[0]))
assert self.metadata.get_output_size() == labels.shape[0]
self.done_training = True
def test(self, dataset, remaining_time_budget=None):
"""Make predictions on the test set `dataset` (which is different from that
of the method `train`).
Args:
Same as that of `train` method, except that the labels will be empty
(all zeros) since this time `dataset` is a test set.
Returns:
predictions: A `numpy.ndarray` matrix of shape (sample_count, output_dim).
here `sample_count` is the number of examples in this dataset as test
set and `output_dim` is the number of labels to be predicted. The
values should be binary or in the interval [0,1].
"""
sample_count = 0
iterator = dataset.make_one_shot_iterator()
example, labels = iterator.get_next()
with tf.Session() as sess:
while True:
try:
sess.run(labels)
sample_count += 1
except tf.errors.OutOfRangeError:
break
logger.info("Number of test examples: {}".format(sample_count))
output_dim = self.metadata.get_output_size()
predictions = np.zeros((sample_count, output_dim))
return predictions
##############################################################################
#### Above 3 methods (__init__, train, test) should always be implemented ####
##############################################################################
def get_logger(verbosity_level):
"""Set logging format to something like:
2019-04-25 12:52:51,924 INFO model.py: <message>
"""
logger = logging.getLogger(__file__)
logging_level = getattr(logging, verbosity_level)
logger.setLevel(logging_level)
formatter = logging.Formatter(
fmt='%(asctime)s %(levelname)s %(filename)s: %(message)s')
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging_level)
stdout_handler.setFormatter(formatter)
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setLevel(logging.WARNING)
stderr_handler.setFormatter(formatter)
logger.addHandler(stdout_handler)
logger.addHandler(stderr_handler)
logger.propagate = False
return logger
logger = get_logger('INFO')
| [
"logging.getLogger",
"logging.StreamHandler",
"logging.Formatter",
"tensorflow.Session",
"numpy.zeros"
] | [((6029, 6056), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (6046, 6056), False, 'import logging\n'), ((6156, 6232), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': '"""%(asctime)s %(levelname)s %(filename)s: %(message)s"""'}), "(fmt='%(asctime)s %(levelname)s %(filename)s: %(message)s')\n", (6173, 6232), False, 'import logging\n'), ((6257, 6290), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (6278, 6290), False, 'import logging\n'), ((6392, 6425), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stderr'], {}), '(sys.stderr)\n', (6413, 6425), False, 'import logging\n'), ((5575, 5611), 'numpy.zeros', 'np.zeros', (['(sample_count, output_dim)'], {}), '((sample_count, output_dim))\n', (5583, 5611), True, 'import numpy as np\n'), ((4066, 4078), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4076, 4078), True, 'import tensorflow as tf\n'), ((5274, 5286), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5284, 5286), True, 'import tensorflow as tf\n')] |
import sys
import argparse
import os
import cv2
import yaml
from PIL import Image
from importlib.machinery import SourceFileLoader
import torch
from torch import nn
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import pandas
import numpy
__filedir__ = os.path.dirname(os.path.realpath(__file__))
# network_module = SourceFileLoader(".", os.path.join(__filedir__, "network.py")).load_module()
import feature_graph.models.dtoid.network as network_module
class DTOIDWrapper(nn.Module):
def __init__(self, backend="cuda", no_filter_z=False):
super(DTOIDWrapper, self).__init__()
# Initialize the network
model = network_module.Network()
model.eval()
# model_path = os.path.join(__filedir__, "model.pth.tar")
model_path = "/home/qiaog/src/DTOID/model.pth.tar"
checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint["state_dict"])
if backend == "cuda":
model = model.cuda()
self.model = model
self.backend = backend
self.no_filter_z = no_filter_z
self.preprocess = network_module.PREPROCESS
# self.model_directory = os.path.join(__filedir__, "templates")
self.model_directory = "/home/qiaog/src/DTOID/templates"
self.template_cache = {}
def clearCache(self):
del self.template_cache
self.template_cache = {}
def getTemplates(self, linemod_model):
'''
linemod_model: str of the linemod object ID ("01", "02", ...)
'''
if linemod_model in self.template_cache:
return
assert type(linemod_model) is str
model_name = "hinterstoisser_" + linemod_model
template_dir = os.path.join(self.model_directory, model_name)
output_file = "{}.yml".format(model_name)
#load text file
pose_file = os.path.join(template_dir, "poses.txt")
pose_file_np = pandas.read_csv(pose_file, sep=" ", header=None).values
pose_z_values = pose_file_np[:, 11]
# Template
global_template_list = []
template_paths = [x for x in os.listdir(template_dir) if len(x) == 12 and "_a.png" in x]
template_paths.sort()
preprocessed_templates = []
# features for all templates (240)
template_list = []
template_global_list = []
template_ratios_list = []
batch_size = 10
temp_batch_local = []
temp_batch_global = []
temp_batch_ratios = []
iteration = 0
for t in tqdm(template_paths):
# open template and template mask
template_im = cv2.imread(os.path.join(template_dir, t))[:, :, ::-1]
template = Image.fromarray(template_im)
template_mask = cv2.imread(os.path.join(template_dir, t.replace("_a", "_m")))[:, :, 0]
template_mask = Image.fromarray(template_mask)
# preprocess and concatenate
template = self.preprocess[1](template)
template_mask = self.preprocess[2](template_mask)
template = torch.cat([template, template_mask], dim=0)
if self.backend == "cuda":
template = template.cuda()
template_feature = self.model.compute_template_local(template.unsqueeze(0))
# Create mini-batches of templates
if iteration == 0:
temp_batch_local = template_feature
template_feature_global = self.model.compute_template_global(template.unsqueeze(0))
template_global_list.append(template_feature_global)
elif iteration % (batch_size) == 0:
template_list.append(temp_batch_local)
temp_batch_local = template_feature
elif iteration == (len(template_paths) - 1):
temp_batch_local = torch.cat([temp_batch_local, template_feature], dim=0)
template_list.append(temp_batch_local)
else:
temp_batch_local= torch.cat([temp_batch_local, template_feature], dim=0)
iteration += 1
self.template_cache[linemod_model] = (template_list, template_global_list, pose_z_values)
def forward(self, img_numpy, obj_id):
template_list, template_global_list, pose_z_values = self.template_cache[obj_id]
img_h, img_w, img_c = img_numpy.shape
img = Image.fromarray(img_numpy)
img = self.preprocess[0](img)
network_h = img.size(1)
network_w = img.size(2)
if self.backend == "cuda":
img = img.cuda()
top_k_num = 500
top_k_scores, top_k_bboxes, top_k_template_ids, seg_pred = self.model.forward_all_templates(
img.unsqueeze(0), template_list, template_global_list, topk=top_k_num)
pred_seg_np = seg_pred.cpu().numpy()
pred_scores_np = top_k_scores.cpu().numpy()
pred_bbox_np = top_k_bboxes.cpu().numpy()
pred_template_ids = top_k_template_ids[:, 0].long().cpu().numpy()
template_z_values = pose_z_values[pred_template_ids]
if not self.no_filter_z:
pred_w_np = pred_bbox_np[:, 2] - pred_bbox_np[:, 0]
pred_h_np = pred_bbox_np[:, 3] - pred_bbox_np[:, 1]
pred_max_dim_np = np.stack([pred_w_np, pred_h_np]).transpose().max(axis=1)
pred_z = (124 / pred_max_dim_np) * -template_z_values
# Filter based on predicted Z values
pred_z_conds = (pred_z > 0.4) & (pred_z < 2)
pred_z_conds_ids = numpy.where(pred_z_conds)[0]
pred_scores_np = pred_scores_np[pred_z_conds_ids]
pred_bbox_np = pred_bbox_np[pred_z_conds_ids]
pred_template_ids = pred_template_ids[pred_z_conds_ids]
pred_z = pred_z[pred_z_conds_ids]
# Keep top 1 (eval)
pred_scores_np = pred_scores_np[:1]
pred_bbox_np = pred_bbox_np[:1]
pred_template_ids = pred_template_ids[:1]
pred_z = pred_z[:1]
pred_seg_np = pred_seg_np[:1]
output = {
"pred_bbox_np": pred_bbox_np,
"pred_scores_np": pred_scores_np,
"pred_seg_np": pred_seg_np,
"pred_template_ids": pred_template_ids,
"network_w": network_w,
"network_h": network_h,
"img_h": img_h,
"img_w": img_w,
}
return output
| [
"feature_graph.models.dtoid.network.Network",
"PIL.Image.fromarray",
"os.listdir",
"pandas.read_csv",
"numpy.where",
"torch.load",
"tqdm.tqdm",
"os.path.join",
"os.path.realpath",
"numpy.stack",
"torch.cat"
] | [((296, 322), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (312, 322), False, 'import os\n'), ((668, 692), 'feature_graph.models.dtoid.network.Network', 'network_module.Network', ([], {}), '()\n', (690, 692), True, 'import feature_graph.models.dtoid.network as network_module\n'), ((860, 925), 'torch.load', 'torch.load', (['model_path'], {'map_location': '(lambda storage, loc: storage)'}), '(model_path, map_location=lambda storage, loc: storage)\n', (870, 925), False, 'import torch\n'), ((1794, 1840), 'os.path.join', 'os.path.join', (['self.model_directory', 'model_name'], {}), '(self.model_directory, model_name)\n', (1806, 1840), False, 'import os\n'), ((1936, 1975), 'os.path.join', 'os.path.join', (['template_dir', '"""poses.txt"""'], {}), "(template_dir, 'poses.txt')\n", (1948, 1975), False, 'import os\n'), ((2612, 2632), 'tqdm.tqdm', 'tqdm', (['template_paths'], {}), '(template_paths)\n', (2616, 2632), False, 'from tqdm import tqdm\n'), ((4454, 4480), 'PIL.Image.fromarray', 'Image.fromarray', (['img_numpy'], {}), '(img_numpy)\n', (4469, 4480), False, 'from PIL import Image\n'), ((1999, 2047), 'pandas.read_csv', 'pandas.read_csv', (['pose_file'], {'sep': '""" """', 'header': 'None'}), "(pose_file, sep=' ', header=None)\n", (2014, 2047), False, 'import pandas\n'), ((2783, 2811), 'PIL.Image.fromarray', 'Image.fromarray', (['template_im'], {}), '(template_im)\n', (2798, 2811), False, 'from PIL import Image\n'), ((2940, 2970), 'PIL.Image.fromarray', 'Image.fromarray', (['template_mask'], {}), '(template_mask)\n', (2955, 2970), False, 'from PIL import Image\n'), ((3150, 3193), 'torch.cat', 'torch.cat', (['[template, template_mask]'], {'dim': '(0)'}), '([template, template_mask], dim=0)\n', (3159, 3193), False, 'import torch\n'), ((2190, 2214), 'os.listdir', 'os.listdir', (['template_dir'], {}), '(template_dir)\n', (2200, 2214), False, 'import os\n'), ((5616, 5641), 'numpy.where', 'numpy.where', (['pred_z_conds'], {}), 
'(pred_z_conds)\n', (5627, 5641), False, 'import numpy\n'), ((2717, 2746), 'os.path.join', 'os.path.join', (['template_dir', 't'], {}), '(template_dir, t)\n', (2729, 2746), False, 'import os\n'), ((3916, 3970), 'torch.cat', 'torch.cat', (['[temp_batch_local, template_feature]'], {'dim': '(0)'}), '([temp_batch_local, template_feature], dim=0)\n', (3925, 3970), False, 'import torch\n'), ((4079, 4133), 'torch.cat', 'torch.cat', (['[temp_batch_local, template_feature]'], {'dim': '(0)'}), '([temp_batch_local, template_feature], dim=0)\n', (4088, 4133), False, 'import torch\n'), ((5355, 5387), 'numpy.stack', 'np.stack', (['[pred_w_np, pred_h_np]'], {}), '([pred_w_np, pred_h_np])\n', (5363, 5387), True, 'import numpy as np\n')] |
import os
import random
import numpy as np
import tensorflow as tf
random.seed(1234)
def load_or_initialize_model(sess, saver, model_name, model_path):
sess.run(tf.global_variables_initializer())
if os.path.isfile(model_path+model_name+'/'+model_name+'.ckpt.meta'):
saver.restore(sess, model_path+model_name+'/'+model_name+'.ckpt')
print(model_name+" Model restored.")
return True
else:
print(model_name+ " Model initialized.")
return False
def get_shuffled_train_data(data):
for k in range(10):
random.shuffle(data)
return data
def save_model(sess, saver, model_name, model_path):
# Save model weights to disk
if not os.path.isdir(model_path + model_name + '/'):
os.makedirs(model_path + model_name + '/')
save_path = saver.save(sess, model_path + model_name + '/' + model_name+".ckpt")
print(" Model saved in file: %s at episode:" % save_path)
def compute_taging_accuracy(batch_X_tag_label, batch_X_q_len, pred_viterbi_sequence):
acc = []
for i in range(len(batch_X_tag_label)):
label_vec = batch_X_tag_label[i][:batch_X_q_len[i]]
pred_sequence_vec = pred_viterbi_sequence[i][:batch_X_q_len[i]]
acc.append(np.mean(pred_sequence_vec[label_vec == 1]))
return np.mean(acc)
def get_next_batch_action(data, itr, batch_size):
batch = data[(itr * batch_size):(itr * batch_size) + batch_size]
# categorical_input
batch_X_q = []
batch_X_pos_act_name = []
batch_X_pos_act_para_names = []
batch_X_neg_act_name = []
batch_X_neg_act_para_names = []
bert_X_query = { 'input_ids': [], 'input_mask': [], 'segment_ids':[]}
bert_X_pos_action_name = {'input_ids': [], 'input_mask': [], 'segment_ids': []}
bert_X_pos_act_para_names = {'input_ids': [], 'input_mask': [], 'segment_ids': []}
bert_X_neg_action_name = {'input_ids': [], 'input_mask': [], 'segment_ids': []}
bert_X_neg_act_para_names = {'input_ids': [], 'input_mask': [], 'segment_ids': []}
for q_vec, pos_action_name, pos_action_para_names, pos_action_para_dom, \
neg_act_list, pos_act_bert_input, neg_act_bert_input_list in batch:
batch_X_q.append(q_vec)
batch_X_pos_act_name.append(pos_action_name)
batch_X_pos_act_para_names.append(pos_action_para_names)
neg_act_tup = random.choice(neg_act_list)
batch_X_neg_act_name.append(neg_act_tup[0])
batch_X_neg_act_para_names.append(neg_act_tup[1])
# bert inputs
add_bert_input(bert_X_query, pos_act_bert_input[0])
add_bert_input(bert_X_pos_action_name, pos_act_bert_input[1])
add_bert_input(bert_X_pos_act_para_names, pos_act_bert_input[2])
neg_bert_input_p1 = random.choice(neg_act_bert_input_list)
add_bert_input(bert_X_neg_action_name, neg_bert_input_p1[0])
add_bert_input(bert_X_neg_act_para_names, neg_bert_input_p1[1])
action_batch = {'query': np.array(batch_X_q), 'pos_act_name': np.array(batch_X_pos_act_name),
'pos_act_para_names': np.array(batch_X_pos_act_para_names),
'neg_act_name': np.array(batch_X_neg_act_name), 'neg_act_para_names': np.array(batch_X_neg_act_para_names),
'bert_in_query': (np.array(bert_X_query['input_ids']),
np.array(bert_X_query['input_mask']),
np.array(bert_X_query['segment_ids'])),
'bert_in_pos_act_name': (np.array(bert_X_pos_action_name['input_ids']),
np.array(bert_X_pos_action_name['input_mask']),
np.array(bert_X_pos_action_name['segment_ids'])),
'bert_in_pos_act_para_names': (np.array(bert_X_pos_act_para_names['input_ids']),
np.array(bert_X_pos_act_para_names['input_mask']),
np.array(bert_X_pos_act_para_names['segment_ids'])),
'bert_in_neg_act_name': (np.array(bert_X_neg_action_name['input_ids']),
np.array(bert_X_neg_action_name['input_mask']),
np.array(bert_X_neg_action_name['segment_ids'])),
'bert_in_neg_act_para_names': (np.array(bert_X_neg_act_para_names['input_ids']),
np.array(bert_X_neg_act_para_names['input_mask']),
np.array(bert_X_neg_act_para_names['segment_ids']))
}
return action_batch
def add_bert_input(batch_X_bert, bert_in_tup):
batch_X_bert['input_ids'].append(bert_in_tup[0])
batch_X_bert['input_mask'].append(bert_in_tup[1])
batch_X_bert['segment_ids'].append(bert_in_tup[2])
def get_next_batch_para_tagger(data, itr, batch_size):
batch = data[(itr * batch_size):(itr * batch_size) + batch_size]
# categorical_input
batch_X_q = []
batch_X_q_char = []
batch_X_q_ent = []
batch_X_pos_para_name = []
batch_X_para_type = []
batch_X_label_seq = []
batch_X_q_len = []
bert_X_tagg_in = {'input_ids': [], 'input_mask': [], 'segment_ids': []}
bert_X_label_st = []
bert_X_label_end = []
for q_vec, pos_para_type, pos_para_name, label_vec, q_len, \
q_char_vec, q_ent_vec, bert_input_tagging, gold_label_ids in batch:
batch_X_q.append(q_vec)
batch_X_q_char.append(q_char_vec)
batch_X_q_ent.append(q_ent_vec)
batch_X_pos_para_name.append(pos_para_name)
batch_X_para_type.append(pos_para_type)
batch_X_label_seq.append(label_vec)
batch_X_q_len.append(q_len)
# bert inputs
add_bert_input(bert_X_tagg_in, bert_input_tagging)
bert_X_label_st.append(gold_label_ids[0])
bert_X_label_end.append(gold_label_ids[1])
para_batch = {'query': np.array(batch_X_q), 'q_char': np.array(batch_X_q_char), 'q_ent': np.array(batch_X_q_ent),
'para_type': np.array(batch_X_para_type),
'pos_para_name': np.array(batch_X_pos_para_name),
'tag_label': np.array(batch_X_label_seq),
'q_len': np.array(batch_X_q_len),
'bert_tagg_in': (np.array(bert_X_tagg_in['input_ids']),
np.array(bert_X_tagg_in['input_mask']),
np.array(bert_X_tagg_in['segment_ids'])),
'bert_tag_label_st': np.array(bert_X_label_st),
'bert_tag_label_end': np.array(bert_X_label_end)
}
return para_batch
def get_next_batch_para_matcher(data, itr, batch_size):
batch = data[(itr * batch_size):(itr * batch_size) + batch_size]
batch_X_q_para_val = []
batch_X_q_para_val_char = []
batch_X_pos_para_name = []
batch_X_para_type = []
batch_X_pos_para_val = []
batch_X_pos_para_val_char = []
batch_X_pos_ext_match_score = []
batch_X_neg_para_val = []
batch_X_neg_para_val_char = []
batch_X_neg_ext_match_score = []
bert_X_pos_para_name = {'input_ids': [], 'input_mask': [], 'segment_ids': []}
bert_X_pos_para_val = {'input_ids': [], 'input_mask': [], 'segment_ids': []}
bert_X_neg_para_val = {'input_ids': [], 'input_mask': [], 'segment_ids': []}
for q_para_val_vec, pos_para_type, pos_para_name, pos_para_val, pos_ext_match_score, \
neg_para_val_list, pos_para_bert_input, neg_para_bert_input_list, \
q_para_val_vec_char, pos_para_val_vec_char in batch:
batch_X_q_para_val.append(q_para_val_vec)
batch_X_q_para_val_char.append(q_para_val_vec_char)
batch_X_pos_para_name.append(pos_para_name)
batch_X_para_type.append(pos_para_type)
batch_X_pos_para_val.append(pos_para_val)
batch_X_pos_para_val_char.append(pos_para_val_vec_char)
batch_X_pos_ext_match_score.append(pos_ext_match_score)
neg_tup = random.choice(neg_para_val_list) # (neg_para_val_vec, neg_val_ext_match_score,
# neg_para_val_vec_char, q_para_match_vec_neg)
batch_X_neg_para_val.append(neg_tup[0])
batch_X_neg_ext_match_score.append(neg_tup[1])
batch_X_neg_para_val_char.append(neg_tup[2])
# bert inputs
add_bert_input(bert_X_pos_para_name, pos_para_bert_input[0])
add_bert_input(bert_X_pos_para_val, pos_para_bert_input[1])
neg_bert_input_p1 = random.choice(neg_para_bert_input_list)
add_bert_input(bert_X_neg_para_val, neg_bert_input_p1)
para_batch = {'q_para_val': np.array(batch_X_q_para_val),
'q_para_val_char': np.array(batch_X_q_para_val_char),
'para_type': np.array(batch_X_para_type),
'pos_para_name': np.array(batch_X_pos_para_name),
'pos_para_val': np.array(batch_X_pos_para_val),
'pos_para_val_char': np.array(batch_X_pos_para_val_char),
'pos_ext_match_score': np.array(batch_X_pos_ext_match_score),
'neg_para_val': np.array(batch_X_neg_para_val),
'neg_para_val_char': np.array(batch_X_neg_para_val_char),
'neg_ext_match_score': np.array(batch_X_neg_ext_match_score),
'bert_in_pos_para_name': (np.array(bert_X_pos_para_name['input_ids']),
np.array(bert_X_pos_para_name['input_mask']),
np.array(bert_X_pos_para_name['segment_ids'])),
'bert_in_pos_para_val': (np.array(bert_X_pos_para_val['input_ids']),
np.array(bert_X_pos_para_val['input_mask']),
np.array(bert_X_pos_para_val['segment_ids'])),
'bert_in_neg_para_val': (np.array(bert_X_neg_para_val['input_ids']),
np.array(bert_X_neg_para_val['input_mask']),
np.array(bert_X_neg_para_val['segment_ids'])),
}
return para_batch
| [
"numpy.mean",
"random.choice",
"random.shuffle",
"os.makedirs",
"random.seed",
"os.path.isfile",
"tensorflow.global_variables_initializer",
"numpy.array",
"os.path.isdir"
] | [((69, 86), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (80, 86), False, 'import random\n'), ((212, 285), 'os.path.isfile', 'os.path.isfile', (["(model_path + model_name + '/' + model_name + '.ckpt.meta')"], {}), "(model_path + model_name + '/' + model_name + '.ckpt.meta')\n", (226, 285), False, 'import os\n'), ((1301, 1313), 'numpy.mean', 'np.mean', (['acc'], {}), '(acc)\n', (1308, 1313), True, 'import numpy as np\n'), ((169, 202), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (200, 202), True, 'import tensorflow as tf\n'), ((567, 587), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (581, 587), False, 'import random\n'), ((704, 748), 'os.path.isdir', 'os.path.isdir', (["(model_path + model_name + '/')"], {}), "(model_path + model_name + '/')\n", (717, 748), False, 'import os\n'), ((758, 800), 'os.makedirs', 'os.makedirs', (["(model_path + model_name + '/')"], {}), "(model_path + model_name + '/')\n", (769, 800), False, 'import os\n'), ((2374, 2401), 'random.choice', 'random.choice', (['neg_act_list'], {}), '(neg_act_list)\n', (2387, 2401), False, 'import random\n'), ((2768, 2806), 'random.choice', 'random.choice', (['neg_act_bert_input_list'], {}), '(neg_act_bert_input_list)\n', (2781, 2806), False, 'import random\n'), ((2978, 2997), 'numpy.array', 'np.array', (['batch_X_q'], {}), '(batch_X_q)\n', (2986, 2997), True, 'import numpy as np\n'), ((3015, 3045), 'numpy.array', 'np.array', (['batch_X_pos_act_name'], {}), '(batch_X_pos_act_name)\n', (3023, 3045), True, 'import numpy as np\n'), ((3089, 3125), 'numpy.array', 'np.array', (['batch_X_pos_act_para_names'], {}), '(batch_X_pos_act_para_names)\n', (3097, 3125), True, 'import numpy as np\n'), ((3163, 3193), 'numpy.array', 'np.array', (['batch_X_neg_act_name'], {}), '(batch_X_neg_act_name)\n', (3171, 3193), True, 'import numpy as np\n'), ((3217, 3253), 'numpy.array', 'np.array', (['batch_X_neg_act_para_names'], {}), 
'(batch_X_neg_act_para_names)\n', (3225, 3253), True, 'import numpy as np\n'), ((6065, 6084), 'numpy.array', 'np.array', (['batch_X_q'], {}), '(batch_X_q)\n', (6073, 6084), True, 'import numpy as np\n'), ((6096, 6120), 'numpy.array', 'np.array', (['batch_X_q_char'], {}), '(batch_X_q_char)\n', (6104, 6120), True, 'import numpy as np\n'), ((6131, 6154), 'numpy.array', 'np.array', (['batch_X_q_ent'], {}), '(batch_X_q_ent)\n', (6139, 6154), True, 'import numpy as np\n'), ((6187, 6214), 'numpy.array', 'np.array', (['batch_X_para_type'], {}), '(batch_X_para_type)\n', (6195, 6214), True, 'import numpy as np\n'), ((6251, 6282), 'numpy.array', 'np.array', (['batch_X_pos_para_name'], {}), '(batch_X_pos_para_name)\n', (6259, 6282), True, 'import numpy as np\n'), ((6316, 6343), 'numpy.array', 'np.array', (['batch_X_label_seq'], {}), '(batch_X_label_seq)\n', (6324, 6343), True, 'import numpy as np\n'), ((6372, 6395), 'numpy.array', 'np.array', (['batch_X_q_len'], {}), '(batch_X_q_len)\n', (6380, 6395), True, 'import numpy as np\n'), ((6666, 6691), 'numpy.array', 'np.array', (['bert_X_label_st'], {}), '(bert_X_label_st)\n', (6674, 6691), True, 'import numpy as np\n'), ((6733, 6759), 'numpy.array', 'np.array', (['bert_X_label_end'], {}), '(bert_X_label_end)\n', (6741, 6759), True, 'import numpy as np\n'), ((8156, 8188), 'random.choice', 'random.choice', (['neg_para_val_list'], {}), '(neg_para_val_list)\n', (8169, 8188), False, 'import random\n'), ((8690, 8729), 'random.choice', 'random.choice', (['neg_para_bert_input_list'], {}), '(neg_para_bert_input_list)\n', (8703, 8729), False, 'import random\n'), ((8826, 8854), 'numpy.array', 'np.array', (['batch_X_q_para_val'], {}), '(batch_X_q_para_val)\n', (8834, 8854), True, 'import numpy as np\n'), ((8893, 8926), 'numpy.array', 'np.array', (['batch_X_q_para_val_char'], {}), '(batch_X_q_para_val_char)\n', (8901, 8926), True, 'import numpy as np\n'), ((8960, 8987), 'numpy.array', 'np.array', (['batch_X_para_type'], {}), 
'(batch_X_para_type)\n', (8968, 8987), True, 'import numpy as np\n'), ((9024, 9055), 'numpy.array', 'np.array', (['batch_X_pos_para_name'], {}), '(batch_X_pos_para_name)\n', (9032, 9055), True, 'import numpy as np\n'), ((9092, 9122), 'numpy.array', 'np.array', (['batch_X_pos_para_val'], {}), '(batch_X_pos_para_val)\n', (9100, 9122), True, 'import numpy as np\n'), ((9163, 9198), 'numpy.array', 'np.array', (['batch_X_pos_para_val_char'], {}), '(batch_X_pos_para_val_char)\n', (9171, 9198), True, 'import numpy as np\n'), ((9241, 9278), 'numpy.array', 'np.array', (['batch_X_pos_ext_match_score'], {}), '(batch_X_pos_ext_match_score)\n', (9249, 9278), True, 'import numpy as np\n'), ((9315, 9345), 'numpy.array', 'np.array', (['batch_X_neg_para_val'], {}), '(batch_X_neg_para_val)\n', (9323, 9345), True, 'import numpy as np\n'), ((9386, 9421), 'numpy.array', 'np.array', (['batch_X_neg_para_val_char'], {}), '(batch_X_neg_para_val_char)\n', (9394, 9421), True, 'import numpy as np\n'), ((9464, 9501), 'numpy.array', 'np.array', (['batch_X_neg_ext_match_score'], {}), '(batch_X_neg_ext_match_score)\n', (9472, 9501), True, 'import numpy as np\n'), ((1246, 1288), 'numpy.mean', 'np.mean', (['pred_sequence_vec[label_vec == 1]'], {}), '(pred_sequence_vec[label_vec == 1])\n', (1253, 1288), True, 'import numpy as np\n'), ((3294, 3329), 'numpy.array', 'np.array', (["bert_X_query['input_ids']"], {}), "(bert_X_query['input_ids'])\n", (3302, 3329), True, 'import numpy as np\n'), ((3379, 3415), 'numpy.array', 'np.array', (["bert_X_query['input_mask']"], {}), "(bert_X_query['input_mask'])\n", (3387, 3415), True, 'import numpy as np\n'), ((3465, 3502), 'numpy.array', 'np.array', (["bert_X_query['segment_ids']"], {}), "(bert_X_query['segment_ids'])\n", (3473, 3502), True, 'import numpy as np\n'), ((3551, 3596), 'numpy.array', 'np.array', (["bert_X_pos_action_name['input_ids']"], {}), "(bert_X_pos_action_name['input_ids'])\n", (3559, 3596), True, 'import numpy as np\n'), ((3646, 3692), 
'numpy.array', 'np.array', (["bert_X_pos_action_name['input_mask']"], {}), "(bert_X_pos_action_name['input_mask'])\n", (3654, 3692), True, 'import numpy as np\n'), ((3742, 3789), 'numpy.array', 'np.array', (["bert_X_pos_action_name['segment_ids']"], {}), "(bert_X_pos_action_name['segment_ids'])\n", (3750, 3789), True, 'import numpy as np\n'), ((3843, 3891), 'numpy.array', 'np.array', (["bert_X_pos_act_para_names['input_ids']"], {}), "(bert_X_pos_act_para_names['input_ids'])\n", (3851, 3891), True, 'import numpy as np\n'), ((3944, 3993), 'numpy.array', 'np.array', (["bert_X_pos_act_para_names['input_mask']"], {}), "(bert_X_pos_act_para_names['input_mask'])\n", (3952, 3993), True, 'import numpy as np\n'), ((4046, 4096), 'numpy.array', 'np.array', (["bert_X_pos_act_para_names['segment_ids']"], {}), "(bert_X_pos_act_para_names['segment_ids'])\n", (4054, 4096), True, 'import numpy as np\n'), ((4145, 4190), 'numpy.array', 'np.array', (["bert_X_neg_action_name['input_ids']"], {}), "(bert_X_neg_action_name['input_ids'])\n", (4153, 4190), True, 'import numpy as np\n'), ((4240, 4286), 'numpy.array', 'np.array', (["bert_X_neg_action_name['input_mask']"], {}), "(bert_X_neg_action_name['input_mask'])\n", (4248, 4286), True, 'import numpy as np\n'), ((4336, 4383), 'numpy.array', 'np.array', (["bert_X_neg_action_name['segment_ids']"], {}), "(bert_X_neg_action_name['segment_ids'])\n", (4344, 4383), True, 'import numpy as np\n'), ((4437, 4485), 'numpy.array', 'np.array', (["bert_X_neg_act_para_names['input_ids']"], {}), "(bert_X_neg_act_para_names['input_ids'])\n", (4445, 4485), True, 'import numpy as np\n'), ((4538, 4587), 'numpy.array', 'np.array', (["bert_X_neg_act_para_names['input_mask']"], {}), "(bert_X_neg_act_para_names['input_mask'])\n", (4546, 4587), True, 'import numpy as np\n'), ((4640, 4690), 'numpy.array', 'np.array', (["bert_X_neg_act_para_names['segment_ids']"], {}), "(bert_X_neg_act_para_names['segment_ids'])\n", (4648, 4690), True, 'import numpy as np\n'), ((6433, 
6470), 'numpy.array', 'np.array', (["bert_X_tagg_in['input_ids']"], {}), "(bert_X_tagg_in['input_ids'])\n", (6441, 6470), True, 'import numpy as np\n'), ((6508, 6546), 'numpy.array', 'np.array', (["bert_X_tagg_in['input_mask']"], {}), "(bert_X_tagg_in['input_mask'])\n", (6516, 6546), True, 'import numpy as np\n'), ((6584, 6623), 'numpy.array', 'np.array', (["bert_X_tagg_in['segment_ids']"], {}), "(bert_X_tagg_in['segment_ids'])\n", (6592, 6623), True, 'import numpy as np\n'), ((9548, 9591), 'numpy.array', 'np.array', (["bert_X_pos_para_name['input_ids']"], {}), "(bert_X_pos_para_name['input_ids'])\n", (9556, 9591), True, 'import numpy as np\n'), ((9639, 9683), 'numpy.array', 'np.array', (["bert_X_pos_para_name['input_mask']"], {}), "(bert_X_pos_para_name['input_mask'])\n", (9647, 9683), True, 'import numpy as np\n'), ((9731, 9776), 'numpy.array', 'np.array', (["bert_X_pos_para_name['segment_ids']"], {}), "(bert_X_pos_para_name['segment_ids'])\n", (9739, 9776), True, 'import numpy as np\n'), ((9823, 9865), 'numpy.array', 'np.array', (["bert_X_pos_para_val['input_ids']"], {}), "(bert_X_pos_para_val['input_ids'])\n", (9831, 9865), True, 'import numpy as np\n'), ((9916, 9959), 'numpy.array', 'np.array', (["bert_X_pos_para_val['input_mask']"], {}), "(bert_X_pos_para_val['input_mask'])\n", (9924, 9959), True, 'import numpy as np\n'), ((10010, 10054), 'numpy.array', 'np.array', (["bert_X_pos_para_val['segment_ids']"], {}), "(bert_X_pos_para_val['segment_ids'])\n", (10018, 10054), True, 'import numpy as np\n'), ((10100, 10142), 'numpy.array', 'np.array', (["bert_X_neg_para_val['input_ids']"], {}), "(bert_X_neg_para_val['input_ids'])\n", (10108, 10142), True, 'import numpy as np\n'), ((10190, 10233), 'numpy.array', 'np.array', (["bert_X_neg_para_val['input_mask']"], {}), "(bert_X_neg_para_val['input_mask'])\n", (10198, 10233), True, 'import numpy as np\n'), ((10281, 10325), 'numpy.array', 'np.array', (["bert_X_neg_para_val['segment_ids']"], {}), 
"(bert_X_neg_para_val['segment_ids'])\n", (10289, 10325), True, 'import numpy as np\n')] |
import numpy as np
def class_count(data_holder):
unique, counts = np.unique(data_holder.target, return_counts=True)
return unique, counts
| [
"numpy.unique"
] | [((71, 120), 'numpy.unique', 'np.unique', (['data_holder.target'], {'return_counts': '(True)'}), '(data_holder.target, return_counts=True)\n', (80, 120), True, 'import numpy as np\n')] |
from collections import namedtuple
import jax.numpy as jnp
import pytest
from numpy.testing import assert_allclose
from numpyro.infer.einstein.kernels import (
RBFKernel,
RandomFeatureKernel,
GraphicalKernel,
IMQKernel,
LinearKernel,
MixtureKernel,
HessianPrecondMatrix,
PrecondMatrixKernel
)
T = namedtuple('TestSteinKernel', ['kernel', 'particle_info', 'loss_fn', 'kval'])
PARTICLES_2D = jnp.array([[1., 2.], [-10., 10.], [0., 0.], [2., -1]])
TPARTICLES_2D = (jnp.array([1., 2.]), jnp.array([10., 5.])) # transformed particles
TEST_CASES = [
T(RBFKernel,
lambda d: {},
lambda x: x,
{'norm': 3.8147664e-06,
'vector': jnp.array([0., 0.2500005]),
'matrix': jnp.array([[3.8147664e-06, 0.],
[0., 3.8147664e-06]])}
),
T(RandomFeatureKernel,
lambda d: {},
lambda x: x,
{'norm': -4.566867}),
T(IMQKernel,
lambda d: {},
lambda x: x,
{'norm': .104828484,
'vector': jnp.array([0.11043153, 0.31622776])}
),
T(LinearKernel,
lambda d: {},
lambda x: x,
{'norm': 21.}
),
T(lambda mode: MixtureKernel(mode=mode, ws=jnp.array([.2, .8]), kernel_fns=[RBFKernel(mode), RBFKernel(mode)]),
lambda d: {},
lambda x: x,
{'matrix': jnp.array([[3.8147664e-06, 0.],
[0., 3.8147664e-06]])}
),
T(lambda mode: GraphicalKernel(mode=mode, local_kernel_fns={'p1': RBFKernel('norm')}),
lambda d: {'p1': (0, d)},
lambda x: x,
{'matrix': jnp.array([[3.8147664e-06, 0.],
[0., 3.8147664e-06]])}
),
T(lambda mode: PrecondMatrixKernel(HessianPrecondMatrix(), RBFKernel(mode='matrix')),
lambda d: {},
lambda x: x[0] ** 4 - x[1] ** 3 / 2,
{'matrix': jnp.array([[5.608312e-09, 0.],
[0., 9.347186e-05]])}
)
]
PARTICLES = [(PARTICLES_2D, TPARTICLES_2D)]
TEST_IDS = [t[0].__class__.__name__ for t in TEST_CASES]
@pytest.mark.parametrize('kernel, particle_info, loss_fn, kval', TEST_CASES, ids=TEST_IDS)
@pytest.mark.parametrize('particles, tparticles', PARTICLES)
@pytest.mark.parametrize('mode', ['norm', 'vector', 'matrix'])
def test_kernel_forward(kernel, particles, particle_info, loss_fn, tparticles, mode, kval):
if mode not in kval:
return
d, = tparticles[0].shape
kernel_fn = kernel(mode=mode).compute(particles, particle_info(d), loss_fn)
value = kernel_fn(*tparticles)
assert_allclose(value, kval[mode])
| [
"collections.namedtuple",
"numpyro.infer.einstein.kernels.RBFKernel",
"numpy.testing.assert_allclose",
"jax.numpy.array",
"pytest.mark.parametrize",
"numpyro.infer.einstein.kernels.HessianPrecondMatrix"
] | [((332, 409), 'collections.namedtuple', 'namedtuple', (['"""TestSteinKernel"""', "['kernel', 'particle_info', 'loss_fn', 'kval']"], {}), "('TestSteinKernel', ['kernel', 'particle_info', 'loss_fn', 'kval'])\n", (342, 409), False, 'from collections import namedtuple\n'), ((426, 487), 'jax.numpy.array', 'jnp.array', (['[[1.0, 2.0], [-10.0, 10.0], [0.0, 0.0], [2.0, -1]]'], {}), '([[1.0, 2.0], [-10.0, 10.0], [0.0, 0.0], [2.0, -1]])\n', (435, 487), True, 'import jax.numpy as jnp\n'), ((2033, 2126), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kernel, particle_info, loss_fn, kval"""', 'TEST_CASES'], {'ids': 'TEST_IDS'}), "('kernel, particle_info, loss_fn, kval', TEST_CASES,\n ids=TEST_IDS)\n", (2056, 2126), False, 'import pytest\n'), ((2124, 2183), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""particles, tparticles"""', 'PARTICLES'], {}), "('particles, tparticles', PARTICLES)\n", (2147, 2183), False, 'import pytest\n'), ((2185, 2246), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['norm', 'vector', 'matrix']"], {}), "('mode', ['norm', 'vector', 'matrix'])\n", (2208, 2246), False, 'import pytest\n'), ((499, 520), 'jax.numpy.array', 'jnp.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (508, 520), True, 'import jax.numpy as jnp\n'), ((520, 542), 'jax.numpy.array', 'jnp.array', (['[10.0, 5.0]'], {}), '([10.0, 5.0])\n', (529, 542), True, 'import jax.numpy as jnp\n'), ((2528, 2562), 'numpy.testing.assert_allclose', 'assert_allclose', (['value', 'kval[mode]'], {}), '(value, kval[mode])\n', (2543, 2562), False, 'from numpy.testing import assert_allclose\n'), ((686, 713), 'jax.numpy.array', 'jnp.array', (['[0.0, 0.2500005]'], {}), '([0.0, 0.2500005])\n', (695, 713), True, 'import jax.numpy as jnp\n'), ((731, 786), 'jax.numpy.array', 'jnp.array', (['[[3.8147664e-06, 0.0], [0.0, 3.8147664e-06]]'], {}), '([[3.8147664e-06, 0.0], [0.0, 3.8147664e-06]])\n', (740, 786), True, 'import jax.numpy as jnp\n'), ((1017, 1052), 
'jax.numpy.array', 'jnp.array', (['[0.11043153, 0.31622776]'], {}), '([0.11043153, 0.31622776])\n', (1026, 1052), True, 'import jax.numpy as jnp\n'), ((1323, 1378), 'jax.numpy.array', 'jnp.array', (['[[3.8147664e-06, 0.0], [0.0, 3.8147664e-06]]'], {}), '([[3.8147664e-06, 0.0], [0.0, 3.8147664e-06]])\n', (1332, 1378), True, 'import jax.numpy as jnp\n'), ((1574, 1629), 'jax.numpy.array', 'jnp.array', (['[[3.8147664e-06, 0.0], [0.0, 3.8147664e-06]]'], {}), '([[3.8147664e-06, 0.0], [0.0, 3.8147664e-06]])\n', (1583, 1629), True, 'import jax.numpy as jnp\n'), ((1836, 1889), 'jax.numpy.array', 'jnp.array', (['[[5.608312e-09, 0.0], [0.0, 9.347186e-05]]'], {}), '([[5.608312e-09, 0.0], [0.0, 9.347186e-05]])\n', (1845, 1889), True, 'import jax.numpy as jnp\n'), ((1705, 1727), 'numpyro.infer.einstein.kernels.HessianPrecondMatrix', 'HessianPrecondMatrix', ([], {}), '()\n', (1725, 1727), False, 'from numpyro.infer.einstein.kernels import RBFKernel, RandomFeatureKernel, GraphicalKernel, IMQKernel, LinearKernel, MixtureKernel, HessianPrecondMatrix, PrecondMatrixKernel\n'), ((1729, 1753), 'numpyro.infer.einstein.kernels.RBFKernel', 'RBFKernel', ([], {'mode': '"""matrix"""'}), "(mode='matrix')\n", (1738, 1753), False, 'from numpyro.infer.einstein.kernels import RBFKernel, RandomFeatureKernel, GraphicalKernel, IMQKernel, LinearKernel, MixtureKernel, HessianPrecondMatrix, PrecondMatrixKernel\n'), ((1198, 1219), 'jax.numpy.array', 'jnp.array', (['[0.2, 0.8]'], {}), '([0.2, 0.8])\n', (1207, 1219), True, 'import jax.numpy as jnp\n'), ((1231, 1246), 'numpyro.infer.einstein.kernels.RBFKernel', 'RBFKernel', (['mode'], {}), '(mode)\n', (1240, 1246), False, 'from numpyro.infer.einstein.kernels import RBFKernel, RandomFeatureKernel, GraphicalKernel, IMQKernel, LinearKernel, MixtureKernel, HessianPrecondMatrix, PrecondMatrixKernel\n'), ((1248, 1263), 'numpyro.infer.einstein.kernels.RBFKernel', 'RBFKernel', (['mode'], {}), '(mode)\n', (1257, 1263), False, 'from numpyro.infer.einstein.kernels 
import RBFKernel, RandomFeatureKernel, GraphicalKernel, IMQKernel, LinearKernel, MixtureKernel, HessianPrecondMatrix, PrecondMatrixKernel\n'), ((1485, 1502), 'numpyro.infer.einstein.kernels.RBFKernel', 'RBFKernel', (['"""norm"""'], {}), "('norm')\n", (1494, 1502), False, 'from numpyro.infer.einstein.kernels import RBFKernel, RandomFeatureKernel, GraphicalKernel, IMQKernel, LinearKernel, MixtureKernel, HessianPrecondMatrix, PrecondMatrixKernel\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 5 13:38:43 2022
@author: Dartoon
"""
import numpy as np
from scipy import ndimage
import scipy.optimize as op
from galight.tools.astro_tools import plt_fits
import matplotlib.pyplot as plt
def shift_img(img, shift_pix, order=1):
shift_pix = shift_pix[::-1] #uniform the yx to xy
from scipy.ndimage.interpolation import shift
shifted_digit_image=shift(img, shift_pix, order = order)
return shifted_digit_image
def rotate_image(img, rotate_pix, order =1):
shift_pix = [-rotate_pix[0]*2, -rotate_pix[1]*2]
shift_ = shift_img(img, shift_pix, order=order)
rotate = np.flip(shift_)
return rotate
class Measure_asy:
def __init__(self, fitting_process_class, obj_id=0, interp_order=3, seg_cal_reg = 'or'):
self.fitting_process_class = fitting_process_class
self.interp_order = interp_order
self.seg_cal_reg = seg_cal_reg
self.obj_id = obj_id
self.interp_order = interp_order
self.img = self.fitting_process_class.fitting_specify_class.kwargs_data['image_data']
def asy_segm(self, segm = None, mask_type = 'segm'):
obj_id = self.obj_id
apertures = self.fitting_process_class.fitting_specify_class.apertures
if segm is None:
if mask_type == 'segm':
segm_deblend = self.fitting_process_class.fitting_specify_class.segm_deblend
elif mask_type == 'aper': #!!!
from galight.tools.measure_tools import mask_obj
segm_deblend = np.zeros_like(self.img)
for i in range(len(apertures)):
segm_deblend = segm_deblend + (1-mask_obj(self.img, [apertures[i]])[0]) * (i+1)
else:
segm_deblend = segm
if isinstance(segm_deblend, (np.ndarray)):
self.segm = segm_deblend
else:
self.segm = segm_deblend.data
pix_pos = np.intc(apertures[obj_id].positions)
self.segm_id = self.segm[pix_pos[1], pix_pos[0]]
self.ini_pix = [pix_pos[0]-len(self.img)/2., pix_pos[1]-len(self.img)/2.]
def abs_res(self, rotate_pix, if_plot=False):
cal_areas, _, punish = self.segm_to_mask(rotate_pix)
rotate_ = rotate_image(self.img, rotate_pix, order = self.interp_order)
res_ = self.img - rotate_ #Consider resdiual as data-model, where model is the rotation.
if if_plot == True:
print("Plot the minimized abs residual:")
plt_fits(abs(res_*cal_areas),norm='log')
if punish == False:
return np.sum(abs(res_*cal_areas))
else:
return 10**6
def find_pos(self):
ini_pix = self.ini_pix
print('Measuring the position for minimized asy...')
result = op.minimize(self.abs_res, ini_pix, method='nelder-mead',
options={'xatol': 1e-8, 'disp': True})
return result
def segm_to_mask(self, rotate_pix, segm_id = None):
if segm_id is None:
segm_id = self.segm_id
cal_area = self.segm == segm_id
mask = (self.segm != segm_id) * (self.segm != 0)
rotate_pix = np.around(rotate_pix)
cal_area_ = rotate_image(cal_area, rotate_pix,order =1)
mask_ = rotate_image(mask, rotate_pix,order =1)
punish = False
if self.seg_cal_reg == 'and':
cal_areas = cal_area * cal_area_
mask_areas = mask * mask_
if np.sum(cal_areas) < np.sum(cal_area)/3:
punish = True
elif self.seg_cal_reg == 'or':
cal_areas = cal_area + cal_area_
mask_areas = mask + mask_
return cal_areas, mask_areas, punish
def cal_asymmetry(self, rotate_pix, bkg_asy_dens=None, obj_flux = None, if_remeasure_bkg=False, if_plot = True, if_plot_bkg = False):
'''
Parameters
----------
rotate_pix : array
center of rotation.
bkg_asy_dens : float between 0 and 1, optional
bkg asymmetry per pixel, if given, use this value directly. The default is None.
if_remeasure_bkg : boolean, optional
if True, use a larger area up to 25 * obj pixels to calculate the bkg asymmetry. The default is False.
if_plot : boolean, optional
Plot the minimized abs residual. The default is True.
if_plot_bkg : boolean, optional
Plot the region to estiamte the background asymmetry. The default is False.
Returns
-------
float
asymmetry value.
'''
asy = self.abs_res(rotate_pix, if_plot=if_plot)
cal_areas, masks, _ = self.segm_to_mask(rotate_pix)
if obj_flux is None:
self.obj_flux = np.sum(self.img * cal_areas)
else:
self.obj_flux = obj_flux
if bkg_asy_dens is None:
if if_remeasure_bkg == False:
obj_masks = cal_areas + masks
obj_masks = obj_masks == False
img_bkg = self.img * obj_masks
img_bkg_ = rotate_image(img_bkg, np.around(rotate_pix), order =1)
rot_mask = img_bkg_!=0
obj_masks = obj_masks * rot_mask
elif hasattr(self.fitting_process_class.fitting_specify_class, 'data_process_class'):
# data_process_class = self.fitting_process_class.fitting_specify_class.data_process_class,
img_bkg, obj_masks = pass_bkg(data_process=self.fitting_process_class.fitting_specify_class.data_process_class,
num_pix=np.sum(cal_areas),
rotate_pix=rotate_pix,
ini_pix = self.ini_pix)
img_bkg_ = rotate_image(img_bkg, np.around(rotate_pix-self.ini_pix), order =1)
else:
raise ValueError("data_process_class has been removed and should be re-assigned to fitting_specify_class.")
bkg_asy_2d = abs(img_bkg - img_bkg_) * obj_masks
bkg_asy = np.sum(bkg_asy_2d)
self.bkg_asy_dens = bkg_asy/np.sum(obj_masks) #The density of the background asymmetry.
else:
assert 0 < bkg_asy_dens < 1.0
self.bkg_asy_dens = bkg_asy_dens
if if_plot_bkg == True:
print("Plot the region to estiamte the background asymmetry:")
plt_fits(bkg_asy_2d,norm='linear')
return asy/self.obj_flux - self.bkg_asy_dens * np.sum(cal_areas)/self.obj_flux
from galight.tools.measure_tools import detect_obj, mask_obj
def pass_bkg(data_process, num_pix, rotate_pix, ini_pix):# **kwargs):
ini_pix = np.asarray(ini_pix)
rotate_pix = rotate_pix - ini_pix
data_process.target_pos = data_process.target_pos + ini_pix
for boost_list in [28, 30, 35, 40, 50, 60]:
radius = np.sqrt(num_pix*boost_list)/2
data_process.generate_target_materials(radius=radius)
img = data_process.target_stamp
apertures = detect_obj(img, nsigma=1, exp_sz=1.6, npixels = 10)
# apertures = detect_obj(img, detect_tool = 'sep',exp_sz=2.5,err=data_process.noise_map)
obj_mask = mask_obj(img, apertures, if_plot=False, sum_mask=True)
image_masked = img*obj_mask
obj_mask_ = rotate_image(obj_mask, rotate_pix,order =1)
obj_masks = obj_mask*obj_mask_
if np.sum(obj_masks) > num_pix*25:
break
return image_masked, obj_masks
# import pickle
# fit_run_pkl = pickle.load(open('/Users/Dartoon/Astro/Projects/my_code/package_code/galight_example/HSC_QSO.pkl','rb'))
# fit_run_pkl.fitting_specify_class.plot_fitting_sets()
# data_process = fit_run_pkl.fitting_specify_class.data_process_class
# asy_class = Measure_asy(fit_run_pkl, seg_cal_reg = 'or', obj_id=0)
# asy_class.asy_segm(mask_type='aper')
# result = asy_class.find_pos()
# print(result["x"])
# plt_fits(asy_class.img,colorbar=True)
# asy = asy_class.cal_asymmetry(rotate_pix = result["x"], if_remeasure_bkg=False ,if_plot=True, if_plot_bkg=True)
# print('asymmetry :', asy)
| [
"scipy.ndimage.interpolation.shift",
"numpy.flip",
"numpy.intc",
"numpy.sqrt",
"galight.tools.measure_tools.detect_obj",
"galight.tools.measure_tools.mask_obj",
"scipy.optimize.minimize",
"numpy.asarray",
"numpy.sum",
"galight.tools.astro_tools.plt_fits",
"numpy.around",
"numpy.zeros_like"
] | [((432, 466), 'scipy.ndimage.interpolation.shift', 'shift', (['img', 'shift_pix'], {'order': 'order'}), '(img, shift_pix, order=order)\n', (437, 466), False, 'from scipy.ndimage.interpolation import shift\n'), ((663, 678), 'numpy.flip', 'np.flip', (['shift_'], {}), '(shift_)\n', (670, 678), True, 'import numpy as np\n'), ((6753, 6772), 'numpy.asarray', 'np.asarray', (['ini_pix'], {}), '(ini_pix)\n', (6763, 6772), True, 'import numpy as np\n'), ((1952, 1988), 'numpy.intc', 'np.intc', (['apertures[obj_id].positions'], {}), '(apertures[obj_id].positions)\n', (1959, 1988), True, 'import numpy as np\n'), ((2808, 2909), 'scipy.optimize.minimize', 'op.minimize', (['self.abs_res', 'ini_pix'], {'method': '"""nelder-mead"""', 'options': "{'xatol': 1e-08, 'disp': True}"}), "(self.abs_res, ini_pix, method='nelder-mead', options={'xatol': \n 1e-08, 'disp': True})\n", (2819, 2909), True, 'import scipy.optimize as op\n'), ((3179, 3200), 'numpy.around', 'np.around', (['rotate_pix'], {}), '(rotate_pix)\n', (3188, 3200), True, 'import numpy as np\n'), ((7092, 7141), 'galight.tools.measure_tools.detect_obj', 'detect_obj', (['img'], {'nsigma': '(1)', 'exp_sz': '(1.6)', 'npixels': '(10)'}), '(img, nsigma=1, exp_sz=1.6, npixels=10)\n', (7102, 7141), False, 'from galight.tools.measure_tools import detect_obj, mask_obj\n'), ((7260, 7314), 'galight.tools.measure_tools.mask_obj', 'mask_obj', (['img', 'apertures'], {'if_plot': '(False)', 'sum_mask': '(True)'}), '(img, apertures, if_plot=False, sum_mask=True)\n', (7268, 7314), False, 'from galight.tools.measure_tools import mask_obj\n'), ((4783, 4811), 'numpy.sum', 'np.sum', (['(self.img * cal_areas)'], {}), '(self.img * cal_areas)\n', (4789, 4811), True, 'import numpy as np\n'), ((6140, 6158), 'numpy.sum', 'np.sum', (['bkg_asy_2d'], {}), '(bkg_asy_2d)\n', (6146, 6158), True, 'import numpy as np\n'), ((6479, 6514), 'galight.tools.astro_tools.plt_fits', 'plt_fits', (['bkg_asy_2d'], {'norm': '"""linear"""'}), "(bkg_asy_2d, 
norm='linear')\n", (6487, 6514), False, 'from galight.tools.astro_tools import plt_fits\n'), ((6940, 6969), 'numpy.sqrt', 'np.sqrt', (['(num_pix * boost_list)'], {}), '(num_pix * boost_list)\n', (6947, 6969), True, 'import numpy as np\n'), ((7465, 7482), 'numpy.sum', 'np.sum', (['obj_masks'], {}), '(obj_masks)\n', (7471, 7482), True, 'import numpy as np\n'), ((3480, 3497), 'numpy.sum', 'np.sum', (['cal_areas'], {}), '(cal_areas)\n', (3486, 3497), True, 'import numpy as np\n'), ((6199, 6216), 'numpy.sum', 'np.sum', (['obj_masks'], {}), '(obj_masks)\n', (6205, 6216), True, 'import numpy as np\n'), ((1571, 1594), 'numpy.zeros_like', 'np.zeros_like', (['self.img'], {}), '(self.img)\n', (1584, 1594), True, 'import numpy as np\n'), ((3500, 3516), 'numpy.sum', 'np.sum', (['cal_area'], {}), '(cal_area)\n', (3506, 3516), True, 'import numpy as np\n'), ((5127, 5148), 'numpy.around', 'np.around', (['rotate_pix'], {}), '(rotate_pix)\n', (5136, 5148), True, 'import numpy as np\n'), ((6569, 6586), 'numpy.sum', 'np.sum', (['cal_areas'], {}), '(cal_areas)\n', (6575, 6586), True, 'import numpy as np\n'), ((5868, 5904), 'numpy.around', 'np.around', (['(rotate_pix - self.ini_pix)'], {}), '(rotate_pix - self.ini_pix)\n', (5877, 5904), True, 'import numpy as np\n'), ((5649, 5666), 'numpy.sum', 'np.sum', (['cal_areas'], {}), '(cal_areas)\n', (5655, 5666), True, 'import numpy as np\n'), ((1697, 1731), 'galight.tools.measure_tools.mask_obj', 'mask_obj', (['self.img', '[apertures[i]]'], {}), '(self.img, [apertures[i]])\n', (1705, 1731), False, 'from galight.tools.measure_tools import mask_obj\n')] |
"""
Tests for error conditions.
"""
import unittest
import tempfile
import os
import platform
import struct
import numpy as np
import kastore as kas
import kastore.store as store
IS_WINDOWS = platform.system() == "Windows"
class InterfaceMixin(object):
"""
Exercise the low-level interface.
"""
def setUp(self):
fd, path = tempfile.mkstemp(prefix="kas_test_errors")
os.close(fd)
self.temp_file = path
def tearDown(self):
os.unlink(self.temp_file)
def test_bad_dicts(self):
for bad_dict in [[], "w34", None, 1]:
self.assertRaises(
TypeError, kas.dump, bad_dict, self.temp_file, engine=self.engine)
self.assertRaises(
TypeError, kas.dump, data=bad_dict, filename=self.temp_file,
engine=self.engine)
def test_bad_filename_type(self):
for bad_filename in [[], None, {}]:
self.assertRaises(
TypeError, kas.dump, {}, bad_filename, engine=self.engine)
self.assertRaises(
TypeError, kas.dump, data={}, filename=bad_filename, engine=self.engine)
self.assertRaises(
TypeError, kas.load, bad_filename, engine=self.engine)
self.assertRaises(
TypeError, kas.load, filename=bad_filename, engine=self.engine)
def test_bad_keys(self):
a = np.zeros(1)
for bad_key in [(1234,), b"1234", None, 1234]:
self.assertRaises(
TypeError, kas.dump, data={bad_key: a}, filename=self.temp_file,
engine=self.engine)
def test_bad_arrays(self):
kas.dump(data={"a": []}, filename=self.temp_file, engine=self.engine)
for bad_array in [kas, lambda x: x, "1234", None, [[0, 1], [0, 2]]]:
self.assertRaises(
ValueError, kas.dump, data={"a": bad_array},
filename=self.temp_file, engine=self.engine)
# TODO add tests for arrays in fortran order and so on.
def test_file_not_found(self):
a = np.zeros(1)
for bad_file in ["no_such_file", "/no/such/file"]:
self.assertRaises(
FileNotFoundError, kas.load, filename=bad_file, engine=self.engine)
self.assertRaises(
FileNotFoundError, kas.dump, data={"a": a}, filename="/no/such/file",
engine=self.engine)
def test_file_is_a_directory(self):
tmp_dir = tempfile.mkdtemp()
try:
exception = IsADirectoryError
if IS_WINDOWS:
exception = PermissionError
self.assertRaises(
exception, kas.dump, filename=tmp_dir, data={"a": []},
engine=self.engine)
self.assertRaises(
exception, kas.load, filename=tmp_dir, engine=self.engine)
finally:
os.rmdir(tmp_dir)
class TestInterfacePyEngine(InterfaceMixin, unittest.TestCase):
engine = kas.PY_ENGINE
class TestInterfaceCEngine(InterfaceMixin, unittest.TestCase):
engine = kas.C_ENGINE
class TestEngines(unittest.TestCase):
"""
Check that we correctly identify bad engines
"""
bad_engines = [None, {}, "no such engine", b"not an engine"]
def test_bad_engine_dump(self):
for bad_engine in self.bad_engines:
self.assertRaises(ValueError, kas.dump, "", {}, engine=bad_engine)
def test_bad_engine_load(self):
for bad_engine in self.bad_engines:
self.assertRaises(ValueError, kas.load, "", engine=bad_engine)
class FileFormatsMixin(object):
"""
Common utilities for tests on the file format.
"""
def setUp(self):
fd, path = tempfile.mkstemp(prefix="kas_malformed_files")
os.close(fd)
self.temp_file = path
def tearDown(self):
os.unlink(self.temp_file)
def write_file(self, num_items=0):
data = {}
for j in range(num_items):
data["a" * (j + 1)] = np.arange(j)
kas.dump(data, self.temp_file)
class MalformedFilesMixin(FileFormatsMixin):
"""
Tests for various types of malformed intput files.
"""
def test_empty_file(self):
with open(self.temp_file, "w"):
pass
self.assertEqual(os.path.getsize(self.temp_file), 0)
self.assertRaises(
kas.FileFormatError, kas.load, self.temp_file, engine=self.engine,
read_all=self.read_all)
def test_bad_magic(self):
self.write_file()
with open(self.temp_file, 'rb') as f:
buff = bytearray(f.read())
before_len = len(buff)
buff[0:8] = b'12345678'
self.assertEqual(len(buff), before_len)
with open(self.temp_file, 'wb') as f:
f.write(buff)
self.assertRaises(
kas.FileFormatError, kas.load, self.temp_file, engine=self.engine,
read_all=self.read_all)
def test_bad_file_size(self):
for num_items in range(10):
for offset in [-2, -1, 1, 2**10]:
self.write_file(num_items)
file_size = os.path.getsize(self.temp_file)
with open(self.temp_file, 'rb') as f:
buff = bytearray(f.read())
before_len = len(buff)
buff[16:24] = struct.pack("<Q", file_size + offset)
self.assertEqual(len(buff), before_len)
with open(self.temp_file, 'wb') as f:
f.write(buff)
with self.assertRaises(kas.FileFormatError):
kas.load(self.temp_file, engine=self.engine, read_all=self.read_all)
def test_bad_item_types(self):
items = {"a": []}
descriptors, file_size = store.pack_items(items)
num_types = len(store.np_dtype_to_type_map)
for bad_type in [num_types + 1, 2 * num_types]:
with open(self.temp_file, "wb") as f:
descriptors[0].type = bad_type
store.write_file(f, descriptors, file_size)
with self.assertRaises(kas.FileFormatError):
kas.load(self.temp_file, engine=self.engine, read_all=self.read_all)
def test_bad_key_initial_offsets(self):
items = {"a": np.arange(100)}
# First key offset must be at header_size + n * (descriptor_size)
for offset in [-1, +1, 2, 100]:
# First key offset must be at header_size + n * (descriptor_size)
descriptors, file_size = store.pack_items(items)
descriptors[0].key_start += offset
with open(self.temp_file, "wb") as f:
store.write_file(f, descriptors, file_size)
self.assertRaises(
kas.FileFormatError, kas.load, self.temp_file, engine=self.engine,
read_all=self.read_all)
def test_bad_key_non_sequential(self):
items = {"a": np.arange(100), "b": []}
# Keys must be packed sequentially.
for offset in [-1, +1, 2, 100]:
descriptors, file_size = store.pack_items(items)
descriptors[1].key_start += offset
self.assertRaises(
kas.FileFormatError, kas.load, self.temp_file, engine=self.engine,
read_all=self.read_all)
def test_bad_array_initial_offset(self):
items = {"a": np.arange(100)}
for offset in [-1, +1, 2, 8, 16, 100]:
# First key offset must be at header_size + n * (descriptor_size)
descriptors, file_size = store.pack_items(items)
descriptors[0].array_start += offset
with open(self.temp_file, "wb") as f:
store.write_file(f, descriptors, file_size)
self.assertRaises(
kas.FileFormatError, kas.load, self.temp_file, engine=self.engine,
read_all=self.read_all)
    def test_bad_array_non_sequential(self):
        """Non-sequentially packed arrays must make kas.load fail."""
        items = {"a": np.arange(100), "b": []}
        for offset in [-1, 1, 2, -8, 8, 100]:
            descriptors, file_size = store.pack_items(items)
            descriptors[1].array_start += offset
            with open(self.temp_file, "wb") as f:
                store.write_file(f, descriptors, file_size)
            self.assertRaises(
                kas.FileFormatError, kas.load, self.temp_file, engine=self.engine,
                read_all=self.read_all)
class TestMalformedFilesPyEngine(MalformedFilesMixin, unittest.TestCase):
    # Malformed-file checks against the pure-Python engine, lazy loading.
    read_all = False
    engine = kas.PY_ENGINE
class TestMalformedFilesCEngine(MalformedFilesMixin, unittest.TestCase):
    # Same checks against the C engine, lazy loading.
    read_all = False
    engine = kas.C_ENGINE
class TestMalformedFilesPyEngineReadAll(MalformedFilesMixin, unittest.TestCase):
    # Pure-Python engine with eager (read_all) loading.
    read_all = True
    engine = kas.PY_ENGINE
class TestMalformedFilesCEngineReadAll(MalformedFilesMixin, unittest.TestCase):
    # C engine with eager (read_all) loading.
    read_all = True
    engine = kas.C_ENGINE
class FileVersionsMixin(FileFormatsMixin):
    """
    Tests for the file major version.
    """
    def verify_major_version(self, version):
        """Rewrite the stored file's major-version field to ``version`` and reload."""
        self.write_file()
        with open(self.temp_file, 'rb') as f:
            buff = bytearray(f.read())
        before_len = len(buff)
        # The major version lives at byte offset 8 as a little-endian uint16.
        buff[8:10] = struct.pack("<H", version)
        self.assertEqual(len(buff), before_len)
        with open(self.temp_file, 'wb') as f:
            f.write(buff)
        kas.load(self.temp_file, engine=self.engine, read_all=self.read_all)
    def test_major_version_too_old(self):
        self.assertRaises(
            kas.VersionTooOldError, self.verify_major_version, store.VERSION_MAJOR - 1)
    def test_major_version_too_new(self):
        # Any future major version must be rejected, not just VERSION_MAJOR + 1.
        for j in range(1, 5):
            self.assertRaises(
                kas.VersionTooNewError, self.verify_major_version,
                store.VERSION_MAJOR + j)
class TestFileVersionsPyEngine(FileVersionsMixin, unittest.TestCase):
    # Version checks against the pure-Python engine, lazy loading.
    engine = kas.PY_ENGINE
    read_all = False
class TestFileVersionsPyEngineReadAll(FileVersionsMixin, unittest.TestCase):
    # Pure-Python engine with eager (read_all) loading.
    engine = kas.PY_ENGINE
    read_all = True
class TestFileVersionsCEngine(FileVersionsMixin, unittest.TestCase):
    # C engine, lazy loading.
    engine = kas.C_ENGINE
    read_all = False
class TestFileVersionsCEngineReadAll(FileVersionsMixin, unittest.TestCase):
    # C engine with eager (read_all) loading.
    engine = kas.C_ENGINE
    read_all = True
| [
"os.path.getsize",
"kastore.store.write_file",
"os.close",
"kastore.dump",
"kastore.load",
"struct.pack",
"os.rmdir",
"platform.system",
"numpy.zeros",
"tempfile.mkdtemp",
"os.unlink",
"kastore.store.pack_items",
"tempfile.mkstemp",
"numpy.arange"
] | [((195, 212), 'platform.system', 'platform.system', ([], {}), '()\n', (210, 212), False, 'import platform\n'), ((352, 394), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""kas_test_errors"""'}), "(prefix='kas_test_errors')\n", (368, 394), False, 'import tempfile\n'), ((403, 415), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (411, 415), False, 'import os\n'), ((479, 504), 'os.unlink', 'os.unlink', (['self.temp_file'], {}), '(self.temp_file)\n', (488, 504), False, 'import os\n'), ((1404, 1415), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (1412, 1415), True, 'import numpy as np\n'), ((1659, 1728), 'kastore.dump', 'kas.dump', ([], {'data': "{'a': []}", 'filename': 'self.temp_file', 'engine': 'self.engine'}), "(data={'a': []}, filename=self.temp_file, engine=self.engine)\n", (1667, 1728), True, 'import kastore as kas\n'), ((2071, 2082), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (2079, 2082), True, 'import numpy as np\n'), ((2457, 2475), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2473, 2475), False, 'import tempfile\n'), ((3704, 3750), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""kas_malformed_files"""'}), "(prefix='kas_malformed_files')\n", (3720, 3750), False, 'import tempfile\n'), ((3759, 3771), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (3767, 3771), False, 'import os\n'), ((3835, 3860), 'os.unlink', 'os.unlink', (['self.temp_file'], {}), '(self.temp_file)\n', (3844, 3860), False, 'import os\n'), ((4009, 4039), 'kastore.dump', 'kas.dump', (['data', 'self.temp_file'], {}), '(data, self.temp_file)\n', (4017, 4039), True, 'import kastore as kas\n'), ((5733, 5756), 'kastore.store.pack_items', 'store.pack_items', (['items'], {}), '(items)\n', (5749, 5756), True, 'import kastore.store as store\n'), ((9155, 9181), 'struct.pack', 'struct.pack', (['"""<H"""', 'version'], {}), "('<H', version)\n", (9166, 9181), False, 'import struct\n'), ((9310, 9378), 'kastore.load', 'kas.load', (['self.temp_file'], 
{'engine': 'self.engine', 'read_all': 'self.read_all'}), '(self.temp_file, engine=self.engine, read_all=self.read_all)\n', (9318, 9378), True, 'import kastore as kas\n'), ((2875, 2892), 'os.rmdir', 'os.rmdir', (['tmp_dir'], {}), '(tmp_dir)\n', (2883, 2892), False, 'import os\n'), ((3988, 4000), 'numpy.arange', 'np.arange', (['j'], {}), '(j)\n', (3997, 4000), True, 'import numpy as np\n'), ((4271, 4302), 'os.path.getsize', 'os.path.getsize', (['self.temp_file'], {}), '(self.temp_file)\n', (4286, 4302), False, 'import os\n'), ((6231, 6245), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (6240, 6245), True, 'import numpy as np\n'), ((6476, 6499), 'kastore.store.pack_items', 'store.pack_items', (['items'], {}), '(items)\n', (6492, 6499), True, 'import kastore.store as store\n'), ((6877, 6891), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (6886, 6891), True, 'import numpy as np\n'), ((7023, 7046), 'kastore.store.pack_items', 'store.pack_items', (['items'], {}), '(items)\n', (7039, 7046), True, 'import kastore.store as store\n'), ((7316, 7330), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (7325, 7330), True, 'import numpy as np\n'), ((7494, 7517), 'kastore.store.pack_items', 'store.pack_items', (['items'], {}), '(items)\n', (7510, 7517), True, 'import kastore.store as store\n'), ((7899, 7913), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (7908, 7913), True, 'import numpy as np\n'), ((8007, 8030), 'kastore.store.pack_items', 'store.pack_items', (['items'], {}), '(items)\n', (8023, 8030), True, 'import kastore.store as store\n'), ((5104, 5135), 'os.path.getsize', 'os.path.getsize', (['self.temp_file'], {}), '(self.temp_file)\n', (5119, 5135), False, 'import os\n'), ((5306, 5343), 'struct.pack', 'struct.pack', (['"""<Q"""', '(file_size + offset)'], {}), "('<Q', file_size + offset)\n", (5317, 5343), False, 'import struct\n'), ((5978, 6021), 'kastore.store.write_file', 'store.write_file', (['f', 'descriptors', 'file_size'], {}), 
'(f, descriptors, file_size)\n', (5994, 6021), True, 'import kastore.store as store\n'), ((6095, 6163), 'kastore.load', 'kas.load', (['self.temp_file'], {'engine': 'self.engine', 'read_all': 'self.read_all'}), '(self.temp_file, engine=self.engine, read_all=self.read_all)\n', (6103, 6163), True, 'import kastore as kas\n'), ((6613, 6656), 'kastore.store.write_file', 'store.write_file', (['f', 'descriptors', 'file_size'], {}), '(f, descriptors, file_size)\n', (6629, 6656), True, 'import kastore.store as store\n'), ((7633, 7676), 'kastore.store.write_file', 'store.write_file', (['f', 'descriptors', 'file_size'], {}), '(f, descriptors, file_size)\n', (7649, 7676), True, 'import kastore.store as store\n'), ((8146, 8189), 'kastore.store.write_file', 'store.write_file', (['f', 'descriptors', 'file_size'], {}), '(f, descriptors, file_size)\n', (8162, 8189), True, 'import kastore.store as store\n'), ((5569, 5637), 'kastore.load', 'kas.load', (['self.temp_file'], {'engine': 'self.engine', 'read_all': 'self.read_all'}), '(self.temp_file, engine=self.engine, read_all=self.read_all)\n', (5577, 5637), True, 'import kastore as kas\n')] |
import yaml
import numpy as np
from os import path
from absl import flags
from pysc2.env import sc2_env
from pysc2.lib import features
from pysc2.lib import actions
# Resolve the SC2 config relative to this file so imports work from any cwd.
sc2_f_path = path.abspath(path.join(path.dirname(__file__), "..", "configs", "sc2_config.yml"))
with open(sc2_f_path, 'r') as ymlfile:
    # safe_load never constructs arbitrary Python objects from the YAML and
    # avoids the PyYAML "load() without Loader=..." deprecation warning.
    sc2_cfg = yaml.safe_load(ymlfile)
# TODO: update README.md for adding random seed for game env
def create_sc2_minigame_env(map_name, mode, visualize=False):
    """Create sc2 game env with available actions printer
    Set screen, minimap same resolution and x, y same pixels for simplicity.
    A fixed seed (3) is used in 'test' mode so runs are reproducible.
    """
    assert mode in ['full', 'lite', 'test']
    # pysc2 requires absl flags to be parsed before SC2Env is constructed.
    FLAGS = flags.FLAGS
    FLAGS([__file__])
    resolution = (sc2_cfg[mode]['resl'],) * 2
    env_seed = 3 if mode == 'test' else None
    return sc2_env.SC2Env(
        map_name=map_name,
        step_mul=sc2_cfg[mode]['step_mul'],
        screen_size_px=resolution,
        minimap_size_px=resolution,
        visualize=visualize,
        random_seed=env_seed)
# TODO: move preprocess to neuro net embed layer
# TODO: move post process into sc2_env extension
class GameInterfaceHandler(object):
    """Provide game interface info.
    Transform observed game image and available actions into CNN input tensors.
        - Special Categorial 2d image:
            single layer normalized by scalar max
            (no same category overlapping)
        - Categorial 2d image:
            expand to multiple layer
        - Scalar 2d image:
            single layer normalized by scalar max
    NOTE: This class can potentially be a decorator to wrap sc2_env
    """
    def __init__(self, mode):
        assert mode in ['full', 'lite', 'test']
        self.dtype = np.float32
        # Indices of the id-like feature planes; these get log-compressed
        # below instead of one-hot expanded (their value range is huge).
        self.minimap_player_id = features.MINIMAP_FEATURES.player_id.index
        self.screen_player_id = features.SCREEN_FEATURES.player_id.index
        self.screen_unit_type = features.SCREEN_FEATURES.unit_type.index
        self.screen_resolution = sc2_cfg[mode]['resl']
        self.minimap_resolution = sc2_cfg[mode]['resl']
        # sub_to_full_acts: reduced action index -> pysc2 function id;
        # full_to_sub_acts: inverse mapping (-1 where the full id is unused).
        (self.sub_to_full_acts, self.full_to_sub_acts) = self._get_action_mappings(
            sc2_cfg[mode]['action_list'])
        self.num_action = len(self.sub_to_full_acts)
        self.non_spatial_actions = self._get_non_spatial_actions()
        # Optional whitelists of feature-plane indices; empty list = keep all.
        self.screen_imgs = sc2_cfg[mode]['screen_imgs']
        self.minimap_imgs = sc2_cfg[mode]['minimap_imgs']
    @property
    def screen_channels(self):
        """Return number of channels for preprocessed screen image"""
        channels = 0
        for i, screen_feature in enumerate(features.SCREEN_FEATURES):
            if len(self.screen_imgs) > 0 and i not in self.screen_imgs:
                continue
            if i == self.screen_player_id or i == self.screen_unit_type:
                # log-compressed planes contribute a single channel
                channels += 1
            elif screen_feature.type == features.FeatureType.SCALAR:
                channels += 1
            else:
                # categorical planes are one-hot expanded to `scale` channels
                channels += screen_feature.scale
        return channels
    def _preprocess_screen(self, screen):
        """Transform screen image into expanded tensor
        Args:
            screen: obs.observation['screen']
        Returns:
            ndarray, shape (len(SCREEN_FEATURES), screen_size_px.y, screen_size_px.x)
        """
        screen = np.array(screen, dtype=self.dtype)
        layers = []
        assert screen.shape[0] == len(features.SCREEN_FEATURES)
        for i, screen_feature in enumerate(features.SCREEN_FEATURES):
            if len(self.screen_imgs) > 0 and i not in self.screen_imgs:
                continue
            if i == self.screen_player_id or i == self.screen_unit_type:
                # log(1 + x) keeps the large id ranges in a usable scale
                layers.append(np.log(screen[i:i + 1] + 1.))
            elif screen_feature.type == features.FeatureType.SCALAR:
                layers.append(np.log(screen[i:i + 1] + 1.))
            else:
                # one-hot expand the categorical plane into `scale` layers
                layer = np.zeros(
                    (screen_feature.scale, screen.shape[1], screen.shape[2]),
                    dtype=self.dtype)
                for j in range(screen_feature.scale):
                    indy, indx = (screen[i] == j).nonzero()
                    layer[j, indy, indx] = 1
                layers.append(layer)
        return np.concatenate(layers, axis=0)
    def get_screen(self, observation):
        """Extract screen variable from observation['screen']
        Args:
            observation: Timestep.observation
        Returns:
            screen: ndarray, shape (1, len(SCREEN_FEATURES), screen_size_px.y, screen_size_px.x)
        """
        screen = self._preprocess_screen(observation['screen'])
        return np.expand_dims(screen, 0)
    @property
    def minimap_channels(self):
        """Return number of channels for preprocessed minimap image"""
        channels = 0
        for i, minimap_feature in enumerate(features.MINIMAP_FEATURES):
            if len(self.minimap_imgs) > 0 and i not in self.minimap_imgs:
                continue
            if i == self.minimap_player_id:
                # log-compressed plane contributes a single channel
                channels += 1
            elif minimap_feature.type == features.FeatureType.SCALAR:
                channels += 1
            else:
                # categorical planes are one-hot expanded to `scale` channels
                channels += minimap_feature.scale
        return channels
    def _preprocess_minimap(self, minimap):
        """Transform minimap image into expanded tensor
        Args:
            minimap: obs.observation['minimap']
        Returns:
            ndarray, shape (len(MINIMAP_FEATURES), minimap_size_px.y, minimap_size_px.x)
        """
        minimap = np.array(minimap, dtype=self.dtype)
        layers = []
        assert minimap.shape[0] == len(features.MINIMAP_FEATURES)
        for i, minimap_feature in enumerate(features.MINIMAP_FEATURES):
            if len(self.minimap_imgs) > 0 and i not in self.minimap_imgs:
                continue
            if i == self.minimap_player_id:
                # log(1 + x) keeps the large id range in a usable scale
                layers.append(np.log(minimap[i:i + 1] + 1.))
            elif minimap_feature.type == features.FeatureType.SCALAR:
                layers.append(np.log(minimap[i:i + 1] + 1.))
            else:
                # one-hot expand the categorical plane into `scale` layers
                layer = np.zeros(
                    (minimap_feature.scale, minimap.shape[1], minimap.shape[2]),
                    dtype=self.dtype)
                for j in range(minimap_feature.scale):
                    indy, indx = (minimap[i] == j).nonzero()
                    layer[j, indy, indx] = 1
                layers.append(layer)
        return np.concatenate(layers, axis=0)
    def get_minimap(self, observation):
        """Extract minimap variable from observation['minimap']
        Args:
            observation: Timestep.observation
        Returns:
            minimap: ndarray, shape (1, len(MINIMAP_FEATURES), minimap_size_px.y, minimap_size_px.x)
        """
        minimap = self._preprocess_minimap(observation['minimap'])
        return np.expand_dims(minimap, 0)
    def _preprocess_available_actions(self, available_actions):
        """Returns ndarray of available_actions from observed['available_actions']
        shape (num_actions)
        """
        # Keep only the actions that exist in the reduced action space,
        # then mark their reduced indices as available (1.0).
        available_actions = np.intersect1d(available_actions, self.sub_to_full_acts)
        a_actions = np.zeros((self.num_action), dtype=self.dtype)
        a_actions[self.full_to_sub_acts[available_actions]] = 1.
        return a_actions
    def get_available_actions(self, observation):
        """
        Args:
            observation: Timestep.observation
        Returns:
            available_action: ndarray, shape(num_actions)
        """
        return self._preprocess_available_actions(
            observation['available_actions'])
    def get_info(self, observation):
        """Extract available actions as info from state.observation['available_actions']
        Args:
            observation: Timestep.observation
        Returns:
            info: ndarray, shape (num_actions)
        """
        return self.get_available_actions(observation)
    def postprocess_action(self, non_spatial_action, spatial_action):
        """Transform selected non_spatial and spatial actions into pysc2 FunctionCall
        Args:
            non_spatial_action: ndarray, shape (1, 1)
            spatial_action: ndarray, shape (1, 1)
        Returns:
            FunctionCall as action for pysc2_env
        """
        act_id = self.sub_to_full_acts[non_spatial_action[0][0]]
        # Decode the flat spatial index into screen coordinates.
        target = spatial_action[0][0]
        target_point = [
            int(target % self.screen_resolution),
            int(target // self.screen_resolution)
        ]  # (x, y)
        act_args = []
        for arg in actions.FUNCTIONS[act_id].args:
            if arg.name in ('screen', 'minimap', 'screen2'):
                act_args.append(target_point)
            else:
                # every non-spatial argument defaults to its first option
                act_args.append([0])
        return actions.FunctionCall(act_id, act_args)
    def _get_non_spatial_actions(self):
        # Bool list indexed by reduced action id: True when the pysc2
        # function takes no spatial (screen/minimap) argument.
        non_spatial_actions = [True] * self.num_action
        for func_id, func in enumerate(actions.FUNCTIONS):
            for arg in func.args:
                if arg.name in ('screen', 'minimap', 'screen2'):
                    non_spatial_actions[self.full_to_sub_acts[func_id]] = False
                    break
        return non_spatial_actions
    def is_non_spatial_action(self, action_id):
        # action_id is a full pysc2 function id (it is translated through
        # full_to_sub_acts before the lookup).
        return self.non_spatial_actions[self.full_to_sub_acts[action_id]]
    def _get_action_mappings(self, action_list):
        """Fill action list if it's empty
        Args:
            action_list: list
        Returns:
            sub_to_full_acts: ndarray
            full_to_sub_acts: ndarray
        """
        if len(action_list) == 0:
            # default to the full pysc2 action space
            action_list = [i for i in range(len(actions.FUNCTIONS))]
        sub_to_full_acts = action_list
        # Inverse mapping; entries stay -1 for full ids outside the subset.
        full_to_sub_acts = [-1] * len(actions.FUNCTIONS)
        for idx, val in enumerate(sub_to_full_acts):
            full_to_sub_acts[val] = idx
        return (np.asarray(sub_to_full_acts, dtype=np.int32), np.asarray(full_to_sub_acts, dtype=np.int32))
| [
"numpy.intersect1d",
"pysc2.lib.actions.FunctionCall",
"pysc2.env.sc2_env.SC2Env",
"numpy.asarray",
"yaml.load",
"numpy.log",
"os.path.dirname",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"numpy.expand_dims"
] | [((317, 335), 'yaml.load', 'yaml.load', (['ymlfile'], {}), '(ymlfile)\n', (326, 335), False, 'import yaml\n'), ((788, 1000), 'pysc2.env.sc2_env.SC2Env', 'sc2_env.SC2Env', ([], {'map_name': 'map_name', 'step_mul': "sc2_cfg[mode]['step_mul']", 'screen_size_px': "((sc2_cfg[mode]['resl'],) * 2)", 'minimap_size_px': "((sc2_cfg[mode]['resl'],) * 2)", 'visualize': 'visualize', 'random_seed': 'env_seed'}), "(map_name=map_name, step_mul=sc2_cfg[mode]['step_mul'],\n screen_size_px=(sc2_cfg[mode]['resl'],) * 2, minimap_size_px=(sc2_cfg[\n mode]['resl'],) * 2, visualize=visualize, random_seed=env_seed)\n", (802, 1000), False, 'from pysc2.env import sc2_env\n'), ((204, 226), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (216, 226), False, 'from os import path\n'), ((3381, 3415), 'numpy.array', 'np.array', (['screen'], {'dtype': 'self.dtype'}), '(screen, dtype=self.dtype)\n', (3389, 3415), True, 'import numpy as np\n'), ((4308, 4338), 'numpy.concatenate', 'np.concatenate', (['layers'], {'axis': '(0)'}), '(layers, axis=0)\n', (4322, 4338), True, 'import numpy as np\n'), ((4722, 4747), 'numpy.expand_dims', 'np.expand_dims', (['screen', '(0)'], {}), '(screen, 0)\n', (4736, 4747), True, 'import numpy as np\n'), ((5639, 5674), 'numpy.array', 'np.array', (['minimap'], {'dtype': 'self.dtype'}), '(minimap, dtype=self.dtype)\n', (5647, 5674), True, 'import numpy as np\n'), ((6552, 6582), 'numpy.concatenate', 'np.concatenate', (['layers'], {'axis': '(0)'}), '(layers, axis=0)\n', (6566, 6582), True, 'import numpy as np\n'), ((6976, 7002), 'numpy.expand_dims', 'np.expand_dims', (['minimap', '(0)'], {}), '(minimap, 0)\n', (6990, 7002), True, 'import numpy as np\n'), ((7223, 7279), 'numpy.intersect1d', 'np.intersect1d', (['available_actions', 'self.sub_to_full_acts'], {}), '(available_actions, self.sub_to_full_acts)\n', (7237, 7279), True, 'import numpy as np\n'), ((7300, 7343), 'numpy.zeros', 'np.zeros', (['self.num_action'], {'dtype': 'self.dtype'}), 
'(self.num_action, dtype=self.dtype)\n', (7308, 7343), True, 'import numpy as np\n'), ((8967, 9005), 'pysc2.lib.actions.FunctionCall', 'actions.FunctionCall', (['act_id', 'act_args'], {}), '(act_id, act_args)\n', (8987, 9005), False, 'from pysc2.lib import actions\n'), ((10096, 10140), 'numpy.asarray', 'np.asarray', (['sub_to_full_acts'], {'dtype': 'np.int32'}), '(sub_to_full_acts, dtype=np.int32)\n', (10106, 10140), True, 'import numpy as np\n'), ((10142, 10186), 'numpy.asarray', 'np.asarray', (['full_to_sub_acts'], {'dtype': 'np.int32'}), '(full_to_sub_acts, dtype=np.int32)\n', (10152, 10186), True, 'import numpy as np\n'), ((3770, 3799), 'numpy.log', 'np.log', (['(screen[i:i + 1] + 1.0)'], {}), '(screen[i:i + 1] + 1.0)\n', (3776, 3799), True, 'import numpy as np\n'), ((3971, 4060), 'numpy.zeros', 'np.zeros', (['(screen_feature.scale, screen.shape[1], screen.shape[2])'], {'dtype': 'self.dtype'}), '((screen_feature.scale, screen.shape[1], screen.shape[2]), dtype=\n self.dtype)\n', (3979, 4060), True, 'import numpy as np\n'), ((6006, 6036), 'numpy.log', 'np.log', (['(minimap[i:i + 1] + 1.0)'], {}), '(minimap[i:i + 1] + 1.0)\n', (6012, 6036), True, 'import numpy as np\n'), ((6210, 6302), 'numpy.zeros', 'np.zeros', (['(minimap_feature.scale, minimap.shape[1], minimap.shape[2])'], {'dtype': 'self.dtype'}), '((minimap_feature.scale, minimap.shape[1], minimap.shape[2]), dtype\n =self.dtype)\n', (6218, 6302), True, 'import numpy as np\n'), ((3899, 3928), 'numpy.log', 'np.log', (['(screen[i:i + 1] + 1.0)'], {}), '(screen[i:i + 1] + 1.0)\n', (3905, 3928), True, 'import numpy as np\n'), ((6137, 6167), 'numpy.log', 'np.log', (['(minimap[i:i + 1] + 1.0)'], {}), '(minimap[i:i + 1] + 1.0)\n', (6143, 6167), True, 'import numpy as np\n')] |
from utils.path import *
from utils.audio.tools import get_mel
from tqdm import tqdm
import numpy as np
import glob, os, sys
from multiprocessing import Pool
from scipy.io.wavfile import write
import librosa, ffmpeg
from sklearn.preprocessing import StandardScaler
def job(wav_filename):
    """Convert one wav to mono 16-bit PCM at the target sample rate.

    Accepts a [source_path, output_dir, sampling_rate] triple; files that
    were already converted are skipped.
    """
    src_path, out_dir, target_sr = wav_filename
    basename = src_path.split("/")[-1]
    dst_path = get_path(out_dir, basename)
    if os.path.exists(dst_path):
        return
    stream = (
        ffmpeg
        .input(src_path)
        .output(dst_path, acodec='pcm_s16le', ac=1, ar=target_sr)
        .overwrite_output()
    )
    try:
        out, err = stream.run(capture_stdout=True, capture_stderr=True)
    except ffmpeg.Error as err:
        print(err.stderr, file=sys.stderr)
        raise
def preprocess(data_path, prepro_wav_dir, prepro_path, mel_path, sampling_rate, n_workers=10, filter_length=1024, hop_length=256, trim_silence=True, top_db=60):
    """Resample the corpus wavs in parallel, then extract and save
    mel-spectrograms plus global per-bin statistics (mel_stats.npy).

    Args:
        data_path: corpus root containing ``wav48/<speaker>/*.wav``.
        prepro_wav_dir: output directory for the converted wavs.
        prepro_path: directory where ``mel_stats.npy`` is written.
        mel_path: directory where per-utterance mel ``.npy`` files go.
        sampling_rate: target sample rate for the converted audio.
        n_workers: size of the multiprocessing pool for wav conversion.
        filter_length: STFT frame length passed to get_mel.
        hop_length: STFT hop size passed to get_mel.
        trim_silence: whether get_mel trims leading/trailing silence.
        top_db: silence threshold (dB) used when trimming.
    """
    p = Pool(n_workers)
    # copy=False lets partial_fit work in place on each spectrogram.
    mel_scaler = StandardScaler(copy=False)
    prepro_wav_dir = create_dir(prepro_wav_dir)
    wav_paths=[[filename, prepro_wav_dir, sampling_rate] for filename in list(glob.glob(get_path(data_path, "wav48", "**", "*.wav")))]
    print("\t[LOG] converting wav format...")
    with tqdm(total=len(wav_paths)) as pbar:
        for _ in tqdm(p.imap_unordered(job, wav_paths)):
            pbar.update()
    print("\t[LOG] saving mel-spectrogram...")
    with tqdm(total=len(wav_paths)) as pbar:
        for wav_filename in tqdm(glob.glob(get_path(prepro_wav_dir, "*.wav"))):
            mel_filename = wav_filename.split("/")[-1].replace("wav", "npy")
            mel_savepath = get_path(mel_path, mel_filename)
            mel_spectrogram, _ = get_mel(wav_filename, trim_silence=trim_silence, frame_length=filter_length, hop_length=hop_length, top_db=top_db)
            # Accumulate corpus-wide mean/scale one utterance at a time.
            mel_scaler.partial_fit(mel_spectrogram)
            np.save(mel_savepath, mel_spectrogram)
    # Row 0 = per-bin mean, row 1 = per-bin scale (std).
    np.save(get_path(prepro_path, "mel_stats.npy"), np.array([mel_scaler.mean_, mel_scaler.scale_]))
    print("Done!")
def split_unseen_speakers(prepro_mel_dir):
    """Partition preprocessed mel files into (seen, unseen) speaker lists.

    The speaker id is taken from the filename prefix before the first '_'.
    """
    print("[LOG] 6 UNSEEN speakers: \n\t p226(Male, English, Surrey) \n\t p256(Male, English, Birmingham) \
        \n\t p266(Female, Irish, Athlone) \n\t p297(Female, American, Newyork) \
        \n\t p323 (Female, SouthAfrican, Pretoria)\n\t p376(Male, Indian)")
    unseen_speaker_list = ["p226", "p256", "p266", "p297", "p323", "p376"]
    seen_speaker_files, unseen_speaker_files = [], []
    for mel_file in glob.glob(get_path(prepro_mel_dir, "*.npy")):
        speaker_id = mel_file.split("/")[-1].split("_")[0]
        bucket = unseen_speaker_files if speaker_id in unseen_speaker_list else seen_speaker_files
        bucket.append(mel_file)
    return seen_speaker_files, unseen_speaker_files
| [
"os.path.exists",
"ffmpeg.input",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"utils.audio.tools.get_mel",
"multiprocessing.Pool",
"numpy.save"
] | [((971, 986), 'multiprocessing.Pool', 'Pool', (['n_workers'], {}), '(n_workers)\n', (975, 986), False, 'from multiprocessing import Pool\n'), ((1002, 1028), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'copy': '(False)'}), '(copy=False)\n', (1016, 1028), False, 'from sklearn.preprocessing import StandardScaler\n'), ((474, 506), 'os.path.exists', 'os.path.exists', (['new_wav_filename'], {}), '(new_wav_filename)\n', (488, 506), False, 'import glob, os, sys\n'), ((1917, 1964), 'numpy.array', 'np.array', (['[mel_scaler.mean_, mel_scaler.scale_]'], {}), '([mel_scaler.mean_, mel_scaler.scale_])\n', (1925, 1964), True, 'import numpy as np\n'), ((1666, 1784), 'utils.audio.tools.get_mel', 'get_mel', (['wav_filename'], {'trim_silence': 'trim_silence', 'frame_length': 'filter_length', 'hop_length': 'hop_length', 'top_db': 'top_db'}), '(wav_filename, trim_silence=trim_silence, frame_length=filter_length,\n hop_length=hop_length, top_db=top_db)\n', (1673, 1784), False, 'from utils.audio.tools import get_mel\n'), ((1828, 1866), 'numpy.save', 'np.save', (['mel_savepath', 'mel_spectrogram'], {}), '(mel_savepath, mel_spectrogram)\n', (1835, 1866), True, 'import numpy as np\n'), ((530, 565), 'ffmpeg.input', 'ffmpeg.input', (['original_wav_filename'], {}), '(original_wav_filename)\n', (542, 565), False, 'import librosa, ffmpeg\n')] |
import numpy as np
from PyNeuronToolbox.morphology import allsec_preorder
def ez_record(h, var='v', sections=None, order=None,
              targ_names=None, cust_labels=None):
    """
    Records state variables across segments
    Args:
        h = hocObject to interface with neuron
        var = string specifying state variable to be recorded.
              Possible values are:
                  'v' (membrane potential)
                  'cai' (Ca concentration)
        sections = list of h.Section() objects to be recorded
        order = 'pre' to traverse sections in pre-order (via
              allsec_preorder); any other value uses h.allsec()
        targ_names = list of section names to be recorded; alternative
                     passing list of h.Section() objects directly
                     through the "sections" argument above.
        cust_labels = list of custom section labels
    Returns:
        data = list of h.Vector() objects recording membrane potential
        labels = list of labels for each voltage trace
    """
    if sections is None:
        if order == 'pre':
            sections = allsec_preorder(h)
        else:
            sections = list(h.allsec())
    if targ_names is not None:
        # Filter down to the explicitly requested section names.
        sections = [sec for sec in sections if sec.name() in targ_names]
    data, labels = [], []
    for i, sec in enumerate(sections):
        # Interior segment centers only; drop the 0 and 1 end nodes.
        positions = np.linspace(0, 1, sec.nseg + 2)
        for position in positions[1:-1]:
            vec = h.Vector()
            # Fixed: the original used `var is 'v'` / `var is 'cai'`, which
            # compares object identity, not string value (SyntaxWarning on
            # CPython >= 3.8 and fragile for non-interned strings).
            if var == 'v':
                vec.record(sec(position)._ref_v)
            elif var == 'cai':
                vec.record(sec(position)._ref_cai)
            data.append(vec)
            # Label is "<section-or-custom-name>_<position>".
            if cust_labels is None:
                lab = sec.name() + '_' + str(round(position, 5))
            else:
                lab = cust_labels[i] + '_' + str(round(position, 5))
            labels.append(lab)
    return data, labels
def ez_convert(data):
    """
    Convert a list of filled h.Vector() objects (as produced by ez_record)
    into a 2d numpy array of shape (n_samples, n_traces), one column per
    recorded vector.
    """
    n_rows = len(data[0])
    n_cols = len(data)
    out = np.empty((n_rows, n_cols))
    for col, vec in enumerate(data):
        out[:, col] = vec.to_python()
    return out
| [
"PyNeuronToolbox.morphology.allsec_preorder",
"numpy.linspace"
] | [((1381, 1412), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(sec.nseg + 2)'], {}), '(0, 1, sec.nseg + 2)\n', (1392, 1412), True, 'import numpy as np\n'), ((1004, 1022), 'PyNeuronToolbox.morphology.allsec_preorder', 'allsec_preorder', (['h'], {}), '(h)\n', (1019, 1022), False, 'from PyNeuronToolbox.morphology import allsec_preorder\n')] |
import numpy as np
from numpy.linalg import eig
from scipy.linalg import fractional_matrix_power
from sklearn.preprocessing import normalize
from kmeans import KMeans
class Spectral:
    """Spectral clustering on the symmetrically normalized graph Laplacian."""

    def cluster(self, x, k, delta=2.):
        """Cluster the points in x into k groups.

        delta controls the bandwidth of the Gaussian affinity; returns the
        per-point cluster ids from k-means on the embedded coordinates.
        """
        n = len(x)

        def affinity(i, j):
            # Gaussian similarity; the diagonal is forced to zero.
            if i == j:
                return 0.
            return np.exp(-delta * np.linalg.norm(x[int(i)] - x[int(j)]) ** 2)

        A = np.fromfunction(np.vectorize(affinity), (n, n))
        # Degree matrix and its inverse square root: D^(-1/2) A D^(-1/2).
        degrees = np.array([sum(A[i]) for i in range(n)])
        D_inv_sqrt = fractional_matrix_power(np.diag(degrees), -0.5)
        L = np.matmul(D_inv_sqrt, np.matmul(A, D_inv_sqrt))
        eigen_values, eigen_vectors = eig(L)
        # Keep the k eigenvectors with the largest eigenvalues.
        order = eigen_values.argsort()[::-1]
        X = eigen_vectors[:, order][:, :k]
        assert X.shape == (n, k)
        # Row-normalize before the final k-means step.
        Y = normalize(X, axis=1, norm='l1')
        cluster_ids, _ = KMeans().cluster(Y, k)
        return cluster_ids
| [
"kmeans.KMeans",
"numpy.linalg.eig",
"numpy.matmul",
"sklearn.preprocessing.normalize",
"scipy.linalg.fractional_matrix_power",
"numpy.vectorize"
] | [((511, 543), 'scipy.linalg.fractional_matrix_power', 'fractional_matrix_power', (['D', '(-0.5)'], {}), '(D, -0.5)\n', (534, 543), False, 'from scipy.linalg import fractional_matrix_power\n'), ((627, 633), 'numpy.linalg.eig', 'eig', (['L'], {}), '(L)\n', (630, 633), False, 'from numpy.linalg import eig\n'), ((764, 795), 'sklearn.preprocessing.normalize', 'normalize', (['X'], {'axis': '(1)', 'norm': '"""l1"""'}), "(X, axis=1, norm='l1')\n", (773, 795), False, 'from sklearn.preprocessing import normalize\n'), ((409, 427), 'numpy.vectorize', 'np.vectorize', (['dist'], {}), '(dist)\n', (421, 427), True, 'import numpy as np\n'), ((570, 586), 'numpy.matmul', 'np.matmul', (['A', 'DD'], {}), '(A, DD)\n', (579, 586), True, 'import numpy as np\n'), ((821, 829), 'kmeans.KMeans', 'KMeans', ([], {}), '()\n', (827, 829), False, 'from kmeans import KMeans\n')] |
import csv
import io
import tempfile
import unittest
from io import IOBase
from collections import defaultdict
import json
import numpy as np
from requests import Response
from lightly.openapi_generated.swagger_client.models.datasource_processed_until_timestamp_response import DatasourceProcessedUntilTimestampResponse
from lightly.openapi_generated.swagger_client.models.tag_creator import TagCreator
from lightly.openapi_generated.swagger_client.models.dataset_create_request import DatasetCreateRequest
from lightly.openapi_generated.swagger_client.models.dataset_data import DatasetData
from lightly.openapi_generated.swagger_client.api.datasets_api import DatasetsApi
from lightly.openapi_generated.swagger_client.api.datasources_api import DatasourcesApi
from lightly.openapi_generated.swagger_client.rest import ApiException
import lightly
from lightly.api.api_workflow_client import ApiWorkflowClient
from typing import *
from lightly.openapi_generated.swagger_client import ScoresApi, \
CreateEntityResponse, SamplesApi, SampleCreateRequest, \
InitialTagCreateRequest, ApiClient, VersioningApi, QuotaApi, \
TagArithmeticsRequest, TagBitMaskResponse, SampleWriteUrls, SampleData, \
Trigger2dEmbeddingJobRequest, SampleUpdateRequest
from lightly.openapi_generated.swagger_client.api.embeddings_api import EmbeddingsApi
from lightly.openapi_generated.swagger_client.api.jobs_api import JobsApi
from lightly.openapi_generated.swagger_client.api.mappings_api import MappingsApi
from lightly.openapi_generated.swagger_client.api.samplings_api import SamplingsApi
from lightly.openapi_generated.swagger_client.api.tags_api import TagsApi
from lightly.openapi_generated.swagger_client.models.async_task_data import AsyncTaskData
from lightly.openapi_generated.swagger_client.models.dataset_embedding_data import DatasetEmbeddingData
from lightly.openapi_generated.swagger_client.models.job_result_type import JobResultType
from lightly.openapi_generated.swagger_client.models.job_state import JobState
from lightly.openapi_generated.swagger_client.models.job_status_data import JobStatusData
from lightly.openapi_generated.swagger_client.models.job_status_data_result import JobStatusDataResult
from lightly.openapi_generated.swagger_client.models.sampling_create_request import SamplingCreateRequest
from lightly.openapi_generated.swagger_client.models.tag_data import TagData
from lightly.openapi_generated.swagger_client.models.write_csv_url_data import WriteCSVUrlData
from lightly.openapi_generated.swagger_client.models.datasource_config import DatasourceConfig
from lightly.openapi_generated.swagger_client.models.datasource_config_local import DatasourceConfigLOCAL
from lightly.openapi_generated.swagger_client.models.datasource_config_base import DatasourceConfigBase
from lightly.openapi_generated.swagger_client.models.datasource_processed_until_timestamp_request import DatasourceProcessedUntilTimestampRequest
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_data import DatasourceRawSamplesData
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_data_row import DatasourceRawSamplesDataRow
def _check_dataset_id(dataset_id: str):
assert isinstance(dataset_id, str)
assert len(dataset_id) > 0
N_FILES_ON_SERVER = 100
class MockedEmbeddingsApi(EmbeddingsApi):
    """EmbeddingsApi stub returning canned embedding metadata and URLs."""
    def __init__(self, api_client):
        EmbeddingsApi.__init__(self, api_client=api_client)
        # Two pre-existing embeddings; the second uses the reserved
        # name 'default'.
        self.embeddings = [
            DatasetEmbeddingData(
                id='embedding_id_xyz',
                name='embedding_name_xxyyzz',
                is_processed=True,
                created_at=0,
            ),
            DatasetEmbeddingData(
                id='embedding_id_xyz_2',
                name='default',
                is_processed=True,
                created_at=0,
            )
        ]
    def get_embeddings_csv_write_url_by_id(self, dataset_id: str, **kwargs):
        # Returns the same fixed signed write URL for every dataset.
        _check_dataset_id(dataset_id)
        assert isinstance(dataset_id, str)
        response_ = WriteCSVUrlData(signed_write_url="signed_write_url_valid", embedding_id="embedding_id_xyz")
        return response_
    def get_embeddings_by_dataset_id(self, dataset_id, **kwargs) -> List[DatasetEmbeddingData]:
        _check_dataset_id(dataset_id)
        assert isinstance(dataset_id, str)
        return self.embeddings
    def trigger2d_embeddings_job(self, body, dataset_id, embedding_id, **kwargs):
        # Only validates the request; the mocked job has no side effects.
        _check_dataset_id(dataset_id)
        assert isinstance(body, Trigger2dEmbeddingJobRequest)
    def get_embeddings_csv_read_url_by_id(self, dataset_id, embedding_id, **kwargs):
        _check_dataset_id(dataset_id)
        return 'https://my-embedding-read-url.com'
class MockedSamplingsApi(SamplingsApi):
    """SamplingsApi stub that validates its inputs and returns job id "155"."""
    def trigger_sampling_by_id(self, body: SamplingCreateRequest, dataset_id, embedding_id, **kwargs):
        _check_dataset_id(dataset_id)
        assert isinstance(body, SamplingCreateRequest)
        assert isinstance(dataset_id, str)
        assert isinstance(embedding_id, str)
        response_ = AsyncTaskData(job_id="155")
        return response_
class MockedJobsApi(JobsApi):
    """JobsApi mock whose job finishes after three status polls."""

    def __init__(self, *args, **kwargs):
        # Number of times the status endpoint has been polled so far.
        self.no_calls = 0
        JobsApi.__init__(self, *args, **kwargs)

    def get_job_status_by_id(self, job_id, **kwargs):
        """Report RUNNING for the first three polls, then FINISHED."""
        assert isinstance(job_id, str)
        self.no_calls += 1
        if self.no_calls <= 3:
            return JobStatusData(id="id_", status=JobState.RUNNING,
                                 wait_time_till_next_poll=0.001,
                                 created_at=1234, result=None)
        result = JobStatusDataResult(type=JobResultType.SAMPLING,
                                     data="sampling_tag_id_xyz")
        return JobStatusData(id="id_", status=JobState.FINISHED,
                             wait_time_till_next_poll=0,
                             created_at=1234, finished_at=1357, result=result)
class MockedTagsApi(TagsApi):
    """TagsApi mock serving a small fixed hierarchy of tags."""

    def create_initial_tag_by_dataset_id(self, body, dataset_id, **kwargs):
        """Validate the request and answer with a fixed entity id."""
        _check_dataset_id(dataset_id)
        assert isinstance(body, InitialTagCreateRequest)
        assert isinstance(dataset_id, str)
        return CreateEntityResponse(id="xyz")

    def get_tag_by_tag_id(self, dataset_id, tag_id, **kwargs):
        """Return a canned tag carrying the requested ids."""
        _check_dataset_id(dataset_id)
        assert isinstance(dataset_id, str)
        assert isinstance(tag_id, str)
        return TagData(id=tag_id, dataset_id=dataset_id, prev_tag_id="initial-tag",
                       bit_mask_data="0x80bda23e9", name='second-tag', tot_size=15,
                       created_at=1577836800, changes=dict())

    def get_tags_by_dataset_id(self, dataset_id, **kwargs):
        """Return up to five canned tags; capped by an optional ``no_tags`` attribute."""
        _check_dataset_id(dataset_id)
        if dataset_id == 'xyz-no-tags':
            return []
        # (id, prev_tag_id, bit_mask_data, name) for each canned tag.
        specs = [
            ('inital_tag_id', None, "0xF", 'initial-tag'),
            ('query_tag_id_xyz', "initial-tag", "0xF", 'query_tag_name_xyz'),
            ('preselected_tag_id_xyz', "initial-tag", "0x1", 'preselected_tag_name_xyz'),
            ('sampled_tag_xyz', "preselected_tag_id_xyz", "0x3", 'sampled_tag_xyz'),
            ('tag_with_integer_name', None, '0x1', '1000'),
        ]
        tags = [
            TagData(id=tag_id, dataset_id=dataset_id, prev_tag_id=prev,
                    bit_mask_data=mask, name=name, tot_size=4,
                    created_at=1577836800, changes=dict())
            for tag_id, prev, mask, name in specs
        ]
        return tags[:getattr(self, "no_tags", 5)]

    def perform_tag_arithmetics(self, body: TagArithmeticsRequest, dataset_id, **kwargs):
        """Create a tag when a new name is given, otherwise return a bitmask."""
        _check_dataset_id(dataset_id)
        if body.new_tag_name is None or body.new_tag_name == '':
            return TagBitMaskResponse(bit_mask_data="0x2")
        return CreateEntityResponse(id="tag-arithmetic-created")

    def perform_tag_arithmetics_bitmask(self, body: TagArithmeticsRequest, dataset_id, **kwargs):
        """Always answer with the fixed bitmask "0x2"."""
        _check_dataset_id(dataset_id)
        return TagBitMaskResponse(bit_mask_data="0x2")

    def upsize_tags_by_dataset_id(self, body, dataset_id, **kwargs):
        """Only validate that the upsize was requested by the pip client."""
        _check_dataset_id(dataset_id)
        assert body.upsize_tag_creator == TagCreator.USER_PIP
class MockedScoresApi(ScoresApi):
    """ScoresApi mock that accepts float scores and returns a fixed tag id."""

    def create_or_update_active_learning_score_by_tag_id(self, body, dataset_id, tag_id, **kwargs) -> \
            CreateEntityResponse:
        """Reject non-float scores with AttributeError, otherwise succeed."""
        _check_dataset_id(dataset_id)
        if len(body.scores) > 0 and not isinstance(body.scores[0], float):
            raise AttributeError
        return CreateEntityResponse(id="sampled_tag_id_xyz")
class MockedMappingsApi(MappingsApi):
    """MappingsApi mock that serves filenames in reverse creation order."""

    def __init__(self, samples_api, *args, **kwargs):
        self._samples_api = samples_api
        MappingsApi.__init__(self, *args, **kwargs)
        self.n_samples = N_FILES_ON_SERVER
        # The server lists filenames newest-first, i.e. in reverse index order.
        self.sample_names = [f'img_{i}.jpg' for i in reversed(range(self.n_samples))]

    def get_sample_mappings_by_dataset_id(self, dataset_id, field, **kwargs):
        """Return the filename mapping; empty for the special tag-less dataset."""
        if dataset_id == 'xyz-no-tags':
            return []
        return self.sample_names[:self.n_samples]
class MockedSamplesApi(SamplesApi):
    """SamplesApi mock that records create requests and replays them on read."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Every SampleCreateRequest ever seen, in creation order.
        self.sample_create_requests: List[SampleCreateRequest] = []

    def get_samples_by_dataset_id(
        self, dataset_id, **kwargs
    ) -> List[SampleData]:
        """Replay previously created samples as SampleData objects."""
        return [
            SampleData(
                id=f'{i}_xyz',
                dataset_id='dataset_id_xyz',
                file_name=request.file_name,
                type='Images',
            )
            for i, request in enumerate(self.sample_create_requests)
        ]

    def create_sample_by_dataset_id(self, body, dataset_id, **kwargs):
        """Record the request and answer with a fixed entity id."""
        _check_dataset_id(dataset_id)
        assert isinstance(body, SampleCreateRequest)
        self.sample_create_requests.append(body)
        return CreateEntityResponse(id="xyz")

    def get_sample_image_write_url_by_id(self, dataset_id, sample_id, is_thumbnail, **kwargs):
        """Derive a write URL from the sample id."""
        _check_dataset_id(dataset_id)
        return f"{sample_id}_write_url"

    def get_sample_image_read_url_by_id(self, dataset_id, sample_id, type, **kwargs):
        """Derive a read URL from the sample id."""
        _check_dataset_id(dataset_id)
        return f"{sample_id}_write_url"

    def get_sample_image_write_urls_by_id(self, dataset_id, sample_id, **kwargs) -> SampleWriteUrls:
        """Derive full- and thumbnail write URLs from the sample id."""
        _check_dataset_id(dataset_id)
        return SampleWriteUrls(full=f"{sample_id}_full_write_url",
                               thumb=f"{sample_id}_thumb_write_url")

    def update_sample_by_id(self, body, dataset_id, sample_id, **kwargs):
        """Only validate the update payload; updating is a no-op."""
        _check_dataset_id(dataset_id)
        assert isinstance(body, SampleUpdateRequest)
class MockedDatasetsApi(DatasetsApi):
    """DatasetsApi mock backed by an in-memory list of datasets."""

    def __init__(self, api_client):
        # Three pre-existing datasets the mock always starts from.
        self.default_datasets = [
            DatasetData(name=f"dataset_{i}", id=f"dataset_{i}_id", last_modified_at=i,
                        type="", img_type="full", size_in_bytes=-1, n_samples=-1,
                        created_at=-1)
            for i in range(3)
        ]
        self.reset()

    def reset(self):
        """Restore the dataset list to its initial state."""
        self.datasets = self.default_datasets

    def get_datasets(self, **kwargs):
        return self.datasets

    def create_dataset(self, body: DatasetCreateRequest, **kwargs):
        """Append a new dataset; the special name 'xyz-no-tags' keeps its id."""
        assert isinstance(body, DatasetCreateRequest)
        new_id = 'xyz-no-tags' if body.name == 'xyz-no-tags' else body.name + "_id"
        dataset = DatasetData(id=new_id, name=body.name,
                              last_modified_at=len(self.datasets) + 1,
                              type="Images", size_in_bytes=-1, n_samples=-1,
                              created_at=-1)
        self.datasets += [dataset]
        return CreateEntityResponse(id=new_id)

    def get_dataset_by_id(self, dataset_id):
        """Look the dataset up among the defaults; raises StopIteration if absent."""
        _check_dataset_id(dataset_id)
        return next(candidate for candidate in self.default_datasets
                    if dataset_id == candidate.id)

    def register_dataset_upload_by_id(self, body, dataset_id):
        _check_dataset_id(dataset_id)
        return True

    def delete_dataset_by_id(self, dataset_id, **kwargs):
        """Remove exactly one dataset with the given id."""
        _check_dataset_id(dataset_id)
        remaining = [candidate for candidate in self.datasets
                     if candidate.id != dataset_id]
        assert len(remaining) == len(self.datasets) - 1
        self.datasets = remaining
class MockedDatasourcesApi(DatasourcesApi):
    """DatasourcesApi mock with in-memory datasources and cursor paging."""

    def __init__(self, api_client=None):
        super().__init__(api_client=api_client)
        # maximum number of samples returned per raw-samples page
        self._max_return_samples = 2
        # default number of samples in every datasource
        self._num_samples = 5
        self.reset()

    def reset(self):
        """Restore the datasources, timestamps and samples to their defaults."""
        local_datasource = DatasourceConfigBase(type='LOCAL', full_path='').to_dict()
        azure_datasource = DatasourceConfigBase(type='AZURE', full_path='').to_dict()
        self._datasources = {
            "dataset_id_xyz": local_datasource,
            "dataset_0": azure_datasource,
        }
        self._processed_until_timestamp = defaultdict(lambda: 0)
        self._samples = defaultdict(self._default_samples)

    def _default_samples(self):
        """Build the default raw-sample rows of a fresh datasource."""
        return [
            DatasourceRawSamplesDataRow(file_name=f"file_{i}", read_url=f"url_{i}")
            for i in range(self._num_samples)
        ]

    def get_datasource_by_dataset_id(self, dataset_id: str, **kwargs):
        """Return the datasource or raise ApiException for unknown datasets."""
        if dataset_id not in self._datasources:
            raise ApiException()
        return self._datasources[dataset_id]

    def get_datasource_processed_until_timestamp_by_dataset_id(
        self, dataset_id: str, **kwargs
    ) -> DatasourceProcessedUntilTimestampResponse:
        """Report how far the datasource has been processed (defaults to 0)."""
        return DatasourceProcessedUntilTimestampResponse(
            self._processed_until_timestamp[dataset_id])

    def get_list_of_raw_samples_from_datasource_by_dataset_id(
        self, dataset_id, cursor: str = None, _from: int = None, to: int = None, **kwargs
    ) -> DatasourceRawSamplesData:
        """Page through the samples, ``_max_return_samples`` at a time.

        The paging state (current position and upper bound) travels in the
        JSON-encoded ``cursor`` between calls.
        """
        if cursor is None:
            # initial request: the bounds come from _from/to
            assert _from is not None
            assert to is not None
            state = {"from": _from, "to": to}
            start = _from
        else:
            # follow-up request: the bounds are encoded in the cursor
            state = json.loads(cursor)
            start = state["current"]
            to = state["to"]
        stop = min(start + self._max_return_samples, to + 1)
        page = self._samples[dataset_id][start:stop]
        state["current"] = stop
        return DatasourceRawSamplesData(
            has_more=len(page) > 0,
            cursor=json.dumps(state),
            data=page,
        )

    def update_datasource_by_dataset_id(
        self, body: DatasourceConfig, dataset_id: str, **kwargs
    ) -> None:
        """Replace the datasource configuration for a dataset."""
        assert isinstance(body, DatasourceConfig)
        self._datasources[dataset_id] = body  # type: ignore

    def update_datasource_processed_until_timestamp_by_dataset_id(
        self, body, dataset_id, **kwargs
    ) -> None:
        """Advance the processed-until timestamp for a dataset."""
        assert isinstance(body, DatasourceProcessedUntilTimestampRequest)
        self._processed_until_timestamp[dataset_id] = body.processed_until_timestamp  # type: ignore
class MockedVersioningApi(VersioningApi):
    """VersioningApi mock pinning the latest and minimum pip versions."""

    def get_latest_pip_version(self, **kwargs):
        """Pretend version 1.0.8 is the newest release."""
        return "1.0.8"

    def get_minimum_compatible_pip_version(self, **kwargs):
        """Pretend version 1.0.0 is still compatible."""
        return "1.0.0"
class MockedQuotaApi(QuotaApi):
    """QuotaApi mock with a fixed maximum dataset size."""

    def get_quota_maximum_dataset_size(self, **kwargs):
        """Pretend the quota allows datasets of up to 60000 samples."""
        return "60000"
def mocked_request_put(dst_url: str, data: IOBase = None) -> Response:
    """Stand-in for ``requests.put`` used by the upload code path.

    Validates that *dst_url* is a string and that *data* is a file-like
    object whose UTF-8 content starts with the CSV header ``filenames``,
    then answers with an HTTP 200 response.

    Bug fixed: the original signature read ``data=IOBase``, which made the
    *default value* the ``IOBase`` class itself instead of annotating the
    parameter type (calling ``IOBase.read()`` unbound would fail anyway).
    Callers always pass *data*, so changing the default is backward
    compatible.
    """
    assert isinstance(dst_url, str)
    content_bytes: bytes = data.read()
    content_str: str = content_bytes.decode('utf-8')
    assert content_str.startswith('filenames')
    response_ = Response()
    response_.status_code = 200
    return response_
class MockedApiClient(ApiClient):
    """ApiClient replacement that refuses any real network traffic.

    Both low-level entry points raise, so a test that touches an unmocked
    endpoint fails loudly instead of silently hitting the server.
    """
    def request(self, method, url, query_params=None, headers=None,
            post_params=None, body=None, _preload_content=True,
            _request_timeout=None):
        # Low-level HTTP entry point: must never be reached in tests.
        raise ValueError("ERROR: calling ApiClient.request(), but this should be mocked.")
    def call_api(self, resource_path, method,
             path_params=None, query_params=None, header_params=None,
             body=None, post_params=None, files=None,
             response_type=None, auth_settings=None, async_req=None,
             _return_http_data_only=None, collection_formats=None,
             _preload_content=True, _request_timeout=None):
        # High-level generated-client entry point: must never be reached either.
        raise ValueError("ERROR: calling ApiClient.call_api(), but this should be mocked.")
class MockedApiWorkflowClient(ApiWorkflowClient):
    """ApiWorkflowClient with every sub-API replaced by an in-memory mock.

    Construction-time patches reroute the HTTP client, the version check and
    ``requests.put`` so no request ever leaves the process.
    """
    # Base name used for the fake embedding filenames served below.
    embeddings_filename_base = 'img'
    # Number of embedding rows the fake CSV endpoint pretends to hold.
    n_embedding_rows_on_server = N_FILES_ON_SERVER
    def __init__(self, *args, **kwargs):
        # Patch the client/version classes *before* the parent constructor
        # runs so that it already instantiates the mocked versions.
        lightly.api.api_workflow_client.ApiClient = MockedApiClient
        lightly.api.version_checking.VersioningApi = MockedVersioningApi
        ApiWorkflowClient.__init__(self, *args, **kwargs)
        # Swap every sub-API for its mocked counterpart.
        self._samplings_api = MockedSamplingsApi(api_client=self.api_client)
        self._jobs_api = MockedJobsApi(api_client=self.api_client)
        self._tags_api = MockedTagsApi(api_client=self.api_client)
        self._embeddings_api = MockedEmbeddingsApi(api_client=self.api_client)
        self._samples_api = MockedSamplesApi(api_client=self.api_client)
        self._mappings_api = MockedMappingsApi(api_client=self.api_client,
                                               samples_api=self._samples_api)
        self._scores_api = MockedScoresApi(api_client=self.api_client)
        self._datasets_api = MockedDatasetsApi(api_client=self.api_client)
        self._datasources_api = MockedDatasourcesApi(api_client=self.api_client)
        self._quota_api = MockedQuotaApi(api_client=self.api_client)
        # Uploads via requests.put are short-circuited as well.
        lightly.api.api_workflow_client.requests.put = mocked_request_put
        self.wait_time_till_next_poll = 0.001 # for api_workflow_sampling
    def upload_file_with_signed_url(
            self, file: IOBase, signed_write_url: str,
            max_backoff: int = 32, max_retries: int = 5, headers: Dict = None,
    ) -> Response:
        # Pretend the upload succeeded without touching the network.
        res = Response()
        return res
    def _get_csv_reader_from_read_url(self, read_url: str):
        # Serve a synthetic embeddings CSV instead of downloading one.
        # NOTE(review): relies on ``self.n_dims_embeddings_on_server`` being
        # set externally (e.g. by the test subclass) -- it is not defined on
        # this class; confirm against the tests that use it.
        n_rows: int = self.n_embedding_rows_on_server
        n_dims: int = self.n_dims_embeddings_on_server
        # Header row: 'filenames', one column per embedding dimension, 'labels'.
        rows_csv = [['filenames'] + [f'embeddings_{i}' for i in range(n_dims)] + ['labels']]
        for i in range(n_rows):
            row = [f'{self.embeddings_filename_base}_{i}.jpg']
            for _ in range(n_dims):
                row.append(np.random.uniform(0, 1))
            row.append(i)
            rows_csv.append(row)
        # save the csv rows in a temporary in-memory string file
        # using a csv writer and then read them as bytes
        f = tempfile.SpooledTemporaryFile(mode="rw")
        writer = csv.writer(f)
        writer.writerows(rows_csv)
        f.seek(0)
        buffer = io.StringIO(f.read())
        reader = csv.reader(buffer)
        return reader
class MockedApiWorkflowSetup(unittest.TestCase):
    """Base TestCase that wires a fully mocked ApiWorkflowClient into the test."""

    EMBEDDINGS_FILENAME_BASE: str = 'sample'

    def setUp(self, token="token_xyz", dataset_id="dataset_id_xyz") -> None:
        self.api_workflow_client = MockedApiWorkflowClient(
            token=token, dataset_id=dataset_id)
| [
"lightly.openapi_generated.swagger_client.models.write_csv_url_data.WriteCSVUrlData",
"lightly.openapi_generated.swagger_client.models.datasource_raw_samples_data.DatasourceRawSamplesData",
"lightly.openapi_generated.swagger_client.api.mappings_api.MappingsApi.__init__",
"lightly.openapi_generated.swagger_cli... | [((17020, 17030), 'requests.Response', 'Response', ([], {}), '()\n', (17028, 17030), False, 'from requests import Response\n'), ((3407, 3458), 'lightly.openapi_generated.swagger_client.api.embeddings_api.EmbeddingsApi.__init__', 'EmbeddingsApi.__init__', (['self'], {'api_client': 'api_client'}), '(self, api_client=api_client)\n', (3429, 3458), False, 'from lightly.openapi_generated.swagger_client.api.embeddings_api import EmbeddingsApi\n'), ((4070, 4166), 'lightly.openapi_generated.swagger_client.models.write_csv_url_data.WriteCSVUrlData', 'WriteCSVUrlData', ([], {'signed_write_url': '"""signed_write_url_valid"""', 'embedding_id': '"""embedding_id_xyz"""'}), "(signed_write_url='signed_write_url_valid', embedding_id=\n 'embedding_id_xyz')\n", (4085, 4166), False, 'from lightly.openapi_generated.swagger_client.models.write_csv_url_data import WriteCSVUrlData\n'), ((5100, 5127), 'lightly.openapi_generated.swagger_client.models.async_task_data.AsyncTaskData', 'AsyncTaskData', ([], {'job_id': '"""155"""'}), "(job_id='155')\n", (5113, 5127), False, 'from lightly.openapi_generated.swagger_client.models.async_task_data import AsyncTaskData\n'), ((5260, 5299), 'lightly.openapi_generated.swagger_client.api.jobs_api.JobsApi.__init__', 'JobsApi.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (5276, 5299), False, 'from lightly.openapi_generated.swagger_client.api.jobs_api import JobsApi\n'), ((6245, 6275), 'lightly.openapi_generated.swagger_client.CreateEntityResponse', 'CreateEntityResponse', ([], {'id': '"""xyz"""'}), "(id='xyz')\n", (6265, 6275), False, 'from lightly.openapi_generated.swagger_client import ScoresApi, CreateEntityResponse, SamplesApi, SampleCreateRequest, InitialTagCreateRequest, ApiClient, VersioningApi, QuotaApi, TagArithmeticsRequest, TagBitMaskResponse, SampleWriteUrls, SampleData, Trigger2dEmbeddingJobRequest, SampleUpdateRequest\n'), ((8749, 8788), 
'lightly.openapi_generated.swagger_client.TagBitMaskResponse', 'TagBitMaskResponse', ([], {'bit_mask_data': '"""0x2"""'}), "(bit_mask_data='0x2')\n", (8767, 8788), False, 'from lightly.openapi_generated.swagger_client import ScoresApi, CreateEntityResponse, SamplesApi, SampleCreateRequest, InitialTagCreateRequest, ApiClient, VersioningApi, QuotaApi, TagArithmeticsRequest, TagBitMaskResponse, SampleWriteUrls, SampleData, Trigger2dEmbeddingJobRequest, SampleUpdateRequest\n'), ((9299, 9344), 'lightly.openapi_generated.swagger_client.CreateEntityResponse', 'CreateEntityResponse', ([], {'id': '"""sampled_tag_id_xyz"""'}), "(id='sampled_tag_id_xyz')\n", (9319, 9344), False, 'from lightly.openapi_generated.swagger_client import ScoresApi, CreateEntityResponse, SamplesApi, SampleCreateRequest, InitialTagCreateRequest, ApiClient, VersioningApi, QuotaApi, TagArithmeticsRequest, TagBitMaskResponse, SampleWriteUrls, SampleData, Trigger2dEmbeddingJobRequest, SampleUpdateRequest\n'), ((9512, 9555), 'lightly.openapi_generated.swagger_client.api.mappings_api.MappingsApi.__init__', 'MappingsApi.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (9532, 9555), False, 'from lightly.openapi_generated.swagger_client.api.mappings_api import MappingsApi\n'), ((10753, 10783), 'lightly.openapi_generated.swagger_client.CreateEntityResponse', 'CreateEntityResponse', ([], {'id': '"""xyz"""'}), "(id='xyz')\n", (10773, 10783), False, 'from lightly.openapi_generated.swagger_client import ScoresApi, CreateEntityResponse, SamplesApi, SampleCreateRequest, InitialTagCreateRequest, ApiClient, VersioningApi, QuotaApi, TagArithmeticsRequest, TagBitMaskResponse, SampleWriteUrls, SampleData, Trigger2dEmbeddingJobRequest, SampleUpdateRequest\n'), ((11487, 11534), 'lightly.openapi_generated.swagger_client.SampleWriteUrls', 'SampleWriteUrls', ([], {'full': 'full_url', 'thumb': 'thumb_url'}), '(full=full_url, thumb=thumb_url)\n', (11502, 11534), False, 'from 
lightly.openapi_generated.swagger_client import ScoresApi, CreateEntityResponse, SamplesApi, SampleCreateRequest, InitialTagCreateRequest, ApiClient, VersioningApi, QuotaApi, TagArithmeticsRequest, TagBitMaskResponse, SampleWriteUrls, SampleData, Trigger2dEmbeddingJobRequest, SampleUpdateRequest\n'), ((12730, 12757), 'lightly.openapi_generated.swagger_client.CreateEntityResponse', 'CreateEntityResponse', ([], {'id': 'id'}), '(id=id)\n', (12750, 12757), False, 'from lightly.openapi_generated.swagger_client import ScoresApi, CreateEntityResponse, SamplesApi, SampleCreateRequest, InitialTagCreateRequest, ApiClient, VersioningApi, QuotaApi, TagArithmeticsRequest, TagBitMaskResponse, SampleWriteUrls, SampleData, Trigger2dEmbeddingJobRequest, SampleUpdateRequest\n'), ((14124, 14147), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (14135, 14147), False, 'from collections import defaultdict\n'), ((14171, 14205), 'collections.defaultdict', 'defaultdict', (['self._default_samples'], {}), '(self._default_samples)\n', (14182, 14205), False, 'from collections import defaultdict\n'), ((14891, 14943), 'lightly.openapi_generated.swagger_client.models.datasource_processed_until_timestamp_response.DatasourceProcessedUntilTimestampResponse', 'DatasourceProcessedUntilTimestampResponse', (['timestamp'], {}), '(timestamp)\n', (14932, 14943), False, 'from lightly.openapi_generated.swagger_client.models.datasource_processed_until_timestamp_response import DatasourceProcessedUntilTimestampResponse\n'), ((15713, 15736), 'json.dumps', 'json.dumps', (['cursor_dict'], {}), '(cursor_dict)\n', (15723, 15736), False, 'import json\n'), ((15789, 15861), 'lightly.openapi_generated.swagger_client.models.datasource_raw_samples_data.DatasourceRawSamplesData', 'DatasourceRawSamplesData', ([], {'has_more': 'has_more', 'cursor': 'cursor', 'data': 'samples'}), '(has_more=has_more, cursor=cursor, data=samples)\n', (15813, 15861), False, 'from 
lightly.openapi_generated.swagger_client.models.datasource_raw_samples_data import DatasourceRawSamplesData\n'), ((18198, 18247), 'lightly.api.api_workflow_client.ApiWorkflowClient.__init__', 'ApiWorkflowClient.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (18224, 18247), False, 'from lightly.api.api_workflow_client import ApiWorkflowClient\n'), ((19416, 19426), 'requests.Response', 'Response', ([], {}), '()\n', (19424, 19426), False, 'from requests import Response\n'), ((20087, 20127), 'tempfile.SpooledTemporaryFile', 'tempfile.SpooledTemporaryFile', ([], {'mode': '"""rw"""'}), "(mode='rw')\n", (20116, 20127), False, 'import tempfile\n'), ((20145, 20158), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (20155, 20158), False, 'import csv\n'), ((20268, 20286), 'csv.reader', 'csv.reader', (['buffer'], {}), '(buffer)\n', (20278, 20286), False, 'import csv\n'), ((3499, 3609), 'lightly.openapi_generated.swagger_client.models.dataset_embedding_data.DatasetEmbeddingData', 'DatasetEmbeddingData', ([], {'id': '"""embedding_id_xyz"""', 'name': '"""embedding_name_xxyyzz"""', 'is_processed': '(True)', 'created_at': '(0)'}), "(id='embedding_id_xyz', name='embedding_name_xxyyzz',\n is_processed=True, created_at=0)\n", (3519, 3609), False, 'from lightly.openapi_generated.swagger_client.models.dataset_embedding_data import DatasetEmbeddingData\n'), ((3698, 3797), 'lightly.openapi_generated.swagger_client.models.dataset_embedding_data.DatasetEmbeddingData', 'DatasetEmbeddingData', ([], {'id': '"""embedding_id_xyz_2"""', 'name': '"""default"""', 'is_processed': '(True)', 'created_at': '(0)'}), "(id='embedding_id_xyz_2', name='default', is_processed=\n True, created_at=0)\n", (3718, 3797), False, 'from lightly.openapi_generated.swagger_client.models.dataset_embedding_data import DatasetEmbeddingData\n'), ((5472, 5548), 'lightly.openapi_generated.swagger_client.models.job_status_data_result.JobStatusDataResult', 'JobStatusDataResult', ([], {'type': 
'JobResultType.SAMPLING', 'data': '"""sampling_tag_id_xyz"""'}), "(type=JobResultType.SAMPLING, data='sampling_tag_id_xyz')\n", (5491, 5548), False, 'from lightly.openapi_generated.swagger_client.models.job_status_data_result import JobStatusDataResult\n'), ((5573, 5705), 'lightly.openapi_generated.swagger_client.models.job_status_data.JobStatusData', 'JobStatusData', ([], {'id': '"""id_"""', 'status': 'JobState.FINISHED', 'wait_time_till_next_poll': '(0)', 'created_at': '(1234)', 'finished_at': '(1357)', 'result': 'result'}), "(id='id_', status=JobState.FINISHED, wait_time_till_next_poll=\n 0, created_at=1234, finished_at=1357, result=result)\n", (5586, 5705), False, 'from lightly.openapi_generated.swagger_client.models.job_status_data import JobStatusData\n'), ((5803, 5920), 'lightly.openapi_generated.swagger_client.models.job_status_data.JobStatusData', 'JobStatusData', ([], {'id': '"""id_"""', 'status': 'JobState.RUNNING', 'wait_time_till_next_poll': '(0.001)', 'created_at': '(1234)', 'result': 'result'}), "(id='id_', status=JobState.RUNNING, wait_time_till_next_poll=\n 0.001, created_at=1234, result=result)\n", (5816, 5920), False, 'from lightly.openapi_generated.swagger_client.models.job_status_data import JobStatusData\n'), ((8474, 8513), 'lightly.openapi_generated.swagger_client.TagBitMaskResponse', 'TagBitMaskResponse', ([], {'bit_mask_data': '"""0x2"""'}), "(bit_mask_data='0x2')\n", (8492, 8513), False, 'from lightly.openapi_generated.swagger_client import ScoresApi, CreateEntityResponse, SamplesApi, SampleCreateRequest, InitialTagCreateRequest, ApiClient, VersioningApi, QuotaApi, TagArithmeticsRequest, TagBitMaskResponse, SampleWriteUrls, SampleData, Trigger2dEmbeddingJobRequest, SampleUpdateRequest\n'), ((8547, 8596), 'lightly.openapi_generated.swagger_client.CreateEntityResponse', 'CreateEntityResponse', ([], {'id': '"""tag-arithmetic-created"""'}), "(id='tag-arithmetic-created')\n", (8567, 8596), False, 'from lightly.openapi_generated.swagger_client 
import ScoresApi, CreateEntityResponse, SamplesApi, SampleCreateRequest, InitialTagCreateRequest, ApiClient, VersioningApi, QuotaApi, TagArithmeticsRequest, TagBitMaskResponse, SampleWriteUrls, SampleData, Trigger2dEmbeddingJobRequest, SampleUpdateRequest\n'), ((10335, 10435), 'lightly.openapi_generated.swagger_client.SampleData', 'SampleData', ([], {'id': 'f"""{i}_xyz"""', 'dataset_id': '"""dataset_id_xyz"""', 'file_name': 'body.file_name', 'type': '"""Images"""'}), "(id=f'{i}_xyz', dataset_id='dataset_id_xyz', file_name=body.\n file_name, type='Images')\n", (10345, 10435), False, 'from lightly.openapi_generated.swagger_client import ScoresApi, CreateEntityResponse, SamplesApi, SampleCreateRequest, InitialTagCreateRequest, ApiClient, VersioningApi, QuotaApi, TagArithmeticsRequest, TagBitMaskResponse, SampleWriteUrls, SampleData, Trigger2dEmbeddingJobRequest, SampleUpdateRequest\n'), ((11853, 12004), 'lightly.openapi_generated.swagger_client.models.dataset_data.DatasetData', 'DatasetData', ([], {'name': 'f"""dataset_{i}"""', 'id': 'f"""dataset_{i}_id"""', 'last_modified_at': 'i', 'type': '""""""', 'img_type': '"""full"""', 'size_in_bytes': '(-1)', 'n_samples': '(-1)', 'created_at': '(-1)'}), "(name=f'dataset_{i}', id=f'dataset_{i}_id', last_modified_at=i,\n type='', img_type='full', size_in_bytes=-1, n_samples=-1, created_at=-1)\n", (11864, 12004), False, 'from lightly.openapi_generated.swagger_client.models.dataset_data import DatasetData\n'), ((14268, 14339), 'lightly.openapi_generated.swagger_client.models.datasource_raw_samples_data_row.DatasourceRawSamplesDataRow', 'DatasourceRawSamplesDataRow', ([], {'file_name': 'f"""file_{i}"""', 'read_url': 'f"""url_{i}"""'}), "(file_name=f'file_{i}', read_url=f'url_{i}')\n", (14295, 14339), False, 'from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_data_row import DatasourceRawSamplesDataRow\n'), ((15413, 15431), 'json.loads', 'json.loads', (['cursor'], {}), '(cursor)\n', (15423, 15431), False, 
'import json\n'), ((13805, 13853), 'lightly.openapi_generated.swagger_client.models.datasource_config_base.DatasourceConfigBase', 'DatasourceConfigBase', ([], {'type': '"""LOCAL"""', 'full_path': '""""""'}), "(type='LOCAL', full_path='')\n", (13825, 13853), False, 'from lightly.openapi_generated.swagger_client.models.datasource_config_base import DatasourceConfigBase\n'), ((13891, 13939), 'lightly.openapi_generated.swagger_client.models.datasource_config_base.DatasourceConfigBase', 'DatasourceConfigBase', ([], {'type': '"""AZURE"""', 'full_path': '""""""'}), "(type='AZURE', full_path='')\n", (13911, 13939), False, 'from lightly.openapi_generated.swagger_client.models.datasource_config_base import DatasourceConfigBase\n'), ((14640, 14654), 'lightly.openapi_generated.swagger_client.rest.ApiException', 'ApiException', ([], {}), '()\n', (14652, 14654), False, 'from lightly.openapi_generated.swagger_client.rest import ApiException\n'), ((19868, 19891), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (19885, 19891), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
import os, sys
# Parse the command line: the single argument is the folder with the logs.
# Bug fixed: the original only printed the usage message and then crashed
# with an IndexError on sys.argv[1]; exit cleanly instead.
if len( sys.argv ) < 2:
    print( "Please specify the folder containing the benchmark logs!" )
    sys.exit( 1 )
folder = sys.argv[1]
suffix = '-full-first-read.log'
# Collect every per-compiler benchmark log inside the given folder.
benchmarkLogs = [ os.path.join( folder, file ) for file in os.listdir( folder ) if file.endswith( suffix ) ]
# Return compilerName and lists of min, max, avg values per commit
def loadData( filePath ):
    """Parse one benchmark log and return (label, commits, min, avg, max).

    The label is the file name with the module-level ``suffix`` stripped;
    the three timing arrays hold one entry per unique commit.
    """
    label = filePath.split( '/' )[-1]
    if label.endswith( suffix ):
        label = label[:-len( suffix )]

    commits = []
    minTimes = []
    avgTimes = []
    maxTimes = []
    with open( filePath, 'rt' ) as file:
        for line in file:
            tokens = line.split( ' ' )
            if len( tokens ) < 3:
                continue
            # The commit hash sits inside the second bracket pair of the
            # first token, e.g. [2020-12-06T21-36][bccbedc] -> bccbedc.
            commit = tokens[0][20:-1]
            if commit in commits:
                continue
            commits.append( commit )
            if '+-' in tokens:
                # version 1: [stamp][hash] min <= avg +- stddev <= max at version unknown
                minTimes.append( float( tokens[1] ) )
                avgTimes.append( float( tokens[3] ) )
                maxTimes.append( float( tokens[7] ) )
            else:
                # version 2: [stamp][hash] t1 t2 t3 ... (raw repetitions)
                times = np.array( [ float( t ) for t in tokens[1:] ] )
                minTimes.append( np.min( times ) )
                avgTimes.append( np.mean( times ) )
                maxTimes.append( np.max( times ) )
    return label, commits, np.array( minTimes ), np.array( avgTimes ), np.array( maxTimes )
# Plot the per-commit runtimes of each compiler as one error-bar series.
fig = plt.figure( figsize = ( 10, 6 ) )
ax = fig.add_subplot( 111, ylabel = "Runtime in seconds",
                      xlabel = "Commits from oldest to newest" )
ax.set_title( "Decoding 128MiB of random data compressed to BZ2" )

for logFile in benchmarkLogs:
    label, commits, minTimes, avgTimes, maxTimes = loadData( logFile )
    lowerErrors = avgTimes - minTimes
    upperErrors = maxTimes - avgTimes
    ax.errorbar( np.arange( len( commits ) ), avgTimes,
                 yerr = ( lowerErrors, upperErrors ),
                 linestyle = ':',
                 marker = 'o' if 'clang' in label else 'v',
                 capsize = 4, label = label )

ax.legend( loc = 'center right', bbox_to_anchor = ( 1.3, 0.5 ) )
ax.set_xticklabels( [] )  # commit hashes would overlap, so hide tick labels
fig.tight_layout()
for extension in [ 'pdf', 'png' ]:
    fig.savefig( "benchmark-commits-per-compiler." + extension )
plt.show()
| [
"numpy.mean",
"os.listdir",
"os.path.join",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.min",
"matplotlib.pyplot.show"
] | [((1634, 1661), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (1644, 1661), True, 'import matplotlib.pyplot as plt\n'), ((2475, 2485), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2483, 2485), True, 'import matplotlib.pyplot as plt\n'), ((258, 284), 'os.path.join', 'os.path.join', (['folder', 'file'], {}), '(folder, file)\n', (270, 284), False, 'import os, sys\n'), ((299, 317), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (309, 317), False, 'import os, sys\n'), ((1562, 1580), 'numpy.array', 'np.array', (['minTimes'], {}), '(minTimes)\n', (1570, 1580), True, 'import numpy as np\n'), ((1584, 1602), 'numpy.array', 'np.array', (['avgTimes'], {}), '(avgTimes)\n', (1592, 1602), True, 'import numpy as np\n'), ((1606, 1624), 'numpy.array', 'np.array', (['maxTimes'], {}), '(maxTimes)\n', (1614, 1624), True, 'import numpy as np\n'), ((1419, 1432), 'numpy.min', 'np.min', (['times'], {}), '(times)\n', (1425, 1432), True, 'import numpy as np\n'), ((1467, 1481), 'numpy.mean', 'np.mean', (['times'], {}), '(times)\n', (1474, 1481), True, 'import numpy as np\n'), ((1516, 1529), 'numpy.max', 'np.max', (['times'], {}), '(times)\n', (1522, 1529), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from future.utils import iteritems
import numpy as np
from pandas import DataFrame, Series
import scipy.sparse as sparse
from sqlalchemy.sql import bindparam, select
from .features import get_span_feats
from .models import (
GoldLabel, GoldLabelKey, Label, LabelKey, Feature, FeatureKey, Candidate,
Marginal
)
from .models.meta import new_sessionmaker
from .udf import UDF, UDFRunner
from .utils import (
matrix_conflicts,
matrix_coverage,
matrix_overlaps,
matrix_tp,
matrix_fp,
matrix_fn,
matrix_tn
)
class csr_AnnotationMatrix(sparse.csr_matrix):
    """
    An extension of the scipy.sparse.csr_matrix class for holding sparse annotation matrices
    and related helper methods.

    Extra bookkeeping maps matrix rows to candidate ids and matrix columns to
    annotation-key ids (and back), and is kept consistent under slicing.
    """
    def __init__(self, arg1, **kwargs):
        # Note: Currently these need to return None if unset, otherwise matrix copy operations break...
        self.candidate_index = kwargs.pop('candidate_index', None)  # candidate id -> row
        self.row_index = kwargs.pop('row_index', None)              # row -> candidate id
        self.annotation_key_cls = kwargs.pop('annotation_key_cls', None)
        self.key_index = kwargs.pop('key_index', None)              # key id -> column
        self.col_index = kwargs.pop('col_index', None)              # column -> key id
        # Note that scipy relies on the first three letters of the class to define matrix type...
        super(csr_AnnotationMatrix, self).__init__(arg1, **kwargs)

    def get_candidate(self, session, i):
        """Return the Candidate object corresponding to row i"""
        return session.query(Candidate).filter(Candidate.id == self.row_index[i]).one()

    def get_row_index(self, candidate):
        """Return the row index of the Candidate"""
        return self.candidate_index[candidate.id]

    def get_key(self, session, j):
        """Return the AnnotationKey object corresponding to column j"""
        return session.query(self.annotation_key_cls)\
            .filter(self.annotation_key_cls.id == self.col_index[j]).one()

    def get_col_index(self, key):
        """Return the column index of the AnnotationKey"""
        return self.key_index[key.id]

    def _get_sliced_indexes(self, s, axis, index, inv_index):
        """
        Remaps the indexes between matrix rows/cols and candidates/keys.
        Note: This becomes a massive performance bottleneck if not implemented
        properly, so be careful of changing!
        """
        if isinstance(s, slice):
            # Check for empty slice
            if s.start is None and s.stop is None:
                return index, inv_index
            else:
                idxs = np.arange(self.shape[axis])[s]
        elif isinstance(s, int):
            idxs = np.array([s])
        else:  # s is an array of ints
            idxs = s
        # If s is the entire slice, skip the remapping step
        if np.array_equal(idxs, list(range(len(idxs)))):
            return index, inv_index
        index_new, inv_index_new = {}, {}
        for i_new, i in enumerate(idxs):
            k = index[i]
            index_new[i_new] = k
            inv_index_new[k] = i_new
        return index_new, inv_index_new

    def __getitem__(self, key):
        X = super(csr_AnnotationMatrix, self).__getitem__(key)
        # If X is an integer or float value, just return it.
        # Bug fixed: ``np.float`` was a deprecated alias for the builtin
        # ``float`` and was removed in NumPy 1.24; using ``float`` directly
        # is semantically identical and works on all NumPy versions.
        if type(X) in [int, float] or issubclass(type(X), np.integer)\
            or issubclass(type(X), float):
            return X
        # If X is a matrix, make sure it stays a csr_AnnotationMatrix
        elif not isinstance(X, csr_AnnotationMatrix):
            X = csr_AnnotationMatrix(X)
        # X must be a matrix, so update appropriate csr_AnnotationMatrix fields
        X.annotation_key_cls = self.annotation_key_cls
        row_slice, col_slice = self._unpack_index(key)
        X.row_index, X.candidate_index = self._get_sliced_indexes(
            row_slice, 0, self.row_index, self.candidate_index)
        X.col_index, X.key_index = self._get_sliced_indexes(
            col_slice, 1, self.col_index, self.key_index)
        return X

    def stats(self):
        """Return summary stats about the annotations"""
        raise NotImplementedError()
try:
    class csr_LabelMatrix(csr_AnnotationMatrix):
        def lf_stats(self, session, labels=None, est_accs=None):
            """Returns a pandas DataFrame with the LFs and various per-LF statistics

            :param session: DB session used to resolve LF (key) names
            :param labels: optional gold labels (dense array or sparse matrix);
                when given, TP/FP/FN/TN counts and empirical accuracy are added
            :param est_accs: optional learned accuracies, one per LF
                (assumed to be a pandas Series since its .index is reassigned
                below -- TODO confirm at the call sites)
            """
            lf_names = [self.get_key(session, j).name for j in range(self.shape[1])]
            # Default LF stats
            col_names = ['j', 'Coverage', 'Overlaps', 'Conflicts']
            d = {
                'j' : list(range(self.shape[1])),
                'Coverage' : Series(data=matrix_coverage(self), index=lf_names),
                'Overlaps' : Series(data=matrix_overlaps(self), index=lf_names),
                'Conflicts' : Series(data=matrix_conflicts(self), index=lf_names)
            }
            if labels is not None:
                col_names.extend(['TP', 'FP', 'FN', 'TN', 'Empirical Acc.'])
                # Flatten the gold labels to a 1-d array, densifying if sparse
                ls = np.ravel(labels.todense() if sparse.issparse(labels) else labels)
                tp = matrix_tp(self, ls)
                fp = matrix_fp(self, ls)
                fn = matrix_fn(self, ls)
                tn = matrix_tn(self, ls)
                ac = (tp+tn) / (tp+tn+fp+fn)
                d['Empirical Acc.'] = Series(data=ac, index=lf_names)
                d['TP'] = Series(data=tp, index=lf_names)
                d['FP'] = Series(data=fp, index=lf_names)
                d['FN'] = Series(data=fn, index=lf_names)
                d['TN'] = Series(data=tn, index=lf_names)
            if est_accs is not None:
                col_names.append('Learned Acc.')
                d['Learned Acc.'] = est_accs
                d['Learned Acc.'].index = lf_names
            # col_names fixes the column order of the resulting DataFrame
            return DataFrame(data=d, index=lf_names)[col_names]
    # This is a hack for getting the documentation to build...
except:
    class csr_LabelMatrix(object):
        def lf_stats(self, session, labels=None, est_accs=None):
            # Stub used when the real pandas-backed implementation cannot be
            # defined (e.g. while building documentation).
            return None
class Annotator(UDFRunner):
    """Abstract class for annotating candidates and persisting these annotations to DB"""
    def __init__(self, annotation_class, annotation_key_class, f_gen):
        # Concrete Annotation model (e.g. Label, Feature) and the matching
        # AnnotationKey model (e.g. LabelKey, FeatureKey).
        self.annotation_class = annotation_class
        self.annotation_key_class = annotation_key_class
        # f_gen is a generator function yielding (key_name, value) pairs for
        # a candidate; it is executed by the AnnotatorUDF workers.
        super(Annotator, self).__init__(AnnotatorUDF,
                                        annotation_class=annotation_class,
                                        annotation_key_class=annotation_key_class,
                                        f_gen=f_gen)
    def apply(self, split=0, key_group=0, replace_key_set=True, cids_query=None,
        **kwargs):
        """Run the annotator over a split of Candidates and return the matrix.

        :param split: Candidate split to annotate (used when cids_query is None)
        :param key_group: group id of the AnnotationKeys to use/create
        :param replace_key_set: if True, a fresh AnnotationKey set is created
        :param cids_query: optional query yielding the candidate ids to annotate
        """
        # If we are replacing the key set, make sure the reducer key id cache is cleared!
        if replace_key_set:
            self.reducer.key_cache = {}
        # Get the cids based on the split, and also the count
        SnorkelSession = new_sessionmaker()
        session = SnorkelSession()
        cids_query = cids_query or session.query(Candidate.id)\
                                          .filter(Candidate.split == split)
        # Note: In the current UDFRunner implementation, we load all these into memory and fill a
        # multiprocessing JoinableQueue with them before starting... so might as well load them here and pass in.
        # Also, if we try to pass in a query iterator instead, with AUTOCOMMIT on, we get a TXN error...
        cids = cids_query.all()
        cids_count = len(cids)
        # Run the Annotator
        super(Annotator, self).apply(cids, split=split, key_group=key_group,
                                     replace_key_set=replace_key_set, cids_query=cids_query,
                                     count=cids_count, **kwargs)
        # Load the matrix
        return self.load_matrix(session, split=split, cids_query=cids_query,
                                key_group=key_group)
    def clear(self, session, split=0, key_group=0, replace_key_set=True,
        cids_query=None, **kwargs):
        """
        Deletes the Annotations for the Candidates in the given split.
        If replace_key_set=True, deletes *all* Annotations (of this Annotation sub-class)
        and also deletes all AnnotationKeys (of this sub-class)
        """
        query = session.query(self.annotation_class)
        # If replace_key_set=False, then we just delete the annotations for candidates in our split
        if not replace_key_set:
            sub_query = cids_query or session.query(Candidate.id)\
                                             .filter(Candidate.split == split)
            sub_query = sub_query.subquery()
            query = query.filter(self.annotation_class.candidate_id.in_(sub_query))
        query.delete(synchronize_session='fetch')
        # If we are creating a new key set, delete all old annotation keys
        if replace_key_set:
            query = session.query(self.annotation_key_class)
            query = query.filter(self.annotation_key_class.group == key_group)
            query.delete(synchronize_session='fetch')
    def apply_existing(self, split=0, key_group=0, cids_query=None, **kwargs):
        """Alias for apply that emphasizes we are using an existing AnnotatorKey set."""
        return self.apply(split=split, key_group=key_group,
                          replace_key_set=False, cids_query=cids_query, **kwargs)
    def load_matrix(self, session, split=0, key_group=0, cids_query=None,
        **kwargs):
        # Subclasses must return the csr_AnnotationMatrix for the given split.
        raise NotImplementedError()
class AnnotatorUDF(UDF):
    """UDF worker that generates annotations per-candidate (map phase) and
    persists them to the database (reduce phase)."""
    def __init__(self, annotation_class, annotation_key_class, f_gen, **kwargs):
        self.annotation_class = annotation_class
        self.annotation_key_class = annotation_key_class
        # AnnotatorUDF relies on a *generator function* which yields annotations
        # given a candidate input
        # NB: inspect.isgeneratorfunction is not sufficient to check if f_gen
        # is a generator (does not work with fns that wrap gen, e.g. partial)
        # So no check here at the moment...
        self.anno_generator = f_gen
        # For caching key ids during the reduce step
        self.key_cache = {}
        super(AnnotatorUDF, self).__init__(**kwargs)
    def apply(self, cid, **kwargs):
        """
        Applies a given function to a Candidate, yielding a set of Annotations as key_name, value pairs
        Note: Accepts a candidate _id_ as argument, because of issues with putting Candidate subclasses
        into Queues (can't pickle...)
        """
        seen = set()
        cid = cid[0]
        c = self.session.query(Candidate).filter(Candidate.id == cid).one()
        for key_name, value in self.anno_generator(c):
            # Note: Make sure no duplicates emitted here!
            if (cid, key_name) not in seen:
                seen.add((cid, key_name))
                yield cid, key_name, value
    def reduce(self, y, clear, key_group, replace_key_set, **kwargs):
        """
        Inserts Annotations into the database.
        For Annotations with unseen AnnotationKeys (in key_group, if not None), either adds these
        AnnotationKeys if create_new_keyset is True, else skips these Annotations.

        :param y: a (candidate_id, key_name, value) tuple emitted by apply()
        :param clear: if True, assume no pre-existing Annotations (insert only)
        :param key_group: AnnotationKey group id, or None
        :param replace_key_set: if True, unseen keys are inserted; otherwise
            only keys already present in the DB are used
        """
        cid, key_name, value = y
        # Prepares queries
        # Annotation updating only needs to be done if clear=False
        if not clear:
            anno_update_query = self.annotation_class.__table__.update()
            anno_update_query = anno_update_query.where(self.annotation_class.candidate_id == bindparam('cid'))
            anno_update_query = anno_update_query.where(self.annotation_class.key_id == bindparam('kid'))
            anno_update_query = anno_update_query.values(value=bindparam('value'))
        # We only need to insert AnnotationKeys if replace_key_set=True
        # Note that in current configuration, we never update AnnotationKeys!
        if replace_key_set:
            key_insert_query = self.annotation_key_class.__table__.insert()
        # If we are replacing the AnnotationKeys (replace_key_set=True), then we assume they will
        # all have been handled by *this* reduce thread, and hence be in the cache already
        # So we only need key select queries if replace_key_set=False
        else:
            key_select_query = select([self.annotation_key_class.id])\
                    .where(self.annotation_key_class.name == bindparam('name'))
            if key_group is not None:
                key_select_query = key_select_query.where(self.annotation_key_class.group == key_group)
        anno_insert_query = self.annotation_class.__table__.insert()
        # Check if the AnnotationKey already exists, and gets its id
        key_id = None
        if key_name in self.key_cache:
            key_id = self.key_cache[key_name]
        else:
            key_args = {'name': key_name, 'group': key_group} if key_group else {'name': key_name}
            # If we are replacing the AnnotationKeys (replace_key_set=True), then we assume they will
            # all have been handled by *this* reduce thread, and hence be in the cache already
            if not replace_key_set:
                key_id = self.session.execute(key_select_query, key_args).first()
                # Key not in cache but exists in DB; add to cache
                if key_id is not None:
                    key_id = key_id[0]
                    self.key_cache[key_name] = key_id
            # Key not in cache or DB; add to both if create_new_keyset = True
            elif replace_key_set:
                key_id = self.session.execute(key_insert_query, key_args).inserted_primary_key[0]
                self.key_cache[key_name] = key_id
        # If AnnotationKey does not exist and create_new_keyset = False, skip
        if key_id is not None:
            # Updates the Annotation, assuming one might already exist, if try_update = True
            if not clear:
                res = self.session.execute(anno_update_query, {'cid': cid, 'kid': key_id, 'value': value})
            # If Annotation does not exist, insert
            # NB: when clear=True, `res` is undefined, but the `clear or`
            # short-circuit guarantees res.rowcount is never evaluated then.
            if (clear or res.rowcount == 0) and value != 0:
                self.session.execute(anno_insert_query, {'candidate_id': cid, 'key_id': key_id, 'value': value})
def load_matrix(matrix_class, annotation_key_class, annotation_class, session,
    split=0, cids_query=None, key_group=0, key_names=None, zero_one=False,
    load_as_array=False, coerce_int=True):
    """
    Returns the annotations corresponding to a split of candidates with N members
    and an AnnotationKey group with M distinct keys as an N x M CSR sparse matrix.

    :param matrix_class: the csr_AnnotationMatrix subclass to instantiate
    :param annotation_key_class: AnnotationKey model (e.g. LabelKey)
    :param annotation_class: Annotation model (e.g. Label)
    :param split: candidate split to load (ignored when cids_query is given)
    :param cids_query: optional query yielding the candidate ids to load
    :param key_group: AnnotationKey group id used to select the columns
    :param key_names: optional iterable of key names to restrict the columns
    :param zero_one: if True, map values onto {0,1} (anything != 1 -> 0)
    :param load_as_array: if True, return a dense (squeezed) numpy array
    :param coerce_int: if True, cast annotation values to int
    """
    cid_query = cids_query or session.query(Candidate.id)\
        .filter(Candidate.split == split)
    # Deterministic row ordering
    cid_query = cid_query.order_by(Candidate.id)
    keys_query = session.query(annotation_key_class.id)
    keys_query = keys_query.filter(annotation_key_class.group == key_group)
    if key_names is not None:
        keys_query = keys_query.filter(annotation_key_class.name.in_(frozenset(key_names)))
    keys_query = keys_query.order_by(annotation_key_class.id)
    # First, we query to construct the row index map
    cid_to_row = {}
    row_to_cid = {}
    for cid, in cid_query.all():
        if cid not in cid_to_row:
            j = len(cid_to_row)
            # Create both mappings
            cid_to_row[cid] = j
            row_to_cid[j] = cid
    # Second, we query to construct the column index map
    kid_to_col = {}
    col_to_kid = {}
    for kid, in keys_query.all():
        if kid not in kid_to_col:
            j = len(kid_to_col)
            # Create both mappings
            kid_to_col[kid] = j
            col_to_kid[j] = kid
    # Create sparse matrix in COO format for incremental construction
    row = []
    columns = []
    data = []
    # Rely on the core for fast iteration
    annot_select_query = annotation_class.__table__.select()
    # Iteratively construct row index and output sparse matrix
    # Cycles through the entire table to load the data.
    # Performance may slow down based on table size; however, negligible since
    # it takes 8min to go through 245M rows (pretty fast).
    for res in session.execute(annot_select_query):
        # NOTE: The order of return seems to be switched in Python 3???
        # Either way, make sure the order is set here explicitly!
        cid, kid, val = res.candidate_id, res.key_id, res.value
        if cid in cid_to_row and kid in kid_to_col:
            # Optionally restricts val range to {0,1}, mapping -1 -> 0
            if zero_one:
                val = 1 if val == 1 else 0
            row.append(cid_to_row[cid])
            columns.append(kid_to_col[kid])
            if coerce_int:
                data.append(int(val))
            else:
                data.append(val)
    X = sparse.coo_matrix((data, (row, columns)), shape=(len(cid_to_row), len(kid_to_col)))
    # Return as an AnnotationMatrix
    Xr = matrix_class(X, candidate_index=cid_to_row, row_index=row_to_cid,
                     annotation_key_cls=annotation_key_class, key_index=kid_to_col,
                     col_index=col_to_kid)
    return np.squeeze(Xr.toarray()) if load_as_array else Xr
def load_label_matrix(session, **kwargs):
    """Load the csr_LabelMatrix of Label annotations; kwargs as in load_matrix."""
    return load_matrix(csr_LabelMatrix, LabelKey, Label, session, **kwargs)
def load_feature_matrix(session, **kwargs):
    """Load the csr_AnnotationMatrix of Feature annotations; kwargs as in load_matrix."""
    return load_matrix(csr_AnnotationMatrix, FeatureKey, Feature, session, **kwargs)
def load_gold_labels(session, annotator_name, **kwargs):
    """Load the csr_LabelMatrix of GoldLabels produced by one named annotator."""
    return load_matrix(csr_LabelMatrix, GoldLabelKey, GoldLabel, session, key_names=[annotator_name], **kwargs)
class LabelAnnotator(Annotator):
    """Apply labeling functions to the candidates, generating Label annotations
    :param lfs: A _list_ of labeling functions (LFs)
    :param label_generator: alternatively, a single function mapping a
        candidate to an iterable of (name, label) pairs
    """
    def __init__(self, lfs=None, label_generator=None):
        if lfs is not None:
            labels = lambda c : [(lf.__name__, lf(c)) for lf in lfs]
        elif label_generator is not None:
            labels = lambda c : label_generator(c)
        else:
            raise ValueError("Must provide lfs or label_generator kwarg.")
        # Convert lfs to a generator function
        # In particular, catch verbose values and convert to integer ones
        def f_gen(c):
            for lf_key, label in labels(c):
                # Note: We assume if the LF output is an int, it is already
                # mapped correctly
                if isinstance(label, int):
                    yield lf_key, label
                # None is a protected LF output value corresponding to 0,
                # representing LF abstaining
                elif label is None:
                    yield lf_key, 0
                # Otherwise the label must be one of the candidate's values;
                # map it onto the integer label space
                elif label in c.values:
                    if c.cardinality > 2:
                        yield lf_key, c.values.index(label) + 1
                    # Note: Would be nice to not special-case here, but for
                    # consistency we leave binary LF range as {-1,0,1}
                    else:
                        val = 1 if c.values.index(label) == 0 else -1
                        yield lf_key, val
                else:
                    raise ValueError("""
                    Unable to parse label with value %s
                    for candidate with values %s""" % (label, c.values))
        super(LabelAnnotator, self).__init__(Label, LabelKey, f_gen)
    def load_matrix(self, session, **kwargs):
        """Load the csr_LabelMatrix of Labels for a split of Candidates."""
        return load_label_matrix(session, **kwargs)
class FeatureAnnotator(Annotator):
    """Annotator that materializes Feature annotations for candidates.

    The feature generator defaults to ``get_span_feats``; any callable that
    yields (feature_name, value) pairs for a candidate may be supplied.
    """
    def __init__(self, f=get_span_feats):
        # Delegate all persistence wiring to the generic Annotator machinery.
        super(FeatureAnnotator, self).__init__(Feature, FeatureKey, f)
    def load_matrix(self, session, **kwargs):
        """Load the feature matrix for a split, keeping raw (float) values."""
        return load_feature_matrix(session, coerce_int=False, **kwargs)
def save_marginals(session, X, marginals, training=True):
    """Save marginal probabilities for a set of Candidates to db.
    :param X: Either an M x N csr_AnnotationMatrix-class matrix, where M
        is number of candidates, N number of LFs/features; OR a list of
        arbitrary objects with candidate ids accessible via a .id attrib
    :param marginals: A dense M x K matrix of marginal probabilities, where
        K is the cardinality of the candidates, OR a M-dim list/array if K=2.
    :param training: If True, these are training marginals / labels; else they
        are saved as end model predictions.
    Note: The marginals for k=0 are not stored, only for k = 1,...,K
    """
    # Make sure that we are working with a numpy array
    try:
        shape = marginals.shape
    except AttributeError:
        # Narrowed from a bare `except:`: only a missing .shape attribute
        # (e.g. a plain list) should trigger the ndarray coercion.
        marginals = np.array(marginals)
        shape = marginals.shape
    # Handle binary input as M x 1-dim array; assume elements represent
    # positive (k=1) class values
    if len(shape) == 1:
        marginals = np.vstack([1-marginals, marginals]).T
    # Only add values for classes k=1,...,K
    marginal_tuples = []
    for i in range(shape[0]):
        for k in range(1, shape[1] if len(shape) > 1 else 2):
            if marginals[i, k] > 0:
                marginal_tuples.append((i, k, marginals[i, k]))
    # NOTE: This will delete all existing marginals of type `training`
    session.query(Marginal).filter(Marginal.training == training).\
        delete(synchronize_session='fetch')
    # Prepare bulk INSERT query
    q = Marginal.__table__.insert()
    # Check whether X is an AnnotationMatrix or not
    anno_matrix = isinstance(X, csr_AnnotationMatrix)
    if not anno_matrix:
        X = list(X)
    # Prepare values
    insert_vals = []
    for i, k, p in marginal_tuples:
        cid = X.get_candidate(session, i).id if anno_matrix else X[i].id
        insert_vals.append({
            'candidate_id': cid,
            'training': training,
            'value': k,
            # We cast p in case its a numpy type, which psycopg2 does not handle
            'probability': float(p)
        })
    # Execute update
    session.execute(q, insert_vals)
    session.commit()
    print("Saved %s marginals" % len(marginals))
def load_marginals(session, X=None, split=0, cids_query=None, training=True):
    """Load the marginal probs. for a given split of Candidates

    :param X: optional AnnotationMatrix or list of Candidates; when given,
        the returned marginals are collated with X's candidate ordering
    :param split: Candidate split used when cids_query is None
    :param cids_query: optional query yielding the candidate ids to load
    :param training: load training marginals if True, else model predictions
    :returns: an (M x K) matrix of marginals, or an M-dim array when K=2
    """
    # For candidate ids subquery
    cids_query = cids_query or session.query(Candidate.id) \
                                      .filter(Candidate.split == split)
    # Ensure ordering by CID
    cids_query = cids_query.order_by(Candidate.id)
    cids_sub_query = cids_query.subquery('cids')
    # Load marginal tuples from db
    marginal_tuples = session.query(Marginal.candidate_id, Marginal.value,
                                    Marginal.probability) \
        .filter(Marginal.candidate_id == cids_sub_query.c.id) \
        .filter(Marginal.training == training) \
        .all()
    # If an AnnotationMatrix or list of candidates X is provided, we make sure
    # that the returned marginals are collated with X.
    if X is not None:
        # Handle AnnotationMatrix first; a plain list of Candidates lacks the
        # matrix attributes and raises AttributeError (narrowed from a bare
        # `except:` so unrelated errors are no longer silently swallowed).
        try:
            cardinality = X.get_candidate(session, 0).cardinality
            marginals = np.zeros((X.shape[0], cardinality))
            cid_map = X.candidate_index
        # Handle list of Candidates
        except AttributeError:
            cardinality = X[0].cardinality
            marginals = np.zeros((len(X), cardinality))
            cid_map = dict([(x.id, i) for i, x in enumerate(X)])
    # Otherwise if X is not provided, we sort by candidate id, using the
    # cids_query from above
    else:
        cardinality = session.query(Candidate) \
            .get(marginal_tuples[0][0]).cardinality
        marginals = np.zeros((cids_query.count(), cardinality))
        cid_map = dict([(cid, i) for i, (cid,) in enumerate(cids_query.all())])
    # Assemble the marginals matrix according to the candidate index of X
    for cid, k, p in marginal_tuples:
        marginals[cid_map[cid], k] = p
    # Add first column if k > 2, else ravel
    if cardinality > 2:
        row_sums = marginals.sum(axis=1)
        for i in range(marginals.shape[0]):
            marginals[i, 0] = 1 - row_sums[i]
    else:
        marginals = np.ravel(marginals[:, 1])
    return marginals
| [
"pandas.Series",
"sqlalchemy.sql.bindparam",
"pandas.DataFrame",
"sqlalchemy.sql.select",
"scipy.sparse.issparse",
"numpy.array",
"numpy.zeros",
"numpy.vstack",
"numpy.ravel",
"numpy.arange"
] | [((24435, 24460), 'numpy.ravel', 'np.ravel', (['marginals[:, 1]'], {}), '(marginals[:, 1])\n', (24443, 24460), True, 'import numpy as np\n'), ((20898, 20917), 'numpy.array', 'np.array', (['marginals'], {}), '(marginals)\n', (20906, 20917), True, 'import numpy as np\n'), ((21102, 21139), 'numpy.vstack', 'np.vstack', (['[1 - marginals, marginals]'], {}), '([1 - marginals, marginals])\n', (21111, 21139), True, 'import numpy as np\n'), ((23391, 23426), 'numpy.zeros', 'np.zeros', (['(X.shape[0], cardinality)'], {}), '((X.shape[0], cardinality))\n', (23399, 23426), True, 'import numpy as np\n'), ((2824, 2837), 'numpy.array', 'np.array', (['[s]'], {}), '([s])\n', (2832, 2837), True, 'import numpy as np\n'), ((5488, 5519), 'pandas.Series', 'Series', ([], {'data': 'ac', 'index': 'lf_names'}), '(data=ac, index=lf_names)\n', (5494, 5519), False, 'from pandas import DataFrame, Series\n'), ((5558, 5589), 'pandas.Series', 'Series', ([], {'data': 'tp', 'index': 'lf_names'}), '(data=tp, index=lf_names)\n', (5564, 5589), False, 'from pandas import DataFrame, Series\n'), ((5628, 5659), 'pandas.Series', 'Series', ([], {'data': 'fp', 'index': 'lf_names'}), '(data=fp, index=lf_names)\n', (5634, 5659), False, 'from pandas import DataFrame, Series\n'), ((5698, 5729), 'pandas.Series', 'Series', ([], {'data': 'fn', 'index': 'lf_names'}), '(data=fn, index=lf_names)\n', (5704, 5729), False, 'from pandas import DataFrame, Series\n'), ((5768, 5799), 'pandas.Series', 'Series', ([], {'data': 'tn', 'index': 'lf_names'}), '(data=tn, index=lf_names)\n', (5774, 5799), False, 'from pandas import DataFrame, Series\n'), ((6002, 6035), 'pandas.DataFrame', 'DataFrame', ([], {'data': 'd', 'index': 'lf_names'}), '(data=d, index=lf_names)\n', (6011, 6035), False, 'from pandas import DataFrame, Series\n'), ((2741, 2768), 'numpy.arange', 'np.arange', (['self.shape[axis]'], {}), '(self.shape[axis])\n', (2750, 2768), True, 'import numpy as np\n'), ((11696, 11712), 'sqlalchemy.sql.bindparam', 'bindparam', 
(['"""cid"""'], {}), "('cid')\n", (11705, 11712), False, 'from sqlalchemy.sql import bindparam, select\n'), ((11802, 11818), 'sqlalchemy.sql.bindparam', 'bindparam', (['"""kid"""'], {}), "('kid')\n", (11811, 11818), False, 'from sqlalchemy.sql import bindparam, select\n'), ((11883, 11901), 'sqlalchemy.sql.bindparam', 'bindparam', (['"""value"""'], {}), "('value')\n", (11892, 11901), False, 'from sqlalchemy.sql import bindparam, select\n'), ((12463, 12501), 'sqlalchemy.sql.select', 'select', (['[self.annotation_key_class.id]'], {}), '([self.annotation_key_class.id])\n', (12469, 12501), False, 'from sqlalchemy.sql import bindparam, select\n'), ((12576, 12593), 'sqlalchemy.sql.bindparam', 'bindparam', (['"""name"""'], {}), "('name')\n", (12585, 12593), False, 'from sqlalchemy.sql import bindparam, select\n'), ((5204, 5227), 'scipy.sparse.issparse', 'sparse.issparse', (['labels'], {}), '(labels)\n', (5219, 5227), True, 'import scipy.sparse as sparse\n')] |
"""
Class to construct parabolas from 3 points.
ADW: Need to move all of the plotting stuff
"""
import numpy
import scipy.stats
import scipy.interpolate
############################################################
class Parabola:
    """Parabola through the three points bracketing the maximum of a
    2*log(likelihood) curve, with helpers for profile/Bayesian upper
    limits and two-sided confidence intervals.
    ADW: Need to move all of the plotting stuff
    """
    def __init__(self, x, y):
        """
        INPUTS
        x = variable of interest
        y = 2 * log(likelihood)
        """
        # Sort the input so neighboring samples are adjacent in x
        argsort = numpy.argsort(x)
        self.x = numpy.array(x)[argsort]
        self.y = numpy.array(y)[argsort]
        index = numpy.argmax(self.y)
        # Use the three consecutive points centered on the maximum,
        # clamped to the array boundaries.
        if index == 0:
            index_0 = 0
            index_1 = 1
            index_2 = 2
        elif index == len(self.y) - 1:
            index_0 = len(self.y) - 3
            index_1 = len(self.y) - 2
            index_2 = len(self.y) - 1
        else:
            index_0 = index - 1
            index_1 = index
            index_2 = index + 1
        x_0 = self.x[index_0]
        x_1 = self.x[index_1]
        x_2 = self.x[index_2]
        y_0 = self.y[index_0]
        y_1 = self.y[index_1]
        y_2 = self.y[index_2]
        # Solve the 3x3 Vandermonde system for the coefficients of
        # y = p_2*x**2 + p_1*x + p_0.
        # numpy.linalg.solve on a plain ndarray replaces the deprecated
        # numpy.matrix class and the explicit matrix inverse.
        a = numpy.array([[x_0**2, x_0, 1.],
                         [x_1**2, x_1, 1.],
                         [x_2**2, x_2, 1.]])
        b = numpy.array([y_0, y_1, y_2])
        p = numpy.linalg.solve(a, b)
        self.p_2 = p[0]
        self.p_1 = p[1]
        self.p_0 = p[2]
        # Vertex
        self.vertex_x = -self.p_1 / (2. * self.p_2)
        self.vertex_y = self.p_0 - (self.p_1**2 / (4. * self.p_2))
    def __eq__(self, other):
        """Coefficient-wise equality within numpy.allclose tolerance."""
        return numpy.allclose([self.p_0, self.p_1, self.p_2], [other.p_0, other.p_1, other.p_2])
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        return "y = %.2g * x**2 + %.2g * x + %.2g"%(self.p_2, self.p_1, self.p_0)
    def __str__(self):
        return self.__repr__()
    def __call__(self, x):
        """
        Evaluate the parabola.
        """
        return (self.p_2 * x**2) + (self.p_1 * x) + self.p_0
    def densify(self, factor=10):
        """
        Increase the density of points along the parabolic curve.

        Each consecutive triple of input points is re-fit with a local
        parabola, which is then sampled `factor` times per interval.
        Returns the concatenated (x, y) arrays of the densified curve.
        """
        x = []
        y = []
        for ii in range(0, len(self.x) - 2):
            p = Parabola(self.x[ii: ii + 3], self.y[ii: ii + 3])
            x.append(numpy.linspace(self.x[ii], self.x[ii + 1], factor)[0: -1])
            y.append(p(x[-1]))
        # Final interval uses the parabola through the last three points
        p = Parabola(self.x[len(self.x) - 3:], self.y[len(self.y) - 3:])
        x.append(numpy.linspace(self.x[-2], self.x[-1], factor)[0: -1])
        y.append(p(x[-1]))
        # Close the curve with the final input point
        x.append([self.x[-1]])
        y.append([self.y[-1]])
        return numpy.concatenate(x), numpy.concatenate(y)
    def profileUpperLimit(self, delta = 2.71):
        """
        Compute one-sided upperlimit via profile method, i.e. the largest
        root of p(x) = vertex_y - delta (delta=2.71 corresponds to a 95%
        one-sided CL for a 2*log(likelihood) curve).
        """
        a = self.p_2
        b = self.p_1
        if self.vertex_x < 0:
            c = self.p_0 + delta
        else:
            c = self.p_0 - self.vertex_y + delta
        if b**2 - 4. * a * c < 0.:
            # Discriminant is negative: no real crossing, warn and bail out
            print('WARNING')
            print(a, b, c)
            return 0.
        return max((numpy.sqrt(b**2 - 4. * a * c) - b) / (2. * a),
                   (-1. * numpy.sqrt(b**2 - 4. * a * c) - b) / (2. * a))
    def bayesianUpperLimit(self, alpha, steps=1.e5, plot=False):
        """
        Compute one-sided upper limit using Bayesian Method of Helene.
        Several methods of increasing numerical stability have been implemented.

        NB: `steps` is cast to int before being handed to numpy.linspace;
        recent numpy versions reject a float sample count (the historical
        default of 1.e5 raised TypeError there).
        """
        x_dense, y_dense = self.densify()
        y_dense -= numpy.max(y_dense) # Numeric stability
        f = scipy.interpolate.interp1d(x_dense, y_dense, kind='linear')
        x = numpy.linspace(0., numpy.max(x_dense), int(steps))
        # Convert from 2*log(likelihood) to a (unnormalized) likelihood pdf
        pdf = numpy.exp(f(x) / 2.)
        cut = (pdf / numpy.max(pdf)) > 1.e-10
        x = x[cut]
        pdf = pdf[cut]
        cdf = numpy.cumsum(pdf)
        cdf /= cdf[-1]
        # Invert the CDF to read off the upper limit at probability alpha
        cdf_reflect = scipy.interpolate.interp1d(cdf, x)
        return cdf_reflect(alpha)
    def bayesianUpperLimit2(self, alpha, steps=1.e5, plot=False):
        """
        Compute one-sided upper limit using Bayesian Method of Helene.
        (Older variant retained for backwards compatibility.)
        """
        cut = ((self.y / 2.) > -30.) # Numeric stability
        try:
            f = scipy.interpolate.interp1d(self.x[cut], self.y[cut], kind='cubic')
        except Exception:
            # Cubic interpolation needs at least 4 points; fall back to linear
            f = scipy.interpolate.interp1d(self.x[cut], self.y[cut], kind='linear')
        x = numpy.linspace(0., numpy.max(self.x[cut]), int(steps))
        y = numpy.exp(f(x) / 2.)
        forbidden = numpy.nonzero((y / self.vertex_y) < 1.e-10)[0]
        if len(forbidden) > 0:
            index = forbidden[0] # Numeric stability
            x = x[0: index]
            y = y[0: index]
        cdf = numpy.cumsum(y)
        cdf /= cdf[-1]
        cdf_reflect = scipy.interpolate.interp1d(cdf, x)
        return cdf_reflect(alpha)
    def confidenceInterval(self, alpha=0.6827, steps=1.e5, plot=False):
        """
        Compute two-sided confidence interval by taking x-values corresponding to the largest PDF-values first.
        """
        x_dense, y_dense = self.densify()
        y_dense -= numpy.max(y_dense) # Numeric stability
        f = scipy.interpolate.interp1d(x_dense, y_dense, kind='linear')
        # ADW: Why does this start at 0, which often outside the input range?
        # Wouldn't starting at xmin be better:
        #x = numpy.linspace(numpy.min(x_dense), numpy.max(x_dense), int(steps))
        x = numpy.linspace(0., numpy.max(x_dense), int(steps))
        pdf = numpy.exp(f(x) / 2.)
        cut = (pdf / numpy.max(pdf)) > 1.e-10
        x = x[cut]
        pdf = pdf[cut]
        sorted_pdf_indices = numpy.argsort(pdf)[::-1] # Indices of PDF in descending value
        cdf = numpy.cumsum(pdf[sorted_pdf_indices])
        cdf /= cdf[-1]
        # Smallest set of highest-density points containing probability alpha
        sorted_pdf_index_max = numpy.argmin((cdf - alpha)**2)
        x_select = x[sorted_pdf_indices[0: sorted_pdf_index_max]]
        return numpy.min(x_select), numpy.max(x_select)
############################################################
def upperLimitsDeltaTS(confidence_level, one_sided=True, degrees_of_freedom=1):
    """Delta test-statistic (TS) threshold for a given confidence level.

    Scans TS values on a fixed grid over [0, 5] and returns the grid point
    whose halved chi-square survival probability best matches the requested
    confidence level. Two-sided requests are first converted to the
    equivalent one-sided level.
    """
    if not one_sided:
        confidence_level = 0.5*(confidence_level + 1.)
    # TS = Test Statistic; scan grid of 1000 points over [0, 5]
    grid = numpy.linspace(0, 5, 1000)
    tail_prob = 0.5 * scipy.stats.chi2.sf(grid, degrees_of_freedom)
    residual = (tail_prob - (1. - confidence_level))**2
    return grid[numpy.argmin(residual)]
############################################################
| [
"numpy.allclose",
"numpy.sqrt",
"numpy.argmax",
"numpy.min",
"numpy.max",
"numpy.argsort",
"numpy.array",
"numpy.linspace",
"numpy.linalg.inv",
"numpy.concatenate",
"numpy.nonzero",
"numpy.argmin",
"numpy.cumsum",
"numpy.matrix"
] | [((9052, 9092), 'numpy.linspace', 'numpy.linspace', (['ts_min', 'ts_max', 'ts_steps'], {}), '(ts_min, ts_max, ts_steps)\n', (9066, 9092), False, 'import numpy\n'), ((413, 429), 'numpy.argsort', 'numpy.argsort', (['x'], {}), '(x)\n', (426, 429), False, 'import numpy\n'), ((529, 549), 'numpy.argmax', 'numpy.argmax', (['self.y'], {}), '(self.y)\n', (541, 549), False, 'import numpy\n'), ((1122, 1207), 'numpy.matrix', 'numpy.matrix', (['[[x_0 ** 2, x_0, 1.0], [x_1 ** 2, x_1, 1.0], [x_2 ** 2, x_2, 1.0]]'], {}), '([[x_0 ** 2, x_0, 1.0], [x_1 ** 2, x_1, 1.0], [x_2 ** 2, x_2, 1.0]]\n )\n', (1134, 1207), False, 'import numpy\n'), ((1266, 1285), 'numpy.linalg.inv', 'numpy.linalg.inv', (['a'], {}), '(a)\n', (1282, 1285), False, 'import numpy\n'), ((1298, 1326), 'numpy.array', 'numpy.array', (['[y_0, y_1, y_2]'], {}), '([y_0, y_1, y_2])\n', (1309, 1326), False, 'import numpy\n'), ((1639, 1725), 'numpy.allclose', 'numpy.allclose', (['[self.p_0, self.p_1, self.p_2]', '[other.p_0, other.p_1, other.p_2]'], {}), '([self.p_0, self.p_1, self.p_2], [other.p_0, other.p_1, other\n .p_2])\n', (1653, 1725), False, 'import numpy\n'), ((4546, 4564), 'numpy.max', 'numpy.max', (['y_dense'], {}), '(y_dense)\n', (4555, 4564), False, 'import numpy\n'), ((5075, 5092), 'numpy.cumsum', 'numpy.cumsum', (['pdf'], {}), '(pdf)\n', (5087, 5092), False, 'import numpy\n'), ((6323, 6338), 'numpy.cumsum', 'numpy.cumsum', (['y'], {}), '(y)\n', (6335, 6338), False, 'import numpy\n'), ((7431, 7449), 'numpy.max', 'numpy.max', (['y_dense'], {}), '(y_dense)\n', (7440, 7449), False, 'import numpy\n'), ((8029, 8066), 'numpy.cumsum', 'numpy.cumsum', (['pdf[sorted_pdf_indices]'], {}), '(pdf[sorted_pdf_indices])\n', (8041, 8066), False, 'import numpy\n'), ((8121, 8153), 'numpy.argmin', 'numpy.argmin', (['((cdf - alpha) ** 2)'], {}), '((cdf - alpha) ** 2)\n', (8133, 8153), False, 'import numpy\n'), ((9194, 9209), 'numpy.argmin', 'numpy.argmin', (['y'], {}), '(y)\n', (9206, 9209), False, 'import numpy\n'), ((447, 
461), 'numpy.array', 'numpy.array', (['x'], {}), '(x)\n', (458, 461), False, 'import numpy\n'), ((488, 502), 'numpy.array', 'numpy.array', (['y'], {}), '(y)\n', (499, 502), False, 'import numpy\n'), ((1349, 1371), 'numpy.array', 'numpy.array', (['a_inverse'], {}), '(a_inverse)\n', (1360, 1371), False, 'import numpy\n'), ((2913, 2933), 'numpy.concatenate', 'numpy.concatenate', (['x'], {}), '(x)\n', (2930, 2933), False, 'import numpy\n'), ((2935, 2955), 'numpy.concatenate', 'numpy.concatenate', (['y'], {}), '(y)\n', (2952, 2955), False, 'import numpy\n'), ((4688, 4706), 'numpy.max', 'numpy.max', (['x_dense'], {}), '(x_dense)\n', (4697, 4706), False, 'import numpy\n'), ((5954, 5976), 'numpy.max', 'numpy.max', (['self.x[cut]'], {}), '(self.x[cut])\n', (5963, 5976), False, 'import numpy\n'), ((6122, 6162), 'numpy.nonzero', 'numpy.nonzero', (['(y / self.vertex_y < 1e-10)'], {}), '(y / self.vertex_y < 1e-10)\n', (6135, 6162), False, 'import numpy\n'), ((7573, 7591), 'numpy.max', 'numpy.max', (['x_dense'], {}), '(x_dense)\n', (7582, 7591), False, 'import numpy\n'), ((7953, 7971), 'numpy.argsort', 'numpy.argsort', (['pdf'], {}), '(pdf)\n', (7966, 7971), False, 'import numpy\n'), ((8693, 8712), 'numpy.min', 'numpy.min', (['x_select'], {}), '(x_select)\n', (8702, 8712), False, 'import numpy\n'), ((8714, 8733), 'numpy.max', 'numpy.max', (['x_select'], {}), '(x_select)\n', (8723, 8733), False, 'import numpy\n'), ((2564, 2610), 'numpy.linspace', 'numpy.linspace', (['self.x[-2]', 'self.x[-1]', 'factor'], {}), '(self.x[-2], self.x[-1], factor)\n', (2578, 2610), False, 'import numpy\n'), ((4771, 4785), 'numpy.max', 'numpy.max', (['pdf'], {}), '(pdf)\n', (4780, 4785), False, 'import numpy\n'), ((7856, 7870), 'numpy.max', 'numpy.max', (['pdf'], {}), '(pdf)\n', (7865, 7870), False, 'import numpy\n'), ((2383, 2433), 'numpy.linspace', 'numpy.linspace', (['self.x[ii]', 'self.x[ii + 1]', 'factor'], {}), '(self.x[ii], self.x[ii + 1], factor)\n', (2397, 2433), False, 'import numpy\n'), 
((3526, 3558), 'numpy.sqrt', 'numpy.sqrt', (['(b ** 2 - 4.0 * a * c)'], {}), '(b ** 2 - 4.0 * a * c)\n', (3536, 3558), False, 'import numpy\n'), ((3580, 3612), 'numpy.sqrt', 'numpy.sqrt', (['(b ** 2 - 4.0 * a * c)'], {}), '(b ** 2 - 4.0 * a * c)\n', (3590, 3612), False, 'import numpy\n')] |
"""Generate integer test vectors for a 3-layer fully-connected network.

Writes "<file_name>.csv" in the working directory with the rows:
    X  : x1 ... xN_inputlayer                 (random pixels, 0..255)
    W1 : one row per hidden-layer node        (random weights, 0..255)
    W2 : one row per output-layer node        (random weights, 0..255)
    Y  : y1 ... yN_outputlayer                (scaled network output)

Fixes over the original: the redundant triple numpy import (including
`from numpy import *`, which shadows builtins such as `sum`) is replaced by
a single `numpy as np` import, and `astype(integer)` — an abstract numpy
dtype whose implicit conversion is deprecated — is replaced by the concrete
`astype(np.int64)`.
"""
import csv

import numpy as np

# Network topology: input -> hidden -> output.
N_inputlayer = 28 * 28        # nodes in the input layer (28x28 image)
N_outputlayer = N_inputlayer  # nodes in the output layer
N_hiddenlayer = 200           # nodes in the hidden layer
file_name = "test_vectors"    # basename of the generated .csv file

# Input layer: 784 random pixels in range 0..255.
X = np.floor(np.random.rand(1, N_inputlayer) * 256)
# Input->hidden weights in range 0..255.
W1 = np.floor(np.random.rand(N_inputlayer, N_hiddenlayer) * 256)

# Hidden layer: weighted sum, rescaled back towards byte range.
H1 = np.floor(X.dot(W1) / 255)
# Hidden->output weights in range 0..255.
W2 = np.floor(np.random.rand(N_hiddenlayer, N_inputlayer) * 256)

# Output layer: weighted sum of hidden activations, rescaled.
Y = np.floor(H1.dot(W2) / 255)

# Write all vectors as integer rows to the .csv file.
with open(file_name + ".csv", 'w', newline='') as csvFile:
    writer = csv.writer(csvFile)
    for x in X:
        writer.writerow(x.astype(np.int64))
    for w1 in W1.transpose():
        writer.writerow(w1.astype(np.int64))
    for w2 in W2.transpose():
        writer.writerow(w2.astype(np.int64))
    for y in Y:
        writer.writerow(y.astype(np.int64))
| [
"csv.writer",
"numpy.random.rand"
] | [((1691, 1710), 'csv.writer', 'csv.writer', (['csvFile'], {}), '(csvFile)\n', (1701, 1710), False, 'import csv\n'), ((1109, 1130), 'numpy.random.rand', 'rand', (['(1)', 'N_inputlayer'], {}), '(1, N_inputlayer)\n', (1113, 1130), False, 'from numpy.random import rand\n'), ((1188, 1221), 'numpy.random.rand', 'rand', (['N_inputlayer', 'N_hiddenlayer'], {}), '(N_inputlayer, N_hiddenlayer)\n', (1192, 1221), False, 'from numpy.random import rand\n'), ((1366, 1399), 'numpy.random.rand', 'rand', (['N_hiddenlayer', 'N_inputlayer'], {}), '(N_hiddenlayer, N_inputlayer)\n', (1370, 1399), False, 'from numpy.random import rand\n')] |
# Evaluate a trained Keras semantic-segmentation model on ICBEB ECG test data
# and plot one example window (signal, expected labels, predicted labels).
from keras.utils import to_categorical
from keras.models import load_model
import matplotlib.pyplot as plt
'''lib loading error prevention'''
import os
# Allow duplicate OpenMP runtimes (works around MKL/libiomp clashes on some setups)
os.environ['KMP_DUPLICATE_LIB_OK']='True'
'''========================'''
'''tensorflow configuration'''
'''========================'''
# NOTE(review): tf.ConfigProto / tf.Session are TensorFlow 1.x APIs.
import tensorflow as tf
from keras import backend as K
num_cores = 48
num_CPU = 1
num_GPU = 1
config = tf.ConfigProto(intra_op_parallelism_threads=num_cores,\
        inter_op_parallelism_threads=num_cores, allow_soft_placement=True,\
        device_count = {'CPU' : num_CPU, 'GPU' : num_GPU})
session = tf.Session(config=config)
K.set_session(session)
'''scientific packages'''
import numpy as np
import pickle
import datetime
fs = 500 # sampling rate of mitdb
# Trained model weights (.h5) and training history produced by the training run.
model_h5 = 'icbeb2019_semantic_seg_2019_03_24_10_28_11.h5'
hist_rec = 'icbeb2019_semantic_seg_2019_03_24_10_28_11.hist'
'''load tmp data'''
# data: sequence of 1-D signals; label: matching per-sample annotations
# (presumably binary, given num_classes=2 below — TODO confirm).
(data, label) = pickle.load(open('icbeb_test.tmp', 'rb'))
'''global parameters'''
input_dim = 1   # samples fed to the network per step
output_dim = 1  # samples predicted per step
if input_dim < output_dim:
    # NOTE(review): this only prints; it does not actually quit the task.
    print('input_dim smaller than output_dim, quit task')
stride = output_dim
timestep = 0  # reassigned after the signals are windowed
# hyper params
batch_size = 40
epochs = 400
filter_size = 80
kernel_size = 4
dropout = 0.2
# stagging the signal: slide a window of input_dim samples over each record
x_train = []
for dat in data:
    seq = np.array([dat[i*stride:i*stride+input_dim] for i in range((len(dat)-input_dim)//stride)])
    x_train.append(seq)
y_train = []
for lb in label:
    # Centre an output_dim-wide label window inside each input window,
    # then one-hot encode to two classes.
    y = np.array([lb[i*stride+input_dim//2-output_dim//2:i*stride+input_dim//2 - output_dim//2 + output_dim] for i in range( (len(lb)-input_dim)//stride )])
    y = to_categorical(y, num_classes=2)
    y_train.append(y)
'''load model'''
model = load_model(model_h5)
timestep = len(x_train[0])
x_train = np.array(x_train)
y_train = np.array(y_train)
# Hold-out split: records from index 1800 onwards serve as the test set.
tested = x_train[1800:]
expected = y_train[1800:]
predicted = model.predict(tested)
# Per-sample class labels (argmax over the one-hot axis) for the third test record.
ex = [np.argmax(p) for p in expected[2]]
pr = [np.argmax(p) for p in predicted[2]]
plt.plot(tested[2])
plt.plot(ex)
plt.plot(pr)
# NOTE(review): trailing comma makes this statement a 1-tuple; harmless but likely unintended.
plt.legend(['signal', 'expected', 'predicted']),
plt.show()
| [
"keras.models.load_model",
"tensorflow.Session",
"matplotlib.pyplot.plot",
"keras.backend.set_session",
"numpy.argmax",
"keras.utils.to_categorical",
"numpy.array",
"tensorflow.ConfigProto",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((395, 571), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'intra_op_parallelism_threads': 'num_cores', 'inter_op_parallelism_threads': 'num_cores', 'allow_soft_placement': '(True)', 'device_count': "{'CPU': num_CPU, 'GPU': num_GPU}"}), "(intra_op_parallelism_threads=num_cores,\n inter_op_parallelism_threads=num_cores, allow_soft_placement=True,\n device_count={'CPU': num_CPU, 'GPU': num_GPU})\n", (409, 571), True, 'import tensorflow as tf\n'), ((596, 621), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (606, 621), True, 'import tensorflow as tf\n'), ((622, 644), 'keras.backend.set_session', 'K.set_session', (['session'], {}), '(session)\n', (635, 644), True, 'from keras import backend as K\n'), ((1680, 1700), 'keras.models.load_model', 'load_model', (['model_h5'], {}), '(model_h5)\n', (1690, 1700), False, 'from keras.models import load_model\n'), ((1739, 1756), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (1747, 1756), True, 'import numpy as np\n'), ((1767, 1784), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (1775, 1784), True, 'import numpy as np\n'), ((1955, 1974), 'matplotlib.pyplot.plot', 'plt.plot', (['tested[2]'], {}), '(tested[2])\n', (1963, 1974), True, 'import matplotlib.pyplot as plt\n'), ((1975, 1987), 'matplotlib.pyplot.plot', 'plt.plot', (['ex'], {}), '(ex)\n', (1983, 1987), True, 'import matplotlib.pyplot as plt\n'), ((1988, 2000), 'matplotlib.pyplot.plot', 'plt.plot', (['pr'], {}), '(pr)\n', (1996, 2000), True, 'import matplotlib.pyplot as plt\n'), ((2050, 2060), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2058, 2060), True, 'import matplotlib.pyplot as plt\n'), ((1599, 1631), 'keras.utils.to_categorical', 'to_categorical', (['y'], {'num_classes': '(2)'}), '(y, num_classes=2)\n', (1613, 1631), False, 'from keras.utils import to_categorical\n'), ((1877, 1889), 'numpy.argmax', 'np.argmax', (['p'], {}), '(p)\n', (1886, 1889), True, 'import numpy as np\n'), 
((1918, 1930), 'numpy.argmax', 'np.argmax', (['p'], {}), '(p)\n', (1927, 1930), True, 'import numpy as np\n'), ((2001, 2048), 'matplotlib.pyplot.legend', 'plt.legend', (["['signal', 'expected', 'predicted']"], {}), "(['signal', 'expected', 'predicted'])\n", (2011, 2048), True, 'import matplotlib.pyplot as plt\n')] |
"""1. Calculate Cp from LES data different
wind farm "extractability"
2. Predict Cp using two scale momentum theory
3. Plot results
"""
import numpy as np
import scipy.optimize as sp
import matplotlib.pyplot as plt
#wind farm parameters
#momentum `extractability' factor
zeta=[0,5,10,15,20,25]
#bottom friction exponent
gamma=2
#arrays to store result
cp_finite = np.zeros((50,6))
effective_area_ratio = np.zeros(50)
cp_nishino = np.zeros((50,6))
#load LES data
training_data = np.genfromtxt('LES_training_data.csv', delimiter=',')
#remove header
training_data = np.delete(training_data, 0, 0)
training_data = np.delete(training_data, 0, 1)
beta = training_data[:,5]
cp = training_data[:,7]
#repeat for different zeta values
for i in range(6):
#############################################
# 1. Calculate Cp from LES data for a finite
# wind farm
#############################################
#calculate adjusted Cp and effective area ratio
#for each wind farm LES
for run_no in range(50):
U_F = beta[run_no]*10.10348311
U_F0 = 10.10348311
#coefficients of quadratic formula to solve
a = 1/U_F**2
b = zeta[i]/U_F0
c = -zeta[i] - 1
U_Fprime = (-b + np.sqrt(b**2 - 4*a*c))/(2*a)
cp_finite[run_no,i] = cp[run_no]*(U_Fprime/U_F)**3
#calculate effective area ratio
C_f0 = 0.28641758**2/(0.5*10.10348311**2)
A = np.pi/4
S = training_data[run_no,0]*training_data[run_no,1]
area_ratio = A/S
effective_area_ratio[run_no] = area_ratio/C_f0
#############################################
# 2. Predict Cp using two-scale momentum
# theory
#############################################
effective_area_ratio_theory = np.linspace(0,20,50)
#predict Cp for each wind farm LES
for run_no in range(50):
def NDFM(beta):
""" Non-dimensional farm momentum
equation (see Nishino 2020)
"""
#use uncorrected ct_star to predict beta
#analytical model gives ct_star = 0.75
#divide by correction factor (N^2=0.8037111)
ct_star_adj = 0.75 / 0.8037111
return ct_star_adj*effective_area_ratio_theory[run_no]*beta**2 + beta**gamma - 1 -zeta[i]*(1-beta)
beta_theory = sp.bisect(NDFM,0,1)
cp_nishino[run_no,i] = 0.75**1.5 * beta_theory**3 * 1.33**-0.5
fig, ax = plt.subplots(nrows=3, ncols=2, figsize=[5.33,6.6], dpi=600)
ax[0,0].plot(effective_area_ratio_theory[12:], cp_nishino[12:,0])
pcm = ax[0,0].scatter(effective_area_ratio, training_data[:,7], s=5, c=training_data[:,3])
ax[0,0].set_xlabel(r'$\lambda/C_{f0}$')
ax[0,0].set_ylabel(r'$C_p$')
ax[0,0].set_title('a)', loc='left')
ax[0,1].plot(effective_area_ratio_theory[12:], cp_nishino[12:,1])
ax[0,1].scatter(effective_area_ratio, cp_finite[:,1], s=5, c=training_data[:,3])
ax[0,1].set_xlabel(r'$\lambda/C_{f0}$')
ax[0,1].set_ylabel(r'$C_{p,finite}$')
ax[0,1].set_title('b)', loc='left')
ax[1,0].plot(effective_area_ratio_theory[12:], cp_nishino[12:,2])
ax[1,0].scatter(effective_area_ratio, cp_finite[:,2], s=5, c=training_data[:,3])
ax[1,0].set_xlabel(r'$\lambda/C_{f0}$')
ax[1,0].set_ylabel(r'$C_{p.finite}$')
ax[1,0].set_title('c)', loc='left')
ax[1,1].plot(effective_area_ratio_theory[12:], cp_nishino[12:,3])
ax[1,1].scatter(effective_area_ratio, cp_finite[:,3], s=5, c=training_data[:,3])
ax[1,1].set_xlabel(r'$\lambda/C_{f0}$')
ax[1,1].set_ylabel(r'$C_{p,finite}$')
ax[1,1].set_title('d)', loc='left')
ax[2,0].plot(effective_area_ratio_theory[12:], cp_nishino[12:,4])
ax[2,0].scatter(effective_area_ratio, cp_finite[:,4], s=5, c=training_data[:,3])
ax[2,0].set_xlabel(r'$\lambda/C_{f0}$')
ax[2,0].set_ylabel(r'$C_{p,finite}$')
ax[2,0].set_title('e)', loc='left')
ax[2,1].plot(effective_area_ratio_theory[12:], cp_nishino[12:,5])
ax[2,1].scatter(effective_area_ratio, cp_finite[:,5], s=5, c=training_data[:,3])
ax[2,1].set_xlabel(r'$\lambda/C_{f0}$')
ax[2,1].set_ylabel(r'$C_{p,finite}$')
ax[2,1].set_title('f)', loc='left')
plt.tight_layout()
cbar = fig.colorbar(pcm, ax=ax.ravel().tolist(), shrink=0.97)
cbar.set_label(r'$C_T^*$')
plt.savefig('LES_cp_results.png', bbox_inches='tight')
| [
"scipy.optimize.bisect",
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"numpy.delete",
"numpy.zeros",
"numpy.linspace",
"matplotlib.pyplot.tight_layout",
"numpy.genfromtxt",
"matplotlib.pyplot.subplots"
] | [((367, 384), 'numpy.zeros', 'np.zeros', (['(50, 6)'], {}), '((50, 6))\n', (375, 384), True, 'import numpy as np\n'), ((407, 419), 'numpy.zeros', 'np.zeros', (['(50)'], {}), '(50)\n', (415, 419), True, 'import numpy as np\n'), ((433, 450), 'numpy.zeros', 'np.zeros', (['(50, 6)'], {}), '((50, 6))\n', (441, 450), True, 'import numpy as np\n'), ((482, 535), 'numpy.genfromtxt', 'np.genfromtxt', (['"""LES_training_data.csv"""'], {'delimiter': '""","""'}), "('LES_training_data.csv', delimiter=',')\n", (495, 535), True, 'import numpy as np\n'), ((567, 597), 'numpy.delete', 'np.delete', (['training_data', '(0)', '(0)'], {}), '(training_data, 0, 0)\n', (576, 597), True, 'import numpy as np\n'), ((614, 644), 'numpy.delete', 'np.delete', (['training_data', '(0)', '(1)'], {}), '(training_data, 0, 1)\n', (623, 644), True, 'import numpy as np\n'), ((2417, 2477), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'ncols': '(2)', 'figsize': '[5.33, 6.6]', 'dpi': '(600)'}), '(nrows=3, ncols=2, figsize=[5.33, 6.6], dpi=600)\n', (2429, 2477), True, 'import matplotlib.pyplot as plt\n'), ((4050, 4068), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4066, 4068), True, 'import matplotlib.pyplot as plt\n'), ((4160, 4214), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""LES_cp_results.png"""'], {'bbox_inches': '"""tight"""'}), "('LES_cp_results.png', bbox_inches='tight')\n", (4171, 4214), True, 'import matplotlib.pyplot as plt\n'), ((1781, 1803), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(50)'], {}), '(0, 20, 50)\n', (1792, 1803), True, 'import numpy as np\n'), ((2315, 2336), 'scipy.optimize.bisect', 'sp.bisect', (['NDFM', '(0)', '(1)'], {}), '(NDFM, 0, 1)\n', (2324, 2336), True, 'import scipy.optimize as sp\n'), ((1242, 1269), 'numpy.sqrt', 'np.sqrt', (['(b ** 2 - 4 * a * c)'], {}), '(b ** 2 - 4 * a * c)\n', (1249, 1269), True, 'import numpy as np\n')] |
import numpy as np
import pdb
class History:
    """Fixed-length rolling buffer holding the most recent screens.

    Stores ``history_length`` frames of shape ``screen_dims`` and exposes
    them either channel-first (as stored) or channel-last, depending on
    ``data_format``.
    """

    def __init__(self, data_format, batch_size, history_length, screen_dims):
        # batch_size is accepted for interface compatibility but unused here.
        self.data_format = data_format
        buffer_shape = [history_length, *screen_dims]
        self.history = np.zeros(buffer_shape, dtype=np.float32)

    def add(self, screen):
        """Push *screen* onto the buffer, dropping the oldest frame."""
        self.history[:-1] = self.history[1:]
        self.history[-1] = screen

    def reset(self):
        """Zero out the buffer in place."""
        self.history *= 0

    def get(self):
        """Return the buffer; transposed to channel-last for 3-D 'NHWC'."""
        channels_last = self.data_format == 'NHWC' and self.history.ndim == 3
        return np.transpose(self.history, (1, 2, 0)) if channels_last else self.history
"numpy.transpose"
] | [((483, 520), 'numpy.transpose', 'np.transpose', (['self.history', '(1, 2, 0)'], {}), '(self.history, (1, 2, 0))\n', (495, 520), True, 'import numpy as np\n')] |
"""
This modules performs spectral analysis of EEG signals (sometimes
refered as quantitative EEG) on a mne object. EEG Sleep EEG is ideally suited
to frequency and time-frequency analysis, since different stages or
micro-elements (such as spindles, K-complexes, slow waves) have
specific frequency characteristics [1].
Three spectral analysis methods can be used for the analysis, Fast Fourier
transform, Welch and Multitaper spectrogram. Multitaper estimation tends to
be slightly better in reducing artefactual noise and is thus prefered. For an
in depth application of Multitaper analysis to EEG signals, please see [2].
This module can also be used to summarised spectral quantities overnight. For
example, absolute delta power can be calculated in each sleep stages. More
experimental metrics, such as spectral entropy of delta activity across the
night [2], are also implemented.
The code below has been also used to analyse event-related changes in EEG.
The following publications have used part of this code [3,4,5,6], and we refer
interested reader to this publication for further details on implementation
technicals.
[1] <NAME>., <NAME>., <NAME>. et al (2021). New and Emerging Approaches
to Better Define Sleep Disruption and Its Consequences.
Frontiers in Neuroscience, 15. doi:10.3389/fnins.2021.751730
[2] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2017). Sleep Neurophysiological Dynamics Through the Lens of
Multitaper Spectral Analysis. Physiology (Bethesda), 32(1),
60-92. doi:10.1152/physiol.00062.2015
[3] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., . . . <NAME>. (2021). A Novel EEG Derived Measure of
Disrupted Delta Wave Activity during Sleep Predicts All-Cause Mortality Risk.
Ann Am Thorac Soc, (in press). doi:10.1513/AnnalsATS.202103-315OC
[4] <NAME>., <NAME>., <NAME>., & <NAME>. (2020).
Correspondence between physiological and behavioural responses
to vibratory stimuli during the sleep onset period: A quantitative
electroencephalography analysis. J Sleep Res, e13232. doi:10.1111/jsr.13232
[5] <NAME>., <NAME>., <NAME>. G. et al. (2021). Polysomnographic
Predictors of Treatment Response to Cognitive Behavioral Therapy for
Insomnia in Participants With Co-morbid Insomnia and Sleep Apnea:
Secondary Analysis of a Randomized Controlled Trial.
Frontiers in Psychology, 12. doi:10.3389/fpsyg.2021.676763
[6] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., EEG power spectral responses to wind farm compared to road
traffic noise during sleep: A laboratory study. (in press) J Sleep Res,
"""
import mne
import os
import numpy as np
import pandas as pd
import warnings
from psga.analyses.utils import check_is_fitted
from psga.analyses.base import BaseMethods
from psga.features.utils import power_spectrum, _psd_params_checker
from psga.hypnogram import _convert_hypno
from psga.features.time_features import compute_maximum_value_epochs, \
compute_ptp_amp, \
compute_rms_value_epochs, compute_std, compute_zero_crossings, \
compute_time_mass, \
compute_hjorth, compute_ratio_energy_time_mass
from psga.features.spectral_features import compute_absol_pow_freq_bands, \
compute_relative_pow_ratios, \
compute_hjorth_spect, compute_spect_entropy, \
compute_spect_slope, compute_spect_edge_freq
from psga.features.denoising_function import moving_average_weighted
import sys
try:
    # Frozen (PyInstaller) run: bundled resources live in the unpack dir.
    wd = sys._MEIPASS
except AttributeError:
    # Normal interpreter run: fall back to this module's directory.
    wd = os.path.dirname(__file__)

# Default EEG frequency bands, Hz: {name: [low, high]}.
FREQ_INIT = {'Delta': [0.5,4.5], 'Theta': [4.5, 7.0], 'Alpha': [7,12],
             'Sigma': [12,16], 'Beta': [16,35]}

# Default PSD-estimation keyword arguments per method; forwarded to
# psga.features.utils.power_spectrum by qEEG.
PSD_PARAMS_INIT = {'multitaper':
                       {'mt_adaptive': True, 'mt_bandwidth': 1,
                        'mt_low_bias':True},
                   'welch':{'welch_n_fft':256,
                            'welch_n_per_seg':None,
                            'welch_n_overlap':0}}
class qEEG(BaseMethods):
    """Quantitative EEG analysis of an MNE raw object.

    Power spectrum analysis is computed on consecutive windows of
    ``windows_length`` seconds of raw EEG in :meth:`score`. Mean absolute
    power of each frequency band can then be summarised overnight and per
    sleep stage with :meth:`overnight_metrics`; more experimental metrics on
    overnight delta-power fluctuations are also implemented there.

    The class can also score qEEG relative to event onsets with
    :meth:`score_from_events` (see the module docstring for the full
    reference list).

    Parameters
    ----------
    windows_length : int
        Length, in seconds, of each analysis window. Default 5 s.
    psd_method : str
        PSD estimation method, 'welch' or 'multitaper' (must be a key of
        PSD_PARAMS_INIT, from which the default method parameters are taken).
    events_windows_length : int
        Length, in seconds, of each analysis window in event-locked scoring.
        Default 5 s.
    events_lower_bound : int
        Start of event-locked analysis, in seconds relative to event onset
        (negative = before the event). Default -20.
    events_upper_bound : int
        End of event-locked analysis, in seconds relative to event onset.
        Default 20.
    """

    def __init__(self, windows_length = 5, psd_method = 'multitaper',
                 events_windows_length = 5, events_lower_bound = -20,
                 events_upper_bound = 20,
                 ):
        # Band edges as a flat vector of boundaries:
        # [0.5, 4.5, 7.0, 12, 16, 35] -> delta/theta/alpha/sigma/beta.
        self.freq_bands = np.hstack([FREQ_INIT['Delta'][0],
                                    FREQ_INIT['Delta'][1],
                                    FREQ_INIT['Theta'][1],
                                    FREQ_INIT['Alpha'][1],
                                    FREQ_INIT['Sigma'][1],
                                    FREQ_INIT['Beta'][1]])
        self.psd_method = psd_method
        # Default PSD parameters for the chosen method (see PSD_PARAMS_INIT);
        # raises KeyError for an unknown psd_method.
        self.psd_params = PSD_PARAMS_INIT[psd_method]
        self.windows_length = windows_length
        self.events_windows_length = events_windows_length
        self.events_lower_bound = events_lower_bound
        self.events_upper_bound = events_upper_bound
        self.picks = None  # EEG channel names to analyse; set in fit()
        super().__init__()

    def set_params(self, parameters_dict, check_has_key=False):
        """Update analysis attributes from a {name: value} dict.

        'psd_params' values are validated against 'psd_method', and a
        'freq_bands' dict is flattened into the boundary vector used
        internally. Unknown keys trigger a warning and are ignored.
        NOTE(review): check_has_key is currently unused.
        """
        for key, value in parameters_dict.items():
            if key == 'psd_params':
                # NOTE(review): assumes 'psd_method' is also present in
                # parameters_dict whenever 'psd_params' is given — KeyError
                # otherwise; confirm intended.
                value = _psd_params_checker(value,
                                            parameters_dict['psd_method'])
            if key == 'freq_bands':
                # Keep the dict form on the instance, but store the flattened
                # boundary vector via setattr below.
                self.freq_bands = value
                value = np.hstack([value['Delta'][0], value['Delta'][1],
                                   value['Theta'][1], value['Alpha'][1],
                                   value['Sigma'][1], value['Beta'][1]])
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                warnings.warn(key + ' key is not a valid attribute')

    def fit(self, raw, hypnogram, picks=None):
        """Prepare raw data and hypnogram for scoring.

        Validates inputs, restricts raw to the requested channels,
        band-pass filters 0.3-35 Hz, and converts the hypnogram into
        consecutive windows of ``windows_length`` seconds.
        """
        self._check_raw(raw)
        self._check_hypno(hypnogram)
        if picks is not None: self.picks = picks
        if self.picks is not None:
            # NOTE(review): passes the `picks` argument, not self.picks —
            # calling fit(picks=None) with self.picks preset via set_params
            # would forward None here; confirm intended.
            raw = raw.pick_channels(ch_names=picks)
        else:
            raise ValueError('No EEG channel was selected for qEEG analysis.')
        self._raw = raw.filter(l_freq=0.3, h_freq=35, verbose='error')
        self._hypno = _convert_hypno(hypnogram, self.windows_length)

    def score(self):
        """Calculate power spectrum based metrics for each segment of length windows_size

        Notes
        -----
        The following parameters are calculated for each segment and for each EEG channel:
        - Absolute and relative power of delta, theta, alpha, sigma, and beta
          bands
        - Delta/alpha ratio, slowing ratio and REM ratio
        - Maximum, RMS, SD and peak-to-peak values of EEG epochs data
        - Zero crossing rate of each EEG epoch
        - Spectral entropy and spectral edges (q = 0.85 and 0.95)
        """
        check_is_fitted(self, ['_raw', '_hypno'])
        hypno = self._hypno
        raw = self._raw
        # One feature dict per channel, keyed by channel name.
        for channel in raw.info['ch_names']:
            self._scoring[channel] = _score_qEEG(
                raw, hypno, channel, tmin=0,
                tmax=hypno['duration'].values[0], psd_method=self.psd_method,
                psd_params=self.psd_params, freq_bands=self.freq_bands)
        return self._scoring, self._epochs_data

    def overnight_metrics(self, kdftype='lct2020'):
        """Calculate summary descriptive metrics of an overnight recording.

        Averages each metric from :meth:`score` within individual sleep
        stages (and over all NREM), then appends delta-fluctuation features
        computed on the smoothed overnight delta-power time course.

        Parameters
        ----------
        kdftype : str
            'lct2020' zeroes delta power in wake/N1 before smoothing;
            'log' works on min-shifted log delta power instead.
        """
        if not self._scoring:
            self.score()
        scoring = self._scoring.items()
        metrics = {}
        # NOTE(review): the check that would set is_scored=False is commented
        # out below, so the staged branch always runs.
        is_scored = True
        for channel, qeeg_dict in scoring:
            df = pd.DataFrame.from_dict(qeeg_dict)
            # Reject high-amplitude windows (Max_Val >= 400; presumably
            # microvolts given the 1e6 scaling in _score_qEEG — TODO confirm).
            st = df.loc[df.Max_Val < 400, :]
            #if -1 in np.unique(df.SleepStage.values): is_scored = False
            if not is_scored: kdftype = 'log'
            if is_scored:
                # by individual sleep stage
                grouped = st.groupby(by='SleepStage').mean().drop(
                    ['SleepStageOnset', 'SleepStageDuration'], axis=1)
                # Flatten (metric, stage) into columns named "<metric>_N<stage>".
                v = grouped.unstack().to_frame().sort_index(level=1).T
                v.columns = [x + '_' + 'N' + str(int(y)) for (x, y) in
                             v.columns]
                # NREM: mean over all artefact-free windows, "<metric>_NREM".
                nrem = st.drop(['SleepStageOnset', 'SleepStageDuration'],
                               axis=1).mean().to_frame().T
                nrem.columns = [x + '_NREM' for x in nrem.columns]
                grpstaged = pd.concat([v, nrem], axis=1).to_dict(
                    orient='records')[0]
            else: # no sleep stage to sub-analyse, mean of all
                t = st.drop(['SleepStageOnset', 'SleepStageDuration'],
                            axis=1).mean().to_frame().T
                t.columns = [x + '_mean' for x in t.columns]
                grpstaged = t.to_dict(orient='records')[0]
            # Overnight delta-power fluctuation features.
            delta = df.absolute_delta
            if kdftype == 'lct2020':
                # Zero delta during wake (0) and N1 (1) before smoothing.
                delta[df.SleepStage == 0] = 0
                delta[df.SleepStage == 1] = 0
                delta_smoothed = moving_average_weighted(delta)
                features = _delta_fluctuations_parameters(delta_smoothed)
            elif kdftype == 'log':
                ldelta = np.log(delta + 1)
                # Replace artefact windows by the smallest non-zero value,
                # then shift so the minimum is zero.
                ldelta[df.Max_Val > 400] = np.min(ldelta[ldelta != 0])
                ldelta = ldelta - np.min(ldelta)
                ldelta_smoothed = moving_average_weighted(ldelta)
                features = _delta_fluctuations_parameters(ldelta_smoothed)
            else:
                raise NotImplementedError
            # Prefix every metric with the channel name.
            m = {**grpstaged, **features}
            metrics = {**metrics,**{channel + k: v for k, v in m.items()}}
        return metrics

    def score_from_events(self, events):
        """Score qEEG in consecutive windows time-locked to event onsets.

        Cuts raw EEG from ``events_lower_bound`` to ``events_upper_bound``
        seconds around each event onset, in steps of
        ``events_windows_length``, and computes the same temporal/spectral
        features as :meth:`score` for each window.

        Parameters
        ----------
        events : pd.DataFrame
            Dataframe containing onset, duration and labels of events.
        """
        hypno = self._hypno
        raw = self._raw
        metrics = {}
        for channel in raw.info['ch_names']:
            ev_dict = {}
            # One feature set per relative window; keys prefixed "<tmin>W_".
            for count, tmin in enumerate(
                    np.arange(self.events_lower_bound, self.events_upper_bound,
                              self.events_windows_length)):
                tmax = tmin + self.events_windows_length
                temp_dict = _score_qEEG(raw, events, channel, tmin=tmin,
                                        tmax=tmax, type='event',
                                        psd_method=self.psd_method,
                                        psd_params=self.psd_params,
                                        freq_bands=self.freq_bands
                                        )
                for key, val in temp_dict.items():
                    ev_dict[str(tmin) + 'W_' + key] = val
                if count == 0:
                    # Attach event metadata (incl. surrounding sleep stage)
                    # once per channel.
                    event_stage = add_label_to_events(events, hypno)
                    ev_dict['Event_Label'] = event_stage['label'].values
                    ev_dict['Event_Onset'] = event_stage['onset'].values
                    ev_dict['Event_Sleep_Stage'] = event_stage['stage_label']
            metrics[channel] = ev_dict
        return metrics
def _score_qEEG(raw, Stages, channel, tmin=0, tmax=5, type='stage',
                psd_method=None, psd_params=None,
                freq_bands=None):
    """Epoch one channel around stage/event onsets and compute qEEG features.

    Returns the feature dict from _calculate_epochs_parameters; for
    type='stage' it is augmented with the stage label, onset and duration.
    """
    sfreq = raw.info['sfreq']
    # MNE expects event onsets/durations expressed in samples, not seconds.
    onset = np.asarray(Stages['onset'].values * sfreq, dtype='int')
    dur = np.asarray(Stages['duration'].values * sfreq, dtype='int')
    label = np.ones_like(Stages['duration'].values, dtype='int')
    events = np.vstack((onset, dur, label)).T
    # Cut epochs of [tmin, tmax] seconds around each onset, no rejection.
    epochs = mne.Epochs(raw, events, picks=[channel], event_id=None,
                        tmin=tmin, tmax=tmax, baseline=(None, None),
                        reject=None, reject_by_annotation=False,
                        verbose='critical', flat=None)
    assert len(epochs.selection) == len(Stages)
    data = epochs.get_data().squeeze() * 10 ** 6  # volts -> microvolts
    feat_dict = _calculate_epochs_parameters(sfreq, data,
                                             psd_method=psd_method,
                                             psd_params=psd_params,
                                             freq_bands=freq_bands)
    if type == 'stage':
        # Carry the hypnogram info alongside the features (back in seconds).
        feat_dict['SleepStage'] = Stages['label'].values
        feat_dict['SleepStageOnset'] = onset / sfreq
        feat_dict['SleepStageDuration'] = dur / sfreq
    return feat_dict
def add_label_to_events(events, Stages):
    """Attach the surrounding sleep stage to each event.

    For every event onset, the closest preceding stage onset is located; if
    the event falls within the first 15 s of that stage, the event is
    attributed to the previous stage instead.

    :param events: DataFrame with an 'onset' column (seconds).
    :param Stages: DataFrame with 'onset' and 'label' columns.
    :return: *events* with an added 'stage_label' column (modified in place).
    """
    stage_onsets = Stages['onset'].values
    stage_labels = Stages['label'].values
    picked = []
    for ev_onset in events['onset'].values:
        # Index of the last stage starting strictly before this event.
        idx = np.argwhere(stage_onsets < ev_onset)[-1]
        if int(ev_onset - stage_onsets[idx]) < 15:
            # NOTE(review): when idx is 0 this wraps to the LAST stage via
            # negative indexing — confirm intended.
            idx = idx - 1
        picked.append(stage_labels[idx])
    events['stage_label'] = np.hstack(picked)
    return events
def _calculate_epochs_parameters(sfreq, data, psd_method='multitaper',
                                 psd_params=None,
                                 freq_bands=None):
    """Compute spectral and temporal qEEG features for each epoch.

    Returns a dict mapping feature name -> per-epoch value array.
    """
    # Estimate the PSD once and reuse it for every spectral feature below.
    psd, freqs = power_spectrum(sfreq, data, psd_method=psd_method,
                                **psd_params, verbose=False)
    precomputed_psd = {'psd': psd.squeeze(), 'freqs': freqs}
    # Absolute band powers within the requested frequency boundaries.
    absolute_bands = compute_absol_pow_freq_bands(
        sfreq, data.squeeze(), freq_bands=freq_bands,
        precomputed_psd=precomputed_psd)
    # Relative powers and classic band ratios derived from the absolutes.
    relative_bands = compute_relative_pow_ratios(absolute_bands)
    spectral_features = np.column_stack((
        compute_spect_entropy(sfreq, data, precomputed_psd=precomputed_psd),
        compute_spect_edge_freq(sfreq, data, precomputed_psd=precomputed_psd,
                                edge=[0.85, 0.95]),
    ))
    temporal_features = np.column_stack((
        compute_maximum_value_epochs(data),
        compute_rms_value_epochs(data),
        compute_ptp_amp(data),
        compute_std(data),
        compute_zero_crossings(data),
    ))
    # Names in the exact column order of the stacked feature matrix.
    names = ['absolute_delta', 'absolute_theta', 'absolute_alpha',
             'absolute_sigma', 'absolute_beta',
             'deltaR', 'thetaR', 'alphaR', 'sigmaR', 'betaR', 'DAR',
             'SLOWING_RATIO', 'REMR',
             'Spectral_Entropy', 'Spectral_Edge_85', 'Spectral_Edge_95',
             'Max_Val', 'RMS_Val', 'PTP_Amp', 'std', 'ZCR']
    all_features = np.column_stack((absolute_bands, relative_bands,
                                    spectral_features, temporal_features))
    return dict(zip(names, all_features.T))
def _delta_fluctuations_parameters(overnight_fluctuations):
    """Characterise the overnight delta-power fluctuation time course.

    The smoothed delta-power series is treated as a signal sampled at 2 Hz
    and summarised by spectral-entropy/edge, time-mass, energy-ratio and
    spectral-slope features ("_kdf" suffix).
    """
    data = np.expand_dims(overnight_fluctuations, axis=0)
    values = np.column_stack((
        compute_spect_entropy(2, data, psd_method='fft'),
        compute_spect_edge_freq(2, data, psd_method='fft', edge=[0.75, 0.95]),
        compute_time_mass(data, q=[0.5, 0.75]),
        compute_ratio_energy_time_mass(data, q=[0.5, 0.75]),
        compute_spect_slope(2, data, psd_method='fft', fmin=None, fmax=1),
    ))
    # One scalar per name, in the exact column order produced above.
    names = ('Spectral_Entropy_kdf', 'Spectral_Edge_75_kdf',
             'Spectral_Edge_95_kdf',
             'TimeMass50_kdf', 'TimeMass75_kdf', 'DER_50_kdf', 'DER_75_kdf',
             'intercept_kdf', 'slope_kdf', 'MSE_kdf', 'R2_kdf')
    return {name: float(col[0]) for name, col in zip(names, values.T)}
| [
"numpy.hstack",
"psga.features.time_features.compute_maximum_value_epochs",
"numpy.log",
"numpy.column_stack",
"psga.features.utils.power_spectrum",
"numpy.arange",
"numpy.asarray",
"psga.features.time_features.compute_zero_crossings",
"pandas.DataFrame.from_dict",
"psga.features.utils._psd_params... | [((14328, 14395), 'numpy.asarray', 'np.asarray', (["(Stages['onset'].values * raw.info['sfreq'])"], {'dtype': '"""int"""'}), "(Stages['onset'].values * raw.info['sfreq'], dtype='int')\n", (14338, 14395), True, 'import numpy as np\n'), ((14406, 14476), 'numpy.asarray', 'np.asarray', (["(Stages['duration'].values * raw.info['sfreq'])"], {'dtype': '"""int"""'}), "(Stages['duration'].values * raw.info['sfreq'], dtype='int')\n", (14416, 14476), True, 'import numpy as np\n'), ((14489, 14541), 'numpy.ones_like', 'np.ones_like', (["Stages['duration'].values"], {'dtype': '"""int"""'}), "(Stages['duration'].values, dtype='int')\n", (14501, 14541), True, 'import numpy as np\n'), ((14660, 14841), 'mne.Epochs', 'mne.Epochs', (['raw', 'events'], {'picks': '[channel]', 'event_id': 'None', 'tmin': 'tmin', 'tmax': 'tmax', 'baseline': '(None, None)', 'reject': 'None', 'reject_by_annotation': '(False)', 'verbose': '"""critical"""', 'flat': 'None'}), "(raw, events, picks=[channel], event_id=None, tmin=tmin, tmax=\n tmax, baseline=(None, None), reject=None, reject_by_annotation=False,\n verbose='critical', flat=None)\n", (14670, 14841), False, 'import mne\n'), ((16691, 16721), 'numpy.hstack', 'np.hstack', (['corresponding_stage'], {}), '(corresponding_stage)\n', (16700, 16721), True, 'import numpy as np\n'), ((16992, 17071), 'psga.features.utils.power_spectrum', 'power_spectrum', (['sfreq', 'data'], {'psd_method': 'psd_method', 'verbose': '(False)'}), '(sfreq, data, psd_method=psd_method, **psd_params, verbose=False)\n', (17006, 17071), False, 'from psga.features.utils import power_spectrum, _psd_params_checker\n'), ((17666, 17709), 'psga.features.spectral_features.compute_relative_pow_ratios', 'compute_relative_pow_ratios', (['absolute_bands'], {}), '(absolute_bands)\n', (17693, 17709), False, 'from psga.features.spectral_features import compute_absol_pow_freq_bands, compute_relative_pow_ratios, compute_hjorth_spect, compute_spect_entropy, 
compute_spect_slope, compute_spect_edge_freq\n'), ((19118, 19209), 'numpy.column_stack', 'np.column_stack', (['(absolute_bands, relative_bands, spectral_features, temporal_features)'], {}), '((absolute_bands, relative_bands, spectral_features,\n temporal_features))\n', (19133, 19209), True, 'import numpy as np\n'), ((19254, 19325), 'numpy.hstack', 'np.hstack', (['[freq_name, ratio_name, spec_feature_names, t_feature_names]'], {}), '([freq_name, ratio_name, spec_feature_names, t_feature_names])\n', (19263, 19325), True, 'import numpy as np\n'), ((19514, 19560), 'numpy.expand_dims', 'np.expand_dims', (['overnight_fluctuations'], {'axis': '(0)'}), '(overnight_fluctuations, axis=0)\n', (19528, 19560), True, 'import numpy as np\n'), ((3475, 3500), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3490, 3500), False, 'import os\n'), ((6683, 6836), 'numpy.hstack', 'np.hstack', (["[FREQ_INIT['Delta'][0], FREQ_INIT['Delta'][1], FREQ_INIT['Theta'][1],\n FREQ_INIT['Alpha'][1], FREQ_INIT['Sigma'][1], FREQ_INIT['Beta'][1]]"], {}), "([FREQ_INIT['Delta'][0], FREQ_INIT['Delta'][1], FREQ_INIT['Theta']\n [1], FREQ_INIT['Alpha'][1], FREQ_INIT['Sigma'][1], FREQ_INIT['Beta'][1]])\n", (6692, 6836), True, 'import numpy as np\n'), ((8539, 8585), 'psga.hypnogram._convert_hypno', '_convert_hypno', (['hypnogram', 'self.windows_length'], {}), '(hypnogram, self.windows_length)\n', (8553, 8585), False, 'from psga.hypnogram import _convert_hypno\n'), ((9171, 9212), 'psga.analyses.utils.check_is_fitted', 'check_is_fitted', (['self', "['_raw', '_hypno']"], {}), "(self, ['_raw', '_hypno'])\n", (9186, 9212), False, 'from psga.analyses.utils import check_is_fitted\n'), ((14555, 14585), 'numpy.vstack', 'np.vstack', (['(onset, dur, label)'], {}), '((onset, dur, label))\n', (14564, 14585), True, 'import numpy as np\n'), ((10297, 10330), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['qeeg_dict'], {}), '(qeeg_dict)\n', (10319, 10330), True, 'import pandas as pd\n'), 
((16372, 16419), 'numpy.argwhere', 'np.argwhere', (['(onset_stages < single_onset_events)'], {}), '(onset_stages < single_onset_events)\n', (16383, 16419), True, 'import numpy as np\n'), ((18023, 18090), 'psga.features.spectral_features.compute_spect_entropy', 'compute_spect_entropy', (['sfreq', 'data'], {'precomputed_psd': 'precomputed_psd'}), '(sfreq, data, precomputed_psd=precomputed_psd)\n', (18044, 18090), False, 'from psga.features.spectral_features import compute_absol_pow_freq_bands, compute_relative_pow_ratios, compute_hjorth_spect, compute_spect_entropy, compute_spect_slope, compute_spect_edge_freq\n'), ((18194, 18287), 'psga.features.spectral_features.compute_spect_edge_freq', 'compute_spect_edge_freq', (['sfreq', 'data'], {'precomputed_psd': 'precomputed_psd', 'edge': '[0.85, 0.95]'}), '(sfreq, data, precomputed_psd=precomputed_psd, edge=\n [0.85, 0.95])\n', (18217, 18287), False, 'from psga.features.spectral_features import compute_absol_pow_freq_bands, compute_relative_pow_ratios, compute_hjorth_spect, compute_spect_entropy, compute_spect_slope, compute_spect_edge_freq\n'), ((18656, 18690), 'psga.features.time_features.compute_maximum_value_epochs', 'compute_maximum_value_epochs', (['data'], {}), '(data)\n', (18684, 18690), False, 'from psga.features.time_features import compute_maximum_value_epochs, compute_ptp_amp, compute_rms_value_epochs, compute_std, compute_zero_crossings, compute_time_mass, compute_hjorth, compute_ratio_energy_time_mass\n'), ((18700, 18730), 'psga.features.time_features.compute_rms_value_epochs', 'compute_rms_value_epochs', (['data'], {}), '(data)\n', (18724, 18730), False, 'from psga.features.time_features import compute_maximum_value_epochs, compute_ptp_amp, compute_rms_value_epochs, compute_std, compute_zero_crossings, compute_time_mass, compute_hjorth, compute_ratio_energy_time_mass\n'), ((18740, 18761), 'psga.features.time_features.compute_ptp_amp', 'compute_ptp_amp', (['data'], {}), '(data)\n', (18755, 18761), False, 
'from psga.features.time_features import compute_maximum_value_epochs, compute_ptp_amp, compute_rms_value_epochs, compute_std, compute_zero_crossings, compute_time_mass, compute_hjorth, compute_ratio_energy_time_mass\n'), ((18771, 18788), 'psga.features.time_features.compute_std', 'compute_std', (['data'], {}), '(data)\n', (18782, 18788), False, 'from psga.features.time_features import compute_maximum_value_epochs, compute_ptp_amp, compute_rms_value_epochs, compute_std, compute_zero_crossings, compute_time_mass, compute_hjorth, compute_ratio_energy_time_mass\n'), ((18798, 18826), 'psga.features.time_features.compute_zero_crossings', 'compute_zero_crossings', (['data'], {}), '(data)\n', (18820, 18826), False, 'from psga.features.time_features import compute_maximum_value_epochs, compute_ptp_amp, compute_rms_value_epochs, compute_std, compute_zero_crossings, compute_time_mass, compute_hjorth, compute_ratio_energy_time_mass\n'), ((19613, 19661), 'psga.features.spectral_features.compute_spect_entropy', 'compute_spect_entropy', (['(2)', 'data'], {'psd_method': '"""fft"""'}), "(2, data, psd_method='fft')\n", (19634, 19661), False, 'from psga.features.spectral_features import compute_absol_pow_freq_bands, compute_relative_pow_ratios, compute_hjorth_spect, compute_spect_entropy, compute_spect_slope, compute_spect_edge_freq\n'), ((19671, 19740), 'psga.features.spectral_features.compute_spect_edge_freq', 'compute_spect_edge_freq', (['(2)', 'data'], {'psd_method': '"""fft"""', 'edge': '[0.75, 0.95]'}), "(2, data, psd_method='fft', edge=[0.75, 0.95])\n", (19694, 19740), False, 'from psga.features.spectral_features import compute_absol_pow_freq_bands, compute_relative_pow_ratios, compute_hjorth_spect, compute_spect_entropy, compute_spect_slope, compute_spect_edge_freq\n'), ((19750, 19788), 'psga.features.time_features.compute_time_mass', 'compute_time_mass', (['data'], {'q': '[0.5, 0.75]'}), '(data, q=[0.5, 0.75])\n', (19767, 19788), False, 'from psga.features.time_features 
import compute_maximum_value_epochs, compute_ptp_amp, compute_rms_value_epochs, compute_std, compute_zero_crossings, compute_time_mass, compute_hjorth, compute_ratio_energy_time_mass\n'), ((19798, 19849), 'psga.features.time_features.compute_ratio_energy_time_mass', 'compute_ratio_energy_time_mass', (['data'], {'q': '[0.5, 0.75]'}), '(data, q=[0.5, 0.75])\n', (19828, 19849), False, 'from psga.features.time_features import compute_maximum_value_epochs, compute_ptp_amp, compute_rms_value_epochs, compute_std, compute_zero_crossings, compute_time_mass, compute_hjorth, compute_ratio_energy_time_mass\n'), ((19859, 19924), 'psga.features.spectral_features.compute_spect_slope', 'compute_spect_slope', (['(2)', 'data'], {'psd_method': '"""fft"""', 'fmin': 'None', 'fmax': '(1)'}), "(2, data, psd_method='fft', fmin=None, fmax=1)\n", (19878, 19924), False, 'from psga.features.spectral_features import compute_absol_pow_freq_bands, compute_relative_pow_ratios, compute_hjorth_spect, compute_spect_entropy, compute_spect_slope, compute_spect_edge_freq\n'), ((7542, 7599), 'psga.features.utils._psd_params_checker', '_psd_params_checker', (['value', "parameters_dict['psd_method']"], {}), "(value, parameters_dict['psd_method'])\n", (7561, 7599), False, 'from psga.features.utils import power_spectrum, _psd_params_checker\n'), ((7744, 7873), 'numpy.hstack', 'np.hstack', (["[value['Delta'][0], value['Delta'][1], value['Theta'][1], value['Alpha'][1],\n value['Sigma'][1], value['Beta'][1]]"], {}), "([value['Delta'][0], value['Delta'][1], value['Theta'][1], value[\n 'Alpha'][1], value['Sigma'][1], value['Beta'][1]])\n", (7753, 7873), True, 'import numpy as np\n'), ((8050, 8102), 'warnings.warn', 'warnings.warn', (["(key + ' key is not a valid attribute')"], {}), "(key + ' key is not a valid attribute')\n", (8063, 8102), False, 'import warnings\n'), ((11728, 11758), 'psga.features.denoising_function.moving_average_weighted', 'moving_average_weighted', (['delta'], {}), '(delta)\n', (11751, 
11758), False, 'from psga.features.denoising_function import moving_average_weighted\n'), ((13038, 13130), 'numpy.arange', 'np.arange', (['self.events_lower_bound', 'self.events_upper_bound', 'self.events_windows_length'], {}), '(self.events_lower_bound, self.events_upper_bound, self.\n events_windows_length)\n', (13047, 13130), True, 'import numpy as np\n'), ((11893, 11910), 'numpy.log', 'np.log', (['(delta + 1)'], {}), '(delta + 1)\n', (11899, 11910), True, 'import numpy as np\n'), ((11954, 11981), 'numpy.min', 'np.min', (['ldelta[ldelta != 0]'], {}), '(ldelta[ldelta != 0])\n', (11960, 11981), True, 'import numpy as np\n'), ((12065, 12096), 'psga.features.denoising_function.moving_average_weighted', 'moving_average_weighted', (['ldelta'], {}), '(ldelta)\n', (12088, 12096), False, 'from psga.features.denoising_function import moving_average_weighted\n'), ((12016, 12030), 'numpy.min', 'np.min', (['ldelta'], {}), '(ldelta)\n', (12022, 12030), True, 'import numpy as np\n'), ((11137, 11165), 'pandas.concat', 'pd.concat', (['[v, nrem]'], {'axis': '(1)'}), '([v, nrem], axis=1)\n', (11146, 11165), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from numpy import ndarray
from numba import njit, prange
# Shared flag for all @njit kernels below: cache the compiled machine code
# on disk so repeated imports skip JIT compilation.
__cache = True
@njit(nogil=True, cache=__cache)
def shape_function_values(x, L):
    """
    Evaluates the shape functions at a point x in the range [-1, 1].

    Parameters
    ----------
    x : float
        The point of interest on the master element, in [-1, 1].
    L : float
        Element length; scales the columns that belong to the
        rotational degrees of freedom.

    Returns
    -------
    numpy float array of shape (3, 6)
        One row per node (nNE=3) and one column per degree of freedom
        (nDOF=6), matching the (nNE, nDOF) layout documented on the
        bulk evaluators below.
    """
    return np.array([
        [
            0.5 * x * (x - 1),
            x**2 * (0.75 * x + 1.0) * (x - 1)**2,
            x**2 * (0.75 * x + 1.0) * (x - 1)**2,
            0.5 * x * (x - 1),
            -0.125 * L * x**2 * (x - 1)**2 * (x + 1),
            0.125 * L * x**2 * (x - 1)**2 * (x + 1)
        ],
        [
            1.0 - 1.0 * x**2,
            1.0 * (x - 1)**2 * (x + 1)**2,
            1.0 * (x - 1)**2 * (x + 1)**2,
            1.0 - 1.0 * x**2,
            L * x * (-0.5 * x**4 + 1.0 * x**2 - 0.5),
            0.5 * L * x * (x - 1)**2 * (x + 1)**2
        ],
        [
            0.5 * x * (x + 1),
            x**2 * (1.0 - 0.75 * x) * (x + 1)**2,
            x**2 * (1.0 - 0.75 * x) * (x + 1)**2,
            0.5 * x * (x + 1),
            -0.125 * L * x**2 * (x - 1) * (x + 1)**2,
            0.125 * L * x**2 * (x - 1) * (x + 1)**2
        ]
    ])
@njit(nogil=True, cache=__cache)
def shape_function_derivatives_1(x, L):
    """
    Evaluates the first derivatives of the shape
    functions at a point x in the range [-1, 1].

    Parameters
    ----------
    x : float
        The point of interest on the master element, in [-1, 1].
    L : float
        Element length; scales the rotational-DOF columns.

    Returns
    -------
    numpy float array of shape (3, 6)
        One row per node (nNE=3), one column per DOF (nDOF=6).
    """
    # Coefficient tables below are the symbolically differentiated
    # counterparts of the polynomials in `shape_function_values`.
    return np.array([
        [
            1.0 * x - 0.5,
            3.75 * x * (x - 1) * (1.0 * x - 0.533333333333333) * (x + 1),
            3.75 * x * (x - 1) * (1.0 * x - 0.533333333333333) * (x + 1),
            1.0 * x - 0.5,
            L * x * (-0.625 * x**3 + 0.5 * x**2 + 0.375 * x - 0.25),
            0.625 * L * x * (x - 1) * (1.0 * x**2 + 0.2 * x - 0.4)
        ],
        [
            -2.0 * x,
            4.0 * x * (x**2 - 1),
            4.0 * x * (x**2 - 1),
            -2.0 * x,
            L * (-2.5 * x**4 + 3.0 * x**2 - 0.5),
            L * (2.5 * x**4 - 3.0 * x**2 + 0.5)
        ],
        [
            1.0 * x + 0.5,
            -3.75 * x * (x - 1) * (1.0 * x + 0.533333333333333) * (x + 1),
            -3.75 * x * (x - 1) * (1.0 * x + 0.533333333333333) * (x + 1),
            1.0 * x + 0.5,
            0.625 * L * x * (x + 1) * (-1.0 * x**2 + 0.2 * x + 0.4),
            L * x * (0.625 * x**3 + 0.5 * x**2 - 0.375 * x - 0.25)
        ]
    ])
@njit(nogil=True, cache=__cache)
def shape_function_derivatives_2(x, L):
    """
    Evaluates the second derivatives of the shape
    functions at a point x in the range [-1, 1].

    Parameters
    ----------
    x : float
        The point of interest on the master element, in [-1, 1].
    L : float
        Element length; scales the rotational-DOF columns.

    Returns
    -------
    numpy float array of shape (3, 6)
        One row per node (nNE=3), one column per DOF (nDOF=6).
    """
    return np.array([
        [
            1.00000000000000,
            15.0 * x**3 - 6.0 * x**2 - 7.5 * x + 2.0,
            15.0 * x**3 - 6.0 * x**2 - 7.5 * x + 2.0,
            1.00000000000000,
            L * (-2.5 * x**3 + 1.5 * x**2 + 0.75 * x - 0.25),
            L * (2.5 * x**3 - 1.5 * x**2 - 0.75 * x + 0.25)
        ],
        [
            -2.00000000000000,
            12.0 * x**2 - 4.0,
            12.0 * x**2 - 4.0,
            -2.00000000000000,
            L * x * (6.0 - 10.0 * x**2),
            L * x * (10.0 * x**2 - 6.0)
        ],
        [
            1.00000000000000,
            -15.0 * x**3 - 6.0 * x**2 + 7.5 * x + 2.0,
            -15.0 * x**3 - 6.0 * x**2 + 7.5 * x + 2.0,
            1.00000000000000,
            L * (-2.5 * x**3 - 1.5 * x**2 + 0.75 * x + 0.25),
            L * (2.5 * x**3 + 1.5 * x**2 - 0.75 * x - 0.25)
        ]
    ])
@njit(nogil=True, cache=__cache)
def shape_function_derivatives_3(x, L):
    """
    Evaluates the third derivatives of the shape
    functions at a point x in the range [-1, 1].

    Parameters
    ----------
    x : float
        The point of interest on the master element, in [-1, 1].
    L : float
        Element length; scales the rotational-DOF columns.

    Returns
    -------
    numpy float array of shape (3, 6)
        One row per node (nNE=3), one column per DOF (nDOF=6).
        Entries that are constant 0 correspond to DOFs whose shape
        polynomials are at most quadratic.
    """
    return np.array([
        [
            0,
            45.0 * x**2 - 12.0 * x - 7.5,
            45.0 * x**2 - 12.0 * x - 7.5,
            0,
            L * (-7.5 * x**2 + 3.0 * x + 0.75),
            L * (7.5 * x**2 - 3.0 * x - 0.75)
        ],
        [
            0,
            24.0 * x,
            24.0 * x,
            0,
            L * (6.0 - 30.0 * x**2),
            L * (30.0 * x**2 - 6.0)
        ],
        [
            0,
            -45.0 * x**2 - 12.0 * x + 7.5,
            -45.0 * x**2 - 12.0 * x + 7.5,
            0,
            L * (-7.5 * x**2 - 3.0 * x + 0.75),
            L * (7.5 * x**2 + 3.0 * x - 0.75)
        ]
    ])
@njit(nogil=True, parallel=True, cache=__cache)
def shape_function_values_bulk(x: ndarray, L: ndarray):
    """
    Evaluates the shape functions at several points in the
    range [-1, 1], for several elements at once.

    Parameters
    ----------
    x : 1d numpy float array
        The points of interest in the range [-1, 1].
    L : 1d numpy float array
        One length value per element.

    Returns
    -------
    numpy float array of shape (nE, nP, nNE=3, nDOF=6)
    """
    nPoints = x.shape[0]
    nElems = L.shape[0]
    out = np.zeros((nElems, nPoints, 3, 6), dtype=x.dtype)
    # Both loops are parallelized; every (element, point) cell is independent.
    for iElem in prange(nElems):
        for iPoint in prange(nPoints):
            out[iElem, iPoint] = shape_function_values(x[iPoint], L[iElem])
    return out
@njit(nogil=True, cache=__cache)
def shape_function_derivatives(x, L):
    """
    Evaluates the first, second and third derivatives of the shape
    functions at a point x in the range [-1, 1].

    Parameters
    ----------
    x : float
        The point of interest in the range [-1, 1].
    L : float
        Element length; forwarded to the per-order evaluators.

    Returns
    -------
    numpy float array of shape (nNE=3, nDOF=6, 3)
        The last axis indexes the derivative order: 0 -> first,
        1 -> second, 2 -> third derivative.
    """
    res = np.zeros((3, 6, 3))
    res[:, :, 0] = shape_function_derivatives_1(x, L)
    res[:, :, 1] = shape_function_derivatives_2(x, L)
    res[:, :, 2] = shape_function_derivatives_3(x, L)
    return res
@njit(nogil=True, parallel=True, cache=__cache)
def shape_function_derivatives_bulk(x: ndarray, L: ndarray):
    """
    Evaluates the first three derivatives of the shape functions
    at several points in the range [-1, 1], for several elements.

    Returns
    -------
    numpy float array of shape (nE, nP, nNE=3, nDOF=6, 3)
        The last axis indexes the derivative order (1st, 2nd, 3rd).
    """
    nPoints = x.shape[0]
    nElems = L.shape[0]
    out = np.zeros((nElems, nPoints, 3, 6, 3), dtype=x.dtype)
    # Independent cells: safe to parallelize both loops.
    for iElem in prange(nElems):
        for iPoint in prange(nPoints):
            out[iElem, iPoint] = shape_function_derivatives(x[iPoint], L[iElem])
    return out
| [
"numpy.array",
"numpy.zeros",
"numba.njit",
"numba.prange"
] | [((118, 149), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'cache': '__cache'}), '(nogil=True, cache=__cache)\n', (122, 149), False, 'from numba import njit, prange\n'), ((1148, 1179), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'cache': '__cache'}), '(nogil=True, cache=__cache)\n', (1152, 1179), False, 'from numba import njit, prange\n'), ((2316, 2347), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'cache': '__cache'}), '(nogil=True, cache=__cache)\n', (2320, 2347), False, 'from numba import njit, prange\n'), ((3384, 3415), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'cache': '__cache'}), '(nogil=True, cache=__cache)\n', (3388, 3415), False, 'from numba import njit, prange\n'), ((4229, 4275), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'parallel': '(True)', 'cache': '__cache'}), '(nogil=True, parallel=True, cache=__cache)\n', (4233, 4275), False, 'from numba import njit, prange\n'), ((4843, 4874), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'cache': '__cache'}), '(nogil=True, cache=__cache)\n', (4847, 4874), False, 'from numba import njit, prange\n'), ((5573, 5619), 'numba.njit', 'njit', ([], {'nogil': '(True)', 'parallel': '(True)', 'cache': '__cache'}), '(nogil=True, parallel=True, cache=__cache)\n', (5577, 5619), False, 'from numba import njit, prange\n'), ((279, 956), 'numpy.array', 'np.array', (['[[0.5 * x * (x - 1), x ** 2 * (0.75 * x + 1.0) * (x - 1) ** 2, x ** 2 * (\n 0.75 * x + 1.0) * (x - 1) ** 2, 0.5 * x * (x - 1), -0.125 * L * x ** 2 *\n (x - 1) ** 2 * (x + 1), 0.125 * L * x ** 2 * (x - 1) ** 2 * (x + 1)], [\n 1.0 - 1.0 * x ** 2, 1.0 * (x - 1) ** 2 * (x + 1) ** 2, 1.0 * (x - 1) **\n 2 * (x + 1) ** 2, 1.0 - 1.0 * x ** 2, L * x * (-0.5 * x ** 4 + 1.0 * x **\n 2 - 0.5), 0.5 * L * x * (x - 1) ** 2 * (x + 1) ** 2], [0.5 * x * (x + 1\n ), x ** 2 * (1.0 - 0.75 * x) * (x + 1) ** 2, x ** 2 * (1.0 - 0.75 * x) *\n (x + 1) ** 2, 0.5 * x * (x + 1), -0.125 * L * x ** 2 * (x - 1) * (x + 1\n ) ** 2, 0.125 * L * x ** 2 * (x - 1) * (x + 1) ** 2]]'], 
{}), '([[0.5 * x * (x - 1), x ** 2 * (0.75 * x + 1.0) * (x - 1) ** 2, x **\n 2 * (0.75 * x + 1.0) * (x - 1) ** 2, 0.5 * x * (x - 1), -0.125 * L * x **\n 2 * (x - 1) ** 2 * (x + 1), 0.125 * L * x ** 2 * (x - 1) ** 2 * (x + 1)\n ], [1.0 - 1.0 * x ** 2, 1.0 * (x - 1) ** 2 * (x + 1) ** 2, 1.0 * (x - 1\n ) ** 2 * (x + 1) ** 2, 1.0 - 1.0 * x ** 2, L * x * (-0.5 * x ** 4 + 1.0 *\n x ** 2 - 0.5), 0.5 * L * x * (x - 1) ** 2 * (x + 1) ** 2], [0.5 * x * (\n x + 1), x ** 2 * (1.0 - 0.75 * x) * (x + 1) ** 2, x ** 2 * (1.0 - 0.75 *\n x) * (x + 1) ** 2, 0.5 * x * (x + 1), -0.125 * L * x ** 2 * (x - 1) * (\n x + 1) ** 2, 0.125 * L * x ** 2 * (x - 1) * (x + 1) ** 2]])\n', (287, 956), True, 'import numpy as np\n'), ((1345, 2101), 'numpy.array', 'np.array', (['[[1.0 * x - 0.5, 3.75 * x * (x - 1) * (1.0 * x - 0.533333333333333) * (x + \n 1), 3.75 * x * (x - 1) * (1.0 * x - 0.533333333333333) * (x + 1), 1.0 *\n x - 0.5, L * x * (-0.625 * x ** 3 + 0.5 * x ** 2 + 0.375 * x - 0.25), \n 0.625 * L * x * (x - 1) * (1.0 * x ** 2 + 0.2 * x - 0.4)], [-2.0 * x, \n 4.0 * x * (x ** 2 - 1), 4.0 * x * (x ** 2 - 1), -2.0 * x, L * (-2.5 * x **\n 4 + 3.0 * x ** 2 - 0.5), L * (2.5 * x ** 4 - 3.0 * x ** 2 + 0.5)], [1.0 *\n x + 0.5, -3.75 * x * (x - 1) * (1.0 * x + 0.533333333333333) * (x + 1),\n -3.75 * x * (x - 1) * (1.0 * x + 0.533333333333333) * (x + 1), 1.0 * x +\n 0.5, 0.625 * L * x * (x + 1) * (-1.0 * x ** 2 + 0.2 * x + 0.4), L * x *\n (0.625 * x ** 3 + 0.5 * x ** 2 - 0.375 * x - 0.25)]]'], {}), '([[1.0 * x - 0.5, 3.75 * x * (x - 1) * (1.0 * x - 0.533333333333333\n ) * (x + 1), 3.75 * x * (x - 1) * (1.0 * x - 0.533333333333333) * (x + \n 1), 1.0 * x - 0.5, L * x * (-0.625 * x ** 3 + 0.5 * x ** 2 + 0.375 * x -\n 0.25), 0.625 * L * x * (x - 1) * (1.0 * x ** 2 + 0.2 * x - 0.4)], [-2.0 *\n x, 4.0 * x * (x ** 2 - 1), 4.0 * x * (x ** 2 - 1), -2.0 * x, L * (-2.5 *\n x ** 4 + 3.0 * x ** 2 - 0.5), L * (2.5 * x ** 4 - 3.0 * x ** 2 + 0.5)],\n [1.0 * x + 0.5, -3.75 * x * (x - 1) * (1.0 * x + 0.533333333333333) 
* (\n x + 1), -3.75 * x * (x - 1) * (1.0 * x + 0.533333333333333) * (x + 1), \n 1.0 * x + 0.5, 0.625 * L * x * (x + 1) * (-1.0 * x ** 2 + 0.2 * x + 0.4\n ), L * x * (0.625 * x ** 3 + 0.5 * x ** 2 - 0.375 * x - 0.25)]])\n', (1353, 2101), True, 'import numpy as np\n'), ((2514, 3098), 'numpy.array', 'np.array', (['[[1.0, 15.0 * x ** 3 - 6.0 * x ** 2 - 7.5 * x + 2.0, 15.0 * x ** 3 - 6.0 * \n x ** 2 - 7.5 * x + 2.0, 1.0, L * (-2.5 * x ** 3 + 1.5 * x ** 2 + 0.75 *\n x - 0.25), L * (2.5 * x ** 3 - 1.5 * x ** 2 - 0.75 * x + 0.25)], [-2.0,\n 12.0 * x ** 2 - 4.0, 12.0 * x ** 2 - 4.0, -2.0, L * x * (6.0 - 10.0 * x **\n 2), L * x * (10.0 * x ** 2 - 6.0)], [1.0, -15.0 * x ** 3 - 6.0 * x ** 2 +\n 7.5 * x + 2.0, -15.0 * x ** 3 - 6.0 * x ** 2 + 7.5 * x + 2.0, 1.0, L *\n (-2.5 * x ** 3 - 1.5 * x ** 2 + 0.75 * x + 0.25), L * (2.5 * x ** 3 + \n 1.5 * x ** 2 - 0.75 * x - 0.25)]]'], {}), '([[1.0, 15.0 * x ** 3 - 6.0 * x ** 2 - 7.5 * x + 2.0, 15.0 * x ** 3 -\n 6.0 * x ** 2 - 7.5 * x + 2.0, 1.0, L * (-2.5 * x ** 3 + 1.5 * x ** 2 + \n 0.75 * x - 0.25), L * (2.5 * x ** 3 - 1.5 * x ** 2 - 0.75 * x + 0.25)],\n [-2.0, 12.0 * x ** 2 - 4.0, 12.0 * x ** 2 - 4.0, -2.0, L * x * (6.0 - \n 10.0 * x ** 2), L * x * (10.0 * x ** 2 - 6.0)], [1.0, -15.0 * x ** 3 - \n 6.0 * x ** 2 + 7.5 * x + 2.0, -15.0 * x ** 3 - 6.0 * x ** 2 + 7.5 * x +\n 2.0, 1.0, L * (-2.5 * x ** 3 - 1.5 * x ** 2 + 0.75 * x + 0.25), L * (\n 2.5 * x ** 3 + 1.5 * x ** 2 - 0.75 * x - 0.25)]])\n', (2522, 3098), True, 'import numpy as np\n'), ((3581, 3991), 'numpy.array', 'np.array', (['[[0, 45.0 * x ** 2 - 12.0 * x - 7.5, 45.0 * x ** 2 - 12.0 * x - 7.5, 0, L *\n (-7.5 * x ** 2 + 3.0 * x + 0.75), L * (7.5 * x ** 2 - 3.0 * x - 0.75)],\n [0, 24.0 * x, 24.0 * x, 0, L * (6.0 - 30.0 * x ** 2), L * (30.0 * x ** \n 2 - 6.0)], [0, -45.0 * x ** 2 - 12.0 * x + 7.5, -45.0 * x ** 2 - 12.0 *\n x + 7.5, 0, L * (-7.5 * x ** 2 - 3.0 * x + 0.75), L * (7.5 * x ** 2 + \n 3.0 * x - 0.75)]]'], {}), '([[0, 45.0 * x ** 2 - 12.0 * x - 7.5, 45.0 * x ** 2 - 12.0 * 
x - \n 7.5, 0, L * (-7.5 * x ** 2 + 3.0 * x + 0.75), L * (7.5 * x ** 2 - 3.0 *\n x - 0.75)], [0, 24.0 * x, 24.0 * x, 0, L * (6.0 - 30.0 * x ** 2), L * (\n 30.0 * x ** 2 - 6.0)], [0, -45.0 * x ** 2 - 12.0 * x + 7.5, -45.0 * x **\n 2 - 12.0 * x + 7.5, 0, L * (-7.5 * x ** 2 - 3.0 * x + 0.75), L * (7.5 *\n x ** 2 + 3.0 * x - 0.75)]])\n', (3589, 3991), True, 'import numpy as np\n'), ((4667, 4706), 'numpy.zeros', 'np.zeros', (['(nE, nP, 3, 6)'], {'dtype': 'x.dtype'}), '((nE, nP, 3, 6), dtype=x.dtype)\n', (4675, 4706), True, 'import numpy as np\n'), ((4721, 4731), 'numba.prange', 'prange', (['nE'], {}), '(nE)\n', (4727, 4731), False, 'from numba import njit, prange\n'), ((5373, 5392), 'numpy.zeros', 'np.zeros', (['(3, 6, 3)'], {}), '((3, 6, 3))\n', (5381, 5392), True, 'import numpy as np\n'), ((5903, 5945), 'numpy.zeros', 'np.zeros', (['(nE, nP, 3, 6, 3)'], {'dtype': 'x.dtype'}), '((nE, nP, 3, 6, 3), dtype=x.dtype)\n', (5911, 5945), True, 'import numpy as np\n'), ((5960, 5970), 'numba.prange', 'prange', (['nE'], {}), '(nE)\n', (5966, 5970), False, 'from numba import njit, prange\n'), ((4751, 4761), 'numba.prange', 'prange', (['nP'], {}), '(nP)\n', (4757, 4761), False, 'from numba import njit, prange\n'), ((5990, 6000), 'numba.prange', 'prange', (['nP'], {}), '(nP)\n', (5996, 6000), False, 'from numba import njit, prange\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
import warnings
from random import shuffle
from time import time
import numpy as np
import pandas as pd
import xgboost as xgb
from xgboost.callback import reset_learning_rate
import lightgbm as lgb
from catboost import Pool, CatBoostClassifier
from itertools import product
from sklearn.metrics import roc_auc_score
from math import ceil
from gbm_utils import format_time, get_data, get_one_hot_data, factorize_cats, get_holdout_set, OneStepTimeSeriesSplit
from gbm_params import get_params
# Print wide result frames without wrapping columns.
pd.set_option('display.expand_frame_repr', False)
warnings.filterwarnings('ignore')
idx = pd.IndexSlice
# Fixed seed so the shuffled parameter grid (and numpy-based randomness)
# is reproducible across runs.
np.random.seed(42)
def learning_rate(n, ntot):
    """Sigmoid-shaped learning-rate decay schedule.

    Starts near 0.1 for early boosting rounds, drops most steeply
    around round ntot / 1.8, and approaches 0 as n nears ntot.

    Parameters
    ----------
    n : int or float
        Current boosting round.
    ntot : int or float
        Total number of boosting rounds.

    Returns
    -------
    float
        Learning rate to use for round n.
    """
    initial_eta = 0.1
    steepness = 8 / ntot
    midpoint = ntot / 1.8
    sigmoid = 1 / (1 + np.exp(-steepness * (n - midpoint)))
    return initial_eta * (1 - sigmoid)
def get_datasets(features, target, kfold, model='xgboost'):
    """Pre-build train/validation dataset objects for every CV fold.

    Parameters
    ----------
    features : pd.DataFrame
        Feature matrix; must contain the hard-coded categorical columns.
    target : pd.Series
        Label aligned with `features`.
    kfold : cross-validator
        Object exposing `split(features)` yielding (train_idx, test_idx).
    model : str
        One of 'xgboost', 'lightgbm' or 'catboost'; selects the
        library-specific container type.

    Returns
    -------
    dict
        Maps fold number -> {'train': ..., 'valid': ...} in the chosen
        library's native dataset format.
    """
    categorical = ['year', 'month', 'age', 'msize', 'sector']
    folds = {}
    for fold, (train_idx, test_idx) in enumerate(kfold.split(features)):
        print(fold, end=' ', flush=True)  # progress indicator
        y_train, X_train = target.iloc[train_idx], features.iloc[train_idx]
        y_valid, X_valid = target.iloc[test_idx], features.iloc[test_idx]
        if model == 'xgboost':
            # nthread=-1: use all available threads
            folds[fold] = {
                'train': xgb.DMatrix(label=y_train, data=X_train, nthread=-1),
                'valid': xgb.DMatrix(label=y_valid, data=X_valid, nthread=-1)}
        elif model == 'lightgbm':
            train = lgb.Dataset(label=y_train,
                                data=X_train,
                                categorical_feature=categorical,
                                free_raw_data=False)
            # create_valid aligns the validation histograms with the
            # training set's binning
            valid = train.create_valid(label=y_valid, data=X_valid)
            folds[fold] = {'train': train.construct(),
                           'valid': valid.construct()}
        elif model == 'catboost':
            # catboost wants positional indices of the categorical columns
            cat_idx = [features.columns.get_loc(c) for c in categorical]
            folds[fold] = {
                'train': Pool(label=y_train, data=X_train, cat_features=cat_idx),
                'valid': Pool(label=y_valid, data=X_valid, cat_features=cat_idx)}
    return folds
def run_cv(test_params, data, n_splits=10, gb_machine='xgboost'):
    """Cross-validate one hyperparameter candidate with early stopping.

    Parameters
    ----------
    test_params : dict
        Booster hyperparameters for the chosen library.
    data : dict
        Per-fold datasets as built by `get_datasets`:
        data[fold]['train'] / data[fold]['valid'].
    n_splits : int
        Number of folds to run.
    gb_machine : str
        One of 'xgboost', 'lightgbm' or 'catboost'.

    Returns
    -------
    pd.Series
        Per-column means, then stds (suffixed '_std') of boosting
        rounds and train/valid AUC across folds, concatenated with the
        tested parameter values.
    """
    result = []
    cols = ['rounds', 'train', 'valid']
    for fold in range(n_splits):
        train = data[fold]['train']
        valid = data[fold]['valid']
        scores = {}  # filled in place by the library via evals_result
        if gb_machine == 'xgboost':
            model = xgb.train(params=test_params,
                              dtrain=train,
                              evals=list(zip([train, valid], ['train', 'valid'])),
                              verbose_eval=50,
                              num_boost_round=250,
                              early_stopping_rounds=25,
                              evals_result=scores)
            # [-1] = metric value at the last evaluated round; assumes
            # 'auc' is the configured eval metric (see get_params)
            result.append([model.best_iteration,
                           scores['train']['auc'][-1],
                           scores['valid']['auc'][-1]])
        elif gb_machine == 'lightgbm':
            model = lgb.train(params=test_params,
                              train_set=train,
                              valid_sets=[train, valid],
                              valid_names=['train', 'valid'],
                              num_boost_round=250,
                              early_stopping_rounds=25,
                              verbose_eval=50,
                              evals_result=scores)
            result.append([model.current_iteration(),
                           scores['train']['auc'][-1],
                           scores['valid']['auc'][-1]])
        elif gb_machine == 'catboost':
            model = CatBoostClassifier(**test_params)
            model.fit(X=train,
                      eval_set=[valid],
                      logging_level='Silent')
            # catboost exposes no evals_result here; recompute AUC from
            # the predicted probabilities of the positive class
            train_score = model.predict_proba(train)[:, 1]
            valid_score = model.predict_proba(valid)[:, 1]
            result.append([
                model.tree_count_,
                roc_auc_score(y_score=train_score, y_true=train.get_label()),
                roc_auc_score(y_score=valid_score, y_true=valid.get_label())
            ])
    df = pd.DataFrame(result, columns=cols)
    # means, then stds renamed with a '_std' suffix, then the parameters
    return (df
            .mean()
            .append(df.std().rename({c: c + '_std' for c in cols}))
            .append(pd.Series(test_params)))
# ---------------- configuration ----------------
GBM = 'lightgbm'
HOLDOUT = True   # carve out and persist a holdout set before tuning
FACTORS = True   # integer-encoded categoricals vs. one-hot dummies
n_splits = 12
# HDFStore key under which the tuning results are saved
result_key = f"/{GBM}/{'factors' if FACTORS else 'dummies'}/results/2"
y, features = get_data()
if FACTORS:
    X = factorize_cats(features)
else:
    X = get_one_hot_data(features)
if HOLDOUT:
    y, X, y_test, X_test = get_holdout_set(target=y,
                                            features=X)
    # persist the holdout split once; skip if already stored for this GBM
    with pd.HDFStore('model_tuning.h5') as store:
        key = f'{GBM}/holdout/'
        if not any([k for k in store.keys() if k[1:].startswith(key)]):
            store.put(key + 'features', X_test, format='t' if FACTORS else 'f')
            store.put(key + 'target', y_test)
cv = OneStepTimeSeriesSplit(n_splits=n_splits)
datasets = get_datasets(features=X, target=y, kfold=cv, model=GBM)
results = pd.DataFrame()
# Grid of candidate hyperparameters; commented entries are alternatives
# for the other GBM libraries / earlier experiments.
param_grid = dict(
    # common options
    learning_rate=[.01, .1, .3],
    # max_depth=list(range(3, 14, 2)),
    colsample_bytree=[.8, 1], # except catboost
    # lightgbm
    # max_bin=[32, 128],
    num_leaves=[2 ** i for i in range(9, 14)],
    boosting=['gbdt', 'dart'],
    min_gain_to_split=[0, 1, 5], # not supported on GPU
    # xgboost
    # booster=['gbtree', 'dart'],
    # gamma=[0, 1, 5],
    # catboost
    # one_hot_max_size=[None, 2],
    # max_ctr_complexity=[1, 2, 3],
    # random_strength=[None, 1],
    # colsample_bylevel=[.6, .8, 1]
)
# Cartesian product of all grid values, evaluated in random order so an
# interrupted run still samples the space uniformly.
all_params = list(product(*param_grid.values()))
n_models = len(all_params)
shuffle(all_params)
print('\n# Models:', n_models)
start = time()
for n, test_param in enumerate(all_params, 1):
    iteration = time()
    cv_params = get_params(GBM)
    cv_params.update(dict(zip(param_grid.keys(), test_param)))
    if GBM == 'lightgbm':
        # keep max_depth consistent with num_leaves (depth >= log2(leaves))
        cv_params['max_depth'] = int(ceil(np.log2(cv_params['num_leaves'])))
    # print(pd.Series(cv_params))
    results[n] = run_cv(test_params=cv_params,
                       data=datasets,
                       n_splits=n_splits,
                       gb_machine=GBM)
    results.loc['time', n] = time() - iteration
    if n > 1:
        # show the top-5 candidates so far, dropping rows that are
        # identical across all models (i.e. the constant parameters)
        df = results[~results.eq(results.iloc[:, 0], axis=0).all(1)].T
        if 'valid' in df.columns:
            df.valid = pd.to_numeric(df.valid)
            print('\n')
            print(df.sort_values('valid', ascending=False).head(5).reset_index(drop=True))
    # progress report with elapsed and estimated remaining wall time
    out = f'\n\tModel: {n} of {n_models} | '
    out += f'{format_time(time() - iteration)} | '
    out += f'Total: {format_time(time() - start)} | '
    print(out + f'Remaining: {format_time((time() - start)/n*(n_models-n))}\n')
with pd.HDFStore('model_tuning.h5') as store:
    store.put(result_key, results.T.apply(pd.to_numeric, errors='ignore'))
| [
"lightgbm.train",
"gbm_utils.get_one_hot_data",
"lightgbm.Dataset",
"catboost.CatBoostClassifier",
"xgboost.DMatrix",
"pandas.HDFStore",
"gbm_utils.get_holdout_set",
"gbm_utils.factorize_cats",
"pandas.set_option",
"numpy.exp",
"numpy.random.seed",
"gbm_utils.OneStepTimeSeriesSplit",
"pandas... | [((561, 610), 'pandas.set_option', 'pd.set_option', (['"""display.expand_frame_repr"""', '(False)'], {}), "('display.expand_frame_repr', False)\n", (574, 610), True, 'import pandas as pd\n'), ((611, 644), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (634, 644), False, 'import warnings\n'), ((665, 683), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (679, 683), True, 'import numpy as np\n'), ((5160, 5170), 'gbm_utils.get_data', 'get_data', ([], {}), '()\n', (5168, 5170), False, 'from gbm_utils import format_time, get_data, get_one_hot_data, factorize_cats, get_holdout_set, OneStepTimeSeriesSplit\n'), ((5665, 5706), 'gbm_utils.OneStepTimeSeriesSplit', 'OneStepTimeSeriesSplit', ([], {'n_splits': 'n_splits'}), '(n_splits=n_splits)\n', (5687, 5706), False, 'from gbm_utils import format_time, get_data, get_one_hot_data, factorize_cats, get_holdout_set, OneStepTimeSeriesSplit\n'), ((5786, 5800), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5798, 5800), True, 'import pandas as pd\n'), ((6513, 6532), 'random.shuffle', 'shuffle', (['all_params'], {}), '(all_params)\n', (6520, 6532), False, 'from random import shuffle\n'), ((6574, 6580), 'time.time', 'time', ([], {}), '()\n', (6578, 6580), False, 'from time import time\n'), ((4828, 4862), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {'columns': 'cols'}), '(result, columns=cols)\n', (4840, 4862), True, 'import pandas as pd\n'), ((5191, 5215), 'gbm_utils.factorize_cats', 'factorize_cats', (['features'], {}), '(features)\n', (5205, 5215), False, 'from gbm_utils import format_time, get_data, get_one_hot_data, factorize_cats, get_holdout_set, OneStepTimeSeriesSplit\n'), ((5230, 5256), 'gbm_utils.get_one_hot_data', 'get_one_hot_data', (['features'], {}), '(features)\n', (5246, 5256), False, 'from gbm_utils import format_time, get_data, get_one_hot_data, factorize_cats, get_holdout_set, OneStepTimeSeriesSplit\n'), ((5297, 5334), 
'gbm_utils.get_holdout_set', 'get_holdout_set', ([], {'target': 'y', 'features': 'X'}), '(target=y, features=X)\n', (5312, 5334), False, 'from gbm_utils import format_time, get_data, get_one_hot_data, factorize_cats, get_holdout_set, OneStepTimeSeriesSplit\n'), ((6644, 6650), 'time.time', 'time', ([], {}), '()\n', (6648, 6650), False, 'from time import time\n'), ((6668, 6683), 'gbm_params.get_params', 'get_params', (['GBM'], {}), '(GBM)\n', (6678, 6683), False, 'from gbm_params import get_params\n'), ((4986, 5008), 'pandas.Series', 'pd.Series', (['test_params'], {}), '(test_params)\n', (4995, 5008), True, 'import pandas as pd\n'), ((5388, 5418), 'pandas.HDFStore', 'pd.HDFStore', (['"""model_tuning.h5"""'], {}), "('model_tuning.h5')\n", (5399, 5418), True, 'import pandas as pd\n'), ((7083, 7089), 'time.time', 'time', ([], {}), '()\n', (7087, 7089), False, 'from time import time\n'), ((7625, 7655), 'pandas.HDFStore', 'pd.HDFStore', (['"""model_tuning.h5"""'], {}), "('model_tuning.h5')\n", (7636, 7655), True, 'import pandas as pd\n'), ((7245, 7268), 'pandas.to_numeric', 'pd.to_numeric', (['df.valid'], {}), '(df.valid)\n', (7258, 7268), True, 'import pandas as pd\n'), ((1147, 1235), 'xgboost.DMatrix', 'xgb.DMatrix', ([], {'label': 'target.iloc[train_idx]', 'data': 'features.iloc[train_idx]', 'nthread': '(-1)'}), '(label=target.iloc[train_idx], data=features.iloc[train_idx],\n nthread=-1)\n', (1158, 1235), True, 'import xgboost as xgb\n'), ((1403, 1489), 'xgboost.DMatrix', 'xgb.DMatrix', ([], {'label': 'target.iloc[test_idx]', 'data': 'features.iloc[test_idx]', 'nthread': '(-1)'}), '(label=target.iloc[test_idx], data=features.iloc[test_idx],\n nthread=-1)\n', (1414, 1489), True, 'import xgboost as xgb\n'), ((1635, 1762), 'lightgbm.Dataset', 'lgb.Dataset', ([], {'label': 'target.iloc[train_idx]', 'data': 'features.iloc[train_idx]', 'categorical_feature': 'cat_cols', 'free_raw_data': '(False)'}), '(label=target.iloc[train_idx], data=features.iloc[train_idx],\n 
categorical_feature=cat_cols, free_raw_data=False)\n', (1646, 1762), True, 'import lightgbm as lgb\n'), ((3688, 3886), 'lightgbm.train', 'lgb.train', ([], {'params': 'test_params', 'train_set': 'train', 'valid_sets': '[train, valid]', 'valid_names': "['train', 'valid']", 'num_boost_round': '(250)', 'early_stopping_rounds': '(25)', 'verbose_eval': '(50)', 'evals_result': 'scores'}), "(params=test_params, train_set=train, valid_sets=[train, valid],\n valid_names=['train', 'valid'], num_boost_round=250,\n early_stopping_rounds=25, verbose_eval=50, evals_result=scores)\n", (3697, 3886), True, 'import lightgbm as lgb\n'), ((6815, 6847), 'numpy.log2', 'np.log2', (["cv_params['num_leaves']"], {}), "(cv_params['num_leaves'])\n", (6822, 6847), True, 'import numpy as np\n'), ((808, 829), 'numpy.exp', 'np.exp', (['(-k * (n - x0))'], {}), '(-k * (n - x0))\n', (814, 829), True, 'import numpy as np\n'), ((4315, 4348), 'catboost.CatBoostClassifier', 'CatBoostClassifier', ([], {}), '(**test_params)\n', (4333, 4348), False, 'from catboost import Pool, CatBoostClassifier\n'), ((7456, 7462), 'time.time', 'time', ([], {}), '()\n', (7460, 7462), False, 'from time import time\n'), ((7514, 7520), 'time.time', 'time', ([], {}), '()\n', (7518, 7520), False, 'from time import time\n'), ((2357, 2453), 'catboost.Pool', 'Pool', ([], {'label': 'target.iloc[train_idx]', 'data': 'features.iloc[train_idx]', 'cat_features': 'cat_cols_idx'}), '(label=target.iloc[train_idx], data=features.iloc[train_idx],\n cat_features=cat_cols_idx)\n', (2361, 2453), False, 'from catboost import Pool, CatBoostClassifier\n'), ((2567, 2661), 'catboost.Pool', 'Pool', ([], {'label': 'target.iloc[test_idx]', 'data': 'features.iloc[test_idx]', 'cat_features': 'cat_cols_idx'}), '(label=target.iloc[test_idx], data=features.iloc[test_idx],\n cat_features=cat_cols_idx)\n', (2571, 2661), False, 'from catboost import Pool, CatBoostClassifier\n'), ((7578, 7584), 'time.time', 'time', ([], {}), '()\n', (7582, 7584), False, 'from 
time import time\n')] |
import paddle
import paddle.fluid as fluid
from paddle.fluid.contrib import sparsity
import numpy as np
# The rest of this script builds fluid Programs, so switch Paddle to the
# static-graph API before any graph construction happens.
paddle.enable_static()
def main():
    """Prune an FC layer to 2:4 structured sparsity and verify sparse ops.

    Builds a small float16-input FC network, prunes its weight matrix with a
    2:4 greedy mask, runs one dense training step, then swaps the dense ops
    for sparse ops and reruns with the same (restored) parameters, printing
    whether forward outputs and weight gradients match element-wise.
    """
    main_prog = fluid.Program()
    start_prog = fluid.Program()

    # Graph: float16 data -> FC -> cast to float32 -> MSE loss vs. label.
    with fluid.program_guard(main_prog, start_prog):
        input_data = fluid.layers.data(
            name='test_data', shape=[None, 4, 32], dtype='float16')
        label = fluid.layers.data(
            name='test_label', shape=[None, 4, 32], dtype='float32')
        fc = fluid.layers.fc(input=input_data, num_flatten_dims=-1, size=32, act=None)
        fc_32 = fluid.layers.cast(x=fc, dtype="float32")
        fc_loss = fluid.layers.mean(fluid.layers.square_error_cost(fc_32, label))

    with fluid.program_guard(main_prog, start_prog):
        optimizer = fluid.optimizer.SGD(learning_rate=0.1)
        optimizer.minimize(fc_loss)

    place = fluid.CUDAPlace(0)
    executor = fluid.Executor(place)
    feeder = fluid.DataFeeder(place=place, feed_list=[input_data, label])
    executor.run(start_prog)

    # Prune the FC weight with a 2:4 greedy mask and write it back in place.
    weight_var = fluid.global_scope().find_var('fc_0.w_0')
    weight_tensor = weight_var.get_tensor()
    weight_np = np.array(weight_tensor)
    mask = sparsity.create_mask(weight_np, func_name='get_mask_2d_greedy')
    pruned_w = np.multiply(weight_np, mask)
    assert sparsity.check_mask_2d(pruned_w, m=4, n=2), "Pruning FC weight matrix failure!!!"
    weight_tensor.set(pruned_w, place)

    # Snapshot the bias so it can be restored after the dense SGD step.
    bias_var = fluid.global_scope().find_var('fc_0.b_0')
    bias_tensor = bias_var.get_tensor()
    bias_np = np.array(bias_tensor)

    data = np.random.randint(9, size=(8, 4, 32))

    # Dense pass (the SGD step mutates the parameters).
    fc_result, fc_loss_result, fc_grad = executor.run(
        main_prog, feed=feeder.feed([(data, data)]), fetch_list=[fc, fc_loss, 'fc_0.w_0@GRAD'])

    # Restore the pruned parameters, switch to sparse kernels, and rerun.
    weight_tensor.set(pruned_w, place)
    bias_tensor.set(bias_np, place)
    sparsity.ASPHelper.replace_dense_to_sparse_op(main_prog)
    fc_sparse_result, fc_sparse_loss_result, fcs_grad = executor.run(
        main_prog, feed=feeder.feed([(data, data)]), fetch_list=[fc, fc_loss, 'fc_0.w_0@GRAD'])

    print(fc_result.shape, fc_sparse_result.shape)
    print(fc_grad.shape, fcs_grad.shape)
    print("FC Loss: {:.3f}, FC_Sparse Loss: {:.3f}".format(fc_loss_result[0], fc_sparse_loss_result[0]))

    # Element-wise comparison of dense vs. sparse forward outputs.
    print("Checking forwarding results")
    forward_ok = True
    for b in range(8):
        for r in range(4):
            for c in range(32):
                dense_val = fc_result[b][r][c]
                sparse_val = fc_sparse_result[b][r][c]
                if dense_val != sparse_val:
                    forward_ok = False
                    print(b, r, "::", dense_val, "-", sparse_val)
    print("Checking forwarding results ---> ", forward_ok)

    # Element-wise comparison of dense vs. sparse weight gradients.
    print("Checking gradients")
    grads_ok = True
    for r in range(32):
        for c in range(32):
            if fc_grad[r][c] != fcs_grad[r][c]:
                grads_ok = False
                print(r, c, "::", fc_grad[r][c], "-", fcs_grad[r][c])
    print("Checking gradients results ---> ", grads_ok)
# Run the pruning/consistency check when invoked as a script.
if __name__ == "__main__":
    main()
"paddle.fluid.DataFeeder",
"paddle.fluid.contrib.sparsity.create_mask",
"paddle.fluid.Program",
"numpy.multiply",
"paddle.fluid.contrib.sparsity.check_mask_2d",
"paddle.fluid.layers.fc",
"paddle.fluid.contrib.sparsity.ASPHelper.replace_dense_to_sparse_op",
"paddle.fluid.global_scope",
"paddle.fluid.... | [((105, 127), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (125, 127), False, 'import paddle\n'), ((161, 176), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (174, 176), True, 'import paddle.fluid as fluid\n'), ((196, 211), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (209, 211), True, 'import paddle.fluid as fluid\n'), ((885, 903), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (900, 903), True, 'import paddle.fluid as fluid\n'), ((914, 935), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (928, 935), True, 'import paddle.fluid as fluid\n'), ((949, 1009), 'paddle.fluid.DataFeeder', 'fluid.DataFeeder', ([], {'place': 'place', 'feed_list': '[input_data, label]'}), '(place=place, feed_list=[input_data, label])\n', (965, 1009), True, 'import paddle.fluid as fluid\n'), ((1140, 1159), 'numpy.array', 'np.array', (['fcw_param'], {}), '(fcw_param)\n', (1148, 1159), True, 'import numpy as np\n'), ((1178, 1241), 'paddle.fluid.contrib.sparsity.create_mask', 'sparsity.create_mask', (['fcw_array'], {'func_name': '"""get_mask_2d_greedy"""'}), "(fcw_array, func_name='get_mask_2d_greedy')\n", (1198, 1241), False, 'from paddle.fluid.contrib import sparsity\n'), ((1257, 1292), 'numpy.multiply', 'np.multiply', (['fcw_array', 'sparse_mask'], {}), '(fcw_array, sparse_mask)\n', (1268, 1292), True, 'import numpy as np\n'), ((1304, 1346), 'paddle.fluid.contrib.sparsity.check_mask_2d', 'sparsity.check_mask_2d', (['pruned_w'], {'m': '(4)', 'n': '(2)'}), '(pruned_w, m=4, n=2)\n', (1326, 1346), False, 'from paddle.fluid.contrib import sparsity\n'), ((1524, 1543), 'numpy.array', 'np.array', (['fcb_param'], {}), '(fcb_param)\n', (1532, 1543), True, 'import numpy as np\n'), ((1556, 1593), 'numpy.random.randint', 'np.random.randint', (['(9)'], {'size': '(8, 4, 32)'}), '(9, size=(8, 4, 32))\n', (1573, 1593), True, 'import numpy as np\n'), ((1822, 1882), 
'paddle.fluid.contrib.sparsity.ASPHelper.replace_dense_to_sparse_op', 'sparsity.ASPHelper.replace_dense_to_sparse_op', (['train_program'], {}), '(train_program)\n', (1867, 1882), False, 'from paddle.fluid.contrib import sparsity\n'), ((222, 270), 'paddle.fluid.program_guard', 'fluid.program_guard', (['train_program', 'startup_prog'], {}), '(train_program, startup_prog)\n', (241, 270), True, 'import paddle.fluid as fluid\n'), ((293, 366), 'paddle.fluid.layers.data', 'fluid.layers.data', ([], {'name': '"""test_data"""', 'shape': '[None, 4, 32]', 'dtype': '"""float16"""'}), "(name='test_data', shape=[None, 4, 32], dtype='float16')\n", (310, 366), True, 'import paddle.fluid as fluid\n'), ((396, 470), 'paddle.fluid.layers.data', 'fluid.layers.data', ([], {'name': '"""test_label"""', 'shape': '[None, 4, 32]', 'dtype': '"""float32"""'}), "(name='test_label', shape=[None, 4, 32], dtype='float32')\n", (413, 470), True, 'import paddle.fluid as fluid\n'), ((497, 570), 'paddle.fluid.layers.fc', 'fluid.layers.fc', ([], {'input': 'input_data', 'num_flatten_dims': '(-1)', 'size': '(32)', 'act': 'None'}), '(input=input_data, num_flatten_dims=-1, size=32, act=None)\n', (512, 570), True, 'import paddle.fluid as fluid\n'), ((586, 626), 'paddle.fluid.layers.cast', 'fluid.layers.cast', ([], {'x': 'fc', 'dtype': '"""float32"""'}), "(x=fc, dtype='float32')\n", (603, 626), True, 'import paddle.fluid as fluid\n'), ((719, 767), 'paddle.fluid.program_guard', 'fluid.program_guard', (['train_program', 'startup_prog'], {}), '(train_program, startup_prog)\n', (738, 767), True, 'import paddle.fluid as fluid\n'), ((793, 831), 'paddle.fluid.optimizer.SGD', 'fluid.optimizer.SGD', ([], {'learning_rate': '(0.1)'}), '(learning_rate=0.1)\n', (812, 831), True, 'import paddle.fluid as fluid\n'), ((663, 707), 'paddle.fluid.layers.square_error_cost', 'fluid.layers.square_error_cost', (['fc_32', 'label'], {}), '(fc_32, label)\n', (693, 707), True, 'import paddle.fluid as fluid\n'), ((1048, 1068), 
'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (1066, 1068), True, 'import paddle.fluid as fluid\n'), ((1433, 1453), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (1451, 1453), True, 'import paddle.fluid as fluid\n')] |
from unittest import TestCase
import numpy as np
from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer
import torch
import os
from contextlib import contextmanager
import matplotlib.pyplot as plt
@contextmanager
def assert_plot_figures_added():
    """Assert that at least one new matplotlib figure appears in the block.

    Records the current figure number before yielding and compares it with
    the figure number afterwards; the managed code must have created at
    least one new figure.
    """
    count_before = plt.gcf().number
    yield
    count_after = plt.gcf().number
    assert count_before < count_after
def read_data():
    """Load the NACA0012 test dataset from disk.

    Returns a tuple ``(inputs, lift, grad_lift)`` where ``inputs`` are the
    18 design parameters normalized into [-1, 1], ``lift`` is the scalar
    output column, and ``grad_lift`` holds the gradients of lift with
    respect to the normalized inputs.
    """
    raw = np.loadtxt('tests/data/naca0012.txt', skiprows=1, delimiter=',')
    real_inputs = raw[:, 1:19]
    dim = real_inputs.shape[1]
    lower = -0.01 * np.ones(dim)
    upper = 0.01 * np.ones(dim)
    # Normalize the raw parameters into [-1, 1].
    scaled_inputs = Normalizer(lb=lower, ub=upper).fit_transform(real_inputs)
    lift = raw[:, 19]
    # Gradients are taken with respect to the normalized inputs.
    grad_lift = raw[:, 21:39]
    return scaled_inputs, lift, grad_lift
# Shared fixtures: load the dataset once at import time and expose double
# precision torch tensors reused by every test case below.
inputs, lift, grad_lift = read_data()
inputs_torch = torch.as_tensor(inputs, dtype=torch.double)
grad_torch = torch.as_tensor(grad_lift, dtype=torch.double)
class TestNonlinearLevelSet(TestCase):
    """Tests for athena.NonlinearLevelSet.

    Fixes over the previous version: ``test_train_03`` used ``assertIs`` to
    compare an integer length (identity, which only passed via CPython's
    small-int caching) — replaced with ``assertEqual``; the repeated
    constructor/training boilerplate is factored into two helpers.
    """

    @staticmethod
    def _make_nls(**overrides):
        """Return a NonlinearLevelSet built with the default init config."""
        params = {'n_layers': 2, 'active_dim': 1, 'lr': 0.1,
                  'epochs': 100, 'dh': 0.25}
        params.update(overrides)
        return NonlinearLevelSet(**params)

    @staticmethod
    def _train_nls(**overrides):
        """Return a NonlinearLevelSet trained briefly on the module fixtures."""
        params = {'n_layers': 2, 'active_dim': 1, 'lr': 0.02, 'epochs': 1}
        params.update(overrides)
        nll = NonlinearLevelSet(**params)
        nll.train(inputs=inputs_torch, gradients=grad_torch, interactive=False)
        return nll

    def test_init_n_layers(self):
        self.assertEqual(self._make_nls().n_layers, 2)

    def test_init_active_dim(self):
        self.assertEqual(self._make_nls().active_dim, 1)

    def test_init_lr(self):
        self.assertEqual(self._make_nls().lr, 0.1)

    def test_init_epochs(self):
        self.assertEqual(self._make_nls().epochs, 100)

    def test_init_dh(self):
        self.assertEqual(self._make_nls().dh, 0.25)

    def test_init_forward(self):
        self.assertIsNone(self._make_nls().forward)

    def test_init_backward(self):
        self.assertIsNone(self._make_nls().backward)

    def test_init_loss_vec(self):
        self.assertEqual(self._make_nls().loss_vec, [])

    def test_train_01(self):
        self.assertIsInstance(self._train_nls().forward, ForwardNet)

    def test_train_02(self):
        self.assertIsInstance(self._train_nls().backward, BackwardNet)

    def test_train_03(self):
        # assertEqual, not assertIs: this compares integer values, not
        # object identity.
        self.assertEqual(len(self._train_nls().loss_vec), 1)

    def test_train_04(self):
        nll = NonlinearLevelSet(n_layers=2, active_dim=1, lr=0.02, epochs=1)
        # interactive=True without outputs must raise.
        with self.assertRaises(ValueError):
            nll.train(inputs=inputs_torch,
                      gradients=grad_torch,
                      interactive=True)

    def test_train_05(self):
        nll = NonlinearLevelSet(n_layers=2, active_dim=1, lr=0.02, epochs=1)
        with assert_plot_figures_added():
            nll.train(inputs=inputs_torch,
                      gradients=grad_torch,
                      outputs=lift,
                      interactive=True)

    def test_forward_n_params(self):
        self.assertEqual(self._train_nls().forward.n_params, 9)

    def test_backward_n_params(self):
        self.assertEqual(self._train_nls().backward.n_params, 9)

    def test_plot_sufficient_summary_01(self):
        nll = self._train_nls()
        with assert_plot_figures_added():
            nll.plot_sufficient_summary(inputs=inputs_torch, outputs=lift)

    def test_plot_sufficient_summary_02(self):
        # Plotting the summary with active_dim != 1 must raise.
        nll = self._train_nls(active_dim=2)
        with self.assertRaises(ValueError):
            nll.plot_sufficient_summary(inputs=inputs_torch, outputs=lift)

    def test_plot_loss(self):
        nll = self._train_nls(epochs=2)
        with assert_plot_figures_added():
            nll.plot_loss()

    def test_save_forward(self):
        nll = self._train_nls()
        outfilename = 'tests/data/saved_forward.pth'
        nll.save_forward(outfilename)
        self.assertTrue(os.path.exists(outfilename))
        self.addCleanup(os.remove, outfilename)

    def test_load_forward(self):
        nll = NonlinearLevelSet(n_layers=2, active_dim=1, lr=0.02, epochs=1)
        nll.load_forward(infile='tests/data/forward_test.pth', n_params=18)
        self.assertIsInstance(nll.forward, ForwardNet)

    def test_save_backward(self):
        nll = self._train_nls()
        outfilename = 'tests/data/saved_backward.pth'
        nll.save_backward(outfilename)
        self.assertTrue(os.path.exists(outfilename))
        self.addCleanup(os.remove, outfilename)

    def test_load_backward(self):
        nll = NonlinearLevelSet(n_layers=2, active_dim=1, lr=0.02, epochs=1)
        nll.load_backward(infile='tests/data/backward_test.pth', n_params=18)
        self.assertIsInstance(nll.backward, BackwardNet)
class TestForwardNet(TestCase):
    """Tests for athena.ForwardNet attribute initialization."""

    @staticmethod
    def _build(dh=0.25):
        """Construct a ForwardNet with the configuration shared by the tests."""
        return ForwardNet(n_params=6, n_layers=2, dh=dh, active_dim=1)

    def test_init_n_params(self):
        # Constructed with n_params=6, the net exposes n_params == 3.
        self.assertEqual(self._build().n_params, 3)

    def test_init_n_layers(self):
        self.assertEqual(self._build().n_layers, 2)

    def test_init_dh(self):
        self.assertEqual(self._build(dh=0.20).dh, 0.20)

    def test_init_omega(self):
        self.assertEqual(self._build().omega, slice(1))
class TestBackwardNet(TestCase):
    """Tests for athena.BackwardNet attribute initialization."""

    @staticmethod
    def _build(dh=0.25):
        """Construct a BackwardNet with the configuration shared by the tests."""
        return BackwardNet(n_params=6, n_layers=2, dh=dh)

    def test_init_n_params(self):
        # Constructed with n_params=6, the net exposes n_params == 3.
        self.assertEqual(self._build().n_params, 3)

    def test_init_n_layers(self):
        self.assertEqual(self._build().n_layers, 2)

    def test_init_dh(self):
        self.assertEqual(self._build(dh=0.20).dh, 0.20)
| [
"os.path.exists",
"torch.as_tensor",
"athena.ForwardNet",
"numpy.ones",
"athena.NonlinearLevelSet",
"matplotlib.pyplot.gcf",
"athena.BackwardNet",
"athena.Normalizer",
"numpy.loadtxt"
] | [((1052, 1095), 'torch.as_tensor', 'torch.as_tensor', (['inputs'], {'dtype': 'torch.double'}), '(inputs, dtype=torch.double)\n', (1067, 1095), False, 'import torch\n'), ((1109, 1155), 'torch.as_tensor', 'torch.as_tensor', (['grad_lift'], {'dtype': 'torch.double'}), '(grad_lift, dtype=torch.double)\n', (1124, 1155), False, 'import torch\n'), ((539, 603), 'numpy.loadtxt', 'np.loadtxt', (['"""tests/data/naca0012.txt"""'], {'skiprows': '(1)', 'delimiter': '""","""'}), "('tests/data/naca0012.txt', skiprows=1, delimiter=',')\n", (549, 603), True, 'import numpy as np\n'), ((758, 782), 'athena.Normalizer', 'Normalizer', ([], {'lb': 'lb', 'ub': 'ub'}), '(lb=lb, ub=ub)\n', (768, 782), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((391, 400), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (398, 400), True, 'import matplotlib.pyplot as plt\n'), ((442, 451), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (449, 451), True, 'import matplotlib.pyplot as plt\n'), ((689, 706), 'numpy.ones', 'np.ones', (['n_params'], {}), '(n_params)\n', (696, 706), True, 'import numpy as np\n'), ((723, 740), 'numpy.ones', 'np.ones', (['n_params'], {}), '(n_params)\n', (730, 740), True, 'import numpy as np\n'), ((1245, 1317), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.1)', 'epochs': '(100)', 'dh': '(0.25)'}), '(n_layers=2, active_dim=1, lr=0.1, epochs=100, dh=0.25)\n', (1262, 1317), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((1539, 1611), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.1)', 'epochs': '(100)', 'dh': '(0.25)'}), '(n_layers=2, active_dim=1, lr=0.1, epochs=100, dh=0.25)\n', (1556, 1611), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((1827, 1899), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 
'active_dim': '(1)', 'lr': '(0.1)', 'epochs': '(100)', 'dh': '(0.25)'}), '(n_layers=2, active_dim=1, lr=0.1, epochs=100, dh=0.25)\n', (1844, 1899), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((2113, 2185), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.1)', 'epochs': '(100)', 'dh': '(0.25)'}), '(n_layers=2, active_dim=1, lr=0.1, epochs=100, dh=0.25)\n', (2130, 2185), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((2399, 2471), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.1)', 'epochs': '(100)', 'dh': '(0.25)'}), '(n_layers=2, active_dim=1, lr=0.1, epochs=100, dh=0.25)\n', (2416, 2471), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((2687, 2759), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.1)', 'epochs': '(100)', 'dh': '(0.25)'}), '(n_layers=2, active_dim=1, lr=0.1, epochs=100, dh=0.25)\n', (2704, 2759), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((2976, 3048), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.1)', 'epochs': '(100)', 'dh': '(0.25)'}), '(n_layers=2, active_dim=1, lr=0.1, epochs=100, dh=0.25)\n', (2993, 3048), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((3266, 3338), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.1)', 'epochs': '(100)', 'dh': '(0.25)'}), '(n_layers=2, active_dim=1, lr=0.1, epochs=100, dh=0.25)\n', (3283, 3338), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((3554, 3616), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.02)', 'epochs': '(1)'}), 
'(n_layers=2, active_dim=1, lr=0.02, epochs=1)\n', (3571, 3616), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((3796, 3858), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.02)', 'epochs': '(1)'}), '(n_layers=2, active_dim=1, lr=0.02, epochs=1)\n', (3813, 3858), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((4040, 4102), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.02)', 'epochs': '(1)'}), '(n_layers=2, active_dim=1, lr=0.02, epochs=1)\n', (4057, 4102), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((4271, 4333), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.02)', 'epochs': '(1)'}), '(n_layers=2, active_dim=1, lr=0.02, epochs=1)\n', (4288, 4333), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((4549, 4611), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.02)', 'epochs': '(1)'}), '(n_layers=2, active_dim=1, lr=0.02, epochs=1)\n', (4566, 4611), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((4869, 4931), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.02)', 'epochs': '(1)'}), '(n_layers=2, active_dim=1, lr=0.02, epochs=1)\n', (4886, 4931), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((5115, 5177), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.02)', 'epochs': '(1)'}), '(n_layers=2, active_dim=1, lr=0.02, epochs=1)\n', (5132, 5177), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((5371, 5433), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', 
([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.02)', 'epochs': '(1)'}), '(n_layers=2, active_dim=1, lr=0.02, epochs=1)\n', (5388, 5433), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((5693, 5755), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(2)', 'lr': '(0.02)', 'epochs': '(1)'}), '(n_layers=2, active_dim=2, lr=0.02, epochs=1)\n', (5710, 5755), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((6000, 6062), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.02)', 'epochs': '(2)'}), '(n_layers=2, active_dim=1, lr=0.02, epochs=2)\n', (6017, 6062), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((6261, 6323), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.02)', 'epochs': '(1)'}), '(n_layers=2, active_dim=1, lr=0.02, epochs=1)\n', (6278, 6323), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((6644, 6706), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.02)', 'epochs': '(1)'}), '(n_layers=2, active_dim=1, lr=0.02, epochs=1)\n', (6661, 6706), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((6887, 6949), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.02)', 'epochs': '(1)'}), '(n_layers=2, active_dim=1, lr=0.02, epochs=1)\n', (6904, 6949), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((7273, 7335), 'athena.NonlinearLevelSet', 'NonlinearLevelSet', ([], {'n_layers': '(2)', 'active_dim': '(1)', 'lr': '(0.02)', 'epochs': '(1)'}), '(n_layers=2, active_dim=1, lr=0.02, epochs=1)\n', (7290, 7335), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, 
Normalizer\n'), ((7553, 7610), 'athena.ForwardNet', 'ForwardNet', ([], {'n_params': '(6)', 'n_layers': '(2)', 'dh': '(0.25)', 'active_dim': '(1)'}), '(n_params=6, n_layers=2, dh=0.25, active_dim=1)\n', (7563, 7610), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((7702, 7759), 'athena.ForwardNet', 'ForwardNet', ([], {'n_params': '(6)', 'n_layers': '(2)', 'dh': '(0.25)', 'active_dim': '(1)'}), '(n_params=6, n_layers=2, dh=0.25, active_dim=1)\n', (7712, 7759), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((7845, 7901), 'athena.ForwardNet', 'ForwardNet', ([], {'n_params': '(6)', 'n_layers': '(2)', 'dh': '(0.2)', 'active_dim': '(1)'}), '(n_params=6, n_layers=2, dh=0.2, active_dim=1)\n', (7855, 7901), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((7988, 8045), 'athena.ForwardNet', 'ForwardNet', ([], {'n_params': '(6)', 'n_layers': '(2)', 'dh': '(0.25)', 'active_dim': '(1)'}), '(n_params=6, n_layers=2, dh=0.25, active_dim=1)\n', (7998, 8045), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((8175, 8219), 'athena.BackwardNet', 'BackwardNet', ([], {'n_params': '(6)', 'n_layers': '(2)', 'dh': '(0.25)'}), '(n_params=6, n_layers=2, dh=0.25)\n', (8186, 8219), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((8311, 8355), 'athena.BackwardNet', 'BackwardNet', ([], {'n_params': '(6)', 'n_layers': '(2)', 'dh': '(0.25)'}), '(n_params=6, n_layers=2, dh=0.25)\n', (8322, 8355), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((8441, 8484), 'athena.BackwardNet', 'BackwardNet', ([], {'n_params': '(6)', 'n_layers': '(2)', 'dh': '(0.2)'}), '(n_params=6, n_layers=2, dh=0.2)\n', (8452, 8484), False, 'from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer\n'), ((6519, 6546), 'os.path.exists', 'os.path.exists', (['outfilename'], {}), 
'(outfilename)\n', (6533, 6546), False, 'import os\n'), ((7147, 7174), 'os.path.exists', 'os.path.exists', (['outfilename'], {}), '(outfilename)\n', (7161, 7174), False, 'import os\n')] |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Class to do trained model inference in beam."""
import importlib
import os
import struct
import subprocess as sp
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
# LDIF is an internal package, should be imported last.
# pylint: disable=g-bad-import-order
from ldif.datasets import preprocess
from ldif.datasets import shapenet
from ldif.inference import experiment as experiments
from ldif.inference import extract_mesh
from ldif.inference import metrics
from ldif.model import model as sdf_model
from ldif.representation import structured_implicit_function
from ldif.util import camera_util
from ldif.util import file_util
from ldif.util import gaps_util
from ldif.util import geom_util
from ldif.util import geom_util_np
from ldif.util import gpu_util
from ldif.util import path_util
from ldif.util import py_util
from ldif.util import sdf_util
from ldif.util import np_util
from ldif.util.file_util import log
# pylint: enable=g-bad-import-order
# Reload frequently-edited modules so a long-lived interpreter (e.g. a
# notebook running inference) picks up the latest code on re-import.
importlib.reload(extract_mesh)
importlib.reload(structured_implicit_function)
importlib.reload(sdf_model)
importlib.reload(geom_util)
class TrainedNetwork(object):
  """A base class for all networks trained in XManager.

  Holds the XManager job, the checkpoint to restore from, and a dedicated
  TensorFlow graph so multiple networks can coexist in one process.
  """
  def __init__(self, job, ckpt, use_gpu, **kwargs): # pylint: disable=unused-argument
    """Stores the job/checkpoint and creates a fresh graph.

    Args:
      job: The XManager job providing the model config.
      ckpt: The checkpoint object to restore (used later by restore()).
      use_gpu: Whether the restored session may place ops on the GPU.
      **kwargs: Unused here; accepted so subclasses can forward extras.
    """
    self.job = job
    self.ckpt = ckpt
    # A private graph per instance avoids op-name collisions between
    # several restored networks in one process.
    self.graph = tf.Graph()
    self.use_gpu = use_gpu
  @classmethod
  def from_experiment(cls,
                      experiment,
                      xid,
                      ckpt_idx,
                      use_temp_ckpts=None,
                      overrides=None,
                      use_gpu=True,
                      **kwargs):
    """Instantiates a TrainedNetwork from an experiment object.

    Args:
      experiment: The experiment to look the job up in.
      xid: XManager id of the job; the job must be visible.
      ckpt_idx: Checkpoint index. 0 is invalid (logs the valid indices and
        returns None); -1 selects the latest checkpoint at or before any
        index (must_equal is disabled in that case).
      use_temp_ckpts: Optional; if not None, overrides the job's
        temp-checkpoint setting.
      overrides: Optional dict of hparam name -> value applied to the
        job's model config before loading.
      use_gpu: Whether the restored session may use the GPU.
      **kwargs: Forwarded to the class constructor.

    Returns:
      A cls instance, or None when ckpt_idx == 0.
    """
    job = experiment.job_from_xmanager_id(xid, must_be_visible=True)
    if use_temp_ckpts is not None:
      job.set_use_temp_ckpts(use_temp_ckpts)
    if overrides is not None:
      for k, v in overrides.items():
        setattr(job.model_config.hparams, k, v)
    if ckpt_idx == 0:
      log.error('Please select a checkpoint and rerun. Valid checkpoints:')
      log.error(str(job.all_checkpoint_indices))
      return
    # ckpt_idx == -1 means "latest"; any other index must match exactly.
    must_equal = ckpt_idx != -1
    ckpt = job.latest_checkpoint_before(ckpt_idx, must_equal=must_equal)
    log.info(f'Loading checkpoint {ckpt.abspath}')
    return cls(job, ckpt, use_gpu, **kwargs)
  @classmethod
  def from_modeldir(cls,
                    model_directory,
                    model_name,
                    experiment_name,
                    xid,
                    ckpt_idx,
                    overrides=None,
                    use_temp_ckpts=True,
                    use_gpu=True,
                    **kwargs):
    """Creates a TrainedModel from a model directory root and name.

    Thin wrapper: builds the Experiment and delegates to from_experiment.
    """
    experiment = experiments.Experiment(model_directory, model_name,
                                        experiment_name)
    return cls.from_experiment(experiment, xid, ckpt_idx, use_temp_ckpts,
                               overrides, use_gpu, **kwargs)
  @classmethod
  def from_identifiers(cls,
                       user,
                       model_name,
                       experiment_name,
                       xid,
                       ckpt_idx,
                       overrides=None,
                       use_temp_ckpts=None,
                       charged_user='viscam',
                       use_gpu=True,
                       **kwargs):
    """Creates a trained network from experiment identifiers.

    Deprecated: always raises ValueError.
    """
    raise ValueError('No longer supported.')
  def restore(self):
    """Creates a session with restored model variables."""
    with self.graph.as_default():
      if self.use_gpu:
        # For now these are disabled since it is difficult to work on
        # all GPUs.
        #allowable_frac = gpu_util.get_allowable_fraction_without(
        #    mem_to_reserve=1024 + 512, cuda_device_index=0)  # ~1GB
        #gpu_options = tf.GPUOptions(
        #    per_process_gpu_memory_fraction=allowable_frac)
        #config = tf.ConfigProto(gpu_options=gpu_options)
        config = tf.ConfigProto()
        # Grow GPU memory on demand instead of reserving it all upfront.
        config.gpu_options.allow_growth = True
      else:
        # Hide all GPUs from TensorFlow to force CPU placement.
        config = tf.ConfigProto(device_count={'GPU': 0})
      self.session = tf.Session(config=config)
      saver = tf.train.Saver()
      saver.restore(self.session, self.ckpt.abspath)
def conform_prediction(vector):
  """Forces an arbitrary vector to be a valid (D)SIF.

  Clamps the first 10 analytic channels (1 constant, 3 center coordinates,
  3 axis-aligned radii, 3 covariance angles) to their valid ranges and
  passes any remaining channels through unchanged.
  """
  vector = vector.copy()
  if vector.shape[-1] not in [10, 42]:
    raise ValueError('Unimplemented.')
  analytic = vector[..., :10]
  consts, centers, radii_aa, radii_cov = np.split(analytic, [1, 4, 7], axis=-1)
  consts = np.minimum(consts, 0.0)  # clamp constants to be <= 0
  radii_aa = np.maximum(radii_aa, 1e-9)  # keep radii strictly positive
  radii_cov = np.clip(radii_cov, -np.pi / 4., np.pi / 4.)
  pieces = [consts, centers, radii_aa, radii_cov, vector[..., 10:]]
  log.verbose(repr([x.shape for x in pieces]))
  return np.concatenate(pieces, axis=-1)
class SingleViewDepthEncoder(TrainedNetwork):
  """Maps from a single depth image (max-0) to a shape representation."""
  def __init__(self, job, ckpt, use_gpu, **kwargs):
    """Builds the inference graph and restores checkpoint weights."""
    super(SingleViewDepthEncoder, self).__init__(job, ckpt, use_gpu, **kwargs)
    with self.graph.as_default():
      model_config = self.job.model_config
      model_config.inputs = shapenet.build_placeholder_interface(
          model_config, proto='ShapeNetOneImXyzPC')
      # NOTE(review): preprocess() is called twice with the first result
      # discarded; removing the first call might alter op naming used by
      # checkpoint restore — confirm before cleaning up.
      training_example = preprocess.preprocess(model_config)
      self.depth_input = model_config.inputs['dataset'].depth_render
      self.xyz_input = model_config.inputs['dataset'].xyz_render
      self.points_input = model_config.inputs['dataset'].surface_point_samples
      training_example = preprocess.preprocess(model_config)
      observation = sdf_model.Observation(model_config, training_example)
      imp_net = sdf_model.StructuredImplicitModel(model_config, 'imp_net')
      prediction = imp_net.forward(observation)
      structured_implicit = prediction.structured_implicit
      # The packed (D)SIF vector is the only tensor fetched at run time.
      self.packed_vector = structured_implicit.vector
    self.restore()
  def run(self, depth, points, xyz):
    """Runs the network on the input data, returning a (D)SIF.

    Args:
      depth: A single-channel depth image; reshaped to [1, h, w, 1].
      points: Surface point samples; reshaped to [1, 10000, 6] —
        presumably xyz + normals per point (TODO confirm).
      xyz: Per-pixel world coordinates; reshaped to [1, h, w, 3].

    Returns:
      A numpy array of shape [hparams.sc, -1] holding the packed vector.
    """
    h, w = np.squeeze(depth).shape
    depth = np.reshape(depth, [1, h, w, 1])
    points = np.reshape(points, [1, 10000, 6])
    xyz = np.reshape(xyz, [1, h, w, 3])
    with self.graph.as_default():
      packed_vector = self.session.run(
          self.packed_vector,
          feed_dict={
              self.depth_input: depth,
              self.points_input: points,
              self.xyz_input: xyz
          })
    packed_vector = np.reshape(packed_vector,
                               [self.job.model_config.hparams.sc, -1])
    return packed_vector
  def run_example(self, ex):
    # Depth is scaled by 1000 — presumably meters to millimeters; verify
    # against the dataset conventions.
    return self.run(ex.max_depth_224[0, ...] * 1000.0,
                    ex.get_max_world_pts_from_idx(0), ex.max_world_xyz_224[0,
                                                                          ...])
  def run_example_bts(self, ex):
    # Same as run_example but using the BTS-predicted depth inputs.
    return self.run(ex.bts_depth_224[0, ...] * 1000.0,
                    ex.get_bts_world_pts_from_idx(0), ex.bts_world_xyz_224[0,
                                                                          ...])
class DepthEncoder(TrainedNetwork):
  """Maps from a dodecahedron of depth images to shape elements."""
  def __init__(self, job, ckpt, use_gpu, **kwargs):
    """Builds the inference graph and restores checkpoint weights."""
    super(DepthEncoder, self).__init__(job, ckpt, use_gpu, **kwargs)
    with self.graph.as_default():
      model_config = self.job.model_config
      # Inference always runs one example at a time.
      model_config.hparams.bs = 1
      model_config.inputs = shapenet.build_placeholder_interface(model_config)
      # NOTE(review): preprocess() is called twice with the first result
      # discarded; removing the first call might alter op naming used by
      # checkpoint restore — confirm before cleaning up.
      training_example = preprocess.preprocess(model_config)
      self.depth_input = model_config.inputs['dataset'].depth_renders
      self.points_input = model_config.inputs['dataset'].surface_point_samples
      self.nss_input = model_config.inputs['dataset'].near_surface_samples
      training_example = preprocess.preprocess(model_config)
      # Some preprocess variants attach a transform tensor (_tx); fetch it
      # alongside the prediction when present.
      if hasattr(training_example, '_tx'):
        self.tx = training_example._tx
      else:
        self.tx = None
      observation = sdf_model.Observation(model_config, training_example)
      imp_net = sdf_model.StructuredImplicitModel(model_config, 'imp_net')
      prediction = imp_net.forward(observation)
      structured_implicit = prediction.structured_implicit
      self.packed_vector = structured_implicit.vector
    # *phew* we have set up the graph... now we need to pull the weights.
    self.restore()
  def run(self, dodeca, points, nss=None):
    """Runs the network on the input data, returning a (D)SIF.

    Args:
      dodeca: 20 depth renders; reshaped to [1, 20, 224, 224, 1].
      points: Surface point samples; reshaped to [1, 10000, 6].
      nss: Optional near-surface samples; reshaped to [1, 100000, 4].

    Returns:
      The packed vector of shape [hparams.sc, -1]; if the graph exposes a
      transform, a (vector, 4x4 transform) tuple instead.
    """
    dodeca = np.reshape(dodeca, [1, 20, 224, 224, 1])
    points = np.reshape(points, [1, 10000, 6])
    with self.graph.as_default():
      feed_dict = {self.depth_input: dodeca, self.points_input: points}
      if nss is not None:
        feed_dict[self.nss_input] = np.reshape(nss, [1, 100000, 4])
      if self.tx is not None:
        packed_vector, tx = self.session.run([self.packed_vector, self.tx],
                                             feed_dict=feed_dict)
      else:
        packed_vector = self.session.run(
            self.packed_vector, feed_dict=feed_dict)
    packed_vector = np.reshape(packed_vector,
                               [self.job.model_config.hparams.sc, -1])
    if self.tx is not None:
      return packed_vector, np.reshape(tx, [4, 4])
    return packed_vector
  def run_example(self, ex):
    return self.run(ex.depth_images, ex.precomputed_surface_samples_from_dodeca)
class Decoder(TrainedNetwork):
  """A SIF -> Mesh decoder."""

  def __init__(self, job, ckpt, use_gpu, **kwargs):
    """Builds the SIF -> mesh decoding graph and restores the checkpoint.

    Args:
      job: Experiment job; supplies model_config and its hparams.
      ckpt: Checkpoint handle consumed by self.restore().
      use_gpu: Forwarded to the TrainedNetwork base class.
      **kwargs: Base-class options. If kwargs['silence_implicits'] is truthy,
        the implicit (OccNet) branch is disabled via hparams.ipc = 'f'.
    """
    super(Decoder, self).__init__(job, ckpt, use_gpu, **kwargs)
    with self.graph.as_default():
      # Input placeholder for a single batched SIF vector.
      self.sif_input = tf.placeholder(tf.float32, self.batched_vector_shape)
      # TODO(kgenova) Maybe the net should be handled entirely by the structured
      # implicit function? Although there is a difference between the network
      # that can give a result from a vector and a simple wrapper for models
      # that don't need variables. Maybe it's just intelligent about creating
      # the net only when really needed.
      if 'silence_implicits' in kwargs and kwargs['silence_implicits']:
        self.job.model_config.hparams.ipc = 'f'
        log.info('Silencing implicits.')
      net = sdf_model.StructuredImplicitModel(
          self.job.model_config, name='imp_net')
      structured_implicit = (
          structured_implicit_function.StructuredImplicit.from_packed_vector(
              self.job.model_config, self.sif_input, net))
      self.structured_implicit = structured_implicit
      # Dense evaluation happens one block_res^3 cube of samples at a time.
      self.block_res = 32
      self.native_point_count = self.block_res**3
      self.sample_locations_ph = tf.placeholder(
          tf.float32, shape=[self.block_res, self.block_res, self.block_res, 3])
      samples = tf.reshape(self.sample_locations_ph, [1, self.block_res**3, 3])
      predicted_alg, predicted_locals = structured_implicit.class_at_samples(
          samples, apply_class_transfer=False)
      # Both the raw (algebraic) values and the post-transfer class values are
      # exposed, reshaped back into the evaluation volume.
      predicted_class = sdf_util.apply_class_transfer(
          predicted_alg,
          self.job.model_config,
          soft_transfer=True,
          offset=self.job.model_config.hparams.lset)
      vol_shape = [self.block_res, self.block_res, self.block_res]
      self.predicted_alg_grid = tf.reshape(predicted_alg, vol_shape)
      self.predicted_class_grid = tf.reshape(predicted_class, vol_shape)
      # The effective element count may exceed hparams.sc (symmetry can add
      # shadow elements); see structured_implicit_function.
      effective_element_count = (
          structured_implicit_function.get_effective_element_count(
              self.job.model_config))
      self.local_decisions = tf.reshape(predicted_locals[0], [
          effective_element_count, self.block_res, self.block_res,
          self.block_res
      ])
      # Unit coordinate grid reused by _grid_eval to position sample blocks.
      self.base_grid = np_util.make_coordinate_grid_3d(
          length=self.block_res,
          height=self.block_res,
          width=self.block_res,
          is_screen_space=False,
          is_homogeneous=False).astype(np.float32)
      self._world2local = structured_implicit.world2local
      self._use_inference_kernel = True

      # Influence samples
      self.true_sample_count = 10000
      self.generic_sample_ph = tf.placeholder(
          tf.float32, shape=[self.true_sample_count, 3])
      self.predicted_influences = structured_implicit.rbf_influence_at_samples(
          tf.expand_dims(self.generic_sample_ph, axis=0))

      # Optimizer stuff
      self.optimizer_pc = 5000
      self.optimizer_samples = tf.placeholder(
          tf.float32, shape=[self.optimizer_pc, 3])
      optimizer_samples = tf.reshape(self.optimizer_samples,
                                     [1, self.optimizer_pc, 3])
      self.predicted_class, _ = structured_implicit.class_at_samples(
          optimizer_samples)
      self.predicted_class = tf.reshape(self.predicted_class,
                                        [self.optimizer_pc, 1])
      self.target_class_ph = tf.placeholder(tf.float32, [self.optimizer_pc, 1])
      # Loss choice is a compile-time constant; only 'crossentropy' is active.
      loss = 'crossentropy'
      if loss == 'crossentropy':
        # Clip to keep log() finite at saturated predictions.
        clipped_pred = tf.clip_by_value(self.predicted_class, 1e-05, 1 - 1e-05)
        self.optimizer_elt_loss = tf.where(self.target_class_ph > 0.5,
                                           -tf.log(clipped_pred),
                                           -tf.log(1 - clipped_pred))
      elif loss == 'l1':
        self.optimizer_elt_loss = tf.abs(self.target_class_ph -
                                         self.predicted_class)
      elif loss == 'l2':
        self.optimizer_elt_loss = tf.square(self.target_class_ph -
                                            self.predicted_class)
      # Optionally zero the loss where prediction and GT already agree on
      # inside/outside; disabled by the constant below.
      apply_where_agree = True
      if not apply_where_agree:
        gt_outside = self.target_class_ph > 0.5
        pred_outside = self.predicted_class > 0.5
        gt_inside = tf.logical_not(gt_outside)
        pred_inside = tf.logical_not(pred_outside)
        agree = tf.logical_or(
            tf.logical_and(gt_outside, pred_outside),
            tf.logical_and(gt_inside, pred_inside))
        self.optimizer_elt_loss = tf.where_v2(agree, 0.0,
                                              self.optimizer_elt_loss)

      self.optimizer_loss = tf.reduce_mean(self.optimizer_elt_loss)
      self.ldif_gradients = tf.gradients(self.optimizer_loss, self.sif_input)
      # TODO(kgenova) Currently disabled since it's in testing and hardcodes
      # some values.
      # self.coords_ph = tf.placeholder(tf.float32, shape=[3])
      # self.am_image_ph = tf.placeholder(tf.int32, shape=[224, 224])
      # pose_cam2world, pose_eye = self._spherical_to_4x4(self.coords_ph)
      # self.pose_error = self._evaluate_pose_error(pose_cam2world, pose_eye,
      #                                             self.am_image_ph)
      # self.pose3_gradients = tf.gradients(self.pose_error, self.coords_ph)
      try:
        self.restore()
      except ValueError:
        # A decoder with silenced implicits may have nothing to restore.
        log.warning('No variables to restore or restoration otherwise failed.')
@property
def unbatched_vector_shape(self):
shape_count = self.job.model_config.hparams.sc
shape_size = structured_implicit_function.element_dof(self.job.model_config)
return [shape_count, shape_size]
@property
def batched_vector_shape(self):
return [1] + self.unbatched_vector_shape
  @property
  def use_inference_kernel(self):
    # Whether dense grid evaluation should use the compiled CUDA kernel
    # (_grid_eval_cuda) instead of the pure-TensorFlow path.
    return self._use_inference_kernel

  @use_inference_kernel.setter
  def use_inference_kernel(self, should_use):
    # Coerce to bool so any truthy value behaves consistently downstream.
    self._use_inference_kernel = bool(should_use)
# TODO(kgenova) The intermediate vector should really be its own class...
def savetxt(self, sif_vector, path=None, version='v1'):
"""Saves a (D)SIF as ASCII text in the SIF file format.
Args:
sif_vector: A numpy array containing the ldif to write to disk. Has shape
(element_count, element_length).
path: A string containing the path to the file to write to, if provided.
If none, no file is written.
version: A string with the version identifier. Must equal 'v1'.
Returns:
A string encoding of the (D)SIF.
"""
if version == 'v0':
raise ValueError('SIF v0 files are no longer supported.')
elif version == 'v1':
s = self.encode_sif_v1(sif_vector)
else:
raise ValueError(f'Unrecognized SIF file format: {version}.')
if path is not None:
file_util.writetxt(path, s)
return s
def encode_sif_v1(self, sif_vector):
"""Encodes a ldif to a string, and optionally writes it to disk.
A description of the file format:
Line 1: SIF
Line 2: Three ints separated by spaces. In order:
1) The number of blobs.
2) The version ID for the blob types. I added this to be safe since
last time when we updated to add rotation it broke all the old txt
files. For now it will always be zero, which means the following
eleven explicit parameters will be given per blob (in order):
1 constant. float.
3 centers (XYZ). float.
3 radii (XYZ diagonals). float.
3 radii (roll-pitch-yaw rotations). float.
1 symmetry ID type. int. For now it will be either 0 or 1:
Zero: Not symmetric.
One: Left-right (XY-plane) symmetry.
3) The number of implicit parameters per blob. So it will likely
be between 0-256.
After the first two lines, there is a line for each blob.
Each line will have the explicit parameters followed by the implicit
parameters. They are space separated.
Args:
sif_vector: The SIF vector to encode as a np array. Has shape
(element_count, element_length).
Returns:
A string encoding of v in the ldif v1 file format.
"""
sif_vector = sif_vector.copy()
shape_count = sif_vector.shape[-2]
shape_len = sif_vector.shape[-1]
if shape_len == 7:
off_axis = np.zeros([shape_count, 3])
sif_vector = np.concatenate([sif_vector, off_axis], axis=1)
shape_len = 10
explicit_len = 10
implicit_len = shape_len - explicit_len
sif_vector = np.reshape(sif_vector, [shape_count, shape_len])
has_implicits = implicit_len > 0
if not has_implicits:
assert shape_len == 10
implicit_len = 0
sif_vector[:, 4:7] = np.sqrt(np.maximum(sif_vector[:, 4:7], 0))
header = 'SIF\n%i %i %i\n' % (shape_count, 0, implicit_len)
out = header
for row_idx in range(shape_count):
row = ' '.join(10 * ['%.9g']) % tuple(sif_vector[row_idx, :10].tolist())
symmetry = int(row_idx < self.job.model_config.hparams.lyr)
row += ' %i' % symmetry
if has_implicits:
implicit_params = ' '.join(implicit_len * ['%.9g']) % (
tuple(sif_vector[row_idx, 10:].tolist()))
row += ' ' + implicit_params
row += '\n'
out += row
return out
def render_ellipsoids(self, sif_vector):
"""Renders an ellipsoid image visualizing the (D)SIF RBFs."""
with py_util.py2_temporary_directory() as d:
qpath = d + '/q.txt'
self.savetxt(sif_vector, qpath)
impath = d + '/im.png'
camera = ('1.0451 1.17901 0.630437 '
'-0.614259 -0.695319 -0.373119 '
'-0.547037 0.715996 -0.433705')
with py_util.x11_server():
cmd = '%s/qview %s -camera %s -image %s' % (path_util.gaps_path(),
qpath, camera, impath)
sp.check_output(cmd, shell=True)
im = file_util.read_image(impath)
return im
def interactive_viewer(self, sif_vector, mesh=None):
"""Opens a GAPS viewer that can display the SIF blobs alongside a mesh."""
with py_util.py2_temporary_directory() as d:
qpath = d + '/q.txt'
self.savetxt(sif_vector, qpath)
init_camera = ('1.0451 1.17901 0.630437 '
'-0.614259 -0.695319 -0.373119 '
'-0.547037 0.715996 -0.433705')
mstr = ''
if mesh is not None:
mpath = d + '/m.ply'
file_util.write_mesh(mpath, mesh)
mstr = f' -input_mesh {mpath}'
cmd = f'{path_util.gaps_path()}/qview {qpath} -camera {init_camera}{mstr}'
sp.check_output(cmd, shell=True)
def world2local(self, sif_vector):
if sif_vector.shape[0] != 1:
sif_vector = np.expand_dims(sif_vector, axis=0)
m = self.session.run(
self._world2local, feed_dict={self.sif_input: sif_vector})
return m
def interactive_mesh_viewer(self, sif_vector, resolution):
"""Opens up an OpenGL session viewing the mesh defined by the SIF/LDIF."""
with py_util.py2_temporary_directory() as d:
mpath = d + '/m.ply'
m = self.extract_mesh(sif_vector, resolution)
file_util.write_mesh(mpath, m)
init_camera = ('1.0451 1.17901 0.630437 '
'-0.614259 -0.695319 -0.373119 '
'-0.547037 0.715996 -0.433705')
cmd = '%s/mshview %s -camera %s' % (path_util.gaps_path(), mpath,
init_camera)
sp.check_output(cmd, shell=True)
def interactive_gridview(self, sif_vector, resolution, extent=0.75):
volume = self._grid_eval(
sif_vector, resolution, extent, extract_parts=False, world2local=None)
return gaps_util.grdview(volume)
  def _spherical_to_4x4(self, coords):
    """Turns spherical coords into a 4x4 affine transformation matrix.

    Args:
      coords: Tensor of 3 elements: (radius, theta, phi).

    Returns:
      A (cam2world, eye) tuple: a [4, 4] camera-to-world matrix and the
      [1, 3] eye position.
    """
    r = coords[0]
    theta = coords[1]
    phi = coords[2]
    st = tf.sin(theta)
    x = r * st * tf.cos(phi)
    y = r * st * tf.sin(phi)
    z = r * tf.cos(theta)
    eye = tf.stack([x, y, z], axis=0)
    eye = tf.reshape(eye, [1, 3])
    center = tf.zeros([1, 3], dtype=tf.float32)
    world_up = tf.constant([[0., 1., 0.]], dtype=tf.float32)
    world2cam = camera_util.look_at(eye, center, world_up)
    cam2world = tf.linalg.inv(world2cam)
    # NOTE(review): the look-at inverse computed above is immediately
    # overwritten by this hard-coded matrix, so only `eye` actually depends on
    # `coords`. Presumably a debugging/testing shortcut — confirm before
    # relying on this method for arbitrary poses.
    cam2world = tf.constant(
        [[-9.9398971e-01, 2.7342862e-03, -4.7837296e-03, 1.4993416e-04],
         [1.6200442e-09, 8.6298174e-01, 4.9326313e-01, 7.1943283e-01],
         [5.5100261e-03, 4.9325553e-01, -8.6296844e-01, -1.2277470e+00],
         [0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 1.0000000e+00]],
        dtype=tf.float32)
    return tf.reshape(cam2world, [4, 4]), eye
  def _evaluate_pose_error(self, cam2world, eye, am_image):
    """Evaluates the error of an estimated 4x4 pose matrix.

    Args:
      cam2world: [4, 4] camera-to-world transform tensor.
      eye: Eye position tensor; reshaped to [1, 1, 3].
      am_image: [224, 224] int tensor of per-pixel element ids.

    Returns:
      A scalar error tensor (negated sum of per-ray max RBF influence).
    """
    # TODO(kgenova) Thisis a hack that only workds for 3d-r2n2
    # Cast one camera-space ray per pixel of a 224x224 image.
    ray_directions = gaps_util.gaps_depth_image_to_cam_image(
        np.ones((224, 224)), xfov=0.422204).astype(np.float32)
    # Sample tc depths along each ray, in [0.75, 2.25) with step 0.1.
    tc = 15
    t_vals = tf.constant(np.arange(0.75, 2.25, .1), dtype=tf.float32)
    t_vals = tf.reshape(t_vals, [1, tc, 1])
    ray_count = int(np.prod(ray_directions.shape[:-1]))
    ray_directions = tf.reshape(ray_directions, [ray_count, 1, 3])
    eye = tf.reshape(eye, [1, 1, 3])
    cam_rays = ray_directions * t_vals + eye
    world_pts = geom_util.apply_4x4(
        cam_rays, cam2world, are_points=True, batch_rank=0, sample_rank=2)
    world_pts = tf.reshape(world_pts, [1, ray_count * tc, 3])
    # Cached so callers (optimize_3dof_pose) can fetch the sample cloud.
    self.cam_3dof_pts = world_pts
    world_rbfs = self.structured_implicit.rbf_influence_at_samples(world_pts)
    eec = world_rbfs.get_shape().as_list()[-1]
    assert len(am_image.get_shape().as_list()) == 2
    # Pixels whose id equals the effective element count are zeroed out below
    # (treated as background); all other rays are kept.
    is_bg = tf.reshape(
        tf.logical_not(tf.equal(am_image, eec)), [1, ray_count, 1])
    am_image = tf.tile(tf.expand_dims(am_image, axis=-1), [1, 1, tc])
    flat_am = tf.reshape(am_image, [ray_count * tc, 1])
    # NOTE(review): 45 is hard-coded here as both the background id remap and
    # the effective element count — confirm it matches eec for this model.
    flat_am = tf.where_v2(tf.equal(flat_am, 45), 0, flat_am)
    world_rbfs = tf.reshape(world_rbfs, [ray_count * tc, 45])
    # For each ray, take the max influence of its assigned element over depth.
    max_val = tf.gather(world_rbfs, flat_am, batch_dims=1)
    max_val = tf.reshape(max_val, [1, ray_count, tc])
    max_val = tf.reduce_max(max_val, axis=-1)
    is_bg_mult = tf.cast(is_bg, dtype=tf.float32)
    max_val = is_bg_mult * max_val
    # Higher influence along kept rays -> lower (more negative) error.
    error = -1.0 * tf.reduce_sum(max_val)
    return error
  def optimize_3dof_pose(self, sif_vector, am_image, e, step_count=10, lr=1e-6):
    """Tries to fit a pose given a SIF in 3D and a SIF segmentation image.

    Args:
      sif_vector: SIF/LDIF vector; expanded to a batch of one if 2-D.
      am_image: [224, 224] segmentation (element-id) image fed to the graph.
      e: Dataset example; e.v1_gt_mesh is shown next to the sample cloud.
      step_count: Number of gradient steps.
      lr: Learning rate; applied to the two angles only (radius is frozen
        via the 0.0 in the per-coordinate lr vector below).

    Returns:
      The final (r, theta, phi) coordinates as a float32 numpy array.

    NOTE(review): this reads self.pose3_gradients / self.pose_error /
    self.coords_ph / self.am_image_ph, which are currently commented out in
    __init__ — re-enable them there before calling this method.
    """
    if len(sif_vector.shape) == 2:
      sif_vector = np.expand_dims(sif_vector, axis=0)
    # Now rays is an array of shape [h, w, 3]. The origin is currently [0,0,0]
    # because the rays are in camera space (for now).
    lr = np.array([0.0, lr, lr], dtype=np.float32)
    # Just worry about a single step for now:
    # The pose is 3-dof: distance, phi, theta.
    coords = np.array([0.812717413913 / 1.75, 0.0, 0.0], dtype=np.float32)
    # cam2world, eye = self._spherical_to_4x4(coords)
    for i in range(step_count):
      log.verbose('Step %i: (%0.4f, %0.4f, %0.4f)' %
                  (i, coords[0], coords[1], coords[2]))
      grad, err, pts = self.session.run(
          [self.pose3_gradients, self.pose_error, self.cam_3dof_pts],
          feed_dict={
              self.am_image_ph: am_image,
              self.sif_input: sif_vector,
              self.coords_ph: coords
          })
      # tf.gradients returns a list; the single entry is the coords gradient.
      grad = grad[0]
      log.verbose('Error: %0.2f' % err)
      log.verbose('grad: %s' % repr(grad))
      log.verbose('pts.shape: ', repr(pts.shape))
      assert len(grad.shape) == 1
      assert grad.shape[0] == 3
      update = lr * grad
      log.verbose('Update: ', str(update))
      gaps_util.ptsview(pts, mesh=e.v1_gt_mesh)
      coords = coords - lr * grad
    return coords
  def optimize_to_gt(self,
                     sif_vector,
                     example,
                     step_count=1,
                     lr=0.01,
                     vis=0,
                     verbosity=0,
                     target='all',
                     samps='nss'):
    """Iteratively optimizes a SIF or LDIF to fit ground truth in/out values.

    Runs SGD with momentum on the packed vector using the graph's
    ldif_gradients node, batching self.optimizer_pc samples per step.

    Args:
      sif_vector: Initial vector, shape (element_count, element_length).
      example: Dataset example providing GT samples (near-surface, uniform,
        and/or depth images, depending on `samps`).
      step_count: Number of optimization steps.
      lr: Base learning rate; scaled per parameter group via `target`.
      vis: Visualization level; >=1 shows first/last step, >=2 every step.
      verbosity: Logging level; >=1 logs start/end, >=2 logs every step.
      target: Parameter groups to update: 'all-eq', 'all', 'centers',
        'radii', 'features', or 'constants'.
      samps: Sample source: 'nss', 'uni', 'nssuni', 'dodeca', or 'depth'.

    Returns:
      The optimized vector, reshaped to the unbatched vector shape.
    """
    if samps == 'nss':
      all_samples = example.near_surface_samples.copy()
      np.random.shuffle(all_samples)
    elif samps == 'uni':
      all_samples = example.uniform_samples.copy()
    elif samps == 'nssuni':
      all_samples = np.concatenate(
          [example.near_surface_samples, example.uniform_samples], axis=0)
    elif samps == 'dodeca':
      # Stored depth is in millimeters; samples are generated in meters.
      depth_ims = example.depth_images / 1000.0
      all_samples = geom_util.depth_dodeca_to_samples(depth_ims)
    elif samps == 'depth':
      depth_idx = 1  # TODO(kgenova) Make this the one in the observation.
      depth_ims = example.depth_images / 1000.0
      depth_im = depth_ims[0, depth_idx, :, :, :]
      cam2world = geom_util.get_dodeca_camera_to_worlds()[depth_idx, :, :]
      assert depth_im.shape[0] == 224
      assert cam2world.shape[0] == 4
      log.verbose('Depth im shape: ', depth_im.shape)
      all_samples = geom_util.depth_image_to_samples(depth_im, cam2world)
    if verbosity >= 2:
      gaps_util.ptsview(all_samples[..., :], self.extract_mesh(sif_vector, 128))
    np.random.shuffle(all_samples)
    # Binarize the sign channel: 0 = inside, 1 = outside.
    cl = all_samples[:, 3]
    all_samples[cl < 0, 3] = 0
    all_samples[cl > 0, 3] = 1
    samples, gt_class = np.split(all_samples, [3], axis=-1)
    # Keep one fixed evaluation batch for satisfied-constraint reporting.
    samples = samples[:self.optimizer_pc, :]
    gt_class = gt_class[:self.optimizer_pc, :]

    def print_sat_count(vec):
      """Prints the number of contraints that are satisfied and the total."""
      pred = self.class_at_samples(vec, np.reshape(samples, [-1, 3]))
      pred_is_out = pred > 0.5
      gt_is_out = gt_class > 0.5
      log.verbose(pred_is_out.shape, gt_is_out.shape)
      agree = np.logical_or(
          np.logical_and(pred_is_out, gt_is_out),
          np.logical_and(
              np.logical_not(pred_is_out), np.logical_not(gt_is_out)))
      sat_count = np.count_nonzero(agree)
      log.info('%i/%i constraints are satisfied.' %
               (sat_count, self.optimizer_pc))

    if verbosity >= 1:
      log.info('Beginning optimization.')
      print_sat_count(sif_vector)
    assert gt_class.shape[-1] == 1
    sif_vector = sif_vector.copy()
    sif_vector = np.expand_dims(sif_vector, axis=0)
    cur_vector = sif_vector.copy()
    # Dormant best-so-far tracking; flip ret_best to return the lowest-loss
    # vector instead of the final one.
    ret_best = False
    if ret_best:
      min_loss = np.inf
      best_vec = cur_vector.copy()
    momentum = 0.9
    velocity = np.zeros_like(cur_vector)
    cur_batch_idx = 0
    for i in range(step_count):
      batch_start = cur_batch_idx
      batch_end = cur_batch_idx + self.optimizer_pc
      if batch_end > all_samples.shape[0]:
        # Not enough samples left for a full batch; reshuffle and restart.
        np.random.shuffle(all_samples)
        batch_start = 0
        batch_end = self.optimizer_pc
        cur_batch_idx = 0
      batch_all_samples = all_samples[batch_start:batch_end, :]
      cur_batch_idx += self.optimizer_pc
      batch_samples, batch_gt_class = np.split(batch_all_samples, [3], axis=-1)
      grad = self.session.run(
          self.ldif_gradients,
          feed_dict={
              self.target_class_ph: batch_gt_class,
              self.sif_input: cur_vector,
              self.optimizer_samples: batch_samples
          })[0]
      vis_this_time = vis >= 2 or (vis >= 1 and (i == 0 or i == step_count - 1))
      print_this_time = verbosity >= 2 or (verbosity >= 1 and not i % 1000)
      if vis_this_time or print_this_time:
        # Loss is only fetched when it will actually be reported/visualized.
        loss = self.session.run(
            self.optimizer_elt_loss,
            feed_dict={
                self.target_class_ph: batch_gt_class,
                self.sif_input: cur_vector,
                self.optimizer_samples: batch_samples
            })
        if ret_best:
          lsum = np.sum(loss)
          if lsum < min_loss:
            min_loss = lsum
            best_vec = cur_vector.copy()
        # Assuming the loss is zero if a constraint is satisfied:
        is_sat = self.optimizer_pc - np.count_nonzero(loss)
        if print_this_time:
          log.info('Step %i: Total loss: %s. Constraints %i/%i' %
                   (i, repr(np.sum(loss)), is_sat, self.optimizer_pc))
        if vis_this_time:
          self.vis_loss(
              cur_vector,
              gt_at_loss=gt_class,
              loss=loss,
              loss_positions=samples)
      # Per-group lr multipliers over the 42-d rows, laid out as
      # [1 constant, 3 centers, 6 radii, 32 implicit features].
      if target == 'all-eq':
        mults = 42 * [1]
      elif target == 'all':
        mults = [0.001] + 3 * [0.001] + 6 * [0.0000001] + 32 * [50]
      elif target == 'centers':
        mults = [0.000] + 3 * [0.001] + 6 * [0.0000000] + 32 * [0]
      elif target == 'radii':
        mults = [0.000] + 3 * [0.000] + 6 * [0.0000001] + 32 * [0]
      elif target == 'features':
        mults = [0.000] + 3 * [0.000] + 6 * [0.0000000] + 32 * [50]
      elif target == 'constants':
        mults = [0.001] + 3 * [0.000] + 6 * [0.0000000] + 32 * [0]
      else:
        assert False
      mults = np.array(mults).reshape([1, 1, 42])
      velocity = momentum * velocity + mults * lr * grad
      cur_vector = cur_vector - velocity
    if verbosity >= 1:
      log.info('Finished optimization.')
      print_sat_count(cur_vector)
    if ret_best:
      cur_vector = best_vec
    return np.reshape(cur_vector, self.unbatched_vector_shape)
def vis_loss(self, sif_vector, gt_at_loss, loss, loss_positions):
"""Visualizes the loss mid-optimization."""
loss = np.reshape(loss, [-1, 1])
gt_at_loss = np.reshape(gt_at_loss, [-1, 1])
assert gt_at_loss.shape[0] == loss.shape[0]
loss[gt_at_loss <= 0.5] = -loss[gt_at_loss <= 0.5]
loss_positions = np.reshape(loss_positions, [-1, 3])
arr = np.concatenate([loss_positions, loss], axis=1)
with py_util.py2_temporary_directory() as d:
sdf_path = f'{d}/a.sdf'
with file_util.open_file(sdf_path, 'wb') as f:
arr = arr.astype(np.float32)
arr.tofile(f)
m = self.extract_mesh(sif_vector, resolution=128)
m_path = f'{d}/m.ply'
file_util.write_mesh(m_path, m)
init_camera = ('1.0451 1.17901 0.630437 '
'-0.614259 -0.695319 -0.373119 '
'-0.547037 0.715996 -0.433705')
cmd = '%s/ptsview %s %s -camera %s' % (path_util.gaps_path(), sdf_path,
m_path, init_camera)
sp.check_output(cmd, shell=True)
  def _grid_eval_cuda(self, sif_vector, resolution, extent):
    """Evaluates a SIF/LDIF densely on a voxel grid via the CUDA kernel.

    Args:
      sif_vector: The SIF/LDIF vector to evaluate.
      resolution: Voxel grid resolution passed to the kernel.
      extent: Half-width of the evaluation cube; only 0.75 is supported.

    Returns:
      The evaluated grid, read back from the kernel's .grd output file.

    Raises:
      ValueError: If the compiled kernel is missing, the extent is not 0.75,
        or the kernel invocation fails.
    """
    log.verbose('Using custom CUDA kernel for evaluation.')
    # First step: Get the path where the serialized occnet should be.
    # The serialized occnet should be at whatever the checkpoint path is,
    # but replace model.ckpt-[idx] with serialized-occnet-[idx].occnet
    checkpoint_path = self.ckpt.abspath
    log.info(f'Using checkpoint {checkpoint_path} to write OccNet file.')
    assert 'model.ckpt-' in checkpoint_path
    occnet_path = checkpoint_path.replace('model.ckpt-', 'serialized-occnet-')
    occnet_path = occnet_path + '.occnet'
    # Second step: If it isn't there, write it to disk.
    if not os.path.isfile(occnet_path):
      assert os.path.isdir(os.path.dirname(occnet_path))
      if self.job.model_config.hparams.ipe == 't':
        self.write_occnet_file(occnet_path)
      else:
        # No implicit parameters in this model; fall back to the pre-extracted
        # occnet shipped with the kernel.
        occnet_path = path_util.get_path_to_ldif_root(
        ) + '/ldif2mesh/extracted.occnet'
    # Third step: open a temporary directory, and write the embedding.
    # Make sure that the temp directories are deleted afterwards.
    with py_util.py2_temporary_directory() as d:
      rep_path = f'{d}/ldif.txt'
      self.savetxt(sif_vector, rep_path)
      # Pick the path to the output grd file:
      grd_path = f'{d}/grid.grd'
      # Fourth step: Get the path to the kernel
      kernel_path = os.path.join(path_util.get_path_to_ldif_root(),
                                 'ldif2mesh/ldif2mesh')
      if not os.path.isfile(kernel_path):
        raise ValueError(
            f'There is no compiled CUDA executable at {kernel_path}.')
      cmd = (f'CUDA_VISIBLE_DEVICES=0 {kernel_path} {rep_path} {occnet_path} '
             f'{grd_path} -resolution {resolution}')
      log.verbose(f'Executing command {cmd}')
      # TODO(kgenova) Support extent as a flag
      if extent != 0.75:
        raise ValueError(
            'Currently only 0.75 extent is supported on the '
            'custom kernel. Please set use_inference_kernel to false for an'
            f' extent of {extent}.')
      # Fifth step: Invoke the kernel.
      try:
        cmd_result = sp.check_output(cmd, shell=True)
        log.info(cmd_result.decode('utf-8').replace('\n', ''))
      except sp.CalledProcessError as e:
        # Translate the two known failure modes into actionable messages.
        if 'out of memory' in e.output.decode('utf-8'):
          raise ValueError(
              'The GPU does not have enough free memory left for the'
              ' inference kernel. Please reduce the fraction'
              ' reserved by tensorflow.')
        elif 'no kernel image is available' in e.output.decode('utf-8'):
          raise ValueError(
              'It appears that the CUDA kernel was not built to your '
              'gpu\'s architecture. Hopefully this is an easy fix. '
              'Please go to developer.nvidia.com/cuda-gpus, and find '
              'your gpu from the list. Then, modify ./build_kernel.sh '
              'by adding compute_XX and sm_XX for whatever your GPU '
              'compute capability is according to the website. For '
              'example, a 2080 Ti would use compute_75 and sm_75. '
              'Note that if your card supports below 35, it likely '
              'will fail to compile using this method. If you are '
              'seeing this error, please feel free to open up an issue '
              'and report it. We would like to support as many gpus as '
              'possible.')
        else:
          raise ValueError(f'Unrecognized error code {e.returncode} occurred'
                           f' during inference kernel evaluation: {e.output}')
      # Seventh step: Read the grid file.
      _, grd = file_util.read_grd(grd_path)
      # Eighth step: Verify the grid shape and return the grid.
      log.verbose(f'The output CUDA grid has shape {grd.shape}.')
      # gaps_util.grdview(grd)
      return grd
  def _grid_eval(self,
                 sif_vector,
                 resolution,
                 extent,
                 extract_parts,
                 world2local=None):
    """Evalutes the LDIF/SIF on a grid.

    Args:
      sif_vector: The SIF/LDIF vector to evaluate.
      resolution: Total output resolution; must be a multiple of block_res.
      extent: Half-width of the cube over which to evaluate.
      extract_parts: If True, evaluates the per-element decision volumes
        instead of the combined algebraic grid.
      world2local: Optional 4x4 transform applied to the sample locations.

    Returns:
      The assembled evaluation volume as a numpy array.
    """
    log.verbose('Evaluating SDF grid for mesh.')
    if self.use_inference_kernel and not extract_parts:
      return self._grid_eval_cuda(sif_vector, resolution, extent)
    if extract_parts or world2local:
      log.warning('Part extraction and world2local are not supported with the'
                  ' custom kernel.')
    log.warning('Using pure tensorflow for grid evaluation, this will be slow.')
    t = time.time()
    sif_vector = np.reshape(sif_vector, self.batched_vector_shape)
    assert not resolution % self.block_res
    block_count = resolution // self.block_res
    block_size = (2.0 * extent) / block_count
    l_block = []
    i = 0
    # Part volumes carry a leading element axis, which shifts the axes along
    # which the blocks are concatenated.
    dim_offset = 1 if extract_parts else 0
    grid = self.local_decisions if extract_parts else self.predicted_alg_grid
    # Walk the volume one block_res^3 sub-cube at a time and stitch results.
    for li in range(block_count):
      l_min = -extent + (li) * block_size - 0.5 / resolution
      h_block = []
      for hi in range(block_count):
        h_min = -extent + (hi) * block_size - 0.5 / resolution
        w_block = []
        for wi in range(block_count):
          w_min = -extent + (wi) * block_size - 0.5 / resolution
          offset = np.reshape(
              np.array([w_min, l_min, h_min], dtype=np.float32), [1, 1, 1, 3])
          sample_locations = block_size * self.base_grid + offset
          if world2local is not None:
            sample_locations = geom_util_np.apply_4x4(
                sample_locations, world2local, are_points=True)
          grid_out_np = self.session.run(
              grid,
              feed_dict={
                  self.sif_input: sif_vector,
                  self.sample_locations_ph: sample_locations
              })
          i += 1
          w_block.append(grid_out_np)
        h_block.append(np.concatenate(w_block, axis=2 + dim_offset))
      l_block.append(np.concatenate(h_block, axis=0 + dim_offset))
    grid_out = np.concatenate(l_block, axis=1 + dim_offset)
    # log.verbose(f'Grid extent: {np.min(grid_out)}, {np.max(grid_out)}')
    # grid_out -= 0.5
    grid_out_time = time.time()
    log.verbose(f'Grid Eval Time: {grid_out_time - t}')
    return grid_out
  def extract_mesh(self,
                   sif_vectors,
                   resolution=128,
                   extent=0.75,
                   return_success=False,
                   world2local=None):
    """Extracts a mesh that is the sum of one or more SIF meshes.

    Args:
      sif_vectors: A single SIF/LDIF vector, or a list of vectors whose
        evaluated volumes are summed before meshing.
      resolution: Marching-cubes grid resolution.
      extent: Half-width of the evaluation cube.
      return_success: If True, also return whether marching cubes found a
        surface crossing.
      world2local: Optional transform (a list of transforms, one per vector,
        when sif_vectors is a list) applied during grid evaluation.

    Returns:
      The extracted mesh (with .marching_cubes_successful set), or a
      (mesh, had_crossing) tuple when return_success is True.
    """
    extract_start_time = time.time()
    if isinstance(sif_vectors, list):
      volumes = []
      if world2local is not None:
        assert isinstance(world2local, list)
      for i, v in enumerate(sif_vectors):
        volumes.append(
            self._grid_eval(
                v,
                resolution,
                extent,
                extract_parts=False,
                world2local=world2local[i]
                if world2local is not None else None))
      # The combined shape is the sum of the individual evaluation volumes.
      volume = np.sum(volumes, axis=0)
    else:
      volume = self._grid_eval(
          sif_vectors,
          resolution,
          extent,
          extract_parts=False,
          world2local=world2local)
    grid_out_time = time.time()
    log.verbose(f'Grid eval time: {grid_out_time - extract_start_time}')
    had_crossing, mesh = extract_mesh.marching_cubes(volume, extent)
    if not had_crossing:
      log.warning('Warning: Marching Cubes found no surface.')
    mesh.marching_cubes_successful = had_crossing
    done_time = time.time()
    log.verbose(f'MCubes Time: {done_time - grid_out_time}')
    if return_success:
      return mesh, had_crossing
    return mesh
def extract_part_meshes(self, sif_vector, resolution, extent=0.75):
elt_volume = self._grid_eval(
sif_vector, resolution, extent, extract_parts=True, world2local=None)
local_meshes = []
for i in range(self.job.model_config.hparams.sc):
had_crossing, mesh_i = extract_mesh.marching_cubes(
elt_volume[i, ...], extent)
mesh_i.marching_cubes_successful = had_crossing
local_meshes.append(mesh_i)
return local_meshes
def _chunk_sample_eval(self, samples, query_fun, chunk_size):
"""Evaluates a set of query locations chunk by chunk to avoid OOM issues."""
# Note- this code will have strange behavior if there is randomness during
# decoding, because it chunks the decoding up into multiple calls.
assert len(samples.shape) == 2
point_count = samples.shape[0]
if point_count == chunk_size:
chunks = [samples]
else:
pad_len = chunk_size - (point_count % chunk_size)
if pad_len:
samples = np.pad(samples, ((0, pad_len), (0, 0)), 'constant')
assert not (point_count + pad_len) % chunk_size
chunk_count = (point_count + pad_len) // chunk_size
chunks = np.split(samples, chunk_count, axis=0)
out = []
for chunk in chunks:
out_i = query_fun(chunk)
assert len(out_i.shape) == 2
assert out_i.shape[0] == chunk_size
out.append(out_i)
return np.concatenate(out, axis=0)[:point_count, :]
def iou(self, sif_vector, example):
samps = example.uniform_samples[:, :3]
gt_is_inside = example.uniform_samples[:, 3:4] < 0.0
pred_is_inside = self.class_at_samples(sif_vector, samps) < 0.5
result = metrics.point_iou(pred_is_inside, gt_is_inside)
return result
def class_at_samples(self, sif_vector, samples):
"""Determines whether input xyz locations are inside or outside the shape.
Args:
sif_vector: A numpy array containing the LDIF/SIF to evaluate. Has shape
(element_count, element_length).
samples: A numpy array containing samples in the LDIF/SIF frame. Has shape
(sample_count, 3).
Returns:
A numpy array with shape (sample_count, 1). A float that is positive
outside the LDIF/SIF, and negative inside.
"""
sif_vector = np.reshape(sif_vector, self.batched_vector_shape)
def query(sample_chunk):
chunk_grid = sample_chunk.reshape(
[self.block_res, self.block_res, self.block_res, 3])
classes = self.session.run(
self.predicted_class_grid,
feed_dict={
self.sif_input: sif_vector,
self.sample_locations_ph: chunk_grid
})
classes = classes.reshape([self.block_res**3, 1])
return classes
return self._chunk_sample_eval(samples, query, self.block_res**3)
def rbf_influence_at_samples(self, sif_vector, samples):
"""Evalutes the influence of each RBF in the SIF/LDIF at each sample.
Args:
sif_vector: A numpy array containing the ldif to evaluate. Has shape
(element_count, element_length).
samples: A numpy array containing the samples in the ldif frame. Has shape
(sample_count, 3).
Returns:
A numpy array with shape (sample_count, effective_element_count). The
RBF weight of each effective element at each sample point. The 'effective'
element count may be higher than the element count, depending on the
symmetry settings of the ldif. In the case where a ldif is partially
symmetric, then some elements have multiple RBF weights- their main weight
(given first) and the weight associated with the shadow element(s)
transformed by their symmetry matrix. See structured_implicit_function.py
for a mapping from element indices to equivalent classes. Regardless of
additional 'effective' elements, the first RBF weights correspond to the
'real' elements with no symmetry transforms applied, in order.
"""
# TODO(kgenova) It's a bit clunky to make the user refer to a different
# python file to get symmetry equivalence classes. Maybe that mapping should
# be returned as needed.
sif_vector = np.reshape(sif_vector, self.batched_vector_shape)
def query(sample_chunk):
chunk_in = sample_chunk.reshape([self.true_sample_count, 3])
influences = self.session.run(
self.predicted_influences,
feed_dict={
self.generic_sample_ph: chunk_in,
self.sif_input: sif_vector
})
return np.squeeze(influences)
return self._chunk_sample_eval(samples, query, self.true_sample_count)
def write_occnet_file(self, path):
"""Serializes an occnet network and writes it to disk."""
f = file_util.open_file(path, 'wb')
# Get the weight tensors associated with the occnet:
with self.graph.as_default():
all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
occnet_vars = contrib_framework.filter_variables(
all_vars, include_patterns=['eval_implicit_parameters'])
# Extract all the model weights as numpy values:
model = {}
for v in occnet_vars:
value = self.session.run(v)
log.verbose(f'{v.name}: {value.shape}')
assert v.name not in model
model[v.name] = value
# Serialize them into a single file:
def write_header(base_scope):
# Write the shape so the number of occnet resnet layers and their sizes
# are known.
num_resnet_layers = 1
# Writes all arrays in row-major order.
dim = model[base_scope +
'sample_resize_fc/fully_connected/weights:0'].shape[1]
log.verbose(f'Dimensionality is {dim}')
f.write(struct.pack('ii', num_resnet_layers, dim))
def write_fc_layer(layer_scope):
weights = model[layer_scope + '/fully_connected/weights:0']
biases = model[layer_scope + '/fully_connected/biases:0']
log.verbose(f'FC layer shapes: {weights.shape}, {biases.shape}')
f.write(weights.astype('f').tostring())
f.write(biases.astype('f').tostring())
def write_cbn_layer(layer_scope):
write_fc_layer(layer_scope + '/beta_fc')
write_fc_layer(layer_scope + '/gamma_fc')
running_mean = float(model[layer_scope + '/running_mean:0'])
running_var = float(model[layer_scope + '/running_variance:0'])
log.verbose(f'Running mean, variance: {running_mean}, {running_var}')
f.write(struct.pack('ff', running_mean, running_var))
def write_input_layer(layer_scope):
weights = model[layer_scope + '/fully_connected/weights:0']
biases = model[layer_scope + '/fully_connected/biases:0']
log.verbose(f'Input FC layer shapes: {weights.shape}, {biases.shape}')
f.write(weights.astype('f').tostring())
f.write(biases.astype('f').tostring())
def write_activation_layer(layer_scope):
weights = model[layer_scope + '/fully_connected/weights:0']
bias = float(model[layer_scope + '/fully_connected/biases:0'])
log.verbose(f'Final FC layer shape and bias: {weights.shape}, {bias}')
f.write(weights.astype('f').tostring())
f.write(struct.pack('f', bias))
base = 'imp_net/eval_implicit_parameters/all_elements/OccNet/'
write_header(base)
write_input_layer(base + 'sample_resize_fc')
write_cbn_layer(base + 'fc_resnet_layer_0/cbn_1')
write_fc_layer(base + 'fc_resnet_layer_0/fc_1')
write_cbn_layer(base + 'fc_resnet_layer_0/cbn_2')
write_fc_layer(base + 'fc_resnet_layer_0/fc_2')
write_cbn_layer(base + 'final_cbn')
write_activation_layer(base + 'final_activation')
f.close()
| [
"numpy.clip",
"ldif.util.path_util.gaps_path",
"ldif.representation.structured_implicit_function.get_effective_element_count",
"tensorflow.linalg.inv",
"ldif.inference.extract_mesh.marching_cubes",
"tensorflow.gradients",
"ldif.util.file_util.log.verbose",
"numpy.arange",
"numpy.reshape",
"tensorf... | [((1619, 1649), 'importlib.reload', 'importlib.reload', (['extract_mesh'], {}), '(extract_mesh)\n', (1635, 1649), False, 'import importlib\n'), ((1650, 1696), 'importlib.reload', 'importlib.reload', (['structured_implicit_function'], {}), '(structured_implicit_function)\n', (1666, 1696), False, 'import importlib\n'), ((1697, 1724), 'importlib.reload', 'importlib.reload', (['sdf_model'], {}), '(sdf_model)\n', (1713, 1724), False, 'import importlib\n'), ((1725, 1752), 'importlib.reload', 'importlib.reload', (['geom_util'], {}), '(geom_util)\n', (1741, 1752), False, 'import importlib\n'), ((5237, 5283), 'numpy.split', 'np.split', (['vector[..., :10]', '[1, 4, 7]'], {'axis': '(-1)'}), '(vector[..., :10], [1, 4, 7], axis=-1)\n', (5245, 5283), True, 'import numpy as np\n'), ((5302, 5325), 'numpy.minimum', 'np.minimum', (['consts', '(0.0)'], {}), '(consts, 0.0)\n', (5312, 5325), True, 'import numpy as np\n'), ((5339, 5366), 'numpy.maximum', 'np.maximum', (['radii_aa', '(1e-09)'], {}), '(radii_aa, 1e-09)\n', (5349, 5366), True, 'import numpy as np\n'), ((5380, 5425), 'numpy.clip', 'np.clip', (['radii_cov', '(-np.pi / 4.0)', '(np.pi / 4.0)'], {}), '(radii_cov, -np.pi / 4.0, np.pi / 4.0)\n', (5387, 5425), True, 'import numpy as np\n'), ((5565, 5650), 'numpy.concatenate', 'np.concatenate', (['[consts, centers, radii_aa, radii_cov, vector[..., 10:]]'], {'axis': '(-1)'}), '([consts, centers, radii_aa, radii_cov, vector[..., 10:]],\n axis=-1)\n', (5579, 5650), True, 'import numpy as np\n'), ((1989, 1999), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1997, 1999), True, 'import tensorflow as tf\n'), ((2913, 2959), 'ldif.util.file_util.log.info', 'log.info', (['f"""Loading checkpoint {ckpt.abspath}"""'], {}), "(f'Loading checkpoint {ckpt.abspath}')\n", (2921, 2959), False, 'from ldif.util.file_util import log\n'), ((3437, 3505), 'ldif.inference.experiment.Experiment', 'experiments.Experiment', (['model_directory', 'model_name', 'experiment_name'], {}), 
'(model_directory, model_name, experiment_name)\n', (3459, 3505), True, 'from ldif.inference import experiment as experiments\n'), ((6923, 6954), 'numpy.reshape', 'np.reshape', (['depth', '[1, h, w, 1]'], {}), '(depth, [1, h, w, 1])\n', (6933, 6954), True, 'import numpy as np\n'), ((6968, 7001), 'numpy.reshape', 'np.reshape', (['points', '[1, 10000, 6]'], {}), '(points, [1, 10000, 6])\n', (6978, 7001), True, 'import numpy as np\n'), ((7012, 7041), 'numpy.reshape', 'np.reshape', (['xyz', '[1, h, w, 3]'], {}), '(xyz, [1, h, w, 3])\n', (7022, 7041), True, 'import numpy as np\n'), ((9347, 9387), 'numpy.reshape', 'np.reshape', (['dodeca', '[1, 20, 224, 224, 1]'], {}), '(dodeca, [1, 20, 224, 224, 1])\n', (9357, 9387), True, 'import numpy as np\n'), ((9401, 9434), 'numpy.reshape', 'np.reshape', (['points', '[1, 10000, 6]'], {}), '(points, [1, 10000, 6])\n', (9411, 9434), True, 'import numpy as np\n'), ((15845, 15908), 'ldif.representation.structured_implicit_function.element_dof', 'structured_implicit_function.element_dof', (['self.job.model_config'], {}), '(self.job.model_config)\n', (15885, 15908), False, 'from ldif.representation import structured_implicit_function\n'), ((18816, 18864), 'numpy.reshape', 'np.reshape', (['sif_vector', '[shape_count, shape_len]'], {}), '(sif_vector, [shape_count, shape_len])\n', (18826, 18864), True, 'import numpy as np\n'), ((21971, 21996), 'ldif.util.gaps_util.grdview', 'gaps_util.grdview', (['volume'], {}), '(volume)\n', (21988, 21996), False, 'from ldif.util import gaps_util\n'), ((22180, 22193), 'tensorflow.sin', 'tf.sin', (['theta'], {}), '(theta)\n', (22186, 22193), True, 'import tensorflow as tf\n'), ((22288, 22315), 'tensorflow.stack', 'tf.stack', (['[x, y, z]'], {'axis': '(0)'}), '([x, y, z], axis=0)\n', (22296, 22315), True, 'import tensorflow as tf\n'), ((22326, 22349), 'tensorflow.reshape', 'tf.reshape', (['eye', '[1, 3]'], {}), '(eye, [1, 3])\n', (22336, 22349), True, 'import tensorflow as tf\n'), ((22363, 22397), 
'tensorflow.zeros', 'tf.zeros', (['[1, 3]'], {'dtype': 'tf.float32'}), '([1, 3], dtype=tf.float32)\n', (22371, 22397), True, 'import tensorflow as tf\n'), ((22413, 22461), 'tensorflow.constant', 'tf.constant', (['[[0.0, 1.0, 0.0]]'], {'dtype': 'tf.float32'}), '([[0.0, 1.0, 0.0]], dtype=tf.float32)\n', (22424, 22461), True, 'import tensorflow as tf\n'), ((22475, 22517), 'ldif.util.camera_util.look_at', 'camera_util.look_at', (['eye', 'center', 'world_up'], {}), '(eye, center, world_up)\n', (22494, 22517), False, 'from ldif.util import camera_util\n'), ((22534, 22558), 'tensorflow.linalg.inv', 'tf.linalg.inv', (['world2cam'], {}), '(world2cam)\n', (22547, 22558), True, 'import tensorflow as tf\n'), ((22575, 22807), 'tensorflow.constant', 'tf.constant', (['[[-0.99398971, 0.0027342862, -0.0047837296, 0.00014993416], [1.6200442e-09,\n 0.86298174, 0.49326313, 0.71943283], [0.0055100261, 0.49325553, -\n 0.86296844, -1.227747], [0.0, 0.0, 0.0, 1.0]]'], {'dtype': 'tf.float32'}), '([[-0.99398971, 0.0027342862, -0.0047837296, 0.00014993416], [\n 1.6200442e-09, 0.86298174, 0.49326313, 0.71943283], [0.0055100261, \n 0.49325553, -0.86296844, -1.227747], [0.0, 0.0, 0.0, 1.0]], dtype=tf.\n float32)\n', (22586, 22807), True, 'import tensorflow as tf\n'), ((23356, 23386), 'tensorflow.reshape', 'tf.reshape', (['t_vals', '[1, tc, 1]'], {}), '(t_vals, [1, tc, 1])\n', (23366, 23386), True, 'import tensorflow as tf\n'), ((23464, 23509), 'tensorflow.reshape', 'tf.reshape', (['ray_directions', '[ray_count, 1, 3]'], {}), '(ray_directions, [ray_count, 1, 3])\n', (23474, 23509), True, 'import tensorflow as tf\n'), ((23520, 23546), 'tensorflow.reshape', 'tf.reshape', (['eye', '[1, 1, 3]'], {}), '(eye, [1, 1, 3])\n', (23530, 23546), True, 'import tensorflow as tf\n'), ((23608, 23698), 'ldif.util.geom_util.apply_4x4', 'geom_util.apply_4x4', (['cam_rays', 'cam2world'], {'are_points': '(True)', 'batch_rank': '(0)', 'sample_rank': '(2)'}), '(cam_rays, cam2world, are_points=True, batch_rank=0,\n 
sample_rank=2)\n', (23627, 23698), False, 'from ldif.util import geom_util\n'), ((23720, 23765), 'tensorflow.reshape', 'tf.reshape', (['world_pts', '[1, ray_count * tc, 3]'], {}), '(world_pts, [1, ray_count * tc, 3])\n', (23730, 23765), True, 'import tensorflow as tf\n'), ((24153, 24194), 'tensorflow.reshape', 'tf.reshape', (['am_image', '[ray_count * tc, 1]'], {}), '(am_image, [ray_count * tc, 1])\n', (24163, 24194), True, 'import tensorflow as tf\n'), ((24273, 24317), 'tensorflow.reshape', 'tf.reshape', (['world_rbfs', '[ray_count * tc, 45]'], {}), '(world_rbfs, [ray_count * tc, 45])\n', (24283, 24317), True, 'import tensorflow as tf\n'), ((24333, 24377), 'tensorflow.gather', 'tf.gather', (['world_rbfs', 'flat_am'], {'batch_dims': '(1)'}), '(world_rbfs, flat_am, batch_dims=1)\n', (24342, 24377), True, 'import tensorflow as tf\n'), ((24392, 24431), 'tensorflow.reshape', 'tf.reshape', (['max_val', '[1, ray_count, tc]'], {}), '(max_val, [1, ray_count, tc])\n', (24402, 24431), True, 'import tensorflow as tf\n'), ((24446, 24477), 'tensorflow.reduce_max', 'tf.reduce_max', (['max_val'], {'axis': '(-1)'}), '(max_val, axis=-1)\n', (24459, 24477), True, 'import tensorflow as tf\n'), ((24495, 24527), 'tensorflow.cast', 'tf.cast', (['is_bg'], {'dtype': 'tf.float32'}), '(is_bg, dtype=tf.float32)\n', (24502, 24527), True, 'import tensorflow as tf\n'), ((25013, 25054), 'numpy.array', 'np.array', (['[0.0, lr, lr]'], {'dtype': 'np.float32'}), '([0.0, lr, lr], dtype=np.float32)\n', (25021, 25054), True, 'import numpy as np\n'), ((25161, 25222), 'numpy.array', 'np.array', (['[0.812717413913 / 1.75, 0.0, 0.0]'], {'dtype': 'np.float32'}), '([0.812717413913 / 1.75, 0.0, 0.0], dtype=np.float32)\n', (25169, 25222), True, 'import numpy as np\n'), ((27504, 27534), 'numpy.random.shuffle', 'np.random.shuffle', (['all_samples'], {}), '(all_samples)\n', (27521, 27534), True, 'import numpy as np\n'), ((27649, 27684), 'numpy.split', 'np.split', (['all_samples', '[3]'], {'axis': '(-1)'}), 
'(all_samples, [3], axis=-1)\n', (27657, 27684), True, 'import numpy as np\n'), ((28579, 28613), 'numpy.expand_dims', 'np.expand_dims', (['sif_vector'], {'axis': '(0)'}), '(sif_vector, axis=0)\n', (28593, 28613), True, 'import numpy as np\n'), ((28780, 28805), 'numpy.zeros_like', 'np.zeros_like', (['cur_vector'], {}), '(cur_vector)\n', (28793, 28805), True, 'import numpy as np\n'), ((31509, 31560), 'numpy.reshape', 'np.reshape', (['cur_vector', 'self.unbatched_vector_shape'], {}), '(cur_vector, self.unbatched_vector_shape)\n', (31519, 31560), True, 'import numpy as np\n'), ((31689, 31714), 'numpy.reshape', 'np.reshape', (['loss', '[-1, 1]'], {}), '(loss, [-1, 1])\n', (31699, 31714), True, 'import numpy as np\n'), ((31732, 31763), 'numpy.reshape', 'np.reshape', (['gt_at_loss', '[-1, 1]'], {}), '(gt_at_loss, [-1, 1])\n', (31742, 31763), True, 'import numpy as np\n'), ((31888, 31923), 'numpy.reshape', 'np.reshape', (['loss_positions', '[-1, 3]'], {}), '(loss_positions, [-1, 3])\n', (31898, 31923), True, 'import numpy as np\n'), ((31934, 31980), 'numpy.concatenate', 'np.concatenate', (['[loss_positions, loss]'], {'axis': '(1)'}), '([loss_positions, loss], axis=1)\n', (31948, 31980), True, 'import numpy as np\n'), ((32754, 32809), 'ldif.util.file_util.log.verbose', 'log.verbose', (['"""Using custom CUDA kernel for evaluation."""'], {}), "('Using custom CUDA kernel for evaluation.')\n", (32765, 32809), False, 'from ldif.util.file_util import log\n'), ((33070, 33139), 'ldif.util.file_util.log.info', 'log.info', (['f"""Using checkpoint {checkpoint_path} to write OccNet file."""'], {}), "(f'Using checkpoint {checkpoint_path} to write OccNet file.')\n", (33078, 33139), False, 'from ldif.util.file_util import log\n'), ((36461, 36520), 'ldif.util.file_util.log.verbose', 'log.verbose', (['f"""The output CUDA grid has shape {grd.shape}."""'], {}), "(f'The output CUDA grid has shape {grd.shape}.')\n", (36472, 36520), False, 'from ldif.util.file_util import log\n'), ((36787, 
36831), 'ldif.util.file_util.log.verbose', 'log.verbose', (['"""Evaluating SDF grid for mesh."""'], {}), "('Evaluating SDF grid for mesh.')\n", (36798, 36831), False, 'from ldif.util.file_util import log\n'), ((37111, 37187), 'ldif.util.file_util.log.warning', 'log.warning', (['"""Using pure tensorflow for grid evaluation, this will be slow."""'], {}), "('Using pure tensorflow for grid evaluation, this will be slow.')\n", (37122, 37187), False, 'from ldif.util.file_util import log\n'), ((37196, 37207), 'time.time', 'time.time', ([], {}), '()\n', (37205, 37207), False, 'import time\n'), ((37225, 37274), 'numpy.reshape', 'np.reshape', (['sif_vector', 'self.batched_vector_shape'], {}), '(sif_vector, self.batched_vector_shape)\n', (37235, 37274), True, 'import numpy as np\n'), ((38647, 38691), 'numpy.concatenate', 'np.concatenate', (['l_block'], {'axis': '(1 + dim_offset)'}), '(l_block, axis=1 + dim_offset)\n', (38661, 38691), True, 'import numpy as np\n'), ((38808, 38819), 'time.time', 'time.time', ([], {}), '()\n', (38817, 38819), False, 'import time\n'), ((38824, 38875), 'ldif.util.file_util.log.verbose', 'log.verbose', (['f"""Grid Eval Time: {grid_out_time - t}"""'], {}), "(f'Grid Eval Time: {grid_out_time - t}')\n", (38835, 38875), False, 'from ldif.util.file_util import log\n'), ((39194, 39205), 'time.time', 'time.time', ([], {}), '()\n', (39203, 39205), False, 'import time\n'), ((39873, 39884), 'time.time', 'time.time', ([], {}), '()\n', (39882, 39884), False, 'import time\n'), ((39889, 39957), 'ldif.util.file_util.log.verbose', 'log.verbose', (['f"""Grid eval time: {grid_out_time - extract_start_time}"""'], {}), "(f'Grid eval time: {grid_out_time - extract_start_time}')\n", (39900, 39957), False, 'from ldif.util.file_util import log\n'), ((39983, 40026), 'ldif.inference.extract_mesh.marching_cubes', 'extract_mesh.marching_cubes', (['volume', 'extent'], {}), '(volume, extent)\n', (40010, 40026), False, 'from ldif.inference import extract_mesh\n'), ((40181, 
40192), 'time.time', 'time.time', ([], {}), '()\n', (40190, 40192), False, 'import time\n'), ((40197, 40253), 'ldif.util.file_util.log.verbose', 'log.verbose', (['f"""MCubes Time: {done_time - grid_out_time}"""'], {}), "(f'MCubes Time: {done_time - grid_out_time}')\n", (40208, 40253), False, 'from ldif.util.file_util import log\n'), ((41983, 42030), 'ldif.inference.metrics.point_iou', 'metrics.point_iou', (['pred_is_inside', 'gt_is_inside'], {}), '(pred_is_inside, gt_is_inside)\n', (42000, 42030), False, 'from ldif.inference import metrics\n'), ((42582, 42631), 'numpy.reshape', 'np.reshape', (['sif_vector', 'self.batched_vector_shape'], {}), '(sif_vector, self.batched_vector_shape)\n', (42592, 42631), True, 'import numpy as np\n'), ((44474, 44523), 'numpy.reshape', 'np.reshape', (['sif_vector', 'self.batched_vector_shape'], {}), '(sif_vector, self.batched_vector_shape)\n', (44484, 44523), True, 'import numpy as np\n'), ((45039, 45070), 'ldif.util.file_util.open_file', 'file_util.open_file', (['path', '"""wb"""'], {}), "(path, 'wb')\n", (45058, 45070), False, 'from ldif.util import file_util\n'), ((2672, 2741), 'ldif.util.file_util.log.error', 'log.error', (['"""Please select a checkpoint and rerun. Valid checkpoints:"""'], {}), "('Please select a checkpoint and rerun. 
Valid checkpoints:')\n", (2681, 2741), False, 'from ldif.util.file_util import log\n'), ((4892, 4917), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (4902, 4917), True, 'import tensorflow as tf\n'), ((4932, 4948), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4946, 4948), True, 'import tensorflow as tf\n'), ((6014, 6092), 'ldif.datasets.shapenet.build_placeholder_interface', 'shapenet.build_placeholder_interface', (['model_config'], {'proto': '"""ShapeNetOneImXyzPC"""'}), "(model_config, proto='ShapeNetOneImXyzPC')\n", (6050, 6092), False, 'from ldif.datasets import shapenet\n'), ((6130, 6165), 'ldif.datasets.preprocess.preprocess', 'preprocess.preprocess', (['model_config'], {}), '(model_config)\n', (6151, 6165), False, 'from ldif.datasets import preprocess\n'), ((6405, 6440), 'ldif.datasets.preprocess.preprocess', 'preprocess.preprocess', (['model_config'], {}), '(model_config)\n', (6426, 6440), False, 'from ldif.datasets import preprocess\n'), ((6461, 6514), 'ldif.model.model.Observation', 'sdf_model.Observation', (['model_config', 'training_example'], {}), '(model_config, training_example)\n', (6482, 6514), True, 'from ldif.model import model as sdf_model\n'), ((6531, 6589), 'ldif.model.model.StructuredImplicitModel', 'sdf_model.StructuredImplicitModel', (['model_config', '"""imp_net"""'], {}), "(model_config, 'imp_net')\n", (6564, 6589), True, 'from ldif.model import model as sdf_model\n'), ((6887, 6904), 'numpy.squeeze', 'np.squeeze', (['depth'], {}), '(depth)\n', (6897, 6904), True, 'import numpy as np\n'), ((7317, 7382), 'numpy.reshape', 'np.reshape', (['packed_vector', '[self.job.model_config.hparams.sc, -1]'], {}), '(packed_vector, [self.job.model_config.hparams.sc, -1])\n', (7327, 7382), True, 'import numpy as np\n'), ((8301, 8351), 'ldif.datasets.shapenet.build_placeholder_interface', 'shapenet.build_placeholder_interface', (['model_config'], {}), '(model_config)\n', (8337, 8351), False, 'from 
ldif.datasets import shapenet\n'), ((8378, 8413), 'ldif.datasets.preprocess.preprocess', 'preprocess.preprocess', (['model_config'], {}), '(model_config)\n', (8399, 8413), False, 'from ldif.datasets import preprocess\n'), ((8664, 8699), 'ldif.datasets.preprocess.preprocess', 'preprocess.preprocess', (['model_config'], {}), '(model_config)\n', (8685, 8699), False, 'from ldif.datasets import preprocess\n'), ((8837, 8890), 'ldif.model.model.Observation', 'sdf_model.Observation', (['model_config', 'training_example'], {}), '(model_config, training_example)\n', (8858, 8890), True, 'from ldif.model import model as sdf_model\n'), ((8907, 8965), 'ldif.model.model.StructuredImplicitModel', 'sdf_model.StructuredImplicitModel', (['model_config', '"""imp_net"""'], {}), "(model_config, 'imp_net')\n", (8940, 8965), True, 'from ldif.model import model as sdf_model\n'), ((9936, 10001), 'numpy.reshape', 'np.reshape', (['packed_vector', '[self.job.model_config.hparams.sc, -1]'], {}), '(packed_vector, [self.job.model_config.hparams.sc, -1])\n', (9946, 10001), True, 'import numpy as np\n'), ((10489, 10542), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'self.batched_vector_shape'], {}), '(tf.float32, self.batched_vector_shape)\n', (10503, 10542), True, 'import tensorflow as tf\n'), ((11071, 11143), 'ldif.model.model.StructuredImplicitModel', 'sdf_model.StructuredImplicitModel', (['self.job.model_config'], {'name': '"""imp_net"""'}), "(self.job.model_config, name='imp_net')\n", (11104, 11143), True, 'from ldif.model import model as sdf_model\n'), ((11195, 11310), 'ldif.representation.structured_implicit_function.StructuredImplicit.from_packed_vector', 'structured_implicit_function.StructuredImplicit.from_packed_vector', (['self.job.model_config', 'self.sif_input', 'net'], {}), '(self.job\n .model_config, self.sif_input, net)\n', (11261, 11310), False, 'from ldif.representation import structured_implicit_function\n'), ((11485, 11575), 'tensorflow.placeholder', 
'tf.placeholder', (['tf.float32'], {'shape': '[self.block_res, self.block_res, self.block_res, 3]'}), '(tf.float32, shape=[self.block_res, self.block_res, self.\n block_res, 3])\n', (11499, 11575), True, 'import tensorflow as tf\n'), ((11598, 11663), 'tensorflow.reshape', 'tf.reshape', (['self.sample_locations_ph', '[1, self.block_res ** 3, 3]'], {}), '(self.sample_locations_ph, [1, self.block_res ** 3, 3])\n', (11608, 11663), True, 'import tensorflow as tf\n'), ((11811, 11945), 'ldif.util.sdf_util.apply_class_transfer', 'sdf_util.apply_class_transfer', (['predicted_alg', 'self.job.model_config'], {'soft_transfer': '(True)', 'offset': 'self.job.model_config.hparams.lset'}), '(predicted_alg, self.job.model_config,\n soft_transfer=True, offset=self.job.model_config.hparams.lset)\n', (11840, 11945), False, 'from ldif.util import sdf_util\n'), ((12082, 12118), 'tensorflow.reshape', 'tf.reshape', (['predicted_alg', 'vol_shape'], {}), '(predicted_alg, vol_shape)\n', (12092, 12118), True, 'import tensorflow as tf\n'), ((12153, 12191), 'tensorflow.reshape', 'tf.reshape', (['predicted_class', 'vol_shape'], {}), '(predicted_class, vol_shape)\n', (12163, 12191), True, 'import tensorflow as tf\n'), ((12236, 12315), 'ldif.representation.structured_implicit_function.get_effective_element_count', 'structured_implicit_function.get_effective_element_count', (['self.job.model_config'], {}), '(self.job.model_config)\n', (12292, 12315), False, 'from ldif.representation import structured_implicit_function\n'), ((12361, 12471), 'tensorflow.reshape', 'tf.reshape', (['predicted_locals[0]', '[effective_element_count, self.block_res, self.block_res, self.block_res]'], {}), '(predicted_locals[0], [effective_element_count, self.block_res,\n self.block_res, self.block_res])\n', (12371, 12471), True, 'import tensorflow as tf\n'), ((12930, 12991), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[self.true_sample_count, 3]'}), '(tf.float32, shape=[self.true_sample_count, 
3])\n', (12944, 12991), True, 'import tensorflow as tf\n'), ((13228, 13284), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[self.optimizer_pc, 3]'}), '(tf.float32, shape=[self.optimizer_pc, 3])\n', (13242, 13284), True, 'import tensorflow as tf\n'), ((13322, 13383), 'tensorflow.reshape', 'tf.reshape', (['self.optimizer_samples', '[1, self.optimizer_pc, 3]'], {}), '(self.optimizer_samples, [1, self.optimizer_pc, 3])\n', (13332, 13383), True, 'import tensorflow as tf\n'), ((13549, 13605), 'tensorflow.reshape', 'tf.reshape', (['self.predicted_class', '[self.optimizer_pc, 1]'], {}), '(self.predicted_class, [self.optimizer_pc, 1])\n', (13559, 13605), True, 'import tensorflow as tf\n'), ((13675, 13725), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.optimizer_pc, 1]'], {}), '(tf.float32, [self.optimizer_pc, 1])\n', (13689, 13725), True, 'import tensorflow as tf\n'), ((14940, 14979), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.optimizer_elt_loss'], {}), '(self.optimizer_elt_loss)\n', (14954, 14979), True, 'import tensorflow as tf\n'), ((15008, 15057), 'tensorflow.gradients', 'tf.gradients', (['self.optimizer_loss', 'self.sif_input'], {}), '(self.optimizer_loss, self.sif_input)\n', (15020, 15057), True, 'import tensorflow as tf\n'), ((17089, 17116), 'ldif.util.file_util.writetxt', 'file_util.writetxt', (['path', 's'], {}), '(path, s)\n', (17107, 17116), False, 'from ldif.util import file_util\n'), ((18619, 18645), 'numpy.zeros', 'np.zeros', (['[shape_count, 3]'], {}), '([shape_count, 3])\n', (18627, 18645), True, 'import numpy as np\n'), ((18665, 18711), 'numpy.concatenate', 'np.concatenate', (['[sif_vector, off_axis]'], {'axis': '(1)'}), '([sif_vector, off_axis], axis=1)\n', (18679, 18711), True, 'import numpy as np\n'), ((19013, 19046), 'numpy.maximum', 'np.maximum', (['sif_vector[:, 4:7]', '(0)'], {}), '(sif_vector[:, 4:7], 0)\n', (19023, 19046), True, 'import numpy as np\n'), ((19692, 19725), 
'ldif.util.py_util.py2_temporary_directory', 'py_util.py2_temporary_directory', ([], {}), '()\n', (19723, 19725), False, 'from ldif.util import py_util\n'), ((20201, 20229), 'ldif.util.file_util.read_image', 'file_util.read_image', (['impath'], {}), '(impath)\n', (20221, 20229), False, 'from ldif.util import file_util\n'), ((20388, 20421), 'ldif.util.py_util.py2_temporary_directory', 'py_util.py2_temporary_directory', ([], {}), '()\n', (20419, 20421), False, 'from ldif.util import py_util\n'), ((20888, 20920), 'subprocess.check_output', 'sp.check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (20903, 20920), True, 'import subprocess as sp\n'), ((21011, 21045), 'numpy.expand_dims', 'np.expand_dims', (['sif_vector'], {'axis': '(0)'}), '(sif_vector, axis=0)\n', (21025, 21045), True, 'import numpy as np\n'), ((21302, 21335), 'ldif.util.py_util.py2_temporary_directory', 'py_util.py2_temporary_directory', ([], {}), '()\n', (21333, 21335), False, 'from ldif.util import py_util\n'), ((21427, 21457), 'ldif.util.file_util.write_mesh', 'file_util.write_mesh', (['mpath', 'm'], {}), '(mpath, m)\n', (21447, 21457), False, 'from ldif.util import file_util\n'), ((21746, 21778), 'subprocess.check_output', 'sp.check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (21761, 21778), True, 'import subprocess as sp\n'), ((22211, 22222), 'tensorflow.cos', 'tf.cos', (['phi'], {}), '(phi)\n', (22217, 22222), True, 'import tensorflow as tf\n'), ((22240, 22251), 'tensorflow.sin', 'tf.sin', (['phi'], {}), '(phi)\n', (22246, 22251), True, 'import tensorflow as tf\n'), ((22264, 22277), 'tensorflow.cos', 'tf.cos', (['theta'], {}), '(theta)\n', (22270, 22277), True, 'import tensorflow as tf\n'), ((22914, 22943), 'tensorflow.reshape', 'tf.reshape', (['cam2world', '[4, 4]'], {}), '(cam2world, [4, 4])\n', (22924, 22943), True, 'import tensorflow as tf\n'), ((23298, 23324), 'numpy.arange', 'np.arange', (['(0.75)', '(2.25)', '(0.1)'], {}), '(0.75, 2.25, 0.1)\n', (23307, 
23324), True, 'import numpy as np\n'), ((23407, 23441), 'numpy.prod', 'np.prod', (['ray_directions.shape[:-1]'], {}), '(ray_directions.shape[:-1])\n', (23414, 23441), True, 'import numpy as np\n'), ((24092, 24125), 'tensorflow.expand_dims', 'tf.expand_dims', (['am_image'], {'axis': '(-1)'}), '(am_image, axis=-1)\n', (24106, 24125), True, 'import tensorflow as tf\n'), ((24221, 24242), 'tensorflow.equal', 'tf.equal', (['flat_am', '(45)'], {}), '(flat_am, 45)\n', (24229, 24242), True, 'import tensorflow as tf\n'), ((24582, 24604), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['max_val'], {}), '(max_val)\n', (24595, 24604), True, 'import tensorflow as tf\n'), ((24836, 24870), 'numpy.expand_dims', 'np.expand_dims', (['sif_vector'], {'axis': '(0)'}), '(sif_vector, axis=0)\n', (24850, 24870), True, 'import numpy as np\n'), ((25315, 25403), 'ldif.util.file_util.log.verbose', 'log.verbose', (["('Step %i: (%0.4f, %0.4f, %0.4f)' % (i, coords[0], coords[1], coords[2]))"], {}), "('Step %i: (%0.4f, %0.4f, %0.4f)' % (i, coords[0], coords[1],\n coords[2]))\n", (25326, 25403), False, 'from ldif.util.file_util import log\n'), ((25712, 25745), 'ldif.util.file_util.log.verbose', 'log.verbose', (["('Error: %0.2f' % err)"], {}), "('Error: %0.2f' % err)\n", (25723, 25745), False, 'from ldif.util.file_util import log\n'), ((25980, 26021), 'ldif.util.gaps_util.ptsview', 'gaps_util.ptsview', (['pts'], {'mesh': 'e.v1_gt_mesh'}), '(pts, mesh=e.v1_gt_mesh)\n', (25997, 26021), False, 'from ldif.util import gaps_util\n'), ((26529, 26559), 'numpy.random.shuffle', 'np.random.shuffle', (['all_samples'], {}), '(all_samples)\n', (26546, 26559), True, 'import numpy as np\n'), ((28027, 28074), 'ldif.util.file_util.log.verbose', 'log.verbose', (['pred_is_out.shape', 'gt_is_out.shape'], {}), '(pred_is_out.shape, gt_is_out.shape)\n', (28038, 28074), False, 'from ldif.util.file_util import log\n'), ((28269, 28292), 'numpy.count_nonzero', 'np.count_nonzero', (['agree'], {}), '(agree)\n', (28285, 28292), True, 
'import numpy as np\n'), ((28299, 28376), 'ldif.util.file_util.log.info', 'log.info', (["('%i/%i constraints are satisfied.' % (sat_count, self.optimizer_pc))"], {}), "('%i/%i constraints are satisfied.' % (sat_count, self.optimizer_pc))\n", (28307, 28376), False, 'from ldif.util.file_util import log\n'), ((28422, 28457), 'ldif.util.file_util.log.info', 'log.info', (['"""Beginning optimization."""'], {}), "('Beginning optimization.')\n", (28430, 28457), False, 'from ldif.util.file_util import log\n'), ((29260, 29301), 'numpy.split', 'np.split', (['batch_all_samples', '[3]'], {'axis': '(-1)'}), '(batch_all_samples, [3], axis=-1)\n', (29268, 29301), True, 'import numpy as np\n'), ((31384, 31418), 'ldif.util.file_util.log.info', 'log.info', (['"""Finished optimization."""'], {}), "('Finished optimization.')\n", (31392, 31418), False, 'from ldif.util.file_util import log\n'), ((31990, 32023), 'ldif.util.py_util.py2_temporary_directory', 'py_util.py2_temporary_directory', ([], {}), '()\n', (32021, 32023), False, 'from ldif.util import py_util\n'), ((32262, 32293), 'ldif.util.file_util.write_mesh', 'file_util.write_mesh', (['m_path', 'm'], {}), '(m_path, m)\n', (32282, 32293), False, 'from ldif.util import file_util\n'), ((32599, 32631), 'subprocess.check_output', 'sp.check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (32614, 32631), True, 'import subprocess as sp\n'), ((33372, 33399), 'os.path.isfile', 'os.path.isfile', (['occnet_path'], {}), '(occnet_path)\n', (33386, 33399), False, 'import os\n'), ((33810, 33843), 'ldif.util.py_util.py2_temporary_directory', 'py_util.py2_temporary_directory', ([], {}), '()\n', (33841, 33843), False, 'from ldif.util import py_util\n'), ((34455, 34494), 'ldif.util.file_util.log.verbose', 'log.verbose', (['f"""Executing command {cmd}"""'], {}), "(f'Executing command {cmd}')\n", (34466, 34494), False, 'from ldif.util.file_util import log\n'), ((36366, 36394), 'ldif.util.file_util.read_grd', 'file_util.read_grd', 
(['grd_path'], {}), '(grd_path)\n', (36384, 36394), False, 'from ldif.util import file_util\n'), ((36997, 37095), 'ldif.util.file_util.log.warning', 'log.warning', (['"""Part extraction and world2local are not supported with the custom kernel."""'], {}), "(\n 'Part extraction and world2local are not supported with the custom kernel.'\n )\n", (37008, 37095), False, 'from ldif.util.file_util import log\n'), ((39658, 39681), 'numpy.sum', 'np.sum', (['volumes'], {'axis': '(0)'}), '(volumes, axis=0)\n', (39664, 39681), True, 'import numpy as np\n'), ((40058, 40114), 'ldif.util.file_util.log.warning', 'log.warning', (['"""Warning: Marching Cubes found no surface."""'], {}), "('Warning: Marching Cubes found no surface.')\n", (40069, 40114), False, 'from ldif.util.file_util import log\n'), ((40613, 40668), 'ldif.inference.extract_mesh.marching_cubes', 'extract_mesh.marching_cubes', (['elt_volume[i, ...]', 'extent'], {}), '(elt_volume[i, ...], extent)\n', (40640, 40668), False, 'from ldif.inference import extract_mesh\n'), ((41498, 41536), 'numpy.split', 'np.split', (['samples', 'chunk_count'], {'axis': '(0)'}), '(samples, chunk_count, axis=0)\n', (41506, 41536), True, 'import numpy as np\n'), ((41718, 41745), 'numpy.concatenate', 'np.concatenate', (['out'], {'axis': '(0)'}), '(out, axis=0)\n', (41732, 41745), True, 'import numpy as np\n'), ((44832, 44854), 'numpy.squeeze', 'np.squeeze', (['influences'], {}), '(influences)\n', (44842, 44854), True, 'import numpy as np\n'), ((45179, 45227), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (45196, 45227), True, 'import tensorflow as tf\n'), ((45248, 45344), 'tensorflow.contrib.framework.filter_variables', 'contrib_framework.filter_variables', (['all_vars'], {'include_patterns': "['eval_implicit_parameters']"}), "(all_vars, include_patterns=[\n 'eval_implicit_parameters'])\n", (45282, 45344), True, 'from tensorflow.contrib import framework as 
contrib_framework\n'), ((45486, 45525), 'ldif.util.file_util.log.verbose', 'log.verbose', (['f"""{v.name}: {value.shape}"""'], {}), "(f'{v.name}: {value.shape}')\n", (45497, 45525), False, 'from ldif.util.file_util import log\n'), ((45944, 45983), 'ldif.util.file_util.log.verbose', 'log.verbose', (['f"""Dimensionality is {dim}"""'], {}), "(f'Dimensionality is {dim}')\n", (45955, 45983), False, 'from ldif.util.file_util import log\n'), ((46215, 46279), 'ldif.util.file_util.log.verbose', 'log.verbose', (['f"""FC layer shapes: {weights.shape}, {biases.shape}"""'], {}), "(f'FC layer shapes: {weights.shape}, {biases.shape}')\n", (46226, 46279), False, 'from ldif.util.file_util import log\n'), ((46648, 46717), 'ldif.util.file_util.log.verbose', 'log.verbose', (['f"""Running mean, variance: {running_mean}, {running_var}"""'], {}), "(f'Running mean, variance: {running_mean}, {running_var}')\n", (46659, 46717), False, 'from ldif.util.file_util import log\n'), ((46955, 47025), 'ldif.util.file_util.log.verbose', 'log.verbose', (['f"""Input FC layer shapes: {weights.shape}, {biases.shape}"""'], {}), "(f'Input FC layer shapes: {weights.shape}, {biases.shape}')\n", (46966, 47025), False, 'from ldif.util.file_util import log\n'), ((47304, 47374), 'ldif.util.file_util.log.verbose', 'log.verbose', (['f"""Final FC layer shape and bias: {weights.shape}, {bias}"""'], {}), "(f'Final FC layer shape and bias: {weights.shape}, {bias}')\n", (47315, 47374), False, 'from ldif.util.file_util import log\n'), ((4738, 4754), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (4752, 4754), True, 'import tensorflow as tf\n'), ((4831, 4870), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': 0}"}), "(device_count={'GPU': 0})\n", (4845, 4870), True, 'import tensorflow as tf\n'), ((9603, 9634), 'numpy.reshape', 'np.reshape', (['nss', '[1, 100000, 4]'], {}), '(nss, [1, 100000, 4])\n', (9613, 9634), True, 'import numpy as np\n'), ((10091, 10113), 'numpy.reshape', 
'np.reshape', (['tx', '[4, 4]'], {}), '(tx, [4, 4])\n', (10101, 10113), True, 'import numpy as np\n'), ((11026, 11058), 'ldif.util.file_util.log.info', 'log.info', (['"""Silencing implicits."""'], {}), "('Silencing implicits.')\n", (11034, 11058), False, 'from ldif.util.file_util import log\n'), ((13093, 13139), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.generic_sample_ph'], {'axis': '(0)'}), '(self.generic_sample_ph, axis=0)\n', (13107, 13139), True, 'import tensorflow as tf\n'), ((13811, 13867), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.predicted_class', '(1e-05)', '(1 - 1e-05)'], {}), '(self.predicted_class, 1e-05, 1 - 1e-05)\n', (13827, 13867), True, 'import tensorflow as tf\n'), ((14567, 14593), 'tensorflow.logical_not', 'tf.logical_not', (['gt_outside'], {}), '(gt_outside)\n', (14581, 14593), True, 'import tensorflow as tf\n'), ((14616, 14644), 'tensorflow.logical_not', 'tf.logical_not', (['pred_outside'], {}), '(pred_outside)\n', (14630, 14644), True, 'import tensorflow as tf\n'), ((14816, 14864), 'tensorflow.where_v2', 'tf.where_v2', (['agree', '(0.0)', 'self.optimizer_elt_loss'], {}), '(agree, 0.0, self.optimizer_elt_loss)\n', (14827, 14864), True, 'import tensorflow as tf\n'), ((19977, 19997), 'ldif.util.py_util.x11_server', 'py_util.x11_server', ([], {}), '()\n', (19995, 19997), False, 'from ldif.util import py_util\n'), ((20157, 20189), 'subprocess.check_output', 'sp.check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (20172, 20189), True, 'import subprocess as sp\n'), ((20728, 20761), 'ldif.util.file_util.write_mesh', 'file_util.write_mesh', (['mpath', 'mesh'], {}), '(mpath, mesh)\n', (20748, 20761), False, 'from ldif.util import file_util\n'), ((24024, 24047), 'tensorflow.equal', 'tf.equal', (['am_image', 'eec'], {}), '(am_image, eec)\n', (24032, 24047), True, 'import tensorflow as tf\n'), ((27926, 27954), 'numpy.reshape', 'np.reshape', (['samples', '[-1, 3]'], {}), '(samples, [-1, 3])\n', (27936, 27954), True, 
'import numpy as np\n'), ((28114, 28152), 'numpy.logical_and', 'np.logical_and', (['pred_is_out', 'gt_is_out'], {}), '(pred_is_out, gt_is_out)\n', (28128, 28152), True, 'import numpy as np\n'), ((28998, 29028), 'numpy.random.shuffle', 'np.random.shuffle', (['all_samples'], {}), '(all_samples)\n', (29015, 29028), True, 'import numpy as np\n'), ((32071, 32106), 'ldif.util.file_util.open_file', 'file_util.open_file', (['sdf_path', '"""wb"""'], {}), "(sdf_path, 'wb')\n", (32090, 32106), False, 'from ldif.util import file_util\n'), ((33428, 33456), 'os.path.dirname', 'os.path.dirname', (['occnet_path'], {}), '(occnet_path)\n', (33443, 33456), False, 'import os\n'), ((34086, 34119), 'ldif.util.path_util.get_path_to_ldif_root', 'path_util.get_path_to_ldif_root', ([], {}), '()\n', (34117, 34119), False, 'from ldif.util import path_util\n'), ((34190, 34217), 'os.path.isfile', 'os.path.isfile', (['kernel_path'], {}), '(kernel_path)\n', (34204, 34217), False, 'import os\n'), ((34841, 34873), 'subprocess.check_output', 'sp.check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (34856, 34873), True, 'import subprocess as sp\n'), ((38586, 38630), 'numpy.concatenate', 'np.concatenate', (['h_block'], {'axis': '(0 + dim_offset)'}), '(h_block, axis=0 + dim_offset)\n', (38600, 38630), True, 'import numpy as np\n'), ((41319, 41370), 'numpy.pad', 'np.pad', (['samples', '((0, pad_len), (0, 0))', '"""constant"""'], {}), "(samples, ((0, pad_len), (0, 0)), 'constant')\n", (41325, 41370), True, 'import numpy as np\n'), ((45998, 46039), 'struct.pack', 'struct.pack', (['"""ii"""', 'num_resnet_layers', 'dim'], {}), "('ii', num_resnet_layers, dim)\n", (46009, 46039), False, 'import struct\n'), ((46732, 46776), 'struct.pack', 'struct.pack', (['"""ff"""', 'running_mean', 'running_var'], {}), "('ff', running_mean, running_var)\n", (46743, 46776), False, 'import struct\n'), ((47435, 47457), 'struct.pack', 'struct.pack', (['"""f"""', 'bias'], {}), "('f', bias)\n", (47446, 47457), 
False, 'import struct\n'), ((12520, 12674), 'ldif.util.np_util.make_coordinate_grid_3d', 'np_util.make_coordinate_grid_3d', ([], {'length': 'self.block_res', 'height': 'self.block_res', 'width': 'self.block_res', 'is_screen_space': '(False)', 'is_homogeneous': '(False)'}), '(length=self.block_res, height=self.\n block_res, width=self.block_res, is_screen_space=False, is_homogeneous=\n False)\n', (12551, 12674), False, 'from ldif.util import np_util\n'), ((14134, 14185), 'tensorflow.abs', 'tf.abs', (['(self.target_class_ph - self.predicted_class)'], {}), '(self.target_class_ph - self.predicted_class)\n', (14140, 14185), True, 'import tensorflow as tf\n'), ((14688, 14728), 'tensorflow.logical_and', 'tf.logical_and', (['gt_outside', 'pred_outside'], {}), '(gt_outside, pred_outside)\n', (14702, 14728), True, 'import tensorflow as tf\n'), ((14742, 14780), 'tensorflow.logical_and', 'tf.logical_and', (['gt_inside', 'pred_inside'], {}), '(gt_inside, pred_inside)\n', (14756, 14780), True, 'import tensorflow as tf\n'), ((15656, 15727), 'ldif.util.file_util.log.warning', 'log.warning', (['"""No variables to restore or restoration otherwise failed."""'], {}), "('No variables to restore or restoration otherwise failed.')\n", (15667, 15727), False, 'from ldif.util.file_util import log\n'), ((20816, 20837), 'ldif.util.path_util.gaps_path', 'path_util.gaps_path', ([], {}), '()\n', (20835, 20837), False, 'from ldif.util import path_util\n'), ((21655, 21676), 'ldif.util.path_util.gaps_path', 'path_util.gaps_path', ([], {}), '()\n', (21674, 21676), False, 'from ldif.util import path_util\n'), ((23206, 23225), 'numpy.ones', 'np.ones', (['(224, 224)'], {}), '((224, 224))\n', (23213, 23225), True, 'import numpy as np\n'), ((26684, 26763), 'numpy.concatenate', 'np.concatenate', (['[example.near_surface_samples, example.uniform_samples]'], {'axis': '(0)'}), '([example.near_surface_samples, example.uniform_samples], axis=0)\n', (26698, 26763), True, 'import numpy as np\n'), ((28194, 
28221), 'numpy.logical_not', 'np.logical_not', (['pred_is_out'], {}), '(pred_is_out)\n', (28208, 28221), True, 'import numpy as np\n'), ((28223, 28248), 'numpy.logical_not', 'np.logical_not', (['gt_is_out'], {}), '(gt_is_out)\n', (28237, 28248), True, 'import numpy as np\n'), ((30048, 30060), 'numpy.sum', 'np.sum', (['loss'], {}), '(loss)\n', (30054, 30060), True, 'import numpy as np\n'), ((30263, 30285), 'numpy.count_nonzero', 'np.count_nonzero', (['loss'], {}), '(loss)\n', (30279, 30285), True, 'import numpy as np\n'), ((31221, 31236), 'numpy.array', 'np.array', (['mults'], {}), '(mults)\n', (31229, 31236), True, 'import numpy as np\n'), ((32494, 32515), 'ldif.util.path_util.gaps_path', 'path_util.gaps_path', ([], {}), '()\n', (32513, 32515), False, 'from ldif.util import path_util\n'), ((33587, 33620), 'ldif.util.path_util.get_path_to_ldif_root', 'path_util.get_path_to_ldif_root', ([], {}), '()\n', (33618, 33620), False, 'from ldif.util import path_util\n'), ((38519, 38563), 'numpy.concatenate', 'np.concatenate', (['w_block'], {'axis': '(2 + dim_offset)'}), '(w_block, axis=2 + dim_offset)\n', (38533, 38563), True, 'import numpy as np\n'), ((13983, 14003), 'tensorflow.log', 'tf.log', (['clipped_pred'], {}), '(clipped_pred)\n', (13989, 14003), True, 'import tensorflow as tf\n'), ((14049, 14073), 'tensorflow.log', 'tf.log', (['(1 - clipped_pred)'], {}), '(1 - clipped_pred)\n', (14055, 14073), True, 'import tensorflow as tf\n'), ((14286, 14340), 'tensorflow.square', 'tf.square', (['(self.target_class_ph - self.predicted_class)'], {}), '(self.target_class_ph - self.predicted_class)\n', (14295, 14340), True, 'import tensorflow as tf\n'), ((20051, 20072), 'ldif.util.path_util.gaps_path', 'path_util.gaps_path', ([], {}), '()\n', (20070, 20072), False, 'from ldif.util import path_util\n'), ((26871, 26915), 'ldif.util.geom_util.depth_dodeca_to_samples', 'geom_util.depth_dodeca_to_samples', (['depth_ims'], {}), '(depth_ims)\n', (26904, 26915), False, 'from ldif.util import 
geom_util\n'), ((37941, 37990), 'numpy.array', 'np.array', (['[w_min, l_min, h_min]'], {'dtype': 'np.float32'}), '([w_min, l_min, h_min], dtype=np.float32)\n', (37949, 37990), True, 'import numpy as np\n'), ((38141, 38211), 'ldif.util.geom_util_np.apply_4x4', 'geom_util_np.apply_4x4', (['sample_locations', 'world2local'], {'are_points': '(True)'}), '(sample_locations, world2local, are_points=True)\n', (38163, 38211), False, 'from ldif.util import geom_util_np\n'), ((27272, 27319), 'ldif.util.file_util.log.verbose', 'log.verbose', (['"""Depth im shape: """', 'depth_im.shape'], {}), "('Depth im shape: ', depth_im.shape)\n", (27283, 27319), False, 'from ldif.util.file_util import log\n'), ((27340, 27393), 'ldif.util.geom_util.depth_image_to_samples', 'geom_util.depth_image_to_samples', (['depth_im', 'cam2world'], {}), '(depth_im, cam2world)\n', (27372, 27393), False, 'from ldif.util import geom_util\n'), ((27134, 27173), 'ldif.util.geom_util.get_dodeca_camera_to_worlds', 'geom_util.get_dodeca_camera_to_worlds', ([], {}), '()\n', (27171, 27173), False, 'from ldif.util import geom_util\n'), ((30408, 30420), 'numpy.sum', 'np.sum', (['loss'], {}), '(loss)\n', (30414, 30420), True, 'import numpy as np\n')] |
import os
import sys
import bokeh.layouts as bkl
import bokeh.palettes
import bokeh.plotting as bkp
import numpy as np
# make it so we can import models/etc from parent folder
sys.path.insert(1, os.path.join(sys.path[0], '../common'))
from plotting import *
# ---------------------------------------------------------------------------
# Figure construction: every plot is a square log-log figure with identical
# sizing, so build them through one small factory.
# ---------------------------------------------------------------------------
FIG_SIZE = 1250
ITER_LABEL = 'Coreset Construction Iterations'


def _log_fig(xlabel, ylabel):
    """Build a square log-log figure with the shared plot dimensions."""
    return bkp.figure(x_axis_type='log', y_axis_type='log',
                      x_axis_label=xlabel, y_axis_label=ylabel,
                      plot_width=FIG_SIZE, plot_height=FIG_SIZE)


# Gaussian-experiment figures
fig_err_g = _log_fig(ITER_LABEL, 'Error')
fig_csz_g = _log_fig(ITER_LABEL, 'Coreset Size')
fig_t_g = _log_fig(ITER_LABEL, 'CPU Time (s)')
fig_errc_g = _log_fig('Coreset Size', 'Error')
fig_errt_g = _log_fig('CPU Time (s)', 'Error')
# Axis-experiment figures
fig_err_a = _log_fig(ITER_LABEL, 'Error')
fig_csz_a = _log_fig(ITER_LABEL, 'Coreset Size')
fig_t_a = _log_fig(ITER_LABEL, 'CPU Time (s)')
fig_errc_a = _log_fig('Coreset Size', 'Error')
fig_errt_a = _log_fig('CPU Time (s)', 'Error')

axis_font_size = '36pt'
legend_font_size = '36pt'

_all_figs = [fig_err_g, fig_err_a, fig_csz_a, fig_csz_g, fig_t_a, fig_t_g,
             fig_errc_g, fig_errc_a, fig_errt_g, fig_errt_a]

# Shared axis styling: big fonts, log-scale tick formatter, no toolbar chrome
for f in _all_figs:
    for ax in (f.xaxis, f.yaxis):
        ax.axis_label_text_font_size = axis_font_size
        ax.major_label_text_font_size = axis_font_size
        ax.formatter = logFmtr
    f.toolbar.logo = None
    f.toolbar_location = None


def _median(curves):
    """Column-wise median across trials (axis 0) of a (trials, M) array."""
    return np.percentile(curves, 50, axis=0)


# ---------------------------------------------------------------------------
# Gaussian experiment: solid median curves, one color per algorithm
# ---------------------------------------------------------------------------
res = np.load('gauss_results.npz')
anms, Ms = res['anms'], res['Ms']
err, cput, csize = res['err'], res['cput'], res['csize']

pal = bokeh.palettes.colorblind['Colorblind'][8]
pal = [pal[0], pal[1], '#d62728', pal[4], pal[6], pal[3], pal[7], pal[2]]

for idx, nm in enumerate(anms):
    clr = pal[idx]
    fig_err_g.line(Ms, _median(err[idx, :, :]), line_color=clr, line_width=8, legend=nm)
    fig_csz_g.line(Ms, _median(csize[idx, :, :]), line_color=clr, line_width=8, legend=nm)
    fig_t_g.line(Ms, _median(cput[idx, :, :]), line_color=clr, line_width=8, legend=nm)
    fig_errc_g.line(_median(csize[idx, :, :]), _median(err[idx, :, :]),
                    line_color=clr, line_width=8, legend=nm)
    fig_errt_g.line(_median(cput[idx, :, :]), _median(err[idx, :, :]),
                    line_color=clr, line_width=8, legend=nm)

# ---------------------------------------------------------------------------
# Axis experiment: same layout, dashed curves with random phase offsets so
# overlapping lines stay distinguishable
# ---------------------------------------------------------------------------
res = np.load('axis_results.npz')
anms, Ms = res['anms'], res['Ms']
err, cput, csize = res['err'], res['cput'], res['csize']

for idx, nm in enumerate(anms):
    clr = pal[idx]
    fig_err_a.line(Ms, _median(err[idx, :, :]), line_color=clr, line_width=8, legend=nm,
                   line_dash=[20, 30], line_dash_offset=np.random.randint(50))
    fig_csz_a.line(Ms, _median(csize[idx, :, :]), line_color=clr, line_width=8, legend=nm,
                   line_dash=[20, 30], line_dash_offset=np.random.randint(50))
    fig_t_a.line(Ms, _median(cput[idx, :, :]), line_color=clr, line_width=8, legend=nm)
    fig_errc_a.line(_median(csize[idx, :, :]), _median(err[idx, :, :]),
                    line_color=clr, line_width=8, legend=nm,
                    line_dash=[20, 30], line_dash_offset=np.random.randint(50))
    fig_errt_a.line(_median(cput[idx, :, :]), _median(err[idx, :, :]),
                    line_color=clr, line_width=8, legend=nm,
                    line_dash=[20, 30], line_dash_offset=np.random.randint(50))

# Shared legend styling
for f in _all_figs:
    f.legend.label_text_font_size = legend_font_size
    f.legend.glyph_width = 100
    f.legend.glyph_height = 40
    f.legend.spacing = 20
fig_err_a.legend.location = 'bottom_left'
fig_csz_a.legend.location = 'bottom_right'

bkp.show(bkl.gridplot([[fig_err_g, fig_csz_g], [fig_t_g], [fig_errc_g, fig_errt_g],
                       [fig_err_a, fig_csz_a], [fig_t_a],
                       [fig_errc_a, fig_errt_a]]))
# To write to disk instead of opening a browser, swap bkp.show for:
# bkp.output_file('results.html'); bkp.save(<the same gridplot>)
| [
"bokeh.plotting.figure",
"os.path.join",
"bokeh.layouts.gridplot",
"numpy.random.randint",
"numpy.percentile",
"numpy.load"
] | [((273, 434), 'bokeh.plotting.figure', 'bkp.figure', ([], {'y_axis_type': '"""log"""', 'x_axis_type': '"""log"""', 'y_axis_label': '"""Error"""', 'x_axis_label': '"""Coreset Construction Iterations"""', 'plot_width': '(1250)', 'plot_height': '(1250)'}), "(y_axis_type='log', x_axis_type='log', y_axis_label='Error',\n x_axis_label='Coreset Construction Iterations', plot_width=1250,\n plot_height=1250)\n", (283, 434), True, 'import bokeh.plotting as bkp\n'), ((462, 631), 'bokeh.plotting.figure', 'bkp.figure', ([], {'x_axis_type': '"""log"""', 'y_axis_type': '"""log"""', 'y_axis_label': '"""Coreset Size"""', 'x_axis_label': '"""Coreset Construction Iterations"""', 'plot_width': '(1250)', 'plot_height': '(1250)'}), "(x_axis_type='log', y_axis_type='log', y_axis_label=\n 'Coreset Size', x_axis_label='Coreset Construction Iterations',\n plot_width=1250, plot_height=1250)\n", (472, 631), True, 'import bokeh.plotting as bkp\n'), ((656, 825), 'bokeh.plotting.figure', 'bkp.figure', ([], {'x_axis_type': '"""log"""', 'y_axis_type': '"""log"""', 'y_axis_label': '"""CPU Time (s)"""', 'x_axis_label': '"""Coreset Construction Iterations"""', 'plot_width': '(1250)', 'plot_height': '(1250)'}), "(x_axis_type='log', y_axis_type='log', y_axis_label=\n 'CPU Time (s)', x_axis_label='Coreset Construction Iterations',\n plot_width=1250, plot_height=1250)\n", (666, 825), True, 'import bokeh.plotting as bkp\n'), ((851, 989), 'bokeh.plotting.figure', 'bkp.figure', ([], {'x_axis_type': '"""log"""', 'y_axis_type': '"""log"""', 'y_axis_label': '"""Error"""', 'x_axis_label': '"""Coreset Size"""', 'plot_width': '(1250)', 'plot_height': '(1250)'}), "(x_axis_type='log', y_axis_type='log', y_axis_label='Error',\n x_axis_label='Coreset Size', plot_width=1250, plot_height=1250)\n", (861, 989), True, 'import bokeh.plotting as bkp\n'), ((1023, 1161), 'bokeh.plotting.figure', 'bkp.figure', ([], {'x_axis_type': '"""log"""', 'y_axis_type': '"""log"""', 'y_axis_label': '"""Error"""', 'x_axis_label': 
'"""CPU Time (s)"""', 'plot_width': '(1250)', 'plot_height': '(1250)'}), "(x_axis_type='log', y_axis_type='log', y_axis_label='Error',\n x_axis_label='CPU Time (s)', plot_width=1250, plot_height=1250)\n", (1033, 1161), True, 'import bokeh.plotting as bkp\n'), ((1194, 1355), 'bokeh.plotting.figure', 'bkp.figure', ([], {'y_axis_type': '"""log"""', 'x_axis_type': '"""log"""', 'y_axis_label': '"""Error"""', 'x_axis_label': '"""Coreset Construction Iterations"""', 'plot_width': '(1250)', 'plot_height': '(1250)'}), "(y_axis_type='log', x_axis_type='log', y_axis_label='Error',\n x_axis_label='Coreset Construction Iterations', plot_width=1250,\n plot_height=1250)\n", (1204, 1355), True, 'import bokeh.plotting as bkp\n'), ((1383, 1552), 'bokeh.plotting.figure', 'bkp.figure', ([], {'x_axis_type': '"""log"""', 'y_axis_type': '"""log"""', 'y_axis_label': '"""Coreset Size"""', 'x_axis_label': '"""Coreset Construction Iterations"""', 'plot_width': '(1250)', 'plot_height': '(1250)'}), "(x_axis_type='log', y_axis_type='log', y_axis_label=\n 'Coreset Size', x_axis_label='Coreset Construction Iterations',\n plot_width=1250, plot_height=1250)\n", (1393, 1552), True, 'import bokeh.plotting as bkp\n'), ((1577, 1746), 'bokeh.plotting.figure', 'bkp.figure', ([], {'x_axis_type': '"""log"""', 'y_axis_type': '"""log"""', 'y_axis_label': '"""CPU Time (s)"""', 'x_axis_label': '"""Coreset Construction Iterations"""', 'plot_width': '(1250)', 'plot_height': '(1250)'}), "(x_axis_type='log', y_axis_type='log', y_axis_label=\n 'CPU Time (s)', x_axis_label='Coreset Construction Iterations',\n plot_width=1250, plot_height=1250)\n", (1587, 1746), True, 'import bokeh.plotting as bkp\n'), ((1772, 1910), 'bokeh.plotting.figure', 'bkp.figure', ([], {'y_axis_type': '"""log"""', 'x_axis_type': '"""log"""', 'y_axis_label': '"""Error"""', 'x_axis_label': '"""Coreset Size"""', 'plot_width': '(1250)', 'plot_height': '(1250)'}), "(y_axis_type='log', x_axis_type='log', y_axis_label='Error',\n 
x_axis_label='Coreset Size', plot_width=1250, plot_height=1250)\n", (1782, 1910), True, 'import bokeh.plotting as bkp\n'), ((1944, 2082), 'bokeh.plotting.figure', 'bkp.figure', ([], {'y_axis_type': '"""log"""', 'x_axis_type': '"""log"""', 'y_axis_label': '"""Error"""', 'x_axis_label': '"""CPU Time (s)"""', 'plot_width': '(1250)', 'plot_height': '(1250)'}), "(y_axis_type='log', x_axis_type='log', y_axis_label='Error',\n x_axis_label='CPU Time (s)', plot_width=1250, plot_height=1250)\n", (1954, 2082), True, 'import bokeh.plotting as bkp\n'), ((2685, 2713), 'numpy.load', 'np.load', (['"""gauss_results.npz"""'], {}), "('gauss_results.npz')\n", (2692, 2713), True, 'import numpy as np\n'), ((3664, 3691), 'numpy.load', 'np.load', (['"""axis_results.npz"""'], {}), "('axis_results.npz')\n", (3671, 3691), True, 'import numpy as np\n'), ((197, 235), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""../common"""'], {}), "(sys.path[0], '../common')\n", (209, 235), False, 'import os\n'), ((5190, 5330), 'bokeh.layouts.gridplot', 'bkl.gridplot', (['[[fig_err_g, fig_csz_g], [fig_t_g], [fig_errc_g, fig_errt_g], [fig_err_a,\n fig_csz_a], [fig_t_a], [fig_errc_a, fig_errt_a]]'], {}), '([[fig_err_g, fig_csz_g], [fig_t_g], [fig_errc_g, fig_errt_g],\n [fig_err_a, fig_csz_a], [fig_t_a], [fig_errc_a, fig_errt_a]])\n', (5202, 5330), True, 'import bokeh.layouts as bkl\n'), ((2980, 3022), 'numpy.percentile', 'np.percentile', (['err[aidx, :, :]', '(50)'], {'axis': '(0)'}), '(err[aidx, :, :], 50, axis=0)\n', (2993, 3022), True, 'import numpy as np\n'), ((3095, 3139), 'numpy.percentile', 'np.percentile', (['csize[aidx, :, :]', '(50)'], {'axis': '(0)'}), '(csize[aidx, :, :], 50, axis=0)\n', (3108, 3139), True, 'import numpy as np\n'), ((3210, 3253), 'numpy.percentile', 'np.percentile', (['cput[aidx, :, :]', '(50)'], {'axis': '(0)'}), '(cput[aidx, :, :], 50, axis=0)\n', (3223, 3253), True, 'import numpy as np\n'), ((3323, 3367), 'numpy.percentile', 'np.percentile', (['csize[aidx, :, :]', 
'(50)'], {'axis': '(0)'}), '(csize[aidx, :, :], 50, axis=0)\n', (3336, 3367), True, 'import numpy as np\n'), ((3369, 3411), 'numpy.percentile', 'np.percentile', (['err[aidx, :, :]', '(50)'], {'axis': '(0)'}), '(err[aidx, :, :], 50, axis=0)\n', (3382, 3411), True, 'import numpy as np\n'), ((3501, 3544), 'numpy.percentile', 'np.percentile', (['cput[aidx, :, :]', '(50)'], {'axis': '(0)'}), '(cput[aidx, :, :], 50, axis=0)\n', (3514, 3544), True, 'import numpy as np\n'), ((3546, 3588), 'numpy.percentile', 'np.percentile', (['err[aidx, :, :]', '(50)'], {'axis': '(0)'}), '(err[aidx, :, :], 50, axis=0)\n', (3559, 3588), True, 'import numpy as np\n'), ((3835, 3877), 'numpy.percentile', 'np.percentile', (['err[aidx, :, :]', '(50)'], {'axis': '(0)'}), '(err[aidx, :, :], 50, axis=0)\n', (3848, 3877), True, 'import numpy as np\n'), ((4029, 4073), 'numpy.percentile', 'np.percentile', (['csize[aidx, :, :]', '(50)'], {'axis': '(0)'}), '(csize[aidx, :, :], 50, axis=0)\n', (4042, 4073), True, 'import numpy as np\n'), ((4223, 4266), 'numpy.percentile', 'np.percentile', (['cput[aidx, :, :]', '(50)'], {'axis': '(0)'}), '(cput[aidx, :, :], 50, axis=0)\n', (4236, 4266), True, 'import numpy as np\n'), ((4336, 4380), 'numpy.percentile', 'np.percentile', (['csize[aidx, :, :]', '(50)'], {'axis': '(0)'}), '(csize[aidx, :, :], 50, axis=0)\n', (4349, 4380), True, 'import numpy as np\n'), ((4382, 4424), 'numpy.percentile', 'np.percentile', (['err[aidx, :, :]', '(50)'], {'axis': '(0)'}), '(err[aidx, :, :], 50, axis=0)\n', (4395, 4424), True, 'import numpy as np\n'), ((4594, 4637), 'numpy.percentile', 'np.percentile', (['cput[aidx, :, :]', '(50)'], {'axis': '(0)'}), '(cput[aidx, :, :], 50, axis=0)\n', (4607, 4637), True, 'import numpy as np\n'), ((4639, 4681), 'numpy.percentile', 'np.percentile', (['err[aidx, :, :]', '(50)'], {'axis': '(0)'}), '(err[aidx, :, :], 50, axis=0)\n', (4652, 4681), True, 'import numpy as np\n'), ((3983, 4004), 'numpy.random.randint', 'np.random.randint', (['(50)'], {}), 
'(50)\n', (4000, 4004), True, 'import numpy as np\n'), ((4179, 4200), 'numpy.random.randint', 'np.random.randint', (['(50)'], {}), '(50)\n', (4196, 4200), True, 'import numpy as np\n'), ((4551, 4572), 'numpy.random.randint', 'np.random.randint', (['(50)'], {}), '(50)\n', (4568, 4572), True, 'import numpy as np\n'), ((4808, 4829), 'numpy.random.randint', 'np.random.randint', (['(50)'], {}), '(50)\n', (4825, 4829), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Various tools for calculating statistics
"""
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import numpy as np
def weighted_mean(var, wts):
    """Return the weighted arithmetic mean of ``var`` with weights ``wts``."""
    return np.average(var, weights=wts)


def weighted_variance(var, wts):
    """Return the (biased) weighted variance of ``var``.

    This is the weighted second central moment; no Bessel-style correction
    is applied.
    """
    return np.average((var - weighted_mean(var, wts))**2, weights=wts)


def weighted_skew(var, wts):
    """Return the weighted skewness (third standardized moment) of ``var``."""
    return (np.average((var - weighted_mean(var, wts))**3, weights=wts) /
            weighted_variance(var, wts)**(1.5))


def weighted_kurtosis(var, wts):
    """Return the weighted kurtosis (fourth standardized moment) of ``var``.

    Note: the original docstring said "skewness" -- a copy-paste error; this
    computes the kurtosis (a normal distribution gives ~3, not 0).
    """
    return (np.average((var - weighted_mean(var, wts))**4, weights=wts) /
            weighted_variance(var, wts)**(2))


def weighted_covariance(x, y, wt):
    """Return the weighted covariance between ``x`` and ``y``."""
    return (np.average((x - weighted_mean(x, wt)) *
                       (y - weighted_mean(y, wt)), weights=wt))


def weighted_correlation(x, y, wt):
    """Return the weighted Pearson correlation between ``x`` and ``y``."""
    return (weighted_covariance(x, y, wt) /
            (np.sqrt(weighted_variance(x, wt)) * np.sqrt(weighted_variance(y, wt))))


def weighted_correlation_rank(x, y, wt):
    """Return the weighted Spearman (rank) correlation between ``x`` and ``y``.

    Both inputs are converted to ranks (average ranks on ties, via
    ``scipy.stats.rankdata``) before computing the weighted Pearson
    correlation.
    """
    from scipy.stats import rankdata
    x = rankdata(x)
    y = rankdata(y)
    return weighted_correlation(x, y, wt)
def near_positive_definite(input_matrix):
    """Calculate the nearest positive definite matrix using R's ``Matrix::nearPD``.

    An installation of R with the library "Matrix" is required, as well as the
    python module ``rpy2``. If ``rpy2`` is missing, an error is logged and
    ``None`` is returned.

    Parameters:
        input_matrix: input numpy array or pandas dataframe, not numpy matrix

    Returns:
        (np.array): Nearest positive definite matrix as a numpy-array with the
        same shape as ``input_matrix``
    """
    import pandas as pd
    import numpy as np
    from ..utility.logging import printerr
    # Try and load rpy2 -- an optional dependency, so fail gracefully
    try:
        import rpy2.robjects as robjects
        from rpy2.robjects import r
        from rpy2.robjects.packages import importr
    except ImportError:
        printerr(("near_positive_definite could not be loaded. Please install the r2py library"
                  " and the software R with the library 'Matrix' to enable it. Installation"
                  " instructions can be found within pygeostat's documentation."),
                 errtype='error')
        return
    # Convert input matrix to a numpy array if it is a pd.DataFrame.
    # BUG FIX: DataFrame.as_matrix() was removed in pandas 1.0; `.values` is
    # the backward-compatible replacement.
    if isinstance(input_matrix, pd.DataFrame):
        input_matrix = input_matrix.values
    # Determine matrix shape so it can be restored after the round-trip
    dim = input_matrix.shape
    # Load the R "Matrix" library
    matcalc = importr("Matrix")
    # Convert numpy array to an RObject, then to an R matrix
    pdmat = robjects.FloatVector(input_matrix.reshape(input_matrix.size))
    pdmat = robjects.r.matrix(pdmat, nrow=dim[0], ncol=dim[1], byrow=True)
    # Calculate the nearest positive definite matrix.
    # BUG FIX: the R function is ``nearPD`` -- the Matrix package exports no
    # function named ``near_positive_definite``, so the old call could never
    # succeed (a bad mechanical rename).
    pdmat = matcalc.nearPD(pdmat)
    pdmat = pdmat[0]  # Extract the "mat" slot (first element of the R list)
    pdmat = r.toString(pdmat)  # Convert R binary object to a string
    pdmat = pdmat.r_repr()  # Convert R string to python string
    pdmat = pdmat.replace('"', "")  # Clean up string
    pdmat = pdmat.replace(' ', "")  # Clean up string
    # Convert comma delimited string to a float array, restoring the shape
    pdmat = np.array([float(x) for x in pdmat.split(',')])
    return np.reshape(pdmat, dim)
def accsim(truth, reals, pinc=0.05):
    """
    Calculates the proportion of locations where the true value falls within symmetric p-PI
    intervals when completing a jackknife study. A portion of the data is excluded from the
    conditioning dataset and the excluded sample locations simulated values are then checked.

    .. seealso::

        Geostatistical Reservoir Modeling (2nd ed.). New York, NY: Oxford University Press,
        p. 350-351.

    Arguments:
        truth: Tidy (long-form) 1D data where a single column containing the true values.
            A pandas dataframe/series or numpy array can be passed
        reals: Tidy (long-form) 2D data where a single column contains values from a single
            realizations and each row contains the simulated values from a single truth location.
            A pandas dataframe or numpy matrix can be passed

    Keyword Arguments:
        pinc (float): Increments between the probability intervals to calculate within (0, 1)

    Returns:
        propavg (pd.DataFrame): Dataframe with the calculated probability intervals and the
        fraction within the interval

    Returns:
        sumstats (dict): Dictionary containing the average variance (U), mean squared error (MSE),
        accuracy measure (acc), precision measure (pre), and a goodness measure (goo)

    """
    import pandas as pd
    import pygeostat as gs
    # Coerce `truth` into a 2D (n, 1) numpy array
    if isinstance(truth, (pd.Series, pd.DataFrame)):
        truth = truth.values
    elif not isinstance(truth, np.ndarray):
        raise ValueError("The argument `truth` must be a pd.DataFrame, pd.Series, or np.matrix")
    if len(truth.shape) == 1:
        truth = np.reshape(truth, (truth.shape[0], 1))
    # Coerce `reals` into a numpy array
    if isinstance(reals, pd.DataFrame):
        reals = reals.values
    elif not isinstance(reals, np.ndarray):
        raise ValueError("The argument `reals` must be a pd.DataFrame or np.matrix")
    try:
        data = np.concatenate((truth, reals), axis=1)
        data = pd.DataFrame(data=data)
    except Exception as exc:  # BUG FIX: was a bare `except:` that hid the real cause
        raise ValueError(
            "The `truth` and `reals` data could not be coerced into a pd.DataFrame") from exc
    # Initialize the probability intervals and per-interval indicator lists.
    # (Removed dead code: `acc`/`pre`/`goo` were initialized as dicts here but
    # unconditionally reassigned to scalars below.)
    pints = np.arange(pinc, 1, pinc)
    propindic = {pint: [] for pint in pints}
    variances = []
    # Calculate the indicator responses and local variances
    for _, values in data.iterrows():
        # values[0] is the truth; values[1:] are the realization values
        cdf = gs.cdf(values[1:].values)
        variances.append(np.var(values[1:].values))
        for pint in pints:
            indic = 0
            # The truth can only fall in an interval if it lies within the
            # support of the simulated CDF
            if cdf[0][0] <= values[0] <= cdf[0][-1]:
                p = gs.z_percentile(values[0], cdf[0], cdf[1])
                if (0.5 - pint / 2) <= p <= (0.5 + pint / 2):
                    indic = 1
            propindic[pint].append(indic)
    # Calculate the average proportions and average variance
    propavg = pd.DataFrame([[pint, np.average(propindic[pint])] for pint in pints],
                           columns=['ProbInt', 'FracIn'])
    avgvar = np.average(variances)
    mse = ((propavg['ProbInt'].values - propavg['FracIn'].values) ** 2).mean()
    # Accuracy, precision and goodness summary measures
    acc = 0
    pre = 0
    goo = 0
    for _, values in propavg.iterrows():
        if values[1] >= values[0]:  # FracIn >= ProbInt: interval is "accurate"
            acc += 1
            pre += values[1] - values[0]
            goo += values[1] - values[0]
        else:
            goo += 2 * (values[0] - values[1])
    acc = acc / len(propavg)
    pre = 1 - ((2 * pre) / len(propavg))
    goo = 1 - (goo / len(propavg))
    sumstats = {'avgvar': avgvar, 'mse': mse, 'acc': acc, 'pre': pre, 'goo': goo}
    return propavg, sumstats
def accmik(truth, thresholds, mikprobs, pinc=0.05):
    """
    Similar to :func:`accsim` but accepting MIK distributions instead of
    simulated realizations.

    Parameters
    ----------
    truth: np.ndarray
        Tidy (long-form) 1D data where a single column containing the true values.
        A pandas dataframe/series or numpy array can be passed
    thresholds: np.ndarray
        1D array of thresholds where each CDF is defined by these thresholds and the probability
        given in the mikprobs array for each location.
    mikprobs: np.ndarray
        Tidy (long-form) 2D data where a single column contains values from a single
        MIK cutoff and each row contains the simulated values for the corresponding single
        truth location. A pandas dataframe or numpy matrix can be passed
    pinc: float
        Increments between the probability intervals to calculate within (0, 1)

    Returns
    -------
    propavg: pd.DataFrame
        Dataframe with the calculated probability intervals and the fraction within the interval
    sumstats: dict
        Dictionary containing the average variance (U), mean squared error (MSE), accuracy
        measure (acc), precision measure (pre), and a goodness measure (goo)
    """
    import pandas as pd
    # Coerce `truth` into a 2D (n, 1) numpy array
    if isinstance(truth, (pd.Series, pd.DataFrame)):
        truth = truth.values
    elif not isinstance(truth, np.ndarray):
        raise ValueError("The argument `truth` must be a pd.DataFrame, pd.Series, or np.matrix")
    if isinstance(truth, np.ndarray) and len(truth.shape) == 1:
        truth = np.reshape(truth, (truth.shape[0], 1))
    # Coerce `mikprobs` into a numpy array
    if isinstance(mikprobs, pd.DataFrame):
        mikprobs = mikprobs.values
    elif not isinstance(mikprobs, np.ndarray):
        raise ValueError("The argument `mikprobs` must be a pd.DataFrame or np.matrix")
    # Per-interval indicator responses and local variances
    pints, propindic, variances = _interval_responses(truth, mikprobs, pinc, cdf_x=thresholds)
    # Average fraction of truths falling inside each symmetric interval
    rows = [[pint, np.average(propindic[pint])] for pint in pints]
    propavg = pd.DataFrame(rows, columns=['ProbInt', 'FracIn'])
    # Summary statistics
    avgvar = np.average(variances)
    mse = ((propavg['ProbInt'].values - propavg['FracIn'].values) ** 2).mean()
    acc, pre, goo = 0, 0, 0
    for prob, frac in zip(propavg['ProbInt'].values, propavg['FracIn'].values):
        if frac >= prob:
            acc += 1
            pre += frac - prob
            goo += frac - prob
        else:
            goo += 2 * (prob - frac)
    nints = len(propavg)
    acc = acc / nints
    pre = 1 - (2 * pre) / nints
    goo = 1 - goo / nints
    sumstats = {'avgvar': avgvar, 'mse': mse, 'acc': acc, 'pre': pre, 'goo': goo}
    return propavg, sumstats
def _interval_responses(truth, reals, pinc, cdf_x=None):
    """Compute the per-location indicator responses and variances used by the
    accuracy-plot routines.

    When cdf_x is None, reals contains the simulated values from which a cdf
    should be computed. Otherwise ``reals`` contains the distribution
    F(cdf_x) values for each location (nloc, nquant).
    Mostly pulled from the original accsim.

    Parameters:
        truth: np.ndarray
            tidy 1D array of truth values
        reals: np.ndarray
            tidy 2D array of `reals` where if cdf_x is None these are the
            realizations from which a cdf is built, otherwise cdf_x defines
            the z-values and each row of reals contains the corresponding
            probabilites defining the local cdf's
        pinc: float
            the incremement of the probability intervals
        cdf_x: np.ndarray, optional
            contains the z-values when ``reals`` contains the F(z)

    Returns:
        pints: np.ndarray
            a range of pinc spaced probability intervals
        propindic: dict
            the dictionary used in accsim and accmik functions
        variances: list
            the list of variances
    """
    from .cdf import variance_from_cdf
    if not isinstance(truth, np.ndarray):
        truth = np.array(truth)
    # `reals` is "jagged" when each location has a different number of
    # simulated values (nested lists or an object-dtype array)
    isjagged = False
    if isinstance(reals, list) or (hasattr(reals, "dtype") and
                                   reals.dtype == "O"):
        # BUG FIX: `reals.dtype` used to be evaluated for ANY non-list input,
        # raising AttributeError for e.g. tuples of lists
        isjagged = True
    elif not isinstance(reals, np.ndarray):
        reals = np.array(reals)
    # BUG FIX: use len() so jagged (list) input does not crash on `.shape`
    if truth.shape[0] != len(reals):
        raise ValueError('`truth` and `reals` must have the same dimension along the first axis!')
    # initialize the variables
    pints = np.arange(pinc, 1, pinc)
    propindic = {pint: [] for pint in pints}
    variances = []
    if cdf_x is not None and reals[0, 0] != 0:
        # Prepend a leading column/threshold so each local cdf has a defined
        # lower bound. NOTE(review): the prepended column is np.ones —
        # presumably intended as the leading probability; confirm upstream.
        reals = np.c_[np.ones(truth.shape[0]), reals]
        cdf_x = np.insert(cdf_x, [0], cdf_x[0] - (cdf_x[1] - cdf_x[0]))
    # Calculate the indicator responses and local variances
    for i in range(truth.shape[0]):
        if cdf_x is None:
            if isjagged:
                ecdf = cdf(reals[i])  # each element in reals is a list of sim vals
                v = np.var(reals[i])
            else:
                ecdf = cdf(reals[i, :])  # reals is a 2D array with standard size that can be sliced
                v = np.var(reals[i, :])
            variances.append(v)
        else:
            ecdf = (cdf_x, reals[i, :])
            variances.append(variance_from_cdf(ecdf[0], ecdf[1]))
        for pint in pints:
            # indicator = 1 when the truth falls inside the centered
            # probability interval of width `pint`
            if ecdf[0][0] <= truth[i] <= ecdf[0][-1]:
                p = z_percentile(truth[i], ecdf[0], ecdf[1])
                plower = 0.5 - (pint / 2)
                pupper = 0.5 + (pint / 2)
                indic = 1 if plower <= p <= pupper else 0
            else:
                indic = 0
            propindic[pint].append(indic)
    return pints, propindic, variances
| [
"numpy.insert",
"pygeostat.z_percentile",
"numpy.reshape",
"numpy.ones",
"scipy.stats.rankdata",
"numpy.average",
"rpy2.robjects.r.toString",
"rpy2.robjects.packages.importr",
"rpy2.robjects.r.matrix",
"numpy.array",
"numpy.var",
"numpy.concatenate",
"pandas.DataFrame",
"pygeostat.cdf",
... | [((619, 647), 'numpy.average', 'np.average', (['var'], {'weights': 'wts'}), '(var, weights=wts)\n', (629, 647), True, 'import numpy as np\n'), ((1749, 1760), 'scipy.stats.rankdata', 'rankdata', (['x'], {}), '(x)\n', (1757, 1760), False, 'from scipy.stats import rankdata\n'), ((1769, 1780), 'scipy.stats.rankdata', 'rankdata', (['y'], {}), '(y)\n', (1777, 1780), False, 'from scipy.stats import rankdata\n'), ((3241, 3258), 'rpy2.robjects.packages.importr', 'importr', (['"""Matrix"""'], {}), "('Matrix')\n", (3248, 3258), False, 'from rpy2.robjects.packages import importr\n'), ((3401, 3463), 'rpy2.robjects.r.matrix', 'robjects.r.matrix', (['pdmat'], {'nrow': 'dim[0]', 'ncol': 'dim[1]', 'byrow': '(True)'}), '(pdmat, nrow=dim[0], ncol=dim[1], byrow=True)\n', (3418, 3463), True, 'import rpy2.robjects as robjects\n'), ((3700, 3717), 'rpy2.robjects.r.toString', 'r.toString', (['pdmat'], {}), '(pdmat)\n', (3710, 3717), False, 'from rpy2.robjects import r\n'), ((4052, 4067), 'numpy.array', 'np.array', (['pdmat'], {}), '(pdmat)\n', (4060, 4067), True, 'import numpy as np\n'), ((4080, 4102), 'numpy.reshape', 'np.reshape', (['pdmat', 'dim'], {}), '(pdmat, dim)\n', (4090, 4102), True, 'import numpy as np\n'), ((6487, 6511), 'numpy.arange', 'np.arange', (['pinc', '(1)', 'pinc'], {}), '(pinc, 1, pinc)\n', (6496, 6511), True, 'import numpy as np\n'), ((7534, 7586), 'pandas.DataFrame', 'pd.DataFrame', (['propavg'], {'columns': "['ProbInt', 'FracIn']"}), "(propavg, columns=['ProbInt', 'FracIn'])\n", (7546, 7586), True, 'import pandas as pd\n'), ((7639, 7660), 'numpy.average', 'np.average', (['variances'], {}), '(variances)\n', (7649, 7660), True, 'import numpy as np\n'), ((10546, 10598), 'pandas.DataFrame', 'pd.DataFrame', (['propavg'], {'columns': "['ProbInt', 'FracIn']"}), "(propavg, columns=['ProbInt', 'FracIn'])\n", (10558, 10598), True, 'import pandas as pd\n'), ((10651, 10672), 'numpy.average', 'np.average', (['variances'], {}), '(variances)\n', (10661, 10672), True, 
'import numpy as np\n'), ((12961, 12985), 'numpy.arange', 'np.arange', (['pinc', '(1)', 'pinc'], {}), '(pinc, 1, pinc)\n', (12970, 12985), True, 'import numpy as np\n'), ((5994, 6032), 'numpy.reshape', 'np.reshape', (['truth', '(truth.shape[0], 1)'], {}), '(truth, (truth.shape[0], 1))\n', (6004, 6032), True, 'import numpy as np\n'), ((6255, 6293), 'numpy.concatenate', 'np.concatenate', (['(truth, reals)'], {'axis': '(1)'}), '((truth, reals), axis=1)\n', (6269, 6293), True, 'import numpy as np\n'), ((6309, 6332), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data'}), '(data=data)\n', (6321, 6332), True, 'import pandas as pd\n'), ((6826, 6851), 'pygeostat.cdf', 'gs.cdf', (['values[1:].values'], {}), '(values[1:].values)\n', (6832, 6851), True, 'import pygeostat as gs\n'), ((7456, 7483), 'numpy.average', 'np.average', (['propindic[pint]'], {}), '(propindic[pint])\n', (7466, 7483), True, 'import numpy as np\n'), ((9974, 10012), 'numpy.reshape', 'np.reshape', (['truth', '(truth.shape[0], 1)'], {}), '(truth, (truth.shape[0], 1))\n', (9984, 10012), True, 'import numpy as np\n'), ((10468, 10495), 'numpy.average', 'np.average', (['propindic[pint]'], {}), '(propindic[pint])\n', (10478, 10495), True, 'import numpy as np\n'), ((12488, 12503), 'numpy.array', 'np.array', (['truth'], {}), '(truth)\n', (12496, 12503), True, 'import numpy as np\n'), ((13167, 13222), 'numpy.insert', 'np.insert', (['cdf_x', '[0]', '(cdf_x[0] - (cdf_x[1] - cdf_x[0]))'], {}), '(cdf_x, [0], cdf_x[0] - (cdf_x[1] - cdf_x[0]))\n', (13176, 13222), True, 'import numpy as np\n'), ((6877, 6902), 'numpy.var', 'np.var', (['values[1:].values'], {}), '(values[1:].values)\n', (6883, 6902), True, 'import numpy as np\n'), ((12761, 12776), 'numpy.array', 'np.array', (['reals'], {}), '(reals)\n', (12769, 12776), True, 'import numpy as np\n'), ((7004, 7046), 'pygeostat.z_percentile', 'gs.z_percentile', (['values[0]', 'cdf[0]', 'cdf[1]'], {}), '(values[0], cdf[0], cdf[1])\n', (7019, 7046), True, 'import pygeostat as 
gs\n'), ((13119, 13142), 'numpy.ones', 'np.ones', (['truth.shape[0]'], {}), '(truth.shape[0])\n', (13126, 13142), True, 'import numpy as np\n'), ((13474, 13490), 'numpy.var', 'np.var', (['reals[i]'], {}), '(reals[i])\n', (13480, 13490), True, 'import numpy as np\n'), ((13630, 13649), 'numpy.var', 'np.var', (['reals[i, :]'], {}), '(reals[i, :])\n', (13636, 13649), True, 'import numpy as np\n')] |
import gym
from gym import error, spaces, utils
import numpy as np
from gym.utils import seeding
from ctypes import *
# Load the compiled stoch3 raisim simulation library. The path is relative,
# so the process must be started from the expected working directory.
raisim_dll = CDLL("../gym_learn_wbc/envs/raisim_dll/build/libstoch3_raisim.so")
# Declare argument/return types of the C entry points so ctypes marshals the
# fixed-size arrays correctly.
def init_raisim_dll_functions():
    # _sim(state[10], learning_steps, omega, radius, render_flag, target_vel[2])
    raisim_dll._sim.restype = None
    raisim_dll._sim.argtypes = [c_double*10,c_long,c_double,c_double,c_bool,c_float*2]
    # _reset(initial_base[3])
    raisim_dll._reset.restype = None
    raisim_dll._reset.argtypes = [c_float*3]
class Learn_wbcEnv(gym.Env):
    """Gym environment for learning whole-body-controller parameters of the
    stoch3 robot through raisim rollouts (via ``raisim_dll``).

    Each episode is a single step: the agent proposes a controller
    configuration (omega, radius), one simulation rollout tracks the target
    velocity, and the reward measures how closely the average velocity
    matches the target.
    """
    metadata = {'render.modes': ['human']}

    def __init__(self, learning_steps, target_velocity, render=False):
        """Set up the simulator and the normalized action/state spaces.

        Parameters
        ----------
        learning_steps : int
            Number of simulator steps per rollout.
        target_velocity : sequence of two floats
            Longitudinal and lateral velocity targets.
        render : bool
            Whether raisim should visualize the rollouts.
        """
        print("init")
        # longitudinal and lateral velocity targets
        self.target_velocity = [target_velocity[0], target_velocity[1]]
        # BUG FIX: this flag used to be stored as ``self.render``, which
        # shadowed the ``render()`` method required by the gym.Env API and
        # made ``env.render()`` raise TypeError (a bool is not callable).
        self._render = render
        self.initial_base = (c_float * 3)()
        self.initial_base[0] = 0
        self.initial_base[1] = 0
        self.initial_base[2] = 0.6
        self.learning_steps = learning_steps
        # ranges used to de-normalize the agent's [-1, 1] actions
        self.omega_range = [-5, 5]
        self.radius_range = [0.1, 0.21]
        self.avg_velocity_limits = [-1.5, 1.5]
        # The max distance between either vmin or vmax is checked and is
        # used as the max allowed error when normalizing the state.
        vtx_vmin = np.abs(self.target_velocity[0] - self.avg_velocity_limits[0])
        vtx_vmax = np.abs(self.target_velocity[0] - self.avg_velocity_limits[1])
        vty_vmin = np.abs(self.target_velocity[1] - self.avg_velocity_limits[0])
        vty_vmax = np.abs(self.target_velocity[1] - self.avg_velocity_limits[1])
        if vtx_vmin <= vtx_vmax:
            if vty_vmin <= vty_vmax:
                self.avg_velocity_error_limits = [0, vtx_vmax, 0, vty_vmax]
            else:
                self.avg_velocity_error_limits = [0, vtx_vmax, 0, vty_vmin]
        else:
            if vty_vmin <= vty_vmax:
                self.avg_velocity_error_limits = [0, vtx_vmin, 0, vty_vmax]
            else:
                self.avg_velocity_error_limits = [0, vtx_vmin, 0, vty_vmin]
        self.avg_ang_velocity_limits = [-0.1, 0.1]
        self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(2,))
        # state space: target vel, avg_vel_error, avg_vel, rpy, r_dot p_dot y_dot
        self.observation_space = spaces.Box(low=-1.0, high=1.0, shape=(10,))
        raisim_dll._init_ViSsetup(c_bool(True))
        raisim_dll._init_stoch(self.initial_base, c_int(0))
        init_raisim_dll_functions()

    def step(self, action):
        """Run one rollout with the controller configuration ``action``.

        1. de-normalize the predicted (omega, radius) configuration
        2. run a simulation rollout in raisim tracking the velocity target
        3. calculate the reward based on the rollout
        Returns the normalized state, the reward, done=True (single-step
        episodes), and an empty info dict.
        """
        omega = c_double(0.5 * ((self.omega_range[1] - self.omega_range[0]) * action[0]
                                + (self.omega_range[1] + self.omega_range[0])))
        radius = c_double(0.5 * ((self.radius_range[1] - self.radius_range[0]) * action[1]
                                 + (self.radius_range[1] + self.radius_range[0])))
        state = (c_double * 10)()
        target_velocity = (c_float * 2)()
        target_velocity[0] = self.target_velocity[0]
        target_velocity[1] = self.target_velocity[1]
        raisim_dll._sim(state, self.learning_steps,
                        omega, radius, c_bool(self._render),
                        target_velocity)
        state = np.array(state)
        print("avgVx:", state[0], "avgVy:", state[1])
        reward = self.calculate_reward(state[0:2])
        # clipping to the configured limits
        state[0:2] = np.clip(state[0:2], self.avg_velocity_limits[0], self.avg_velocity_limits[1])
        state[2:3] = np.clip(state[2:3], -1 * self.avg_velocity_error_limits[1], self.avg_velocity_error_limits[1])
        state[3:4] = np.clip(state[3:4], -1 * self.avg_velocity_error_limits[3], self.avg_velocity_error_limits[3])
        state[4:7] = np.clip(state[4:7], -np.pi / 2, np.pi / 2)
        state[7:10] = np.clip(state[7:10], self.avg_ang_velocity_limits[0], self.avg_ang_velocity_limits[1])
        # normalizing - only valid for symmetrical limits
        state[0:2] = (1 / self.avg_velocity_limits[1]) * state[0:2]
        state[2:3] = (1 / self.avg_velocity_error_limits[1]) * state[2:3]
        state[3:4] = (1 / self.avg_velocity_error_limits[3]) * state[3:4]
        state[4:7] = (2 / np.pi) * state[4:7]
        state[7:10] = (1 / self.avg_ang_velocity_limits[1]) * state[7:10]
        return state, reward, True, {}

    def calculate_reward(self, avg_vel):
        """Exponential reward on the absolute average-velocity error."""
        # (removed the previously unused weight_matrix local)
        exp_weight = [-6, -6]
        abs_error = np.absolute(avg_vel - self.target_velocity)
        return np.exp(exp_weight[0] * abs_error[0] + exp_weight[1] * abs_error[1])

    def reset(self):
        """Reset the simulation and return the initial (normalized) state.

        The initial rollout uses actions that de-normalize to omega = 0 and
        radius = 0.
        """
        raisim_dll._reset(self.initial_base)
        radius_zero_action = (self.radius_range[0] + self.radius_range[1]) / (self.radius_range[0] - self.radius_range[1])
        omega_zero_action = (self.omega_range[0] + self.omega_range[1]) / (self.omega_range[0] - self.omega_range[1])
        initial_state = self.step([omega_zero_action, radius_zero_action])[0]
        return initial_state

    def render(self, mode='human'):
        """Rendering is handled by raisim itself (see the ``render`` flag
        passed to ``__init__``); nothing to do here."""

    def close(self):
        """Shut down the simulation."""
        raisim_dll._close()
        print("close")
"numpy.clip",
"numpy.abs",
"numpy.absolute",
"gym.spaces.Box",
"numpy.exp",
"numpy.array"
] | [((1290, 1351), 'numpy.abs', 'np.abs', (['(self.target_velocity[0] - self.avg_velocity_limits[0])'], {}), '(self.target_velocity[0] - self.avg_velocity_limits[0])\n', (1296, 1351), True, 'import numpy as np\n'), ((1367, 1428), 'numpy.abs', 'np.abs', (['(self.target_velocity[0] - self.avg_velocity_limits[1])'], {}), '(self.target_velocity[0] - self.avg_velocity_limits[1])\n', (1373, 1428), True, 'import numpy as np\n'), ((1449, 1510), 'numpy.abs', 'np.abs', (['(self.target_velocity[1] - self.avg_velocity_limits[0])'], {}), '(self.target_velocity[1] - self.avg_velocity_limits[0])\n', (1455, 1510), True, 'import numpy as np\n'), ((1526, 1587), 'numpy.abs', 'np.abs', (['(self.target_velocity[1] - self.avg_velocity_limits[1])'], {}), '(self.target_velocity[1] - self.avg_velocity_limits[1])\n', (1532, 1587), True, 'import numpy as np\n'), ((2072, 2114), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-1.0)', 'high': '(1.0)', 'shape': '(2,)'}), '(low=-1.0, high=1.0, shape=(2,))\n', (2082, 2114), False, 'from gym import error, spaces, utils\n'), ((2232, 2275), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-1.0)', 'high': '(1.0)', 'shape': '(10,)'}), '(low=-1.0, high=1.0, shape=(10,))\n', (2242, 2275), False, 'from gym import error, spaces, utils\n'), ((3339, 3354), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (3347, 3354), True, 'import numpy as np\n'), ((3510, 3587), 'numpy.clip', 'np.clip', (['state[0:2]', 'self.avg_velocity_limits[0]', 'self.avg_velocity_limits[1]'], {}), '(state[0:2], self.avg_velocity_limits[0], self.avg_velocity_limits[1])\n', (3517, 3587), True, 'import numpy as np\n'), ((3603, 3702), 'numpy.clip', 'np.clip', (['state[2:3]', '(-1 * self.avg_velocity_error_limits[1])', 'self.avg_velocity_error_limits[1]'], {}), '(state[2:3], -1 * self.avg_velocity_error_limits[1], self.\n avg_velocity_error_limits[1])\n', (3610, 3702), True, 'import numpy as np\n'), ((3711, 3810), 'numpy.clip', 'np.clip', (['state[3:4]', '(-1 * 
self.avg_velocity_error_limits[3])', 'self.avg_velocity_error_limits[3]'], {}), '(state[3:4], -1 * self.avg_velocity_error_limits[3], self.\n avg_velocity_error_limits[3])\n', (3718, 3810), True, 'import numpy as np\n'), ((3819, 3861), 'numpy.clip', 'np.clip', (['state[4:7]', '(-np.pi / 2)', '(np.pi / 2)'], {}), '(state[4:7], -np.pi / 2, np.pi / 2)\n', (3826, 3861), True, 'import numpy as np\n'), ((3873, 3964), 'numpy.clip', 'np.clip', (['state[7:10]', 'self.avg_ang_velocity_limits[0]', 'self.avg_ang_velocity_limits[1]'], {}), '(state[7:10], self.avg_ang_velocity_limits[0], self.\n avg_ang_velocity_limits[1])\n', (3880, 3964), True, 'import numpy as np\n'), ((4398, 4418), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (4406, 4418), True, 'import numpy as np\n'), ((4459, 4502), 'numpy.absolute', 'np.absolute', (['(avg_vel - self.target_velocity)'], {}), '(avg_vel - self.target_velocity)\n', (4470, 4502), True, 'import numpy as np\n'), ((4528, 4595), 'numpy.exp', 'np.exp', (['(exp_weight[0] * abs_error[0] + exp_weight[1] * abs_error[1])'], {}), '(exp_weight[0] * abs_error[0] + exp_weight[1] * abs_error[1])\n', (4534, 4595), True, 'import numpy as np\n')] |
"""Core functionality for foamPy."""
from __future__ import division, print_function
import numpy as np
import os
import re
import datetime
import sys
import time
import subprocess
import pandas
import glob
from .dictionaries import *
from .templates import *
def gen_stripped_lines(fpath):
    """Yield the lines of *fpath* with every parenthesis replaced by a space."""
    strip_parens = str.maketrans("()", "  ")
    with open(fpath) as handle:
        for raw in handle:
            yield raw.translate(strip_parens)
def load_forces(casedir="./", object_name="forces", start_time=0):
    """Load forces and moments from an OpenFOAM forces function object into a
    pandas DataFrame.

    Columns are ``time``, one column per (force/moment, component,
    contribution) triple such as ``fx_pressure``, plus the summed totals
    ``fx`` ... ``mz``.
    """
    pattern = os.path.join(
        casedir,
        "postProcessing/{}/{}/forces*.dat".format(object_name, start_time)
    )
    # Use the newest forces file if the run produced several
    fpath = sorted(glob.glob(pattern))[-1]
    data = np.loadtxt(gen_stripped_lines(fpath))
    df = pandas.DataFrame()
    df["time"] = data[:, 0]
    # Raw column layout: time, then pressure/viscous/porous (x, y, z)
    # triplets for forces (cols 1-9) and moments (cols 10-18).
    contributions = ("pressure", "viscous", "porous")
    for prefix, base in (("f", 1), ("m", 10)):
        for k, comp in enumerate("xyz"):
            for j, contrib in enumerate(contributions):
                df[prefix + comp + "_" + contrib] = data[:, base + 3 * j + k]
    # Total components are the sum of all three contributions
    for prefix in ("f", "m"):
        for comp in "xyz":
            name = prefix + comp
            df[name] = (df[name + "_pressure"] + df[name + "_viscous"]
                        + df[name + "_porous"])
    return df
def load_probes_data(casedir="./", object_name="probes", start_time=0,
                     field_name="U"):
    """Load probes data as a pandas ``DataFrame``.

    Each probe becomes one column keyed by its location tuple; vector fields
    are stored as tuples per sample, scalar fields as plain values.

    Parameters
    ----------
    casedir : str
        Case directory containing ``postProcessing``.
    object_name : str
        Name of the probes function object.
    start_time : int or float
        Time directory to read from.
    field_name : str
        Sampled field (e.g. ``"U"`` or ``"p"``).
    """
    fpath = os.path.join(casedir, "postProcessing", object_name,
                         str(start_time), field_name)
    # First get probe locations to use as column names
    with open(fpath) as f:
        txt = f.read()
    probe_lines = re.findall(r"# Probe \d.*\n", txt)
    probe_locs = []
    for line in probe_lines:
        probe_locs.append(line.split("(")[-1].split(")")[0].split())
    # Strip the parentheses around vector components so loadtxt can parse
    data = np.loadtxt(
        line.replace("(", " ").replace(")", " ") for line in txt.splitlines()
    )
    df = pandas.DataFrame()
    df["time"] = data[:, 0]
    # Number of components per probe (1 for scalars, 3 for vectors)
    nprobes = len(probe_locs)
    dims = (data.shape[1] - 1) // nprobes
    for n, probe_loc in enumerate(probe_locs):
        probe_loc = [float(pl) for pl in probe_loc]
        # BUG FIX: the previous slice data[:, n+1:n+dims+1] only advanced one
        # column per probe, so for vector fields every probe after the first
        # read overlapping (wrong) columns. Each probe owns `dims` columns.
        d = data[:, 1 + n * dims:1 + (n + 1) * dims]
        if dims > 1:
            d = [tuple(p) for p in d]
        df[tuple(probe_loc)] = d
    return df
def load_torque_drag(casedir="", folder="0", filename=None,
                     torque_axis="z", drag_axis="x"):
    """Load time, torque, and drag force from the specified forces folder.

    Case name can be left empty if running within a case folder.

    Torque is the pressure + viscous moment about ``torque_axis``; drag is
    the pressure + viscous force along ``drag_axis``. Both axes may be any
    of "x", "y", "z" (previously only z/x torque and x drag were supported
    and any other value raised NameError).
    """
    if casedir:
        casedir += "/"
    if not filename:
        filename = "forces.dat"
    fpath = casedir + "postProcessing/forces/" + str(folder) + "/" + filename
    # Parse numeric rows; the file wraps vectors in parentheses and may use
    # commas as separators
    rows = []
    with open(fpath, "r") as f:
        for line in f:
            tokens = (line.replace("(", "").replace(")", "")
                      .replace(",", " ").split())
            # Skip headers, blank lines, and incomplete (partially written)
            # rows -- the old code crashed with IndexError on these
            if not tokens or tokens[0] == "#" or len(tokens) < 19:
                continue
            rows.append([float(v) for v in tokens[:19]])
    if not rows:
        # Empty file: return empty arrays, as the original implicitly did
        return np.array([]), np.array([]), np.array([])
    data = np.asarray(rows)
    # Column layout: 0 time; 1-3 pressure force; 4-6 viscous force;
    # 7-9 porous force; 10-12 pressure moment; 13-15 viscous moment;
    # 16-18 porous moment
    component = {"x": 0, "y": 1, "z": 2}
    tc = component[torque_axis]
    dc = component[drag_axis]
    t = data[:, 0]
    torque = data[:, 10 + tc] + data[:, 13 + tc]
    drag = data[:, 1 + dc] + data[:, 4 + dc]
    return t, torque, drag
def load_all_torque_drag(casedir="", torque_axis="z", drag_axis="x"):
    """Concatenate torque/drag time series from every file in every forces
    time folder of the case."""
    t = np.array([])
    torque = np.array([])
    drag = np.array([])
    if casedir:
        casedir += "/"
    forces_dir = casedir + "postProcessing/forces"
    for folder in sorted(os.listdir(forces_dir)):
        for fname in sorted(os.listdir(forces_dir + "/" + folder)):
            t1, torque1, drag1 = load_torque_drag(casedir=casedir,
                                                  folder=folder,
                                                  filename=fname,
                                                  torque_axis=torque_axis,
                                                  drag_axis=drag_axis)
            t = np.append(t, t1)
            torque = np.append(torque, torque1)
            drag = np.append(drag, drag1)
    return t, torque, drag
def load_theta_omega(casedir="", t_interp=None, theta_units="degrees"):
    """Import omega from the ``dynamicMeshDict`` table.

    Returns ``t, theta, omega`` (omega in rad/s) where theta is obtained by
    cumulative trapezoidal integration of omega.

    Parameters
    ----------
    casedir : str
        Case directory (current directory if empty).
    t_interp : array-like, optional
        If given, omega and theta are interpolated onto this time vector.
        (Default changed from a mutable ``[]`` to ``None``; behavior is
        unchanged.)
    theta_units : str
        ``"degrees"`` (default) converts theta; any other value leaves
        radians.
    """
    t = []
    omega = []
    if casedir != "":
        casedir += "/"
    with open(casedir + "constant/dynamicMeshDict", "r") as f:
        # Table rows look like "(time omega)"; keep lines with exactly two
        # numbers
        regex = r"\d+.\d+"
        for line in f.readlines():
            match = re.findall(regex, line)
            if len(match) == 2:
                t.append(float(match[0]))
                omega.append(float(match[1]))
    omega = np.asarray(omega)
    t = np.asarray(t)
    # Integrate omega to obtain theta.
    # BUG FIX: the old loop used np.trapz(omega[:n], t[:n]), which excludes
    # the current sample, so theta lagged the true integral by one step.
    # theta[n] is now the trapezoidal integral of omega over t[0]..t[n]
    # (computed cumulatively in O(n) instead of O(n^2)).
    theta = np.zeros(len(t))
    if len(t) > 1:
        theta[1:] = np.cumsum(0.5 * (omega[1:] + omega[:-1]) * np.diff(t))
    # If provided, interpolate omega and theta to match the t_interp vector
    if t_interp is not None and len(t_interp) > 0:
        omega = np.interp(t_interp, t, omega)
        theta = np.interp(t_interp, t, theta)
    if theta_units == "degrees":
        theta = theta / np.pi * 180
    return t, theta, omega
def load_set(casedir="./", name="profile", quantity="U", fmt="xy", axis="xyz"):
    """Import text data created with the OpenFOAM sample utility.

    Parameters
    ----------
    casedir : str
        Case directory containing ``postProcessing/sets``.
    name, quantity, fmt : str
        Select the file ``{name}_{quantity}.{fmt}`` inside each time
        directory.
    axis : str
        Coordinate columns present in the file ("xyz" or a single axis).

    Returns
    -------
    dict
        ``{"time": [...]}`` plus one entry per time value mapping to a dict
        of coordinate (and, for ``quantity="U"``, velocity) arrays.
    """
    folder = os.path.join(casedir, "postProcessing", "sets")
    t = []
    # BUG FIX: the previous loop removed entries from the list while
    # iterating over it and then unconditionally converted every entry, so
    # any non-numeric directory name raised an uncaught ValueError.
    for entry in os.listdir(folder):
        try:
            float(entry)
        except ValueError:
            continue  # not a time directory
        try:
            t.append(int(entry))
        except ValueError:
            t.append(float(entry))
    t.sort()
    data = {"time": t}
    for ts in t:
        filename = "{folder}/{time}/{name}_{q}.{fmt}".format(
            folder=folder, time=ts, name=name, q=quantity, fmt=fmt)
        with open(filename) as f:
            d = np.loadtxt(f)
        # BUG FIX: initialize the per-time dict unconditionally so
        # quantities other than "U" do not hit a KeyError below
        data[ts] = {}
        if quantity == "U":
            data[ts]["u"] = d[:, len(axis)]
            data[ts]["v"] = d[:, len(axis) + 1]
            data[ts]["w"] = d[:, len(axis) + 2]
        if len(axis) == 1:
            data[ts][axis] = d[:, 0]
        else:
            data[ts]["x"] = d[:, 0]
            data[ts]["y"] = d[:, 1]
            data[ts]["z"] = d[:, 2]
    return data
def load_sample_xy(casedir="./", profile="U"):
    """Import profile data created with the OpenFOAM sample utility.

    Supports ``profile="U"`` (velocity components) and ``profile="R"``
    (Reynolds stresses). Returns a dict with the time array ``"t"``, the
    coordinate ``"y"``, and one 2-D array per component with shape
    ``(len(y), n_times)``.
    """
    folder = os.path.join(casedir, "postProcessing", "sets")
    t = []
    # BUG FIX: as in load_set, filter non-numeric directory names up front
    # instead of mutating the list while iterating over it (which raised an
    # uncaught ValueError for any non-time directory).
    for entry in os.listdir(folder):
        try:
            float(entry)
        except ValueError:
            continue
        try:
            t.append(int(entry))
        except ValueError:
            t.append(float(entry))
    t.sort()
    ntimes = len(t)
    # Load a y vector from a single file since they are identical
    with open(folder + "/0/profile_" + profile + ".xy") as f:
        y = np.loadtxt(f)[:, 0]
    if profile == "U":
        u = np.zeros((len(y), ntimes))
        v = np.zeros((len(y), ntimes))
    elif profile == "R":
        uu = np.zeros((len(y), ntimes))
        uv = np.zeros((len(y), ntimes))
        uw = np.zeros((len(y), ntimes))
        vv = np.zeros((len(y), ntimes))
        vw = np.zeros((len(y), ntimes))
        ww = np.zeros((len(y), ntimes))
    for n in range(ntimes):
        with open(folder + "/" + str(t[n]) + "/profile_" + profile + ".xy") as f:
            data = np.loadtxt(f)
        if profile == "U":
            u[:, n] = data[:, 1]
            v[:, n] = data[:, 2]
        elif profile == "R":
            uu[:, n] = data[:, 1]
            uv[:, n] = data[:, 2]
            uw[:, n] = data[:, 3]
            vv[:, n] = data[:, 4]
            vw[:, n] = data[:, 5]
            ww[:, n] = data[:, 6]
    t = np.asarray(t, dtype=float)
    if profile == "U":
        data = {"t": t, "u": u, "v": v, "y": y}
    elif profile == "R":
        # BUG FIX: uw and vw were loaded but silently dropped before
        data = {"t": t, "uu": uu, "vv": vv, "ww": ww,
                "uv": uv, "uw": uw, "vw": vw, "y": y}
    return data
def get_endtime():
    """Return the run ``endTime`` parsed from ``system/controlDict``.

    The last ``endTime`` entry in the file wins.
    """
    with open("system/controlDict", "r") as f:
        for raw in f:
            tokens = raw.replace(";", "").split()
            if tokens[:1] == ["endTime"]:
                endtime = float(tokens[1])
    return endtime
def get_deltat(casedir="./"):
    """Return the run ``deltaT`` parsed from ``system/controlDict``.

    The last ``deltaT`` entry in the file wins.
    """
    fpath = os.path.join(casedir, "system", "controlDict")
    with open(fpath) as f:
        for raw in f:
            tokens = raw.replace(";", "").split()
            if tokens[:1] == ["deltaT"]:
                deltat = float(tokens[1])
    return deltat
def get_ncells(casedir="./", logname="log.checkMesh", keyword="cells",
               autogen=True):
    """Return a count (default: number of cells) parsed from a checkMesh log.

    If the log does not exist and ``autogen`` is True, ``checkMesh`` is run
    inside ``casedir`` first to create it. The last matching line wins.
    """
    fpath = os.path.join(casedir, logname)
    if autogen and not os.path.isfile(fpath):
        startdir = os.getcwd()
        os.chdir(casedir)
        run("checkMesh", args="-time 0")
        os.chdir(startdir)
    if keyword == "cells":
        keyword = "cells:"  # checkMesh prints the keyword with a colon
    with open(fpath) as f:
        for raw in f:
            tokens = raw.split()
            if tokens and tokens[0] == keyword:
                value = tokens[1]
    return int(value)
def get_max_courant_no():
    """Return ``maxCo`` from ``system/controlDict`` (first match), or None
    if the entry is absent."""
    with open("system/controlDict") as f:
        for raw in f:
            if ";" not in raw:
                continue
            tokens = raw.replace(";", " ").split()
            if tokens[0] == "maxCo":
                return float(tokens[1])
def read_dict(dictname=None, dictpath=None, casedir="./"):
    """Crudely read an OpenFOAM dictionary into a Python dict.

    Only single-word ``keyword value;`` entries are captured. Either pass an
    explicit ``dictpath`` or a known ``dictname`` (looked up in the module's
    ``system_dicts``/``constant_dicts`` tables relative to ``casedir``).
    """
    foamdict = {}
    if dictpath is None and dictname is not None:
        if dictname in system_dicts:
            subpath = "system/" + dictname
        elif dictname in constant_dicts:
            subpath = "constant/" + dictname
        dictpath = os.path.join(casedir, subpath)
    with open(dictpath) as f:
        for raw in f:
            if ";" not in raw:
                continue
            tokens = raw.replace(";", "").split()
            if len(tokens) > 1:
                foamdict[tokens[0]] = tokens[1]
    return foamdict
def read_case():
    """Placeholder: will eventually read every case dictionary and return
    them as a hierarchy of dicts."""
    return None
def gen_dynmeshdict(U, R, meantsr, cellzone="AMIsurface", rpm_fluc=3.7,
                    npoints=400, axis="(0 0 1)", direction=1):
    """Generates a dynamicMeshDict for a given U, R, meantsr, and an optional
    rpm fluctuation amplitude. Phase is fixed.

    When ``npoints > 0`` the rotation rate is written as a time table with a
    sinusoidal fluctuation about the mean; otherwise a constant rotation
    rate of ``direction*meanomega`` is written.
    """
    # Mean rotation rate (rad/s) implied by the mean tip speed ratio
    meanomega = meantsr*U/R
    if npoints > 0:
        # Fluctuation amplitude converted from RPM to rad/s
        amp_omega = rpm_fluc*2*np.pi/60.0
        endtime = get_endtime()
        t = np.linspace(0, endtime, npoints)
        # NOTE(review): fluctuation frequency is 3*omega with a fixed phase
        # offset of pi/1.2 -- presumably tied to a 3-bladed rotor; confirm
        omega = meanomega + amp_omega*np.sin(3*meanomega*t - np.pi/1.2)
    # Write to file
    top = \
    r"""/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / <NAME>eld | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: 2.3.x |
| \\ / A nd | Web: www.OpenFOAM.org |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class dictionary;
object dynamicMeshDict;
}
dynamicFvMesh solidBodyMotionFvMesh;
motionSolverLibs ("libfvMotionSolvers.so");
solidBodyMotionFvMeshCoeffs
{
cellZone """ + cellzone +""";
solidBodyMotionFunction rotatingMotion;
rotatingMotionCoeffs
{
origin\t\t(0 0 0);
axis\t\t""" + axis + ";\n"
    if npoints > 0:
        top += """ omega\t\ttable
(
"""
        """Table should be in form
(t0 omega0)
(t1 omega1)
"""
        # Append one "(time omega)" row per sample point
        table = ""
        for n in range(len(t)-1):
            table += " (" + str(t[n]) + " " + str(omega[n]) + ")\n"
        table += " (" + str(t[-1]) + " " + str(omega[-1]) + ")"
        bottom = """
);
}
}"""
        alltxt = top + table + bottom
    else:
        # Constant rotation rate; direction flips the sign
        alltxt = top + "\n omega\t\t" + str(direction*meanomega)\
            + ";\n }\n}\n"
    with open("constant/dynamicMeshDict", "w") as f:
        f.write(alltxt)
def get_solver_times(casedir="./", solver=None, log_fpath=None, window=400):
    """Read last N lines from file solver log and return t (current Time),
    `deltaT`, and `clockTime`.

    Returns a dict with lists ``"time"``, ``"delta_t"``, ``"exectime"`` and
    ``"clocktime"`` parsed from the last *window* lines of the solver log.
    """
    # Resolve the log file path when it is not given explicitly
    if log_fpath is None and solver is None:
        # NOTE(review): this first candidate path is relative to the current
        # working directory, not ``casedir`` -- confirm that is intentional
        log_fpath = "log." + read_dict("controlDict",
                                       casedir=casedir)["application"]
        if not os.path.isfile(log_fpath):
            log_fpath = glob.glob(os.path.join(casedir, "log.*Foam"))[0]
    elif log_fpath is None and solver is not None:
        log_fpath = os.path.join(casedir, "log." + solver)
    with open(log_fpath, "rb") as f:
        BUFSIZ = 1024
        # True if open() was overridden and file was opened in text
        # mode. In that case readlines() will return unicode strings
        # instead of bytes.
        encoded = getattr(f, "encoding", False)
        CR = "\n" if encoded else b"\n"
        data = "" if encoded else b""
        f.seek(0, os.SEEK_END)
        fsize = f.tell()
        block = -1
        exit = False  # NOTE: local flag shadows the ``exit`` builtin
        # Read fixed-size blocks backwards from the end of the file until at
        # least ``window`` lines have been collected (or the file is exhausted)
        while not exit:
            step = (block * BUFSIZ)
            if abs(step) >= fsize:
                # Whole file is smaller than the requested span
                f.seek(0)
                newdata = f.read(BUFSIZ - (abs(step) - fsize))
                exit = True
            else:
                f.seek(step, os.SEEK_END)
                newdata = f.read(BUFSIZ)
            data = newdata + data
            if data.count(CR) >= window:
                break
            else:
                block -= 1
    log = data.splitlines()[-window:]
    t = []
    clocktime = []
    exectime = []
    deltat = []
    # Parse progress lines such as "Time = 1.0",
    # "ExecutionTime = ... s  ClockTime = ... s" and "deltaT = ..."
    for entry in log:
        try:
            line = entry.split()
            if line[0] == b"Time":
                t.append(float(line[-1]))
            if b"ClockTime" in line:
                clocktime.append(float(line[-2]))
            if b"ExecutionTime" in line:
                exectime.append(float(line[2]))
            if b"deltaT" in line:
                deltat.append(float(line[-1]))
        except:
            # Lines that are empty or do not parse are skipped on purpose
            pass
    return {"time": t, "delta_t": deltat, "exectime": exectime,
            "clocktime": clocktime}
def monitor_progress():
    """Monitor solver progress.

    Prints a one-line status (percent done, solve rate in wall-hours per
    simulated second, estimated time remaining) once per second until a time
    directory equal to ``endTime`` appears or the user hits Ctrl-C.
    """
    controldict = read_dict("controlDict")
    endtime = float(controldict["endTime"])
    done = False
    try:
        while not done:
            # The run is finished once a time directory equal to endTime exists
            for d in os.listdir("./"):
                try:
                    if float(d) == endtime:
                        done = True
                except:
                    pass
            solver_times = get_solver_times()
            t = solver_times["time"]
            deltat = solver_times["delta_t"]
            exectime = solver_times["exectime"]
            try:
                # Average wall time per logged step over the last two steps
                t_per_step = exectime[-1] - exectime[-2]
                tps2 = exectime[-2] - exectime[-3]
                t_per_step = (t_per_step + tps2)/2
            except IndexError:
                # Not enough samples in the default window; look further back
                solver_times = get_solver_times(window=2000)
                t = solver_times["time"]
                deltat = solver_times["delta_t"]
                exectime = solver_times["exectime"]
                t_per_step = exectime[-1] - exectime[-2]
            try:
                deltat = deltat[-1]
            except IndexError:
                # Log has no deltaT lines (fixed time step); read controlDict
                deltat = get_deltat()
            percent_done = int(t[-1]/endtime*100)
            # solve_rate: wall-clock hours per simulated second
            time_left, solve_rate = endtime - t[-1], t_per_step/deltat/3600
            slt = time_left*solve_rate
            # Drop microseconds from the timedelta string
            solve_time_left = str(datetime.timedelta(hours=slt))[:-7]
            print("\r" + " "*66, end="")
            print("\r[{}%] - solving at {:0.2f} h/s - {} remaining".format\
                  (percent_done, solve_rate, solve_time_left), end="")
            time.sleep(1)
        print("\nEnd")
    except KeyboardInterrupt:
        print("")
def read_log_end(logname, nlines=20):
    """Return the last *nlines* lines of ``log.<logname>`` (in the current
    directory) as a list of decoded strings."""
    with open("log." + logname, "rb") as f:
        tail = f.read().splitlines()[-nlines:]
    return [line.decode("utf-8") for line in tail]
def get_n_processors(casedir="./", dictpath="system/decomposeParDict"):
    """Return ``numberOfSubdomains`` from ``decomposeParDict`` (first match),
    or None if the entry is absent."""
    fpath = os.path.join(casedir, dictpath)
    with open(fpath) as f:
        for raw in f:
            tokens = raw.strip().replace(";", " ").split()
            if tokens and tokens[0] == "numberOfSubdomains":
                return int(tokens[1])
def run(appname, tee=False, logname=None, parallel=False, nproc=None, args=[],
        overwrite=False, append=False):
    """Run an application.

    Builds the shell command for ``appname`` (optionally under ``mpirun``)
    and executes it with its output redirected to a log file.

    Parameters
    ----------
    appname : str
        Name of the OpenFOAM application to run.
    tee : bool
        Pipe output through ``tee`` so it is shown and logged.
    logname : str, optional
        Log file name; defaults to ``"log." + appname``.
    parallel : bool
        Run under ``mpirun`` with the ``-parallel`` flag.
    nproc : int, optional
        Number of processors; read from decomposeParDict if omitted and
        ``parallel`` is set. Any value > 1 forces parallel mode.
    args : list or str
        Extra command-line arguments for the application.
        NOTE(review): mutable default ``[]`` -- never mutated here, but
        consider ``None``.
    overwrite, append : bool
        Control what happens when the log file already exists.

    Raises
    ------
    IOError
        If the log file exists and neither overwrite nor append is set.
    """
    if logname is None:
        logname = "log." + appname
    if os.path.isfile(logname) and not overwrite and not append:
        raise IOError(logname + " exists; remove or use overwrite=True")
    # nproc > 1 implies a parallel run
    if nproc is not None:
        if nproc > 1:
            parallel = True
    if parallel and nproc is None:
        nproc = get_n_processors()
    if isinstance(args, list):
        args = " ".join(args)
    if parallel:
        cmd = "mpirun -np {nproc} {app} -parallel {args}"
    else:
        cmd = "{app} {args}"
    # Choose how output is captured: tee (with -a to append) or plain
    # redirection (">>" to append)
    if tee:
        cmd += " 2>&1 | tee {logname}"
        if append:
            cmd += " -a"
    else:
        cmd += " > {logname} 2>&1"
        if append:
            cmd = cmd.replace(" > ", " >> ")
    if parallel:
        print("Running {appname} on {n} processors".format(appname=appname,
                                                           n=nproc))
    else:
        print("Running " + appname)
    subprocess.call(cmd.format(nproc=nproc, app=appname, args=args,
                               logname=logname), shell=True)
def run_parallel(appname, **kwargs):
    """Convenience wrapper around :func:`run` with ``parallel=True``."""
    run(appname, parallel=True, **kwargs)
def summary(casedir="./", **extra_params):
    """Summarize a case and return as a pandas Series.

    Parameters
    ----------
    casedir : str
        Case directory to be summarized.
    extra_params : dict
        Extra key/value pairs appended to the summary Series.
    """
    info = pandas.Series()
    info["delta_t"] = get_deltat(casedir=casedir)
    info["n_cells"] = get_ncells(casedir=casedir)
    times = get_solver_times(casedir=casedir)
    # Latest entry of each parsed time series
    for dest, src in (("simulated_time", "time"),
                      ("clocktime", "clocktime"),
                      ("exectime", "exectime")):
        info[dest] = times[src][-1]
    for key, val in extra_params.items():
        info[key] = val
    return info
def clean(leave_mesh=False, remove_zero=False, extra=None):
    """Clean an OpenFOAM case directory.

    Parameters
    ----------
    leave_mesh : bool
        Keep the mesh: only clean time directories and dynamic code.
    remove_zero : bool
        Also remove the ``0`` directory.
    extra : str or list, optional
        Additional path(s) to remove with ``rm -rf``.
    """
    if extra is None:
        # Avoid the mutable-default-argument pitfall; [] behaves as before.
        extra = []
    if not leave_mesh:
        subprocess.call(". $WM_PROJECT_DIR/bin/tools/CleanFunctions && "
                        "cleanCase", shell=True)
    else:
        subprocess.call(". $WM_PROJECT_DIR/bin/tools/CleanFunctions && "
                        "cleanTimeDirectories && cleanDynamicCode", shell=True)
    subprocess.call("rm -rf postProcessing", shell=True)
    if remove_zero:
        subprocess.call("rm -rf 0", shell=True)
    if extra:
        # Accept a single path or a list of paths.
        if not isinstance(extra, list):
            extra = [extra]
        for item in extra:
            print("Removing", item)
            subprocess.call("rm -rf {}".format(item), shell=True)
| [
"time.sleep",
"numpy.array",
"numpy.sin",
"datetime.timedelta",
"os.listdir",
"numpy.asarray",
"numpy.linspace",
"subprocess.call",
"pandas.DataFrame",
"glob.glob",
"numpy.trapz",
"os.path.isfile",
"numpy.interp",
"re.findall",
"pandas.Series",
"os.path.join",
"os.getcwd",
"os.chdi... | [((776, 794), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (792, 794), False, 'import pandas\n'), ((2130, 2165), 're.findall', 're.findall', (['"""# Probe \\\\d.*\\\\n"""', 'txt'], {}), "('# Probe \\\\d.*\\\\n', txt)\n", (2140, 2165), False, 'import re\n'), ((2341, 2359), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (2357, 2359), False, 'import pandas\n'), ((4552, 4565), 'numpy.asarray', 'np.asarray', (['t'], {}), '(t)\n', (4562, 4565), True, 'import numpy as np\n'), ((6404, 6421), 'numpy.asarray', 'np.asarray', (['omega'], {}), '(omega)\n', (6414, 6421), True, 'import numpy as np\n'), ((6430, 6443), 'numpy.asarray', 'np.asarray', (['t'], {}), '(t)\n', (6440, 6443), True, 'import numpy as np\n'), ((7014, 7061), 'os.path.join', 'os.path.join', (['casedir', '"""postProcessing"""', '"""sets"""'], {}), "(casedir, 'postProcessing', 'sets')\n", (7026, 7061), False, 'import os\n'), ((7085, 7103), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (7095, 7103), False, 'import os\n'), ((8146, 8193), 'os.path.join', 'os.path.join', (['casedir', '"""postProcessing"""', '"""sets"""'], {}), "(casedir, 'postProcessing', 'sets')\n", (8158, 8193), False, 'import os\n'), ((8217, 8235), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (8227, 8235), False, 'import os\n'), ((9524, 9550), 'numpy.asarray', 'np.asarray', (['t'], {'dtype': 'float'}), '(t, dtype=float)\n', (9534, 9550), True, 'import numpy as np\n'), ((10135, 10181), 'os.path.join', 'os.path.join', (['casedir', '"""system"""', '"""controlDict"""'], {}), "(casedir, 'system', 'controlDict')\n", (10147, 10181), False, 'import os\n'), ((10523, 10553), 'os.path.join', 'os.path.join', (['casedir', 'logname'], {}), '(casedir, logname)\n', (10535, 10553), False, 'import os\n'), ((19318, 19349), 'os.path.join', 'os.path.join', (['casedir', 'dictpath'], {}), '(casedir, dictpath)\n', (19330, 19349), False, 'import os\n'), ((21229, 21244), 'pandas.Series', 'pandas.Series', 
([], {}), '()\n', (21242, 21244), False, 'import pandas\n'), ((4955, 4967), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4963, 4967), True, 'import numpy as np\n'), ((4969, 4981), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4977, 4981), True, 'import numpy as np\n'), ((4983, 4995), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4991, 4995), True, 'import numpy as np\n'), ((5048, 5093), 'os.listdir', 'os.listdir', (["(casedir + 'postProcessing/forces')"], {}), "(casedir + 'postProcessing/forces')\n", (5058, 5093), False, 'import os\n'), ((6558, 6584), 'numpy.trapz', 'np.trapz', (['omega[:n]', 't[:n]'], {}), '(omega[:n], t[:n])\n', (6566, 6584), True, 'import numpy as np\n'), ((6682, 6711), 'numpy.interp', 'np.interp', (['t_interp', 't', 'omega'], {}), '(t_interp, t, omega)\n', (6691, 6711), True, 'import numpy as np\n'), ((6728, 6757), 'numpy.interp', 'np.interp', (['t_interp', 't', 'theta'], {}), '(t_interp, t, theta)\n', (6737, 6757), True, 'import numpy as np\n'), ((10620, 10631), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10629, 10631), False, 'import os\n'), ((10640, 10657), 'os.chdir', 'os.chdir', (['casedir'], {}), '(casedir)\n', (10648, 10657), False, 'import os\n'), ((10707, 10726), 'os.chdir', 'os.chdir', (['start_dir'], {}), '(start_dir)\n', (10715, 10726), False, 'import os\n'), ((11682, 11706), 'os.path.join', 'os.path.join', (['casedir', 'p'], {}), '(casedir, p)\n', (11694, 11706), False, 'import os\n'), ((12489, 12521), 'numpy.linspace', 'np.linspace', (['(0)', 'endtime', 'npoints'], {}), '(0, endtime, npoints)\n', (12500, 12521), True, 'import numpy as np\n'), ((19831, 19854), 'os.path.isfile', 'os.path.isfile', (['logname'], {}), '(logname)\n', (19845, 19854), False, 'import os\n'), ((21692, 21782), 'subprocess.call', 'subprocess.call', (['""". $WM_PROJECT_DIR/bin/tools/CleanFunctions && cleanCase"""'], {'shell': '(True)'}), "('. 
$WM_PROJECT_DIR/bin/tools/CleanFunctions && cleanCase',\n shell=True)\n", (21707, 21782), False, 'import subprocess\n'), ((21824, 21951), 'subprocess.call', 'subprocess.call', (['""". $WM_PROJECT_DIR/bin/tools/CleanFunctions && cleanTimeDirectories && cleanDynamicCode"""'], {'shell': '(True)'}), "(\n '. $WM_PROJECT_DIR/bin/tools/CleanFunctions && cleanTimeDirectories && cleanDynamicCode'\n , shell=True)\n", (21839, 21951), False, 'import subprocess\n'), ((21977, 22029), 'subprocess.call', 'subprocess.call', (['"""rm -rf postProcessing"""'], {'shell': '(True)'}), "('rm -rf postProcessing', shell=True)\n", (21992, 22029), False, 'import subprocess\n'), ((22058, 22097), 'subprocess.call', 'subprocess.call', (['"""rm -rf 0"""'], {'shell': '(True)'}), "('rm -rf 0', shell=True)\n", (22073, 22097), False, 'import subprocess\n'), ((690, 712), 'glob.glob', 'glob.glob', (['glob_string'], {}), '(glob_string)\n', (699, 712), False, 'import glob\n'), ((5143, 5198), 'os.listdir', 'os.listdir', (["(casedir + 'postProcessing/forces/' + folder)"], {}), "(casedir + 'postProcessing/forces/' + folder)\n", (5153, 5198), False, 'import os\n'), ((5576, 5592), 'numpy.append', 'np.append', (['t', 't1'], {}), '(t, t1)\n', (5585, 5592), True, 'import numpy as np\n'), ((5614, 5640), 'numpy.append', 'np.append', (['torque', 'torque1'], {}), '(torque, torque1)\n', (5623, 5640), True, 'import numpy as np\n'), ((5660, 5682), 'numpy.append', 'np.append', (['drag', 'drag1'], {}), '(drag, drag1)\n', (5669, 5682), True, 'import numpy as np\n'), ((6250, 6273), 're.findall', 're.findall', (['regex', 'line'], {}), '(regex, line)\n', (6260, 6273), False, 'import re\n'), ((7566, 7579), 'numpy.loadtxt', 'np.loadtxt', (['f'], {}), '(f)\n', (7576, 7579), True, 'import numpy as np\n'), ((8612, 8625), 'numpy.loadtxt', 'np.loadtxt', (['f'], {}), '(f)\n', (8622, 8625), True, 'import numpy as np\n'), ((9152, 9165), 'numpy.loadtxt', 'np.loadtxt', (['f'], {}), '(f)\n', (9162, 9165), True, 'import numpy as np\n'), 
((10565, 10586), 'os.path.isfile', 'os.path.isfile', (['fpath'], {}), '(fpath)\n', (10579, 10586), False, 'import os\n'), ((14637, 14662), 'os.path.isfile', 'os.path.isfile', (['log_fpath'], {}), '(log_fpath)\n', (14651, 14662), False, 'import os\n'), ((14808, 14846), 'os.path.join', 'os.path.join', (['casedir', "('log.' + solver)"], {}), "(casedir, 'log.' + solver)\n", (14820, 14846), False, 'import os\n'), ((16604, 16620), 'os.listdir', 'os.listdir', (['"""./"""'], {}), "('./')\n", (16614, 16620), False, 'import os\n'), ((17974, 17987), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (17984, 17987), False, 'import time\n'), ((4621, 4636), 'numpy.asarray', 'np.asarray', (['mpz'], {}), '(mpz)\n', (4631, 4636), True, 'import numpy as np\n'), ((4639, 4654), 'numpy.asarray', 'np.asarray', (['mvz'], {}), '(mvz)\n', (4649, 4654), True, 'import numpy as np\n'), ((4799, 4814), 'numpy.asarray', 'np.asarray', (['fpx'], {}), '(fpx)\n', (4809, 4814), True, 'import numpy as np\n'), ((4817, 4832), 'numpy.asarray', 'np.asarray', (['fvx'], {}), '(fvx)\n', (4827, 4832), True, 'import numpy as np\n'), ((12560, 12599), 'numpy.sin', 'np.sin', (['(3 * meanomega * t - np.pi / 1.2)'], {}), '(3 * meanomega * t - np.pi / 1.2)\n', (12566, 12599), True, 'import numpy as np\n'), ((4713, 4728), 'numpy.asarray', 'np.asarray', (['mpx'], {}), '(mpx)\n', (4723, 4728), True, 'import numpy as np\n'), ((4731, 4746), 'numpy.asarray', 'np.asarray', (['mvx'], {}), '(mvx)\n', (4741, 4746), True, 'import numpy as np\n'), ((14698, 14732), 'os.path.join', 'os.path.join', (['casedir', '"""log.*Foam"""'], {}), "(casedir, 'log.*Foam')\n", (14710, 14732), False, 'import os\n'), ((17736, 17765), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'slt'}), '(hours=slt)\n', (17754, 17765), False, 'import datetime\n')] |
import torch
import torch_geometric #torch_geometric == 2.5
import community
import numpy as np
import networkx
import argparse
from torch_geometric.datasets import TUDataset
from torch_geometric.data import DataLoader
from torch_geometric.data import Batch
from Sign_OPT import *
import torch_geometric.transforms as T
from Gin import GIN
from time import time
def get_args():
    """Parse command-line options for the attack and the GIN classifier."""
    parser = argparse.ArgumentParser(
        description='Pytorch graph isomorphism network for graph classification')
    # Parameters of the attack model.
    parser.add_argument('--svm', type=int, default=0)
    parser.add_argument('--max_query', type=int, default=40000)
    parser.add_argument('--effective', type=int, default=1)
    # Parameters of the GIN model.
    parser.add_argument('--dataset', type=str, default="IMDB-BINARY")
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--batch_size', type=int, default=32,
                        help='social dataset:64 bio dataset:32')
    parser.add_argument('--hidden_dim', type=int, default=64)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--model_path', type=str, default='./trained_model/')
    return parser.parse_args()
def distance(x_adv, x):
    """Number of edge edits between two graphs.

    Computes half the L1 difference of the (symmetric) adjacency matrices,
    i.e. the count of undirected edges that differ between ``x_adv`` and ``x``.
    ``nx`` / ``to_networkx`` are expected from the star import above.
    """
    g_adv = to_networkx(x_adv, to_undirected=True)
    g_ref = to_networkx(x, to_undirected=True)
    diff = nx.adjacency_matrix(g_adv) - nx.adjacency_matrix(g_ref)
    return np.sum(np.abs(diff)) / 2
# TUD: per-dataset flag read as ``degree_as_attr`` in the main block (adds a
# constant node attribute when set).  Num: per-dataset count used to bound the
# number of collected adversarial pairs (loop stops at 3 * Num[dataset]).
TUD = {'NCI1': 0, 'COIL-DEL': 0, 'IMDB-BINARY': 1}
Num = {'NCI1': 318, 'COIL-DEL': 304, 'IMDB-BINARY': 77}
if __name__ == '__main__':
    # ---- configuration -----------------------------------------------------
    args = get_args()
    dataset_name = args.dataset
    # NOTE(review): when CUDA is unavailable the ternary passes a
    # torch.device object into torch.device(); torch accepts this, but the
    # intent is simply "cpu".
    device = torch.device("cuda:"+str(args.device) if torch.cuda.is_available() else torch.device("cpu"))
    batch_size = args.batch_size
    hidden_dim = args.hidden_dim
    dropout = args.dropout
    model_path = args.model_path
    # TUD maps dataset name -> whether a constant node attribute is added.
    if dataset_name in TUD.keys():
        degree_as_attr = TUD[dataset_name]
    else:
        print('invalid dataset!')
        raise(ValueError)
    if degree_as_attr:
        dataset = TUDataset(root='./dataset',name=dataset_name,use_edge_attr='False', use_node_attr=True,
                            pre_transform=T.Constant(1, True))
    else:
        dataset = TUDataset(root='./dataset',name=dataset_name,use_edge_attr='False',use_node_attr=True)
    # 20% of the dataset is earmarked for the detection classifier.
    n = (len(dataset) // 10) * 2
    print('length of training dataset of detection classifier:', n)
    index_path = './data_split/' + dataset_name + '_'
    # NOTE(review): eval() on file contents is unsafe for untrusted input;
    # the file is assumed to hold a Python literal list of indices.
    with open(index_path+'train_index.txt', 'r') as f:
        train_index = eval(f.read())
    #detect_train_index = random.sample(train_index, n)
    train_dataset = dataset[train_index]
    #index_path = './detection/' + dataset_name+'_'
    #with open(index_path+'train_index.txt', 'w') as f:
    #    f.write(str(detect_train_index))
    input_dim = dataset.num_node_features
    output_dim = dataset.num_classes
    print('input dim: ', input_dim)
    print('output dim: ', output_dim)
    # ---- load the pre-trained GIN victim model (inference only) ------------
    model = GIN(5,2,input_dim,hidden_dim,output_dim,dropout).to(device)
    load_path = model_path + dataset_name + '.pt'
    model.load_state_dict(torch.load(load_path, map_location=device))
    model.eval()
    attacker = OPT_attack_sign_SGD(model, device, args.effective)
    num_train = len(train_dataset)
    # ---- attack bookkeeping ------------------------------------------------
    perturbation = [] #perturbation for each poisoned graph
    perturbation_ratio = [] #perturbation ratio for each poisoned graph
    success_index = []
    success_count = 0
    no_need_count = 0
    num_query = []
    fail_count = 0
    distortion = []
    attack_time = []
    detect_train_normal = []
    detect_train_advers = []
    count = 0
    for i in range(num_train):
        print('begin to attack instance {}'.format(i))
        x0 = train_dataset[i].to(device)
        y0 = x0.y[0]
        y1 = model.predict(x0, device)
        num_nodes = x0.num_nodes
        # Size of the edge search space: all possible undirected edges.
        space = num_nodes * (num_nodes - 1) / 2
        if y0 == y1:
            # Only attack correctly-classified graphs; time each attack.
            time_start = time()
            adv_x0, adv_y0, query, success, dis, init = attacker.attack_untargeted(x0, y0, svm=args.svm, query_limit=args.max_query)
            time_end = time()
            attack_time.append(time_end-time_start)
            num_query.append(query)
            if success :
                perturb = distance(adv_x0, x0)
                success_count += 1
                perturbation.append(perturb)
                perturbation_ratio.append(perturb/space)
                distortion.append(dis)
                # Relabel for the downstream detector: 0 = clean, 1 = adversarial.
                x0.y = torch.tensor([0])
                adv_x0.y = torch.tensor([1])
                detect_train_normal.append(x0)
                detect_train_advers.append(adv_x0)
                count += 1
            else:
                fail_count += 1
        else:
            # Already misclassified graphs need no attack.
            print('instance {} is wrongly classified, No Need to Attack'.format(i))
            no_need_count += 1
            num_query.append(0)
        # Stop once enough clean/adversarial pairs have been collected.
        if count == 3*Num[dataset_name]:
            break
    '''
    print('{} instances don\'t need to be attacked'.format(no_need_count))
    print('Sign-Opt fails to attack {} instance'.format(fail_count))
    success_ratio = success_count / (num_train - no_need_count)
    avg_perturbation = sum(perturbation) / success_count
    print("Sign-Opt: the success rate of black-box attack is {}/{} = {:.4f}".format(success_count,num_train-no_need_count, success_ratio))
    print('Sign-Opt: the average perturbation is {:.4f}'.format(avg_perturbation))
    print('Sign-Opt: the average perturbation ratio is {:.4f}'.format(sum(perturbation_ratio) / success_count))
    print('Sign-Opt: the average query count is {:.4f}'.format(sum(num_query)/(num_train-no_need_count)))
    print('Sign-Opt: the average attacking time is {:.4f}'.format(sum(attack_time)/(num_train-no_need_count)))
    print('Sign-Opt: the average distortion is {:.4f}'.format(sum(distortion)/success_count))
    print('Sign-Opt: detail perturbation are: {}'.format(perturbation))
    print('Sign-Opt: detail perturbation ratio are: {}'.format(perturbation_ratio))
    print('dataset: {}'.format(dataset_name))
    '''
    # Persist the collected clean/adversarial graphs for detector training.
    detect_train_path = './detection/'+dataset_name+'_Our_'
    torch.save(detect_train_normal, detect_train_path+'train_normal.pt')
    torch.save(detect_train_advers,detect_train_path+'train_advers.pt')
    '''
    query_path = './detection/' + dataset_name + '_query.txt'
    with open(query_path, 'w') as f:
        f.write(str(num_query))
    '''
    '''
    out_path = './detection/{}_Opt_{}.txt'.format(dataset_name, bound)
    with open(out_path, 'w') as f:
        f.write('{} instances don\'t need to be attacked\n'.format(no_need_count))
        f.write('Sign-Opt fails to attack {} instance\n'.format(fail_count))
        f.write("Sign-Opt: the success rate of black-box attack is {}/{} = {:.4f}\n".format(success_count,num_train-no_need_count, success_ratio))
        f.write('Sign-Opt: the average perturbation is {:.4f}\n'.format(avg_perturbation))
        f.write('Sign-Opt: the average perturbation ratio is {:.4f}\n'.format(sum(perturbation_ratio) / success_count*100))
        f.write('Sign-Opt: the average query count is {:.4f}\n'.format(sum(num_query)/(num_train-no_need_count)))
        f.write('Sign-Opt: the average attacking time is {:.4f}\n'.format(sum(attack_time)/(num_train-no_need_count)))
        f.write('Sign-Opt: the average distortion is {:.4f}\n'.format(sum(distortion)/success_count))
        f.write('Sign-Opt: detail perturbation are: {}\n'.format(perturbation))
        f.write('Sign-Opt: detail perturbation ratio are: {}\n'.format(perturbation_ratio))
    '''
| [
"numpy.abs",
"torch_geometric.datasets.TUDataset",
"argparse.ArgumentParser",
"torch.load",
"torch.tensor",
"torch.cuda.is_available",
"torch.save",
"Gin.GIN",
"torch_geometric.transforms.Constant",
"time.time",
"torch.device"
] | [((393, 495), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Pytorch graph isomorphism network for graph classification"""'}), "(description=\n 'Pytorch graph isomorphism network for graph classification')\n", (416, 495), False, 'import argparse\n'), ((6121, 6191), 'torch.save', 'torch.save', (['detect_train_normal', "(detect_train_path + 'train_normal.pt')"], {}), "(detect_train_normal, detect_train_path + 'train_normal.pt')\n", (6131, 6191), False, 'import torch\n'), ((6194, 6264), 'torch.save', 'torch.save', (['detect_train_advers', "(detect_train_path + 'train_advers.pt')"], {}), "(detect_train_advers, detect_train_path + 'train_advers.pt')\n", (6204, 6264), False, 'import torch\n'), ((2209, 2302), 'torch_geometric.datasets.TUDataset', 'TUDataset', ([], {'root': '"""./dataset"""', 'name': 'dataset_name', 'use_edge_attr': '"""False"""', 'use_node_attr': '(True)'}), "(root='./dataset', name=dataset_name, use_edge_attr='False',\n use_node_attr=True)\n", (2218, 2302), False, 'from torch_geometric.datasets import TUDataset\n'), ((3103, 3145), 'torch.load', 'torch.load', (['load_path'], {'map_location': 'device'}), '(load_path, map_location=device)\n', (3113, 3145), False, 'import torch\n'), ((1416, 1439), 'numpy.abs', 'np.abs', (['(adj_adv - adj_x)'], {}), '(adj_adv - adj_x)\n', (1422, 1439), True, 'import numpy as np\n'), ((1677, 1702), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1700, 1702), False, 'import torch\n'), ((1708, 1727), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1720, 1727), False, 'import torch\n'), ((2967, 3020), 'Gin.GIN', 'GIN', (['(5)', '(2)', 'input_dim', 'hidden_dim', 'output_dim', 'dropout'], {}), '(5, 2, input_dim, hidden_dim, output_dim, dropout)\n', (2970, 3020), False, 'from Gin import GIN\n'), ((3941, 3947), 'time.time', 'time', ([], {}), '()\n', (3945, 3947), False, 'from time import time\n'), ((4104, 4110), 'time.time', 'time', ([], {}), '()\n', 
(4108, 4110), False, 'from time import time\n'), ((2160, 2179), 'torch_geometric.transforms.Constant', 'T.Constant', (['(1)', '(True)'], {}), '(1, True)\n', (2170, 2179), True, 'import torch_geometric.transforms as T\n'), ((4470, 4487), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (4482, 4487), False, 'import torch\n'), ((4515, 4532), 'torch.tensor', 'torch.tensor', (['[1]'], {}), '([1])\n', (4527, 4532), False, 'import torch\n')] |
import h5py
import os, sys, glob
import numpy as np
import plotly.offline as offline
from preprocessing import analysis_pp
from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils
from scipy.stats.stats import power_divergence
from scipy.stats import ttest_ind_from_stats
import csv
import scipy.signal as ss
import math
import time
from pandas import DataFrame
from scipy import optimize
import pandas as pd
import matplotlib.pyplot as plt
from collections import deque
class AstrocytePlotter():
def __init__(self, output_folder):
self.output_folder = output_folder
#For correlation plots
self.filter_probs = [0.05, 0.10, 0.25]
self.n_samples_corr_fake = 20
self.num_frames_splits_l = [250, 500, 1000, 3000, 6000, 12000, 24000, 100000]
self.num_frames_splits_m_l = [0.5, 1, 2, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80]
self.num_frames_splits_splits_m_l = [10, 15, 20, 25, 30, 35, 40]
self.max_split_comparison_samples = 100
self.behaviours_list_a = ['default', 'rest', 'running',
'running_start', 'running_before', 'stick',
'stick_start', 'stick_end', 'stick_expect',
'stick_rest', 'whisker_rest_stick', 'whisker_stick']
self.behaviours_list_small = ['whisker_rest_stick', 'default', 'rest', 'running', 'stick']
def setup_plot_folders(self, output_experiment_path):
paths = ['borders', 'behaviour_heatmaps', 'behaviours_basic',
'signal_delays', 'signal_durations', 'triplet', 'behaviour_activity',
'behaviour_areas', 'signal_basic_samples', 'signal_behaviour_samples',
'correlations', 'random_events', 'splits', 'splits_self', 'signal_amplitudes',
'signal_proportion_delays', 'signal_stick_run_samples', 'splits_split_split',
'triplet_bar', 'size_v_time_corr',
'behaviour_heatmaps_threshold_with_random',
'split_behaviour_grids',
'size_histogram_bh_comparison_individual', 'amplitude_histogram_bh_comparison_individual', 'duration_histogram_bh_comparison_individual',]
for p in paths:
try:
os.makedirs(os.path.join(output_experiment_path, 'plots' , p))
except:
pass
def setup_file_folders(self, output_experiment_path):
paths = ['correlations', 'csv']
for p in paths:
try:
print(os.path.join(output_experiment_path, 'files', p))
os.makedirs(os.path.join(output_experiment_path, 'files', p))
except:
print('Folder structure exists?')
def setup_plot_folders_comparison(self, output_experiment_path_comparison):
paths = ['behaviour_heatmaps', 'triplet', 'intersection', 'correlations', 'align',
'intersection_border_xcorr_aligned',]
for p in paths:
try:
os.makedirs(os.path.join(output_experiment_path_comparison, 'plots', p))
except:
print('Folder structure exists?')
def setup_file_folders_comparison(self, output_experiment_path_comparison):
paths = ['correlations', 'csv']
for p in paths:
try:
print(os.path.join(output_experiment_path_comparison, 'files', p))
os.makedirs(os.path.join(output_experiment_path_comparison, 'files', p))
except:
print('Folder structure exists?')
def setup_plot_folders_all_comparison(self, output_experiment_path_all_comparison):
#print(output_experiment_path_all_comparison)
paths = ['size_histogram_comparison', 'amplitude_histogram_comparison', 'duration_histogram_comparison',
'size_histogram_bh_comparison', 'amplitude_histogram_bh_comparison', 'duration_histogram_bh_comparison',
'activity_all', 'activity_all_number_minute', 'waterfall_together', 'signal_proportion_delays',
'signal_proportion_delays_alt_average_proportions',
'behaviour_heatmaps_V2_comparison_scale',
'bar_rest_run_all',
'bar_rest_rest_stick_all',
'bar_run_run_stick_all',
'dot_rest_run_pair_all',
'bar_run_stick_run_transition_all',
'rest_to_run_proportions_alt',
'run_to_rest_proportions_alt',
'run_stick_run_proportions_alt',
'run_stick_run_proportions_alt_filter_max_3_frames',
'run_stick_run_proportions_alt_filter_max_5_frames',
'rest_to_run_amplitudes_default_alt',
'rest_to_run_amplitudes_alt',
'rest_to_run_durations_alt',
'rest_to_run_sizes_alt',
'rest_to_run_speed_alt',
'rest_to_run_pupil_alt',
'run_to_rest_amplitudes_default_alt',
'run_to_rest_amplitudes_alt',
'run_to_rest_durations_alt',
'run_to_rest_sizes_alt',
'rest_to_run_amplitudes_default_outlier_alt',
'rest_to_run_amplitudes_outlier_alt',
'rest_to_run_durations_outlier_alt',
'rest_to_run_sizes_outlier_alt',
'run_to_rest_amplitudes_default_outlier_alt',
'run_to_rest_amplitudes_outlier_alt',
'run_to_rest_durations_outlier_alt',
'run_to_rest_sizes_outlier_alt',
'run_to_rest_speed_alt',
'run_to_rest_pupil_alt',
'run_stick_run_amplitudes_default_alt',
'run_stick_run_amplitudes_alt',
'run_stick_run_durations_alt',
'run_stick_run_sizes_alt',
'run_stick_run_amplitudes_default_outlier_alt',
'run_stick_run_amplitudes_outlier_alt',
'run_stick_run_durations_outlier_alt',
'run_stick_run_sizes_outlier_alt',
'run_stick_run_speed_alt',
'run_stick_run_pupil_alt',
'run_stick_run_amplitudes_default_alt_filter_max_3_frames',
'run_stick_run_amplitudes_alt_filter_max_3_frames',
'run_stick_run_durations_alt_filter_max_3_frames',
'run_stick_run_sizes_alt_filter_max_3_frames',
'run_stick_run_speed_alt_filter_max_3_frames',
'run_stick_run_pupil_alt_filter_max_3_frames',
'run_stick_run_amplitudes_default_alt_filter_max_5_frames',
'run_stick_run_amplitudes_alt_filter_max_5_frames',
'run_stick_run_durations_alt_filter_max_5_frames',
'run_stick_run_sizes_alt_filter_max_5_frames',
'run_stick_run_speed_alt_filter_max_5_frames',
'run_stick_run_pupil_alt_filter_max_5_frames',
'all_amplitudes', 'all_durations', 'all_sizes',
'all_amplitudes_filt_bh', 'all_durations_filt_bh', 'all_sizes_filt_bh',
'correlations',
'correlations_long_events',
'correlations_short_events',
'correlations_no_align',
'correlations_no_align_long_events',
'correlations_no_align_short_events',
'correlations_csv',
'correlations_long_events_csv',
'correlations_short_events_csv',
'correlations_no_align_csv',
'correlations_no_align_long_events_csv',
'correlations_no_align_short_events_csv',
'control',
'outliers',
'triplet_dot_all',
'size_v_time_corr_ALL',
'speed_v_events_ALL',
'split_correlation_all',
'behaviour_over_recording',
'pixel_distribution',
'splits_self_all',
]
data_paths = [
'correlations',
'correlations_long_events',
'correlations_short_events',
'correlations_no_align',
'correlations_no_align_long_events',
'correlations_no_align_short_events',
'control',
'outliers',
'behaviour_ratios',
'top_average_values',
'split_correlation_all',
'splits_self_all'
]
for p in paths:
#print('Trying...', p)
try:
os.makedirs(os.path.join(output_experiment_path_all_comparison, 'plots', p))
except:
print('Folder structure exists?')
for p in data_paths:
try:
os.makedirs(os.path.join(output_experiment_path_all_comparison, 'data', p))
except:
print('Folder structure exists?')
def get_output_experiment_path(self, astroA, output_folder):
experiment_id = '/'.join(astroA.experiment_path.split('/')[-2:])
output_experiment_path = os.path.join(output_folder, experiment_id)
return output_experiment_path
def plot_all_single(self, astroA):
output_experiment_path = self.get_output_experiment_path(astroA, self.output_folder)
print('Making dirs', output_experiment_path)
self.setup_plot_folders(output_experiment_path)
print('Plotting behaviours basic...')
#Behaviour basic
figs_basic_plots = self.get_behaviour_basic_plots(astroA)
for fig_k in figs_basic_plots.keys():
saving_utils.save_plotly_fig(figs_basic_plots[fig_k], os.path.join(output_experiment_path, 'plots', 'behaviours_basic', '{}'.format(fig_k)), width=1000, height=400)
print('Plotting random samples of signals...')
fig_signals = self.get_signal_figs_samples(astroA, 20)
for i, fig_signal in enumerate(fig_signals):
fig_signal_path = os.path.join(output_experiment_path, 'plots', 'signal_basic_samples', 'signal_{}'.format(i))
saving_utils.save_plotly_fig(fig_signal, fig_signal_path)
print('Plotting borders...')
#Borders plot
fig_border = self.get_border_plot(astroA)
saving_utils.save_plotly_fig(fig_border, os.path.join(output_experiment_path, 'plots' , 'borders', 'border'))
print('Plotting behaviour heatmaps...')
#Behaviour heatmaps
fig_heatmap_grids, fig_heatmap_dff_grids = self.get_behaviour_contour_plots(astroA)
heatmap_grid_base_path = os.path.join(output_experiment_path, 'plots', 'behaviour_heatmaps')
for k in fig_heatmap_grids.keys():
saving_utils.save_plotly_fig(fig_heatmap_grids[k], os.path.join(heatmap_grid_base_path, k))
saving_utils.save_plotly_fig(fig_heatmap_dff_grids[k], os.path.join(heatmap_grid_base_path, k + 'dff'))
print('Plotting behaviour activity bar plot...')
behaviour_activity_path = os.path.join(output_experiment_path, 'plots', 'behaviour_activity', 'activity')
fig_behaviour_activity = self.get_behaviour_activity_plot(astroA)
print('BEHAVIOUR ACTIVITY PATH \nn\\n\n\n\n', behaviour_activity_path)
saving_utils.save_plotly_fig(fig_behaviour_activity, behaviour_activity_path, width=1200, height=800)
print('Plotting behaviour event size bar plot...')
behaviour_area_path = os.path.join(output_experiment_path, 'plots', 'behaviour_areas', 'areas')
fig_behaviour_area = self.get_behaviour_area_plot(astroA)
saving_utils.save_plotly_fig(fig_behaviour_area, behaviour_area_path)
print('Plotting behaviour amplitude size bar plot...')
behaviour_amplitude_path = os.path.join(output_experiment_path, 'plots', 'signal_amplitudes', 'amplitudes')
fig_behaviour_amplitude = self.get_behaviour_amplitude_bar_plot(astroA)
saving_utils.save_plotly_fig(fig_behaviour_amplitude, behaviour_amplitude_path)
print('Plotting random samples of signals on different behaviours...')
fig_bk_signals = self.get_signal_bk_figs_samples(astroA, 3)
for bk in fig_bk_signals.keys():
for i, fig_bk_signal in enumerate(fig_bk_signals[bk]):
fig_bk_signal_path = os.path.join(output_experiment_path, 'plots', 'signal_behaviour_samples', 'signal_{}-{}'.format(bk, i))
saving_utils.save_plotly_fig(fig_bk_signal, fig_bk_signal_path)
print('Plotting local signal samples with stick and running...')
stick_run_sample_path = os.path.join(output_experiment_path, 'plots', 'signal_stick_run_samples')
fig_stick_run_samples_l = self.get_stick_run_sample_figs(astroA)
for i, sample_figs in enumerate(fig_stick_run_samples_l):
saving_utils.save_plotly_fig(sample_figs[0], os.path.join(stick_run_sample_path, '{}-running'.format(i)))
saving_utils.save_plotly_fig(sample_figs[1], os.path.join(stick_run_sample_path, '{}-stick'.format(i)))
for j in range(min(10, len(sample_figs[2]))):
saving_utils.save_plotly_fig(sample_figs[2][j], os.path.join(stick_run_sample_path, '{}-signal_{}'.format(i, j)))
bh_l = ['rest', 'stick_rest', 'running', 'stick_run_ind_15']
#Area: None, 60, num_bins = 10
#Duration: None, 30, num_bins = 10
#dff : 0.6, 5, num_bins = 20
print('Comparing behaviour distribution plots for SINGLE...')
for n_bins in [10, 20]:
print('NUM BINS:', n_bins)
for behaviour_l in [bh_l]: #, ['rest', 'running'], ['running', 'stick'], ['rest', 'stick_rest'], ['running', 'stick_run_ind_15']]:
for measure, min_measure, max_measure in [
['area', None, 60],
['dffMax2', 0.6, 5],
['duration', None, 30],
]:
for confidence in [True]:
measure_name = aqua_utils.get_measure_names(measure)
path = os.path.join(output_experiment_path, 'plots', '{}_histogram_bh_comparison_individual'.format(measure_name), 'behaviours-{}-nbins={}-min={}-max={}-conf={}'.format('_'.join(behaviour_l), n_bins, min_measure, max_measure, confidence))
plot, stats_d = self.measure_distribution_bh_compare_plot([astroA], behaviour_l, measure=measure, num_bins=n_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name, confidence=confidence, with_stats=True, mode='MOA')
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA.fr, axis='x')
saving_utils.save_pth_plt_l_log([plot], [path], axis='x')
saving_utils.save_pth_plt_l_log([plot], [path], axis='y')
#Save results in text file
for i, name in enumerate(stats_d['names']):
#Create folder
data_folder_path = path
try:
os.makedirs(path)
except:
pass
temp_d = {k : stats_d[k][i] for k in stats_d.keys()}
saving_utils.save_csv_dict(temp_d, os.path.join(data_folder_path, '{}.csv'.format(name)), key_order=['names', 'x', 'mean', 'conf_95', 'std'])
np.savetxt(os.path.join(data_folder_path, '{}-data.csv'.format(name)), np.array(temp_d['data']).transpose(), delimiter=",")
'''
for confidence in [True]:
for with_log in [False, True]:
try:
measure_name = aqua_utils.get_measure_names(measure)
plot, stats_d = self.measure_distribution_bh_compare_plot_exponential_fit([astroA], behaviour_l, measure=measure, num_bins=n_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name, confidence=False, with_stats=True, with_log=with_log)
path = os.path.join(output_experiment_path, 'plots', '{}_histogram_bh_comparison_individual'.format(measure_name), 'behaviours-{}-nbins={}-min={}-max={}-conf={}_EXPFIT-withlog={}'.format('_'.join(behaviour_l), n_bins, min_measure, max_measure, confidence, with_log))
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA.fr, axis='x')
#Save results in text file
for i, name in enumerate(stats_d['names']):
#Create folder
data_folder_path = path
try:
os.makedirs(path)
except:
pass
temp_d = {k : stats_d[k][i] for k in stats_d.keys()}
if len(name.split('__')) == 2:
tx_name = name.split('__')[0] + '_expfit'
else:
tx_name = name
print('TX NAME', name)
saving_utils.save_csv_dict(temp_d, os.path.join(data_folder_path, '{}.csv'.format(tx_name)), key_order=['names', 'x', 'mean', 'conf_95', 'std'])
np.savetxt(os.path.join(data_folder_path, '{}-data.csv'.format(tx_name)), np.array(temp_d['data']).transpose(), delimiter=",")
saving_utils.save_plotly_fig(plot, path)
print('THE STAT HERE?', stats_d)
except Exception as e:
print('EXCEPTION\n\n\n', 'CONF', confidence, 'LOG', with_log, 'measure' ,measure)
'''
print('Plotting signal durations...')
#Signal durations plot
durations_base_path = os.path.join(output_experiment_path, 'plots', 'signal_durations')
fig_durations = self.get_signal_durations_plot(astroA)
for k in fig_durations.keys():
saving_utils.save_plotly_fig(fig_durations[k], os.path.join(durations_base_path, k + '-durations'))
'''
if astroA.aqua_bound == True:
print('Plotting triplet plot...')
#Triplet plot
triplet_base_path = os.path.join(output_experiment_path, 'plots' , 'triplet')
radii_path = os.path.join(output_experiment_path, 'plots', 'triplet', 'radii')
fig_triplets, fig_radii_border = self.get_triplet_plots(astroA, n_bins=8)
for k in fig_triplets.keys():
saving_utils.save_plotly_fig(fig_triplets[k], os.path.join(triplet_base_path, k + '-triplet'))
saving_utils.save_plotly_fig(fig_radii_border, radii_path)
print('Plotting bar plots (triplet plot bands) num_events, duration, amplitude, ')
measure_names = [None, 'Area', 'Amplitude', 'Time (s)']
for bh in ['default', 'rest', 'running', 'stick', 'stick_rest', 'stick_run_ind_15']:
for i, measure in enumerate([None, 'area', 'dffMax2', 'time_s']):
path = os.path.join(output_experiment_path, 'plots', 'triplet_bar', '{}_{}'.format(bh, measure))
if bh in astroA.event_subsets:
fig = self.triplet_bar_plot(astroA, bh=bh, measure=measure, n_bins=8, y_title=measure_names[i])
print('SAVING TRIPLET BAR')
saving_utils.save_plotly_fig(fig, path)
'''
'''
print('Plotting Signal duration split relative differences...')
duration_split_differences_path = os.path.join(output_experiment_path, 'plots', 'signal_durations', 'duration_splits_relative_differences')
fig_duration_split_differences = self.get_duration_split_differences_from_default(astroA)
saving_utils.save_plotly_fig(fig_duration_split_differences, duration_split_differences_path)
'''
'''
#Signal delays plot
signal_delays_path = os.path.join(output_experiment_path, 'plots' , 'signal_delays')
print('Plotting signal delays')
fig_delays_waterfall_d, fig_delays_waterfall_interpolate_d = self.get_waterfall_delays_plot_all(astroA)
for fig_k in fig_delays_waterfall_d.keys():
print('FIG K', fig_k)
saving_utils.save_plotly_fig(fig_delays_waterfall_d[fig_k], os.path.join(signal_delays_path, fig_k + '-delays_waterfall'))
saving_utils.save_plotly_fig(fig_delays_waterfall_interpolate_d[fig_k], os.path.join(signal_delays_path, fig_k + '-delays_waterfall_interpolate'))
print('Plotting singal proportion delays...')
fig_proportion_delays_path = os.path.join(output_experiment_path, 'plots', 'signal_proportion_delays')
fig_proportion_delays_d = self.get_proportion_delays_plot_all([astroA])
for fig_k in fig_proportion_delays_d.keys():
saving_utils.save_plotly_fig(fig_proportion_delays_d[fig_k], os.path.join(fig_proportion_delays_path, fig_k))
print('Plotting sample frame split examples...')
figs_frame_split_examples = self.get_frame_split_example_plots(astroA)
for pk in figs_frame_split_examples.keys():
for frame_split in figs_frame_split_examples[pk].keys():
figs_frame_split_example_path = os.path.join(output_experiment_path, 'plots', 'correlations', 'frame_split_pair_example_frames_{}_p={}'.format(frame_split, pk))
saving_utils.save_plotly_fig(figs_frame_split_examples[pk][frame_split], figs_frame_split_example_path)
print('Plotting random astrocyte FULL sample plots...')
figs_random_event_path = os.path.join(output_experiment_path, 'plots', 'random_events')
fig_l = self.get_random_astrocyte_plot(astroA)
for i, fig in enumerate(fig_l):
saving_utils.save_plotly_fig(fig, os.path.join(figs_random_event_path, 'sample_{}'.format(i)))
'''
'''
print('Plotting split counter')
figs_frame_split = self.get_compare_frame_split_plots(astroA)
for pk in figs_frame_split.keys():
figs_frame_split_path = os.path.join(output_experiment_path, 'plots', 'splits', 'splits_p={}'.format(pk))
saving_utils.save_plotly_fig(figs_frame_split[pk], figs_frame_split_path)
#TODO RUN THIS
print('Plotting frame split xcorr value to full self (self<->split)')
fig_frame_split_self_path_a = os.path.join(output_experiment_path, 'plots', 'splits_self', 'splits_self_a')
fig_frame_split_self_path_b = os.path.join(output_experiment_path, 'plots', 'splits_self', 'splits_self_b')
fig_frame_split_self_a, fig_frame_split_self_b = self.get_compare_full_self_frame_split_plot_xcorr(astroA)
saving_utils.save_plotly_fig(fig_frame_split_self_a, fig_frame_split_self_path_a)
saving_utils.save_plotly_fig(fig_frame_split_self_b, fig_frame_split_self_path_b)
'''
'''
print('Plotting frame split xcorr value to splits splits (split<->split)')
fig_frame_split_self_path_a = os.path.join(output_experiment_path, 'plots', 'splits_split_split', 'splits_self_a')
fig_frame_split_self_path_b = os.path.join(output_experiment_path, 'plots', 'splits_split_split', 'splits_self_b')
fig_frame_split_self_a, fig_frame_split_self_b = self.get_compare_full_self_frame_split_split_plot_xcorr(astroA)
saving_utils.save_plotly_fig(fig_frame_split_self_a, fig_frame_split_self_path_a)
saving_utils.save_plotly_fig(fig_frame_split_self_b, fig_frame_split_self_path_b)
'''
'''
print('Plotting first last 20 min of rest heatmap comparison...')
fig_20min_rest_path = os.path.join(output_experiment_path, 'plots', 'splits_self', 'splits_first_last_rest_20min')
fig_20min_rest = self.get_plot_first_last_x_min_behaviour(astroA, num_min=20, behaviour_ind='rest')
if fig_20min_rest is not None:
saving_utils.save_plotly_fig(fig_20min_rest, fig_20min_rest_path)
print('Plotting continuous 20 min rest heatmaps compared to start...')
fig_20min_cont_rest_path = os.path.join(output_experiment_path, 'plots', 'splits_self', 'cont_splits_first_last_rest_20min')
fig_20min_cont_rest = self.get_plot_x_min_rest_relative(astroA, num_min=20, behaviour_ind='rest')
if fig_20min_cont_rest is not None:
saving_utils.save_plotly_fig(fig_20min_cont_rest, fig_20min_cont_rest_path)
'''
'''
plt.ioff()
print('Plotting Size vs Time correlation plot...')
path = os.path.join(output_experiment_path, 'plots', 'size_v_time_corr')
areas = np.log(astroA.res_d['area'])
times = astroA.res_d['time_s']
r, p = stat_utils.get_pearsonr(times, areas)
df = pd.DataFrame({'Size': areas, 'Time': times})
title ='Size vs Time correlation plot'
text = 'r = {}, p < {}'.format(general_utils.truncate(r, 2), p)
for kind in ['reg', 'hex', 'kde']:
plotly_utils.seaborn_joint_grid(df, 'Size', 'Time', kind=kind, text=text)
plt.savefig(os.path.join(path, '{}.svg'.format(kind)))
plt.savefig(os.path.join(path, '{}.png'.format(kind)))
'''
'''
print('Split BEHAVIOUR GRIDS...')
n_chunks = 3
for bh in ['default', 'running', 'rest']:
event_grid_splits = aqua_utils.split_n_event_grids(astroA, bh=bh, n=n_chunks)
path = os.path.join(output_experiment_path, 'plots', 'split_behaviour_grids')
for i, event_grid_split in enumerate(event_grid_splits):
plot = plotly_utils.plot_contour(event_grid_split, title='{}-split {}/{}'.format(bh, i+1, len(event_grid_splits)))
saving_utils.save_plotly_fig(plot, os.path.join(path, 'bh_{}-split_{}-chunks_{}'.format(bh,i,n_chunks)))
'''
'''
print('HEATMAPS V2_2... (each astro day scaled with random)')
for dff_mode in ['False']:
#for bh in ['default', 'running', 'rest', 'stick_run_ind_15', 'stick_rest']:
for bh in ['default']:
print('THIS REPETITION LOOP MUST BE ONCE')
path = os.path.join(output_experiment_path, 'plots', 'behaviour_heatmaps_threshold_with_random')
d = self.get_individual_heatmaps_threshold_scaled(astroA, bh=bh, threshold=0.7, num_samples=3, dff_mode=dff_mode)
if d is None:
continue
saving_utils.save_plotly_fig(d['contour'], os.path.join(path, 'bh_{}-dff_{}'.format(bh, dff_mode)))
for i, contour_random in enumerate(d['contour_random']):
saving_utils.save_plotly_fig(contour_random, os.path.join(path, 'bh_{}-dff_{}-random_{}'.format(bh, dff_mode, i)))
'''
'''
#Every 60 seconds, whole vid
with_donwsample = True
downsample_length = int(astroA.fr * 60)
second_length = astroA.fr
bh_l = ['default', 'rest', 'running']
end_t = -1
start_t = 0
for bh in bh_l:
save_base_path = os.path.join(output_experiment_path, 'plots', 'video_plots-{}-d{}-e{}'.format(bh, downsample_length, end_t))
try:
os.makedirs(save_base_path)
except:
print('Folder exists')
self.make_event_appended_video(astroA,
bh=bh,
start_t=start_t,
end_t=end_t,
downsample_length=downsample_length,
save_base_path=save_base_path)
'''
'''
#Every 2 seconds, first 120 seconds
with_donwsample = True
downsample_length = int(astroA.fr * 2)
end_t = int(1200 * astroA.fr)
start_t = 0
second_length = astroA.fr
#bh_l = ['default', 'rest', 'running']
bh_l = ['default', 'rest', 'running']
for bh in bh_l:
save_base_path = os.path.join(output_experiment_path, 'plots', 'video_plots-{}-d{}-e{}'.format(bh, downsample_length, end_t))
try:
os.makedirs(save_base_path)
except:
print('Folder exists')
self.make_event_appended_video(astroA,
bh=bh,
start_t=start_t,
end_t=end_t,
downsample_length=downsample_length,
save_base_path=save_base_path)
'''
'''
bh_l = ['default', 'rest', 'running']
for bh in bh_l:
end_t = int(120*astroA.fr)
time_sorted_events_trunc = sorted((i for i,e in enumerate(astroA.res_d['tEnd']) if (e < frame_max)))
save_base_path = os.path.join(output_experiment_path, 'plots', 'video_plots_precise-{}-d{}-e{}'.format(bh, downsample_length, end_t))
downsample_length = int(astroA.fr * 2)
self.make_event_appended_video_precise(astroA,
event_l=time_sorted_events_trunc,
end_t=end_t,
downsample_length=downsample_length,
save_base_path=save_base_path)
'''
'''
bh_l = ['rest', 'running']
for bh in bh_l:
start_t = 0
end_t = int(1200 * astroA.fr)
downsample_length = int(astroA.fr * 2)
save_base_path = os.path.join(output_experiment_path, 'plots', 'video_plots_bh_frames-{}-d{}-e{}'.format(bh, downsample_length, end_t))
try:
os.makedirs(save_base_path)
except:
print('Folder exists')
self.make_event_appended_video_bh_frames(astroA,
bh=bh,
start_t=start_t,
end_t=end_t,
downsample_length=downsample_length,
save_base_path=save_base_path)
'''
def make_event_appended_video_bh_frames(self, astro, bh, start_t=0, end_t=-1, downsample_length=60, save_base_path=''):
    """Save per-chunk cumulative event-density contour frames for one behaviour.

    Splits the behaviour's frame indices into consecutive chunks of
    ``downsample_length`` frames, accumulates the 2D event grid chunk by
    chunk and writes one normalized contour plot per chunk ('00000',
    '00001', ...) into ``save_base_path``.

    Args:
        astro: recording object; must expose ``indices_d``, ``res_d``,
            ``input_shape`` and ``minute_frames``.
        bh: behaviour key into ``astro.indices_d``.
        start_t: first position (into the behaviour's index list) to use.
        end_t: one-past-last position; -1 means "until the end".
        downsample_length: frames per chunk / per output image.
        save_base_path: directory the numbered plot files are written to.
    """
    # Bug fix: the old code always sliced [start_t:end_t], so the default
    # end_t=-1 silently dropped the last behaviour frame. Treat -1 as
    # "to the end", consistent with make_event_appended_video.
    if end_t == -1:
        curr_indices = astro.indices_d[bh][start_t:]
    else:
        curr_indices = astro.indices_d[bh][start_t:end_t]
    # Truncate so the index list divides evenly into downsample_length chunks.
    if len(curr_indices) % downsample_length != 0:
        curr_indices_fix = curr_indices[:-(len(curr_indices) % downsample_length)]
    else:
        curr_indices_fix = curr_indices
    num_splits = len(curr_indices_fix) // downsample_length
    curr_indices_split = {i: curr_indices_fix[i*downsample_length:(i+1)*downsample_length] for i in range(num_splits)}
    curr_indices_split['default'] = astro.indices_d['default']
    bh_event_subsets = aqua_utils.get_event_subsets(curr_indices_split, astro.res_d)
    x2d_all = np.zeros([astro.input_shape[0], astro.input_shape[1]])
    for i in range(num_splits):
        print(i, '/', num_splits)
        x2d = aqua_utils.get_event_grid_from_x2D(astro.res_d['x2D'][bh_event_subsets[i]], (astro.input_shape[0], astro.input_shape[1]))
        x2d_all = x2d_all + x2d
        # Normalize the accumulated counts to a per-minute rate.
        x2d_all_normalized = np.copy(x2d_all) / ((i+1) * (downsample_length)) * astro.minute_frames
        # Linearly rescale to [0, 1] for plotting.
        x2d_all_normalized = (x2d_all_normalized - np.min(x2d_all_normalized)) / (np.max(x2d_all_normalized) - np.min(x2d_all_normalized))
        fig = plotly_utils.plot_contour(x2d_all_normalized, title='', tick_x=[0.2, 0.4, 0.6, 0.8])
        saving_utils.save_plotly_fig(fig, os.path.join(save_base_path, '{:05d}'.format(i)), save_svg=False)
def make_event_appended_video(self, astro, bh='default', start_t=0, end_t=-1, downsample_length=60, save_base_path=''):
    """Save cumulative event-density contour frames over the recording.

    For each frame in [start_t, end_t) the events whose tBegin falls on that
    frame are collected; consecutive frames are merged into segments of
    ``downsample_length`` frames, the event grid is accumulated segment by
    segment and one normalized contour image per segment is written to
    ``save_base_path`` ('00000', '00001', ...).

    Args:
        astro: recording object (``res_d``, ``event_subsets``,
            ``input_shape``, ``total_indices``, ``minute_frames``).
        bh: behaviour key selecting which events are included.
        start_t: first frame considered.
        end_t: one-past-last frame; -1 means the whole recording.
        downsample_length: frames per segment; 1 disables downsampling.
        save_base_path: output directory for the numbered plots.
    """
    with_downsample = False if downsample_length == 1 else True
    if end_t == -1:
        end_t = astro.total_indices
    # (tBegin, event_id) pairs for this behaviour, sorted by start time.
    time_sorted_events = deque(sorted((e, i) for i, e in enumerate(astro.res_d['tBegin'][astro.event_subsets[bh]])))
    # Bug fix: events beginning before start_t used to sit at the deque head
    # forever (the equality test below can never pop them), which silently
    # blocked every later event as well. Discard them up front.
    while len(time_sorted_events) != 0 and time_sorted_events[0][0] < start_t:
        time_sorted_events.popleft()
    # Populate events over time: one list of event ids per frame.
    events_ot_l = []
    for t in range(start_t, end_t):
        events_ot_l.append([])
        # As long as the first element has the same time, pop it into the
        # current frame's list.
        # Bug fix: the original indexed events_ot_l[t], which is out of range
        # whenever start_t > 0; index relative to start_t instead.
        while len(time_sorted_events) != 0 and t == time_sorted_events[0][0]:
            events_ot_l[t - start_t].append(time_sorted_events.popleft()[1])
    # Downsample: merge consecutive per-frame lists into segments.
    if with_downsample:
        new_events_ot_l = general_utils.merge_l_l(events_ot_l, downsample_length)
    else:
        # Copy it, not strictly needed.
        new_events_ot_l = [ev for ev in events_ot_l]
    # Generate plots over time.
    x2d_all = np.zeros([astro.input_shape[0], astro.input_shape[1]])
    for i, segment_events_l in enumerate(new_events_ot_l):
        x2d = aqua_utils.get_event_grid_from_x2D(astro.res_d['x2D'][segment_events_l], (astro.input_shape[0], astro.input_shape[1]))
        x2d_all = x2d_all + x2d
        # Normalize the accumulated counts to a per-minute rate.
        x2d_all_normalized = np.copy(x2d_all) / ((i+1) * (downsample_length if with_downsample else 1)) * astro.minute_frames
        # Linearly rescale to [0, 1] for plotting.
        x2d_all_normalized = (x2d_all_normalized - np.min(x2d_all_normalized)) / (np.max(x2d_all_normalized) - np.min(x2d_all_normalized))
        fig = plotly_utils.plot_contour(x2d_all_normalized, title='', tick_x=[0.2, 0.4, 0.6, 0.8])
        saving_utils.save_plotly_fig(fig, os.path.join(save_base_path, '{:05d}'.format(i)), save_svg=False)
#Pass event list to choose which events. E.g. events in first 2 minutes
#Slow but potentially prettier method. You can see each individual event its duration
def make_event_appended_video_precise(self, astro_curr, event_l, end_t, downsample_length, save_base_path):
    """Build a cumulative per-pixel event-count volume for selected events.

    For every event in ``event_l`` each active pixel contributes +1 to all
    frames from the first frame it appears in onwards, so ``a[:, :, t]``
    counts how many selected events have touched each pixel by frame t.
    Slow but precise: individual events remain visible for their duration.

    Args:
        astro_curr: recording object; ``res_d['x3D']`` holds, per event, flat
            Fortran-order voxel indices into the (height, width, time) volume.
        event_l: event ids to include (e.g. events in the first 2 minutes).
        end_t: length of the output time axis.
        downsample_length: unused; kept for signature compatibility with the
            other video helpers.
        save_base_path: unused; kept for signature compatibility.

    Returns:
        np.ndarray of shape (height, width, end_t) with cumulative counts.
    """
    dim_1 = astro_curr.input_shape[0]
    dim_2 = astro_curr.input_shape[1]
    dim_3 = end_t
    a = np.zeros([dim_1, dim_2, dim_3])
    for i, event in enumerate(astro_curr.res_d['x3D'][event_l]):
        print(i)
        # Flat indices -> (row, col, time); order='F' matches MATLAB-style
        # column-major indexing of the AQuA output.
        unraveled = np.unravel_index(event, [dim_1, dim_2, dim_3], order='F')
        # Count each pixel only once per event, at the first voxel seen for
        # it. NOTE(review): this is the pixel's earliest frame only if the
        # per-event flat indices are sorted ascending — TODO confirm.
        added_arr = np.zeros([dim_1, dim_2])
        for u_i in range(len(unraveled[0])):
            c_0 = unraveled[0][u_i]
            c_1 = unraveled[1][u_i]
            t = unraveled[2][u_i]
            if added_arr[c_0, c_1] == 1:
                continue
            a[c_0, c_1, t:] += 1
            added_arr[c_0, c_1] = 1
    # The original had a plotting loop after this return; it was unreachable
    # and referenced undefined names (a_3d, with_downsample), so it has been
    # removed as dead code (along with unused begin_time/end_time locals).
    return a
#--------#--------#--------#--------#--------#--------#--------#--------#--------#--------
#Experiment_id/days
def plot_comparisons(self, astroA_l):
    """Set up the output folder structure for an astrocyte day-pair comparison.

    Derives the comparison output path from the astrocyte list and creates
    the plot sub-folders. The actual comparison plotting (behaviour heatmap
    comparisons, intersection plots, alignment/xcorr figures, behaviour
    correlations) was exploratory and is retired; recover it from version
    control history if needed.

    Args:
        astroA_l: list of recording objects (same experiment, different
            days) to be compared.
    """
    output_experiment_path_comparison, days_str, day_l_s, astroA_l_s = self.setup_comparison_vars(astroA_l, self.output_folder)
    print(output_experiment_path_comparison)
    # Setup folders
    self.setup_plot_folders_comparison(output_experiment_path_comparison)
    # NOTE(review): ~100 lines of commented-out plotting code (dead
    # triple-quoted string literals with no runtime effect) were removed
    # from the end of this method.
def plot_comparisons_all(self, astroA_l, astroA_l_pairs=None, astroA_l_good_pairs=None, astroA_l_good=None, astroA_long_l=None):
output_experiment_path_all_comparison, _, _, astroA_l_s = self.setup_comparison_all_vars(astroA_l, self.output_folder)
print('Plotting sizes histogram dataset comparison for each behaviour')
self.setup_plot_folders_all_comparison(output_experiment_path_all_comparison)
bh_l = ['rest', 'stick_rest', 'running', 'stick_run_ind_15']
astroA_l_filt = []
bh_l_test = ['rest', 'running', 'stick_run_ind_15', 'stick_rest']
for astroA in astroA_l:
include = True
for bh in bh_l_test:
if bh not in astroA.indices_d.keys() or bh not in astroA.activity_ratios.keys():
include = False
print(':(', astroA.print_id, bh)
if include:
astroA_l_filt.append(astroA)
day_0_1_pairs = []
if astroA_l_pairs is not None:
for astroA_l_pair in astroA_l_pairs:
if astroA_l_pair[1].day == 1:
day_0_1_pairs.append(astroA_l_pair)
'''
print('Saving results of ratios running, rest, stick-running, stick-rest of each astrocyte in csv...')
c = ['running', 'rest', 'stick_run_ind_15', 'stick_rest', 'total_time_s', 'total_time_m', 'avg_running_speed', 'avg_speed_global']
c_n = ['running', 'rest', 'stick_run', 'stick_rest', 'total_time(s)', 'total_time(m)', 'avg_speed(cm/s)', 'avg_speed_global(cm/s)']
astro_ratios_np = np.zeros([len(astroA_l), len(c)])
r = [astroA.id for astroA in astroA_l]
for i, astroA in enumerate(astroA_l):
num_frames = len(astroA.indices_d['default'])
num_seconds = num_frames / astroA.fr
num_minutes = general_utils.truncate(num_seconds / 60.0, 2)
num_seconds = general_utils.truncate(num_seconds, 2)
for j, k in enumerate(c):
if j == 4:
astro_ratios_np[i, j] = num_seconds
continue
if j == 5:
astro_ratios_np[i, j] = num_minutes
continue
if k not in astroA.indices_d:
if 'speed' in k:
if k == 'avg_running_speed':
astro_ratios_np[i, j] = np.mean(astroA.speed_values[astroA.speed_values!=0])
elif k == 'avg_speed_global':
astro_ratios_np[i, j] = np.mean(astroA.speed_values)
else:
print('Not exist', k, astroA.id)
astro_ratios_np[i, j] = 0
continue
else:
astro_ratios_np[i, j] = general_utils.truncate(len(astroA.indices_d[k]) / num_frames, 3)
behaviour_ratios_csv_path = os.path.join(output_experiment_path_all_comparison, 'data', 'behaviour_ratios', 'ratios.csv')
DataFrame(astro_ratios_np, columns=c, index=r).to_csv(behaviour_ratios_csv_path)
'''
'''
print('Saving results of average maximum characteristic values (e.g. Average maximum duration over all astrocyte recordings)')
measure_l = ['area', 'dffMax2', 'duration']
measure_names_l = ['area', 'amplitude', 'duration']
bh_l = ['rest', 'stick_rest', 'running', 'stick_run_ind_15']
settings = ['max', 'meantop10', 'mediantop10', 'meantop5', 'mediantop5']
settings_d_i = {setting: i for i, setting in enumerate(settings)}
np_d = [np.zeros([len(astroA_l), len(bh_l)]) for i in range(len(settings))]
max_np = np.zeros([len(astroA_l), len(bh_l)])
r = [astroA.id for astroA in astroA_l]
#Dictionary of events for each behaviour for each astrocyte.
#events_d_d['astro_id']['behaviour'] = event ids of astro id
events_d_d = {}
for astroA in astroA_l:
d = {'default': astroA.indices_d['default']}
for bh in bh_l:
if bh in astroA.indices_d:
d[bh] = astroA.indices_d[bh]
events_d_d[astroA.print_id] = aqua_utils.get_event_subsets(d, astroA.res_d)
base_path = os.path.join(output_experiment_path_all_comparison, 'data', 'top_average_values')
for m_i, measure in enumerate(measure_l):
for i, astroA in enumerate(astroA_l):
measure_vals_all = astroA.res_d[measure]
bh_events_d = events_d_d[astroA.print_id]
for j, bh in enumerate(bh_l):
if bh in bh_events_d:
#Measure values corresponding to given astrocyte & measure & behaviour
bh_measure_vals = measure_vals_all[bh_events_d[bh]]
bh_measure_vals_s = np.sort(bh_measure_vals)[::-1]
top10 = bh_measure_vals_s[:len(bh_measure_vals_s)//10]
top5 = bh_measure_vals_s[:len(bh_measure_vals_s)//20]
print(astroA.print_id)
if astroA.print_id == 'm181129_d190222_c005_day_0' and bh == 'stick_rest':
print('A')
print(top5)
if astroA.print_id == 'm181129_d190222_c005_day_3' and bh == 'stick_rest':
print('B')
print(top5)
np_d[settings_d_i['max']][i, j] = bh_measure_vals_s[0]
np_d[settings_d_i['meantop10']][i, j] = np.mean(top10)
np_d[settings_d_i['meantop5']][i, j] = np.mean(top5)
np_d[settings_d_i['mediantop10']][i, j] = np.median(top10)
np_d[settings_d_i['mediantop5']][i, j] = np.median(top5)
for setting in settings_d_i.keys():
DataFrame(np_d[settings_d_i[setting]], columns=bh_l, index=r).to_csv(os.path.join(base_path, 'measure={}-type={}.csv'.format(measure_names_l[m_i], setting)))
'''
'''
measure_l = ['time_s', 'dffMax2', 'area']
measure_names = ['Duration(s)', 'Amplitude', 'Area']
print('Calcium signal behaviour change over time')
#How does calcium signals change over recording time?
#1 sort events by time
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'behaviour_over_recording')
for astroA in astroA_l:
for i, measure in enumerate(measure_l):
sorted_ev_i = np.argsort(astroA.res_d['tBegin'])
x = []
y = []
for ev_i in sorted_ev_i:
x.append(ev_i)
y.append(astroA.res_d[measure][ev_i])
fig = plotly_utils.plot_scatter(np.array(x), np.array(y) , mode='markers', title='scatter', x_title='', y_title='')
plotly_utils.apply_fun_axis_fig(fig, lambda x : x / astroA.fr, axis='x')
saving_utils.save_plotly_fig(fig, os.path.join(path, '{}-{}'.format(astroA.print_id, measure_names[i])))
'''
'''
print('Speed over time...')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'behaviour_over_recording')
for astroA in astroA_l:
fig = plotly_utils.plot_scatter(np.arange(len(astroA.speed_values)), astroA.speed_values, mode='lines')
plotly_utils.apply_fun_axis_fig(fig, lambda x : x / astroA.fr, axis='x')
saving_utils.save_plotly_fig(fig, os.path.join(path, '{}-speed'.format(astroA.print_id)))
'''
'''
print('Individual behaviour distribution plots...')
for n_bins in [10, 20, 40, 80]:
#Size, amplitude, signal duration distribution plots over all datasets on different behaviours
for bh in bh_l:
plt_l = []
pth_l = []
for measure, min_measure, max_measure in [
['area', None, 6],
['area', None, None],
['dffMax2', None, 5],
['dffMax2', None, None],
['duration', None, None],
['duration', None, 50]
]:
try:
for with_max in [True, False]:
measure_name = aqua_utils.get_measure_names(measure)
fig_path = os.path.join(output_experiment_path_all_comparison, 'plots', '{}_histogram_comparison'.format(measure_name), '{}-nbins={}-min={}-max={}'.format(bh, n_bins, min_measure, max_measure))
plot, _, _ = self.measure_distribution_plot(astroA_l, bh, measure=measure, num_bins=n_bins, max_measure=max_measure, min_measure=min_measure, measure_name=measure_name)
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA_l[0].fr, axis='x')
saving_utils.save_pth_plt_l_log([plot], [fig_path], axis='x')
except KeyError as e:
print('Got key error: some behaviour its fine {}'.format(e))
'''
'''
#Area: None, 60, num_bins = 10
#Duration: None, 30, num_bins = 10
#dff : 0.6, 5, num_bins = 20
print('Comparing behaviour distribution plots...')
for n_bins in [10, 20]:
print('NUM BINS:', n_bins)
for behaviour_l in [bh_l]: #, ['rest', 'running'], ['running', 'stick'], ['rest', 'stick_rest'], ['running', 'stick_run_ind_15']]:
for measure, min_measure, max_measure in [
['area', None, 60],
['dffMax2', 0.6, 5],
['duration', None, 30],
]:
for confidence in [True]:
for mode in ['MOA', 'MOE']:
measure_name = aqua_utils.get_measure_names(measure)
path = os.path.join(output_experiment_path_all_comparison, 'plots', '{}_histogram_bh_comparison'.format(measure_name), 'behaviours-{}-nbins={}-min={}-max={}-conf={}-mode={}'.format('_'.join(behaviour_l), n_bins, min_measure, max_measure, confidence, mode))
plot, stats_d = self.measure_distribution_bh_compare_plot(astroA_l, behaviour_l, measure=measure, num_bins=n_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name, confidence=confidence, with_stats=True, mode=mode)
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA_l[0].fr, axis='x')
saving_utils.save_pth_plt_l_log([plot], [path], axis='x')
saving_utils.save_pth_plt_l_log([plot], [path], axis='y')
#Save results in text file
for i, name in enumerate(stats_d['names']):
#Create folder
data_folder_path = path
try:
os.makedirs(path)
except:
pass
temp_d = {k : stats_d[k][i] for k in stats_d.keys()}
saving_utils.save_csv_dict(temp_d, os.path.join(data_folder_path, '{}.csv'.format(name)), key_order=['names', 'x', 'mean', 'conf_95', 'std'])
np.savetxt(os.path.join(data_folder_path, '{}-data.csv'.format(name)), np.array(temp_d['data']).transpose(), delimiter=",")
for confidence in [True]:
for with_log in [False, True]:
measure_name = aqua_utils.get_measure_names(measure)
plot, stats_d = self.measure_distribution_bh_compare_plot_exponential_fit(astroA_l, behaviour_l, measure=measure, num_bins=n_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name, confidence=False, with_stats=True, with_log=with_log)
path = os.path.join(output_experiment_path_all_comparison, 'plots', '{}_histogram_bh_comparison'.format(measure_name), 'behaviours-{}-nbins={}-min={}-max={}-conf={}_EXPFIT-withlog={}'.format('_'.join(behaviour_l), n_bins, min_measure, max_measure, confidence, with_log))
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA_l[0].fr, axis='x')
#Save results in text file
for i, name in enumerate(stats_d['names']):
#Create folder
data_folder_path = path
try:
os.makedirs(path)
except:
pass
temp_d = {k : stats_d[k][i] for k in stats_d.keys()}
if len(name.split('__')) == 2:
tx_name = name.split('__')[0] + '_expfit'
else:
tx_name = name
print('TX NAME', name)
saving_utils.save_csv_dict(temp_d, os.path.join(data_folder_path, '{}.csv'.format(tx_name)), key_order=['names', 'x', 'mean', 'conf_95', 'std'])
np.savetxt(os.path.join(data_folder_path, '{}-data.csv'.format(tx_name)), np.array(temp_d['data']).transpose(), delimiter=",")
saving_utils.save_plotly_fig(plot, path)
saving_utils.save_pth_plt_l_log([plot], [path], axis='y')
print('THE STAT HERE?', stats_d)
'''
'''
print('Violin plots...')
plt_l = []
pth_l = []
for max_dff in [2, 5, 10, None]:
#VIOLIN PLOTS comparing TWO behaviour distribution plots (but in violin form)
fig_amp_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'amplitude_histogram_comparison', 'violin_rest_run_dff={}'.format(max_dff))
fig = self.amplitude_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'running', max_dff=max_dff)
#saving_utils.save_plotly_fig(fig, fig_amp_violin_path)
plt_l.append(fig)
pth_l.append(fig_amp_violin_path)
fig_amp_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'amplitude_histogram_comparison', 'violin_run_stick_dff={}'.format(max_dff))
fig = self.amplitude_distribution_plot_violin_duo(astroA_l_filt, 'running', 'stick_run_ind_15', max_dff=max_dff)
#saving_utils.save_plotly_fig(fig, fig_amp_violin_path2)
plt_l.append(fig)
pth_l.append(fig_amp_violin_path)
fig_amp_violin_path3 = os.path.join(output_experiment_path_all_comparison, 'plots', 'amplitude_histogram_comparison', 'violin_rest_stick_dff={}'.format(max_dff))
fig = self.amplitude_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'stick_rest', max_dff=max_dff)
#saving_utils.save_plotly_fig(fig, fig_amp_violin_path)
plt_l.append(fig)
pth_l.append(fig_amp_violin_path3)
for max_area in [9, 20, 40, None]:
sizes_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'sizes_histogram_comparison', 'violin_rest_run_area={}'.format(max_area))
fig = self.sizes_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'running', max_area=max_area)
plt_l.append(fig)
pth_l.append(sizes_violin_path)
sizes_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'sizes_histogram_comparison', 'violin_run_stick_area={}'.format(max_area))
fig = self.sizes_distribution_plot_violin_duo(astroA_l_filt, 'running', 'stick_run_ind_15', max_area=max_area)
plt_l.append(fig)
pth_l.append(sizes_violin_path)
sizes_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'sizes_histogram_comparison', 'violin_rest_stick_area={}'.format(max_area))
fig = self.sizes_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'stick_rest', max_area=max_area)
plt_l.append(fig)
pth_l.append(sizes_violin_path)
for max_duration in [10, 20, 30, 40, None]:
duration_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'duration_histogram_comparison', 'violin_rest_run_duration={}'.format(max_duration))
fig = self.signal_duration_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'running', max_duration=max_duration)
plt_l.append(fig)
pth_l.append(duration_violin_path)
duration_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'duration_histogram_comparison', 'violin_run_stick_duration={}'.format(max_duration))
fig = self.signal_duration_distribution_plot_violin_duo(astroA_l_filt, 'running', 'stick_run_ind_15', max_duration=max_duration)
plt_l.append(fig)
pth_l.append(duration_violin_path)
duration_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'duration_histogram_comparison', 'violin_rest_stick_duration={}'.format(max_duration))
fig = self.signal_duration_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'stick_rest', max_duration=max_duration)
plt_l.append(fig)
pth_l.append(duration_violin_path)
save_pth_plt_l_log(plt_l, pth_l, axis='y')
'''
'''
print('Splits SELF ALL')
#STEP 1
#Take only long duration astrocytes
#Set maximum length of astrocyte duration to be 70min
#Then apply splits with xcorr
data_save_path = os.path.join(output_experiment_path_all_comparison, 'data', 'splits_self_all')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'splits_self_all')
y_l_l = []
x_l = []
minute_frame_splits_l = [35, 30, 25, 20, 15, 10, 5, 2]
cut_duration = 70
param_str = 'cut_{}-'.format(cut_duration) + 'splits_{}-'.format('_'.join([str(m) for m in minute_frame_splits_l]))
name_l = []
for i, astroA in enumerate(astroA_long_l):
curr_save_path = os.path.join(data_save_path, 'id_{}-{}.pkl'.format(astroA.print_id, param_str))
res_d = self.get_compare_full_self_results_alt(astroA, cut_duration_min=cut_duration, minute_frame_splits_l=minute_frame_splits_l, save_pkl_path=curr_save_path)
y_l_l.append(res_d['y'])
x_l.append(res_d['x'])
name_l.append(astroA.print_id)
fig, stats_d = plotly_utils.plot_scatter_mult_with_avg(x_l[0], y_l_l, None, name_l, mode='lines', title='Splits self', x_title='Splits (minutes)', y_title='Correlation',
xrange=None, yrange=None, confidence=True, with_stats=True, point_box=True)
df_data_m = DataFrame(stats_d['mean_l_l'], columns=stats_d['x'], index=stats_d['names'])
df_ci = DataFrame(stats_d['conf_95'], columns=stats_d['x'], index=stats_d['names'])
df_mean = DataFrame([stats_d['mean'], stats_d['mean_conf']], columns=stats_d['x'], index=['mean', 'conf_95'])
df_data_m.to_csv(path + '-data_means.csv')
df_ci.to_csv(path + '-data_ci.csv')
df_mean.to_csv(path + '-mean_and_CI.csv')
saving_utils.save_plotly_fig(fig, path)
'''
'''
print('HEATMAPS V2... (astro days scaled the same (to minimum maximum scale of the 2))')
for astroA_pair in astroA_l_pairs:
for dff_mode in ['False']:
for bh in ['default', 'running', 'rest', 'stick_run_ind_15', 'stick_rest']:
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'behaviour_heatmaps_V2_comparison_scale', self.get_astro_pair_id(astroA_pair))
d = self.get_day_heatmaps_scaled(astroA_pair, bh=bh, dff_mode=dff_mode)
if d is None:
continue
try:
os.makedirs(os.path.join(path))
except:
pass
saving_utils.save_plotly_fig(d['contour_0'], os.path.join(path, 'bh_{}-day_{}-dff_{}'.format(bh, astroA_pair[0].day, dff_mode)))
saving_utils.save_plotly_fig(d['contour_x'], os.path.join(path, 'bh_{}-day_{}-dff_{}'.format(bh, astroA_pair[1].day, dff_mode)))
'''
'''
#TODO FIX THE DOT PLOTS
#TODO CAN JUST ADD ANOTHER LOOP FOR THE BEHAVIOURS LOTS OF REPETITION
bh_l_activity = ['rest', 'running', 'stick_rest', 'stick_run_ind_15']
print('Bar charts and dot plots of all amplitudes, durations, sizes')
#for type_plot in ['dot', 'bar']:
for type_plot in ['bar']:
for error_type in ['std', 'conf']:
for err_symmetric in [True, False]:
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'all_amplitudes', '{}_plot_dff_filter_event_{}_symm{}'.format(type_plot, error_type, err_symmetric))
fig, stats_d = self.get_all_signal_attribute_plot(astroA_l_s, bh_l_activity, type_plot=type_plot, type_event='dffMax2',
y_title='Amplitude', title='Amplitudes', error_type=error_type, err_symmetric=err_symmetric, with_stats=True)
saving_utils.save_plotly_fig(fig, path)
saving_utils.save_csv_dict(stats_d, path + '.csv', key_order=['behaviour', 'mean', 'std', 'conf_95'])
saving_utils.save_csv_dict(stats_d['data'], path +'-data.csv', key_order=stats_d['behaviour'])
len_d = {k: [len(stats_d['data'][k])] for k in stats_d['data'].keys()}
saving_utils.save_csv_dict(len_d, path +'-len_data.csv', key_order=stats_d['behaviour'])
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'all_amplitudes', '{}_plot_dff_notfiltered_{}_symm{}'.format(type_plot, error_type, err_symmetric))
fig, stats_d = self.get_all_signal_attribute_plot(astroA_l_s, bh_l_activity, type_plot=type_plot, type_event='dffMax',
y_title='Amplitude', title='Amplitudes', error_type=error_type, err_symmetric=err_symmetric, with_stats=True)
saving_utils.save_plotly_fig(fig, path)
saving_utils.save_csv_dict(stats_d, path + '.csv', key_order=['behaviour', 'mean', 'std', 'conf_95'])
saving_utils.save_csv_dict(stats_d['data'], path +'-data.csv', key_order=stats_d['behaviour'])
len_d = {k: [len(stats_d['data'][k])] for k in stats_d['data'].keys()}
saving_utils.save_csv_dict(len_d, path +'-len_data.csv', key_order=stats_d['behaviour'])
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'all_durations', '{}_plot_{}_symm{}'.format(type_plot, error_type, err_symmetric))
fig, stats_d = self.get_all_signal_attribute_plot(astroA_l_s, bh_l_activity, type_plot=type_plot, type_event='time_s',
y_title='Duration (s)', title='Event durations', error_type=error_type, err_symmetric=err_symmetric, with_stats=True)
saving_utils.save_plotly_fig(fig, path)
saving_utils.save_csv_dict(stats_d, path + '.csv', key_order=['behaviour', 'mean', 'std', 'conf_95'])
saving_utils.save_csv_dict(stats_d['data'], path +'-data.csv', key_order=stats_d['behaviour'])
len_d = {k: [len(stats_d['data'][k])] for k in stats_d['data'].keys()}
saving_utils.save_csv_dict(len_d, path +'-len_data.csv', key_order=stats_d['behaviour'])
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'all_sizes', '{}_plot_{}_symm{}'.format(type_plot, error_type, err_symmetric))
fig, stats_d = self.get_all_signal_attribute_plot(astroA_l_s, bh_l_activity, type_plot=type_plot, type_event='area',
y_title='Event sizes (\u03bcm<sup>2</sup>)', title='Sizes of events', error_type=error_type, err_symmetric=err_symmetric, with_stats=True)
saving_utils.save_plotly_fig(fig, path)
saving_utils.save_csv_dict(stats_d, path + '.csv', key_order=['behaviour', 'mean', 'std', 'conf_95'])
saving_utils.save_csv_dict(stats_d['data'], path +'-data.csv', key_order=stats_d['behaviour'])
len_d = {k: [len(stats_d['data'][k])] for k in stats_d['data'].keys()}
saving_utils.save_csv_dict(len_d, path +'-len_data.csv', key_order=stats_d['behaviour'])
'''
'''
print('COMPARE THIS', len(astroA_l_filt), 'WITH THIS', len(astroA_l_s))
for astroA in astroA_l_filt:
for bh_k in bh_l_activity:
if bh_k not in astroA.event_subsets.keys():
print('SHOULD NOT HAPPEND BH ', bh_k, 'NOT IN', astroA.print_id)
for type_plot in ['bar']:
for error_type in ['std', 'conf']:
for err_symmetric in [True, False]:
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'all_amplitudes_filt_bh', '{}_plot_dff_filter_event_{}_symm{}'.format(type_plot, error_type, err_symmetric))
fig, stats_d = self.get_all_signal_attribute_plot(astroA_l_filt, bh_l_activity, type_plot=type_plot, type_event='dffMax2',
y_title='Amplitude', title='Amplitudes', error_type=error_type, err_symmetric=err_symmetric, with_stats=True)
saving_utils.save_plotly_fig(fig, path)
saving_utils.save_csv_dict(stats_d, path + '.csv', key_order=['behaviour', 'mean', 'std', 'conf_95'])
saving_utils.save_csv_dict(stats_d['data'], path +'-data.csv', key_order=stats_d['behaviour'])
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'all_amplitudes_filt_bh', '{}_plot_dff_notfiltered_{}_symm{}'.format(type_plot, error_type, err_symmetric))
fig, stats_d = self.get_all_signal_attribute_plot(astroA_l_filt, bh_l_activity, type_plot=type_plot, type_event='dffMax',
y_title='Amplitude', title='Amplitudes', error_type=error_type, err_symmetric=err_symmetric, with_stats=True)
saving_utils.save_plotly_fig(fig, path)
saving_utils.save_csv_dict(stats_d, path + '.csv', key_order=['behaviour', 'mean', 'std', 'conf_95'])
saving_utils.save_csv_dict(stats_d['data'], path +'-data.csv', key_order=stats_d['behaviour'])
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'all_durations_filt_bh', '{}_plot_{}_symm{}'.format(type_plot, error_type, err_symmetric))
fig, stats_d = self.get_all_signal_attribute_plot(astroA_l_filt, bh_l_activity, type_plot=type_plot, type_event='time_s',
y_title='Duration (s)', title='Event durations', error_type=error_type, err_symmetric=err_symmetric, with_stats=True)
saving_utils.save_plotly_fig(fig, path)
saving_utils.save_csv_dict(stats_d, path + '.csv', key_order=['behaviour', 'mean', 'std', 'conf_95'])
saving_utils.save_csv_dict(stats_d['data'], path +'-data.csv', key_order=stats_d['behaviour'])
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'all_sizes_filt_bh', '{}_plot_{}_symm{}'.format(type_plot, error_type, err_symmetric))
fig, stats_d = self.get_all_signal_attribute_plot(astroA_l_filt, bh_l_activity, type_plot=type_plot, type_event='area',
y_title='Event sizes (\u03bcm<sup>2</sup>)', title='Sizes of events', error_type=error_type, err_symmetric=err_symmetric, with_stats=True)
saving_utils.save_plotly_fig(fig, path)
saving_utils.save_csv_dict(stats_d, path + '.csv', key_order=['behaviour', 'mean', 'std', 'conf_95'])
saving_utils.save_csv_dict(stats_d['data'], path +'-data.csv', key_order=stats_d['behaviour'])
'''
        print('--------------------------------------------------------------------------------------------------')
        print('Distribution of pixel values real vs fake...')
        # Compare the distribution of (min-max normalized) pixel intensities between
        # the real 1-minute event grids and synthetic ("fake") sampled heatmaps.
        # Outputs: one plot + data/stats CSVs under plots/pixel_distribution/{real,fake}.
        path = os.path.join(output_experiment_path_all_comparison, 'plots', 'pixel_distribution')
        x_l = []
        y_l = []
        # One histogram series per astrocyte; names label the CSV rows below.
        name_l = [astroA.print_id for astroA in astroA_l]
        for astroA in astroA_l:
            # assumes event_grids_1min['default'] is a 2D per-pixel event-count grid — TODO confirm
            grid = astroA.event_grids_1min['default']
            # Min-max rescale each grid to [0, 1] so astrocytes are comparable.
            grid = np.interp(grid, (grid.min(), grid.max()), (0, 1))
            grid_flat = grid.flatten()
            # Drop zero pixels (background) before histogramming.
            grid_flat_nz = grid_flat[grid_flat != 0]
            hist, bin_edges = np.histogram(grid_flat_nz, bins=20, range=(0,1), density=True)
            # Convert density to per-bin probability mass (bins are uniform width).
            hist = hist * (bin_edges[1] - bin_edges[0])
            print('HIST SUM', np.sum(hist))
            # NOTE: x_l is rebound (not appended) each iteration — all astrocytes share
            # the same bin edges, so only the last assignment is kept.
            x_l = bin_edges[:-1]
            y_l.append(hist)
        # Transpose y_l from per-astrocyte rows to per-bin columns:
        # y_l_fmt[i] = values of bin i across all astrocytes.
        y_l_fmt = []
        for i in range(len(y_l[0])):
            y_l_fmt.append([y[i] for y in y_l])
        plot_path = os.path.join(path, 'real')
        # Real grids get an exponential fit (exp_fit=True); stats_d carries mean/conf_95/fit.
        fig, stats_d = plotly_utils.plot_scatter_error(x_l, y_l_fmt, x_title='Pixel intensity percentile', y_title='Frequency (Density)', exp_fit=True, with_details=True)
        saving_utils.save_plotly_fig(fig, plot_path)
        # Per-astrocyte histogram rows (astrocyte x bin) and summary stats to CSV.
        df_data = DataFrame(np.array(stats_d['data']).T, columns=x_l, index=name_l)
        df_stats = DataFrame([stats_d['mean'], stats_d['conf_95'], stats_d['fit']], columns=x_l, index=['mean', 'conf_95', 'fit'])
        df_data.to_csv(plot_path + '-data.csv')
        df_stats.to_csv(plot_path +'-stats.csv')
        # --- Fake (sampled) grids: one threshold-scaled random sample per astrocyte ---
        sample_l_all = []
        for astroA in astroA_l:
            d = self.get_individual_heatmaps_threshold_scaled(astroA, bh='default', threshold=1, num_samples=1, dff_mode=False, with_arr=True)
            # presumably arrs_d['arr_r'] holds the sampled arrays; take the single sample — verify in helper
            sample_l_all.append(d['arrs_d']['arr_r'][0])
        x_l = []
        y_l = []
        # Same histogram pipeline as above, applied to the fake samples.
        for grid in sample_l_all:
            grid = np.interp(grid, (grid.min(), grid.max()), (0, 1))
            grid_flat = grid.flatten()
            grid_flat_nz = grid_flat[grid_flat != 0]
            # Normalize values to 1 (in-place; extra step vs the real-grid loop above)
            grid_flat_nz /= np.max(grid_flat_nz)
            hist, bin_edges = np.histogram(grid_flat_nz, bins=20, range=(0,1), density=True)
            hist = hist * (bin_edges[1] - bin_edges[0])
            print('HIST SUM', np.sum(hist))
            x_l = bin_edges[:-1]
            y_l.append(hist)
        y_l_fmt = []
        for i in range(len(y_l[0])):
            y_l_fmt.append([y[i] for y in y_l])
        plot_path = os.path.join(path, 'fake')
        # No exponential fit for the fake distribution (exp_fit=False), hence no 'fit' row below.
        fig, stats_d = plotly_utils.plot_scatter_error(x_l, y_l_fmt, x_title='Pixel intensity percentile', y_title='Frequency (Density)', exp_fit=False, with_details=True)
        saving_utils.save_plotly_fig(fig, plot_path)
        # Fake-data CSVs have no astrocyte index (samples are anonymous).
        df_data = DataFrame(np.array(stats_d['data']).T, columns=x_l)
        df_stats = DataFrame([stats_d['mean'], stats_d['conf_95']], columns=x_l, index=['mean', 'conf_95'])
        df_data.to_csv(plot_path + '-data.csv')
        df_stats.to_csv(plot_path +'-stats.csv')
        print('--------------------------------------------------------------------------------------------------')
'''
print('SINGLE BAR CHART OF BEHAVIOURS (REST, RUN) of all astrocytes')
names_l = ['amplitude', 'size', 'duration']
measure_l = ['dffMax2', 'area', 'time_s' ]
for i, measure in enumerate(measure_l):
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'bar_rest_run_all', '{}'.format(names_l[i]))
plot = self.get_measure_all_bar_plot(astroA_l, measure, bh_list=['rest', 'running'])
saving_utils.save_plotly_fig(plot, plot_path)
'''
'''
names_l = ['Event number (per minute)', 'amplitude', 'size', 'duration']
measure_l = [None, 'dffMax2', 'area', 'time_s']
bh_list_pairs = [['rest', 'running'], ['rest', 'stick_rest'], ['running', 'stick_run_ind_15']]
bh_list_pairs_names = ['rest_run', 'rest_rest_stick', 'run_run_stick']
for j, bh_list_pair in enumerate(bh_list_pairs):
for i, measure in enumerate(measure_l):
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'bar_{}_all'.format(bh_list_pairs_names[j]), '{}'.format('dots_'+names_l[i]))
if 'stick_rest' in bh_list_pair:
plot, stats_d = self.get_measure_all_dot_plot(astroA_l_filt, measure, bh_list=bh_list_pair)
else:
plot, stats_d = self.get_measure_all_dot_plot(astroA_l, measure, bh_list=bh_list_pair)
saving_utils.save_plotly_fig(plot, plot_path)
with open(os.path.join(plot_path + '.csv'), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
l = ['']
l.extend(stats_d['x'])
l.extend(['conf_0', 'conf_1'])
writer.writerow(l)
for i in range(len(stats_d['names'])):
l = [stats_d['names'][i]]
l.extend(stats_d['mean_l_l'][i])
if 'conf_95' in stats_d:
l.extend(stats_d['conf_95'][i])
writer.writerow(l)
writer.writerow('')
writer.writerow(['mean_0', 'mean_1', 'mean_conf_0', 'mean_conf_1'])
l = []
l.extend(stats_d['mean'])
l.extend(stats_d['mean_conf'])
writer.writerow(l)
'''
"""
print('With transitions before and after measures dot plot')
names_l = ['Event number (per minute)', 'amplitude', 'size', 'duration']
measure_l = [None, 'dffMax2', 'area', 'time_s']
delay_ranges_pairs = [ [3*astroA_l[0].fr, 6*astroA_l[0].fr],
#[1*astroA_l[0].fr, 1*astroA_l[0].fr],
#[2*astroA_l[0].fr, 4*astroA_l[0].fr]
]
delay_ranges_pairs = [[int(v[0]), int(v[1])] for v in delay_ranges_pairs]
for delay_ranges_pair in delay_ranges_pairs:
before_range, after_range = delay_ranges_pair
for i, measure in enumerate(measure_l):
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'bar_run_stick_run_transition_all', 'range_{}_{}_{}'.format(before_range, after_range, 'dots_'+names_l[i]))
plot, stats_d = self.get_measure_all_transition_dot_plot(astroA_l, measure, before_bh='running_semi_exact',
inds_bh='stick_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range)
saving_utils.save_plotly_fig(plot, plot_path)
with open(os.path.join(plot_path + '.csv'), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
l = ['']
l.extend(stats_d['x'])
l.extend(['conf_0', 'conf_1'])
writer.writerow(l)
for i in range(len(stats_d['names'])):
l = [stats_d['names'][i]]
l.extend(stats_d['mean_l_l'][i])
if 'conf_95' in stats_d:
l.extend(stats_d['conf_95'][i])
writer.writerow(l)
writer.writerow('')
writer.writerow(['mean_0', 'mean_1', 'mean_conf_0', 'mean_conf_1'])
l = []
l.extend(stats_d['mean'])
l.extend(stats_d['mean_conf'])
writer.writerow(l)
"""
"""
#TODO ADD CSV
bh_l_activity = ['rest', 'running', 'stick_rest', 'stick_run_ind_15']
print('Activity all bar plot...')
plot, stats_d = self.get_behaviour_activity_bar_plot_all(astroA_l_s, bh_l_activity, with_stats=True)
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'activity_all', 'activity_bar')
saving_utils.save_plotly_fig(plot, plot_path)
print('Activity all number events per minute bar plot...')
plot, stats_d = self.get_behaviour_activity_number_bar_plot_all(astroA_l_s, bh_l_activity, with_stats=True)
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'activity_all_number_minute', 'activity_bar')
saving_utils.save_plotly_fig(plot, plot_path)
"""
'''
bh_l_activity = ['rest', 'running', 'stick_rest', 'stick_run_ind_15']
print('Activity all dot plot...')
plot, stats_d = self.get_behaviour_activity_dot_plot_all(astroA_l_s, bh_l_activity)
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'activity_all', 'activity_dot')
saving_utils.save_plotly_fig(plot, plot_path)
saving_utils.save_csv_dict(stats_d, plot_path+'.csv', key_order=['x', 'mean', 'conf_95'])
print(stats_d['data'])
#print(stats_d['data'].shape)
DataFrame(stats_d['data'], columns=[astroA.print_id for astroA in astroA_l_s], index=stats_d['x']).to_csv(plot_path + '-data.csv')
'''
'''
df_data_m = DataFrame(stats_d['mean_l_l'], columns=stats_d['x'], index=stats_d['names'])
df_mean_conf = DataFrame([stats_d['mean'], stats_d['mean_conf']], columns=stats_d['x'], index=['mean', 'conf_95'])
df_data_m.to_csv(path + '-data.csv')
df_mean_conf.to_csv(path + '-mean_and_CI.csv')
'''
"""
print('Activity all dot plot with lines...')
print(len(astroA_l_filt))
plot, stats_d = self.get_behaviour_activity_dot_plot_all(astroA_l_filt, bh_l_activity, lines=True)
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'activity_all', 'activity_dot_lines')
saving_utils.save_plotly_fig(plot, plot_path)
print('Activity all number events per minute dot plot...')
plot, stats_d = self.get_behaviour_activity_number_dot_plot_all(astroA_l_s, bh_l_activity)
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'activity_all_number_minute', 'activity_dot')
saving_utils.save_plotly_fig(plot, plot_path)
saving_utils.save_csv_dict(stats_d, plot_path+'.csv', key_order=['x', 'mean', 'conf_95'])
print('Activity all number events per minute dot plot...')
plot, stats_d = self.get_behaviour_activity_number_dot_plot_all(astroA_l_filt, bh_l_activity, lines=True)
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'activity_all_number_minute', 'activity_dot_lines')
saving_utils.save_plotly_fig(plot, plot_path)
"""
'''
print('Plotting bar plots (triplet plot bands) num_events, duration, amplitude for ALL TOGETHER')
measure_names = [None, 'Area', 'Amplitude', 'Time (s)']
for bh in ['default', 'rest', 'running', 'stick', 'stick_rest', 'stick_run_ind_15']:
for i, measure in enumerate([None, 'area', 'dffMax2', 'time_s']):
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'triplet_dot_all', '{}_{}'.format(bh, measure))
if bh in astroA.event_subsets:
fig, stats_d = self.triplet_dot_plot_all(astroA_l_s, bh=bh, measure=measure, n_bins=8, y_title=measure_names[i])
print('SAVING TRIPLET DOT ALL')
saving_utils.save_plotly_fig(fig, path)
print(stats_d.keys())
#Saving events only, we don't have CI's for each astrocyte
if measure is None:
df_data_m = DataFrame(stats_d['mean_l_l'], columns=stats_d['x'], index=stats_d['names'])
df_mean_conf = DataFrame([stats_d['mean'], stats_d['mean_conf']], columns=stats_d['x'], index=['mean', 'conf_95'])
df_data_m.to_csv(path + '-data.csv')
df_mean_conf.to_csv(path + '-mean_and_CI.csv')
else:
df_data_m = DataFrame(stats_d['mean_l_l'], columns=stats_d['x'], index=stats_d['names'])
df_ci = DataFrame(stats_d['conf_95'], columns=stats_d['x'], index=stats_d['names'])
df_mean = DataFrame([stats_d['mean'], stats_d['mean_conf']], columns=stats_d['x'], index=['mean', 'conf_95'])
df_data_m.to_csv(path + '-data_means.csv')
df_ci.to_csv(path + '-data_ci.csv')
df_mean.to_csv(path + '-mean_and_CI.csv')
'''
"""
#--------------------------------------------------
#--------------------------------------------------
#--------------------------------------------------
##REST TO RUN , RUN TO REST, RUN STICK RUN SECTION
#--------------------------------------------------
#--------------------------------------------------
#--------------------------------------------------
print('Alternative run-rest/rest-run averaging individual lines')
delay_ranges_pairs = [ [3*astroA_l[0].fr, 6*astroA_l[0].fr],
[1*astroA_l[0].fr, 1*astroA_l[0].fr],
[2*astroA_l[0].fr, 4*astroA_l[0].fr]]
delay_ranges_pairs = [[int(v[0]), int(v[1])] for v in delay_ranges_pairs]
#measure_l = ['dffMax2default', 'dffMax2', 'time_s', 'area']
#measure_path_l = ['amplitudes_default', 'amplitudes', 'durations', 'sizes']
#measure_y_titles = ['Amplitude', 'Amplitude', 'Duration (s)', 'Size']
measure_l = ['dffMax2default', 'time_s', 'area']
measure_path_l = ['amplitudes_default', 'durations', 'sizes']
measure_y_titles = ['Amplitude', 'Duration (s)', 'Size']
measure_l = ['dffMax2default']
measure_path_l = ['amplitudes_default']
measure_y_titles = ['Amplitude']
bh_measure_l = ['speed']
bh_measure_path_l = ['speed']
bh_measure_y_titles = ['Speed (cm/s)']
print('Alt Proportion plots...')
for delay_ranges_pair in delay_ranges_pairs:
before_range, after_range = delay_ranges_pair
for p in [#{'fit' : True, 'delay_step_size' : 1, 'confidence' : True},
#{'fit' : True, 'delay_step_size' : 5, 'confidence' : True},
{'fit' : True, 'delay_step_size' : 10, 'confidence': True}
]:
################################################
##############Proportion plots##################
################################################
print('EXTRA PARS', p, p.keys())
print('rest to run prop')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_proportions_alt')
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='rest_semi_exact', inds_bh='running_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range,
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('run to rest prop')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_proportions_alt')
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='running_semi_exact', inds_bh='rest_start', after_bh='rest_semi_exact',
before_range=before_range, after_range=after_range,
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('run stick hit run prop')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_proportions_alt')
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='running_semi_exact', inds_bh='stick_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range,
**p)
for fig_k in fig_d:
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('run stick hit run prop duration filter [None, 3]')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_proportions_alt_filter_max_3_frames')
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='running_semi_exact', inds_bh='stick_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range, duration_filter=[None, 3],
**p)
for fig_k in fig_d:
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('run stick hit run prop duration filter [None, 5]')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_proportions_alt_filter_max_5_frames')
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='running_semi_exact', inds_bh='stick_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range, duration_filter=[None, 5],
**p)
for fig_k in fig_d:
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
'''
################################################
##############Measure plots#####################
################################################
'''
for m_i, measure in enumerate(measure_l):
print('rest to run measure: {}'.format(measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_{}_alt'.format(measure_path_l[m_i]))
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='rest_semi_exact', inds_bh='running_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[m_i],
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('run to rest measure: {}'.format(measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_{}_alt'.format(measure_path_l[m_i]))
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='running_semi_exact', inds_bh='rest_start', after_bh='rest_semi_exact',
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[m_i],
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('run stick hit run measure: {}'.format(measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_{}_alt'.format(measure_path_l[m_i]))
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='running_semi_exact', inds_bh='stick_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[m_i],
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('run stick hit run measure: max frames 3 {}'.format(measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_{}_alt_filter_max_3_frames'.format(measure_path_l[m_i]))
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='running_semi_exact', inds_bh='stick_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[m_i], duration_filter=[None, 3],
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('run stick hit run measure: max frames 5 {}'.format(measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_{}_alt_filter_max_5_frames'.format(measure_path_l[m_i]))
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='running_semi_exact', inds_bh='stick_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[m_i], duration_filter=[None, 5],
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
################################################
##############Behaviour measure plots###########
################################################
for m_i, bh_measure in enumerate(bh_measure_l):
print('BH measure {} rest-run'.format(bh_measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_{}_alt'.format(bh_measure_path_l[m_i]))
fig_d, bin_stats = self.get_transition_bh_values_plot_all_alt(astroA_l,
before_bh='rest_semi_exact', inds_bh='running_exact_start', after_bh='running_semi_exact',
bh_measure=bh_measure,
before_range=before_range, after_range=after_range,
y_title=bh_measure_y_titles[m_i],
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('BH measure {} run-rest'.format(bh_measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_{}_alt'.format(bh_measure_path_l[m_i]))
fig_d, bin_stats = self.get_transition_bh_values_plot_all_alt(astroA_l,
before_bh='running_semi_exact', inds_bh='rest_start', after_bh='rest_semi_exact',
bh_measure=bh_measure,
before_range=before_range, after_range=after_range,
y_title=bh_measure_y_titles[m_i],
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('BH measure {} run-stick-run'.format(bh_measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_{}_alt'.format(bh_measure_path_l[m_i]))
fig_d, bin_stats = self.get_transition_bh_values_plot_all_alt(astroA_l,
before_bh='running_semi_exact', inds_bh='stick_exact_start', after_bh='running_semi_exact',
bh_measure=bh_measure,
before_range=before_range, after_range=after_range,
y_title=bh_measure_y_titles[m_i],
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
"""
"""
print('OUTLIERS TRANSITION PLOTS...')
delay_ranges_pairs = [ [3*astroA_l[0].fr, 6*astroA_l[0].fr],
[1*astroA_l[0].fr, 1*astroA_l[0].fr],
[2*astroA_l[0].fr, 4*astroA_l[0].fr]
]
delay_ranges_pairs = [[int(v[0]), int(v[1])] for v in delay_ranges_pairs]
measure_l = ['dffMax2default', 'time_s', 'area']
measure_path_l = ['amplitudes_default', 'durations', 'sizes']
measure_y_titles = ['Amplitude', 'Duration (s)', 'Size']
for delay_ranges_pair in delay_ranges_pairs:
before_range, after_range = delay_ranges_pair
for m_i, measure in enumerate(measure_l):
print('rest to run measure: {}'.format(measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_{}_outlier_alt'.format(measure_path_l[m_i]))
fig, stats_d = self.get_transition_outliers_plot(astroA_l, before_bh='rest_semi_exact', inds_bh='running_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[m_i])
fig_id = os.path.join(path, 'outlier_range_{}_{}'.format(before_range, after_range))
saving_utils.save_plotly_fig(fig, fig_id)
with open(os.path.join(fig_id + '.csv'), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i in range(len(stats_d['names'])):
l = [stats_d['names'][i]]
l.extend(stats_d['mean'][i])
writer.writerow(l)
print('run to rest measure: {}'.format(measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_{}_outlier_alt'.format(measure_path_l[m_i]))
fig, stats_d = self.get_transition_outliers_plot(astroA_l, before_bh='running_semi_exact', inds_bh='rest_start', after_bh='rest_semi_exact',
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[m_i])
fig_id = os.path.join(path, 'outlier_range_{}_{}'.format(before_range, after_range))
saving_utils.save_plotly_fig(fig, fig_id)
with open(os.path.join(fig_id + '.csv'), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i in range(len(stats_d['names'])):
l = [stats_d['names'][i]]
l.extend(stats_d['mean'][i])
writer.writerow(l)
print('run stick hit run measure: {}'.format(measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_{}_outlier_alt'.format(measure_path_l[m_i]))
fig, stats_d = self.get_transition_outliers_plot(astroA_l, before_bh='running_semi_exact', inds_bh='stick_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[m_i])
fig_id = os.path.join(path, 'outlier_range_{}_{}'.format(before_range, after_range))
saving_utils.save_plotly_fig(fig, fig_id)
with open(os.path.join(fig_id + '.csv'), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i in range(len(stats_d['names'])):
l = [stats_d['names'][i]]
l.extend(stats_d['mean'][i])
writer.writerow(l)
"""
"""
print('Correlation plots ALL')
if astroA_l_pairs is not None:
for dff_mode in [False]:
#for align_setting in ['xcorr', 'xcorr_free']:
for align_setting in ['xcorr']:
#for filter_duration in [[None, None], [None, 1], [1, None]]:
for filter_duration in [[None, None], [None, 1], [1, None]]:
for bh in ['default', 'rest', 'running', 'stick']:
main_folder_id = 'correlations_no_align' if align_setting == 'xcorr_free' else 'correlations'
if (filter_duration[0] == None and filter_duration[1] == 1):
main_folder_id += '_short_events'
if (filter_duration[0] == 1 and filter_duration[1] == None):
main_folder_id += '_long_events'
fig_corr_path = os.path.join(output_experiment_path_all_comparison, 'plots', main_folder_id, 'xcorr_compare_{}_is_dff_{}'.format(bh, dff_mode))
save_results_path = os.path.join(output_experiment_path_all_comparison, 'data', main_folder_id, 'xcorr_compare_{}_is_dff_{}'.format(bh, dff_mode))
fig, pair_fakes_before, pair_fakes, pair_corrs_l_before, pair_corrs_l, days_id_l = self.get_compare_align_plot_xcorr_all(astroA_l_pairs, align_setting='xcorr', dff_mode=dff_mode, behaviour=bh, n_fake_samples=25 ,save_results_path=save_results_path)
saving_utils.save_plotly_fig(fig, fig_corr_path)
csv_corr_path = os.path.join(output_experiment_path_all_comparison, 'plots', main_folder_id + '_csv', 'xcorr_compare_{}_is_dff_{}.csv'.format(bh, dff_mode))
self.save_xcorr_pairs_align_results_csv(csv_corr_path, astroA_l_pairs, pair_fakes_before, pair_fakes, pair_corrs_l_before, pair_corrs_l)
"""
'''
print('Correlation plots (rest 0-1, run 0-1, rest-stick 0-1, run-stick 0-1, all, random)')
file_id = 'xcorr_compare_states_all'
if astroA_l_pairs is not None:
for dff_mode in [False]:
#for align_setting in ['xcorr', 'xcorr_free']:
for align_setting in ['xcorr']:
for astro_pair in astroA_l_pairs:
#for filter_duration in [[None, None], [None, 1], [1, None]]:
for filter_duration in [[None, None]]:
main_folder_id = 'correlations_no_align' if align_setting == 'xcorr_free' else 'correlations'
if (filter_duration[0] == None and filter_duration[1] == 1):
main_folder_id += '_short_events'
if (filter_duration[0] == 1 and filter_duration[1] == None):
main_folder_id += '_long_events'
fig_corr_path = os.path.join(output_experiment_path_all_comparison, 'plots', main_folder_id, 'pair_{}_type_{}_is_dff_{}'.format(self.get_astro_pair_id(astro_pair), file_id, dff_mode))
save_pkl_path = os.path.join(output_experiment_path_all_comparison, 'data', main_folder_id, 'pair_{}_type_{}_is_dff_{}.pkl'.format(self.get_astro_pair_id(astro_pair), file_id, dff_mode))
csv_corr_path = os.path.join(output_experiment_path_all_comparison, 'plots', main_folder_id + '_csv', 'pair_{}_type_{}_is_dff_{}.csv'.format(self.get_astro_pair_id(astro_pair), file_id, dff_mode))
behaviour_list_compare =['rest', 'running', 'stick_rest', 'stick_run_ind_15', 'default']
fig, res_d = self.get_compare_states_all_xcorr(astro_pair, align_setting=align_setting, dff_mode=dff_mode, n_fake_samples=1, save_pkl_path=save_pkl_path, filter_duration=filter_duration,
behaviour_l=behaviour_list_compare)
saving_utils.save_plotly_fig(fig, fig_corr_path)
saving_utils.save_csv_dict(res_d, csv_corr_path, key_order=behaviour_list_compare)
'''
'''
print('Correlation plots (rest 0 run 0, rest 1 run 1, random)')
file_id = 'xcorr_compare_between_states'
if astroA_l_pairs is not None:
for dff_mode in [False]:
#for align_setting in ['xcorr', 'xcorr_free']:
for align_setting in ['xcorr']:
for astro_pair in astroA_l_pairs:
#for filter_duration in [[None, None], [None, 1], [1, None]]:
for filter_duration in [[None, None]]:
main_folder_id = 'correlations_no_align' if align_setting == 'xcorr_free' else 'correlations'
if (filter_duration[0] == None and filter_duration[1] == 1):
main_folder_id += '_short_events'
if (filter_duration[0] == 1 and filter_duration[1] == None):
main_folder_id += '_long_events'
fig_corr_path = os.path.join(output_experiment_path_all_comparison, 'plots', main_folder_id, 'pair_{}_type_{}_is_dff_{}'.format(self.get_astro_pair_id(astro_pair), file_id, dff_mode))
save_pkl_path = os.path.join(output_experiment_path_all_comparison, 'data', main_folder_id, 'pair_{}_type_{}_is_dff_{}.pkl'.format(self.get_astro_pair_id(astro_pair), file_id, dff_mode))
csv_corr_path = os.path.join(output_experiment_path_all_comparison, 'plots', main_folder_id + '_csv', 'pair_{}_type_{}_is_dff_{}.csv'.format(self.get_astro_pair_id(astro_pair), file_id, dff_mode))
fig, res_d = self.get_compare_states_same_astro_all_xcorr(astro_pair, align_setting=align_setting, dff_mode=dff_mode, n_fake_samples=100, save_pkl_path=save_pkl_path, filter_duration=filter_duration)
print('RES D', res_d)
saving_utils.save_plotly_fig(fig, fig_corr_path)
saving_utils.save_csv_dict(res_d, csv_corr_path, key_order=list(res_d.keys()))
'''
#TODO RUUN THESE AGAIN
"""
#USING GOOD PAIRS FROM HERE ON
#RUN AFTER
file_id = 'xcorr_compare_between_group'
if astroA_l_good_pairs is not None:
for dff_mode in [False]:
#for align_setting in ['xcorr', 'xcorr_free']:
#NOT USING ALIGN SETTING
for align_setting in ['xcorr']:
#for filter_duration in [[None, None], [None, 1], [1, None]]:
#for filter_duration in [[None, None], [None, 1], [1, None]]:
for filter_duration in [[None, None]]:
main_folder_id = 'correlations_no_align' if align_setting == 'xcorr_free' else 'correlations'
if (filter_duration[0] == None and filter_duration[1] == 1):
main_folder_id += '_short_events'
if (filter_duration[0] == 1 and filter_duration[1] == None):
main_folder_id += '_long_events'
fig_corr_path = os.path.join(output_experiment_path_all_comparison, 'plots', main_folder_id, 'type_{}_is_dff_{}'.format(file_id, dff_mode))
save_pkl_path = os.path.join(output_experiment_path_all_comparison, 'data', main_folder_id, 'type_{}_is_dff_{}.pkl'.format(file_id, dff_mode))
csv_corr_path = os.path.join(output_experiment_path_all_comparison, 'plots', main_folder_id + '_csv', 'type_{}_is_dff_{}.csv'.format(file_id, dff_mode))
fig, res_d = self.get_compare_between_group_xcorr(astroA_l_good_pairs, dff_mode=dff_mode, n_fake_samples=5, save_pkl_path=save_pkl_path, filter_duration=filter_duration)
saving_utils.save_plotly_fig(fig, fig_corr_path)
saving_utils.save_csv_dict(res_d, csv_corr_path, key_order=list(res_d.keys()))
"""
"""
save_folder = os.path.join(output_experiment_path_all_comparison, 'data', 'control')
plot_folder = os.path.join(output_experiment_path_all_comparison, 'plots', 'control')
print('CONTROLS plot')
print('Recombination results...')
save_recombination_pkl_path = os.path.join(save_folder, 'recombination.pkl')
fig, res_d = self.get_compare_between_group_xcorr(astroA_l_good_pairs, dff_mode=False, n_fake_samples=1, save_pkl_path=save_recombination_pkl_path)
recombination_corrs = res_d['between']
recombination_rand_corrs = res_d['random']
print('Recombination CORRS', recombination_corrs)
print('Recombination rand corrs', recombination_rand_corrs)
#between_id
#between
#random
print('Random sample results...')
save_random_pair_pkl_path = os.path.join(save_folder, 'random_pair.pkl')
if os.path.isfile(save_random_pair_pkl_path):
print('FILE EXISTS', save_random_pair_pkl_path)
random_pair_corrs = saving_utils.load_pickle(save_random_pair_pkl_path)
else:
random_pair_corrs = []
for astroA_pair in astroA_l_good_pairs:
d = compare_astro_utils.alignment_counter(astroA_pair[0], astroA_pair[1],
n_fake_samples=10,
align_setting='xcorr',
eval_setting='xcorr',
fake_sample_setting='from_astro',
p=1,
behaviour='default',
filter_duration=[None, None],
with_output_details=True)
random_pair_corrs.extend(d['num_fake'])
saving_utils.save_pickle(random_pair_corrs, save_random_pair_pkl_path)
print('Random pair corrs:', random_pair_corrs)
print('Flip control results...')
save_flip_pkl_path = os.path.join(save_folder, 'flip.pkl')
if os.path.isfile(save_flip_pkl_path):
print('File exists', save_flip_pkl_path)
flip_corrs = saving_utils.load_pickle(save_flip_pkl_path)
else:
flip_corrs = []
for astroA in astroA_l_good:
for num_rot in range(1, 6):
astro_grid, _, _,_ = compare_astro_utils.get_filters_compare([astroA], p=1, dff_mode=False, behaviour='default')
astro_grid = astro_grid[0]
astro_grid_rot_1 = np.copy(astro_grid)
astro_grid_border_1 = np.copy(astroA.border)
if num_rot < 4:
astro_grid_rot_2 = np.rot90(np.copy(astro_grid), k=num_rot)
astro_grid_border_2 = np.rot90(np.copy(astroA.border), k=num_rot)
elif num_rot == 5:
astro_grid_rot_2 = np.flipud(np.copy(astro_grid))
astro_grid_border_2 = np.flipud(np.copy(astroA.border))
elif num_rot == 6:
astro_grid_rot_2 = np.fliplr(np.copy(astro_grid))
astro_grid_border_2 = np.fliplr(np.copy(astroA.border))
d = compare_astro_utils.alignment_counter(astroA, astroA,
n_fake_samples=0,
align_setting='param',
eval_setting='xcorr',
fake_sample_setting='from_astro',
grid_target=astro_grid_rot_1,
grid_source=astro_grid_rot_2,
target_border_grid=astro_grid_border_1,
source_border_grid=astro_grid_border_2,
move_vector=[0,0],
p=1,
behaviour='default',
with_output_details=True)
flip_corrs.append(d['num_compare'])
saving_utils.save_pickle(flip_corrs, save_flip_pkl_path)
print('Flip corrs', flip_corrs)
print('LENS, random pair, flip, recombination')
print(len(random_pair_corrs), len(flip_corrs), len(recombination_corrs))
x =['Random simulation', 'Flip Control', 'Recombination Control']
y = [random_pair_corrs, flip_corrs, recombination_corrs]
fig = plotly_utils.plot_point_box_revised(x, y, title='Mean +/- standard deviation of controls', x_title='', y_title='xcorr', err_type='std')
saving_utils.save_plotly_fig(fig, os.path.join(plot_folder, 'control_plot'))
"""
'''
plt.ioff()
print('Plotting Size vs Time correlation plot...')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'size_v_time_corr_ALL')
areas_all = []
times_all = []
for astroA in astroA_l:
areas_all.extend(np.log(astroA.res_d['area']))
times_all.extend(astroA.res_d['time_s'])
areas_all = np.array(areas_all)
times_all = np.array(times_all)
r, p = stat_utils.get_pearsonr(times_all, areas_all)
df = pd.DataFrame({'Size': areas_all, 'Time': times_all})
title ='Size vs Time correlation plot'
text = 'r = {}, p < {}'.format(general_utils.truncate(r, 2), p)
for kind in ['reg', 'hex', 'kde']:
plotly_utils.seaborn_joint_grid(df, 'Size', 'Time', kind=kind, text=text)
plt.savefig(os.path.join(path, '{}.svg'.format(kind)))
plt.savefig(os.path.join(path, '{}.png'.format(kind)))
'''
'''
print('---------------------------------')
print('EVENTS VS SPEED PLOTS...')
print('---------------------------------')
speed_event_tuple_d = {}
n_bins_l = [3, 5, 10]
n_frame_splits_l = [15, int(astroA_l[0].minute_frames/6)]
for eval_type in ['max', 'mean']:
for n_bins in n_bins_l:
for n_frame_splits in n_frame_splits_l:
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'speed_v_events_ALL', 'eval_type={}_splits={}_bins={}'.format(eval_type, n_frame_splits, n_bins))
for astroA in astroA_l:
#split n frames. Measure average speed in that bin. Measure how many events in that bin.
#add to histogram
#10 second frame splits
total_frames = len(astroA.indices_d['default'])
num_chunks = total_frames//n_frame_splits
print('NUM FRAME SPLITS {}, TOTAL FRAMES {} NUM CHUNKS {}'.format(n_frame_splits, total_frames, num_chunks))
split_arr_i_l = np.array_split(astroA.indices_d['default'], num_chunks)
speed_event_tuple_l = aqua_utils.speed_event_tuple(astroA, split_arr_i_l, num_events_only=True, eval_type=eval_type)
speed_event_tuple_d[astroA.print_id] = speed_event_tuple_l
#Find maximum speed, for bounds of histogram
max_speed = 0
for k in speed_event_tuple_d.keys():
max_speed_k = np.max(np.array([speed for speed, ev_l in speed_event_tuple_d[k]]))
#print('MAX SPEED {} : {}'.format(k, max_speed_k))
if max_speed_k > max_speed:
max_speed = max_speed_k
#print('MAX SPEED' , max_speed)
events_bins_d = {}
bin_values = np.linspace(0, max_speed, n_bins)
for astroA in astroA_l:
events_bins = [[] for i in range((n_bins-1))]
speed_event_tuple = speed_event_tuple_d[astroA.print_id]
for sp_ev_tup in speed_event_tuple:
ind = np.searchsorted(bin_values, sp_ev_tup[0], side='right')-1
if ind == len(events_bins):
ind -= 1
events_bins[ind].append(sp_ev_tup[1] / n_frame_splits)
#events_bins_avg = [np.mean(events_bins[i]) for i in range(len(events_bins))]
events_bins_d[astroA.print_id] = events_bins
x = bin_values[:-1]
names_l = list(events_bins_d.keys())
x_l = [x for i in range(len(astroA_l))]
y_l_l = [events_bins_d[k] for k in names_l]
x_l_dpl = [tup[0] for tup in speed_event_tuple]
y_l_dpl = [tup[1] for tup in speed_event_tuple]
r, p = stat_utils.get_pearsonr(y_l_dpl, x_l_dpl)
df = pd.DataFrame({'Events': y_l_dpl, 'Speed': x_l_dpl})
fig, stats_d = plotly_utils.plot_scatter_mult_with_avg(x, y_l_l, None, names_l, mode='lines', title='scatter', x_title='Speed (cm/s)', y_title='',
xrange=None, yrange=None, confidence=True, with_stats=True, point_box=False, mean_width_size=5)
saving_utils.save_plotly_fig(fig, path)
print('KEYS', stats_d.keys())
print('THE STTS D X', stats_d['x'])
df_data_m = DataFrame(stats_d['mean_l_l'], columns=stats_d['x'], index=stats_d['names'])
df_ci = DataFrame(stats_d['conf_95'], columns=stats_d['x'], index=stats_d['names'])
df_mean = DataFrame([stats_d['mean'], stats_d['mean_conf']], columns=stats_d['x'], index=['mean', 'conf_95'])
df_data_m.to_csv(path + '-data_means.csv')
df_ci.to_csv(path + '-data_ci.csv')
df_mean.to_csv(path + '-mean_and_CI.csv')
title ='Events vs Speed correlation plot'
text = 'r = {}, p < {}'.format(general_utils.truncate(r, 2), p)
for kind in ['reg', 'hex', 'kde']:
plotly_utils.seaborn_joint_grid(df, 'Speed', 'Events', kind=kind, text=text)
plt.savefig(path + '_corr_{}.svg'.format(kind))
plt.savefig(path + '_corr_{}.png'.format(kind))
print('---------------------------------')
'''
'''
print('Plotting correlation of splitted plots in 3 parts...')
save_folder = os.path.join(output_experiment_path_all_comparison, 'data', 'split_correlation_all')
plot_folder = os.path.join(output_experiment_path_all_comparison, 'plots', 'split_correlation_all')
save_splits_pkl_path = os.path.join(save_folder, 'between_splits.pkl')
save_day_splits_pkl_path = os.path.join(save_folder, 'between_days.pkl')
save_random_pkl_path = os.path.join(save_folder, 'random.pkl')
save_bh_splits_pkl_path = os.path.join(save_folder, 'between_rest_run.pkl')
#1 random simulations
#2 (correlation between splits days with variable the splits (so not between days) 3 split correlations with each other (only day 0 and day 1). day 0 splitted 3 times and correlated between each other. same with day 1
#3 (correlation between splits days with variable the between days)) the day 0 and day 1 splitted and then compared between each other between days
#'split_correlation_all'
#for bh_l in ['default', 'rest', 'running']:
#4 (correlation between split days with variable the rest-run behaviour)
for bh in ['rest']:
#2
fig, res_splits_l = self.get_between_split_split_xcorr(astroA_long_l, bh=bh, save_pkl_path=save_splits_pkl_path)
#3
fig_2, res_day_splits_l = self.get_between_day_split_xcorr(day_0_1_pairs, bh=bh, save_pkl_path=save_day_splits_pkl_path)
#4
fig_3, res_bh_splits_l = self.get_between_bh_split_xcorr(astroA_long_l, bh_pair=['rest','running'], save_pkl_path=save_bh_splits_pkl_path)
#1
if os.path.isfile(save_random_pkl_path):
print('FILE EXISTS')
random_l = saving_utils.load_pickle(save_random_pkl_path)
else:
random_l = []
for astroA in astroA_long_l:
random_l.extend(self.get_random_corrs_self(astroA, bh, n_fake_samples=3))
if save_random_pkl_path is not None:
saving_utils.save_pickle(random_l, save_random_pkl_path)
x = ['Random', 'Self splits', 'Rest-Run splits', 'Day 0-1 Splits']
y = [random_l, res_splits_l, res_bh_splits_l, res_day_splits_l]
print('LENS', [len(y_i) for y_i in y])
fig, stats_d = plotly_utils.plot_point_box_revised(x, y, title='Split correlations (between splits)- {}'.format(bh), x_title='', y_title='Xcorr value', with_stats=True)
saving_utils.save_plotly_fig(fig, os.path.join(plot_folder, 'splits'))
saving_utils.save_csv_dict(stats_d, os.path.join(plot_folder, 'splits' + '.csv'), key_order=['x', 'mean', 'conf_95'])
results_dict = {x[i] : y[i] for i in range(len(x))}
results_dict['x'] = x
key_order = ['x']
key_order.extend(x)
saving_utils.save_csv_dict(results_dict, os.path.join(plot_folder, 'splits_data' + '.csv'), key_order=key_order)
return fig
'''
def get_random_corrs_self(self, astroA, bh, n_fake_samples=3):
random_l = []
d = compare_astro_utils.alignment_counter(astroA, astroA,
n_fake_samples=n_fake_samples,
align_setting='param',
eval_setting='xcorr',
fake_sample_setting='from_astro',
move_vector=[0, 0],
p=1,
behaviour=bh)
return d['num_fake']
def get_between_bh_split_xcorr(self, astroA_l, bh_pair=['rest', 'running'], n_chunks=3, dff_mode=False, save_pkl_path=None, filter_duration=(None, None)):
'''
Split bh_pair[0] into 3 splits. Correlate with whole of bh_pair[1]
'''
if os.path.isfile(save_pkl_path):
print('FILE EXISTS')
res_l = saving_utils.load_pickle(save_pkl_path)
else:
event_grid_splits_d = {}
astros_d = {}
for astroA in astroA_l:
print(astroA.print_id)
event_grid_splits_d[astroA.print_id] = aqua_utils.split_n_event_grids(astroA, bh=bh_pair[0], n=n_chunks)
astros_d[astroA.print_id] = astroA
res_l = []
for astroA_k in event_grid_splits_d.keys():
#Get correlations of splits between splits same days
astroA_splits_l = event_grid_splits_d[astroA_k]
bh_split = astros_d[astroA_k].event_grids_1min[bh_pair[1]]
for i in range(n_chunks):
split_i = astroA_splits_l[i]
d = compare_astro_utils.alignment_counter(astros_d[astroA_k], astros_d[astroA_k],
n_fake_samples=0,
align_setting='param',
eval_setting='xcorr',
fake_sample_setting='from_astro',
grid_target=bh_split,
grid_source=split_i,
move_vector=[0, 0],
p=1,
behaviour=bh_pair[0],
filter_duration=filter_duration,
with_output_details=True)
res_l.append(d['num_compare'])
if save_pkl_path is not None:
saving_utils.save_pickle(res_l, save_pkl_path)
x = ['Split correlations']
y = [np.copy(np.array(res_l))]
print('THE Y', y)
fig = plotly_utils.plot_point_box_revised(x, y, title='{} Split correlations (between splits)- {}'.format(n_chunks, '_'.join(bh_pair)), x_title='', y_title='Xcorr value')
return fig, res_l
def get_between_split_split_xcorr(self, astroA_l, bh='default', n_chunks=3, dff_mode=False, save_pkl_path=None, filter_duration=(None, None)):
if os.path.isfile(save_pkl_path):
print('FILE EXISTS')
res_l = saving_utils.load_pickle(save_pkl_path)
else:
event_grid_splits_d = {}
astros_d = {}
for astroA in astroA_l:
print(astroA.print_id)
event_grid_splits_d[astroA.print_id] = aqua_utils.split_n_event_grids(astroA, bh=bh, n=n_chunks)
astros_d[astroA.print_id] = astroA
res_l = []
for astroA_k in event_grid_splits_d.keys():
#Get correlations of splits between splits same days
astroA_splits_l = event_grid_splits_d[astroA_k]
for i in range(n_chunks):
for j in range(i+1, n_chunks):
print(i, j)
split_i = astroA_splits_l[i]
split_j = astroA_splits_l[j]
d = compare_astro_utils.alignment_counter(astros_d[astroA_k], astros_d[astroA_k],
n_fake_samples=0,
align_setting='param',
eval_setting='xcorr',
fake_sample_setting='from_astro',
grid_target=split_i,
grid_source=split_j,
move_vector=[0, 0],
p=1,
behaviour=bh,
filter_duration=filter_duration,
with_output_details=True)
res_l.append(d['num_compare'])
if save_pkl_path is not None:
saving_utils.save_pickle(res_l, save_pkl_path)
x = ['Split correlations']
y = [np.copy(np.array(res_l))]
print('THE Y', y)
fig = plotly_utils.plot_point_box_revised(x, y, title='{} Split correlations (between splits)- {}'.format(n_chunks, bh), x_title='', y_title='Xcorr value')
return fig, res_l
def get_between_day_split_xcorr(self, astroA_l_pairs, bh='default', n_chunks=3, dff_mode=False, n_fake_samples=5, save_pkl_path=None, filter_duration=(None, None)):
if os.path.isfile(save_pkl_path):
print('FILE EXISTS')
res_l = saving_utils.load_pickle(save_pkl_path)
else:
res_l = []
event_grid_splits_d = {}
for astro_pair in astroA_l_pairs:
pair_k = self.get_astro_pair_id(astro_pair)
event_grid_splits_d[pair_k] = {'day_0' : None, 'day_x' : None}
#Split each astro into 3
event_grid_splits_d[pair_k]['day_0'] = aqua_utils.split_n_event_grids(astro_pair[0], bh=bh, n=n_chunks)
event_grid_splits_d[pair_k]['day_x'] = aqua_utils.split_n_event_grids(astro_pair[1], bh=bh, n=n_chunks)
event_grid_splits_d[pair_k]['astro_pair'] = astro_pair
#Get all split correlations between day 0 and day x of same astro pair
#All possible here (note the 2nd for loop different than function above)
astro_pair = event_grid_splits_d[pair_k]['astro_pair']
d_temp = compare_astro_utils.alignment_counter(astro_pair[0], astro_pair[1],
n_fake_samples=0,
align_setting='xcorr',
eval_setting='xcorr',
fake_sample_setting='from_astro',
p=1,
behaviour='default',
dff_mode=dff_mode)
move_vector = d_temp['move_vector']
for i in range(n_chunks):
for j in range(n_chunks):
print(i, j)
split_i = event_grid_splits_d[pair_k]['day_0'][i]
split_j = event_grid_splits_d[pair_k]['day_x'][j]
d = compare_astro_utils.alignment_counter(astro_pair[0], astro_pair[1],
n_fake_samples=0,
align_setting='param',
eval_setting='xcorr',
fake_sample_setting='from_astro',
grid_target=split_i,
grid_source=split_j,
move_vector=move_vector,
p=1,
behaviour=bh,
filter_duration=filter_duration,
with_output_details=True)
res_l.append(d['num_compare'])
if save_pkl_path is not None:
saving_utils.save_pickle(res_l, save_pkl_path)
x = ['Split correlations']
y = [np.copy(np.array(res_l))]
fig = plotly_utils.plot_point_box_revised(x, y, title='{} Split correlations (between days) - {}'.format(n_chunks, bh), x_title='', y_title='Xcorr value')
return fig, res_l
#--------#--------#--------#--------#--------#--------#--------#--------#--------#--------
def generate_corr_data(self, astroA):
output_experiment_path = self.get_output_experiment_path(astroA, self.output_folder)
print('Making dirs', output_experiment_path)
self.setup_file_folders(output_experiment_path)
print(output_experiment_path)
print('Generating fake sample correlations and split correlations...')
#Will use these to compare how much to split before losing correlation
for p in self.filter_probs:
samples_save_path = os.path.join(output_experiment_path, 'files', 'correlations', 'fake_sample_p={}.pkl'.format(p))
samples_corr_d = correlation_utils.get_corr_astro_samples_v2(astro_xc=astroA, astro_base=astroA, p=p, n_samples=self.n_samples_corr_fake)
saving_utils.save_pickle(samples_corr_d, samples_save_path)
#splits_save_path = os.path.join(output_experiment_path, 'files', 'correlations', 'splits_p={}.pkl'.format(p))
#splits_corr_d = correlation_utils.get_splits_corr(astroA, num_frames_splits_l=self.num_frames_splits_l, p=p, max_comparisons=self.max_split_comparison_samples)
#saving_utils.save_pickle(splits_corr_d, splits_save_path)
print('Writing csv...')
duration_csv_path = os.path.join(output_experiment_path, 'files', 'csv', 'duration_split_ratios.csv')
self.write_csv_duration_splits(astroA, duration_csv_path)
    def generate_corr_data_pair(self, astroA_l):
        """Compute and pickle cross-correlation data for a pair of recordings.

        For each filter probability: (1) self cross-correlation of the first
        (day-sorted) recording, (2) cross-correlation between the two
        recordings, (3) fake-sample correlations between them. The combined
        dict is pickled under the comparison folder. Side effects only.

        Args:
            astroA_l: list of recordings from the same experiment
                (sorted by day via setup_comparison_vars).
        """
        output_experiment_path_comparison, days_str, day_l_s, astroA_l_s = self.setup_comparison_vars(astroA_l, self.output_folder)
        print(output_experiment_path_comparison)
        print('Making dirs', output_experiment_path_comparison)
        self.setup_file_folders_comparison(output_experiment_path_comparison)
        for p in self.filter_probs:
            print(p)
            d = {}
            corr_compare_save_path = os.path.join(output_experiment_path_comparison, 'files', 'correlations', 'corr_compare_p={}.pkl'.format(p))
            astro_filt_l, astro_all_filt, astro_nz_bool_l, astro_all_nz_bool = compare_astro_utils.get_filters_compare(astroA_l_s, p=p)
            #1 - self correlation
            corr_res_self, max_corr_self, move_vector_self, max_coord_self = correlation_utils.get_cross_correlation_2D_info_compare(astro_filt_l[0], astro_filt_l[0])
            corr_res, max_corr, move_vector, max_coord = correlation_utils.get_cross_correlation_2D_info_compare(astro_filt_l[0], astro_filt_l[1])
            #3 - astroA - astroB fake sample correlations
            samples_d = correlation_utils.get_corr_astro_samples_v2(astro_xc=astroA_l[0], astro_base=astroA_l[1], p=p, n_samples=self.n_samples_corr_fake)
            # NOTE(review): the ' corr_res' keys below carry a leading space —
            # looks like a typo, but existing pickles / downstream readers may
            # depend on it; confirm before renaming.
            d['self'] = {'max_corr' : max_corr_self,
                        ' corr_res' : corr_res_self,
                        'move_vector' : move_vector_self,
                        'max_coord' : max_coord_self }
            d['compare'] = {'max_corr' : max_corr,
                        ' corr_res' : corr_res,
                        'move_vector' : move_vector,
                        'max_coord' : max_coord}
            d['samples'] = samples_d
            saving_utils.save_pickle(d, corr_compare_save_path)
def parse_prob(self, path):
base_name = os.path.splitext(os.path.basename(path))[0]
prob_v = float(base_name.split('=')[-1])
return prob_v
def read_corr_pair_data(self, astroA_l):
output_experiment_path_comparison, days_str, day_l_s, astroA_l_s = self.setup_comparison_vars(astroA_l, self.output_folder)
comparison_paths = glob.glob(os.path.join(output_experiment_path_comparison, 'files/correlations/corr_compare_*.pkl'))
corr_pair_d = {}
for comparison_path in comparison_paths:
prob_k = self.parse_prob(comparison_path)
print('Prob k', prob_k)
corr_pair_d[prob_k] = saving_utils.load_pickle(comparison_path)
return corr_pair_d
def read_corr_data(self, astroA):
experiment_path = self.get_output_experiment_path(astroA, self.output_folder)
print('Experiment path', experiment_path)
fake_sample_corr_paths = glob.glob(os.path.join(experiment_path, 'files/correlations/fake_sample_*.pkl'))
#splits_corr_paths = glob.glob(os.path.join(experiment_path, 'files/correlations/splits_*.pkl'))
fake_corr_d = {}
#splits_corr_d = {}
for fake_sample_path in fake_sample_corr_paths:
fake_corr_d[str(self.parse_prob(fake_sample_path))] = saving_utils.load_pickle(fake_sample_path)
#for split_path in splits_corr_paths:
# splits_corr_d[str(self.parse_prob(split_path))] = saving_utils.load_pickle(split_path)
#return fake_corr_d, splits_corr_d
return fake_corr_d
def setup_comparison_vars(self, astroA_l, output_folder):
experiment_id_l = []
day_l = []
for astroA in astroA_l:
experiment_id_l.append('/'.join(astroA.experiment_path.split('/')[-3:-1]))
day_l.append(int(astroA.experiment_path.split('/')[-1].split('_')[-1]))
if len(set(experiment_id_l)) != 1:
print('Different experiment ids, stopping', experiment_id_l)
return
sort_i = np.argsort(day_l)
day_l_s = [day_l[i] for i in sort_i]
astroA_l_s = [astroA_l[i] for i in sort_i]
days_str = 'days_' + '_'.join([str(day) for day in day_l_s])
output_experiment_path_comparison = os.path.join(output_folder,
experiment_id_l[0],
days_str)
return output_experiment_path_comparison, days_str, day_l_s, astroA_l_s
def setup_comparison_all_vars(self, astroA_l, output_folder):
experiment_id_l = []
day_l = []
for astroA in astroA_l:
experiment_id_l.append('/'.join(astroA.experiment_path.split('/')[-3:-1]))
day_l.append(int(astroA.experiment_path.split('/')[-1].split('_')[-1]))
sort_i = np.argsort(day_l)
day_l_s = [day_l[i] for i in sort_i]
astroA_l_s = [astroA_l[i] for i in sort_i]
days_str = 'days_' + '_'.join([str(day) for day in day_l_s])
output_experiment_path_all_comparison = os.path.join(output_folder, 'all')
print('done')
return output_experiment_path_all_comparison, days_str, day_l_s, astroA_l_s
    def get_behaviour_basic_plots(self, astroA):
        """Build scatter plots of the raw behavioural traces of one recording.

        Returns a dict of plotly figures: binarized stick/speed/whisker
        traces, pupil values, the raw ROI stick/speed/whisker value traces,
        plus one 0/1 indicator trace per key in astroA.indices_d.
        """
        figs = {}
        figs['stick_bin'] = plotly_utils.plot_scatter_fmt(x=np.arange(len(astroA.stick_bin)), y=astroA.stick_bin, astype='int', straight_lines_only=True, title='Stick', x_title='Frame', y_title='Off whisker/On whisker')
        figs['speed_bin'] = plotly_utils.plot_scatter_fmt(x=np.arange(len(astroA.speed_bin)), y=astroA.speed_bin, astype='int', straight_lines_only=True, title='Speed', x_title='Frame', y_title='Rest/Running')
        figs['whisker_bin'] = plotly_utils.plot_scatter_fmt(x=np.arange(len(astroA.whisker_bin)), y=astroA.whisker_bin, astype='int', straight_lines_only=True, title='Whisker', x_title='Frame', y_title='No whisker/Whisker movement')
        figs['pupil'] = plotly_utils.plot_scatter_fmt(x=np.arange(len(astroA.pupil_values)), y=astroA.pupil_values, astype='float', straight_lines_only=True, title='Pupil', x_title='Frame', y_title='Pupil value')
        figs['stick_values'] = plotly_utils.plot_scatter(x=np.arange(len(astroA.roi_dict['extra']['stick'])), y=astroA.roi_dict['extra']['stick'], title='Stick', x_title='Frame', y_title='Stick value')
        figs['speed_values'] = plotly_utils.plot_scatter(x=np.arange(len(astroA.roi_dict['extra']['speed'])), y=astroA.roi_dict['extra']['speed'], title='Speed', x_title='Frame', y_title='Speed value')
        figs['whisker_values'] = plotly_utils.plot_scatter(x=np.arange(len(astroA.roi_dict['extra']['whiskers'])), y=astroA.roi_dict['extra']['whiskers'], title='Whisker', x_title='Frame', y_title='Whisker value')
        def make_arr(inds, arr_length):
            # 0/1 indicator array with ones at the given frame indices.
            arr = np.zeros([arr_length])
            arr[inds] = 1
            return arr
        # All behaviour index sets are plotted against the stick-trace length;
        # assumes every trace in indices_d spans the same number of frames.
        arr_length = len(astroA.stick_bin)
        for k in astroA.indices_d.keys():
            arr = make_arr(astroA.indices_d[k], arr_length)
            figs[k] = plotly_utils.plot_scatter_fmt(x=np.arange(len(arr)), y=arr, title=k, astype='int', straight_lines_only=True, x_title='Frame', y_title='Value')
        return figs
def get_signal_durations_plot(self, astroA):
signal_duration_figs = {}
#Signal durations
for k in astroA.event_subsets.keys():
signal_duration_figs[k] = plotly_utils.plot_histogram(astroA.all_durations_d[k], title=' Signal durations histogram ({})'.format(k))
return signal_duration_figs
def get_border_plot(self, astroA):
if 'clandmark_mask' in astroA.res_d.keys():
return plotly_utils.plot_contour(astroA.res_d['border_mask'] + astroA.res_d['clandmark_mask'], title='border_and_landmark_mask', height=600, width=800)
else:
return plotly_utils.plot_contour(astroA.res_d['border_mask'], title='border_mask', height=600, width=800)
def get_behaviour_contour_plots(self, astroA):
'''
Use 1 min normalized plots
'''
fig_heatmap_grids = {}
fig_heatmap_dff_grids = {}
#fig_heatmap_dff_grids
for k in astroA.event_subsets.keys():
fig_heatmap_grids[k] = plotly_utils.plot_contour(astroA.event_grids_1min[k], title=k + '_event grid', height=600, width=800)
for k in astroA.event_subsets.keys():
fig_heatmap_dff_grids[k] = plotly_utils.plot_contour(astroA.event_grids_1min_dff[k], title=k+'_event grid dff', height=600, width=800)
return fig_heatmap_grids, fig_heatmap_dff_grids
def get_behaviour_contour_threshold_plots(self, astroA, threshold=0.5):
'''
Use 1 min normalized plots
'''
fig_heatmap_grids = {}
fig_heatmap_dff_grids = {}
#fig_heatmap_dff_grids
for k in astroA.event_subsets.keys():
fig_heatmap_grids[k] = plotly_utils.plot_contour_threshold(astroA.event_grids_1min[k], threshold_perc=threshold, title=k + '_event grid - Saturation : ' + str(threshold*100) + '%', height=600, width=800)
for k in astroA.event_subsets.keys():
fig_heatmap_dff_grids[k] = plotly_utils.plot_contour_threshold(astroA.event_grids_1min_dff[k], threshold_perc=threshold, title=k+'_event grid dff - Saturation : ' + str(threshold*100) + '%', height=600, width=800)
return fig_heatmap_grids, fig_heatmap_dff_grids
def get_behaviour_activity_plot(self, astroA):
activity_ratio_k = np.array(self.filter_keys(astroA))
activity_ratio_l = np.array([astroA.activity_ratios[k] for k in activity_ratio_k])
text_values = np.array(['Frames: ' + str(len(astroA.indices_d[k])) for k in activity_ratio_k])
activity_i = np.argsort(activity_ratio_l)
activity_ratio_k_s = activity_ratio_k[activity_i]
activity_ratio_l_s = activity_ratio_l[activity_i]
text_values_s = text_values[activity_i]
activity_ratio_k_s[np.where(activity_ratio_k_s == 'default')] = 'all'
fig = plotly_utils.plot_bar(x=activity_ratio_k_s, y=activity_ratio_l_s, text_values=['']*len(activity_ratio_l_s), text_size=20, title='Activity ratio (events per voxel)', x_title='', y_title='Events per voxel (%)', margin_b=150)
plotly_utils.apply_fun_axis_fig(fig, lambda x : x * 100, axis='y',)
return fig
def get_behaviour_activity_bar_plot_all(self, astroA_l, bh_l, with_stats=False):
activity_ratios_np = np.zeros(len(bh_l))
activity_ratios_num_added = np.zeros(len(bh_l))
for i, bh_k in enumerate(bh_l):
for astroA in astroA_l:
if bh_k in astroA.activity_ratios.keys():
activity_ratios_np[i] += astroA.activity_ratios[bh_k]
activity_ratios_num_added[i] += 1
activity_ratios_np /= activity_ratios_num_added
activity_i = np.argsort(activity_ratios_np)
activity_ratio_k_s = np.array(bh_l)[activity_i]
activity_ratio_l_s = activity_ratios_np[activity_i]
activity_ratio_k_s[np.where(activity_ratio_k_s == 'default')] = 'all'
fig = plotly_utils.plot_bar(x=activity_ratio_k_s, y=activity_ratio_l_s, text_values=['']*len(activity_ratio_l_s), text_size=20,
title='Activity ratio (events per voxel)', x_title='', y_title='Events per voxel (%)',
margin_b=150,
err_y=[], err_symmetric=None)
plotly_utils.apply_fun_axis_fig(fig, lambda x : x * 100, axis='y',)
if with_stats:
#data = {k : areas[i] for i, k in enumerate(area_keys_s)}
return fig, {}
return fig
def get_behaviour_activity_dot_plot_all(self, astroA_l, bh_l, lines=False):
activity_ratio_l = []
for bh in bh_l:
activity_bh_l = []
for i, astroA in enumerate(astroA_l):
if bh in astroA.activity_ratios.keys():
activity_bh_l.append(astroA.activity_ratios[bh])
activity_ratio_l.append(activity_bh_l)
activity_means = [np.mean(activity_ratios) for activity_ratios in activity_ratio_l]
activity_i = np.argsort(activity_means)
x = np.array(bh_l)[activity_i]
y = []
for i in activity_i:
y.append(activity_ratio_l[i])
fig, stats_d = plotly_utils.plot_point_box_revised(x, y, title='Activity ratio', x_title='', y_title='Events per voxel (%)', lines=lines, with_stats=True)
return fig, stats_d
def get_behaviour_activity_number_bar_plot_all(self, astroA_l, bh_l, with_stats=False):
activity_num_np = np.zeros(len(bh_l))
activity_num_added = np.zeros(len(bh_l))
for astroA in astroA_l:
for i, bh_k in enumerate(bh_l):
if bh_k in astroA.activity_ratios.keys():
activity_num_np[i] += (len(astroA.res_d['area'][astroA.event_subsets[bh_k]]) / len(astroA.indices_d[bh_k])) * astroA.minute_frames
activity_num_added[i] += 1
activity_num_np /= activity_num_added
activity_i = np.argsort(activity_num_np)
activity_num_k_s = np.array(bh_l)[activity_i]
activity_num_l_s = activity_num_np[activity_i]
activity_num_k_s[np.where(activity_num_k_s == 'default')] = 'all'
fig = plotly_utils.plot_bar(x=activity_num_k_s, y=activity_num_l_s, text_values=['']*len(activity_num_l_s),
text_size=20, title='Activity number',
x_title='', y_title='Events per minute in state', margin_b=150,
err_y=[], err_symmetric=None)
if with_stats:
#data = {k : areas[i] for i, k in enumerate(area_keys_s)}
return fig, {}
return fig
def get_behaviour_activity_number_dot_plot_all(self, astroA_l, bh_l, with_stats=False, lines=False):
activity_num_l = []
for bh in bh_l:
activity_bh_l = []
for i, astroA in enumerate(astroA_l):
if bh in astroA.event_subsets.keys():
num_events = len(astroA.res_d['area'][astroA.event_subsets[bh]])
num_frames = len(astroA.indices_d[bh])
activity_bh_l.append((num_events / num_frames) * astroA.minute_frames)
activity_num_l.append(activity_bh_l)
activity_means = [np.mean(activity_nums) for activity_nums in activity_num_l]
activity_i = np.argsort(activity_means)
x = np.array(bh_l)[activity_i]
y = []
for i in activity_i:
y.append(activity_num_l[i])
fig, stats_d = plotly_utils.plot_point_box_revised(x, y, title='Activity number', x_title='', y_title='Events per minute in state', lines=lines, with_stats=True)
return fig, stats_d
def get_common_keys(self, astroA_l, bh_l):
s = set(bh_l)
for astroA in astroA_l:
s &= set(astroA.indices_d.keys())
return np.sort(list(s))
def get_all_signal_attribute_plot(self, astroA_l, bh_l, type_event='area', type_plot='bar',
y_range=None, divide_y=1, title='', x_title='', y_title='',
error_type='std', err_symmetric=True, with_stats=False):
areas = [[] for i in range(len(bh_l))]
for astroA in astroA_l:
for i, k in enumerate(bh_l):
if k in astroA.event_subsets.keys():
areas_k = astroA.res_d[type_event][astroA.event_subsets[k]]
areas[i].extend(areas_k)
areas_std = np.array([np.std(v_l) for v_l in areas])
areas_mean = np.array([np.mean(v_l) for v_l in areas])
areas_conf = []
for v_l in areas:
m, l, h = stat_utils.mean_confidence_interval(v_l, confidence=0.95)
areas_conf.append(m-l)
areas_conf = np.array(areas_conf)
areas_i = np.argsort(areas_mean)
area_keys_s = np.array(bh_l)[areas_i]
areas_s = np.array(areas)[areas_i]
areas_mean_s = np.array(areas_mean)[areas_i]
areas_std_s = np.array(areas_std)[areas_i]
areas_conf_s = np.array(areas_conf)[areas_i]
if type_plot == 'bar':
if error_type == 'std':
fig = plotly_utils.plot_bar(x=area_keys_s, y=areas_mean_s, text_values=[], text_size=20, title=title, x_title=x_title, y_title=y_title, margin_b=150, err_y=areas_std_s, err_symmetric=err_symmetric)
elif error_type == 'conf':
fig = plotly_utils.plot_bar(x=area_keys_s, y=areas_mean_s, text_values=[], text_size=20, title=title, x_title=x_title, y_title=y_title, margin_b=150, err_y=areas_conf_s, err_symmetric=err_symmetric)
elif type_plot == 'dot':
fig = plotly_utils.plot_point_box_revised(x=area_keys_s, y=areas_s, title=title, x_title=x_title, y_title=y_title, margin_b=150, y_range=y_range)
else:
return None
if with_stats:
data = {k : areas_s[i] for i, k in enumerate(area_keys_s)}
return fig, {'behaviour' : area_keys_s, 'mean' : areas_mean_s, 'std': areas_std_s, 'conf_95': areas_conf_s, 'data' : data}
return fig
def get_behaviour_area_plot(self, astroA):
area_keys = np.array(self.filter_keys(astroA))
area_l_mean = []
area_l_std = []
for k in area_keys:
area_k = astroA.res_d['area'][astroA.event_subsets[k]]
area_l_mean.append(np.mean(area_k))
area_l_std.append(np.std(area_k))
area_l_mean = np.array(area_l_mean)
area_l_std = np.array(area_l_std)
areas_i = np.argsort(area_l_mean)
area_keys_s = area_keys[areas_i]
area_l_mean_s = area_l_mean[areas_i]
area_l_std_s = area_l_std[areas_i]
fig = plotly_utils.plot_bar(x=area_keys_s, y=area_l_mean_s, text_values=[], text_size=20, title='Sizes of events', x_title='', y_title='Event sizes (\u03bcm<sup>2</sup>)', margin_b=150)
return fig
def get_behaviour_amplitude_bar_plot(self, astroA):
am_keys = np.array(self.filter_keys(astroA))
am_l_mean = []
for k in am_keys:
dff_res = astroA.res_d['dffMax2'][astroA.event_subsets[k]]
am_l_mean.append(np.mean(dff_res))
am_l_mean = np.array(am_l_mean)
am_i = np.argsort(am_l_mean)
am_keys_s = am_keys[am_i]
am_l_mean_s= am_l_mean[am_i]
fig = plotly_utils.plot_bar(x=am_keys_s, y=am_l_mean_s, text_values=[], text_size=20, title='Amplitude (df/f) of events', x_title='', y_title='df/f', margin_b=150)
return fig
    def get_waterfall_delays_plot_all(self, astroA, return_results_only=False):
        """Waterfall plots of event onset delays after stick / running / rest.

        For each combination of (unique events?, max-duration filter), pulls
        event delays around stick_exact_start frames and around equally many
        randomly sampled running / rest frames (excluding stick frames), and
        renders sorted-delay waterfall figures plus interpolated variants.

        Args:
            astroA: recording with indices_d / event_subsets / res_d.
            return_results_only: if True, skip figure creation and return the
                raw sorted delay arrays [stick, running, rest] as dicts keyed
                by plot id.

        Returns:
            (figs, figs_interp) dicts keyed by plot id, or the three raw delay
            dicts when return_results_only is True.

        NOTE(review): uses np.random.choice without a fixed seed, so the
        running/rest samples (and the resulting plots) are not reproducible
        between calls.
        """
        #Unique, no unique
        #Num stick start non num stick start
        #Half second non half second
        #unique_args = [True, False]
        unique_args = [True, False]
        max_duration_args = [None, astroA.duration_small]
        with_stick_num_args = [True]
        figs = {}
        figs_interp = {}
        stick_id = 'stick_exact_start'
        running_id = 'running_exact'
        rest_id = 'rest_exact'
        stick_v_l_d = {}
        running_v_l_d = {}
        no_running_v_l_d = {}
        for un in unique_args:
            for max_duration in max_duration_args:
                for with_stick_num in with_stick_num_args:
                    delay_info_args = {'event_inds_subset' : astroA.event_subsets['default'],
                                        'min_delay' : -20,
                                        'max_delay' : 50,
                                        'max_duration' : max_duration,
                                        'unique_events' : un
                                        }
                    plot_id = '{}-{}-{}'.format('unique' if un else 'notunique',
                                                'max_duration_None' if (max_duration is None) else 'max_duration_' + str(max_duration),
                                                'stick_num_' + str(with_stick_num))
                    if with_stick_num:
                        # Sample as many running/rest frames as there are stick
                        # frames (without replacement) so the groups are balanced.
                        rand_running = np.random.choice(list(set(astroA.indices_d[running_id]) - set(astroA.indices_d[stick_id])), size=len(astroA.indices_d[stick_id]), replace=False)
                        rand_no_running = np.random.choice(list(set(astroA.indices_d[rest_id]) - set(astroA.indices_d[stick_id])), size=len(astroA.indices_d[stick_id]), replace=False)
                    else:
                        rand_running = list(set(astroA.indices_d[running_id]) - set(astroA.indices_d[stick_id]))
                        rand_no_running = list(set(astroA.indices_d[rest_id]) - set(astroA.indices_d[stick_id]))
                    signal_delays_stick_np, peak_delays_stick_np = aqua_utils.get_delay_info_from_res(astroA.indices_d[stick_id], astroA.res_d, **delay_info_args)
                    signal_delays_running_np, peak_delays_running_np = aqua_utils.get_delay_info_from_res(rand_running, astroA.res_d, **delay_info_args)
                    signal_delays_no_running_np, peak_delays_no_running_np = aqua_utils.get_delay_info_from_res(rand_no_running, astroA.res_d, **delay_info_args)
                    # Waterfalls expect ascending delays per group.
                    stick_v = np.sort(signal_delays_stick_np)
                    running_v = np.sort(signal_delays_running_np)
                    no_running_v = np.sort(signal_delays_no_running_np)
                    stick_v_l_d[plot_id] = stick_v
                    running_v_l_d[plot_id] = running_v
                    no_running_v_l_d[plot_id] = no_running_v
                    figs[plot_id] = plotly_utils.plot_waterfall(arrays_l=[stick_v, running_v, no_running_v], legend_names=['stick', 'running', 'rest'], title='Signal (event) delays after behaviour', x_title='Delay (s)', y_title='Event id')
                    plotly_utils.apply_fun_axis_fig(figs[plot_id], lambda x : x / astroA.fr, axis='x')
                    figs_interp[plot_id] = plotly_utils.plot_waterfall_interpolate(arrays_l=[stick_v, running_v, no_running_v], legend_names=['stick', 'running', 'rest'], title='Signal (event) delays after behaviour (scaled)', x_title='Delay (s)', y_title='Event id')
                    plotly_utils.apply_fun_axis_fig(figs_interp[plot_id], lambda x : x / astroA.fr, axis='x')
        if return_results_only:
            return [stick_v_l_d, running_v_l_d, no_running_v_l_d]
        return figs, figs_interp
def get_waterfall_delays_plot_all_mult(self, astroA_l):
figs_d = {}
figs_interp_d = {}
stick_v_l_d = {}
running_v_l_d = {}
no_running_v_l_d = {}
for astroA_i, astroA in enumerate(astroA_l):
stick_d, running_d, no_running_d = self.get_waterfall_delays_plot_all(astroA, return_results_only=True)
if astroA_i == 0:
stick_v_l_d = stick_d
running_v_l_d = running_d
no_running_v_l_d = no_running_d
k_0 = list(stick_d.keys())[0]
arrs = [stick_v_l_d, running_v_l_d, no_running_v_l_d]
for k in stick_d.keys():
for arr in arrs:
arr[k] = list(arr[k])
else:
k_0 = list(stick_d.keys())[0]
for k in stick_d.keys():
stick_v_l_d[k].extend(stick_d[k])
running_v_l_d[k].extend(running_d[k])
no_running_v_l_d[k].extend(no_running_d[k])
for k in stick_v_l_d.keys():
stick_v = np.sort(stick_v_l_d[k])
running_v = np.sort(running_v_l_d[k])
no_running_v = np.sort(no_running_v_l_d[k])
fig = plotly_utils.plot_waterfall(arrays_l=[stick_v, running_v, no_running_v], legend_names=['stick', 'running', 'rest'], title='Signal (event) delays after behaviour', x_title='Delay (s)', y_title='Event id')
plotly_utils.apply_fun_axis_fig(fig, lambda x : x / astroA_l[0].fr, axis='x')
fig_interp = plotly_utils.plot_waterfall_interpolate(arrays_l=[stick_v, running_v, no_running_v], legend_names=['stick', 'running', 'rest'], title='Signal (event) delays after behaviour (scaled) All axons', x_title='Delay (s)', y_title='Event id')
plotly_utils.apply_fun_axis_fig(fig_interp, lambda x : x / astroA_l[0].fr, axis='x')
figs_d[k] = fig
figs_interp_d[k] = fig_interp
return figs_d, figs_interp_d
    def get_transition_proportion_delays_plot_all(self, astroA_l, before_bh, inds_bh, after_bh,
                                            before_range=20, after_range=50, avg_proportions=False,
                                            delay_step_size=1):
        '''
        Proportion of events around behaviour transitions, pooled over all
        recordings in astroA_l. One figure per (unique?, max-duration)
        combination, keyed 'prop-...'.

        inds: the inds i to check
        before_bh: for each i, make sure bh before is before_bh otherwize don't include i
        after_bh: for each i, make sure bh after is after_bh otherwize don't include i
        before_range: the range we look for events
        after_range: the range we look for events
        '''
        #Unique, no unique
        #Num stick start non num stick start
        unique_args = [True, False]
        max_duration_args = [None, astroA_l[0].duration_small]
        figs = {}
        for max_duration in max_duration_args:
            #STICK
            for un in unique_args:
                plot_id = 'prop-{}-{}'.format('unique' if un else 'notunique', 'max_duration_None' if (max_duration is None) else 'max_duration_' + str(max_duration))
                prop = np.zeros([(after_range+before_range+1)])
                signal_delays_all_l = []
                for astroA in astroA_l:
                    inds = astroA.indices_d[inds_bh]
                    #Filter indices: keep transition frames preceded by before_bh
                    #for all of before_range and followed by after_bh for all of
                    #after_range (prop=1.0 means the full window must match).
                    indices_filt_before = aqua_utils.filter_range_inds(inds, astroA.indices_d[before_bh], range=(-before_range, -1), prop=1.0)
                    indices_filt_after = aqua_utils.filter_range_inds(inds, astroA.indices_d[after_bh], range=(1, after_range), prop=1.0)
                    indices_filt = np.array(np.sort(list(set(indices_filt_before) & set(indices_filt_after))))
                    if len(indices_filt) == 0:
                        continue
                    #print('Len indices {} len filt before {} len filt after {} len filt {}'.format(len(inds), len(indices_filt_before), len(indices_filt_after), len(indices_filt)))
                    delay_info_args = {'event_inds_subset' : astroA.event_subsets['default'],
                                        'min_delay' : -before_range,
                                        'max_delay' : after_range,
                                        'max_duration' : max_duration,
                                        'unique_events' : un
                                        }
                    signal_delays_np, peak_delays_np = aqua_utils.get_delay_info_from_res(indices_filt, astroA.res_d, **delay_info_args)
                    signal_delays_all_l.extend(list(signal_delays_np))
                signal_delays_all = np.array(signal_delays_all_l)
                print('Total signals {} {}-{} delay {} {}'.format(len(signal_delays_all), before_bh, after_bh, before_range, after_range))
                # Fraction of pooled events at each delay offset.
                for i, delay_x in enumerate(range(-before_range, after_range+1)):
                    if len(signal_delays_all) == 0:
                        prop[i] = 0
                    else:
                        prop[i] = float(np.sum(signal_delays_all == delay_x)) / len(signal_delays_all)
                # Trim so the proportion vector reshapes into whole bins, then
                # sum each bin of delay_step_size delays.
                rem = len(prop) % delay_step_size
                if rem != 0:
                    prop = prop[:-rem]
                prop_step_sum = np.sum(prop.reshape([-1, delay_step_size]), axis=1)
                x_l = [np.arange(-before_range, after_range+1, delay_step_size) for i in range(1)]
                y_l = [prop_step_sum]
                figs[plot_id] = plotly_utils.plot_scatter_mult(x_l, y_l, name_l=['{} to {}'.format(before_bh, after_bh)], mode='lines', title='scatter', x_title='Delay (s)', y_title='Events')
                # NOTE(review): astroA here is the last recording iterated above;
                # assumes all recordings share the same frame rate — confirm.
                plotly_utils.apply_fun_axis_fig(figs[plot_id], lambda x : x / astroA.fr, axis='x')
        return figs
    def get_transition_proportion_delays_plot_all_alt(self, astroA_l, before_bh, inds_bh, after_bh,
                                            before_range=20, after_range=50, y_title=None,
                                            delay_step_size=1, fit=False, measure=None, fix_dff_interval=50, confidence=False,
                                            duration_filter=[None, None]):
        '''
        Generate plots of transitions between behaviours lasting for some period of time
        (e.g. 20 frames of rest (before_bh) and then transition to 30 frames of running (after_bh)
        for valid indices in running_start_exact (inds_bh)). We can provide a measure to
        plot a particular measure such as size or amplitude or leave it empty and obtain
        the proportion of events taking place at which delay during these intervals found.

        inds: the inds i to check
        before_bh: for each i, make sure bh before is before_bh otherwize don't include i
        after_bh: for each i, make sure bh after is after_bh otherwize don't include i
        before_range: the range we look for events
        after_range: the range we look for events
        before_delay: the delay of the interval we look for continious befrore and after bh(its actually kind the range...)

        Returns a dict of figures ('event_avg', 'event_avg_no_mult' and,
        without a measure, 'event_prop') plus bin stats.

        NOTE(review): duration_filter uses a mutable list as a default
        argument; it is only read here, but a None sentinel would be safer.
        '''
        signal_delays_all_l_l = []
        if measure is not None:
            event_measure_all_l_l = []
        #DFF max fix, to support both default and the fix
        dff_max_to_fix = (measure == 'dffMax2')
        if measure == 'dffMax2default':
            measure = 'dffMax2'
        #Fix dffMax by adding more range and delay
        if dff_max_to_fix:
            before_range += fix_dff_interval
            after_range += fix_dff_interval
        for astroA in astroA_l:
            inds = astroA.indices_d[inds_bh]
            #Filter indices: keep only transition frames fully preceded by
            #before_bh and fully followed by after_bh.
            indices_filt_before = aqua_utils.filter_range_inds(inds, astroA.indices_d[before_bh], range=(-before_range, -1), prop=1.0)
            indices_filt_after = aqua_utils.filter_range_inds(inds, astroA.indices_d[after_bh], range=(1, after_range), prop=1.0)
            indices_filt = np.array(np.sort(list(set(indices_filt_before) & set(indices_filt_after))))
            if len(indices_filt) == 0:
                continue
            delay_info_args = {'event_inds_subset' : astroA.event_subsets['default'],
                                'min_delay' : -before_range,
                                'max_delay' : after_range,
                                'min_duration' : duration_filter[0],
                                'max_duration' : duration_filter[1],
                                'unique_events' : False,
                                'return_non_unique_delays_arr' : True
                            }
            _, _, _, signal_delays_l_l, peak_mins_l_l, valid_event_i_l_l = aqua_utils.get_delay_info_from_res(indices_filt, astroA.res_d, **delay_info_args)
            #-------------------------------------------------------------------
            signal_delays_l_l_amended = []
            if measure is not None:
                #Special case for amplitude, we want the exact spot where
                #the maximum takes place, not beginning of event
                #So we increase signal delay by max_frame_i - tBegin to incorporate this
                if dff_max_to_fix:
                    for i, signal_delays_l in enumerate(signal_delays_l_l):
                        valid_event_i_l = valid_event_i_l_l[i]
                        new_delays_l = [(s + astroA.res_d['dffMaxFrame'][valid_event_i_l[j]] - astroA.res_d['tBegin'][valid_event_i_l[j]]) for j, s in enumerate(signal_delays_l)]
                        # NOTE(review): popping from new_delays_l while enumerating
                        # it skips the element right after each removal — confirm
                        # whether consecutive out-of-range delays can occur here.
                        for j, s in enumerate(new_delays_l):
                            if s > after_range:
                                new_delays_l.pop(j)
                                valid_event_i_l.pop(j)
                        signal_delays_l_l_amended.append(new_delays_l)
                    signal_delays_l_l = signal_delays_l_l_amended
            #-------------------------------------------------------------------
            for i, signal_delays_l in enumerate(signal_delays_l_l):
                signal_delays_all_l_l.append(signal_delays_l)
                if measure is not None:
                    event_measure_all_l_l.append(list(astroA.res_d[measure][valid_event_i_l_l[i]]))
        total_events = np.sum([len(signal_delays_all_l) for signal_delays_all_l in signal_delays_all_l_l])
        #if measure is not None:
        #    total_events2 = np.sum([len(v_l) for v_l in event_measure_all_l_l])
        #print('Total signals {} {}-{} delay {} {}'.format(total_events, before_bh, after_bh, before_range, after_range))
        #Measure or event matrix: one row per transition interval, one column
        #per delay offset in [-before_range, after_range].
        prop_all_np = np.zeros([len(signal_delays_all_l_l), after_range + before_range+1])
        #Count events in case we are using a measure
        ev_count_all_np = np.zeros([len(signal_delays_all_l_l), after_range + before_range+1])
        #Generate individual proportion plots
        for s_i, signal_delays_all_l in enumerate(signal_delays_all_l_l):
            prop = np.zeros([(after_range+before_range+1)])
            ev_count = np.zeros([(after_range+before_range+1)])
            for i, delay_x in enumerate(range(-before_range, after_range+1)):
                if len(signal_delays_all_l) == 0:
                    prop[i] = 0
                else:
                    if measure is None:
                        # No measure: cell holds the event count at this delay.
                        prop[i] = float(np.sum(np.array(signal_delays_all_l) == delay_x))
                    else:
                        # With a measure: cell holds the mean measure value of the
                        # events at this delay; counts go to ev_count.
                        ev_count[i] = float(np.sum(np.array(signal_delays_all_l) == delay_x))
                        valid_delays_i = np.where(np.array(signal_delays_all_l) == delay_x)
                        if len(valid_delays_i[0]) == 0:
                            prop[i] = 0
                        else:
                            prop[i] = np.mean(np.array(event_measure_all_l_l[s_i])[np.where(np.array(signal_delays_all_l) == delay_x)])
            prop_all_np[s_i, :] = prop
            if measure is not None:
                ev_count_all_np[s_i, :] = ev_count
        #Working on proportion plots and event numbers
        if measure is None:
            prop_avg_events = np.sum(prop_all_np, axis=0) / (prop_all_np.shape[0])
            #print('BEFORE EVENTS', np.sum(np.sum(prop_all_np, axis=0)[0:before_range]))
            #print('AFTER EVENTS', np.sum(np.sum(prop_all_np, axis=0)[before_range:]))
            prop_avg_prop = np.sum(prop_all_np, axis=0) / np.sum(prop_all_np)
            prop_total_events = np.sum(prop_all_np, axis=0)
        #Working on durations, areas, amplitudes, we only care about averaging
        #the non-zero values, where there are events.
        else:
            #[num_intervals, interval_size]
            #How many intervals are non zero [1, interval_size]
            count_nz_intervals = np.count_nonzero(ev_count_all_np, axis=0)
            #print('COUNT NZ', count_nz_intervals)
            prop_avg_events = np.sum(prop_all_np, axis=0) / count_nz_intervals
            #Set non existent events to nan, so they aren't showing in the plot
            prop_all_np[ev_count_all_np == 0] = np.nan
        bin_type = 'add' if measure is None else 'mean'
        #TODO HACK
        if delay_step_size != 1 and ((after_range + before_range) // delay_step_size != 2):
            bin_type = 'mean'
        #Fix redo of dffMax to keep original range
        if dff_max_to_fix:
            before_range -= fix_dff_interval
            after_range -= fix_dff_interval
            prop_avg_events = prop_avg_events[fix_dff_interval:-fix_dff_interval]
            prop_all_np = prop_all_np[:, fix_dff_interval:-fix_dff_interval]
        x = np.arange(-before_range, after_range + 1, 1)
        # NOTE(review): astroA below is the last recording iterated; assumes a
        # shared frame rate across astroA_l.
        fig, bin_stats = plotly_utils.plot_scatter_mult_tree(x=x, y_main=prop_avg_events, y_mult=prop_all_np, mode_main='lines', mode_mult='markers',
                                title='Average - Total events: {} Total intervals: {}'.format(total_events, prop_all_np.shape[0]),
                                y_title='Num events / interval' if y_title is None else y_title, x_title='Delay (s)', fit=fit, fit_annotation_pos_fix=astroA.fr,
                                bin_main_size=delay_step_size, bin_mult_size=delay_step_size, opacity=0.5, confidence=confidence, with_stats=True,
                                bin_type=bin_type)
        confidence_format = 'lines' if delay_step_size == 1 else 'bar'
        fig2 = plotly_utils.plot_scatter_mult_tree(x=x, y_main=prop_avg_events, y_mult=prop_all_np, mode_main='lines', mode_mult='markers',
                                title='Average - Total events: {} Total intervals: {}'.format(total_events, prop_all_np.shape[0]),
                                y_title='Num events / interval' if y_title is None else y_title, x_title='Delay (s)', fit=fit, fit_annotation_pos_fix=astroA.fr,
                                bin_main_size=delay_step_size, bin_mult_size=delay_step_size, opacity=0.5, confidence=confidence, y_mult_include=False,
                                confidence_format=confidence_format, bin_type=bin_type)
        #Normally we take the mean of the bin. However when we take the number of events in the bin
        #we want to add them up
        if len(x) // delay_step_size > 2:
            plotly_utils.apply_fun_axis_fig(fig, lambda x : x / astroA.fr, axis='x')
            plotly_utils.apply_fun_axis_fig(fig2, lambda x : x / astroA.fr, axis='x')
        #No proportions or total is used if we are doing measure
        if measure is not None:
            return {'event_avg' : fig, 'event_avg_no_mult' : fig2}, bin_stats
        fig3 = plotly_utils.plot_scatter(x=x, y=prop_avg_prop, title='Proportions - plot: Total events: {} Total intervals: {}'.format(total_events, prop_all_np.shape[0]),
                            y_title='Normalized events (%)' if y_title is None else y_title, x_title='Delay (s)', bin_size=delay_step_size, bin_type=bin_type)
        plotly_utils.apply_fun_axis_fig(fig3, lambda x : x * 100, axis='y')
        if len(x) // delay_step_size > 2:
            plotly_utils.apply_fun_axis_fig(fig3, lambda x : x / astroA.fr, axis='x')
        return {'event_avg' : fig, 'event_avg_no_mult' : fig2,
                'event_prop' : fig3}, bin_stats
    def get_transition_bh_values_plot_all_alt(self, astroA_l, before_bh, inds_bh, after_bh,
                                            bh_measure='speed',
                                            before_range=20, after_range=50, y_title=None,
                                            delay_step_size=1, fit=False, confidence=False):
        '''
        Get transition plots, but plots the values of behaviours (e.g. speed, stick, ...)

        Uses the same interval filtering as the event-based transition plots,
        but averages the raw behaviour trace ('speed' or 'pupil' only) over
        each qualifying transition window. Returns ({'event_avg': fig[,
        'event_avg_no_mult': fig2]}, bin_stats), or None for an unsupported
        bh_measure.
        '''
        figs = {}
        bh_val_all_l = []
        for astroA in astroA_l:
            inds = astroA.indices_d[inds_bh]
            #Filter indices
            indices_filt_before = aqua_utils.filter_range_inds(inds, astroA.indices_d[before_bh], range=(-before_range, -1), prop=1.0)
            indices_filt_after = aqua_utils.filter_range_inds(inds, astroA.indices_d[after_bh], range=(1, after_range), prop=1.0)
            indices_filt = np.array(np.sort(list(set(indices_filt_before) & set(indices_filt_after))))
            if len(indices_filt) == 0:
                continue
            #print('LEN INDICES FILT : {}'.format(len(indices_filt)))
            for center_ind in indices_filt:
                # Behaviour values over the window around each transition frame.
                interval_inds = np.arange(center_ind-before_range, center_ind+after_range+1)
                if bh_measure == 'speed':
                    bh_values = astroA.speed_values[interval_inds]
                elif bh_measure == 'pupil':
                    bh_values = astroA.pupil_values[interval_inds]
                else:
                    print('Other measures not supported')
                    return None
                bh_val_all_l.append(bh_values)
        bh_val_all_np = np.zeros([len(bh_val_all_l), before_range + after_range + 1])
        for i, bh_val_l in enumerate(bh_val_all_l):
            if bh_measure == 'speed':
                bh_val_l = np.copy(bh_val_l)
                # Zero speed inside a 'running_semi_exact' stretch is masked out;
                # presumably the values are a float array so None becomes nan
                # and is skipped by np.nanmean below — TODO confirm dtype.
                if before_bh == 'running_semi_exact':
                    bh_val_l[:before_range][bh_val_l[:before_range] == 0] = None
                if after_bh == 'running_semi_exact':
                    bh_val_l[before_range+1:][bh_val_l[before_range+1:] == 0] = None
            bh_val_all_np[i, :] = np.array(bh_val_l)
        bh_val_avg = np.nanmean(bh_val_all_np, axis=0)
        x = np.arange(-before_range, after_range+1, 1)
        # NOTE(review): astroA below is the last recording iterated; assumes a
        # shared frame rate across astroA_l.
        fig, bin_stats = plotly_utils.plot_scatter_mult_tree(x=x, y_main=bh_val_avg, y_mult=bh_val_all_np, mode_main='lines', mode_mult='lines',
                                title='Total intervals: {}'.format(bh_val_all_np.shape[0]),
                                y_title='Speed (cm/s)' if y_title is None else y_title, x_title='Delay (s)', fit=fit, fit_annotation_pos_fix=astroA.fr,
                                bin_main_size=delay_step_size, bin_mult_size=delay_step_size, opacity=0.5, confidence=confidence, with_stats=True)
        if len(x) // delay_step_size > 2:
            plotly_utils.apply_fun_axis_fig(fig, lambda x : x / astroA.fr, axis='x')
        confidence_format = 'lines' if delay_step_size == 1 else 'bar'
        if confidence:
            fig2 = plotly_utils.plot_scatter_mult_tree(x=x, y_main=bh_val_avg, y_mult=bh_val_all_np, mode_main='lines', mode_mult='lines',
                                title='Total intervals: {}'.format(bh_val_all_np.shape[0]),
                                y_title='Speed (cm/s)' if y_title is None else y_title, x_title='Delay (s)', fit=fit, fit_annotation_pos_fix=astroA.fr,
                                bin_main_size=delay_step_size, bin_mult_size=delay_step_size, opacity=0.5, confidence=confidence, y_mult_include=False,
                                confidence_format=confidence_format)
            if len(x) // delay_step_size > 2:
                plotly_utils.apply_fun_axis_fig(fig2, lambda x : x / astroA.fr, axis='x')
            return {'event_avg' : fig, 'event_avg_no_mult' : fig2}, bin_stats
        else:
            return {'event_avg' : fig}, bin_stats
def get_transition_outliers_plot(self, astroA_l, before_bh, inds_bh, after_bh,
before_range=20, after_range=50, y_title=None,
delay_step_size=1, fit=False, measure=None, fix_dff_interval=50, confidence=False,
duration_filter=[None, None]):
'''
We have 2 behaviours in transition, before bh and after bh.
We calculate how many events in before bh and after bh are 1,2,3 sd > for measure set
We then normalize by number of events and also the length of the range of this behaviour
inds: the inds i to check
before_bh: for each i, make sure bh before is before_bh otherwize don't include i
after_bh: for each i, make sure bh after is after_bh otherwize don't include i
before_range: the range we look for events
after_range: the range we look for events
before_delay: the delay of the interval we look for continious befrore and after bh(its actually kind the range...)
'''
signal_delays_all_l_l = []
event_measure_all_l_l = []
#DFF max fix, to support both default and the fix
dff_max_to_fix = (measure == 'dffMax2')
if measure == 'dffMax2default':
measure = 'dffMax2'
#Fix dffMax by adding more range and delay
if dff_max_to_fix:
before_range += fix_dff_interval
after_range += fix_dff_interval
#Events total (all time) and 1,2,3 std thresholds for measure
all_events = []
for astroA in astroA_l:
all_events_individual = astroA.res_d[measure][astroA.event_subsets['default']]
print('-----', np.mean(all_events_individual))
all_events.extend(all_events_individual)
all_events_mean = np.mean(all_events)
all_events_std = np.std(all_events)
std_thresholds = np.array([all_events_mean + all_events_std,
all_events_mean + 2*all_events_std,
all_events_mean + 3*all_events_std])
for astroA in astroA_l:
inds = astroA.indices_d[inds_bh]
#Filter indices
indices_filt_before = aqua_utils.filter_range_inds(inds, astroA.indices_d[before_bh], range=(-before_range, -1), prop=1.0)
indices_filt_after = aqua_utils.filter_range_inds(inds, astroA.indices_d[after_bh], range=(1, after_range), prop=1.0)
indices_filt = np.array(np.sort(list(set(indices_filt_before) & set(indices_filt_after))))
#print('LEN INDICES_FILT: {}'.format(len(indices_filt)))
#print('TOTAL IND {} BEFORE {} AFTER {} JOIN {}'.format(len(inds), len(indices_filt_before), len(indices_filt_after), len(indices_filt)))
if len(indices_filt) == 0:
continue
#print('Len indices {} len filt before {} len filt after {} len filt {}'.format(len(inds), len(indices_filt_before), len(indices_filt_after), len(indices_filt)))
#print('LEN INDICES FILT : {}'.format(len(indices_filt)))
delay_info_args = {'event_inds_subset' : astroA.event_subsets['default'],
'min_delay' : -before_range,
'max_delay' : after_range,
'min_duration' : duration_filter[0],
'max_duration' : duration_filter[1],
'unique_events' : False,
'return_non_unique_delays_arr' : True
}
_, _, _, signal_delays_l_l, peak_mins_l_l, valid_event_i_l_l = aqua_utils.get_delay_info_from_res(indices_filt, astroA.res_d, **delay_info_args)
#DFF MAX FIX
#-------------------------------------------------------------------
signal_delays_l_l_amended = []
if measure is not None:
#Special case for amplitude, we want the exact spot where
#the maximum takes place, not beginning of event
#So we increase signal delay by max_frame_i - tBegin to incorporate this
if dff_max_to_fix:
for i, signal_delays_l in enumerate(signal_delays_l_l):
valid_event_i_l = valid_event_i_l_l[i]
new_delays_l = [(s + astroA.res_d['dffMaxFrame'][valid_event_i_l[j]] - astroA.res_d['tBegin'][valid_event_i_l[j]]) for j, s in enumerate(signal_delays_l)]
for j, s in enumerate(new_delays_l):
if s > after_range:
new_delays_l.pop(j)
valid_event_i_l.pop(j)
signal_delays_l_l_amended.append(new_delays_l)
signal_delays_l_l = signal_delays_l_l_amended
#-------------------------------------------------------------------
for i, signal_delays_l in enumerate(signal_delays_l_l):
signal_delays_all_l_l.append(signal_delays_l)
if measure is not None:
event_measure_all_l_l.append(list(astroA.res_d[measure][valid_event_i_l_l[i]]))
total_events = np.sum([len(signal_delays_all_l) for signal_delays_all_l in signal_delays_all_l_l])
#if measure is not None:
# total_events2 = np.sum([len(v_l) for v_l in event_measure_all_l_l])
#print('Total signals {} {}-{} delay {} {}'.format(total_events, before_bh, after_bh, before_range, after_range))
#Measure or event matrix
prop_all_np = np.zeros([len(signal_delays_all_l_l), after_range + before_range+1])
#Count events in case we are using a measure
ev_count_all_np = np.zeros([len(signal_delays_all_l_l), after_range + before_range+1])
measure_values_all = {'before' : [], 'after' : []}
for s_i, signal_delays_all_l in enumerate(signal_delays_all_l_l):
for state in ['before', 'after']:
if state == 'before':
valid_delays_i = np.where(np.array(signal_delays_all_l) < 0)
elif state == 'after':
valid_delays_i = np.where(np.array(signal_delays_all_l) > 0)
else:
sys.exit()
measure_values_l = list(np.array(event_measure_all_l_l[s_i])[valid_delays_i])
measure_values_all[state].extend(measure_values_l)
measure_values_all['before'] = np.array(measure_values_all['before'])
measure_values_all['after'] = np.array(measure_values_all['after'])
name_l = ['1 SD', '2 SD', '3 SD']
y_l_l = []
x_l = [['Before', 'After'] for i in range(3)]
for std_threshold in std_thresholds:
y_l = [[], []]
for i, state in enumerate(['before', 'after']):
sum_t_events = np.sum(measure_values_all[state] > std_threshold)
norm_t_events = sum_t_events / len(measure_values_all[state])
print(before_bh, after_bh)
print('STATE {} SUM T {} NORM T {} ALL MEAS {}'.format(state, sum_t_events, norm_t_events, len(measure_values_all[state])))
y_l[i].append(norm_t_events)
y_l_l.append(y_l)
print(y_l_l)
fig, stats_d = plotly_utils.plot_scatter_mult(x_l, y_l_l, name_l=name_l, mode='lines+markers', title='scatter', x_title='', y_title='',
xrange=None, yrange=None, confidence=False, with_stats=True)
return fig, stats_d
    def get_proportion_delays_plot_all(self, astroA_l, min_delay=-20, max_delay=50, avg_proportions=False, title=''):
        '''
        For stick find take stick_exact_start when the mouse first hits
        For running and rest:
        Take all rest frames. Stich them and then split into (max_delay-min_delay) frame segments
        Then see the events taking place at each point during the segment from min delay to max delay

        Produces one scatter figure (stick vs running vs rest proportions) per
        combination of unique/non-unique events and max-duration filter.
        Returns a dict keyed by 'prop-{unique|notunique}-{max_duration_...}'.
        '''
        #Unique, no unique
        #Num stick start non num stick start
        unique_args = [True, False]
        # duration_small of the first astrocyte is used as the shared
        # max-duration filter for all astrocytes in the list.
        max_duration_args = [None, astroA_l[0].duration_small]
        figs = {}
        stick_id = 'stick_exact_start'
        running_id = 'running_exact'
        rest_id = 'rest_exact'
        #Split into max_delay-min_delay frames
        split_size = (max_delay - min_delay) + 1
        # Running/rest baselines are computed once; only the stick curve
        # depends on the unique/max-duration settings below.
        running_prop, rest_prop = self.get_rest_run_proportion_events_interval(astroA_l, running_id='running_exact', rest_id='rest_exact', interval=split_size)
        for max_duration in max_duration_args:
            #STICK
            for un in unique_args:
                plot_id = 'prop-{}-{}'.format('unique' if un else 'notunique', 'max_duration_None' if (max_duration is None) else 'max_duration_' + str(max_duration))
                stick_prop = np.zeros([max_delay-min_delay+1])
                if not avg_proportions:
                    signal_delays_all_l = []
                for astroA in astroA_l:
                    # Keep only stick hits whose surrounding window is ≥95% running frames.
                    stick_indices_filt = aqua_utils.filter_range_inds(astroA.indices_d[stick_id], astroA.indices_d['running'], range=(min_delay, max_delay), prop=0.95)
                    delay_info_args = {'event_inds_subset' : astroA.event_subsets['default'],
                                        'min_delay' : min_delay,
                                        'max_delay' : max_delay,
                                        'max_duration' : max_duration,
                                        'unique_events' : un
                                        }
                    signal_delays_stick_np, peak_delays_stick_np = aqua_utils.get_delay_info_from_res(stick_indices_filt, astroA.res_d, **delay_info_args)
                    if avg_proportions:
                        # Per-astrocyte proportions, averaged after the loop.
                        for i, delay_x in enumerate(range(min_delay, max_delay+1)):
                            stick_prop[i] += float(np.sum(signal_delays_stick_np == delay_x)) / len(signal_delays_stick_np)
                    if not avg_proportions:
                        # Pool all delays and compute proportions once, below.
                        signal_delays_all_l.extend(list(signal_delays_stick_np))
                if avg_proportions:
                    stick_prop /= len(astroA_l)
                if not avg_proportions:
                    signal_delays_all = np.array(signal_delays_all_l)
                    for i, delay_x in enumerate(range(min_delay, max_delay+1)):
                        stick_prop[i] = float(np.sum(signal_delays_all == delay_x)) / len(signal_delays_all)
                x_l = [np.arange(min_delay, max_delay+1) for i in range(3)]
                y_l = [stick_prop, running_prop, rest_prop]
                figs[plot_id] = plotly_utils.plot_scatter_mult(x_l, y_l, name_l=['stick', 'running', 'rest'], mode='lines', title=title, x_title='Delay (s)', y_title='Events')
                # NOTE(review): the lambda captures the last astroA of the loop;
                # this assumes all astrocytes share the same frame rate — confirm.
                plotly_utils.apply_fun_axis_fig(figs[plot_id], lambda x : x / astroA.fr, axis='x')
        return figs
def get_rest_run_proportion_events_interval(self, astroA_l, running_id='running_exact', rest_id='rest_exact', interval=71):
running_prop = np.zeros([interval])
rest_prop = np.zeros([interval])
for astroA in astroA_l:
############################################################
#RUNNING AND REST
running_ind = astroA.indices_d[running_id]
rest_ind = astroA.indices_d[rest_id]
if len(running_ind) % interval != 0:
running_ind = running_ind[:-(len(running_ind) % interval)]
if len(rest_ind) % interval != 0:
rest_ind = rest_ind[:-(len(rest_ind) % interval)]
running_split_l = np.split(running_ind, len(running_ind) / interval)
rest_split_l = np.split(rest_ind, len(rest_ind) / interval)
#Add events in delays based on their delay. Ignore events if there is max duration filter
#For each split of frames, get events in those frames
split_d = {'default' : astroA.indices_d['default']}
for i, running_split in enumerate(running_split_l):
split_d['running_{}'.format(i)] = running_split
for i, rest_split in enumerate(rest_split_l):
split_d['rest_{}'.format(i)] = rest_split
event_subsets, indices_events_bin = aqua_utils.get_event_subsets(split_d, astroA.res_d, after_i=0, before_i=0, to_print=False, return_info=True)
for k in split_d.keys():
if k != 'default':
#Take indices_d x events and take only current split (split x events)
indices_events_k_subset = indices_events_bin[split_d[k], :]
#Sum over events to get array of positions where events took place in split_d
indices_k_subset_sum = np.sum(indices_events_k_subset, axis=(1))
if 'rest' in k:
rest_prop += indices_k_subset_sum
elif 'running' in k:
#Then add these to running prop. At each spot is the number of events that took place
running_prop += indices_k_subset_sum
else:
print('????what', k)
############################################################
running_prop = running_prop / np.sum(running_prop)
rest_prop = rest_prop / np.sum(rest_prop)
return running_prop, rest_prop
    def get_triplet_plots(self, astroA, n_bins):
        """Triplet plot (event count / size / duration per radius bin) for the
        first behaviour key with enough events, plus a radius-bin contour.

        Requires astroA.aqua_bound to be True. The loop over event_subsets
        breaks after the first key that has events in every radius bin, so at
        most one triplet figure is produced.

        Returns:
            (fig_triplets, fig_radii_border): dict of triplet figures keyed by
            behaviour, and the radius-extension contour figure; (None, None)
            when aqua_bound is False.
        """
        fig_triplets = {}
        fig_radii_border = None
        for k in astroA.event_subsets.keys():
            if astroA.aqua_bound == False:
                print('Plot triplet requires aqua bound to be true')
                return None, None
            #Find event centroids:
            #For each event in x2D extract 2D coordinates as mask and get event centroid coordinates
            #event_centroids = aqua_utils.get_event_centroids_from_x2D(astroA.res_d['x2D'], (astroA.input_shape[0], astroA.input_shape[1]))
            border_mask = astroA.res_d['border_mask']
            clandmark_center = astroA.res_d['clandmark_center']
            # Distances come in micrometers; convert to pixels via spatial_res.
            event_distances_from_center_micrometers = astroA.res_d['clandmark_distAvg'][astroA.event_subsets[k]]
            event_distances_from_center = event_distances_from_center_micrometers / astroA.spatial_res
            event_durations = astroA.res_d['tEnd'][astroA.event_subsets[k]] - astroA.res_d['tBegin'][astroA.event_subsets[k]]
            event_areas = astroA.res_d['area'][astroA.event_subsets[k]]
            # Bin events by radial distance from the landmark center.
            n_events_arr_norm, n_events_i_arr, area_bins, r_bins = aqua_utils.radius_event_extraction(event_distances_from_center, clandmark_center, border_mask, n_bins=n_bins)
            event_distances_from_center_bins_l = []
            event_areas_bins_l = []
            event_durations_bins_l = []
            for event_inds in n_events_i_arr:
                event_distances_from_center_bins_l.append(event_distances_from_center[event_inds])
                event_areas_bins_l.append(event_areas[event_inds])
                event_durations_bins_l.append(event_durations[event_inds])
            # Every radius bin must contain at least one event; otherwise try
            # the next behaviour key.
            if 0 in [len(n) for n in n_events_i_arr]:
                print('not enough events for key: ', k)
                continue
            border_mask_temp = np.copy(astroA.res_d['border_mask'])
            #When indexing 2D array for x,y coordinates we need to index arr[row][col] = arr[y][x] so we flip the coordinates
            clandmark_center_flipped = (astroA.res_d['clandmark_center'][1], astroA.res_d['clandmark_center'][0])
            clandmark_center_flipped_int = (int(clandmark_center_flipped[0]), int(clandmark_center_flipped[1]))
            r_bin_diff = r_bins[1] - r_bins[0]
            #Radius bins of triplet plot on top of heatmap:
            #scale each border pixel by the lower edge of the radius bin it
            #falls in (or one bin past the last edge when beyond all bins).
            for i in range(border_mask_temp.shape[0]):
                for j in range(border_mask_temp.shape[1]):
                    v = border_mask_temp[i, j]
                    if v != 0:
                        r_dist = aqua_utils.get_euclidean_distances(clandmark_center_flipped, [i, j])
                        search_ind_r = np.searchsorted(r_bins, r_dist, side='right')
                        if search_ind_r == len(r_bins):
                            border_mask_temp[i, j] *= (r_bins[-1]+r_bin_diff)
                            #print('DISTANCE LARGER THAN MAX EVENT??', r_dist)
                        else:
                            border_mask_temp[i, j] *= r_bins[search_ind_r-1]
            #print('CLANDMARK CENTER', astroA.res_d['clandmark_center'])
            # Mark the landmark center pixel so it stands out in the contour.
            border_mask_temp[clandmark_center_flipped_int] -= r_bins[1]
            # The radius-border contour is only rendered once (first valid key).
            if fig_radii_border == None:
                fig_radii_border = plotly_utils.plot_contour(border_mask_temp, title='radius_extension_from_center', height=1000, width=1000,
                                                            color_bar_title='Radius (pixels)')
            # The outermost bin is dropped ([:-1]) from all series.
            fig_triplets[k] = plotly_utils.plot_event_triplet(num_events_bins=n_events_arr_norm[:-1],
                                                            distances_bins=r_bins[:-1],
                                                            sizes_bins_lists=event_areas_bins_l[:-1],
                                                            durations_bins_lists=event_durations_bins_l[:-1],
                                                            height=1000,
                                                            width=1000,
                                                            spatial_res=astroA.spatial_res,
                                                            fr=(1.0/astroA.fr_inv),
                                                            title=k + '_event_triplet_plot')
            break
        return fig_triplets, fig_radii_border
def write_csv_duration_splits(self, astroA, path):
#How are the indices split between short, medium and long during running, stick, ...
with open(os.path.join(path), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(['behaviour', 'short', 'medium', 'long', 'total events', 'total frames'])
for k in astroA.indices_d.keys():
short_signals_len = np.sum(astroA.all_durations_class_d[k] == 1)
medium_signals_len = np.sum(astroA.all_durations_class_d[k] == 2)
long_signals_len = np.sum(astroA.all_durations_class_d[k] == 3)
total_signals = short_signals_len + medium_signals_len + long_signals_len
short_signals_ratio = general_utils.truncate(short_signals_len/total_signals, 2)
long_signals_ratio = general_utils.truncate(long_signals_len/total_signals, 2)
medium_signals_ratio = general_utils.truncate(medium_signals_len/total_signals, 2)
writer.writerow([k, short_signals_ratio, medium_signals_ratio, long_signals_ratio, total_signals, len(astroA.indices_d[k])])
def save_xcorr_pairs_align_results_csv(self, save_path, astro_pair_l, pair_fakes_before, pair_fakes_after, pair_corrs_before, pair_corrs_after):
with open(os.path.join(save_path), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i, astro_pair in enumerate(astro_pair_l):
name = astro_pair[0].print_id + '-' + astro_pair[1].print_id
writer.writerow(['Name'])
writer.writerow([name])
writer.writerow(['Samples before'])
writer.writerow([general_utils.truncate(v, 4) for v in pair_fakes_before[i]])
writer.writerow(['Samples after'])
writer.writerow([general_utils.truncate(v, 4) for v in pair_fakes_after[i]])
writer.writerow(['Corr before'])
writer.writerow([general_utils.truncate(pair_corrs_before[i], 4)])
writer.writerow(['Corr after'])
writer.writerow([general_utils.truncate(pair_corrs_after[i], 4)])
writer.writerow([])
writer.writerow([])
def get_duration_split_differences_from_default(self, astroA):
#How are the indices split between short, medium and long during running, stick, ...
relative_ratios = {}
ratios = {}
lengths = {}
ks = np.array(self.filter_keys(astroA))
for k in ks:
relative_ratios[k] = {}
ratios[k] = {}
lengths[k] = {}
short_signals_len = np.sum(astroA.all_durations_class_d[k] == 1)
medium_signals_len = np.sum(astroA.all_durations_class_d[k] == 2)
long_signals_len = np.sum(astroA.all_durations_class_d[k] == 3)
total_signals = short_signals_len + medium_signals_len + long_signals_len
lengths[k]['short'] = short_signals_len
lengths[k]['medium'] = medium_signals_len
lengths[k]['long'] = long_signals_len
short_signals_ratio = general_utils.truncate(short_signals_len/total_signals, 4)
long_signals_ratio = general_utils.truncate(long_signals_len/total_signals, 4)
medium_signals_ratio = general_utils.truncate(medium_signals_len/total_signals, 4)
ratios[k]['short'] = short_signals_ratio
ratios[k]['medium'] = medium_signals_ratio
ratios[k]['long'] = long_signals_ratio
for dk in ratios[k].keys():
relative_ratios[k][dk] = ratios[k][dk] - ratios['default'][dk] + 0.00001
x = ks
y_l = [[relative_ratios[k][duration_k] for k in ks] for duration_k in ['short', 'medium', 'long']]
text_values_l = [[lengths[k][duration_k] for k in ks] for duration_k in ['short', 'medium', 'long']]
legends=['short', 'medium', 'long']
fig = plotly_utils.plot_group_bar(x=x, y_l=y_l, text_values_l=text_values_l, legends=legends, x_title='Behaviour', y_title='Relative difference to default', title='Relative difference in short,medium,long signals to default')
return fig
def get_signal_fig(self, astroA, event_i):
x1 = np.arange(len(astroA.res_d['dff_only'][event_i]))
y1 = astroA.res_d['dff_only'][event_i]
divisor = astroA.res_d['dff_only'].shape[1]
t_begin = int(astroA.res_d['tBegin'][event_i] % divisor)
t_end = int(astroA.res_d['tEnd'][event_i] % divisor)
if t_begin > t_end:
print('Tbegin > tEnd')
return None
fig = plotly_utils.plot_scatter_signal(x=x1, y=y1, begin_i=t_begin, end_i=t_end, mode='lines', title='Signal', x_title='', y_title='')
return fig
def get_signal_figs_samples(self, astroA, sample_num=20):
event_sample_inds = np.random.choice(len(astroA.res_d['tBegin']), sample_num, replace=False)
figs = []
for event_i in event_sample_inds:
figs.append(self.get_signal_fig(astroA, event_i))
return figs
def get_signal_bk_figs_samples(self, astroA, sample_num=10):
figs = {}
for bk in astroA.event_subsets.keys():
sample_num_x = min(len(astroA.event_subsets[bk]), sample_num)
event_sample_inds = np.random.choice(astroA.event_subsets[bk], sample_num_x, replace=False)
figs[bk] = []
for event_i in event_sample_inds:
figs[bk].append(self.get_signal_fig(astroA, event_i))
return figs
######Other analysis#######
def get_compare_max_corrs_plots(self, astro_l_pair):
corr_pair_d = self.read_corr_pair_data(astro_l_pair)
figs = {}
for prob_k in corr_pair_d.keys():
print(prob_k)
self_max_corr = corr_pair_d[prob_k]['self']['max_corr']
compare_max_corr = corr_pair_d[prob_k]['compare']['max_corr']
samples_max_corr_l = corr_pair_d[prob_k]['samples']['max_corr_l']
x = ['self: day {}'.format(astro_l_pair[0].day),
'comparison: day {}'.format(astro_l_pair[1].day),
'samples']
y = [[self_max_corr], [compare_max_corr], samples_max_corr_l]
figs[prob_k] = plotly_utils.plot_point_box_revised(x, y, title='Max correlations', x_title='', y_title='Max correlation value')
return figs
    def get_compare_align_plots(self, astro_l_pair):
        """Intersection-size comparison plots for a pair of astrocytes under
        two alignment strategies ('xcorr' and 'clandmark').

        For each alignment strategy the move vector is computed once (on the
        'default' behaviour, p=0.05), then reused ('param' alignment) for every
        filter probability to compare the aligned intersection size against
        random fake samples.

        Returns:
            dict figs[align_setting][pk] of point-box figures.
        """
        figs = {}
        align_setting_l = ['xcorr', 'clandmark']
        move_vector_d = {'xcorr' : None, 'clandmark' : None}
        for align_setting in align_setting_l:
            # First pass: only used to obtain the move vector for this strategy.
            d_temp = compare_astro_utils.alignment_counter(astro_l_pair[0], astro_l_pair[1],
                                                            n_fake_samples=0,
                                                            align_setting=align_setting,
                                                            eval_setting='counter',
                                                            fake_sample_setting='from_grid',
                                                            behaviour='default',
                                                            p=0.05)
            move_vector_d[align_setting] = d_temp['move_vector']
            figs[align_setting] = {}
            for pk in self.filter_probs:
                # Second pass: fixed move vector, counting intersections and
                # generating fake samples for the comparison.
                d = compare_astro_utils.alignment_counter(astro_l_pair[0], astro_l_pair[1],
                                                            n_fake_samples=self.n_samples_corr_fake,
                                                            align_setting='param',
                                                            eval_setting='counter',
                                                            fake_sample_setting='from_grid',
                                                            move_vector=move_vector_d[align_setting],
                                                            p=pk)
                x = ['fake_samples', 'day {} aligned'.format(astro_l_pair[1].day), 'day {} self aligned'.format(astro_l_pair[0].day)]
                y = [d['num_fake'], [d['num_compare']], [d['num_self']]]
                figs[align_setting][pk] = plotly_utils.plot_point_box_revised(x, y, title='Day {} with day {} alignment. Comparison of intersection size to random samples - p = {}'.format(astro_l_pair[0].day, astro_l_pair[1].day, pk),
                                                                                x_title='', y_title='Aligned intersection size')
        return figs
    def get_compare_align_plots_xcorr(self, astro_l_pair, align_setting='xcorr', dff_mode=False, behaviour='default'):
        """Point-box figure comparing the pair's aligned xcorr value against
        fake samples, annotated with a t-test p-value.

        The move vector is first computed on the 'default' behaviour so it is
        identical across behaviours, then reused for the requested behaviour.
        NOTE(review): dff_mode is only printed, never used in the computation.
        """
        d_temp = compare_astro_utils.alignment_counter(astro_l_pair[0], astro_l_pair[1],
                                            n_fake_samples=0,
                                            align_setting=align_setting,
                                            eval_setting='xcorr',
                                            fake_sample_setting='from_astro',
                                            p=1,
                                            behaviour='default') #Here I use default to homogenize move_vector result over different behaviours
        move_vector = d_temp['move_vector']
        d = compare_astro_utils.alignment_counter(astro_l_pair[0], astro_l_pair[1],
                                            n_fake_samples=self.n_samples_corr_fake,
                                            align_setting='param',
                                            eval_setting='xcorr',
                                            fake_sample_setting='from_astro',
                                            move_vector=move_vector,
                                            p=1,
                                            behaviour=behaviour)
        x = ['fake_samples', 'day {} aligned'.format(astro_l_pair[1].day)]
        y = [d['num_fake'], [d['num_compare']]]
        # NOTE(review): y[1] has a single element, so its std is 0 — the t-test
        # effectively compares the fake distribution against a point value.
        tstat, pvalue = ttest_ind_from_stats(np.mean(y[0]), np.std(y[0]), len(y[0]), np.mean(y[1]), np.std(y[1]), len(y[1]))
        print('NUM COMPARE: {}, mode {} behaviour {}'.format(d['num_compare'], dff_mode, behaviour))
        fig = plotly_utils.plot_point_box_revised(x, y, title='{} Day {} with day {} alignment. Comparison to random. p={:.2e}'.format(behaviour, astro_l_pair[0].day, astro_l_pair[1].day, pvalue),
                                            x_title='', y_title='Aligned xcorr value')
        return fig
def get_day_heatmaps_scaled(self, astroA_pair, bh='default', dff_mode=False):
if dff_mode==True:
raise NotImplementedError()
if (bh not in astroA_pair[0].event_grids_1min) or (bh not in astroA_pair[0].event_grids_1min):
return None
day_0_grid = astroA_pair[0].event_grids_1min[bh] if dff_mode else astroA_pair[0].event_grids_1min_dff[bh]
day_1_grid = astroA_pair[1].event_grids_1min[bh] if dff_mode else astroA_pair[1].event_grids_1min_dff[bh]
max_0 = np.max(day_0_grid)
max_1 = np.max(day_1_grid)
if max_1 > max_0:
contour_day_0, details_d = plotly_utils.plot_contour_threshold(day_0_grid, threshold_perc=1.0, title=bh + '_event grid', with_details=True)
min_v, max_v = details_d['min'], details_d['max']
contour_day_x = plotly_utils.plot_contour_threshold(day_1_grid, threshold_perc=None, set_min_v=min_v, set_max_v=max_v, title=bh+ '_event_grid')
else:
contour_day_x, details_d = plotly_utils.plot_contour_threshold(day_1_grid, threshold_perc=1.0, title=bh + '_event grid', with_details=True)
min_v, max_v = details_d['min'], details_d['max']
contour_day_0 = plotly_utils.plot_contour_threshold(day_0_grid, threshold_perc=None, set_min_v=min_v, set_max_v=max_v, title=bh+ '_event_grid')
return {'contour_0' : contour_day_0, 'contour_x' : contour_day_x}
def get_individual_heatmaps_threshold_scaled(self, astroA, bh='default', threshold=0.7, num_samples=3, dff_mode=False, with_arr=False):
if dff_mode==True:
raise NotImplementedError()
arrs_d = {}
if (bh not in astroA.event_grids_1min) or (bh not in astroA.event_grids_1min):
return None
contour, details_d = plotly_utils.plot_contour_threshold(astroA.event_grids_1min[bh] if not dff_mode else astroA.event_grids_1min_dff[bh], title=bh + '_event grid', threshold_perc=threshold, with_details=True)
min_v, max_v = details_d['min'], details_d['max']
f, sample_l = self.get_random_astrocyte_plot(astroA, bh=bh, with_samples=True, num_samples=num_samples)
contour_random_l = []
if with_arr:
arrs_d['arr'] = details_d['arr']
arrs_d['arr_r'] = []
for sample in sample_l:
contour_random, details_r_d = plotly_utils.plot_contour_threshold(sample, threshold_perc=None, set_min_v=min_v, set_max_v=max_v, title=bh + '_random_event_grid', with_details=True)
contour_random_l.append(contour_random)
arrs_d['arr_r'].append(details_r_d['arr'])
if with_arr:
return {'contour' : contour,
'contour_random' : contour_random_l,
'arrs_d' : arrs_d}
return {'contour' : contour,
'contour_random' : contour_random_l}
    def get_compare_frame_split_plots(self, astroA):
        """Self-consistency plots: intersection ratios between random pairs of
        frame splits of the same astrocyte, vs fake samples.

        For every filter probability and split size, up to
        self.max_split_comparison_samples split pairs are compared via
        alignment_counter; the fake/self and compare/self ratios are collected
        and plotted as multi-point boxes annotated with a t-test p-value.

        Returns:
            dict figs[pk] of multi-point-box figures.
        """
        figs = {}
        data_d = {}
        event_grid_splits_d = {}
        # Precompute the grid splits once per split size; they are reused for
        # every filter probability.
        for split_frames in self.num_frames_splits_l:
            event_grid_splits_d[split_frames] = compare_astro_utils.split_astro_grid(astroA, split_frames=split_frames, bk='default')
        for pk in self.filter_probs:
            # figs[pk] is re-assigned to the final figure below; this init is
            # effectively a placeholder.
            figs[pk] = {}
            data_d[pk] = {}
            for split_frames in self.num_frames_splits_l:
                event_grid_splits_l = event_grid_splits_d[split_frames]
                # All unordered pairs of splits (i < j).
                pairs = [(i, j ) for i in range(len(event_grid_splits_l)) for j in range(i+1, len(event_grid_splits_l))]
                data_d[pk][split_frames] = {'num_fake_l' : [], 'num_compare_l' : [], 'num_self_l' : [], 'num_fake_ratio_l' : [], 'num_compare_ratio_l' : []}
                # Subsample pairs when there are too many.
                # NOTE(review): the message reads backwards — this branch runs
                # when len(pairs) > max_split_comparison_samples.
                if len(pairs) > self.max_split_comparison_samples :
                    print('Max comparisons > len pairs, {} > {}'.format(self.max_split_comparison_samples, len(pairs)))
                    pairs_perm = np.random.permutation(pairs)
                    pairs = pairs_perm[:self.max_split_comparison_samples]
                for (i, j) in pairs:
                    #Pretty much takes p highest, and calculates intersections with self, compare and fake samples
                    d = compare_astro_utils.alignment_counter(astroA, astroA,
                                                    grid_target=event_grid_splits_l[i], grid_source=event_grid_splits_l[j],
                                                    n_fake_samples=1, align_setting='param',
                                                    move_vector=[0,0], p=pk)
                    data_d[pk][split_frames]['num_fake_l'].append(d['num_fake'][0])
                    data_d[pk][split_frames]['num_compare_l'].append(d['num_compare'])
                    data_d[pk][split_frames]['num_self_l'].append(d['num_self'])
                    # Ratios are normalized by the self-intersection size.
                    data_d[pk][split_frames]['num_fake_ratio_l'].append(d['num_fake'][0]/float(d['num_self']))
                    data_d[pk][split_frames]['num_compare_ratio_l'].append(d['num_compare']/float(d['num_self']))
            x = []
            y = [[], []]
            for split_frames in self.num_frames_splits_l:
                if len(data_d[pk][split_frames]['num_fake_ratio_l']) == 0:
                    continue
                # t-test between compare and fake ratio distributions for this
                # split size; its p-value is embedded in the x labels.
                f_m = np.mean(data_d[pk][split_frames]['num_fake_ratio_l'])
                f_s = np.std(data_d[pk][split_frames]['num_fake_ratio_l'])
                f_l = len(data_d[pk][split_frames]['num_fake_ratio_l'])
                c_m = np.mean(data_d[pk][split_frames]['num_compare_ratio_l'])
                c_s = np.std(data_d[pk][split_frames]['num_compare_ratio_l'])
                c_l = len(data_d[pk][split_frames]['num_compare_ratio_l'])
                tstat, pvalue = ttest_ind_from_stats(c_m, c_s, c_l, f_m, f_s, f_l)
                for i in range(len(data_d[pk][split_frames]['num_fake_ratio_l'])):
                    x.append('~ {} minutes <br /> p = {:.1e}'.format(general_utils.truncate(split_frames/(astroA.fr*60), 1), pvalue))
                y[0].extend(data_d[pk][split_frames]['num_fake_ratio_l'])
                y[1].extend(data_d[pk][split_frames]['num_compare_ratio_l'])
                #print('TSTAT: {} PVALUE: {}'.format(tstat, pvalue))
            figs[pk] = plotly_utils.plot_multi_point_box(x, y, names=['Fake', 'Compare'], title='Splits comparison - Top {}%: '.format(pk*100),
                                                            x_title='', y_title='Intersection ratio')
        return figs
def get_compare_corrs_samples_plots(self, astroA_l):
figs = {}
corr_pair_d = self.read_corr_pair_data(astroA_l)
for pk in corr_pair_d.keys():
astro_filt_l, astro_all_filt, astro_nz_bool_l, astro_all_nz_bool = compare_astro_utils.get_filters_compare(astroA_l, p=pk)
ids = ['astro_a', 'astro_b', 'border', 'sample_1', 'sample_2', 'sample_3']
print('COOR PAIR D', corr_pair_d)
print(pk)
print(len(corr_pair_d[pk]['samples']['sample_l']))
grid_l = [astro_filt_l[0],
astro_filt_l[1],
astro_nz_bool_l[1].astype(int),
corr_pair_d[pk]['samples']['sample_l'][0],
corr_pair_d[pk]['samples']['sample_l'][1],
corr_pair_d[pk]['samples']['sample_l'][2]]
titles = ['Astrocyte A day {}'.format(astroA_l[0].day),
'Astrocyte B day {}'.format(astroA_l[1].day),
'Border Astro B',
'Sample 1',
'Sample 2',
'Sample 3']
for i, grid in enumerate(grid_l):
if i == 0:
figs[pk] = {}
figs[pk][ids[i]] = plotly_utils.plot_contour(grid, title=titles[i])
return figs
def get_frame_split_example_plots(self, astroA):
figs = {}
for pk in self.filter_probs:
figs[pk] = {}
for num_frames_split in self.num_frames_splits_l:
event_grid_splits_l = compare_astro_utils.split_astro_grid(astroA, split_frames=num_frames_split, bk='default')
if len(event_grid_splits_l) < 2:
continue
inds = np.random.choice(len(event_grid_splits_l), 2, replace=False)
grid_l_pair = [event_grid_splits_l[inds[0]], event_grid_splits_l[inds[1]]]
astro_filt_l_tmp, astro_all_filt_tmp, astro_nz_bool_l_tmp, astro_all_nz_bool_tmp = compare_astro_utils.get_filters_compare_from_grids(grid_l_pair, p=float(pk))
figs[pk][str(num_frames_split)] = plotly_utils.plot_contour_multiple(astro_filt_l_tmp, title='Frames: {} top {}%'.format(num_frames_split, pk))
return figs
    def get_compare_full_self_frame_split_plot_xcorr(self, astroA, minute_frame_splits_l=None):
        '''
        Grid 1 is normalized event heatmap of astroA
        We apply frame splits of 0.5, 1, 2, 5, 10 ,15, 20, 25, 30 minutes splits to obtain grid 2.
        Then we get cross correlation between the 2

        Returns (fig_a, fig_b): a point-box figure and a scatter-error figure
        of split-to-full correlations per split size.
        '''
        xcorr_split_corrs_d = {}
        grid_1 = astroA.event_grids['default']
        if minute_frame_splits_l is None:
            # Default: the first three configured split sizes, largest first.
            minute_frame_splits_l = self.num_frames_splits_m_l[0:3][::-1]
        #frame_splits_l_temp = [int(np.round(astroA.fr*split_frames_m*60)) for split_frames_m in self.num_frames_splits_m_l[::-1]]
        # Convert minute splits to frame counts, and prepend the full recording
        # so the first entry compares the full grid against itself.
        frame_splits_l_temp = [int(np.round(astroA.fr*split_frames_m*60)) for split_frames_m in minute_frame_splits_l]
        frame_splits_l_temp.insert(0, len(astroA.indices_d['default']))
        for split_frames in frame_splits_l_temp:
            xcorr_split_corrs_d[split_frames] = []
            print('Split frames (self): {}'.format(split_frames))
            event_grid_splits_l = compare_astro_utils.split_astro_grid(astroA, split_frames=split_frames, bk='default')
            for i in range(len(event_grid_splits_l)):
                grid_2 = event_grid_splits_l[i]
                # Cross-correlate each split grid against the full grid.
                res = compare_astro_utils.alignment_counter(astroA, astroA, grid_target=grid_1, grid_source=grid_2, n_fake_samples=0,
                                                align_setting='param', eval_setting='xcorr', fake_sample_setting='from_grid',
                                                move_vector=[0,0], p=1, dff_mode=False, behaviour='default', filter_duration=(None, None),
                                                with_output_details=False, border_nan=True)
                corr_res = res['num_compare']
                xcorr_split_corrs_d[split_frames].append(corr_res)
        # Labels are the approximate split lengths in minutes.
        x = ['~ {}'.format(np.round(general_utils.truncate(split_frames/(astroA.fr*60), 1), decimals=1)) for split_frames in frame_splits_l_temp]
        y = [xcorr_split_corrs_d[split_frames] for split_frames in frame_splits_l_temp]
        # Drop split sizes that produced no correlations.
        x_fixed = []
        y_fixed = []
        for i in range(len(y)):
            if len(y[i]) == 0:
                continue
            x_fixed.append(x[i])
            y_fixed.append(y[i])
        fig_a = plotly_utils.plot_point_box_revised(x_fixed, y_fixed, title='Split correlation to full', x_title='Minutes splits', y_title='Correlation (max = 1)')
        fig_b = plotly_utils.plot_scatter_error(x_fixed, y_fixed, title='Split correlation to full', x_title='Minutes splits', y_title='Correlation (max = 1)')
        return fig_a, fig_b
def get_compare_full_self_results_alt(self, astroA, cut_duration_min=70, minute_frame_splits_l=None, save_pkl_path=None):
'''
Grid 1 is normalized event heatmap of astroA
We apply frame splits of 0.5, 1, 2, 5, 10 ,15, 20, 25, 30 minutes splits to obtain grid 2.
Then we get cross correlation between the 2
'''
res_d = {}
if (cut_duration_min is not None):
if (astroA.total_minutes < cut_duration_min):
return None
if os.path.isfile(save_pkl_path):
print('FILE EXISTS')
res_d = saving_utils.load_pickle(save_pkl_path)
else:
xcorr_split_corrs_d = {}
#How many frames are cut_duration_min minutes? We cut up to this point
num_frames_cut_duration = int(cut_duration_min * astroA.minute_frames)
default_ind = astroA.indices_d['default']
cut_ind = astroA.indices_d['default'][:num_frames_cut_duration]
event_subsets_temp = aqua_utils.get_event_subsets({'default' : default_ind, 'cut' : cut_ind}, astroA.res_d, after_i=0, before_i=0, to_print=False)
cut_event_subsets = event_subsets_temp['cut']
grid_1 = aqua_utils.get_event_grid_from_x2D(astroA.res_d['x2D'][cut_event_subsets], (astroA.input_shape[0], astroA.input_shape[1]))
if minute_frame_splits_l is None:
minute_frame_splits_l = self.num_frames_splits_m_l[0:3][::-1]
frame_splits_l_temp = [int(np.round(astroA.fr*split_frames_m*60)) for split_frames_m in minute_frame_splits_l]
frame_splits_l_temp.insert(0, len(cut_ind))
for split_frames in frame_splits_l_temp:
xcorr_split_corrs_d[split_frames] = []
event_grid_splits_l = compare_astro_utils.split_astro_grid(astroA, split_frames=split_frames, bk='default', inds_subset=cut_ind)
for i in range(len(event_grid_splits_l)):
grid_2 = event_grid_splits_l[i]
res = compare_astro_utils.alignment_counter(astroA, astroA, grid_target=grid_1, grid_source=grid_2, n_fake_samples=0,
align_setting='param', eval_setting='xcorr', fake_sample_setting='from_grid',
move_vector=[0,0], p=1, dff_mode=False, behaviour='default', filter_duration=(None, None),
with_output_details=False, border_nan=True)
corr_res = res['num_compare']
xcorr_split_corrs_d[split_frames].append(corr_res)
#x = minute_frame_splits_l
x = ['~ {}'.format(np.round(general_utils.truncate(split_frames/(astroA.fr*60), 1), decimals=1)) for split_frames in frame_splits_l_temp]
y = [xcorr_split_corrs_d[split_frames] for split_frames in frame_splits_l_temp]
x_fixed = []
y_fixed = []
for i in range(len(y)):
if len(y[i]) == 0:
continue
x_fixed.append(x[i])
y_fixed.append(y[i])
res_d['x'] = x_fixed
res_d['y'] = y_fixed
if save_pkl_path is not None:
saving_utils.save_pickle(res_d, save_pkl_path)
return res_d
    def get_compare_full_self_frame_split_split_plot_xcorr(self, astroA):
        '''
        Split-vs-split self correlation at multiple temporal resolutions.

        Grid 1 is normalized event heatmap of astroA
        We apply frame splits of minutes splits to obtain grid 2.
        Then we get cross correlation between the 2

        For each split size, the recording is cut into chunks and every
        unordered pair of chunk grids is cross-correlated (only when more
        than 2 chunks exist). Returns (point/box figure, scatter-with-error
        figure) over the surviving split sizes.
        '''
        xcorr_split_corrs_d = {}
        # NOTE(review): this initial grid_1 is never used — it is reassigned
        # inside the loop below before any correlation is computed.
        grid_1 = astroA.event_grids['default']
        # Convert minute-based split sizes (reversed: largest first) to frame counts.
        frame_splits_l_temp = [int(np.round(astroA.fr*split_frames_m*60)) for split_frames_m in self.num_frames_splits_splits_m_l[::-1]]
        for split_frames in frame_splits_l_temp:
            xcorr_split_corrs_d[split_frames] = []
            event_grid_splits_l = compare_astro_utils.split_astro_grid(astroA, split_frames=split_frames, bk='default')
            if len(event_grid_splits_l) > 2:
                # Every unordered pair of chunks: correlate chunk i with chunk j.
                for i in range(len(event_grid_splits_l)):
                    grid_1 = event_grid_splits_l[i]
                    for j in range(i+1, len(event_grid_splits_l)):
                        grid_2 = event_grid_splits_l[j]
                        res = compare_astro_utils.alignment_counter(astroA, astroA, grid_target=grid_1, grid_source=grid_2, n_fake_samples=0,
                                                        align_setting='param', eval_setting='xcorr', fake_sample_setting='from_grid',
                                                        move_vector=[0,0], p=1, dff_mode=False, behaviour='default', filter_duration=(None, None),
                                                        with_output_details=False, border_nan=True)
                        corr_res = res['num_compare']
                        xcorr_split_corrs_d[split_frames].append(corr_res)
        # X labels: approximate split length in minutes.
        x = ['~ {}'.format(np.round(general_utils.truncate(split_frames/(astroA.fr*60), 1), decimals=1)) for split_frames in frame_splits_l_temp]
        y = [xcorr_split_corrs_d[split_frames] for split_frames in frame_splits_l_temp]
        x_fixed = []
        y_fixed = []
        # Drop split sizes that produced no correlations (<= 2 chunks).
        for i in range(len(y)):
            if len(y[i]) == 0:
                continue
            x_fixed.append(x[i])
            y_fixed.append(y[i])
        fig_a = plotly_utils.plot_point_box_revised(x_fixed, y_fixed, title='Split-split correlation', x_title='Minutes splits', y_title='Correlation (max = 1)')
        fig_b = plotly_utils.plot_scatter_error(x_fixed, y_fixed, title='Split-split correlation to full', x_title='Minutes splits', y_title='Correlation (max = 1)')
        return fig_a, fig_b
def get_random_astrocyte_plot(self, astroA, num_samples=3, bh='default', with_samples=False):
event_areas = astroA.res_d['area'][astroA.event_subsets[bh]] / (astroA.spatial_res**2)
fig_l = []
sample_l = []
for i in range(num_samples):
sample = compare_astro_utils.get_fake_astrocyte_sample_from_areas(astroA, event_areas, mode='append', filter_ratio=1)
sample = (sample / len(astroA.indices_d[bh])) * astroA.minute_frames
fig_l.append(plotly_utils.plot_contour(sample, title='Random event contour plot'))
sample_l.append(sample)
if with_samples:
return fig_l, sample_l
return fig_l
def filter_keys(self, astroA):
to_remove_k = list(set(self.behaviours_list_a) - set(list(astroA.event_subsets.keys())))
print('keys to remove: ', to_remove_k)
print('new filter keys:', [k for k in self.behaviours_list_a if k not in to_remove_k])
return [k for k in self.behaviours_list_a if k not in to_remove_k]
    def get_plot_first_last_x_min_behaviour(self, astroA, num_min=20, behaviour_ind='rest'):
        """
        Contour plots of the first vs last ``num_min`` minutes of a behaviour.

        Builds event grids from the first and last ``num_min`` minutes worth
        of frames of ``behaviour_ind`` and returns a side-by-side contour
        figure, or None when the behaviour has fewer than ``2 * num_min``
        minutes of frames.
        """
        num_min_frames = int(np.round(astroA.fr * num_min * 60))
        # Need at least two non-overlapping windows.
        if len(astroA.indices_d[behaviour_ind]) < 2 * num_min_frames:
            return None
        first_inds = astroA.indices_d[behaviour_ind][:num_min_frames]
        last_inds = astroA.indices_d[behaviour_ind][-num_min_frames:]
        indices_d_temp = {'default' : astroA.indices_d['default'], 'first' : first_inds, 'last' : last_inds}
        # Map frame windows to the events occurring inside them.
        event_subsets_temp = aqua_utils.get_event_subsets(indices_d_temp, astroA.res_d)
        event_grid_first = aqua_utils.get_event_grid_from_x2D(astroA.res_d['x2D'][event_subsets_temp['first']], (astroA.input_shape[0], astroA.input_shape[1]))
        event_grid_last = aqua_utils.get_event_grid_from_x2D(astroA.res_d['x2D'][event_subsets_temp['last']], (astroA.input_shape[0], astroA.input_shape[1]))
        return plotly_utils.plot_contour_multiple([event_grid_first, event_grid_last], title='{}'.format(behaviour_ind), subplot_titles=['First {} min'.format(num_min), 'Last {} min'.format(num_min)])
    def get_plot_x_min_rest_relative(self, astroA, num_min=20, behaviour_ind='rest'):
        """
        Compare correlation of first 20 minutes with subsequent 20 minutes. This is to see how the correlation degrades over time.

        Splits the behaviour's frames into consecutive ``num_min``-minute
        windows, builds an event grid per window, and cross-correlates the
        first window's grid against each later one. Returns a line plot of
        those correlations, or None when fewer than two windows fit.
        """
        num_min_frames = int(np.round(astroA.fr * num_min * 60))
        # Need at least two full windows to compare.
        if len(astroA.indices_d[behaviour_ind]) < 2 * num_min_frames:
            return None
        ind_split_l = []
        indices_d_temp = {}
        indices_d_temp['default'] = astroA.indices_d['default']
        #Get frame indices corresponding to each x min split
        for i in range(len(astroA.indices_d[behaviour_ind]) // num_min_frames):
            ind_split = astroA.indices_d[behaviour_ind][i*num_min_frames:(i+1)*num_min_frames]
            ind_split_l.append(ind_split)
            indices_d_temp[i] = ind_split
        #Get event indices corresponting to each x min split
        event_subsets_temp = aqua_utils.get_event_subsets(indices_d_temp, astroA.res_d)
        #Get event grid corresponding to each x min split
        event_grid_split_l = []
        for i in range(len(astroA.indices_d[behaviour_ind]) // num_min_frames):
            event_grid_x = aqua_utils.get_event_grid_from_x2D(astroA.res_d['x2D'][event_subsets_temp[i]], (astroA.input_shape[0], astroA.input_shape[1]))
            event_grid_split_l.append(event_grid_x)
        corr_res_l = []
        #Calculate xcorr between first grid and rest
        for i in range(1, len(event_grid_split_l)):
            corr_res, _, move_vector, _ = correlation_utils.get_cross_correlation_2D_info_compare(event_grid_split_l[0], event_grid_split_l[i], normalize=True, mode='valid')
            # 'valid' mode yields a 1x1 result; take the scalar.
            corr_res_l.append(corr_res[0][0])
        corr_res_l = np.array(corr_res_l)
        return plotly_utils.plot_scatter([i for i in range(len(corr_res_l))], corr_res_l , mode='lines', title='scatter', x_title='', y_title='')
def get_plot_compare_behaviour_correlation(self, astro_l_pair, dff_mode=False):
behaviour_l = ['rest', 'running', 'stick', 'whisker']
results = {}
#run, rest, stick, whisker
#Get correlations bh-bh day 0, day 1
#Step 1: obtain move vector for alignment between day 0 and day x
d_temp = compare_astro_utils.alignment_counter(astro_l_pair[0], astro_l_pair[1],
n_fake_samples=0,
align_setting='xcorr',
eval_setting='xcorr',
fake_sample_setting='from_astro',
p=1,
behaviour='default',
dff_mode=dff_mode)
move_vector = d_temp['move_vector']
#Step 2: obtain correlation
for bh_i in behaviour_l:
d = compare_astro_utils.alignment_counter(astro_l_pair[0], astro_l_pair[1],
n_fake_samples=0,
align_setting='param',
eval_setting='xcorr',
fake_sample_setting='from_astro',
move_vector=move_vector,
p=1,
behaviour=bh_i,
dff_mode=dff_mode)
results[bh_i + '_' + bh_i] = d['num_compare']
#Get correlations bh_i-bh_j same day
for bh_i in range(len(behaviour_l)):
for bh_j in range(bh_i+1, len(behaviour_l)):
if bh_i == bh_j:
continue
for i in range(2):
d = compare_astro_utils.alignment_counter(astro_l_pair[i], astro_l_pair[i],
n_fake_samples=0,
align_setting='param',
eval_setting='xcorr',
fake_sample_setting='from_astro',
move_vector=[0,0],
p=1,
behaviour=[behaviour_l[bh_i], behaviour_l[bh_j]],
dff_mode=dff_mode)
results[behaviour_l[bh_i] + '_' + behaviour_l[bh_j] + '_' + str(i)] = d['num_compare']
###
x = list(results.keys())
y = [[results[x_k]] for x_k in x]
return plotly_utils.plot_point_box_revised(x, y, margin_b=400)
def measure_distribution_plot(self, astroA_l, bh, measure, num_bins=10, min_measure=0, max_measure=0, measure_name='', mode='MOA', with_measure_values=False):
'''
Default min is 0
MOA = mean over astrocytes
MOE = mean over 'all' events over all astrocytes
'''
measure_d = {}
for astroA in astroA_l:
if bh in astroA.event_subsets:
measure_d[astroA.print_id] = astroA.res_d[measure][astroA.event_subsets[bh]]
measure_counts_d = {}
all_events_measure_l = []
#No events...
if np.sum([len(measure_d[k]) for k in measure_d.keys()]) == 0:
return None, None, None
#Get min and max range to filter (if not given just take min and max values of event value measures)
if min_measure is None:
min_range = np.min([np.min(measure_d[k]) for k in measure_d.keys()])
else:
min_range = min_measure
if max_measure is None:
max_range = np.max([np.max(measure_d[k]) for k in measure_d.keys()])
else:
max_range = max_measure
if measure == 'duration':
if ((max_range - min_range) % 2) == 1:
max_range += 1
num_bins_x = np.int((max_range - min_range)/ 2)
else:
num_bins_x = num_bins
for k in measure_d.keys():
measure_d[k] = measure_d[k][measure_d[k] >= min_range]
measure_d[k] = measure_d[k][measure_d[k] <= max_range]
#If mean over astrocytes -> take histogram for each individual astrocyte
if mode == 'MOA':
measure_counts_d[k], bins_arr = np.histogram(measure_d[k], bins=num_bins_x, range=(min_range, max_range))
measure_counts_d[k] = measure_counts_d[k] / np.sum(measure_counts_d[k])
#If mean over events -> append filtered events of current astrocyte
elif mode == 'MOE':
all_events_measure_l.extend(measure_d[k])
#Mean over events, now produce a histogram
if mode == 'MOE':
measure_counts_d['all_events'], bins_arr = np.histogram(all_events_measure_l, bins=num_bins_x, range=(min_range,max_range))
measure_counts_d['all_events'] = measure_counts_d['all_events'] / np.sum(measure_counts_d['all_events'])
#In the case of MOE we have only 1 key: all events, so the list is simply [[0.1, 0.4, 0.3, ...]] values of 1 histogram
y_l = [[measure_counts_d[k][i] for k in measure_counts_d.keys()] for i in range(num_bins_x)]
x = bins_arr
x_title = measure_name
if measure_name == 'duration':
x_title += ' (s)'
if measure_name == 'size':
#TODO
x_title += ''
fig = plotly_utils.plot_scatter_error(x, y_l, mode='lines', title='{}-{} distribution'.format(bh, measure_name), x_title=measure_name, y_title='')
if mode == 'MOE' and with_measure_values == True:
return fig, x, y_l, all_events_measure_l
return fig, x, y_l
    def measure_distribution_bh_compare_plot(self, astroA_l, bh_l, measure, num_bins=10, min_measure=0, max_measure=0, measure_name='', confidence=True, with_stats=True, mode='MOA'):
        """
        Overlay measure distributions for several behaviours on one scatter plot.

        Calls measure_distribution_plot once per behaviour in ``bh_l`` and
        plots the resulting histograms together — with confidence bands when
        ``confidence`` is True, otherwise per-bin means.
        """
        bh_y_d = {}
        x_l = []
        for bh in bh_l:
            _, x, y_l = self.measure_distribution_plot(astroA_l, bh, measure=measure, num_bins=num_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name, mode=mode)
            # Behaviours with no events return (None, None, None) and are skipped.
            if x is None:
                continue
            x_l.append(x)
            if confidence:
                bh_y_d[bh] = y_l
            else:
                bh_y_d[bh] = [np.mean(y) for y in y_l]
        bh_k_l = list(bh_y_d.keys())
        bh_y_l = [bh_y_d[bh] for bh in bh_k_l]
        x_title = measure_name
        if measure_name == 'duration':
            x_title += ' (s)'
        return plotly_utils.plot_scatter_mult(x_l=x_l, y_l_l=bh_y_l,
                        name_l=bh_k_l, title='{}s distribution'.format(measure_name),
                        x_title=x_title, y_title='',
                        xrange=(min_measure, max_measure), confidence=confidence, with_stats=True)
    def measure_distribution_bh_compare_plot_exponential_fit(self, astroA_l, bh_l, measure, num_bins=10, min_measure=0, max_measure=0, measure_name='', confidence=True, with_stats=True, with_log=True):
        """
        Like measure_distribution_bh_compare_plot, but overlays an exponential
        decay fit N * exp(-(x/b)) per behaviour (fitted with optimize.curve_fit).

        When ``with_log`` is True the x values (and the x range) are
        log-transformed before fitting. The fitted curves are appended as
        extra traces whose names include the fitted parameters.
        """
        bh_y_d = {}
        x_l = []
        for bh in bh_l:
            print(bh)
            print(measure)
            _, x, y_l = self.measure_distribution_plot(astroA_l, bh, measure=measure, num_bins=num_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name)
            if x is None:
                continue
            # Drop the last bin edge so x matches the number of bins.
            x_l.append(x[:-1])
            if confidence:
                bh_y_d[bh] = y_l
            else:
                bh_y_d[bh] = [np.mean(y) for y in y_l]
        if with_log:
            if min_measure is not None:
                min_measure=np.log(min_measure)
            if max_measure is not None:
                max_measure=np.log(max_measure)
            x_l = list(np.log(np.array(x_l)))
        bh_k_l = list(bh_y_d.keys())
        bh_y_l = [bh_y_d[bh] for bh in bh_k_l]
        x_title = measure_name
        if measure_name == 'duration':
            x_title += ' (s)'
        # Trace names: behaviour names first, then one label per fitted curve.
        new_bh_k_l = list(bh_y_d.keys())
        def test_func(x, N, b):
            # Exponential decay model fitted to each behaviour's histogram.
            return N*(np.exp(-(x/b)))
        for i, bh in enumerate(bh_k_l):
            print(x_l[i])
            print(bh_y_l[i])
            params, params_covariance = optimize.curve_fit(test_func, x_l[i], bh_y_l[i])
            y_fit = test_func(x_l[i], *params)
            par = [v for v in params]
            par.insert(0, bh)
            new_bh_k_l.append('{}__{:.1e}*exp<sup>-(t/{:.1e})<sup>'.format(*par))
            bh_y_l.append(y_fit)
            # The fitted curve reuses the x values of its behaviour.
            x_l.append(x_l[i])
        return plotly_utils.plot_scatter_mult(x_l=x_l, y_l_l=bh_y_l,
                        name_l=new_bh_k_l, title='{}s distribution'.format(measure_name),
                        x_title=x_title, y_title='',
                        xrange=(min_measure, max_measure), confidence=confidence, with_stats=True)
def amplitude_distribution_plot_violin_duo(self, astroA_l, bh_1, bh_2, max_dff=5):
amp_l_1 = []
amp_l_2 = []
for astroA in astroA_l:
if bh_1 in astroA.event_subsets.keys():
amp_l_1.extend(list(astroA.res_d['dffMax2'][astroA.event_subsets[bh_1]]))
if bh_2 in astroA.event_subsets.keys():
amp_l_2.extend(list(astroA.res_d['dffMax2'][astroA.event_subsets[bh_2]]))
amp_l_1 = np.array(amp_l_1)
amp_l_2 = np.array(amp_l_2)
if max_dff is not None:
amp_l_1 = amp_l_1[amp_l_1 <= max_dff]
amp_l_2 = amp_l_2[amp_l_2 <= max_dff]
fig = plotly_utils.plot_violin_duo(bh_1, bh_2, amp_l_1, amp_l_2, title='', x_title='', y_title='')
return fig
def sizes_distribution_plot_violin_duo(self, astroA_l, bh_1, bh_2, max_area=18):
sizes_l_1 = []
sizes_l_2 = []
for astroA in astroA_l:
if bh_1 in astroA.event_subsets.keys():
sizes_l_1.extend(list(astroA.res_d['area'][astroA.event_subsets[bh_1]]))
if bh_2 in astroA.event_subsets.keys():
sizes_l_2.extend(list(astroA.res_d['area'][astroA.event_subsets[bh_2]]))
sizes_l_1 = np.array(sizes_l_1)
sizes_l_2 = np.array(sizes_l_2)
if max_area is not None:
sizes_l_1 = sizes_l_1[sizes_l_1 <= max_area]
sizes_l_2 = sizes_l_2[sizes_l_2 <= max_area]
fig = plotly_utils.plot_violin_duo(bh_1, bh_2, sizes_l_1, sizes_l_2, title='', x_title='', y_title='')
return fig
def signal_duration_distribution_plot_violin_duo(self, astroA_l, bh_1, bh_2, max_duration=100):
durations_l_1 = []
durations_l_2 = []
for astroA in astroA_l:
if bh_1 in astroA.event_subsets.keys():
durations_l_1 = astroA.all_durations_d[bh_1]
if bh_2 in astroA.event_subsets.keys():
durations_l_2 = astroA.all_durations_d[bh_2]
durations_l_1 = np.array(durations_l_1)
durations_l_2 = np.array(durations_l_2)
if max_duration is not None:
durations_l_1 = durations_l_1[durations_l_1 <= max_duration]
durations_l_2 = durations_l_2[durations_l_2 <= max_duration]
fig = plotly_utils.plot_violin_duo(bh_1, bh_2, durations_l_1, durations_l_2, title='Signal duration (s) distribution', x_title='', y_title='')
plotly_utils.apply_fun_axis_fig(fig, lambda x : x / astroA_l[0].fr, axis='y')
return fig
    def get_stick_run_sample_figs(self, astroA):
        """
        Example figures around stick-stimulus onsets that coincide with running.

        Picks up to 10 frames that are both a stick onset and a running
        frame; for each, builds a running-speed trace and a binary
        stick-onset trace over +/-100 frames, plus up to 10 dF/F traces of
        events fully contained in that window.

        Returns a list of [fig_running, fig_stick, signal_figs] triples.
        """
        figs = []
        possible_spots = list(np.sort(list(set(astroA.indices_d['stick_exact_start']) & set(astroA.indices_d['running_exact']))))
        # NOTE(review): this print draws BEFORE the seed is set, so the
        # printed sample differs from the one actually iterated below.
        print(np.random.choice(possible_spots, min(len(possible_spots), 10), replace=False))
        np.random.seed(0)
        for spot in np.random.choice(possible_spots, min(len(possible_spots), 10), replace=False):
            time_from = spot - 100
            time_to = spot + 100
            # Binary trace marking stick-onset frames.
            stick_start_bin = np.zeros([len(astroA.stick_bin)])
            stick_start_bin[astroA.indices_d['stick_exact_start']] = 1
            stick_signal = stick_start_bin[time_from:time_to]
            #Obtain running signal
            running_signal = astroA.speed_values[time_from:time_to]
            fig_running = plotly_utils.plot_scatter_fmt(np.arange(len(running_signal)), running_signal, astype='float')
            fig_stick = plotly_utils.plot_scatter_fmt(np.arange(len(stick_signal)), stick_signal, astype='int')
            #Obtain available events during this period
            interval_events = list(set(np.where(astroA.res_d['tBegin'] > time_from)[0]) & set(np.where(astroA.res_d['tEnd'] < time_to)[0]))
            signal_figs = []
            for i, event_i in enumerate(interval_events[0:10]):
                # Times are wrapped modulo the recording length — presumably
                # dff_only stores one recording's worth of frames per event;
                # TODO confirm.
                adj_from = int(time_from % astroA.input_shape[2])
                adj_to = int(time_to % astroA.input_shape[2])
                # Window wrapped around the end of the recording: skip it.
                if adj_to < adj_from:
                    print('Skipping: change time from to')
                    continue
                y = astroA.res_d['dff_only'][event_i][adj_from:adj_to]
                x = np.arange(0, adj_to-adj_from)
                adj_begin = int(astroA.res_d['tBegin'][event_i] % astroA.input_shape[2]) - adj_from
                adj_end = int(astroA.res_d['tEnd'][event_i] % astroA.input_shape[2]) - adj_from
                print(adj_begin, adj_end)
                fig = plotly_utils.plot_scatter_signal(x, y, adj_begin, adj_end, mode='lines', title='scatter', x_title='', y_title='', with_legend=False)
                signal_figs.append(fig)
            figs.append([fig_running, fig_stick, signal_figs])
        return figs
    def get_compare_align_plot_xcorr_all(self, astro_pair_l, align_setting='xcorr', dff_mode=False, behaviour='default', filter_duration=(None, None),
                            with_border_align=True, n_fake_samples=5, save_results_path=None):
        '''
        Go with each astrocyte pairs
        Calculate day 0-day x correlation
        Calculate random samples correlation
        Normalize s.t. random samples correlation for all pairs is the same (and the 0-x corr)
        Create plot

        NOTE(review): save_results_path is concatenated below without a None
        check, so the default of None raises TypeError — callers must pass a
        path. align_setting must be 'xcorr' or 'xcorr_free', otherwise ``d``
        is never assigned. The normalization described above is currently
        disabled (mult = 1).
        '''
        pair_fakes = []      # per-pair lists of fake-sample correlations
        pair_corrs_l = []    # per-pair real correlation values
        days_id_l = []       # per-pair 'day0-dayX' labels
        for astro_pair in astro_pair_l:
            astro_1, astro_2 = astro_pair[0], astro_pair[1]
            days = (str(astro_pair[0].day), str(astro_pair[1].day))
            days_id = '-'.join(days)
            # Per-pair cache file.
            pair_save_results_path = save_results_path + self.get_astro_pair_id(astro_pair) + '.pkl'
            if os.path.isfile(pair_save_results_path):
                d = saving_utils.load_pickle(pair_save_results_path)
            else:
                if align_setting == 'xcorr':
                    #Get move vector
                    move_vector = compare_astro_utils.get_move_vector_xcorr_default(astro_1, astro_2)
                    #self.n_samples_corr_fake
                    d = compare_astro_utils.alignment_counter(astro_1, astro_2,
                                                    n_fake_samples=n_fake_samples,
                                                    align_setting='param',
                                                    eval_setting='xcorr',
                                                    fake_sample_setting='from_astro',
                                                    move_vector=move_vector,
                                                    p=1,
                                                    behaviour=behaviour,
                                                    filter_duration=filter_duration,
                                                    with_output_details=True)
                elif align_setting == 'xcorr_free':
                    d = compare_astro_utils.alignment_counter(astro_1, astro_2,
                                                    n_fake_samples=n_fake_samples,
                                                    align_setting='param',
                                                    eval_setting='xcorr_free',
                                                    fake_sample_setting='from_astro',
                                                    move_vector=None,
                                                    p=1,
                                                    behaviour=behaviour,
                                                    filter_duration=filter_duration,
                                                    with_output_details=True)
                if save_results_path is not None:
                    saving_utils.save_pickle(d, pair_save_results_path)
            print(d)
            pair_fakes.append(d['num_fake'])
            pair_corrs_l.append(d['num_compare'])
            days_id_l.append(days_id)
        # Keep unscaled copies for the caller.
        pair_fakes_before = np.copy(pair_fakes)
        pair_corrs_l_before = np.copy(pair_corrs_l)
        #print('PAIR FAKES', pair_fakes)
        mean_num_fake = np.mean([np.mean(pair_fake) for pair_fake in pair_fakes])
        pair_corrs_d = {}
        for i in range(len(pair_corrs_l)):
            #mult = mean_num_fake / np.mean(pair_fakes[i])
            #NOT DOING ANY SCALING
            mult = 1
            pair_fakes[i] = np.array(pair_fakes[i]) * mult
            pair_corrs_l[i] = pair_corrs_l[i] * mult
            # Group real correlations by their day-pair label.
            if days_id_l[i] not in pair_corrs_d:
                pair_corrs_d[days_id_l[i]] = []
            pair_corrs_d[days_id_l[i]].append(pair_corrs_l[i])
        print('Pair corrs', pair_corrs_d)
        x = ['fake_samples']
        # Flatten all fake-sample correlations into one baseline column.
        y = [[item for sublist in pair_fakes for item in sublist]]
        for k in pair_corrs_d.keys():
            x.append('days ' + k)
            y.append(pair_corrs_d[k])
        #tstat, pvalue = ttest_ind_from_stats(np.mean(y[0]), np.std(y[0]), len(y[0]), np.mean(y[1]), np.std(y[1]), len(y[1]))
        #print('NUM COMPARE: {}, mode {} behaviour {}'.format(d['num_compare'], dff_mode, behaviour))
        fig = plotly_utils.plot_point_box_revised(x, y, title='Behaviour: {} - correlations'.format(behaviour), x_title='', y_title='Aligned xcorr value')
        return fig, pair_fakes_before, pair_fakes, pair_corrs_l_before, pair_corrs_l, days_id_l
    def get_compare_states_all_xcorr(self, astro_pair, align_setting='xcorr_free', dff_mode='False', n_fake_samples=5, save_pkl_path=None, filter_duration=(None, None),
                                        behaviour_l=['rest', 'running', 'stick_rest', 'stick_run_ind_15', 'default']):
        """
        Per-behaviour correlations between a day pair, plus a random baseline.

        For each behaviour present (frames AND events) in both astrocytes,
        computes the aligned cross-correlation between the pair's event
        grids; fake samples are drawn only for 'default' and stored under
        'random'. Results are cached to ``save_pkl_path``.

        NOTE(review): dff_mode defaults to the STRING 'False' (truthy),
        unlike sibling methods where it is the boolean False — confirm which
        is intended. os.path.isfile raises TypeError when save_pkl_path is
        None (the default), so a path must be supplied.
        """
        astro_1, astro_2 = astro_pair
        print('Working on {}'.format(self.get_astro_pair_id(astro_pair)))
        if os.path.isfile(save_pkl_path):
            res_d = saving_utils.load_pickle(save_pkl_path)
        else:
            res_d = {}
            for behaviour in behaviour_l:
                print('Current behaviour: ', behaviour)
                # Behaviour must exist (frames and events) in both astrocytes.
                if (behaviour in astro_1.indices_d) and (behaviour in astro_2.indices_d) and \
                    (behaviour in astro_1.event_subsets) and (behaviour in astro_2.event_subsets):
                    if align_setting == 'xcorr':
                        #Get move vector
                        move_vector = compare_astro_utils.get_move_vector_xcorr_default(astro_1, astro_2)
                        #self.n_samples_corr_fake
                        d = compare_astro_utils.alignment_counter(astro_1, astro_2,
                                                        n_fake_samples=n_fake_samples if behaviour == 'default' else 0,
                                                        align_setting='param',
                                                        eval_setting='xcorr',
                                                        fake_sample_setting='from_astro',
                                                        move_vector=move_vector,
                                                        p=1,
                                                        behaviour=behaviour,
                                                        filter_duration=filter_duration,
                                                        with_output_details=True,
                                                        dff_mode=dff_mode)
                    elif align_setting == 'xcorr_free':
                        d = compare_astro_utils.alignment_counter(astro_1, astro_2,
                                                        n_fake_samples=n_fake_samples if behaviour == 'default' else 0,
                                                        align_setting='param',
                                                        eval_setting='xcorr_free',
                                                        fake_sample_setting='from_astro',
                                                        move_vector=None,
                                                        p=1,
                                                        behaviour=behaviour,
                                                        filter_duration=filter_duration,
                                                        with_output_details=True,
                                                        dff_mode=dff_mode)
                    res_d[behaviour] = d['num_compare']
                    # Fake samples only for the 'default' run, as the baseline.
                    if behaviour == 'default':
                        res_d['random'] = d['num_fake']
                else:
                    print('Behaviour {} not in one of {} / {}'.format(behaviour, astro_1.id, astro_2.id))
            if save_pkl_path is not None:
                saving_utils.save_pickle(res_d, save_pkl_path)
        behaviours = [b for b in behaviour_l]
        behaviours.append('random')
        x = []
        y = []
        for k in behaviours:
            if ((k in astro_1.indices_d) and (k in astro_2.indices_d) and (k in astro_1.event_subsets) and (k in astro_2.event_subsets)) or (k=='random'):
                if k != 'random':
                    # Wrap scalar correlation values in lists for the box plot.
                    res_d[k] = [res_d[k]]
                x.append(k)
                y.append(res_d[k])
        #x = behaviour_l
        #y = [res_d['rest'], res_d['running'], res_d['default'], res_d['random']]
        print(y)
        fig = plotly_utils.plot_point_box_revised(x, y, title='Behaviour correlations', x_title='Behaviour', y_title='Xcorr value')
        return fig, res_d
    def get_compare_states_same_astro_all_xcorr(self, astro_pair, align_setting='xcorr_free', dff_mode=False, n_fake_samples=5, save_pkl_path=None, filter_duration=(None, None)):
        """
        Within-astrocyte rest-vs-running correlation (and random baseline) per day.

        For each astrocyte of the pair, builds event grids for the behaviour
        pairs (rest, running) and (default, default), correlates the two
        grids, and stores results keyed by the astrocyte's day
        ('rest_running_<day>', 'random_<day>'). Cached to ``save_pkl_path``.

        NOTE(review): alignment_counter is always called with astro_1 and
        astro_2 even though the grids come from the single astrocyte being
        iterated — confirm this is intended before reuse. os.path.isfile
        raises TypeError when save_pkl_path is None (the default).
        """
        if os.path.isfile(save_pkl_path):
            res_d = saving_utils.load_pickle(save_pkl_path)
        else:
            res_d = {}
            astro_1, astro_2 = astro_pair
            for astro in astro_pair:
                astro_day = astro.day
                for behaviour_pair in [['rest', 'running'], ['default', 'default']]:
                    # One event grid per behaviour of the pair.
                    astro_a_grid, _, _,_ = compare_astro_utils.get_filters_compare([astro], p=1, dff_mode=dff_mode, behaviour=behaviour_pair[0])
                    astro_a_grid = astro_a_grid[0]
                    astro_b_grid, _, _,_ = compare_astro_utils.get_filters_compare([astro], p=1, dff_mode=dff_mode, behaviour=behaviour_pair[1])
                    astro_b_grid = astro_b_grid[0]
                    if align_setting == 'xcorr':
                        #Get move vector
                        move_vector = compare_astro_utils.get_move_vector_xcorr_default(astro_1, astro_2)
                        d = compare_astro_utils.alignment_counter(astro_1, astro_2,
                                                        n_fake_samples=n_fake_samples if behaviour_pair[0] == 'default' else 0,
                                                        align_setting='param',
                                                        eval_setting='xcorr',
                                                        fake_sample_setting='from_astro',
                                                        grid_target=astro_a_grid,
                                                        grid_source=astro_b_grid,
                                                        move_vector=move_vector,
                                                        p=1,
                                                        behaviour='default',
                                                        filter_duration=filter_duration,
                                                        with_output_details=True)
                    elif align_setting == 'xcorr_free':
                        d = compare_astro_utils.alignment_counter(astro_1, astro_2,
                                                        n_fake_samples=n_fake_samples if behaviour_pair[0] == 'default' else 0,
                                                        align_setting='param',
                                                        eval_setting='xcorr_free',
                                                        fake_sample_setting='from_astro',
                                                        grid_target=astro_a_grid,
                                                        grid_source=astro_b_grid,
                                                        move_vector=None,
                                                        p=1,
                                                        behaviour='default',
                                                        filter_duration=filter_duration,
                                                        with_output_details=True)
                    # rest/running pair -> real correlation; default/default
                    # pair -> fake-sample baseline for that day.
                    if behaviour_pair[0] == 'rest':
                        res_d['_'.join(behaviour_pair) + '_{}'.format(astro_day)] = d['num_compare']
                    if behaviour_pair[0] == 'default':
                        res_d['random_{}'.format(astro_day)] = d['num_fake']
            if save_pkl_path is not None:
                saving_utils.save_pickle(res_d, save_pkl_path)
        for k in res_d.keys():
            # Wrap scalar correlations in lists for the box plot.
            if 'random' not in k:
                res_d[k] = [res_d[k]]
        x = [k for k in res_d.keys()]
        y = [res_d[k] for k in x]
        fig = plotly_utils.plot_point_box_revised(x, y, title='Behaviour correlations', x_title='Behaviour', y_title='Xcorr value')
        return fig, res_d
    def get_compare_between_group_xcorr(self, astroA_l_pairs, n_fake_samples=5, dff_mode=False, save_pkl_path=None, filter_duration=[None, None]):
        """
        Correlations between astrocytes of DIFFERENT groups vs a random baseline.

        For every two distinct day-pairs in ``astroA_l_pairs``, correlates
        each day of one pair with each day of the other (4 combinations),
        collecting real correlations under 'between' and fake-sample
        correlations under 'random'. Results are cached to ``save_pkl_path``.

        NOTE(review): os.path.isfile raises TypeError when save_pkl_path is
        None (the default) — a path must be supplied. filter_duration is
        accepted but never forwarded to alignment_counter.
        """
        if os.path.isfile(save_pkl_path):
            res_d = saving_utils.load_pickle(save_pkl_path)
        else:
            res_d = {'between' : [], 'random' : [], 'between_id' : []}
            for astro_i in range(len(astroA_l_pairs)):
                for astro_j in range(astro_i+1, len(astroA_l_pairs)):
                    astroA_pair_1 = astroA_l_pairs[astro_i]
                    astroA_pair_2 = astroA_l_pairs[astro_j]
                    #quick hack, ignore the bad dataset
                    if astroA_pair_1[0].id == 'm190129_d190226_cx_day_0' or astroA_pair_2[0].id == 'm190129_d190226_cx_day_0':
                        continue
                    #continue if we are on same pair
                    if astroA_pair_1[0].id == astroA_pair_2[0].id:
                        continue
                    # Cross every day of pair 1 with every day of pair 2.
                    for i in [0, 1]:
                        for j in [0, 1]:
                            astro_pair = [astroA_pair_1[i], astroA_pair_2[j]]
                            d = compare_astro_utils.alignment_counter(astro_pair[0], astro_pair[1],
                                                            n_fake_samples=n_fake_samples,
                                                            align_setting='xcorr',
                                                            eval_setting='xcorr_random_both',
                                                            fake_sample_setting='from_astro',
                                                            p=1,
                                                            behaviour='default',
                                                            dff_mode=dff_mode,
                                                            border_nan=True,
                                                            with_output_details=True)
                            res_d['between_id'].append(self.get_astro_pair_id(astro_pair))
                            res_d['between'].append(d['num_compare'])
                            res_d['random'].extend(d['num_fake'])
            if save_pkl_path is not None:
                saving_utils.save_pickle(res_d, save_pkl_path)
        x = ['Astro between group', 'Random between group']
        y = [res_d['between'], res_d['random']]
        fig = plotly_utils.plot_point_box_revised(x, y, title='Between group correlations vs random (95% confidence)', x_title='', y_title='Xcorr value')
        return fig, res_d
def get_astro_pair_id(self, astro_pair):
return '_'.join([astro.print_id for astro in astro_pair])
def get_measure_all_bar_plot(self, astroA_l, measure, bh_list=['rest', 'running']):
y_pair_l = [[] for i in range(len(bh_list))]
err_pair_l = [[] for i in range(len(bh_list))]
length_l = [[] for i in range(len(bh_list))]
x = []
for astroA in astroA_l:
x.append(astroA.print_id)
for i, bh in enumerate(bh_list):
measure_res = astroA.res_d[measure][astroA.event_subsets[bh]]
mean, conf_low, conf_high = stat_utils.mean_confidence_interval(measure_res, confidence=0.95)
conf = conf_high - mean
y_pair_l[i].append(mean)
err_pair_l[i].append(conf)
length_l[i].append(len(measure_res))
fig = plotly_utils.plot_group_bar(x, y_pair_l, text_values_l=length_l, title='', text_size=20, x_title='', y_title='', legends=bh_list, std_y=err_pair_l, margin_b=300, margin_r=300)
return fig
def get_measure_all_dot_plot(self, astroA_l, measure, bh_list=['rest', 'running']):
x_l = bh_list
name_l=[]
y_pair_l_l = []
for astroA in astroA_l:
name_l.append(astroA.print_id)
y_pair_l = [[] for i in range(len(bh_list))]
length_l = [[] for i in range(len(bh_list))]
for i, bh in enumerate(bh_list):
if measure != None:
measure_res = astroA.res_d[measure][astroA.event_subsets[bh]]
y_pair_l[i].append(measure_res)
else:
n = (len(astroA.event_subsets[bh]) / len(astroA.indices_d[bh])) * astroA.minute_frames
y_pair_l[i].append([n])
y_pair_l_l.append(y_pair_l)
fig, stats_d = plotly_utils.plot_scatter_mult_with_avg(x_l=x_l, y_l_l=y_pair_l_l, y_mean=None, name_l=name_l, mode='lines', x_title='', y_title='',
confidence=True, with_stats=True)
return fig, stats_d
    def get_before_after_transition_events(self, astroA, before_bh, inds_bh, after_bh, before_range=20, after_range=50, measure=None,
                                            duration_filter=[None, None]):
        """
        Collect events (or event counts) around behaviour-transition frames.

        Transition frames are the frames of ``inds_bh`` that are preceded
        (within ``before_range`` frames) by ``before_bh`` frames and followed
        (within ``after_range`` frames) by ``after_bh`` frames.

        When ``measure`` is None, returns ([n_before], [n_after]) — total
        event counts before/after the transitions. Otherwise returns two
        lists of the measure values of those events. Returns ([], []) when
        no frame satisfies both conditions.
        """
        inds = astroA.indices_d[inds_bh]
        #Filter indices
        indices_filt_before = aqua_utils.filter_range_inds(inds, astroA.indices_d[before_bh], range=(-before_range, -1), prop=1.0)
        indices_filt_after = aqua_utils.filter_range_inds(inds, astroA.indices_d[after_bh], range=(1, after_range), prop=1.0)
        # Keep only frames satisfying both the before and after conditions.
        indices_filt = np.array(np.sort(list(set(indices_filt_before) & set(indices_filt_after))))
        if len(indices_filt) == 0:
            return [], []
        delay_info_args = {'event_inds_subset' : astroA.event_subsets['default'],
                    'min_delay' : -before_range,
                    'max_delay' : after_range,
                    'min_duration' : duration_filter[0],
                    'max_duration' : duration_filter[1],
                    'unique_events' : False,
                    'return_non_unique_delays_arr' : True
                    }
        _, _, _, signal_delays_l_l, peak_mins_l_l, valid_event_i_l_l = aqua_utils.get_delay_info_from_res(indices_filt, astroA.res_d, **delay_info_args)
        # Accumulators: integer counters when counting, lists when collecting
        # measure values.
        if measure is None:
            before_l = 0
            after_l = 0
        else:
            before_l = []
            after_l = []
        for i, signal_delays_l in enumerate(signal_delays_l_l):
            signal_delays_np = np.array(signal_delays_l)
            if measure is None:
                # Negative delay = event before the transition, positive = after.
                before_l += len(signal_delays_np[signal_delays_np < 0])
                after_l += len(signal_delays_np[signal_delays_np > 0])
            else:
                measure_np = np.array(list(astroA.res_d[measure][valid_event_i_l_l[i]]))
                before_l.extend(list(measure_np[signal_delays_np < 0]))
                after_l.extend(list(measure_np[signal_delays_np > 0]))
        # Wrap counts in lists so both modes return list pairs.
        if measure is None:
            before_l = [before_l]
            after_l = [after_l]
        return before_l, after_l
def get_measure_all_transition_dot_plot(self, astroA_l, measure, before_bh, inds_bh,
after_bh, before_range=20, after_range=50, duration_filter=[None, None]):
'''
In get measure all dot plot we take a list of behaviours : e.g. [rest, running]
Then we find the events that take place during each behaviour
Then we either measure number of events normalized to minute or the measure values
Here we care about transition. We first find all events that are before transition and then after transition
'''
x_l = [before_bh + '-' + inds_bh, inds_bh + '-' + after_bh]
name_l=[]
y_pair_l_l = []
for astroA in astroA_l:
name_l.append(astroA.print_id)
y_pair_l = [[] for i in range(2)]
length_l = [[] for i in range(2)]
#Find events or number of events for behaviour before and after
before_l, after_l = self.get_before_after_transition_events(astroA, before_bh, inds_bh, after_bh,
before_range=before_range, after_range=after_range,
measure=measure, duration_filter=duration_filter)
y_pair_l[0].append(before_l)
y_pair_l[1].append(after_l)
y_pair_l_l.append(y_pair_l)
fig, stats_d = plotly_utils.plot_scatter_mult_with_avg(x_l=x_l, y_l_l=y_pair_l_l, y_mean=None, name_l=name_l, mode='lines', x_title='', y_title='',
confidence=True, with_stats=True)
return fig, stats_d
    def duration_size_amplitude_plot(self, astroA):
        # NOTE(review): this method appears broken/incomplete. It references
        # many names that are never defined in this scope (x_l, i, mean_l_l,
        # mode, name_l, colour_l, title, x_title, y_title, yrange, xrange,
        # traces_l), never uses the three arrays it extracts, and does not
        # return the figure — calling it raises NameError. It looks like a
        # partial paste of a plotting helper (see the commented-out
        # plot_scatter_mult_tree signature at the end); confirm intent
        # before fixing or removing.
        areas = astroA.res_d['area']
        amplitudes = astroA.res_d['dffMax2']
        times = astroA.res_d['time_s']
        trace_i = go.Scatter(
            x=x_l[i],
            y=mean_l_l[i],
            mode=mode,
            name=name_l[i],
            line=dict(color=colour_l[i])
        )
        layout = go.Layout(title=title, xaxis=dict(title=x_title),
                    yaxis=dict(title=y_title),)
        if yrange is not None:
            layout.update(yaxis=dict(range=yrange))
        if xrange is not None:
            layout.update(xaxis=dict(range=xrange))
        fig = go.Figure(data=traces_l, layout=layout)
        '''
        plot_scatter_mult_tree(x, y_main, y_mult, mode_main='lines', mode_mult='markers',
                        title='', y_title='', x_title='', fit=False, fit_annotation_pos_fix=1,
                        bin_main_size=1, bin_mult_size=1, opacity=0.1, confidence=False, with_stats=False,
                        y_mult_include=True, confidence_format='lines', bin_type='mean'):
        '''
    def triplet_bar_plot(self, astroA, bh='default', measure=None, n_bins=8, y_title=''):
        """
        Bar plot of event statistics in radial bands around the landmark center.

        Events are binned by their distance from the astrocyte's landmark
        center. When ``measure`` is None, bars show event counts per band
        normalized by band area and minutes, scaled to the whole astrocyte
        area; otherwise bars show the band mean of ``measure`` with 95%
        confidence error bars.
        """
        border_mask= astroA.border
        clandmark_center = astroA.res_d['clandmark_center']
        # Distances come in micrometers; convert to pixels.
        event_distances_from_center_micrometers = astroA.res_d['clandmark_distAvg'][astroA.event_subsets[bh]]
        event_distances_from_center = event_distances_from_center_micrometers / astroA.spatial_res
        n_events_arr_norm, n_events_i_arr, area_bins, r_bins = aqua_utils.radius_event_extraction(event_distances_from_center, clandmark_center, border_mask, n_bins=n_bins)
        y = []
        x = []
        err_l = []
        text_values_l = []
        if measure is None:
            #Y axis: number of events / (Area of band x time) -> time is to calibrate the number of events to a minute. X axis: radius of band from the center.
            for i, events_i in enumerate(n_events_i_arr):
                num_events = len(events_i)
                area = area_bins[i]
                #pp = per pixel
                num_events_pp = num_events / area
                #pm = per minute
                num_events_pp_pm = (num_events_pp / len(astroA.indices_d[bh])) * astroA.minute_frames
                #we have events per pixel, now we scale to events as if scaled up to be size of whole astrocyte
                print('sum area bins', np.sum(area_bins))
                num_events_pp_pm_norm_whole = num_events_pp_pm * np.sum(area_bins)
                y.append(num_events_pp_pm_norm_whole)
                # Bars are labelled by the band's outer radius.
                x.append(r_bins[i+1])
                text_values_l.append('n={}'.format(len(events_i)))
            y_title='Events scaled to whole astrocyte size'
        else:
            #Y axis: mean duration (s). X axis: radius of band from the center
            #Y axis: mean size (um^2). X axis: radius of band from the center
            #Y axis amplitude (df/f). X axis: radius of band from center
            event_durations = astroA.res_d[measure][astroA.event_subsets[bh]]
            for i, events_i in enumerate(n_events_i_arr):
                event_durations_i = event_durations[events_i]
                ev_mean, ev_low, ev_high = stat_utils.mean_confidence_interval(event_durations_i, confidence=0.95)
                y.append(ev_mean)
                x.append(r_bins[i+1])
                # Error bar is the upper half-width of the confidence interval.
                err_l.append(ev_high - ev_mean)
                text_values_l.append('n={}'.format(len(events_i)))
            # NOTE(review): self-assignment is a no-op; the parameter value is
            # used unchanged in this branch.
            y_title=y_title
        fig = plotly_utils.plot_bar(x, y, err_y=err_l, text_size=20, text_values=text_values_l, y_title=y_title, x_title='Radius')
        return fig
def triplet_dot_plot_all(self, astroA_l, bh='default', measure=None, n_bins=8, y_title=''):
x_l_l = []
y_l_l = []
name_l = []
astroA_l = [astroA for astroA in astroA_l if bh in astroA.event_subsets]
y_mean_np=np.zeros([len(astroA_l), n_bins])
for astroA_i, astroA in enumerate(astroA_l):
border_mask= astroA.border
clandmark_center = astroA.res_d['clandmark_center']
event_distances_from_center_micrometers = astroA.res_d['clandmark_distAvg'][astroA.event_subsets[bh]]
event_distances_from_center = event_distances_from_center_micrometers / astroA.spatial_res
n_events_arr_norm, n_events_i_arr, area_bins, r_bins = aqua_utils.radius_event_extraction(event_distances_from_center, clandmark_center, border_mask, n_bins=n_bins)
y_l = []
x_l = []
y_mean_l = []
if measure is None:
#Y axis: number of events / (Area of band x time) -> time is to calibrate the number of events to a minute. X axis: radius of band from the center.
for i, events_i in enumerate(n_events_i_arr):
num_events = len(events_i)
area = area_bins[i]
#pp = per pixel
num_events_pp = num_events / area
#pm = per minute
num_events_pp_pm = (num_events_pp / len(astroA.indices_d[bh])) * astroA.minute_frames
#we have events per pixel, now we scale to events as if scaled up to be size of whole astrocyte
num_events_pp_pm_norm_whole = num_events_pp_pm * np.sum(area_bins)
y_l.append([num_events_pp_pm_norm_whole])
x_l.append(r_bins[i+1])
y_mean_np[astroA_i, i] = np.mean(num_events_pp_pm_norm_whole)
else:
#Y axis: mean duration (s). X axis: radius of band from the center
#Y axis: mean size (um^2). X axis: radius of band from the center
#Y axis amplitude (df/f). X axis: radius of band from center
event_durations = astroA.res_d[measure][astroA.event_subsets[bh]]
for i, events_i in enumerate(n_events_i_arr):
event_durations_i = event_durations[events_i]
y_l.append(event_durations_i)
x_l.append(r_bins[i+1])
y_title=y_title
y_mean_np[astroA_i, i] = np.mean(event_durations_i)
x_l_l.append(x_l)
y_l_l.append(y_l)
name_l.append(astroA.print_id)
y_mean_np = np.mean(y_mean_np, axis=0)
x_l = ['Band {}'.format(i) for i in range(1, 1+len(x_l))]
if measure is None:
y_title='Events scaled to whole astrocyte size'
fig, stats_d = plotly_utils.plot_scatter_mult_with_avg(x_l=x_l, y_l_l=y_l_l, y_mean=list(y_mean_np), name_l=name_l, mode='lines', x_title='Band (0-{})'.format(n_bins-1), y_title=y_title,
confidence=False, avg_confidence=True, with_stats=True)
else:
y_title=y_title
fig, stats_d = plotly_utils.plot_scatter_mult_with_avg(x_l=x_l, y_l_l=y_l_l, y_mean=list(y_mean_np), name_l=name_l, mode='lines', x_title='Band (0-{})'.format(n_bins-1), y_title=y_title,
confidence=True, with_stats=True)
return fig, stats_d
| [
"analysis.general_utils.compare_astro_utils.get_fake_astrocyte_sample_from_areas",
"analysis.general_utils.saving_utils.save_pickle",
"analysis.general_utils.plotly_utils.apply_fun_axis_fig",
"analysis.general_utils.aqua_utils.get_event_subsets",
"analysis.general_utils.plotly_utils.plot_point_box_revised",... | [((9180, 9222), 'os.path.join', 'os.path.join', (['output_folder', 'experiment_id'], {}), '(output_folder, experiment_id)\n', (9192, 9222), False, 'import os, sys, glob\n'), ((10663, 10730), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""behaviour_heatmaps"""'], {}), "(output_experiment_path, 'plots', 'behaviour_heatmaps')\n", (10675, 10730), False, 'import os, sys, glob\n'), ((11086, 11165), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""behaviour_activity"""', '"""activity"""'], {}), "(output_experiment_path, 'plots', 'behaviour_activity', 'activity')\n", (11098, 11165), False, 'import os, sys, glob\n'), ((11327, 11432), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig_behaviour_activity', 'behaviour_activity_path'], {'width': '(1200)', 'height': '(800)'}), '(fig_behaviour_activity,\n behaviour_activity_path, width=1200, height=800)\n', (11355, 11432), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((11520, 11593), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""behaviour_areas"""', '"""areas"""'], {}), "(output_experiment_path, 'plots', 'behaviour_areas', 'areas')\n", (11532, 11593), False, 'import os, sys, glob\n'), ((11668, 11737), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig_behaviour_area', 'behaviour_area_path'], {}), '(fig_behaviour_area, behaviour_area_path)\n', (11696, 11737), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((11837, 11922), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""signal_amplitudes"""', '"""amplitudes"""'], {}), "(output_experiment_path, 
'plots', 'signal_amplitudes', 'amplitudes'\n )\n", (11849, 11922), False, 'import os, sys, glob\n'), ((12006, 12085), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig_behaviour_amplitude', 'behaviour_amplitude_path'], {}), '(fig_behaviour_amplitude, behaviour_amplitude_path)\n', (12034, 12085), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((12669, 12742), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""signal_stick_run_samples"""'], {}), "(output_experiment_path, 'plots', 'signal_stick_run_samples')\n", (12681, 12742), False, 'import os, sys, glob\n'), ((18365, 18430), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""signal_durations"""'], {}), "(output_experiment_path, 'plots', 'signal_durations')\n", (18377, 18430), False, 'import os, sys, glob\n'), ((31575, 31636), 'analysis.general_utils.aqua_utils.get_event_subsets', 'aqua_utils.get_event_subsets', (['curr_indices_split', 'astro.res_d'], {}), '(curr_indices_split, astro.res_d)\n', (31603, 31636), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((31656, 31710), 'numpy.zeros', 'np.zeros', (['[astro.input_shape[0], astro.input_shape[1]]'], {}), '([astro.input_shape[0], astro.input_shape[1]])\n', (31664, 31710), True, 'import numpy as np\n'), ((33813, 33867), 'numpy.zeros', 'np.zeros', (['[astro.input_shape[0], astro.input_shape[1]]'], {}), '([astro.input_shape[0], astro.input_shape[1]])\n', (33821, 33867), True, 'import numpy as np\n'), ((35112, 35143), 'numpy.zeros', 'np.zeros', (['[dim_1, dim_2, dim_3]'], {}), '([dim_1, dim_2, dim_3])\n', (35120, 35143), True, 'import numpy as np\n'), ((75640, 75726), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 
'"""pixel_distribution"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'pixel_distribution')\n", (75652, 75726), False, 'import os, sys, glob\n'), ((76448, 76474), 'os.path.join', 'os.path.join', (['path', '"""real"""'], {}), "(path, 'real')\n", (76460, 76474), False, 'import os, sys, glob\n'), ((76498, 76655), 'analysis.general_utils.plotly_utils.plot_scatter_error', 'plotly_utils.plot_scatter_error', (['x_l', 'y_l_fmt'], {'x_title': '"""Pixel intensity percentile"""', 'y_title': '"""Frequency (Density)"""', 'exp_fit': '(True)', 'with_details': '(True)'}), "(x_l, y_l_fmt, x_title=\n 'Pixel intensity percentile', y_title='Frequency (Density)', exp_fit=\n True, with_details=True)\n", (76529, 76655), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((76654, 76698), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig', 'plot_path'], {}), '(fig, plot_path)\n', (76682, 76698), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((76803, 76919), 'pandas.DataFrame', 'DataFrame', (["[stats_d['mean'], stats_d['conf_95'], stats_d['fit']]"], {'columns': 'x_l', 'index': "['mean', 'conf_95', 'fit']"}), "([stats_d['mean'], stats_d['conf_95'], stats_d['fit']], columns=\n x_l, index=['mean', 'conf_95', 'fit'])\n", (76812, 76919), False, 'from pandas import DataFrame\n'), ((77972, 77998), 'os.path.join', 'os.path.join', (['path', '"""fake"""'], {}), "(path, 'fake')\n", (77984, 77998), False, 'import os, sys, glob\n'), ((78022, 78180), 'analysis.general_utils.plotly_utils.plot_scatter_error', 'plotly_utils.plot_scatter_error', (['x_l', 'y_l_fmt'], {'x_title': '"""Pixel intensity percentile"""', 'y_title': '"""Frequency (Density)"""', 'exp_fit': '(False)', 'with_details': '(True)'}), "(x_l, y_l_fmt, x_title=\n 'Pixel 
intensity percentile', y_title='Frequency (Density)', exp_fit=\n False, with_details=True)\n", (78053, 78180), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((78180, 78224), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig', 'plot_path'], {}), '(fig, plot_path)\n', (78208, 78224), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((78315, 78407), 'pandas.DataFrame', 'DataFrame', (["[stats_d['mean'], stats_d['conf_95']]"], {'columns': 'x_l', 'index': "['mean', 'conf_95']"}), "([stats_d['mean'], stats_d['conf_95']], columns=x_l, index=['mean',\n 'conf_95'])\n", (78324, 78407), False, 'from pandas import DataFrame\n'), ((137723, 137934), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astroA', 'astroA'], {'n_fake_samples': 'n_fake_samples', 'align_setting': '"""param"""', 'eval_setting': '"""xcorr"""', 'fake_sample_setting': '"""from_astro"""', 'move_vector': '[0, 0]', 'p': '(1)', 'behaviour': 'bh'}), "(astroA, astroA, n_fake_samples=\n n_fake_samples, align_setting='param', eval_setting='xcorr',\n fake_sample_setting='from_astro', move_vector=[0, 0], p=1, behaviour=bh)\n", (137760, 137934), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((138589, 138618), 'os.path.isfile', 'os.path.isfile', (['save_pkl_path'], {}), '(save_pkl_path)\n', (138603, 138618), False, 'import os, sys, glob\n'), ((141083, 141112), 'os.path.isfile', 'os.path.isfile', (['save_pkl_path'], {}), '(save_pkl_path)\n', (141097, 141112), False, 'import os, sys, glob\n'), ((143689, 143718), 'os.path.isfile', 'os.path.isfile', (['save_pkl_path'], {}), '(save_pkl_path)\n', (143703, 
143718), False, 'import os, sys, glob\n'), ((148530, 148615), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""files"""', '"""csv"""', '"""duration_split_ratios.csv"""'], {}), "(output_experiment_path, 'files', 'csv',\n 'duration_split_ratios.csv')\n", (148542, 148615), False, 'import os, sys, glob\n'), ((152537, 152554), 'numpy.argsort', 'np.argsort', (['day_l'], {}), '(day_l)\n', (152547, 152554), True, 'import numpy as np\n'), ((152765, 152822), 'os.path.join', 'os.path.join', (['output_folder', 'experiment_id_l[0]', 'days_str'], {}), '(output_folder, experiment_id_l[0], days_str)\n', (152777, 152822), False, 'import os, sys, glob\n'), ((153351, 153368), 'numpy.argsort', 'np.argsort', (['day_l'], {}), '(day_l)\n', (153361, 153368), True, 'import numpy as np\n'), ((153583, 153617), 'os.path.join', 'os.path.join', (['output_folder', '"""all"""'], {}), "(output_folder, 'all')\n", (153595, 153617), False, 'import os, sys, glob\n'), ((158091, 158154), 'numpy.array', 'np.array', (['[astroA.activity_ratios[k] for k in activity_ratio_k]'], {}), '([astroA.activity_ratios[k] for k in activity_ratio_k])\n', (158099, 158154), True, 'import numpy as np\n'), ((158280, 158308), 'numpy.argsort', 'np.argsort', (['activity_ratio_l'], {}), '(activity_ratio_l)\n', (158290, 158308), True, 'import numpy as np\n'), ((158798, 158863), 'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['fig', '(lambda x: x * 100)'], {'axis': '"""y"""'}), "(fig, lambda x: x * 100, axis='y')\n", (158829, 158863), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((159417, 159447), 'numpy.argsort', 'np.argsort', (['activity_ratios_np'], {}), '(activity_ratios_np)\n', (159427, 159447), True, 'import numpy as np\n'), ((160039, 160104), 'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['fig', '(lambda x: 
x * 100)'], {'axis': '"""y"""'}), "(fig, lambda x: x * 100, axis='y')\n", (160070, 160104), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((160756, 160782), 'numpy.argsort', 'np.argsort', (['activity_means'], {}), '(activity_means)\n', (160766, 160782), True, 'import numpy as np\n'), ((160934, 161078), 'analysis.general_utils.plotly_utils.plot_point_box_revised', 'plotly_utils.plot_point_box_revised', (['x', 'y'], {'title': '"""Activity ratio"""', 'x_title': '""""""', 'y_title': '"""Events per voxel (%)"""', 'lines': 'lines', 'with_stats': '(True)'}), "(x, y, title='Activity ratio', x_title=\n '', y_title='Events per voxel (%)', lines=lines, with_stats=True)\n", (160969, 161078), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((161693, 161720), 'numpy.argsort', 'np.argsort', (['activity_num_np'], {}), '(activity_num_np)\n', (161703, 161720), True, 'import numpy as np\n'), ((163093, 163119), 'numpy.argsort', 'np.argsort', (['activity_means'], {}), '(activity_means)\n', (163103, 163119), True, 'import numpy as np\n'), ((163269, 163420), 'analysis.general_utils.plotly_utils.plot_point_box_revised', 'plotly_utils.plot_point_box_revised', (['x', 'y'], {'title': '"""Activity number"""', 'x_title': '""""""', 'y_title': '"""Events per minute in state"""', 'lines': 'lines', 'with_stats': '(True)'}), "(x, y, title='Activity number', x_title=\n '', y_title='Events per minute in state', lines=lines, with_stats=True)\n", (163304, 163420), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((164530, 164550), 'numpy.array', 'np.array', (['areas_conf'], {}), '(areas_conf)\n', (164538, 164550), True, 'import numpy as np\n'), ((164570, 164592), 'numpy.argsort', 
'np.argsort', (['areas_mean'], {}), '(areas_mean)\n', (164580, 164592), True, 'import numpy as np\n'), ((166218, 166239), 'numpy.array', 'np.array', (['area_l_mean'], {}), '(area_l_mean)\n', (166226, 166239), True, 'import numpy as np\n'), ((166261, 166281), 'numpy.array', 'np.array', (['area_l_std'], {}), '(area_l_std)\n', (166269, 166281), True, 'import numpy as np\n'), ((166301, 166324), 'numpy.argsort', 'np.argsort', (['area_l_mean'], {}), '(area_l_mean)\n', (166311, 166324), True, 'import numpy as np\n'), ((166470, 166653), 'analysis.general_utils.plotly_utils.plot_bar', 'plotly_utils.plot_bar', ([], {'x': 'area_keys_s', 'y': 'area_l_mean_s', 'text_values': '[]', 'text_size': '(20)', 'title': '"""Sizes of events"""', 'x_title': '""""""', 'y_title': '"""Event sizes (μm<sup>2</sup>)"""', 'margin_b': '(150)'}), "(x=area_keys_s, y=area_l_mean_s, text_values=[],\n text_size=20, title='Sizes of events', x_title='', y_title=\n 'Event sizes (μm<sup>2</sup>)', margin_b=150)\n", (166491, 166653), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((166967, 166986), 'numpy.array', 'np.array', (['am_l_mean'], {}), '(am_l_mean)\n', (166975, 166986), True, 'import numpy as np\n'), ((167002, 167023), 'numpy.argsort', 'np.argsort', (['am_l_mean'], {}), '(am_l_mean)\n', (167012, 167023), True, 'import numpy as np\n'), ((167111, 167277), 'analysis.general_utils.plotly_utils.plot_bar', 'plotly_utils.plot_bar', ([], {'x': 'am_keys_s', 'y': 'am_l_mean_s', 'text_values': '[]', 'text_size': '(20)', 'title': '"""Amplitude (df/f) of events"""', 'x_title': '""""""', 'y_title': '"""df/f"""', 'margin_b': '(150)'}), "(x=am_keys_s, y=am_l_mean_s, text_values=[], text_size\n =20, title='Amplitude (df/f) of events', x_title='', y_title='df/f',\n margin_b=150)\n", (167132, 167277), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, 
compare_astro_utils, correlation_utils, stat_utils\n'), ((184564, 184608), 'numpy.arange', 'np.arange', (['(-before_range)', '(after_range + 1)', '(1)'], {}), '(-before_range, after_range + 1, 1)\n', (184573, 184608), True, 'import numpy as np\n'), ((187041, 187107), 'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['fig3', '(lambda x: x * 100)'], {'axis': '"""y"""'}), "(fig3, lambda x: x * 100, axis='y')\n", (187072, 187107), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((189564, 189597), 'numpy.nanmean', 'np.nanmean', (['bh_val_all_np'], {'axis': '(0)'}), '(bh_val_all_np, axis=0)\n', (189574, 189597), True, 'import numpy as np\n'), ((189611, 189655), 'numpy.arange', 'np.arange', (['(-before_range)', '(after_range + 1)', '(1)'], {}), '(-before_range, after_range + 1, 1)\n', (189620, 189655), True, 'import numpy as np\n'), ((193327, 193346), 'numpy.mean', 'np.mean', (['all_events'], {}), '(all_events)\n', (193334, 193346), True, 'import numpy as np\n'), ((193372, 193390), 'numpy.std', 'np.std', (['all_events'], {}), '(all_events)\n', (193378, 193390), True, 'import numpy as np\n'), ((193416, 193540), 'numpy.array', 'np.array', (['[all_events_mean + all_events_std, all_events_mean + 2 * all_events_std, \n all_events_mean + 3 * all_events_std]'], {}), '([all_events_mean + all_events_std, all_events_mean + 2 *\n all_events_std, all_events_mean + 3 * all_events_std])\n', (193424, 193540), True, 'import numpy as np\n'), ((197994, 198032), 'numpy.array', 'np.array', (["measure_values_all['before']"], {}), "(measure_values_all['before'])\n", (198002, 198032), True, 'import numpy as np\n'), ((198071, 198108), 'numpy.array', 'np.array', (["measure_values_all['after']"], {}), "(measure_values_all['after'])\n", (198079, 198108), True, 'import numpy as np\n'), ((198820, 199010), 
'analysis.general_utils.plotly_utils.plot_scatter_mult', 'plotly_utils.plot_scatter_mult', (['x_l', 'y_l_l'], {'name_l': 'name_l', 'mode': '"""lines+markers"""', 'title': '"""scatter"""', 'x_title': '""""""', 'y_title': '""""""', 'xrange': 'None', 'yrange': 'None', 'confidence': '(False)', 'with_stats': '(True)'}), "(x_l, y_l_l, name_l=name_l, mode=\n 'lines+markers', title='scatter', x_title='', y_title='', xrange=None,\n yrange=None, confidence=False, with_stats=True)\n", (198850, 199010), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((202571, 202591), 'numpy.zeros', 'np.zeros', (['[interval]'], {}), '([interval])\n', (202579, 202591), True, 'import numpy as np\n'), ((202612, 202632), 'numpy.zeros', 'np.zeros', (['[interval]'], {}), '([interval])\n', (202620, 202632), True, 'import numpy as np\n'), ((213321, 213554), 'analysis.general_utils.plotly_utils.plot_group_bar', 'plotly_utils.plot_group_bar', ([], {'x': 'x', 'y_l': 'y_l', 'text_values_l': 'text_values_l', 'legends': 'legends', 'x_title': '"""Behaviour"""', 'y_title': '"""Relative difference to default"""', 'title': '"""Relative difference in short,medium,long signals to default"""'}), "(x=x, y_l=y_l, text_values_l=text_values_l,\n legends=legends, x_title='Behaviour', y_title=\n 'Relative difference to default', title=\n 'Relative difference in short,medium,long signals to default')\n", (213348, 213554), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((213998, 214130), 'analysis.general_utils.plotly_utils.plot_scatter_signal', 'plotly_utils.plot_scatter_signal', ([], {'x': 'x1', 'y': 'y1', 'begin_i': 't_begin', 'end_i': 't_end', 'mode': '"""lines"""', 'title': '"""Signal"""', 'x_title': '""""""', 'y_title': '""""""'}), "(x=x1, y=y1, begin_i=t_begin, end_i=t_end,\n mode='lines', title='Signal', 
x_title='', y_title='')\n", (214030, 214130), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((218076, 218284), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astro_l_pair[0]', 'astro_l_pair[1]'], {'n_fake_samples': '(0)', 'align_setting': 'align_setting', 'eval_setting': '"""xcorr"""', 'fake_sample_setting': '"""from_astro"""', 'p': '(1)', 'behaviour': '"""default"""'}), "(astro_l_pair[0], astro_l_pair[1],\n n_fake_samples=0, align_setting=align_setting, eval_setting='xcorr',\n fake_sample_setting='from_astro', p=1, behaviour='default')\n", (218113, 218284), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((218749, 219004), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astro_l_pair[0]', 'astro_l_pair[1]'], {'n_fake_samples': 'self.n_samples_corr_fake', 'align_setting': '"""param"""', 'eval_setting': '"""xcorr"""', 'fake_sample_setting': '"""from_astro"""', 'move_vector': 'move_vector', 'p': '(1)', 'behaviour': 'behaviour'}), "(astro_l_pair[0], astro_l_pair[1],\n n_fake_samples=self.n_samples_corr_fake, align_setting='param',\n eval_setting='xcorr', fake_sample_setting='from_astro', move_vector=\n move_vector, p=1, behaviour=behaviour)\n", (218786, 219004), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((220558, 220576), 'numpy.max', 'np.max', (['day_0_grid'], {}), '(day_0_grid)\n', (220564, 220576), True, 'import numpy as np\n'), ((220593, 220611), 'numpy.max', 'np.max', (['day_1_grid'], {}), '(day_1_grid)\n', (220599, 220611), True, 'import numpy as np\n'), ((221838, 222034), 
'analysis.general_utils.plotly_utils.plot_contour_threshold', 'plotly_utils.plot_contour_threshold', (['(astroA.event_grids_1min[bh] if not dff_mode else astroA.\n event_grids_1min_dff[bh])'], {'title': "(bh + '_event grid')", 'threshold_perc': 'threshold', 'with_details': '(True)'}), "(astroA.event_grids_1min[bh] if not\n dff_mode else astroA.event_grids_1min_dff[bh], title=bh + '_event grid',\n threshold_perc=threshold, with_details=True)\n", (221873, 222034), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((231035, 231192), 'analysis.general_utils.plotly_utils.plot_point_box_revised', 'plotly_utils.plot_point_box_revised', (['x_fixed', 'y_fixed'], {'title': '"""Split correlation to full"""', 'x_title': '"""Minutes splits"""', 'y_title': '"""Correlation (max = 1)"""'}), "(x_fixed, y_fixed, title=\n 'Split correlation to full', x_title='Minutes splits', y_title=\n 'Correlation (max = 1)')\n", (231070, 231192), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((231199, 231352), 'analysis.general_utils.plotly_utils.plot_scatter_error', 'plotly_utils.plot_scatter_error', (['x_fixed', 'y_fixed'], {'title': '"""Split correlation to full"""', 'x_title': '"""Minutes splits"""', 'y_title': '"""Correlation (max = 1)"""'}), "(x_fixed, y_fixed, title=\n 'Split correlation to full', x_title='Minutes splits', y_title=\n 'Correlation (max = 1)')\n", (231230, 231352), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((231887, 231916), 'os.path.isfile', 'os.path.isfile', (['save_pkl_path'], {}), '(save_pkl_path)\n', (231901, 231916), False, 'import os, sys, glob\n'), ((236783, 236938), 'analysis.general_utils.plotly_utils.plot_point_box_revised', 
'plotly_utils.plot_point_box_revised', (['x_fixed', 'y_fixed'], {'title': '"""Split-split correlation"""', 'x_title': '"""Minutes splits"""', 'y_title': '"""Correlation (max = 1)"""'}), "(x_fixed, y_fixed, title=\n 'Split-split correlation', x_title='Minutes splits', y_title=\n 'Correlation (max = 1)')\n", (236818, 236938), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((236945, 237104), 'analysis.general_utils.plotly_utils.plot_scatter_error', 'plotly_utils.plot_scatter_error', (['x_fixed', 'y_fixed'], {'title': '"""Split-split correlation to full"""', 'x_title': '"""Minutes splits"""', 'y_title': '"""Correlation (max = 1)"""'}), "(x_fixed, y_fixed, title=\n 'Split-split correlation to full', x_title='Minutes splits', y_title=\n 'Correlation (max = 1)')\n", (236976, 237104), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((238786, 238844), 'analysis.general_utils.aqua_utils.get_event_subsets', 'aqua_utils.get_event_subsets', (['indices_d_temp', 'astroA.res_d'], {}), '(indices_d_temp, astroA.res_d)\n', (238814, 238844), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((238873, 239010), 'analysis.general_utils.aqua_utils.get_event_grid_from_x2D', 'aqua_utils.get_event_grid_from_x2D', (["astroA.res_d['x2D'][event_subsets_temp['first']]", '(astroA.input_shape[0], astroA.input_shape[1])'], {}), "(astroA.res_d['x2D'][event_subsets_temp[\n 'first']], (astroA.input_shape[0], astroA.input_shape[1]))\n", (238907, 239010), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((239032, 239168), 'analysis.general_utils.aqua_utils.get_event_grid_from_x2D', 
'aqua_utils.get_event_grid_from_x2D', (["astroA.res_d['x2D'][event_subsets_temp['last']]", '(astroA.input_shape[0], astroA.input_shape[1])'], {}), "(astroA.res_d['x2D'][event_subsets_temp[\n 'last']], (astroA.input_shape[0], astroA.input_shape[1]))\n", (239066, 239168), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((240299, 240357), 'analysis.general_utils.aqua_utils.get_event_subsets', 'aqua_utils.get_event_subsets', (['indices_d_temp', 'astroA.res_d'], {}), '(indices_d_temp, astroA.res_d)\n', (240327, 240357), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((241107, 241127), 'numpy.array', 'np.array', (['corr_res_l'], {}), '(corr_res_l)\n', (241115, 241127), True, 'import numpy as np\n'), ((241615, 241841), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astro_l_pair[0]', 'astro_l_pair[1]'], {'n_fake_samples': '(0)', 'align_setting': '"""xcorr"""', 'eval_setting': '"""xcorr"""', 'fake_sample_setting': '"""from_astro"""', 'p': '(1)', 'behaviour': '"""default"""', 'dff_mode': 'dff_mode'}), "(astro_l_pair[0], astro_l_pair[1],\n n_fake_samples=0, align_setting='xcorr', eval_setting='xcorr',\n fake_sample_setting='from_astro', p=1, behaviour='default', dff_mode=\n dff_mode)\n", (241652, 241841), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((244346, 244401), 'analysis.general_utils.plotly_utils.plot_point_box_revised', 'plotly_utils.plot_point_box_revised', (['x', 'y'], {'margin_b': '(400)'}), '(x, y, margin_b=400)\n', (244381, 244401), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), 
((251260, 251277), 'numpy.array', 'np.array', (['amp_l_1'], {}), '(amp_l_1)\n', (251268, 251277), True, 'import numpy as np\n'), ((251296, 251313), 'numpy.array', 'np.array', (['amp_l_2'], {}), '(amp_l_2)\n', (251304, 251313), True, 'import numpy as np\n'), ((251462, 251558), 'analysis.general_utils.plotly_utils.plot_violin_duo', 'plotly_utils.plot_violin_duo', (['bh_1', 'bh_2', 'amp_l_1', 'amp_l_2'], {'title': '""""""', 'x_title': '""""""', 'y_title': '""""""'}), "(bh_1, bh_2, amp_l_1, amp_l_2, title='',\n x_title='', y_title='')\n", (251490, 251558), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((252042, 252061), 'numpy.array', 'np.array', (['sizes_l_1'], {}), '(sizes_l_1)\n', (252050, 252061), True, 'import numpy as np\n'), ((252082, 252101), 'numpy.array', 'np.array', (['sizes_l_2'], {}), '(sizes_l_2)\n', (252090, 252101), True, 'import numpy as np\n'), ((252265, 252365), 'analysis.general_utils.plotly_utils.plot_violin_duo', 'plotly_utils.plot_violin_duo', (['bh_1', 'bh_2', 'sizes_l_1', 'sizes_l_2'], {'title': '""""""', 'x_title': '""""""', 'y_title': '""""""'}), "(bh_1, bh_2, sizes_l_1, sizes_l_2, title='',\n x_title='', y_title='')\n", (252293, 252365), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((252820, 252843), 'numpy.array', 'np.array', (['durations_l_1'], {}), '(durations_l_1)\n', (252828, 252843), True, 'import numpy as np\n'), ((252868, 252891), 'numpy.array', 'np.array', (['durations_l_2'], {}), '(durations_l_2)\n', (252876, 252891), True, 'import numpy as np\n'), ((253091, 253231), 'analysis.general_utils.plotly_utils.plot_violin_duo', 'plotly_utils.plot_violin_duo', (['bh_1', 'bh_2', 'durations_l_1', 'durations_l_2'], {'title': '"""Signal duration (s) distribution"""', 'x_title': '""""""', 'y_title': '""""""'}), "(bh_1, bh_2, 
durations_l_1, durations_l_2,\n title='Signal duration (s) distribution', x_title='', y_title='')\n", (253119, 253231), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((253236, 253312), 'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['fig', '(lambda x: x / astroA_l[0].fr)'], {'axis': '"""y"""'}), "(fig, lambda x: x / astroA_l[0].fr, axis='y')\n", (253267, 253312), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((253633, 253650), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (253647, 253650), True, 'import numpy as np\n'), ((258948, 258967), 'numpy.copy', 'np.copy', (['pair_fakes'], {}), '(pair_fakes)\n', (258955, 258967), True, 'import numpy as np\n'), ((258998, 259019), 'numpy.copy', 'np.copy', (['pair_corrs_l'], {}), '(pair_corrs_l)\n', (259005, 259019), True, 'import numpy as np\n'), ((260714, 260743), 'os.path.isfile', 'os.path.isfile', (['save_pkl_path'], {}), '(save_pkl_path)\n', (260728, 260743), False, 'import os, sys, glob\n'), ((264468, 264589), 'analysis.general_utils.plotly_utils.plot_point_box_revised', 'plotly_utils.plot_point_box_revised', (['x', 'y'], {'title': '"""Behaviour correlations"""', 'x_title': '"""Behaviour"""', 'y_title': '"""Xcorr value"""'}), "(x, y, title='Behaviour correlations',\n x_title='Behaviour', y_title='Xcorr value')\n", (264503, 264589), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((264804, 264833), 'os.path.isfile', 'os.path.isfile', (['save_pkl_path'], {}), '(save_pkl_path)\n', (264818, 264833), False, 'import os, sys, glob\n'), ((268642, 268763), 'analysis.general_utils.plotly_utils.plot_point_box_revised', 
'plotly_utils.plot_point_box_revised', (['x', 'y'], {'title': '"""Behaviour correlations"""', 'x_title': '"""Behaviour"""', 'y_title': '"""Xcorr value"""'}), "(x, y, title='Behaviour correlations',\n x_title='Behaviour', y_title='Xcorr value')\n", (268677, 268763), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((268947, 268976), 'os.path.isfile', 'os.path.isfile', (['save_pkl_path'], {}), '(save_pkl_path)\n', (268961, 268976), False, 'import os, sys, glob\n'), ((271346, 271494), 'analysis.general_utils.plotly_utils.plot_point_box_revised', 'plotly_utils.plot_point_box_revised', (['x', 'y'], {'title': '"""Between group correlations vs random (95% confidence)"""', 'x_title': '""""""', 'y_title': '"""Xcorr value"""'}), "(x, y, title=\n 'Between group correlations vs random (95% confidence)', x_title='',\n y_title='Xcorr value')\n", (271381, 271494), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((272387, 272570), 'analysis.general_utils.plotly_utils.plot_group_bar', 'plotly_utils.plot_group_bar', (['x', 'y_pair_l'], {'text_values_l': 'length_l', 'title': '""""""', 'text_size': '(20)', 'x_title': '""""""', 'y_title': '""""""', 'legends': 'bh_list', 'std_y': 'err_pair_l', 'margin_b': '(300)', 'margin_r': '(300)'}), "(x, y_pair_l, text_values_l=length_l, title='',\n text_size=20, x_title='', y_title='', legends=bh_list, std_y=err_pair_l,\n margin_b=300, margin_r=300)\n", (272414, 272570), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((273376, 273552), 'analysis.general_utils.plotly_utils.plot_scatter_mult_with_avg', 'plotly_utils.plot_scatter_mult_with_avg', ([], {'x_l': 'x_l', 'y_l_l': 'y_pair_l_l', 'y_mean': 'None', 'name_l': 'name_l', 'mode': 
'"""lines"""', 'x_title': '""""""', 'y_title': '""""""', 'confidence': '(True)', 'with_stats': '(True)'}), "(x_l=x_l, y_l_l=y_pair_l_l, y_mean=\n None, name_l=name_l, mode='lines', x_title='', y_title='', confidence=\n True, with_stats=True)\n", (273415, 273552), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((273914, 274019), 'analysis.general_utils.aqua_utils.filter_range_inds', 'aqua_utils.filter_range_inds', (['inds', 'astroA.indices_d[before_bh]'], {'range': '(-before_range, -1)', 'prop': '(1.0)'}), '(inds, astroA.indices_d[before_bh], range=(-\n before_range, -1), prop=1.0)\n', (273942, 274019), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((274044, 274144), 'analysis.general_utils.aqua_utils.filter_range_inds', 'aqua_utils.filter_range_inds', (['inds', 'astroA.indices_d[after_bh]'], {'range': '(1, after_range)', 'prop': '(1.0)'}), '(inds, astroA.indices_d[after_bh], range=(1,\n after_range), prop=1.0)\n', (274072, 274144), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((274813, 274899), 'analysis.general_utils.aqua_utils.get_delay_info_from_res', 'aqua_utils.get_delay_info_from_res', (['indices_filt', 'astroA.res_d'], {}), '(indices_filt, astroA.res_d, **\n delay_info_args)\n', (274847, 274899), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((277173, 277349), 'analysis.general_utils.plotly_utils.plot_scatter_mult_with_avg', 'plotly_utils.plot_scatter_mult_with_avg', ([], {'x_l': 'x_l', 'y_l_l': 'y_pair_l_l', 'y_mean': 'None', 'name_l': 'name_l', 'mode': '"""lines"""', 'x_title': '""""""', 'y_title': '""""""', 'confidence': '(True)', 
'with_stats': '(True)'}), "(x_l=x_l, y_l_l=y_pair_l_l, y_mean=\n None, name_l=name_l, mode='lines', x_title='', y_title='', confidence=\n True, with_stats=True)\n", (277212, 277349), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((279009, 279122), 'analysis.general_utils.aqua_utils.radius_event_extraction', 'aqua_utils.radius_event_extraction', (['event_distances_from_center', 'clandmark_center', 'border_mask'], {'n_bins': 'n_bins'}), '(event_distances_from_center,\n clandmark_center, border_mask, n_bins=n_bins)\n', (279043, 279122), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((281008, 281129), 'analysis.general_utils.plotly_utils.plot_bar', 'plotly_utils.plot_bar', (['x', 'y'], {'err_y': 'err_l', 'text_size': '(20)', 'text_values': 'text_values_l', 'y_title': 'y_title', 'x_title': '"""Radius"""'}), "(x, y, err_y=err_l, text_size=20, text_values=\n text_values_l, y_title=y_title, x_title='Radius')\n", (281029, 281129), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((283825, 283851), 'numpy.mean', 'np.mean', (['y_mean_np'], {'axis': '(0)'}), '(y_mean_np, axis=0)\n', (283832, 283851), True, 'import numpy as np\n'), ((10173, 10230), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig_signal', 'fig_signal_path'], {}), '(fig_signal, fig_signal_path)\n', (10201, 10230), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((10391, 10457), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""borders"""', '"""border"""'], {}), "(output_experiment_path, 'plots', 'borders', 'border')\n", 
(10403, 10457), False, 'import os, sys, glob\n'), ((31803, 31928), 'analysis.general_utils.aqua_utils.get_event_grid_from_x2D', 'aqua_utils.get_event_grid_from_x2D', (["astro.res_d['x2D'][bh_event_subsets[i]]", '(astro.input_shape[0], astro.input_shape[1])'], {}), "(astro.res_d['x2D'][bh_event_subsets[i]],\n (astro.input_shape[0], astro.input_shape[1]))\n", (31837, 31928), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((32260, 32349), 'analysis.general_utils.plotly_utils.plot_contour', 'plotly_utils.plot_contour', (['x2d_all_normalized'], {'title': '""""""', 'tick_x': '[0.2, 0.4, 0.6, 0.8]'}), "(x2d_all_normalized, title='', tick_x=[0.2, 0.4, \n 0.6, 0.8])\n", (32285, 32349), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((33590, 33645), 'analysis.general_utils.general_utils.merge_l_l', 'general_utils.merge_l_l', (['events_ot_l', 'downsample_length'], {}), '(events_ot_l, downsample_length)\n', (33613, 33645), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((33949, 34072), 'analysis.general_utils.aqua_utils.get_event_grid_from_x2D', 'aqua_utils.get_event_grid_from_x2D', (["astro.res_d['x2D'][segment_events_l]", '(astro.input_shape[0], astro.input_shape[1])'], {}), "(astro.res_d['x2D'][segment_events_l], (\n astro.input_shape[0], astro.input_shape[1]))\n", (33983, 34072), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((34452, 34541), 'analysis.general_utils.plotly_utils.plot_contour', 'plotly_utils.plot_contour', (['x2d_all_normalized'], {'title': '""""""', 'tick_x': '[0.2, 0.4, 0.6, 0.8]'}), "(x2d_all_normalized, title='', tick_x=[0.2, 0.4, \n 
0.6, 0.8])\n", (34477, 34541), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((35259, 35316), 'numpy.unravel_index', 'np.unravel_index', (['event', '[dim_1, dim_2, dim_3]'], {'order': '"""F"""'}), "(event, [dim_1, dim_2, dim_3], order='F')\n", (35275, 35316), True, 'import numpy as np\n'), ((35343, 35363), 'numpy.min', 'np.min', (['unraveled[2]'], {}), '(unraveled[2])\n', (35349, 35363), True, 'import numpy as np\n'), ((35387, 35407), 'numpy.max', 'np.max', (['unraveled[2]'], {}), '(unraveled[2])\n', (35393, 35407), True, 'import numpy as np\n'), ((35433, 35457), 'numpy.zeros', 'np.zeros', (['[dim_1, dim_2]'], {}), '([dim_1, dim_2])\n', (35441, 35457), True, 'import numpy as np\n'), ((35993, 36070), 'numpy.sum', 'np.sum', (['a_3d[:, :, i * downsample_length:(i + 1) * downsample_length]'], {'axis': '(2)'}), '(a_3d[:, :, i * downsample_length:(i + 1) * downsample_length], axis=2)\n', (35999, 36070), True, 'import numpy as np\n'), ((36414, 36503), 'analysis.general_utils.plotly_utils.plot_contour', 'plotly_utils.plot_contour', (['x2d_all_normalized'], {'title': '""""""', 'tick_x': '[0.2, 0.4, 0.6, 0.8]'}), "(x2d_all_normalized, title='', tick_x=[0.2, 0.4, \n 0.6, 0.8])\n", (36439, 36503), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((76094, 76157), 'numpy.histogram', 'np.histogram', (['grid_flat_nz'], {'bins': '(20)', 'range': '(0, 1)', 'density': '(True)'}), '(grid_flat_nz, bins=20, range=(0, 1), density=True)\n', (76106, 76157), True, 'import numpy as np\n'), ((77567, 77587), 'numpy.max', 'np.max', (['grid_flat_nz'], {}), '(grid_flat_nz)\n', (77573, 77587), True, 'import numpy as np\n'), ((77618, 77681), 'numpy.histogram', 'np.histogram', (['grid_flat_nz'], {'bins': '(20)', 'range': '(0, 1)', 'density': '(True)'}), '(grid_flat_nz, bins=20, range=(0, 
1), density=True)\n', (77630, 77681), True, 'import numpy as np\n'), ((138673, 138712), 'analysis.general_utils.saving_utils.load_pickle', 'saving_utils.load_pickle', (['save_pkl_path'], {}), '(save_pkl_path)\n', (138697, 138712), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((140570, 140616), 'analysis.general_utils.saving_utils.save_pickle', 'saving_utils.save_pickle', (['res_l', 'save_pkl_path'], {}), '(res_l, save_pkl_path)\n', (140594, 140616), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((141167, 141206), 'analysis.general_utils.saving_utils.load_pickle', 'saving_utils.load_pickle', (['save_pkl_path'], {}), '(save_pkl_path)\n', (141191, 141206), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((143169, 143215), 'analysis.general_utils.saving_utils.save_pickle', 'saving_utils.save_pickle', (['res_l', 'save_pkl_path'], {}), '(res_l, save_pkl_path)\n', (143193, 143215), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((143773, 143812), 'analysis.general_utils.saving_utils.load_pickle', 'saving_utils.load_pickle', (['save_pkl_path'], {}), '(save_pkl_path)\n', (143797, 143812), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((146870, 146916), 'analysis.general_utils.saving_utils.save_pickle', 'saving_utils.save_pickle', (['res_l', 'save_pkl_path'], {}), '(res_l, save_pkl_path)\n', (146894, 146916), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, 
stat_utils\n'), ((147908, 148033), 'analysis.general_utils.correlation_utils.get_corr_astro_samples_v2', 'correlation_utils.get_corr_astro_samples_v2', ([], {'astro_xc': 'astroA', 'astro_base': 'astroA', 'p': 'p', 'n_samples': 'self.n_samples_corr_fake'}), '(astro_xc=astroA, astro_base=\n astroA, p=p, n_samples=self.n_samples_corr_fake)\n', (147951, 148033), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((148041, 148100), 'analysis.general_utils.saving_utils.save_pickle', 'saving_utils.save_pickle', (['samples_corr_d', 'samples_save_path'], {}), '(samples_corr_d, samples_save_path)\n', (148065, 148100), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((149353, 149409), 'analysis.general_utils.compare_astro_utils.get_filters_compare', 'compare_astro_utils.get_filters_compare', (['astroA_l_s'], {'p': 'p'}), '(astroA_l_s, p=p)\n', (149392, 149409), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((149522, 149615), 'analysis.general_utils.correlation_utils.get_cross_correlation_2D_info_compare', 'correlation_utils.get_cross_correlation_2D_info_compare', (['astro_filt_l[0]', 'astro_filt_l[0]'], {}), '(astro_filt_l[0],\n astro_filt_l[0])\n', (149577, 149615), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((149669, 149762), 'analysis.general_utils.correlation_utils.get_cross_correlation_2D_info_compare', 'correlation_utils.get_cross_correlation_2D_info_compare', (['astro_filt_l[0]', 'astro_filt_l[1]'], {}), '(astro_filt_l[0],\n astro_filt_l[1])\n', (149724, 149762), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, 
general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((149842, 149976), 'analysis.general_utils.correlation_utils.get_corr_astro_samples_v2', 'correlation_utils.get_corr_astro_samples_v2', ([], {'astro_xc': 'astroA_l[0]', 'astro_base': 'astroA_l[1]', 'p': 'p', 'n_samples': 'self.n_samples_corr_fake'}), '(astro_xc=astroA_l[0],\n astro_base=astroA_l[1], p=p, n_samples=self.n_samples_corr_fake)\n', (149885, 149976), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((150444, 150495), 'analysis.general_utils.saving_utils.save_pickle', 'saving_utils.save_pickle', (['d', 'corr_compare_save_path'], {}), '(d, corr_compare_save_path)\n', (150468, 150495), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((150879, 150971), 'os.path.join', 'os.path.join', (['output_experiment_path_comparison', '"""files/correlations/corr_compare_*.pkl"""'], {}), "(output_experiment_path_comparison,\n 'files/correlations/corr_compare_*.pkl')\n", (150891, 150971), False, 'import os, sys, glob\n'), ((151168, 151209), 'analysis.general_utils.saving_utils.load_pickle', 'saving_utils.load_pickle', (['comparison_path'], {}), '(comparison_path)\n', (151192, 151209), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((151456, 151525), 'os.path.join', 'os.path.join', (['experiment_path', '"""files/correlations/fake_sample_*.pkl"""'], {}), "(experiment_path, 'files/correlations/fake_sample_*.pkl')\n", (151468, 151525), False, 'import os, sys, glob\n'), ((151809, 151851), 'analysis.general_utils.saving_utils.load_pickle', 'saving_utils.load_pickle', (['fake_sample_path'], {}), '(fake_sample_path)\n', (151833, 151851), False, 'from analysis.general_utils import aqua_utils, 
saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((155349, 155371), 'numpy.zeros', 'np.zeros', (['[arr_length]'], {}), '([arr_length])\n', (155357, 155371), True, 'import numpy as np\n'), ((156201, 156350), 'analysis.general_utils.plotly_utils.plot_contour', 'plotly_utils.plot_contour', (["(astroA.res_d['border_mask'] + astroA.res_d['clandmark_mask'])"], {'title': '"""border_and_landmark_mask"""', 'height': '(600)', 'width': '(800)'}), "(astroA.res_d['border_mask'] + astroA.res_d[\n 'clandmark_mask'], title='border_and_landmark_mask', height=600, width=800)\n", (156226, 156350), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((156379, 156481), 'analysis.general_utils.plotly_utils.plot_contour', 'plotly_utils.plot_contour', (["astroA.res_d['border_mask']"], {'title': '"""border_mask"""', 'height': '(600)', 'width': '(800)'}), "(astroA.res_d['border_mask'], title='border_mask',\n height=600, width=800)\n", (156404, 156481), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((156769, 156874), 'analysis.general_utils.plotly_utils.plot_contour', 'plotly_utils.plot_contour', (['astroA.event_grids_1min[k]'], {'title': "(k + '_event grid')", 'height': '(600)', 'width': '(800)'}), "(astroA.event_grids_1min[k], title=k +\n '_event grid', height=600, width=800)\n", (156794, 156874), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((156957, 157070), 'analysis.general_utils.plotly_utils.plot_contour', 'plotly_utils.plot_contour', (['astroA.event_grids_1min_dff[k]'], {'title': "(k + '_event grid dff')", 'height': '(600)', 'width': '(800)'}), "(astroA.event_grids_1min_dff[k], title=k +\n '_event grid dff', height=600, 
width=800)\n", (156982, 157070), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((158502, 158543), 'numpy.where', 'np.where', (["(activity_ratio_k_s == 'default')"], {}), "(activity_ratio_k_s == 'default')\n", (158510, 158543), True, 'import numpy as np\n'), ((159477, 159491), 'numpy.array', 'np.array', (['bh_l'], {}), '(bh_l)\n', (159485, 159491), True, 'import numpy as np\n'), ((159592, 159633), 'numpy.where', 'np.where', (["(activity_ratio_k_s == 'default')"], {}), "(activity_ratio_k_s == 'default')\n", (159600, 159633), True, 'import numpy as np\n'), ((160668, 160692), 'numpy.mean', 'np.mean', (['activity_ratios'], {}), '(activity_ratios)\n', (160675, 160692), True, 'import numpy as np\n'), ((160796, 160810), 'numpy.array', 'np.array', (['bh_l'], {}), '(bh_l)\n', (160804, 160810), True, 'import numpy as np\n'), ((161749, 161763), 'numpy.array', 'np.array', (['bh_l'], {}), '(bh_l)\n', (161757, 161763), True, 'import numpy as np\n'), ((161857, 161896), 'numpy.where', 'np.where', (["(activity_num_k_s == 'default')"], {}), "(activity_num_k_s == 'default')\n", (161865, 161896), True, 'import numpy as np\n'), ((163011, 163033), 'numpy.mean', 'np.mean', (['activity_nums'], {}), '(activity_nums)\n', (163018, 163033), True, 'import numpy as np\n'), ((163133, 163147), 'numpy.array', 'np.array', (['bh_l'], {}), '(bh_l)\n', (163141, 163147), True, 'import numpy as np\n'), ((164416, 164473), 'analysis.general_utils.stat_utils.mean_confidence_interval', 'stat_utils.mean_confidence_interval', (['v_l'], {'confidence': '(0.95)'}), '(v_l, confidence=0.95)\n', (164451, 164473), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((164616, 164630), 'numpy.array', 'np.array', (['bh_l'], {}), '(bh_l)\n', (164624, 164630), True, 'import numpy as np\n'), ((164658, 164673), 
'numpy.array', 'np.array', (['areas'], {}), '(areas)\n', (164666, 164673), True, 'import numpy as np\n'), ((164706, 164726), 'numpy.array', 'np.array', (['areas_mean'], {}), '(areas_mean)\n', (164714, 164726), True, 'import numpy as np\n'), ((164758, 164777), 'numpy.array', 'np.array', (['areas_std'], {}), '(areas_std)\n', (164766, 164777), True, 'import numpy as np\n'), ((164810, 164830), 'numpy.array', 'np.array', (['areas_conf'], {}), '(areas_conf)\n', (164818, 164830), True, 'import numpy as np\n'), ((172167, 172190), 'numpy.sort', 'np.sort', (['stick_v_l_d[k]'], {}), '(stick_v_l_d[k])\n', (172174, 172190), True, 'import numpy as np\n'), ((172215, 172240), 'numpy.sort', 'np.sort', (['running_v_l_d[k]'], {}), '(running_v_l_d[k])\n', (172222, 172240), True, 'import numpy as np\n'), ((172268, 172296), 'numpy.sort', 'np.sort', (['no_running_v_l_d[k]'], {}), '(no_running_v_l_d[k])\n', (172275, 172296), True, 'import numpy as np\n'), ((172316, 172533), 'analysis.general_utils.plotly_utils.plot_waterfall', 'plotly_utils.plot_waterfall', ([], {'arrays_l': '[stick_v, running_v, no_running_v]', 'legend_names': "['stick', 'running', 'rest']", 'title': '"""Signal (event) delays after behaviour"""', 'x_title': '"""Delay (s)"""', 'y_title': '"""Event id"""'}), "(arrays_l=[stick_v, running_v, no_running_v],\n legend_names=['stick', 'running', 'rest'], title=\n 'Signal (event) delays after behaviour', x_title='Delay (s)', y_title=\n 'Event id')\n", (172343, 172533), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((172533, 172609), 'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['fig', '(lambda x: x / astroA_l[0].fr)'], {'axis': '"""x"""'}), "(fig, lambda x: x / astroA_l[0].fr, axis='x')\n", (172564, 172609), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, 
correlation_utils, stat_utils\n'), ((172636, 172884), 'analysis.general_utils.plotly_utils.plot_waterfall_interpolate', 'plotly_utils.plot_waterfall_interpolate', ([], {'arrays_l': '[stick_v, running_v, no_running_v]', 'legend_names': "['stick', 'running', 'rest']", 'title': '"""Signal (event) delays after behaviour (scaled) All axons"""', 'x_title': '"""Delay (s)"""', 'y_title': '"""Event id"""'}), "(arrays_l=[stick_v, running_v,\n no_running_v], legend_names=['stick', 'running', 'rest'], title=\n 'Signal (event) delays after behaviour (scaled) All axons', x_title=\n 'Delay (s)', y_title='Event id')\n", (172675, 172884), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((172883, 172970), 'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['fig_interp', '(lambda x: x / astroA_l[0].fr)'], {'axis': '"""x"""'}), "(fig_interp, lambda x: x / astroA_l[0].fr,\n axis='x')\n", (172914, 172970), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((178697, 178802), 'analysis.general_utils.aqua_utils.filter_range_inds', 'aqua_utils.filter_range_inds', (['inds', 'astroA.indices_d[before_bh]'], {'range': '(-before_range, -1)', 'prop': '(1.0)'}), '(inds, astroA.indices_d[before_bh], range=(-\n before_range, -1), prop=1.0)\n', (178725, 178802), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((178831, 178931), 'analysis.general_utils.aqua_utils.filter_range_inds', 'aqua_utils.filter_range_inds', (['inds', 'astroA.indices_d[after_bh]'], {'range': '(1, after_range)', 'prop': '(1.0)'}), '(inds, astroA.indices_d[after_bh], range=(1,\n after_range), prop=1.0)\n', (178859, 178931), False, 'from analysis.general_utils import aqua_utils, 
saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((179643, 179729), 'analysis.general_utils.aqua_utils.get_delay_info_from_res', 'aqua_utils.get_delay_info_from_res', (['indices_filt', 'astroA.res_d'], {}), '(indices_filt, astroA.res_d, **\n delay_info_args)\n', (179677, 179729), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((181930, 181972), 'numpy.zeros', 'np.zeros', (['[after_range + before_range + 1]'], {}), '([after_range + before_range + 1])\n', (181938, 181972), True, 'import numpy as np\n'), ((181994, 182036), 'numpy.zeros', 'np.zeros', (['[after_range + before_range + 1]'], {}), '([after_range + before_range + 1])\n', (182002, 182036), True, 'import numpy as np\n'), ((183402, 183429), 'numpy.sum', 'np.sum', (['prop_all_np'], {'axis': '(0)'}), '(prop_all_np, axis=0)\n', (183408, 183429), True, 'import numpy as np\n'), ((183718, 183759), 'numpy.count_nonzero', 'np.count_nonzero', (['ev_count_all_np'], {'axis': '(0)'}), '(ev_count_all_np, axis=0)\n', (183734, 183759), True, 'import numpy as np\n'), ((186345, 186416), 'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['fig', '(lambda x: x / astroA.fr)'], {'axis': '"""x"""'}), "(fig, lambda x: x / astroA.fr, axis='x')\n", (186376, 186416), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((186430, 186502), 'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['fig2', '(lambda x: x / astroA.fr)'], {'axis': '"""x"""'}), "(fig2, lambda x: x / astroA.fr, axis='x')\n", (186461, 186502), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((187164, 187236), 
'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['fig3', '(lambda x: x / astroA.fr)'], {'axis': '"""x"""'}), "(fig3, lambda x: x / astroA.fr, axis='x')\n", (187195, 187236), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((188006, 188111), 'analysis.general_utils.aqua_utils.filter_range_inds', 'aqua_utils.filter_range_inds', (['inds', 'astroA.indices_d[before_bh]'], {'range': '(-before_range, -1)', 'prop': '(1.0)'}), '(inds, astroA.indices_d[before_bh], range=(-\n before_range, -1), prop=1.0)\n', (188034, 188111), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((188140, 188240), 'analysis.general_utils.aqua_utils.filter_range_inds', 'aqua_utils.filter_range_inds', (['inds', 'astroA.indices_d[after_bh]'], {'range': '(1, after_range)', 'prop': '(1.0)'}), '(inds, astroA.indices_d[after_bh], range=(1,\n after_range), prop=1.0)\n', (188168, 188240), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((189523, 189541), 'numpy.array', 'np.array', (['bh_val_l'], {}), '(bh_val_l)\n', (189531, 189541), True, 'import numpy as np\n'), ((190305, 190376), 'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['fig', '(lambda x: x / astroA.fr)'], {'axis': '"""x"""'}), "(fig, lambda x: x / astroA.fr, axis='x')\n", (190336, 190376), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((191227, 191299), 'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['fig2', '(lambda x: x / astroA.fr)'], {'axis': '"""x"""'}), "(fig2, lambda x: x / astroA.fr, 
axis='x')\n", (191258, 191299), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((193738, 193843), 'analysis.general_utils.aqua_utils.filter_range_inds', 'aqua_utils.filter_range_inds', (['inds', 'astroA.indices_d[before_bh]'], {'range': '(-before_range, -1)', 'prop': '(1.0)'}), '(inds, astroA.indices_d[before_bh], range=(-\n before_range, -1), prop=1.0)\n', (193766, 193843), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((193872, 193972), 'analysis.general_utils.aqua_utils.filter_range_inds', 'aqua_utils.filter_range_inds', (['inds', 'astroA.indices_d[after_bh]'], {'range': '(1, after_range)', 'prop': '(1.0)'}), '(inds, astroA.indices_d[after_bh], range=(1,\n after_range), prop=1.0)\n', (193900, 193972), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((195147, 195233), 'analysis.general_utils.aqua_utils.get_delay_info_from_res', 'aqua_utils.get_delay_info_from_res', (['indices_filt', 'astroA.res_d'], {}), '(indices_filt, astroA.res_d, **\n delay_info_args)\n', (195181, 195233), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((203792, 203904), 'analysis.general_utils.aqua_utils.get_event_subsets', 'aqua_utils.get_event_subsets', (['split_d', 'astroA.res_d'], {'after_i': '(0)', 'before_i': '(0)', 'to_print': '(False)', 'return_info': '(True)'}), '(split_d, astroA.res_d, after_i=0, before_i=0,\n to_print=False, return_info=True)\n', (203820, 203904), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((204811, 204831), 'numpy.sum', 'np.sum', 
(['running_prop'], {}), '(running_prop)\n', (204817, 204831), True, 'import numpy as np\n'), ((204864, 204881), 'numpy.sum', 'np.sum', (['rest_prop'], {}), '(rest_prop)\n', (204870, 204881), True, 'import numpy as np\n'), ((206100, 206213), 'analysis.general_utils.aqua_utils.radius_event_extraction', 'aqua_utils.radius_event_extraction', (['event_distances_from_center', 'clandmark_center', 'border_mask'], {'n_bins': 'n_bins'}), '(event_distances_from_center,\n clandmark_center, border_mask, n_bins=n_bins)\n', (206134, 206213), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((206794, 206830), 'numpy.copy', 'np.copy', (["astroA.res_d['border_mask']"], {}), "(astroA.res_d['border_mask'])\n", (206801, 206830), True, 'import numpy as np\n'), ((208443, 208765), 'analysis.general_utils.plotly_utils.plot_event_triplet', 'plotly_utils.plot_event_triplet', ([], {'num_events_bins': 'n_events_arr_norm[:-1]', 'distances_bins': 'r_bins[:-1]', 'sizes_bins_lists': 'event_areas_bins_l[:-1]', 'durations_bins_lists': 'event_durations_bins_l[:-1]', 'height': '(1000)', 'width': '(1000)', 'spatial_res': 'astroA.spatial_res', 'fr': '(1.0 / astroA.fr_inv)', 'title': "(k + '_event_triplet_plot')"}), "(num_events_bins=n_events_arr_norm[:-1],\n distances_bins=r_bins[:-1], sizes_bins_lists=event_areas_bins_l[:-1],\n durations_bins_lists=event_durations_bins_l[:-1], height=1000, width=\n 1000, spatial_res=astroA.spatial_res, fr=1.0 / astroA.fr_inv, title=k +\n '_event_triplet_plot')\n", (208474, 208765), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((209444, 209521), 'csv.writer', 'csv.writer', (['csv_file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(csv_file, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (209454, 
209521), False, 'import csv\n'), ((210673, 210750), 'csv.writer', 'csv.writer', (['csv_file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(csv_file, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (210683, 210750), False, 'import csv\n'), ((212019, 212063), 'numpy.sum', 'np.sum', (['(astroA.all_durations_class_d[k] == 1)'], {}), '(astroA.all_durations_class_d[k] == 1)\n', (212025, 212063), True, 'import numpy as np\n'), ((212097, 212141), 'numpy.sum', 'np.sum', (['(astroA.all_durations_class_d[k] == 2)'], {}), '(astroA.all_durations_class_d[k] == 2)\n', (212103, 212141), True, 'import numpy as np\n'), ((212173, 212217), 'numpy.sum', 'np.sum', (['(astroA.all_durations_class_d[k] == 3)'], {}), '(astroA.all_durations_class_d[k] == 3)\n', (212179, 212217), True, 'import numpy as np\n'), ((212496, 212556), 'analysis.general_utils.general_utils.truncate', 'general_utils.truncate', (['(short_signals_len / total_signals)', '(4)'], {}), '(short_signals_len / total_signals, 4)\n', (212518, 212556), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((212588, 212647), 'analysis.general_utils.general_utils.truncate', 'general_utils.truncate', (['(long_signals_len / total_signals)', '(4)'], {}), '(long_signals_len / total_signals, 4)\n', (212610, 212647), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((212681, 212742), 'analysis.general_utils.general_utils.truncate', 'general_utils.truncate', (['(medium_signals_len / total_signals)', '(4)'], {}), '(medium_signals_len / total_signals, 4)\n', (212703, 212742), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((214689, 214760), 'numpy.random.choice', 
'np.random.choice', (['astroA.event_subsets[bk]', 'sample_num_x'], {'replace': '(False)'}), '(astroA.event_subsets[bk], sample_num_x, replace=False)\n', (214705, 214760), True, 'import numpy as np\n'), ((215637, 215754), 'analysis.general_utils.plotly_utils.plot_point_box_revised', 'plotly_utils.plot_point_box_revised', (['x', 'y'], {'title': '"""Max correlations"""', 'x_title': '""""""', 'y_title': '"""Max correlation value"""'}), "(x, y, title='Max correlations', x_title\n ='', y_title='Max correlation value')\n", (215672, 215754), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((216020, 216232), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astro_l_pair[0]', 'astro_l_pair[1]'], {'n_fake_samples': '(0)', 'align_setting': 'align_setting', 'eval_setting': '"""counter"""', 'fake_sample_setting': '"""from_grid"""', 'behaviour': '"""default"""', 'p': '(0.05)'}), "(astro_l_pair[0], astro_l_pair[1],\n n_fake_samples=0, align_setting=align_setting, eval_setting='counter',\n fake_sample_setting='from_grid', behaviour='default', p=0.05)\n", (216057, 216232), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((219526, 219539), 'numpy.mean', 'np.mean', (['y[0]'], {}), '(y[0])\n', (219533, 219539), True, 'import numpy as np\n'), ((219541, 219553), 'numpy.std', 'np.std', (['y[0]'], {}), '(y[0])\n', (219547, 219553), True, 'import numpy as np\n'), ((219566, 219579), 'numpy.mean', 'np.mean', (['y[1]'], {}), '(y[1])\n', (219573, 219579), True, 'import numpy as np\n'), ((219581, 219593), 'numpy.std', 'np.std', (['y[1]'], {}), '(y[1])\n', (219587, 219593), True, 'import numpy as np\n'), ((220678, 220795), 'analysis.general_utils.plotly_utils.plot_contour_threshold', 'plotly_utils.plot_contour_threshold', 
(['day_0_grid'], {'threshold_perc': '(1.0)', 'title': "(bh + '_event grid')", 'with_details': '(True)'}), "(day_0_grid, threshold_perc=1.0, title=\n bh + '_event grid', with_details=True)\n", (220713, 220795), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((220881, 221013), 'analysis.general_utils.plotly_utils.plot_contour_threshold', 'plotly_utils.plot_contour_threshold', (['day_1_grid'], {'threshold_perc': 'None', 'set_min_v': 'min_v', 'set_max_v': 'max_v', 'title': "(bh + '_event_grid')"}), "(day_1_grid, threshold_perc=None,\n set_min_v=min_v, set_max_v=max_v, title=bh + '_event_grid')\n", (220916, 221013), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((221062, 221179), 'analysis.general_utils.plotly_utils.plot_contour_threshold', 'plotly_utils.plot_contour_threshold', (['day_1_grid'], {'threshold_perc': '(1.0)', 'title': "(bh + '_event grid')", 'with_details': '(True)'}), "(day_1_grid, threshold_perc=1.0, title=\n bh + '_event grid', with_details=True)\n", (221097, 221179), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((221265, 221397), 'analysis.general_utils.plotly_utils.plot_contour_threshold', 'plotly_utils.plot_contour_threshold', (['day_0_grid'], {'threshold_perc': 'None', 'set_min_v': 'min_v', 'set_max_v': 'max_v', 'title': "(bh + '_event_grid')"}), "(day_0_grid, threshold_perc=None,\n set_min_v=min_v, set_max_v=max_v, title=bh + '_event_grid')\n", (221300, 221397), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((222401, 222556), 'analysis.general_utils.plotly_utils.plot_contour_threshold', 'plotly_utils.plot_contour_threshold', 
(['sample'], {'threshold_perc': 'None', 'set_min_v': 'min_v', 'set_max_v': 'max_v', 'title': "(bh + '_random_event_grid')", 'with_details': '(True)'}), "(sample, threshold_perc=None, set_min_v=\n min_v, set_max_v=max_v, title=bh + '_random_event_grid', with_details=True)\n", (222436, 222556), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((223138, 223228), 'analysis.general_utils.compare_astro_utils.split_astro_grid', 'compare_astro_utils.split_astro_grid', (['astroA'], {'split_frames': 'split_frames', 'bk': '"""default"""'}), "(astroA, split_frames=split_frames, bk=\n 'default')\n", (223174, 223228), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((226725, 226780), 'analysis.general_utils.compare_astro_utils.get_filters_compare', 'compare_astro_utils.get_filters_compare', (['astroA_l'], {'p': 'pk'}), '(astroA_l, p=pk)\n', (226764, 226780), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((229803, 229893), 'analysis.general_utils.compare_astro_utils.split_astro_grid', 'compare_astro_utils.split_astro_grid', (['astroA'], {'split_frames': 'split_frames', 'bk': '"""default"""'}), "(astroA, split_frames=split_frames, bk=\n 'default')\n", (229839, 229893), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((231971, 232010), 'analysis.general_utils.saving_utils.load_pickle', 'saving_utils.load_pickle', (['save_pkl_path'], {}), '(save_pkl_path)\n', (231995, 232010), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((232394, 232521), 
'analysis.general_utils.aqua_utils.get_event_subsets', 'aqua_utils.get_event_subsets', (["{'default': default_ind, 'cut': cut_ind}", 'astroA.res_d'], {'after_i': '(0)', 'before_i': '(0)', 'to_print': '(False)'}), "({'default': default_ind, 'cut': cut_ind},\n astroA.res_d, after_i=0, before_i=0, to_print=False)\n", (232422, 232521), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((232600, 232726), 'analysis.general_utils.aqua_utils.get_event_grid_from_x2D', 'aqua_utils.get_event_grid_from_x2D', (["astroA.res_d['x2D'][cut_event_subsets]", '(astroA.input_shape[0], astroA.input_shape[1])'], {}), "(astroA.res_d['x2D'][cut_event_subsets],\n (astroA.input_shape[0], astroA.input_shape[1]))\n", (232634, 232726), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((234636, 234682), 'analysis.general_utils.saving_utils.save_pickle', 'saving_utils.save_pickle', (['res_d', 'save_pkl_path'], {}), '(res_d, save_pkl_path)\n', (234660, 234682), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((235329, 235419), 'analysis.general_utils.compare_astro_utils.split_astro_grid', 'compare_astro_utils.split_astro_grid', (['astroA'], {'split_frames': 'split_frames', 'bk': '"""default"""'}), "(astroA, split_frames=split_frames, bk=\n 'default')\n", (235365, 235419), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((237417, 237529), 'analysis.general_utils.compare_astro_utils.get_fake_astrocyte_sample_from_areas', 'compare_astro_utils.get_fake_astrocyte_sample_from_areas', (['astroA', 'event_areas'], {'mode': '"""append"""', 'filter_ratio': '(1)'}), "(astroA,\n event_areas, 
mode='append', filter_ratio=1)\n", (237473, 237529), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((238375, 238409), 'numpy.round', 'np.round', (['(astroA.fr * num_min * 60)'], {}), '(astroA.fr * num_min * 60)\n', (238383, 238409), True, 'import numpy as np\n'), ((239637, 239671), 'numpy.round', 'np.round', (['(astroA.fr * num_min * 60)'], {}), '(astroA.fr * num_min * 60)\n', (239645, 239671), True, 'import numpy as np\n'), ((240556, 240687), 'analysis.general_utils.aqua_utils.get_event_grid_from_x2D', 'aqua_utils.get_event_grid_from_x2D', (["astroA.res_d['x2D'][event_subsets_temp[i]]", '(astroA.input_shape[0], astroA.input_shape[1])'], {}), "(astroA.res_d['x2D'][event_subsets_temp[i\n ]], (astroA.input_shape[0], astroA.input_shape[1]))\n", (240590, 240687), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((240907, 241043), 'analysis.general_utils.correlation_utils.get_cross_correlation_2D_info_compare', 'correlation_utils.get_cross_correlation_2D_info_compare', (['event_grid_split_l[0]', 'event_grid_split_l[i]'], {'normalize': '(True)', 'mode': '"""valid"""'}), "(event_grid_split_l[\n 0], event_grid_split_l[i], normalize=True, mode='valid')\n", (240962, 241043), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((242351, 242596), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astro_l_pair[0]', 'astro_l_pair[1]'], {'n_fake_samples': '(0)', 'align_setting': '"""param"""', 'eval_setting': '"""xcorr"""', 'fake_sample_setting': '"""from_astro"""', 'move_vector': 'move_vector', 'p': '(1)', 'behaviour': 'bh_i', 'dff_mode': 'dff_mode'}), "(astro_l_pair[0], astro_l_pair[1],\n n_fake_samples=0, 
align_setting='param', eval_setting='xcorr',\n fake_sample_setting='from_astro', move_vector=move_vector, p=1,\n behaviour=bh_i, dff_mode=dff_mode)\n", (242388, 242596), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((245674, 245709), 'numpy.int', 'np.int', (['((max_range - min_range) / 2)'], {}), '((max_range - min_range) / 2)\n', (245680, 245709), True, 'import numpy as np\n'), ((246556, 246641), 'numpy.histogram', 'np.histogram', (['all_events_measure_l'], {'bins': 'num_bins_x', 'range': '(min_range, max_range)'}), '(all_events_measure_l, bins=num_bins_x, range=(min_range,\n max_range))\n', (246568, 246641), True, 'import numpy as np\n'), ((250081, 250129), 'scipy.optimize.curve_fit', 'optimize.curve_fit', (['test_func', 'x_l[i]', 'bh_y_l[i]'], {}), '(test_func, x_l[i], bh_y_l[i])\n', (250099, 250129), False, 'from scipy import optimize\n'), ((256482, 256520), 'os.path.isfile', 'os.path.isfile', (['pair_save_results_path'], {}), '(pair_save_results_path)\n', (256496, 256520), False, 'import os, sys, glob\n'), ((260765, 260804), 'analysis.general_utils.saving_utils.load_pickle', 'saving_utils.load_pickle', (['save_pkl_path'], {}), '(save_pkl_path)\n', (260789, 260804), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((263846, 263892), 'analysis.general_utils.saving_utils.save_pickle', 'saving_utils.save_pickle', (['res_d', 'save_pkl_path'], {}), '(res_d, save_pkl_path)\n', (263870, 263892), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((264855, 264894), 'analysis.general_utils.saving_utils.load_pickle', 'saving_utils.load_pickle', (['save_pkl_path'], {}), '(save_pkl_path)\n', (264879, 264894), False, 'from analysis.general_utils import 
aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((268404, 268450), 'analysis.general_utils.saving_utils.save_pickle', 'saving_utils.save_pickle', (['res_d', 'save_pkl_path'], {}), '(res_d, save_pkl_path)\n', (268428, 268450), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((268998, 269037), 'analysis.general_utils.saving_utils.load_pickle', 'saving_utils.load_pickle', (['save_pkl_path'], {}), '(save_pkl_path)\n', (269022, 269037), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((275134, 275159), 'numpy.array', 'np.array', (['signal_delays_l'], {}), '(signal_delays_l)\n', (275142, 275159), True, 'import numpy as np\n'), ((281876, 281989), 'analysis.general_utils.aqua_utils.radius_event_extraction', 'aqua_utils.radius_event_extraction', (['event_distances_from_center', 'clandmark_center', 'border_mask'], {'n_bins': 'n_bins'}), '(event_distances_from_center,\n clandmark_center, border_mask, n_bins=n_bins)\n', (281910, 281989), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((10837, 10876), 'os.path.join', 'os.path.join', (['heatmap_grid_base_path', 'k'], {}), '(heatmap_grid_base_path, k)\n', (10849, 10876), False, 'import os, sys, glob\n'), ((10945, 10992), 'os.path.join', 'os.path.join', (['heatmap_grid_base_path', "(k + 'dff')"], {}), "(heatmap_grid_base_path, k + 'dff')\n", (10957, 10992), False, 'import os, sys, glob\n'), ((12499, 12562), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig_bk_signal', 'fig_bk_signal_path'], {}), '(fig_bk_signal, fig_bk_signal_path)\n', (12527, 12562), False, 'from analysis.general_utils import aqua_utils, 
saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((18592, 18643), 'os.path.join', 'os.path.join', (['durations_base_path', "(k + '-durations')"], {}), "(durations_base_path, k + '-durations')\n", (18604, 18643), False, 'import os, sys, glob\n'), ((76243, 76255), 'numpy.sum', 'np.sum', (['hist'], {}), '(hist)\n', (76249, 76255), True, 'import numpy as np\n'), ((76728, 76753), 'numpy.array', 'np.array', (["stats_d['data']"], {}), "(stats_d['data'])\n", (76736, 76753), True, 'import numpy as np\n'), ((77767, 77779), 'numpy.sum', 'np.sum', (['hist'], {}), '(hist)\n', (77773, 77779), True, 'import numpy as np\n'), ((78254, 78279), 'numpy.array', 'np.array', (["stats_d['data']"], {}), "(stats_d['data'])\n", (78262, 78279), True, 'import numpy as np\n'), ((138920, 138985), 'analysis.general_utils.aqua_utils.split_n_event_grids', 'aqua_utils.split_n_event_grids', (['astroA'], {'bh': 'bh_pair[0]', 'n': 'n_chunks'}), '(astroA, bh=bh_pair[0], n=n_chunks)\n', (138950, 138985), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((140674, 140689), 'numpy.array', 'np.array', (['res_l'], {}), '(res_l)\n', (140682, 140689), True, 'import numpy as np\n'), ((141414, 141471), 'analysis.general_utils.aqua_utils.split_n_event_grids', 'aqua_utils.split_n_event_grids', (['astroA'], {'bh': 'bh', 'n': 'n_chunks'}), '(astroA, bh=bh, n=n_chunks)\n', (141444, 141471), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((143273, 143288), 'numpy.array', 'np.array', (['res_l'], {}), '(res_l)\n', (143281, 143288), True, 'import numpy as np\n'), ((144170, 144234), 'analysis.general_utils.aqua_utils.split_n_event_grids', 'aqua_utils.split_n_event_grids', (['astro_pair[0]'], {'bh': 'bh', 'n': 'n_chunks'}), '(astro_pair[0], bh=bh, n=n_chunks)\n', 
(144200, 144234), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((144290, 144354), 'analysis.general_utils.aqua_utils.split_n_event_grids', 'aqua_utils.split_n_event_grids', (['astro_pair[1]'], {'bh': 'bh', 'n': 'n_chunks'}), '(astro_pair[1], bh=bh, n=n_chunks)\n', (144320, 144354), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((144695, 144917), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astro_pair[0]', 'astro_pair[1]'], {'n_fake_samples': '(0)', 'align_setting': '"""xcorr"""', 'eval_setting': '"""xcorr"""', 'fake_sample_setting': '"""from_astro"""', 'p': '(1)', 'behaviour': '"""default"""', 'dff_mode': 'dff_mode'}), "(astro_pair[0], astro_pair[1],\n n_fake_samples=0, align_setting='xcorr', eval_setting='xcorr',\n fake_sample_setting='from_astro', p=1, behaviour='default', dff_mode=\n dff_mode)\n", (144732, 144917), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((146974, 146989), 'numpy.array', 'np.array', (['res_l'], {}), '(res_l)\n', (146982, 146989), True, 'import numpy as np\n'), ((150566, 150588), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (150582, 150588), False, 'import os, sys, glob\n'), ((164250, 164261), 'numpy.std', 'np.std', (['v_l'], {}), '(v_l)\n', (164256, 164261), True, 'import numpy as np\n'), ((164312, 164324), 'numpy.mean', 'np.mean', (['v_l'], {}), '(v_l)\n', (164319, 164324), True, 'import numpy as np\n'), ((164930, 165130), 'analysis.general_utils.plotly_utils.plot_bar', 'plotly_utils.plot_bar', ([], {'x': 'area_keys_s', 'y': 'areas_mean_s', 'text_values': '[]', 'text_size': '(20)', 'title': 'title', 'x_title': 'x_title', 'y_title': 
'y_title', 'margin_b': '(150)', 'err_y': 'areas_std_s', 'err_symmetric': 'err_symmetric'}), '(x=area_keys_s, y=areas_mean_s, text_values=[],\n text_size=20, title=title, x_title=x_title, y_title=y_title, margin_b=\n 150, err_y=areas_std_s, err_symmetric=err_symmetric)\n', (164951, 165130), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((165427, 165570), 'analysis.general_utils.plotly_utils.plot_point_box_revised', 'plotly_utils.plot_point_box_revised', ([], {'x': 'area_keys_s', 'y': 'areas_s', 'title': 'title', 'x_title': 'x_title', 'y_title': 'y_title', 'margin_b': '(150)', 'y_range': 'y_range'}), '(x=area_keys_s, y=areas_s, title=title,\n x_title=x_title, y_title=y_title, margin_b=150, y_range=y_range)\n', (165462, 165570), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((166132, 166147), 'numpy.mean', 'np.mean', (['area_k'], {}), '(area_k)\n', (166139, 166147), True, 'import numpy as np\n'), ((166179, 166193), 'numpy.std', 'np.std', (['area_k'], {}), '(area_k)\n', (166185, 166193), True, 'import numpy as np\n'), ((166928, 166944), 'numpy.mean', 'np.mean', (['dff_res'], {}), '(dff_res)\n', (166935, 166944), True, 'import numpy as np\n'), ((174163, 174205), 'numpy.zeros', 'np.zeros', (['[after_range + before_range + 1]'], {}), '([after_range + before_range + 1])\n', (174171, 174205), True, 'import numpy as np\n'), ((175656, 175685), 'numpy.array', 'np.array', (['signal_delays_all_l'], {}), '(signal_delays_all_l)\n', (175664, 175685), True, 'import numpy as np\n'), ((176675, 176760), 'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['figs[plot_id]', '(lambda x: x / astroA.fr)'], {'axis': '"""x"""'}), "(figs[plot_id], lambda x: x / astroA.fr,\n axis='x')\n", (176706, 176760), False, 'from analysis.general_utils 
import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((183063, 183090), 'numpy.sum', 'np.sum', (['prop_all_np'], {'axis': '(0)'}), '(prop_all_np, axis=0)\n', (183069, 183090), True, 'import numpy as np\n'), ((183320, 183347), 'numpy.sum', 'np.sum', (['prop_all_np'], {'axis': '(0)'}), '(prop_all_np, axis=0)\n', (183326, 183347), True, 'import numpy as np\n'), ((183350, 183369), 'numpy.sum', 'np.sum', (['prop_all_np'], {}), '(prop_all_np)\n', (183356, 183369), True, 'import numpy as np\n'), ((183841, 183868), 'numpy.sum', 'np.sum', (['prop_all_np'], {'axis': '(0)'}), '(prop_all_np, axis=0)\n', (183847, 183868), True, 'import numpy as np\n'), ((188552, 188618), 'numpy.arange', 'np.arange', (['(center_ind - before_range)', '(center_ind + after_range + 1)'], {}), '(center_ind - before_range, center_ind + after_range + 1)\n', (188561, 188618), True, 'import numpy as np\n'), ((189197, 189214), 'numpy.copy', 'np.copy', (['bh_val_l'], {}), '(bh_val_l)\n', (189204, 189214), True, 'import numpy as np\n'), ((193215, 193245), 'numpy.mean', 'np.mean', (['all_events_individual'], {}), '(all_events_individual)\n', (193222, 193245), True, 'import numpy as np\n'), ((198389, 198438), 'numpy.sum', 'np.sum', (['(measure_values_all[state] > std_threshold)'], {}), '(measure_values_all[state] > std_threshold)\n', (198395, 198438), True, 'import numpy as np\n'), ((200371, 200408), 'numpy.zeros', 'np.zeros', (['[max_delay - min_delay + 1]'], {}), '([max_delay - min_delay + 1])\n', (200379, 200408), True, 'import numpy as np\n'), ((202156, 202304), 'analysis.general_utils.plotly_utils.plot_scatter_mult', 'plotly_utils.plot_scatter_mult', (['x_l', 'y_l'], {'name_l': "['stick', 'running', 'rest']", 'mode': '"""lines"""', 'title': 'title', 'x_title': '"""Delay (s)"""', 'y_title': '"""Events"""'}), "(x_l, y_l, name_l=['stick', 'running', 'rest'\n ], mode='lines', title=title, x_title='Delay (s)', y_title='Events')\n", (202186, 
202304), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((202316, 202401), 'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['figs[plot_id]', '(lambda x: x / astroA.fr)'], {'axis': '"""x"""'}), "(figs[plot_id], lambda x: x / astroA.fr,\n axis='x')\n", (202347, 202401), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((208210, 208360), 'analysis.general_utils.plotly_utils.plot_contour', 'plotly_utils.plot_contour', (['border_mask_temp'], {'title': '"""radius_extension_from_center"""', 'height': '(1000)', 'width': '(1000)', 'color_bar_title': '"""Radius (pixels)"""'}), "(border_mask_temp, title=\n 'radius_extension_from_center', height=1000, width=1000,\n color_bar_title='Radius (pixels)')\n", (208235, 208360), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((209380, 209398), 'os.path.join', 'os.path.join', (['path'], {}), '(path)\n', (209392, 209398), False, 'import os, sys, glob\n'), ((209706, 209750), 'numpy.sum', 'np.sum', (['(astroA.all_durations_class_d[k] == 1)'], {}), '(astroA.all_durations_class_d[k] == 1)\n', (209712, 209750), True, 'import numpy as np\n'), ((209788, 209832), 'numpy.sum', 'np.sum', (['(astroA.all_durations_class_d[k] == 2)'], {}), '(astroA.all_durations_class_d[k] == 2)\n', (209794, 209832), True, 'import numpy as np\n'), ((209868, 209912), 'numpy.sum', 'np.sum', (['(astroA.all_durations_class_d[k] == 3)'], {}), '(astroA.all_durations_class_d[k] == 3)\n', (209874, 209912), True, 'import numpy as np\n'), ((210042, 210102), 'analysis.general_utils.general_utils.truncate', 'general_utils.truncate', (['(short_signals_len / total_signals)', '(2)'], {}), '(short_signals_len / 
total_signals, 2)\n', (210064, 210102), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((210138, 210197), 'analysis.general_utils.general_utils.truncate', 'general_utils.truncate', (['(long_signals_len / total_signals)', '(2)'], {}), '(long_signals_len / total_signals, 2)\n', (210160, 210197), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((210235, 210296), 'analysis.general_utils.general_utils.truncate', 'general_utils.truncate', (['(medium_signals_len / total_signals)', '(2)'], {}), '(medium_signals_len / total_signals, 2)\n', (210257, 210296), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((210604, 210627), 'os.path.join', 'os.path.join', (['save_path'], {}), '(save_path)\n', (210616, 210627), False, 'import os, sys, glob\n'), ((216749, 217002), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astro_l_pair[0]', 'astro_l_pair[1]'], {'n_fake_samples': 'self.n_samples_corr_fake', 'align_setting': '"""param"""', 'eval_setting': '"""counter"""', 'fake_sample_setting': '"""from_grid"""', 'move_vector': 'move_vector_d[align_setting]', 'p': 'pk'}), "(astro_l_pair[0], astro_l_pair[1],\n n_fake_samples=self.n_samples_corr_fake, align_setting='param',\n eval_setting='counter', fake_sample_setting='from_grid', move_vector=\n move_vector_d[align_setting], p=pk)\n", (216786, 217002), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((225240, 225293), 'numpy.mean', 'np.mean', (["data_d[pk][split_frames]['num_fake_ratio_l']"], {}), "(data_d[pk][split_frames]['num_fake_ratio_l'])\n", (225247, 225293), 
True, 'import numpy as np\n'), ((225316, 225368), 'numpy.std', 'np.std', (["data_d[pk][split_frames]['num_fake_ratio_l']"], {}), "(data_d[pk][split_frames]['num_fake_ratio_l'])\n", (225322, 225368), True, 'import numpy as np\n'), ((225464, 225520), 'numpy.mean', 'np.mean', (["data_d[pk][split_frames]['num_compare_ratio_l']"], {}), "(data_d[pk][split_frames]['num_compare_ratio_l'])\n", (225471, 225520), True, 'import numpy as np\n'), ((225543, 225598), 'numpy.std', 'np.std', (["data_d[pk][split_frames]['num_compare_ratio_l']"], {}), "(data_d[pk][split_frames]['num_compare_ratio_l'])\n", (225549, 225598), True, 'import numpy as np\n'), ((225706, 225756), 'scipy.stats.ttest_ind_from_stats', 'ttest_ind_from_stats', (['c_m', 'c_s', 'c_l', 'f_m', 'f_s', 'f_l'], {}), '(c_m, c_s, c_l, f_m, f_s, f_l)\n', (225726, 225756), False, 'from scipy.stats import ttest_ind_from_stats\n'), ((227752, 227800), 'analysis.general_utils.plotly_utils.plot_contour', 'plotly_utils.plot_contour', (['grid'], {'title': 'titles[i]'}), '(grid, title=titles[i])\n', (227777, 227800), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((228056, 228149), 'analysis.general_utils.compare_astro_utils.split_astro_grid', 'compare_astro_utils.split_astro_grid', (['astroA'], {'split_frames': 'num_frames_split', 'bk': '"""default"""'}), "(astroA, split_frames=num_frames_split,\n bk='default')\n", (228092, 228149), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((229444, 229485), 'numpy.round', 'np.round', (['(astroA.fr * split_frames_m * 60)'], {}), '(astroA.fr * split_frames_m * 60)\n', (229452, 229485), True, 'import numpy as np\n'), ((230014, 230356), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astroA', 'astroA'], {'grid_target': 
'grid_1', 'grid_source': 'grid_2', 'n_fake_samples': '(0)', 'align_setting': '"""param"""', 'eval_setting': '"""xcorr"""', 'fake_sample_setting': '"""from_grid"""', 'move_vector': '[0, 0]', 'p': '(1)', 'dff_mode': '(False)', 'behaviour': '"""default"""', 'filter_duration': '(None, None)', 'with_output_details': '(False)', 'border_nan': '(True)'}), "(astroA, astroA, grid_target=grid_1,\n grid_source=grid_2, n_fake_samples=0, align_setting='param',\n eval_setting='xcorr', fake_sample_setting='from_grid', move_vector=[0, \n 0], p=1, dff_mode=False, behaviour='default', filter_duration=(None,\n None), with_output_details=False, border_nan=True)\n", (230051, 230356), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((233176, 233287), 'analysis.general_utils.compare_astro_utils.split_astro_grid', 'compare_astro_utils.split_astro_grid', (['astroA'], {'split_frames': 'split_frames', 'bk': '"""default"""', 'inds_subset': 'cut_ind'}), "(astroA, split_frames=split_frames, bk=\n 'default', inds_subset=cut_ind)\n", (233212, 233287), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((235092, 235133), 'numpy.round', 'np.round', (['(astroA.fr * split_frames_m * 60)'], {}), '(astroA.fr * split_frames_m * 60)\n', (235100, 235133), True, 'import numpy as np\n'), ((237632, 237700), 'analysis.general_utils.plotly_utils.plot_contour', 'plotly_utils.plot_contour', (['sample'], {'title': '"""Random event contour plot"""'}), "(sample, title='Random event contour plot')\n", (237657, 237700), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((246091, 246164), 'numpy.histogram', 'np.histogram', (['measure_d[k]'], {'bins': 'num_bins_x', 'range': '(min_range, max_range)'}), 
'(measure_d[k], bins=num_bins_x, range=(min_range, max_range))\n', (246103, 246164), True, 'import numpy as np\n'), ((246715, 246753), 'numpy.sum', 'np.sum', (["measure_counts_d['all_events']"], {}), "(measure_counts_d['all_events'])\n", (246721, 246753), True, 'import numpy as np\n'), ((249492, 249511), 'numpy.log', 'np.log', (['min_measure'], {}), '(min_measure)\n', (249498, 249511), True, 'import numpy as np\n'), ((249580, 249599), 'numpy.log', 'np.log', (['max_measure'], {}), '(max_measure)\n', (249586, 249599), True, 'import numpy as np\n'), ((249929, 249945), 'numpy.exp', 'np.exp', (['(-(x / b))'], {}), '(-(x / b))\n', (249935, 249945), True, 'import numpy as np\n'), ((254991, 255022), 'numpy.arange', 'np.arange', (['(0)', '(adj_to - adj_from)'], {}), '(0, adj_to - adj_from)\n', (255000, 255022), True, 'import numpy as np\n'), ((255283, 255419), 'analysis.general_utils.plotly_utils.plot_scatter_signal', 'plotly_utils.plot_scatter_signal', (['x', 'y', 'adj_begin', 'adj_end'], {'mode': '"""lines"""', 'title': '"""scatter"""', 'x_title': '""""""', 'y_title': '""""""', 'with_legend': '(False)'}), "(x, y, adj_begin, adj_end, mode='lines',\n title='scatter', x_title='', y_title='', with_legend=False)\n", (255315, 255419), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((256542, 256590), 'analysis.general_utils.saving_utils.load_pickle', 'saving_utils.load_pickle', (['pair_save_results_path'], {}), '(pair_save_results_path)\n', (256566, 256590), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((259095, 259113), 'numpy.mean', 'np.mean', (['pair_fake'], {}), '(pair_fake)\n', (259102, 259113), True, 'import numpy as np\n'), ((259356, 259379), 'numpy.array', 'np.array', (['pair_fakes[i]'], {}), '(pair_fakes[i])\n', (259364, 259379), True, 'import numpy as 
np\n'), ((271177, 271223), 'analysis.general_utils.saving_utils.save_pickle', 'saving_utils.save_pickle', (['res_d', 'save_pkl_path'], {}), '(res_d, save_pkl_path)\n', (271201, 271223), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((272128, 272193), 'analysis.general_utils.stat_utils.mean_confidence_interval', 'stat_utils.mean_confidence_interval', (['measure_res'], {'confidence': '(0.95)'}), '(measure_res, confidence=0.95)\n', (272163, 272193), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((280702, 280773), 'analysis.general_utils.stat_utils.mean_confidence_interval', 'stat_utils.mean_confidence_interval', (['event_durations_i'], {'confidence': '(0.95)'}), '(event_durations_i, confidence=0.95)\n', (280737, 280773), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((2357, 2405), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', 'p'], {}), "(output_experiment_path, 'plots', p)\n", (2369, 2405), False, 'import os, sys, glob\n'), ((2611, 2659), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""files"""', 'p'], {}), "(output_experiment_path, 'files', p)\n", (2623, 2659), False, 'import os, sys, glob\n'), ((2689, 2737), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""files"""', 'p'], {}), "(output_experiment_path, 'files', p)\n", (2701, 2737), False, 'import os, sys, glob\n'), ((3105, 3164), 'os.path.join', 'os.path.join', (['output_experiment_path_comparison', '"""plots"""', 'p'], {}), "(output_experiment_path_comparison, 'plots', p)\n", (3117, 3164), False, 'import os, sys, glob\n'), ((3420, 3479), 'os.path.join', 'os.path.join', (['output_experiment_path_comparison', '"""files"""', 'p'], {}), 
"(output_experiment_path_comparison, 'files', p)\n", (3432, 3479), False, 'import os, sys, glob\n'), ((3509, 3568), 'os.path.join', 'os.path.join', (['output_experiment_path_comparison', '"""files"""', 'p'], {}), "(output_experiment_path_comparison, 'files', p)\n", (3521, 3568), False, 'import os, sys, glob\n'), ((8665, 8728), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 'p'], {}), "(output_experiment_path_all_comparison, 'plots', p)\n", (8677, 8728), False, 'import os, sys, glob\n'), ((8874, 8936), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""data"""', 'p'], {}), "(output_experiment_path_all_comparison, 'data', p)\n", (8886, 8936), False, 'import os, sys, glob\n'), ((31994, 32010), 'numpy.copy', 'np.copy', (['x2d_all'], {}), '(x2d_all)\n', (32001, 32010), True, 'import numpy as np\n'), ((32154, 32180), 'numpy.min', 'np.min', (['x2d_all_normalized'], {}), '(x2d_all_normalized)\n', (32160, 32180), True, 'import numpy as np\n'), ((32185, 32211), 'numpy.max', 'np.max', (['x2d_all_normalized'], {}), '(x2d_all_normalized)\n', (32191, 32211), True, 'import numpy as np\n'), ((32214, 32240), 'numpy.min', 'np.min', (['x2d_all_normalized'], {}), '(x2d_all_normalized)\n', (32220, 32240), True, 'import numpy as np\n'), ((34160, 34176), 'numpy.copy', 'np.copy', (['x2d_all'], {}), '(x2d_all)\n', (34167, 34176), True, 'import numpy as np\n'), ((34346, 34372), 'numpy.min', 'np.min', (['x2d_all_normalized'], {}), '(x2d_all_normalized)\n', (34352, 34372), True, 'import numpy as np\n'), ((34377, 34403), 'numpy.max', 'np.max', (['x2d_all_normalized'], {}), '(x2d_all_normalized)\n', (34383, 34403), True, 'import numpy as np\n'), ((34406, 34432), 'numpy.min', 'np.min', (['x2d_all_normalized'], {}), '(x2d_all_normalized)\n', (34412, 34432), True, 'import numpy as np\n'), ((36121, 36133), 'numpy.copy', 'np.copy', (['x2d'], {}), '(x2d)\n', (36128, 36133), True, 'import numpy as np\n'), ((36308, 36334), 
'numpy.min', 'np.min', (['x2d_all_normalized'], {}), '(x2d_all_normalized)\n', (36314, 36334), True, 'import numpy as np\n'), ((36339, 36365), 'numpy.max', 'np.max', (['x2d_all_normalized'], {}), '(x2d_all_normalized)\n', (36345, 36365), True, 'import numpy as np\n'), ((36368, 36394), 'numpy.min', 'np.min', (['x2d_all_normalized'], {}), '(x2d_all_normalized)\n', (36374, 36394), True, 'import numpy as np\n'), ((139441, 139783), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astros_d[astroA_k]', 'astros_d[astroA_k]'], {'n_fake_samples': '(0)', 'align_setting': '"""param"""', 'eval_setting': '"""xcorr"""', 'fake_sample_setting': '"""from_astro"""', 'grid_target': 'bh_split', 'grid_source': 'split_i', 'move_vector': '[0, 0]', 'p': '(1)', 'behaviour': 'bh_pair[0]', 'filter_duration': 'filter_duration', 'with_output_details': '(True)'}), "(astros_d[astroA_k], astros_d[astroA_k\n ], n_fake_samples=0, align_setting='param', eval_setting='xcorr',\n fake_sample_setting='from_astro', grid_target=bh_split, grid_source=\n split_i, move_vector=[0, 0], p=1, behaviour=bh_pair[0], filter_duration\n =filter_duration, with_output_details=True)\n", (139478, 139783), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((165183, 165384), 'analysis.general_utils.plotly_utils.plot_bar', 'plotly_utils.plot_bar', ([], {'x': 'area_keys_s', 'y': 'areas_mean_s', 'text_values': '[]', 'text_size': '(20)', 'title': 'title', 'x_title': 'x_title', 'y_title': 'y_title', 'margin_b': '(150)', 'err_y': 'areas_conf_s', 'err_symmetric': 'err_symmetric'}), '(x=area_keys_s, y=areas_mean_s, text_values=[],\n text_size=20, title=title, x_title=x_title, y_title=y_title, margin_b=\n 150, err_y=areas_conf_s, err_symmetric=err_symmetric)\n', (165204, 165384), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, 
general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((169429, 169528), 'analysis.general_utils.aqua_utils.get_delay_info_from_res', 'aqua_utils.get_delay_info_from_res', (['astroA.indices_d[stick_id]', 'astroA.res_d'], {}), '(astroA.indices_d[stick_id], astroA.res_d,\n **delay_info_args)\n', (169463, 169528), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((169596, 169682), 'analysis.general_utils.aqua_utils.get_delay_info_from_res', 'aqua_utils.get_delay_info_from_res', (['rand_running', 'astroA.res_d'], {}), '(rand_running, astroA.res_d, **\n delay_info_args)\n', (169630, 169682), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((169755, 169844), 'analysis.general_utils.aqua_utils.get_delay_info_from_res', 'aqua_utils.get_delay_info_from_res', (['rand_no_running', 'astroA.res_d'], {}), '(rand_no_running, astroA.res_d, **\n delay_info_args)\n', (169789, 169844), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((169871, 169902), 'numpy.sort', 'np.sort', (['signal_delays_stick_np'], {}), '(signal_delays_stick_np)\n', (169878, 169902), True, 'import numpy as np\n'), ((169935, 169968), 'numpy.sort', 'np.sort', (['signal_delays_running_np'], {}), '(signal_delays_running_np)\n', (169942, 169968), True, 'import numpy as np\n'), ((170004, 170040), 'numpy.sort', 'np.sort', (['signal_delays_no_running_np'], {}), '(signal_delays_no_running_np)\n', (170011, 170040), True, 'import numpy as np\n'), ((170246, 170463), 'analysis.general_utils.plotly_utils.plot_waterfall', 'plotly_utils.plot_waterfall', ([], {'arrays_l': '[stick_v, running_v, no_running_v]', 'legend_names': "['stick', 'running', 'rest']", 'title': '"""Signal (event) delays after 
behaviour"""', 'x_title': '"""Delay (s)"""', 'y_title': '"""Event id"""'}), "(arrays_l=[stick_v, running_v, no_running_v],\n legend_names=['stick', 'running', 'rest'], title=\n 'Signal (event) delays after behaviour', x_title='Delay (s)', y_title=\n 'Event id')\n", (170273, 170463), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((170470, 170555), 'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['figs[plot_id]', '(lambda x: x / astroA.fr)'], {'axis': '"""x"""'}), "(figs[plot_id], lambda x: x / astroA.fr,\n axis='x')\n", (170501, 170555), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((170596, 170833), 'analysis.general_utils.plotly_utils.plot_waterfall_interpolate', 'plotly_utils.plot_waterfall_interpolate', ([], {'arrays_l': '[stick_v, running_v, no_running_v]', 'legend_names': "['stick', 'running', 'rest']", 'title': '"""Signal (event) delays after behaviour (scaled)"""', 'x_title': '"""Delay (s)"""', 'y_title': '"""Event id"""'}), "(arrays_l=[stick_v, running_v,\n no_running_v], legend_names=['stick', 'running', 'rest'], title=\n 'Signal (event) delays after behaviour (scaled)', x_title='Delay (s)',\n y_title='Event id')\n", (170635, 170833), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((170841, 170934), 'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['figs_interp[plot_id]', '(lambda x: x / astroA.fr)'], {'axis': '"""x"""'}), "(figs_interp[plot_id], lambda x: x / astroA.\n fr, axis='x')\n", (170872, 170934), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, 
stat_utils\n'), ((174417, 174522), 'analysis.general_utils.aqua_utils.filter_range_inds', 'aqua_utils.filter_range_inds', (['inds', 'astroA.indices_d[before_bh]'], {'range': '(-before_range, -1)', 'prop': '(1.0)'}), '(inds, astroA.indices_d[before_bh], range=(-\n before_range, -1), prop=1.0)\n', (174445, 174522), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((174559, 174659), 'analysis.general_utils.aqua_utils.filter_range_inds', 'aqua_utils.filter_range_inds', (['inds', 'astroA.indices_d[after_bh]'], {'range': '(1, after_range)', 'prop': '(1.0)'}), '(inds, astroA.indices_d[after_bh], range=(1,\n after_range), prop=1.0)\n', (174587, 174659), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((175466, 175552), 'analysis.general_utils.aqua_utils.get_delay_info_from_res', 'aqua_utils.get_delay_info_from_res', (['indices_filt', 'astroA.res_d'], {}), '(indices_filt, astroA.res_d, **\n delay_info_args)\n', (175500, 175552), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((176352, 176410), 'numpy.arange', 'np.arange', (['(-before_range)', '(after_range + 1)', 'delay_step_size'], {}), '(-before_range, after_range + 1, delay_step_size)\n', (176361, 176410), True, 'import numpy as np\n'), ((200573, 200704), 'analysis.general_utils.aqua_utils.filter_range_inds', 'aqua_utils.filter_range_inds', (['astroA.indices_d[stick_id]', "astroA.indices_d['running']"], {'range': '(min_delay, max_delay)', 'prop': '(0.95)'}), "(astroA.indices_d[stick_id], astroA.indices_d[\n 'running'], range=(min_delay, max_delay), prop=0.95)\n", (200601, 200704), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, 
correlation_utils, stat_utils\n'), ((201138, 201230), 'analysis.general_utils.aqua_utils.get_delay_info_from_res', 'aqua_utils.get_delay_info_from_res', (['stick_indices_filt', 'astroA.res_d'], {}), '(stick_indices_filt, astroA.res_d, **\n delay_info_args)\n', (201172, 201230), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((201767, 201796), 'numpy.array', 'np.array', (['signal_delays_all_l'], {}), '(signal_delays_all_l)\n', (201775, 201796), True, 'import numpy as np\n'), ((202010, 202045), 'numpy.arange', 'np.arange', (['min_delay', '(max_delay + 1)'], {}), '(min_delay, max_delay + 1)\n', (202019, 202045), True, 'import numpy as np\n'), ((204285, 204324), 'numpy.sum', 'np.sum', (['indices_events_k_subset'], {'axis': '(1)'}), '(indices_events_k_subset, axis=1)\n', (204291, 204324), True, 'import numpy as np\n'), ((223946, 223974), 'numpy.random.permutation', 'np.random.permutation', (['pairs'], {}), '(pairs)\n', (223967, 223974), True, 'import numpy as np\n'), ((224227, 224428), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astroA', 'astroA'], {'grid_target': 'event_grid_splits_l[i]', 'grid_source': 'event_grid_splits_l[j]', 'n_fake_samples': '(1)', 'align_setting': '"""param"""', 'move_vector': '[0, 0]', 'p': 'pk'}), "(astroA, astroA, grid_target=\n event_grid_splits_l[i], grid_source=event_grid_splits_l[j],\n n_fake_samples=1, align_setting='param', move_vector=[0, 0], p=pk)\n", (224264, 224428), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((230622, 230680), 'analysis.general_utils.general_utils.truncate', 'general_utils.truncate', (['(split_frames / (astroA.fr * 60))', '(1)'], {}), '(split_frames / (astroA.fr * 60), 1)\n', (230644, 230680), False, 'from analysis.general_utils import 
aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((232888, 232929), 'numpy.round', 'np.round', (['(astroA.fr * split_frames_m * 60)'], {}), '(astroA.fr * split_frames_m * 60)\n', (232896, 232929), True, 'import numpy as np\n'), ((233420, 233762), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astroA', 'astroA'], {'grid_target': 'grid_1', 'grid_source': 'grid_2', 'n_fake_samples': '(0)', 'align_setting': '"""param"""', 'eval_setting': '"""xcorr"""', 'fake_sample_setting': '"""from_grid"""', 'move_vector': '[0, 0]', 'p': '(1)', 'dff_mode': '(False)', 'behaviour': '"""default"""', 'filter_duration': '(None, None)', 'with_output_details': '(False)', 'border_nan': '(True)'}), "(astroA, astroA, grid_target=grid_1,\n grid_source=grid_2, n_fake_samples=0, align_setting='param',\n eval_setting='xcorr', fake_sample_setting='from_grid', move_vector=[0, \n 0], p=1, dff_mode=False, behaviour='default', filter_duration=(None,\n None), with_output_details=False, border_nan=True)\n", (233457, 233762), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((236370, 236428), 'analysis.general_utils.general_utils.truncate', 'general_utils.truncate', (['(split_frames / (astroA.fr * 60))', '(1)'], {}), '(split_frames / (astroA.fr * 60), 1)\n', (236392, 236428), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((243360, 243635), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astro_l_pair[i]', 'astro_l_pair[i]'], {'n_fake_samples': '(0)', 'align_setting': '"""param"""', 'eval_setting': '"""xcorr"""', 'fake_sample_setting': '"""from_astro"""', 'move_vector': '[0, 0]', 'p': '(1)', 'behaviour': 
'[behaviour_l[bh_i], behaviour_l[bh_j]]', 'dff_mode': 'dff_mode'}), "(astro_l_pair[i], astro_l_pair[i],\n n_fake_samples=0, align_setting='param', eval_setting='xcorr',\n fake_sample_setting='from_astro', move_vector=[0, 0], p=1, behaviour=[\n behaviour_l[bh_i], behaviour_l[bh_j]], dff_mode=dff_mode)\n", (243397, 243635), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((245269, 245289), 'numpy.min', 'np.min', (['measure_d[k]'], {}), '(measure_d[k])\n', (245275, 245289), True, 'import numpy as np\n'), ((245433, 245453), 'numpy.max', 'np.max', (['measure_d[k]'], {}), '(measure_d[k])\n', (245439, 245453), True, 'import numpy as np\n'), ((246225, 246252), 'numpy.sum', 'np.sum', (['measure_counts_d[k]'], {}), '(measure_counts_d[k])\n', (246231, 246252), True, 'import numpy as np\n'), ((248101, 248111), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (248108, 248111), True, 'import numpy as np\n'), ((249377, 249387), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (249384, 249387), True, 'import numpy as np\n'), ((249630, 249643), 'numpy.array', 'np.array', (['x_l'], {}), '(x_l)\n', (249638, 249643), True, 'import numpy as np\n'), ((256725, 256792), 'analysis.general_utils.compare_astro_utils.get_move_vector_xcorr_default', 'compare_astro_utils.get_move_vector_xcorr_default', (['astro_1', 'astro_2'], {}), '(astro_1, astro_2)\n', (256774, 256792), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((256864, 257156), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astro_1', 'astro_2'], {'n_fake_samples': 'n_fake_samples', 'align_setting': '"""param"""', 'eval_setting': '"""xcorr"""', 'fake_sample_setting': '"""from_astro"""', 'move_vector': 'move_vector', 'p': '(1)', 'behaviour': 'behaviour', 
'filter_duration': 'filter_duration', 'with_output_details': '(True)'}), "(astro_1, astro_2, n_fake_samples=\n n_fake_samples, align_setting='param', eval_setting='xcorr',\n fake_sample_setting='from_astro', move_vector=move_vector, p=1,\n behaviour=behaviour, filter_duration=filter_duration,\n with_output_details=True)\n", (256901, 257156), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((258712, 258763), 'analysis.general_utils.saving_utils.save_pickle', 'saving_utils.save_pickle', (['d', 'pair_save_results_path'], {}), '(d, pair_save_results_path)\n', (258736, 258763), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((265178, 265283), 'analysis.general_utils.compare_astro_utils.get_filters_compare', 'compare_astro_utils.get_filters_compare', (['[astro]'], {'p': '(1)', 'dff_mode': 'dff_mode', 'behaviour': 'behaviour_pair[0]'}), '([astro], p=1, dff_mode=dff_mode,\n behaviour=behaviour_pair[0])\n', (265217, 265283), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((265374, 265479), 'analysis.general_utils.compare_astro_utils.get_filters_compare', 'compare_astro_utils.get_filters_compare', (['[astro]'], {'p': '(1)', 'dff_mode': 'dff_mode', 'behaviour': 'behaviour_pair[1]'}), '([astro], p=1, dff_mode=dff_mode,\n behaviour=behaviour_pair[1])\n', (265413, 265479), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((279891, 279908), 'numpy.sum', 'np.sum', (['area_bins'], {}), '(area_bins)\n', (279897, 279908), True, 'import numpy as np\n'), ((279975, 279992), 'numpy.sum', 'np.sum', (['area_bins'], {}), '(area_bins)\n', (279981, 279992), True, 'import 
numpy as np\n'), ((282991, 283027), 'numpy.mean', 'np.mean', (['num_events_pp_pm_norm_whole'], {}), '(num_events_pp_pm_norm_whole)\n', (282998, 283027), True, 'import numpy as np\n'), ((283673, 283699), 'numpy.mean', 'np.mean', (['event_durations_i'], {}), '(event_durations_i)\n', (283680, 283699), True, 'import numpy as np\n'), ((14070, 14107), 'analysis.general_utils.aqua_utils.get_measure_names', 'aqua_utils.get_measure_names', (['measure'], {}), '(measure)\n', (14098, 14107), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((14817, 14874), 'analysis.general_utils.saving_utils.save_pth_plt_l_log', 'saving_utils.save_pth_plt_l_log', (['[plot]', '[path]'], {'axis': '"""x"""'}), "([plot], [path], axis='x')\n", (14848, 14874), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((14899, 14956), 'analysis.general_utils.saving_utils.save_pth_plt_l_log', 'saving_utils.save_pth_plt_l_log', (['[plot]', '[path]'], {'axis': '"""y"""'}), "([plot], [path], axis='y')\n", (14930, 14956), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((142001, 142334), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astros_d[astroA_k]', 'astros_d[astroA_k]'], {'n_fake_samples': '(0)', 'align_setting': '"""param"""', 'eval_setting': '"""xcorr"""', 'fake_sample_setting': '"""from_astro"""', 'grid_target': 'split_i', 'grid_source': 'split_j', 'move_vector': '[0, 0]', 'p': '(1)', 'behaviour': 'bh', 'filter_duration': 'filter_duration', 'with_output_details': '(True)'}), "(astros_d[astroA_k], astros_d[astroA_k\n ], n_fake_samples=0, align_setting='param', eval_setting='xcorr',\n fake_sample_setting='from_astro', 
grid_target=split_i, grid_source=\n split_j, move_vector=[0, 0], p=1, behaviour=bh, filter_duration=\n filter_duration, with_output_details=True)\n", (142038, 142334), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((145706, 146033), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astro_pair[0]', 'astro_pair[1]'], {'n_fake_samples': '(0)', 'align_setting': '"""param"""', 'eval_setting': '"""xcorr"""', 'fake_sample_setting': '"""from_astro"""', 'grid_target': 'split_i', 'grid_source': 'split_j', 'move_vector': 'move_vector', 'p': '(1)', 'behaviour': 'bh', 'filter_duration': 'filter_duration', 'with_output_details': '(True)'}), "(astro_pair[0], astro_pair[1],\n n_fake_samples=0, align_setting='param', eval_setting='xcorr',\n fake_sample_setting='from_astro', grid_target=split_i, grid_source=\n split_j, move_vector=move_vector, p=1, behaviour=bh, filter_duration=\n filter_duration, with_output_details=True)\n", (145743, 146033), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((197782, 197792), 'sys.exit', 'sys.exit', ([], {}), '()\n', (197790, 197792), False, 'import os, sys, glob\n'), ((197833, 197869), 'numpy.array', 'np.array', (['event_measure_all_l_l[s_i]'], {}), '(event_measure_all_l_l[s_i])\n', (197841, 197869), True, 'import numpy as np\n'), ((207515, 207583), 'analysis.general_utils.aqua_utils.get_euclidean_distances', 'aqua_utils.get_euclidean_distances', (['clandmark_center_flipped', '[i, j]'], {}), '(clandmark_center_flipped, [i, j])\n', (207549, 207583), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((207623, 207668), 'numpy.searchsorted', 'np.searchsorted', (['r_bins', 'r_dist'], 
{'side': '"""right"""'}), "(r_bins, r_dist, side='right')\n", (207638, 207668), True, 'import numpy as np\n'), ((211055, 211083), 'analysis.general_utils.general_utils.truncate', 'general_utils.truncate', (['v', '(4)'], {}), '(v, 4)\n', (211077, 211083), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((211200, 211228), 'analysis.general_utils.general_utils.truncate', 'general_utils.truncate', (['v', '(4)'], {}), '(v, 4)\n', (211222, 211228), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((211342, 211389), 'analysis.general_utils.general_utils.truncate', 'general_utils.truncate', (['pair_corrs_before[i]', '(4)'], {}), '(pair_corrs_before[i], 4)\n', (211364, 211389), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((211473, 211519), 'analysis.general_utils.general_utils.truncate', 'general_utils.truncate', (['pair_corrs_after[i]', '(4)'], {}), '(pair_corrs_after[i], 4)\n', (211495, 211519), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((234092, 234150), 'analysis.general_utils.general_utils.truncate', 'general_utils.truncate', (['(split_frames / (astroA.fr * 60))', '(1)'], {}), '(split_frames / (astroA.fr * 60), 1)\n', (234114, 234150), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((235723, 236065), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astroA', 'astroA'], {'grid_target': 'grid_1', 'grid_source': 'grid_2', 'n_fake_samples': '(0)', 'align_setting': '"""param"""', 
'eval_setting': '"""xcorr"""', 'fake_sample_setting': '"""from_grid"""', 'move_vector': '[0, 0]', 'p': '(1)', 'dff_mode': '(False)', 'behaviour': '"""default"""', 'filter_duration': '(None, None)', 'with_output_details': '(False)', 'border_nan': '(True)'}), "(astroA, astroA, grid_target=grid_1,\n grid_source=grid_2, n_fake_samples=0, align_setting='param',\n eval_setting='xcorr', fake_sample_setting='from_grid', move_vector=[0, \n 0], p=1, dff_mode=False, behaviour='default', filter_duration=(None,\n None), with_output_details=False, border_nan=True)\n", (235760, 236065), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((257792, 258079), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astro_1', 'astro_2'], {'n_fake_samples': 'n_fake_samples', 'align_setting': '"""param"""', 'eval_setting': '"""xcorr_free"""', 'fake_sample_setting': '"""from_astro"""', 'move_vector': 'None', 'p': '(1)', 'behaviour': 'behaviour', 'filter_duration': 'filter_duration', 'with_output_details': '(True)'}), "(astro_1, astro_2, n_fake_samples=\n n_fake_samples, align_setting='param', eval_setting='xcorr_free',\n fake_sample_setting='from_astro', move_vector=None, p=1, behaviour=\n behaviour, filter_duration=filter_duration, with_output_details=True)\n", (257829, 258079), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((261263, 261330), 'analysis.general_utils.compare_astro_utils.get_move_vector_xcorr_default', 'compare_astro_utils.get_move_vector_xcorr_default', (['astro_1', 'astro_2'], {}), '(astro_1, astro_2)\n', (261312, 261330), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((261410, 261755), 
'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astro_1', 'astro_2'], {'n_fake_samples': "(n_fake_samples if behaviour == 'default' else 0)", 'align_setting': '"""param"""', 'eval_setting': '"""xcorr"""', 'fake_sample_setting': '"""from_astro"""', 'move_vector': 'move_vector', 'p': '(1)', 'behaviour': 'behaviour', 'filter_duration': 'filter_duration', 'with_output_details': '(True)', 'dff_mode': 'dff_mode'}), "(astro_1, astro_2, n_fake_samples=\n n_fake_samples if behaviour == 'default' else 0, align_setting='param',\n eval_setting='xcorr', fake_sample_setting='from_astro', move_vector=\n move_vector, p=1, behaviour=behaviour, filter_duration=filter_duration,\n with_output_details=True, dff_mode=dff_mode)\n", (261447, 261755), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((265655, 265722), 'analysis.general_utils.compare_astro_utils.get_move_vector_xcorr_default', 'compare_astro_utils.get_move_vector_xcorr_default', (['astro_1', 'astro_2'], {}), '(astro_1, astro_2)\n', (265704, 265722), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((265752, 266143), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astro_1', 'astro_2'], {'n_fake_samples': "(n_fake_samples if behaviour_pair[0] == 'default' else 0)", 'align_setting': '"""param"""', 'eval_setting': '"""xcorr"""', 'fake_sample_setting': '"""from_astro"""', 'grid_target': 'astro_a_grid', 'grid_source': 'astro_b_grid', 'move_vector': 'move_vector', 'p': '(1)', 'behaviour': '"""default"""', 'filter_duration': 'filter_duration', 'with_output_details': '(True)'}), "(astro_1, astro_2, n_fake_samples=\n n_fake_samples if behaviour_pair[0] == 'default' else 0, align_setting=\n 'param', 
eval_setting='xcorr', fake_sample_setting='from_astro',\n grid_target=astro_a_grid, grid_source=astro_b_grid, move_vector=\n move_vector, p=1, behaviour='default', filter_duration=filter_duration,\n with_output_details=True)\n", (265789, 266143), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((282820, 282837), 'numpy.sum', 'np.sum', (['area_bins'], {}), '(area_bins)\n', (282826, 282837), True, 'import numpy as np\n'), ((14718, 14790), 'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['plot', '(lambda x: x / astroA.fr)'], {'axis': '"""x"""'}), "(plot, lambda x: x / astroA.fr, axis='x')\n", (14749, 14790), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((197584, 197613), 'numpy.array', 'np.array', (['signal_delays_all_l'], {}), '(signal_delays_all_l)\n', (197592, 197613), True, 'import numpy as np\n'), ((225910, 225968), 'analysis.general_utils.general_utils.truncate', 'general_utils.truncate', (['(split_frames / (astroA.fr * 60))', '(1)'], {}), '(split_frames / (astroA.fr * 60), 1)\n', (225932, 225968), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((254449, 254493), 'numpy.where', 'np.where', (["(astroA.res_d['tBegin'] > time_from)"], {}), "(astroA.res_d['tBegin'] > time_from)\n", (254457, 254493), True, 'import numpy as np\n'), ((254504, 254544), 'numpy.where', 'np.where', (["(astroA.res_d['tEnd'] < time_to)"], {}), "(astroA.res_d['tEnd'] < time_to)\n", (254512, 254544), True, 'import numpy as np\n'), ((262502, 262845), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astro_1', 'astro_2'], {'n_fake_samples': "(n_fake_samples if behaviour == 
'default' else 0)", 'align_setting': '"""param"""', 'eval_setting': '"""xcorr_free"""', 'fake_sample_setting': '"""from_astro"""', 'move_vector': 'None', 'p': '(1)', 'behaviour': 'behaviour', 'filter_duration': 'filter_duration', 'with_output_details': '(True)', 'dff_mode': 'dff_mode'}), "(astro_1, astro_2, n_fake_samples=\n n_fake_samples if behaviour == 'default' else 0, align_setting='param',\n eval_setting='xcorr_free', fake_sample_setting='from_astro',\n move_vector=None, p=1, behaviour=behaviour, filter_duration=\n filter_duration, with_output_details=True, dff_mode=dff_mode)\n", (262539, 262845), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((266953, 267342), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astro_1', 'astro_2'], {'n_fake_samples': "(n_fake_samples if behaviour_pair[0] == 'default' else 0)", 'align_setting': '"""param"""', 'eval_setting': '"""xcorr_free"""', 'fake_sample_setting': '"""from_astro"""', 'grid_target': 'astro_a_grid', 'grid_source': 'astro_b_grid', 'move_vector': 'None', 'p': '(1)', 'behaviour': '"""default"""', 'filter_duration': 'filter_duration', 'with_output_details': '(True)'}), "(astro_1, astro_2, n_fake_samples=\n n_fake_samples if behaviour_pair[0] == 'default' else 0, align_setting=\n 'param', eval_setting='xcorr_free', fake_sample_setting='from_astro',\n grid_target=astro_a_grid, grid_source=astro_b_grid, move_vector=None, p\n =1, behaviour='default', filter_duration=filter_duration,\n with_output_details=True)\n", (266990, 267342), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((269929, 270220), 'analysis.general_utils.compare_astro_utils.alignment_counter', 'compare_astro_utils.alignment_counter', (['astro_pair[0]', 'astro_pair[1]'], 
{'n_fake_samples': 'n_fake_samples', 'align_setting': '"""xcorr"""', 'eval_setting': '"""xcorr_random_both"""', 'fake_sample_setting': '"""from_astro"""', 'p': '(1)', 'behaviour': '"""default"""', 'dff_mode': 'dff_mode', 'border_nan': '(True)', 'with_output_details': '(True)'}), "(astro_pair[0], astro_pair[1],\n n_fake_samples=n_fake_samples, align_setting='xcorr', eval_setting=\n 'xcorr_random_both', fake_sample_setting='from_astro', p=1, behaviour=\n 'default', dff_mode=dff_mode, border_nan=True, with_output_details=True)\n", (269966, 270220), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((15236, 15253), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (15247, 15253), False, 'import os, sys, glob\n'), ((176062, 176098), 'numpy.sum', 'np.sum', (['(signal_delays_all == delay_x)'], {}), '(signal_delays_all == delay_x)\n', (176068, 176098), True, 'import numpy as np\n'), ((182517, 182546), 'numpy.array', 'np.array', (['signal_delays_all_l'], {}), '(signal_delays_all_l)\n', (182525, 182546), True, 'import numpy as np\n'), ((197704, 197733), 'numpy.array', 'np.array', (['signal_delays_all_l'], {}), '(signal_delays_all_l)\n', (197712, 197733), True, 'import numpy as np\n'), ((201923, 201959), 'numpy.sum', 'np.sum', (['(signal_delays_all == delay_x)'], {}), '(signal_delays_all == delay_x)\n', (201929, 201959), True, 'import numpy as np\n'), ((182304, 182333), 'numpy.array', 'np.array', (['signal_delays_all_l'], {}), '(signal_delays_all_l)\n', (182312, 182333), True, 'import numpy as np\n'), ((182424, 182453), 'numpy.array', 'np.array', (['signal_delays_all_l'], {}), '(signal_delays_all_l)\n', (182432, 182453), True, 'import numpy as np\n'), ((182731, 182767), 'numpy.array', 'np.array', (['event_measure_all_l_l[s_i]'], {}), '(event_measure_all_l_l[s_i])\n', (182739, 182767), True, 'import numpy as np\n'), ((201402, 201443), 'numpy.sum', 'np.sum', 
(['(signal_delays_stick_np == delay_x)'], {}), '(signal_delays_stick_np == delay_x)\n', (201408, 201443), True, 'import numpy as np\n'), ((15677, 15701), 'numpy.array', 'np.array', (["temp_d['data']"], {}), "(temp_d['data'])\n", (15685, 15701), True, 'import numpy as np\n'), ((182777, 182806), 'numpy.array', 'np.array', (['signal_delays_all_l'], {}), '(signal_delays_all_l)\n', (182785, 182806), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import argparse
import asyncio
import os
import sys
import discord
import numpy as np
import sounddevice as sd
class SoundDeviceSource(discord.AudioSource):
    """PCM audio source backed by a local ``sounddevice`` input stream.

    Captures mono 48 kHz 16-bit audio from ``device`` and serves 20 ms
    frames duplicated to two channels, the layout discord.py expects.
    """

    def __init__(self, device):
        # Low-latency capture stream; discord.py consumes 48 kHz s16 PCM.
        self.stream = sd.InputStream(
            samplerate=48000,
            channels=1,
            device=device,
            dtype='int16',
            latency='low',
        )
        self.stream.start()

    def is_opus(self):
        # Raw PCM, not pre-encoded Opus.
        return False

    def read(self):
        # 960 frames at 48 kHz == one 20 ms Discord voice frame.
        frames, _overflowed = self.stream.read(960)
        # Duplicate the mono column so the frame is interleaved stereo.
        stereo = np.repeat(frames, 2, 1)
        return stereo.data.tobytes()

    def cleanup(self):
        self.stream.stop()
class VoxSource(discord.AudioSource):
    """Voice-activated (VOX) gate wrapped around another PCM AudioSource.

    Forwarding starts when the input exceeds ``threshold`` and stops after
    ``duration`` consecutive quiet frames.  While idle, the ``on_vox`` task
    keeps polling the source so playback can be restarted on loud input.
    """
    def __init__(self, source):
        self.source = source          # underlying PCM source (must not be Opus)
        self.active = False           # True while audio is being forwarded
        # NOTE(review): the threshold is compared against raw PCM *byte*
        # values (0-255), not decoded sample amplitudes -- a crude loudness
        # proxy; confirm this is intentional.
        self.threshold = 16
        self.duration = 25            # quiet frames tolerated before "VOX off"
        self.silent_for = 0           # consecutive quiet frames seen so far
        if self.source.is_opus():
            raise ValueError("cannot use VoxSource with an Opus source")
        self.voice = None             # discord VoiceClient, set by start_vox()
        self.task = None              # asyncio polling task, set by start_vox()
    def is_opus(self):
        # Delegate to the wrapped source; __init__ guarantees it is False.
        return self.source.is_opus()
    def read(self):
        data = self.source.read()
        if self.active:
            if max(data) < self.threshold:
                # Quiet frame: count it, and stop after `duration` in a row.
                self.silent_for += 1
                if self.silent_for >= self.duration:
                    print('VOX off')
                    self.active = False
                    # Empty bytes signals end-of-stream to the player.
                    return bytes([])
            else:
                self.silent_for = 0
        return data
    def cleanup(self):
        # The wrapped source owns the capture stream; nothing to release here.
        pass
    async def on_vox(self):
        # Poll the source while idle and (re)start playback on loud input.
        loop = asyncio.get_running_loop()
        while True:
            start_time = loop.time()
            if not self.active:
                data = self.read()
                if max(data) >= self.threshold:
                    print('VOX on')
                    self.active = True
                    self.voice.play(self)
            # NOTE(review): this sleeps (elapsed + 2 ms), not the remainder
            # of a 20 ms frame period -- confirm the intended pacing.
            await asyncio.sleep(loop.time() - start_time + 0.002)
    def start_vox(self, voice):
        # Begin monitoring; `voice` is the connected VoiceClient to play into.
        self.voice = voice
        self.task = asyncio.create_task(self.on_vox())
    def stop_vox(self):
        self.task.cancel()
class AudioPatchClient(discord.Client):
    """Discord client that joins one voice channel and streams a local input.

    ``channel`` may be a numeric channel ID or a channel name; names are
    resolved against the guilds the bot is in (optionally restricted to
    ``guild``).  Audio comes from ``input_device`` gated through a VOX.
    """
    def __init__(self, channel, guild=None, input_device=sd.default.device[0]):
        super().__init__()
        # The device may be given as a numeric index or a device-name string.
        try:
            input_device = int(input_device)
        except ValueError:
            pass
        real_source = SoundDeviceSource(device=input_device)
        self.source = VoxSource(real_source)
        # A numeric argument is a channel ID; otherwise treat it as a name.
        try:
            self.channel_id = int(channel)
        except ValueError:
            self.channel_id = None
            self.channel_name = channel
        self.guild = guild
        self.voice = None

    async def on_ready(self):
        print("Logged on as", self.user)
        if self.channel_id is not None:
            channel = self.get_channel(self.channel_id)
        else:
            channel = None
            for guild in self.guilds:
                if self.guild and guild.name != self.guild:
                    continue
                for guild_channel in guild.voice_channels:
                    if guild_channel.name == self.channel_name:
                        channel = guild_channel
                        break
        if not channel:
            # Bug fix: report what the user actually asked for.  When the
            # channel was given by name, self.channel_id is None and the old
            # message printed "can't find channel 'None'".
            requested = (self.channel_id if self.channel_id is not None
                         else self.channel_name)
            print("{0}: error: can't find channel '{1}'"
                  .format(sys.argv[0], requested),
                  file=sys.stderr)
            sys.exit(1)
        self.voice = await channel.connect()
        print("Connected to voice channel", self.voice.channel.name)
        self.source.start_vox(self.voice)
def main():
    """Command-line entry point: parse arguments and run the patch bot.

    Exits with status 0 after ``--list-devices``, 1 on missing token or
    channel argument.
    """
    parser = argparse.ArgumentParser(
        description="Patch a pair of audio devices to a Discord voice channel")
    parser.add_argument('channel', metavar='CHANNEL', nargs='?',
                        help=
                        "voice channel to patch (channel ID or name)")
    parser.add_argument('--guild',
                        default=None,
                        help="guild name")
    parser.add_argument('--token',
                        default=os.environ.get('DISCORD_TOKEN', None),
                        help="Discord token (default: $DISCORD_TOKEN)")
    parser.add_argument('--input',
                        default=sd.default.device[0],
                        help="input audio device (ID or name)")
    parser.add_argument('--list-devices', action='store_true',
                        help="list audio devices")
    args = parser.parse_args()
    if args.list_devices:
        print('Input devices:')
        # Bug fix: print the device's *global* sounddevice index.  The old
        # code numbered only the input devices, so the printed IDs did not
        # match what sd.InputStream(device=...) expects whenever an
        # output-only device appeared earlier in the device list.
        for device_id, device in enumerate(sd.query_devices()):
            if device['max_input_channels'] > 0:
                print(' {0}:'.format(device_id), device['name'])
        print()
        sys.exit(0)
    if not args.token:
        print("{0}: error: --token or $DISCORD_TOKEN required".format(sys.argv[0]),
              file=sys.stderr)
        sys.exit(1)
    if args.channel is None:
        print("{0}: error: CHANNEL required".format(sys.argv[0]),
              file=sys.stderr)
        sys.exit(1)
    client = AudioPatchClient(channel=args.channel,
                             guild=args.guild,
                             input_device=args.input)
    client.run(args.token)
# Run the CLI entry point only when executed as a script.
if __name__ == '__main__':
    main()
| [
"sounddevice.InputStream",
"numpy.repeat",
"argparse.ArgumentParser",
"os.environ.get",
"sounddevice.query_devices",
"sys.exit",
"asyncio.get_running_loop"
] | [((3713, 3813), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Patch a pair of audio devices to a Discord voice channel"""'}), "(description=\n 'Patch a pair of audio devices to a Discord voice channel')\n", (3736, 3813), False, 'import argparse\n'), ((238, 331), 'sounddevice.InputStream', 'sd.InputStream', ([], {'samplerate': '(48000)', 'channels': '(1)', 'device': 'device', 'dtype': '"""int16"""', 'latency': '"""low"""'}), "(samplerate=48000, channels=1, device=device, dtype='int16',\n latency='low')\n", (252, 331), True, 'import sounddevice as sd\n'), ((627, 648), 'numpy.repeat', 'np.repeat', (['data', '(2)', '(1)'], {}), '(data, 2, 1)\n', (636, 648), True, 'import numpy as np\n'), ((1652, 1678), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (1676, 1678), False, 'import asyncio\n'), ((4679, 4697), 'sounddevice.query_devices', 'sd.query_devices', ([], {}), '()\n', (4695, 4697), True, 'import sounddevice as sd\n'), ((4865, 4876), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4873, 4876), False, 'import sys\n'), ((5024, 5035), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5032, 5035), False, 'import sys\n'), ((5171, 5182), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5179, 5182), False, 'import sys\n'), ((4167, 4204), 'os.environ.get', 'os.environ.get', (['"""DISCORD_TOKEN"""', 'None'], {}), "('DISCORD_TOKEN', None)\n", (4181, 4204), False, 'import os\n'), ((3516, 3527), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3524, 3527), False, 'import sys\n')] |
from collections import Mapping, Iterable
import copy as copy_
import numpy as np
import datetime as dt
from . import misc
def select_var(d, name, sel):
    """Apply selector ``sel`` to variable ``name`` of dataset ``d``, in place.

    Two selector forms are supported:
      * ``{dim: indices}`` -- index one dimension with a scalar, a list/array
        of positions, or a boolean mask (a scalar selection collapses the
        dimension);
      * ``{newdim: {dim1: idxs1, dim2: idxs2, ...}}`` -- fancy-index several
        dimensions at once, replacing them with a single new dimension.

    The variable's ``.dims`` metadata is updated to match.
    """
    var_dims = list(d['.'][name]['.dims'])
    # The metadata entry now aliases the working list, so edits below stick.
    d['.'][name]['.dims'] = var_dims
    for key, value in sel.items():
        if isinstance(value, Mapping):
            if len(sel) > 1:
                raise ValueError('invalid selector')
            newdim = key
            # Bug fix: materialize the dict views -- in Python 3,
            # dict.keys() has no .index() and dict.values() cannot be
            # subscripted, so the original code raised here.
            dims = list(value.keys())
            idxs = list(value.values())
            selector = tuple(
                idxs[dims.index(var_dim)] if var_dim in dims else slice(None)
                for var_dim in var_dims
            )
            d[name] = d[name][selector]
            for dim in dims:
                if dim in var_dims:
                    var_dims.remove(dim)
            d['.'][name]['.dims'].append(newdim)
        else:
            dim, idxs = key, value
            if type(idxs) in (list, tuple):
                idxs = np.array(idxs)
            # Boolean masks become positional indices.  Bug fix: np.bool was
            # removed from numpy (1.24); compare against np.bool_ instead.
            if isinstance(idxs, np.ndarray) and idxs.dtype == np.bool_:
                idxs = np.nonzero(idxs)[0]
            if dim in var_dims:
                i = var_dims.index(dim)
                d[name] = np.take(d[name], idxs, axis=i)
                if not isinstance(idxs, np.ndarray):
                    # A scalar selection collapses (removes) the dimension.
                    var_dims.remove(dim)
if not isinstance(idxs, np.ndarray):
var_dims.remove(dim)
def filter_hidden(x):
if isinstance(x, Mapping):
return {k: v for k, v in x.items() if not k.startswith('.')}
if isinstance(x, Iterable):
return [k for k in x if not k.startswith('.')]
return x
def select(d, sel):
    """Apply selector ``sel`` to every public variable of ``d``, in place."""
    for var_name in d.keys():
        if not var_name.startswith('.'):
            select_var(d, var_name, sel)
def get_dims(d, name=None):
    """Return dim sizes for the whole dataset, or dim names of one variable.

    With ``name`` given, returns that variable's ``.dims`` metadata (or
    synthesized names when the metadata is missing).  Without it, returns a
    mapping of every dimension name to its size.
    """
    if name is not None:
        try:
            return d['.'][name]['.dims']
        except KeyError:
            # No metadata for this variable: synthesize default dim names.
            return gen_dims(d, name)
    sizes = {}
    for var in get_vars(d):
        arr = get_var(d, var)
        for axis, dim in enumerate(get_dims(d, var)):
            sizes[dim] = arr.shape[axis]
    return sizes
def get_vars(d):
    """List the public (non-dot-prefixed) variable names of ``d``."""
    return [key for key in d.keys() if not key.startswith('.')]
def get_var(d, name):
    """Return variable ``name`` as a numpy array, converting when needed.

    An existing ndarray is returned as-is (no copy); anything else is
    wrapped with ``np.array``.
    """
    value = d[name]
    if type(value) is np.ndarray:
        return value
    return np.array(value)
def get_meta(d, name=None):
    """Return the dataset's metadata dict, or the metadata of one variable.

    Missing metadata yields an empty dict rather than raising.
    """
    if name is None:
        return d.get('.', {})
    try:
        return d['.'][name]
    except KeyError:
        return {}
def get_attrs(d, name=None):
    """Return the public attributes of the dataset or of one variable.

    Global attributes live under ``d['.']['.']``; per-variable attributes
    under ``d['.'][name]``.  Missing entries yield an empty dict.
    """
    key = '.' if name is None else name
    try:
        return filter_hidden(d['.'][key])
    except KeyError:
        return {}
def gen_dims(d, name):
    """Synthesize dim names ``name_1`` .. ``name_ndim`` for a variable.

    Used when a variable carries no ``.dims`` metadata.
    """
    value = d[name]
    if type(value) is not np.ndarray:
        value = np.array(value)
    return ['%s_%d' % (name, axis) for axis in range(1, value.ndim + 1)]
def parse_time(t):
    """Parse timestamp string ``t``; return a datetime or None on failure.

    Supported formats: ``YYYY-MM-DD HH:MM:SS[.ffffff]`` and the ISO form
    ``YYYY-MM-DDTHH:MM:SSZ``.
    """
    formats = [
        '%Y-%m-%d %H:%M:%S.%f',
        '%Y-%m-%d %H:%M:%S',
        '%Y-%m-%dT%H:%M:%SZ',
    ]
    for fmt in formats:
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit.  strptime signals failure with
        # ValueError (TypeError for non-string input), so catch only those.
        try:
            return dt.datetime.strptime(t, fmt)
        except (ValueError, TypeError):
            pass
    return None
def time_dt(time):
    """Convert a sequence of time strings to datetimes (None where unparseable)."""
    return list(map(parse_time, time))
def merge_var(dd, var, dim):
    """Merge variable ``var`` across the datasets ``dd`` along ``dim``.

    If ``dim`` is already one of the variable's dimensions, the arrays are
    concatenated along it; otherwise a new leading dimension ``dim`` is
    created by stacking.  Only datasets whose dims match the first dataset's
    are included.

    Returns ``(array, metadata)`` where metadata is a deep copy of the first
    dataset's entry, or ``(None, None)`` when ``dd`` is empty.
    """
    if not dd:
        return None, None
    # Cleanup: the original also bound dd[0][var] to an unused local (x0).
    meta0 = dd[0]['.'][var]
    dims0 = meta0['.dims']
    meta = copy_.deepcopy(meta0)
    # Skip datasets whose dimension layout differs from the first one.
    parts = [d[var] for d in dd if d['.'][var]['.dims'] == dims0]
    if dim in dims0:
        x = np.concatenate(parts, axis=dims0.index(dim))
    else:
        meta['.dims'] = [dim] + list(meta['.dims'])
        x = np.stack(parts)
    return x, meta
def merge(dd, dim, new=None, variables=None):
    """Merge the datasets ``dd`` into one along dimension ``dim``.

    Variables that carry ``dim`` (or all selected variables when ``dim`` is
    new to every dataset) are merged along ``dim``; otherwise, if ``new`` is
    given, selected variables are merged along ``new``; everything else is
    taken from the first dataset.  ``variables`` limits which variables are
    merged (None means all).
    """
    merged = {'.': {'.': {}}}
    all_vars = list(set([v for d in dd for v in get_vars(d)]))
    all_dims = [k for d in dd for k in get_dims(d).keys()]
    is_new = dim not in all_dims
    for var in all_vars:
        var_dims = get_dims(dd[0], var)
        selected = variables is None or var in variables
        if (is_new and selected) or dim in var_dims:
            x, meta = merge_var(dd, var, dim)
        elif new is not None and selected:
            x, meta = merge_var(dd, var, new)
        else:
            # Unselected variable: carry over the first dataset's copy.
            x, meta = dd[0][var], dd[0]['.'][var]
        merged[var] = x
        merged['.'][var] = meta
    # Combine global attributes from every dataset (later datasets win).
    for d in dd:
        if '.' in d['.']:
            merged['.']['.'].update(d['.']['.'])
    return merged
def rename_dim(d, old, new):
    """Rename dimension ``old`` to ``new`` in every variable's metadata, in place.

    A no-op when the names are equal or the dataset has no metadata.
    """
    if old == new:
        return
    if '.' not in d:
        return
    for var in d['.'].keys():
        # Cleanup: the original bound this to `meta` and then ignored it,
        # re-indexing d['.'][var] on every access.
        meta = d['.'][var]
        if '.dims' not in meta:
            continue
        dims = meta['.dims']
        for i, dim in enumerate(dims):
            if dim == old:
                dims[i] = new
def rename(d, old, new):
    """Rename variable ``old`` to ``new`` (data and metadata), then the dim."""
    if old == new:
        return
    if old in d:
        # Move the data and its metadata entry under the new name.
        d[new] = d.pop(old)
        d['.'][new] = d['.'].pop(old)
    rename_dim(d, old, new)
def copy(d):
    """Return a copy of dataset ``d``: data shallow, metadata tree deep."""
    duplicate = {var: d[var] for var in d.keys() if not var.startswith('.')}
    duplicate['.'] = copy_.deepcopy(d['.'])
    return duplicate
def group_by(d, dim, group, func):
    """Aggregate dataset ``d`` along ``dim`` using the labels in ``group``.

    For every variable that has dimension ``dim``, rows sharing the same
    group label are reduced with ``func`` (e.g. np.mean), shrinking the
    dimension to one entry per distinct label, in sorted label order.
    Operates in place.

    NOTE(review): ``group == g`` below is used as a boolean mask, which
    assumes ``group`` is a numpy array aligned with ``dim``; a plain list
    would compare to a scalar False -- confirm callers pass an array.
    """
    groups = sorted(list(set(group)))
    vars = get_vars(d)
    n = len(groups)
    for var in vars:
        dims = d['.'][var]['.dims']
        try:
            i = dims.index(dim)
        except ValueError:
            # Variable does not depend on the grouped dimension; skip it.
            continue
        size = list(d[var].shape)
        size[i] = n
        x = np.empty(size, d[var].dtype)
        for j, g in enumerate(groups):
            mask = group == g
            # misc.sel_slice presumably builds an index tuple for the named
            # dims -- TODO confirm against utils.misc.
            slice_x = misc.sel_slice({dim: j}, dims)
            slice_y = misc.sel_slice({dim: mask}, dims)
            y = d[var][slice_y]
            x[slice_x] = func(y, axis=i)
        d[var] = x
| [
"datetime.datetime.strptime",
"numpy.take",
"numpy.stack",
"numpy.array",
"numpy.empty",
"numpy.concatenate",
"copy.deepcopy",
"numpy.nonzero"
] | [((2638, 2659), 'copy.deepcopy', 'copy_.deepcopy', (['meta0'], {}), '(meta0)\n', (2652, 2659), True, 'import copy as copy_\n'), ((4084, 4106), 'copy.deepcopy', 'copy_.deepcopy', (["d['.']"], {}), "(d['.'])\n", (4098, 4106), True, 'import copy as copy_\n'), ((1785, 1799), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1793, 1799), True, 'import numpy as np\n'), ((2707, 2784), 'numpy.concatenate', 'np.concatenate', (["[d[var] for d in dd if d['.'][var]['.dims'] == dims0]"], {'axis': 'i'}), "([d[var] for d in dd if d['.'][var]['.dims'] == dims0], axis=i)\n", (2721, 2784), True, 'import numpy as np\n'), ((2854, 2917), 'numpy.stack', 'np.stack', (["[d[var] for d in dd if d['.'][var]['.dims'] == dims0]"], {}), "([d[var] for d in dd if d['.'][var]['.dims'] == dims0])\n", (2862, 2917), True, 'import numpy as np\n'), ((4385, 4413), 'numpy.empty', 'np.empty', (['size', 'd[var].dtype'], {}), '(size, d[var].dtype)\n', (4393, 4413), True, 'import numpy as np\n'), ((2383, 2409), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['t', 'f'], {}), '(t, f)\n', (2403, 2409), True, 'import datetime as dt\n'), ((721, 735), 'numpy.array', 'np.array', (['idxs'], {}), '(idxs)\n', (729, 735), True, 'import numpy as np\n'), ((935, 965), 'numpy.take', 'np.take', (['d[name]', 'idxs'], {'axis': 'i'}), '(d[name], idxs, axis=i)\n', (942, 965), True, 'import numpy as np\n'), ((850, 866), 'numpy.nonzero', 'np.nonzero', (['idxs'], {}), '(idxs)\n', (860, 866), True, 'import numpy as np\n')] |
import os
import pandas as pd
import csv
import pickle
import numpy as np
import torch
import argparse
def write_answer_to_file(answer, args):
    """Write integer predictions as ``subtask2.csv`` under ``args.output``.

    Each row is ``<index>,<label>``; no header line is written.
    """
    if not os.path.exists(args.output):
        os.mkdir(args.output)
    target = os.path.join(args.output, "subtask2.csv")
    # Cast predictions to integer class labels before serializing.
    labels = answer.astype(int)
    frame = pd.DataFrame(labels, columns=['a']).astype(int)
    frame.to_csv(target, header=0)
def get_answer(args):
    """Load and return one model's pickled result from ``args.input``."""
    with open(args.input, "rb") as handle:
        return pickle.load(handle)
def enssmble(answer):
    """Weighted vote over per-model score matrices.

    Models with accuracy above 0.8 get weight ``(acc - 0.83) * 100``; the
    rest get weight 1.  Returns the argmax class index per row as a numpy
    array.
    """
    weights = [(entry["acc"] - 0.83) * 100. if entry["acc"] > 0.8 else 1
               for entry in answer]
    accs = [entry["acc"] for entry in answer]
    print("各个模型的acc :" , accs)
    combined = np.zeros_like(answer[0]["answer"])
    for w, entry in zip(weights, answer):
        combined += w * entry["answer"]
    return torch.argmax(torch.tensor(combined), dim=1).cpu().numpy()
if __name__ == "__main__":
    # CLI: --input is overwritten per model below; --output receives the CSV.
    parser = argparse.ArgumentParser(description='convert pickle file to csv file')
    parser.add_argument('--input', type=str, help='pickle file path', default="./answer_file/roberta_enhanced")
    parser.add_argument('--output', type=str, help='csv file path', default="./answer_file")
    args = parser.parse_args()
    answer = []
    # Candidate result files kept around from earlier experiments (unused
    # unless re-enabled in answer_list below).
    albert_86 = "albert_86_128/result.pkl"
    roberta_sliding_85 = "roberta_sliding-85.5/result.pkl"
    roberta_sliding_87 = "roberta-87-256-smoothing_lr7/result.pkl"
    ronghe = "ronghe/result.pkl"
    robert_90 = "answer_file/task2/roberta_90_1/result.pkl"
    xlnet_86 = "xlnet/result_large_origin.pkl"
    # roberta single model
    # roberta, albert
    # answer_list = ["roberta-87-256-smoothing_lr7/result.pkl", "albert_86_128/result.pkl"]
    # answer_list = [albert_86, roberta_sliding_85, roberta_sliding_87, xlnet_86]
    # # gogogogo
    # answer_list = ["roberta_90/result.pkl", "roberta_90_1/result.pkl"]
    # answer_list = ["roberta/2/86-6.pkl", "roberta/2/89.pkl"]
    # (The Chinese note below says: "Of the 5 models I take the best ones.")
    """
    5个模型我取最高的,
    """
    # Currently active ensemble: the two DeBERTa result files.
    answer_list= [
        # "albert_decay-89/result.pkl",
        # "roberta_90/result.pkl",
        # "roberta/2/89.pkl",
        # "xlnet/result_large_task2_1.pkl"
        "deberta/91_1.pkl",
        "deberta/91_2.pkl"
    ]
    answer_list = [os.path.join("./answer_file/task2",a) for a in answer_list]
    # Load every model's pickled result, ensemble them, and write the CSV.
    for a in answer_list:
        args.input = a
        answer.append(get_answer(args))
    answer = enssmble(answer)
    write_answer_to_file(answer, args)
    # Package the submission; assumes the `zip` binary is on PATH.
    os.system("zip -j ./answer_file/deberta_model_best.zip ./answer_file/subtask2.csv")
    # import IPython; IPython.embed(); exit(1)
    # assert answer["acc"] > 0.82, "什么臭鱼烂虾, 82都不到, guna"
    # answer = torch.argmax(torch.tensor(answer["answer"]), dim=1).cpu().numpy()
    # write_answer_to_file(answer, args)
| [
"os.path.exists",
"argparse.ArgumentParser",
"os.path.join",
"pickle.load",
"torch.tensor",
"os.mkdir",
"pandas.DataFrame",
"os.system",
"numpy.zeros_like"
] | [((225, 266), 'os.path.join', 'os.path.join', (['args.output', '"""subtask2.csv"""'], {}), "(args.output, 'subtask2.csv')\n", (237, 266), False, 'import os\n'), ((814, 848), 'numpy.zeros_like', 'np.zeros_like', (["answer[0]['answer']"], {}), "(answer[0]['answer'])\n", (827, 848), True, 'import numpy as np\n'), ((1052, 1122), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""convert pickle file to csv file"""'}), "(description='convert pickle file to csv file')\n", (1075, 1122), False, 'import argparse\n'), ((2576, 2665), 'os.system', 'os.system', (['"""zip -j ./answer_file/deberta_model_best.zip ./answer_file/subtask2.csv"""'], {}), "(\n 'zip -j ./answer_file/deberta_model_best.zip ./answer_file/subtask2.csv')\n", (2585, 2665), False, 'import os\n'), ((158, 185), 'os.path.exists', 'os.path.exists', (['args.output'], {}), '(args.output)\n', (172, 185), False, 'import os\n'), ((187, 208), 'os.mkdir', 'os.mkdir', (['args.output'], {}), '(args.output)\n', (195, 208), False, 'import os\n'), ((587, 604), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (598, 604), False, 'import pickle\n'), ((2351, 2389), 'os.path.join', 'os.path.join', (['"""./answer_file/task2"""', 'a'], {}), "('./answer_file/task2', a)\n", (2363, 2389), False, 'import os\n'), ((325, 360), 'pandas.DataFrame', 'pd.DataFrame', (['answer'], {'columns': "['a']"}), "(answer, columns=['a'])\n", (337, 360), True, 'import pandas as pd\n'), ((962, 987), 'torch.tensor', 'torch.tensor', (['real_answer'], {}), '(real_answer)\n', (974, 987), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu May 20 15:49:11 2021
@author: ANalundasan
DTC - Categorical Naive Bayes
"""
from sklearn.naive_bayes import GaussianNB
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import CategoricalNB
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# read in data and drop unnecessary columns
data = pd.read_csv('raw_data_numerical_target_features.csv', sep = ',')
# set X and Y values: first four columns are features, last is the target
X = data.values[:, 0:4]
Y = data.values[:, -1]
# separate train data vs test data (80/20 split, random each run)
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.20)
## CATEGORICAL NAIVE BAYES ##
clf = CategoricalNB()
clf.fit(Xtrain, Ytrain)
y_fitted = clf.predict(Xtest)
acc = accuracy_score(Ytest, y_fitted)
print("Categorical Naive Bayes: ", clf.predict(Xtest))
print("Accuracy score is", acc)
# solve for misclassification
# NOTE(review): |Ytest - y_fitted| counts one per error only when the class
# labels differ by exactly 1 (the plot below suggests labels 1 and 2);
# np.mean(Ytest != y_fitted) would be label-agnostic -- confirm the labels.
misrate = np.sum(np.abs(Ytest-y_fitted))/len(Ytest)
print("Misclassification rate is: ", misrate)
## Try to make a plot ##
# Split test samples by *predicted* class (1 = yes, 2 = no).
COVIDUNAW_yes = [i for i in range(len(y_fitted)) if y_fitted[i]==1]
COVIDUNAW_no = [i for i in range(len(y_fitted)) if y_fitted[i]==2]
# NOTE(review): these indices are positions within the *test* split but are
# used to index the full X -- the plotted points are not the test samples;
# Xtest should probably be indexed here. Confirm intent.
X_yes = X[COVIDUNAW_yes,:]
X_no = X[COVIDUNAW_no,:]
# Scatter the first two features, colored by predicted class.
plt.scatter(X_yes[:, 0], X_yes[:, 1], label='COVIDUNAW_yes', c='b')
plt.scatter(X_no[:, 0], X_no[:, 1], label='COVIDUNAW_no', c='r')
plt.legend()
# plt.ylabel("Why Unemployed")
# plt.xlabel("Count")
plt.title("Naive Bayes Classification Plot")
plt.show()
## GAUSSIAN NAIVE BAYES ##
# gnb = GaussianNB()
# gnb.fit(Xtrain, Ytrain)
# model = GaussianNB()
# model = CategoricalNB()
# model.fit(Xtrain, Ytrain)
# y_fitted = model.predict(Xtest)
# colors=np.array(["red", "blue"])
# # plt.scatter(X, color=colors[y_fitted])
# plt.scatter([Ytrain], label='Train', c='b')
# # plt.scatter([Ytrain], [y_fitted], label='Train', c='r')
# plt.legend()
# plt.show()
# plt.scatter([Xtest], [Ytest], color="red", label="0")
# plt.scatter([Ytrain], [y_fitted], color="blue", label="1")
| [
"numpy.abs",
"pandas.read_csv",
"sklearn.naive_bayes.CategoricalNB",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((448, 510), 'pandas.read_csv', 'pd.read_csv', (['"""raw_data_numerical_target_features.csv"""'], {'sep': '""","""'}), "('raw_data_numerical_target_features.csv', sep=',')\n", (459, 510), True, 'import pandas as pd\n'), ((656, 693), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)'}), '(X, Y, test_size=0.2)\n', (672, 693), False, 'from sklearn.model_selection import train_test_split\n'), ((735, 750), 'sklearn.naive_bayes.CategoricalNB', 'CategoricalNB', ([], {}), '()\n', (748, 750), False, 'from sklearn.naive_bayes import CategoricalNB\n'), ((814, 845), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Ytest', 'y_fitted'], {}), '(Ytest, y_fitted)\n', (828, 845), False, 'from sklearn.metrics import accuracy_score\n'), ((1293, 1360), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_yes[:, 0]', 'X_yes[:, 1]'], {'label': '"""COVIDUNAW_yes"""', 'c': '"""b"""'}), "(X_yes[:, 0], X_yes[:, 1], label='COVIDUNAW_yes', c='b')\n", (1304, 1360), True, 'import matplotlib.pyplot as plt\n'), ((1362, 1426), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_no[:, 0]', 'X_no[:, 1]'], {'label': '"""COVIDUNAW_no"""', 'c': '"""r"""'}), "(X_no[:, 0], X_no[:, 1], label='COVIDUNAW_no', c='r')\n", (1373, 1426), True, 'import matplotlib.pyplot as plt\n'), ((1428, 1440), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1438, 1440), True, 'import matplotlib.pyplot as plt\n'), ((1497, 1541), 'matplotlib.pyplot.title', 'plt.title', (['"""Naive Bayes Classification Plot"""'], {}), "('Naive Bayes Classification Plot')\n", (1506, 1541), True, 'import matplotlib.pyplot as plt\n'), ((1543, 1553), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1551, 1553), True, 'import matplotlib.pyplot as plt\n'), ((986, 1010), 'numpy.abs', 'np.abs', (['(Ytest - y_fitted)'], {}), '(Ytest - y_fitted)\n', (992, 1010), True, 'import numpy as np\n')] |
"""
Color palette choices
=====================
"""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white", context="talk")
rs = np.random.RandomState(7)
x = np.array(list("ABCDEFGHI"))
f, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(8, 6), sharex=True)
y1 = np.arange(1, 10)
sns.barplot(x, y1, ci=None, palette="BuGn_d", hline=.1, ax=ax1)
ax1.set_ylabel("Sequential")
y2 = y1 - 5
sns.barplot(x, y2, ci=None, palette="coolwarm", hline=0, ax=ax2)
ax2.set_ylabel("Diverging")
y3 = rs.choice(y1, 9, replace=False)
sns.barplot(x, y3, ci=None, palette="Paired", hline=.1, ax=ax3)
ax3.set_ylabel("Qualitative")
sns.despine(bottom=True)
plt.setp(f.axes, yticks=[])
plt.tight_layout(h_pad=3)
| [
"matplotlib.pyplot.setp",
"seaborn.set",
"seaborn.despine",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"seaborn.barplot",
"matplotlib.pyplot.subplots",
"numpy.random.RandomState"
] | [((126, 164), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""', 'context': '"""talk"""'}), "(style='white', context='talk')\n", (133, 164), True, 'import seaborn as sns\n'), ((170, 194), 'numpy.random.RandomState', 'np.random.RandomState', (['(7)'], {}), '(7)\n', (191, 194), True, 'import numpy as np\n'), ((250, 297), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(8, 6)', 'sharex': '(True)'}), '(3, 1, figsize=(8, 6), sharex=True)\n', (262, 297), True, 'import matplotlib.pyplot as plt\n'), ((304, 320), 'numpy.arange', 'np.arange', (['(1)', '(10)'], {}), '(1, 10)\n', (313, 320), True, 'import numpy as np\n'), ((321, 385), 'seaborn.barplot', 'sns.barplot', (['x', 'y1'], {'ci': 'None', 'palette': '"""BuGn_d"""', 'hline': '(0.1)', 'ax': 'ax1'}), "(x, y1, ci=None, palette='BuGn_d', hline=0.1, ax=ax1)\n", (332, 385), True, 'import seaborn as sns\n'), ((427, 491), 'seaborn.barplot', 'sns.barplot', (['x', 'y2'], {'ci': 'None', 'palette': '"""coolwarm"""', 'hline': '(0)', 'ax': 'ax2'}), "(x, y2, ci=None, palette='coolwarm', hline=0, ax=ax2)\n", (438, 491), True, 'import seaborn as sns\n'), ((558, 622), 'seaborn.barplot', 'sns.barplot', (['x', 'y3'], {'ci': 'None', 'palette': '"""Paired"""', 'hline': '(0.1)', 'ax': 'ax3'}), "(x, y3, ci=None, palette='Paired', hline=0.1, ax=ax3)\n", (569, 622), True, 'import seaborn as sns\n'), ((653, 677), 'seaborn.despine', 'sns.despine', ([], {'bottom': '(True)'}), '(bottom=True)\n', (664, 677), True, 'import seaborn as sns\n'), ((678, 705), 'matplotlib.pyplot.setp', 'plt.setp', (['f.axes'], {'yticks': '[]'}), '(f.axes, yticks=[])\n', (686, 705), True, 'import matplotlib.pyplot as plt\n'), ((706, 731), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'h_pad': '(3)'}), '(h_pad=3)\n', (722, 731), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import io
from PIL import Image
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
import matplotlib.pyplot as plt
from flask import Flask,render_template,redirect,url_for,request
import urllib3
from tensorflow.keras.preprocessing import image
import os
app = Flask(__name__)
model = load_model('shape2.h5')
# Image preprocessing/prediction function
def predict_(img_path, show=False):
img = image.load_img(img_path, target_size=(224, 224))
img_tensor = image.img_to_array(img) # (height, width, channels)
img_tensor = np.expand_dims(img_tensor, axis=0) # (1, height, width, channels), add a dimension because the model expects this shape: (batch_size, height, width, channels)
img_tensor /= 255. # imshow expects values in the range [0, 1]
prediction = model.predict(img_tensor)
if show:
plt.imshow(img_tensor[0])
plt.axis('off')
plt.show()
return prediction
# Home
@app.route('/', methods=['GET'])
def api():
return {
'userid':1,
'title':'Flask react app',
'completed': False
}
@app.route('/predict',methods = ['GET' , 'POST'])
def predictions():
error = ''
# Change image dir to fit needs
target_img = os.path.join(os.getcwd() , 'static/images')
if request.method == 'POST':
if (request.files):
file = request.files['file']
if file:
file.save(os.path.join(target_img , file.filename))
img_path = os.path.join(target_img , file.filename)
img = file.filename
predictions = predict_(img_path)
# Change to a switch-case later
if round(predictions[0][0]) == 0:
pred = 'Circle'
if round(predictions[0][0]) == 1:
pred = 'Rectangle'
if round(predictions[0][0]) == 2:
pred = 'Star'
if round(predictions[0][0]) == 3:
pred = 'Triangle'
else:
pred = 'No existe XD'
predictions = {
'Prediction': pred
}
else:
error = "Please draw a shape to predict."
if(len(error) == 0):
# No error
pass
else:
# Error
pass
else:
# Method no fue POST XD
pass
if __name__ == '__main__':
app.run(debug=True) | [
"tensorflow.keras.preprocessing.image.load_img",
"matplotlib.pyplot.imshow",
"flask.Flask",
"os.path.join",
"os.getcwd",
"tensorflow.keras.models.load_model",
"numpy.expand_dims",
"matplotlib.pyplot.axis",
"tensorflow.keras.preprocessing.image.img_to_array",
"matplotlib.pyplot.show"
] | [((364, 379), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (369, 379), False, 'from flask import Flask, render_template, redirect, url_for, request\n'), ((389, 412), 'tensorflow.keras.models.load_model', 'load_model', (['"""shape2.h5"""'], {}), "('shape2.h5')\n", (399, 412), False, 'from tensorflow.keras.models import load_model\n'), ((503, 551), 'tensorflow.keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (517, 551), False, 'from tensorflow.keras.preprocessing import image\n'), ((569, 592), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (587, 592), False, 'from tensorflow.keras.preprocessing import image\n'), ((657, 691), 'numpy.expand_dims', 'np.expand_dims', (['img_tensor'], {'axis': '(0)'}), '(img_tensor, axis=0)\n', (671, 691), True, 'import numpy as np\n'), ((994, 1019), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_tensor[0]'], {}), '(img_tensor[0])\n', (1004, 1019), True, 'import matplotlib.pyplot as plt\n'), ((1055, 1070), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1063, 1070), True, 'import matplotlib.pyplot as plt\n'), ((1079, 1089), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1087, 1089), True, 'import matplotlib.pyplot as plt\n'), ((1424, 1435), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1433, 1435), False, 'import os\n'), ((1673, 1712), 'os.path.join', 'os.path.join', (['target_img', 'file.filename'], {}), '(target_img, file.filename)\n', (1685, 1712), False, 'import os\n'), ((1604, 1643), 'os.path.join', 'os.path.join', (['target_img', 'file.filename'], {}), '(target_img, file.filename)\n', (1616, 1643), False, 'import os\n')] |
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import scipy
import scipy.spatial.distance
from scipy.stats import spearmanr
import torch
import utils
__author__ = "<NAME>"
__version__ = "CS224u, Stanford, Spring 2021"
def euclidean(u, v):
return scipy.spatial.distance.euclidean(u, v)
def vector_length(u):
return np.sqrt(u.dot(u))
def length_norm(u):
return u / vector_length(u)
def cosine(u, v):
return scipy.spatial.distance.cosine(u, v)
def matching(u, v):
return np.sum(np.minimum(u, v))
def jaccard(u, v):
return 1.0 - (matching(u, v) / np.sum(np.maximum(u, v)))
def neighbors(word, df, distfunc=cosine):
"""
Tool for finding the nearest neighbors of `word` in `df` according
to `distfunc`. The comparisons are between row vectors.
Parameters
----------
word : str
The anchor word. Assumed to be in `rownames`.
df : pd.DataFrame
The vector-space model.
distfunc : function mapping vector pairs to floats (default: `cosine`)
The measure of distance between vectors. Can also be `euclidean`,
`matching`, `jaccard`, as well as any other distance measure
between 1d vectors.
Raises
------
ValueError
If word is not in `df.index`.
Returns
-------
pd.Series
Ordered by closeness to `word`.
"""
if word not in df.index:
raise ValueError('{} is not in this VSM'.format(word))
w = df.loc[word]
dists = df.apply(lambda x: distfunc(w, x), axis=1)
return dists.sort_values()
def observed_over_expected(df):
col_totals = df.sum(axis=0)
total = col_totals.sum()
row_totals = df.sum(axis=1)
expected = np.outer(row_totals, col_totals) / total
oe = df / expected
return oe
def pmi(df, positive=True):
df = observed_over_expected(df)
# Silence distracting warnings about log(0):
with np.errstate(divide='ignore'):
df = np.log(df)
df[np.isinf(df)] = 0.0 # log(0) = 0
if positive:
df[df < 0] = 0.0
return df
def tfidf(df):
# Inverse document frequencies:
doccount = float(df.shape[1])
freqs = df.astype(bool).sum(axis=1)
idfs = np.log(doccount / freqs)
idfs[np.isinf(idfs)] = 0.0 # log(0) = 0
# Term frequencies:
col_totals = df.sum(axis=0)
tfs = df / col_totals
return (tfs.T * idfs).T
def ngram_vsm(df, n=2):
"""Create a character-level VSM from `df`.
Parameters
----------
df : pd.DataFrame
n : int
The n-gram size.
Returns
-------
pd.DataFrame
This will have the same column dimensionality as `df`, but the
rows will be expanded with representations giving the sum of
all the original rows in `df` that contain that row's n-gram.
"""
unigram2vecs = defaultdict(list)
for w, x in df.iterrows():
for c in get_character_ngrams(w, n):
unigram2vecs[c].append(x)
unigram2vecs = {c: np.array(x).sum(axis=0)
for c, x in unigram2vecs.items()}
cf = pd.DataFrame(unigram2vecs).T
cf.columns = df.columns
return cf
def get_character_ngrams(w, n):
"""Map a word to its character-level n-grams, with boundary
symbols '<w>' and '</w>'.
Parameters
----------
w : str
n : int
The n-gram size.
Returns
-------
list of str
"""
if n > 1:
w = ["<w>"] + list(w) + ["</w>"]
else:
w = list(w)
return ["".join(w[i: i+n]) for i in range(len(w)-n+1)]
def character_level_rep(word, cf, n=4):
"""Get a representation for `word` as the sum of all the
representations of `n`grams that it contains, according to `cf`.
Parameters
----------
word : str
The word to represent.
cf : pd.DataFrame
The character-level VSM (e.g, the output of `ngram_vsm`).
n : int
The n-gram size.
Returns
-------
np.array
"""
ngrams = get_character_ngrams(word, n)
ngrams = [n for n in ngrams if n in cf.index]
reps = cf.loc[ngrams].values
return reps.sum(axis=0)
def tsne_viz(df, colors=None, output_filename=None, figsize=(40, 50), random_state=None):
"""
2d plot of `df` using t-SNE, with the points labeled by `df.index`,
aligned with `colors` (defaults to all black).
Parameters
----------
df : pd.DataFrame
The matrix to visualize.
colors : list of colornames or None (default: None)
Optional list of colors for the vocab. The color names just
need to be interpretable by matplotlib. If they are supplied,
they need to have the same length as `df.index`. If `colors=None`,
then all the words are displayed in black.
output_filename : str (default: None)
If not None, then the output image is written to this location.
The filename suffix determines the image type. If `None`, then
`plt.plot()` is called, with the behavior determined by the
environment.
figsize : (int, int) (default: (40, 50))
Default size of the output in display units.
random_state : int or None
Optionally set the `random_seed` passed to `PCA` and `TSNE`.
"""
# Colors:
vocab = df.index
if not colors:
colors = ['black' for i in vocab]
# Recommended reduction via PCA or similar:
n_components = 50 if df.shape[1] >= 50 else df.shape[1]
dimreduce = PCA(n_components=n_components, random_state=random_state)
X = dimreduce.fit_transform(df)
# t-SNE:
tsne = TSNE(n_components=2, random_state=random_state)
tsnemat = tsne.fit_transform(X)
# Plot values:
xvals = tsnemat[: , 0]
yvals = tsnemat[: , 1]
# Plotting:
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)
ax.plot(xvals, yvals, marker='', linestyle='')
# Text labels:
for word, x, y, color in zip(vocab, xvals, yvals, colors):
try:
ax.annotate(word, (x, y), fontsize=8, color=color)
except UnicodeDecodeError: ## Python 2 won't cooperate!
pass
# Output:
if output_filename:
plt.savefig(output_filename, bbox_inches='tight')
else:
plt.show()
def lsa(df, k=100):
"""
Latent Semantic Analysis using pure scipy.
Parameters
----------
df : pd.DataFrame
The matrix to operate on.
k : int (default: 100)
Number of dimensions to truncate to.
Returns
-------
pd.DataFrame
The SVD-reduced version of `df` with dimension (m x k), where
m is the rowcount of mat and `k` is either the user-supplied
k or the column count of `mat`, whichever is smaller.
"""
rowmat, singvals, colmat = np.linalg.svd(df, full_matrices=False)
singvals = np.diag(singvals)
trunc = np.dot(rowmat[:, 0:k], singvals[0:k, 0:k])
return pd.DataFrame(trunc, index=df.index)
def hf_represent(batch_ids, model, layer=-1):
"""
Encode a batch of sequences of ids using a Hugging Face
Transformer-based model `model`. The model's `forward` method is
`output_hidden_states=True`, and we get the hidden states from
`layer`.
Parameters
----------
batch_ids : iterable, shape (n_examples, n_tokens)
Sequences of indices into the model vocabulary.
model : Hugging Face transformer model
later : int
The layer to return. This will get all the hidden states at
this layer. `layer=0` gives the embedding, and `layer=-1`
gives the final output states.
Returns
-------
Tensor of shape `(n_examples, n_tokens, n_dimensions)`
where `n_dimensions` is the dimensionality of the
Transformer model
"""
with torch.no_grad():
reps = model(batch_ids, output_hidden_states=True)
return reps.hidden_states[layer]
def hf_encode(text, tokenizer, add_special_tokens=False):
"""
Get the indices for the tokens in `text` according to `tokenizer`.
If no tokens can be obtained from `text`, then the tokenizer.unk_token`
is used as the only token.
Parameters
----------
text: str
tokenizer: Hugging Face tokenizer
add_special_tokens : bool
A Hugging Face parameter to the tokenizer.
Returns
-------
torch.Tensor of shape `(1, m)`
A batch of 1 example of `m` tokens`, where `m` is determined
by `text` and the nature of `tokenizer`.
"""
encoding = tokenizer.encode(
text,
add_special_tokens=add_special_tokens,
return_tensors='pt')
if encoding.shape[1] == 0:
text = tokenizer.unk_token
encoding = torch.tensor([[tokenizer.vocab[text]]])
return encoding
def mean_pooling(hidden_states):
"""
Get the mean along `axis=1` of a Tensor.
Parameters
----------
hidden_states : torch.Tensor, shape `(k, m, n)`
Where `k` is the number of examples, `m` is the number of vectors
for each example, and `n` is dimensionality of each vector.
Returns
-------
torch.Tensor of dimension `(k, n)`.
"""
_check_pooling_dimensionality(hidden_states)
return torch.mean(hidden_states, axis=1)
def max_pooling(hidden_states):
"""
Get the max values along `axis=1` of a Tensor.
Parameters
----------
hidden_states : torch.Tensor, shape `(k, m, n)`
Where `k` is the number of examples, `m` is the number of vectors
for each example, and `n` is dimensionality of each vector.
Raises
------
ValueError
If `hidden_states` does not have 3 dimensions.
Returns
-------
torch.Tensor of dimension `(k, n)`.
"""
_check_pooling_dimensionality(hidden_states)
return torch.amax(hidden_states, axis=1)
def min_pooling(hidden_states):
"""
Get the min values along `axis=1` of a Tensor.
Parameters
----------
hidden_states : torch.Tensor, shape `(k, m, n)`
Where `k` is the number of examples, `m` is the number of vectors
for each example, and `n` is dimensionality of each vector.
Raises
------
ValueError
If `hidden_states` does not have 3 dimensions.
Returns
-------
torch.Tensor of dimension `(k, n)`.
"""
_check_pooling_dimensionality(hidden_states)
return torch.amin(hidden_states, axis=1)
def last_pooling(hidden_states):
"""Get the final vector in second dimension (`axis=1`) of a Tensor.
Parameters
----------
hidden_states : torch.Tensor, shape (b, m, n)
Where b is the number of examples, m is the number of vectors
for each example, and `n` is dimensionality of each vector.
Raises
------
ValueError
If `hidden_states` does not have 3 dimensions.
Returns
-------
torch.Tensor of dimension `(k, n)`.
"""
_check_pooling_dimensionality(hidden_states)
return hidden_states[:, -1]
def _check_pooling_dimensionality(hidden_states):
if not len(hidden_states.shape) == 3:
raise ValueError(
"The input to the pooling function should have 3 dimensions: "
"it's a batch of k examples, where each example has m vectors, "
"each of dimensionality n. The function will pool the vectors "
"for each example, returning a Tensor of shape (k, n).")
def create_subword_pooling_vsm(vocab, tokenizer, model, layer=1, pool_func=mean_pooling):
vocab_ids = [hf_encode(w, tokenizer) for w in vocab]
vocab_hiddens = [hf_represent(w, model, layer=layer) for w in vocab_ids]
pooled = [pool_func(h) for h in vocab_hiddens]
pooled = [p.squeeze().cpu().numpy() for p in pooled]
return pd.DataFrame(pooled, index=vocab)
def word_relatedness_evaluation(dataset_df, vsm_df, distfunc=cosine):
"""
Main function for word relatedness evaluations used in the assignment
and bakeoff. The function makes predictions for word pairs in
`dataset_df` using `vsm_df` and `distfunc`, and it returns a copy of
`dataset_df` with a new column `'prediction'`, as well as the Spearman
rank correlation between those preductions and the `'score'` column
in `dataset_df`.
The prediction for a word pair (w1, w1) is determined by applying
`distfunc` to the representations of w1 and w2 in `vsm_df`. We return
the negative of this value since it is assumed that `distfunc` is a
distance function and the scores in `dataset_df` are for positive
relatedness.
Parameters
----------
dataset_df : pd.DataFrame
Required to have columns {'word1', 'word2', 'score'}.
vsm_df : pd.DataFrame
The vector space model used to get representations for the
words in `dataset_df`. The index must contain every word
represented in `dataset_df`.
distfunc : function mapping vector pairs to floats (default: `cosine`)
The measure of distance between vectors. Can also be `euclidean`,
`matching`, `jaccard`, as well as any other distance measure
between 1d vectors.
Raises
------
ValueError
If any words in `dataset_df` are not in the index of `vsm_df`.
Returns
-------
tuple (dataset_df, rho)
Where `dataset_df` is a `pd.DataFrame` -- a copy of the
input with a new column `'prediction'` -- and `rho` is a float
giving the Spearman rank correlation between the `'score'`
and `prediction` values.
"""
dataset_df = dataset_df.copy()
dataset_vocab = set(dataset_df.word1.values) | set(dataset_df.word2.values)
vsm_vocab = set(vsm_df.index)
missing = dataset_vocab - vsm_vocab
if missing:
raise ValueError(
"The following words are in the evaluation dataset but not in the "
"VSM. Please switch to a VSM with an appropriate vocabulary:\n"
"{}".format(sorted(missing)))
def predict(row):
x1 = vsm_df.loc[row.word1]
x2 = vsm_df.loc[row.word2]
return -distfunc(x1, x2)
dataset_df['prediction'] = dataset_df.apply(predict, axis=1)
rho = None
if 'score' in dataset_df.columns:
rho, pvalue = spearmanr(
dataset_df.score.values,
dataset_df.prediction.values)
return dataset_df, rho
| [
"numpy.log",
"numpy.array",
"scipy.spatial.distance.cosine",
"sklearn.decomposition.PCA",
"torch.mean",
"sklearn.manifold.TSNE",
"numpy.dot",
"pandas.DataFrame",
"scipy.stats.spearmanr",
"numpy.isinf",
"numpy.maximum",
"matplotlib.pyplot.savefig",
"numpy.outer",
"numpy.linalg.svd",
"matp... | [((417, 455), 'scipy.spatial.distance.euclidean', 'scipy.spatial.distance.euclidean', (['u', 'v'], {}), '(u, v)\n', (449, 455), False, 'import scipy\n'), ((606, 641), 'scipy.spatial.distance.cosine', 'scipy.spatial.distance.cosine', (['u', 'v'], {}), '(u, v)\n', (635, 641), False, 'import scipy\n'), ((2434, 2458), 'numpy.log', 'np.log', (['(doccount / freqs)'], {}), '(doccount / freqs)\n', (2440, 2458), True, 'import numpy as np\n'), ((3084, 3101), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3095, 3101), False, 'from collections import defaultdict\n'), ((5806, 5863), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_components', 'random_state': 'random_state'}), '(n_components=n_components, random_state=random_state)\n', (5809, 5863), False, 'from sklearn.decomposition import PCA\n'), ((5927, 5974), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'random_state': 'random_state'}), '(n_components=2, random_state=random_state)\n', (5931, 5974), False, 'from sklearn.manifold import TSNE\n'), ((6120, 6167), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': 'figsize'}), '(nrows=1, ncols=1, figsize=figsize)\n', (6132, 6167), True, 'import matplotlib.pyplot as plt\n'), ((7138, 7176), 'numpy.linalg.svd', 'np.linalg.svd', (['df'], {'full_matrices': '(False)'}), '(df, full_matrices=False)\n', (7151, 7176), True, 'import numpy as np\n'), ((7193, 7210), 'numpy.diag', 'np.diag', (['singvals'], {}), '(singvals)\n', (7200, 7210), True, 'import numpy as np\n'), ((7224, 7266), 'numpy.dot', 'np.dot', (['rowmat[:, 0:k]', 'singvals[0:k, 0:k]'], {}), '(rowmat[:, 0:k], singvals[0:k, 0:k])\n', (7230, 7266), True, 'import numpy as np\n'), ((7279, 7314), 'pandas.DataFrame', 'pd.DataFrame', (['trunc'], {'index': 'df.index'}), '(trunc, index=df.index)\n', (7291, 7314), True, 'import pandas as pd\n'), ((9655, 9688), 'torch.mean', 'torch.mean', (['hidden_states'], {'axis': '(1)'}), 
'(hidden_states, axis=1)\n', (9665, 9688), False, 'import torch\n'), ((10258, 10291), 'torch.amax', 'torch.amax', (['hidden_states'], {'axis': '(1)'}), '(hidden_states, axis=1)\n', (10268, 10291), False, 'import torch\n'), ((10861, 10894), 'torch.amin', 'torch.amin', (['hidden_states'], {'axis': '(1)'}), '(hidden_states, axis=1)\n', (10871, 10894), False, 'import torch\n'), ((12270, 12303), 'pandas.DataFrame', 'pd.DataFrame', (['pooled'], {'index': 'vocab'}), '(pooled, index=vocab)\n', (12282, 12303), True, 'import pandas as pd\n'), ((686, 702), 'numpy.minimum', 'np.minimum', (['u', 'v'], {}), '(u, v)\n', (696, 702), True, 'import numpy as np\n'), ((1923, 1955), 'numpy.outer', 'np.outer', (['row_totals', 'col_totals'], {}), '(row_totals, col_totals)\n', (1931, 1955), True, 'import numpy as np\n'), ((2133, 2161), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (2144, 2161), True, 'import numpy as np\n'), ((2177, 2187), 'numpy.log', 'np.log', (['df'], {}), '(df)\n', (2183, 2187), True, 'import numpy as np\n'), ((2196, 2208), 'numpy.isinf', 'np.isinf', (['df'], {}), '(df)\n', (2204, 2208), True, 'import numpy as np\n'), ((2469, 2483), 'numpy.isinf', 'np.isinf', (['idfs'], {}), '(idfs)\n', (2477, 2483), True, 'import numpy as np\n'), ((3332, 3358), 'pandas.DataFrame', 'pd.DataFrame', (['unigram2vecs'], {}), '(unigram2vecs)\n', (3344, 3358), True, 'import pandas as pd\n'), ((6515, 6564), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_filename'], {'bbox_inches': '"""tight"""'}), "(output_filename, bbox_inches='tight')\n", (6526, 6564), True, 'import matplotlib.pyplot as plt\n'), ((6585, 6595), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6593, 6595), True, 'import matplotlib.pyplot as plt\n'), ((8171, 8186), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8184, 8186), False, 'import torch\n'), ((9128, 9167), 'torch.tensor', 'torch.tensor', (['[[tokenizer.vocab[text]]]'], {}), 
'([[tokenizer.vocab[text]]])\n', (9140, 9167), False, 'import torch\n'), ((14814, 14878), 'scipy.stats.spearmanr', 'spearmanr', (['dataset_df.score.values', 'dataset_df.prediction.values'], {}), '(dataset_df.score.values, dataset_df.prediction.values)\n', (14823, 14878), False, 'from scipy.stats import spearmanr\n'), ((771, 787), 'numpy.maximum', 'np.maximum', (['u', 'v'], {}), '(u, v)\n', (781, 787), True, 'import numpy as np\n'), ((3243, 3254), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3251, 3254), True, 'import numpy as np\n')] |
import os, sys
from os import system
import tensorflow as tf
import numpy as np
np.set_printoptions(threshold=sys.maxsize)
##############################################################################
##############################################################################
## System Paths ##
path = './'
parameters = {'0': 'DeepCas9_example_input.txt'} # Dictionary can be expanded for multiple test parameters
## Run Parameters ##
TEST_NUM_SET = [0] # List can be expanded in case of multiple test parameters
best_model_path_list = ['./DeepCas9_Final/']
# Model
length = 30
class Deep_xCas9(object):
def __init__(self, filter_size, filter_num, node_1 = 80, node_2 = 60, l_rate = 0.005):
self.inputs = tf.placeholder(tf.float32, [None, 1, length, 4])
self.targets = tf.placeholder(tf.float32, [None, 1])
self.is_training = tf.placeholder(tf.bool)
def create_new_conv_layer(input_data, num_input_channels, num_filters, filter_shape, pool_shape, name):
# setup the filter input shape for tf.nn.conv_2d
conv_filt_shape = [filter_shape[0], filter_shape[1], num_input_channels,
num_filters]
# initialise weights and bias for the filter
weights = tf.Variable(tf.truncated_normal(conv_filt_shape, stddev=0.03),
name=name+'_W')
bias = tf.Variable(tf.truncated_normal([num_filters]), name=name+'_b')
# setup the convolutional layer operation
out_layer = tf.nn.conv2d(input_data, weights, [1, 1, 1, 1], padding='VALID')
# add the bias
out_layer += bias
# apply a ReLU non-linear activation
out_layer = tf.layers.dropout(tf.nn.relu(out_layer), 0.3, self.is_training)
# now perform max pooling
ksize = [1, pool_shape[0], pool_shape[1], 1]
strides = [1, 1, 2, 1]
out_layer = tf.nn.avg_pool(out_layer, ksize=ksize, strides=strides,
padding='SAME')
return out_layer
#def end: create_new_conv_layer
L_pool_0 = create_new_conv_layer(self.inputs, 4, filter_num[0], [1, filter_size[0]], [1, 2], name='conv1')
L_pool_1 = create_new_conv_layer(self.inputs, 4, filter_num[1], [1, filter_size[1]], [1, 2], name='conv2')
L_pool_2 = create_new_conv_layer(self.inputs, 4, filter_num[2], [1, filter_size[2]], [1, 2], name='conv3')
with tf.variable_scope('Fully_Connected_Layer1'):
layer_node_0 = int((length-filter_size[0])/2)+1
node_num_0 = layer_node_0*filter_num[0]
layer_node_1 = int((length-filter_size[1])/2)+1
node_num_1 = layer_node_1*filter_num[1]
layer_node_2 = int((length-filter_size[2])/2)+1
node_num_2 = layer_node_2*filter_num[2]
L_flatten_0 = tf.reshape(L_pool_0, [-1, node_num_0])
L_flatten_1 = tf.reshape(L_pool_1, [-1, node_num_1])
L_flatten_2 = tf.reshape(L_pool_2, [-1, node_num_2])
L_flatten = tf.concat([L_flatten_0, L_flatten_1, L_flatten_2], 1, name='concat')
node_num = node_num_0 + node_num_1 + node_num_2
W_fcl1 = tf.get_variable("W_fcl1", shape=[node_num, node_1])
B_fcl1 = tf.get_variable("B_fcl1", shape=[node_1])
L_fcl1_pre = tf.nn.bias_add(tf.matmul(L_flatten, W_fcl1), B_fcl1)
L_fcl1 = tf.nn.relu(L_fcl1_pre)
L_fcl1_drop = tf.layers.dropout(L_fcl1, 0.3, self.is_training)
with tf.variable_scope('Fully_Connected_Layer2'):
W_fcl2 = tf.get_variable("W_fcl2", shape=[node_1, node_2])
B_fcl2 = tf.get_variable("B_fcl2", shape=[node_2])
L_fcl2_pre = tf.nn.bias_add(tf.matmul(L_fcl1_drop, W_fcl2), B_fcl2)
L_fcl2 = tf.nn.relu(L_fcl2_pre)
L_fcl2_drop = tf.layers.dropout(L_fcl2, 0.3, self.is_training)
with tf.variable_scope('Output_Layer'):
W_out = tf.get_variable("W_out", shape=[node_2, 1])#, initializer=tf.contrib.layers.xavier_initializer())
B_out = tf.get_variable("B_out", shape=[1])#, initializer=tf.contrib.layers.xavier_initializer())
self.outputs = tf.nn.bias_add(tf.matmul(L_fcl2_drop, W_out), B_out)
# Define loss function and optimizer
self.obj_loss = tf.reduce_mean(tf.square(self.targets - self.outputs))
self.optimizer = tf.train.AdamOptimizer(l_rate).minimize(self.obj_loss)
#def end: def __init__
#class end: Deep_xCas9
def Model_Finaltest(sess, TEST_X, filter_size, filter_num, if3d, model, load_episode, best_model_path):
test_batch = 500
test_spearman = 0.0
optimizer = model.optimizer
TEST_Z = np.zeros((TEST_X.shape[0], 1), dtype=float)
for i in range(int(np.ceil(float(TEST_X.shape[0])/float(test_batch)))):
Dict = {model.inputs: TEST_X[i*test_batch:(i+1)*test_batch], model.is_training: False}
TEST_Z[i*test_batch:(i+1)*test_batch] = sess.run([model.outputs], feed_dict=Dict)[0]
OUT = open("RANK_final_{}.txt".format(best_model_path.split('/')[1]), "a")
OUT.write("Testing final \n {} ".format(tuple(TEST_Z.reshape([np.shape(TEST_Z)[0]]))))
OUT.write("\n")
OUT.close()
return
#def end: Model_Finaltest
def preprocess_seq(data):
print("Start preprocessing the sequence done 2d")
length = 30
DATA_X = np.zeros((len(data),1,length,4), dtype=int)
print(np.shape(data), len(data), length)
for l in range(len(data)):
for i in range(length):
try: data[l][i]
except: print(data[l], i, length, len(data))
if data[l][i]in "Aa": DATA_X[l, 0, i, 0] = 1
elif data[l][i] in "Cc": DATA_X[l, 0, i, 1] = 1
elif data[l][i] in "Gg": DATA_X[l, 0, i, 2] = 1
elif data[l][i] in "Tt": DATA_X[l, 0, i, 3] = 1
elif data[l][i] in "Nn": DATA_X[l, 0, i, 0] = 1
else:
print("Non-ATGC character " + data[l])
print(i)
print(data[l][i])
sys.exit()
#loop end: i
#loop end: l
print("Preprocessing the sequence done")
return DATA_X
#def end: preprocess_seq
def getseq(filenum):
param = parameters['%s' % filenum]
FILE = open(path+param, "r")
data = FILE.readlines()
data_n = len(data)
seq = []
for l in range(17, data_n):
try:
data_split = data[l].split(',')
seq.append(data_split[0])
except:
print(data[l])
seq.append(data[l])
#loop end: l
FILE.close()
processed_full_seq = preprocess_seq(seq)
return processed_full_seq, seq
#def end: getseq
#TensorFlow config
conf = tf.ConfigProto()
conf.gpu_options.allow_growth = True
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
best_model_cv = 0.0
best_model_list = []
for best_model_path in best_model_path_list:
for modelname in os.listdir(best_model_path):
if "meta" in modelname:
best_model_list.append(modelname[:-5])
#loop end: best_model_path
TEST_X = []
TEST_X_nohot = []
for TEST_NUM in TEST_NUM_SET:
tmp_X, tmp_X_nohot = getseq(TEST_NUM)
TEST_X.append(tmp_X)
TEST_X_nohot.append(tmp_X_nohot)
#loop end: TEST_NUM
for index in range(len(best_model_list)):
best_model_path = best_model_path_list[index]
best_model = best_model_list[index]
valuelist = best_model.split('-')
fulllist = []
for value in valuelist:
if value == 'True': value=True
elif value == 'False': value=False
else:
try:
value=int(value)
except:
try: value=float(value)
except: pass
fulllist.append(value)
#loop end: value
print(fulllist[2:])
filter_size_1, filter_size_2, filter_size_3, filter_num_1, filter_num_2, filter_num_3, l_rate, load_episode, node_1, node_2 = fulllist[2:]
filter_size = [filter_size_1, filter_size_2, filter_size_3]
filter_num = [filter_num_1, filter_num_2, filter_num_3]
if3d = False
inception = False
args = [filter_size, filter_num, l_rate, load_episode]
tf.reset_default_graph()
with tf.Session(config=conf) as sess:
sess.run(tf.global_variables_initializer())
model = Deep_xCas9(filter_size, filter_num, node_1, node_2, args[2])
saver = tf.train.Saver()
saver.restore(sess, best_model_path + best_model)
OUT = open("RANK_final_{}.txt".format(best_model_path.split('/')[1]), "a")
OUT.write("{}".format(best_model))
OUT.write("\n")
OUT.close()
TEST_Y = []
for i in range(len(TEST_NUM_SET)):
print ("TEST_NUM : {}".format(TEST_NUM_SET[i]))
OUT = open("RANK_final_{}.txt".format(best_model_path.split('/')[1]), "a")
OUT.write("\n")
OUT.write("TEST_FILE : {}".format(parameters['{}'.format(TEST_NUM_SET[i])]))
OUT.write("\n")
OUT.close()
Model_Finaltest(sess, TEST_X[i], filter_size, filter_num, if3d, model, load_episode, best_model_path)
#loop end: i
OUT = open("RANK_final_{}.txt".format(best_model_path.split('/')[1]), "a")
OUT.write("\n")
OUT.close()
| [
"tensorflow.get_variable",
"sys.exit",
"os.listdir",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.concat",
"tensorflow.layers.dropout",
"tensorflow.matmul",
"tensorflow.square",
"tensorflow.ConfigProto",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.conv2d",
"tensorflow.var... | [((81, 123), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (100, 123), True, 'import numpy as np\n'), ((6996, 7012), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (7010, 7012), True, 'import tensorflow as tf\n'), ((4940, 4983), 'numpy.zeros', 'np.zeros', (['(TEST_X.shape[0], 1)'], {'dtype': 'float'}), '((TEST_X.shape[0], 1), dtype=float)\n', (4948, 4983), True, 'import numpy as np\n'), ((7248, 7275), 'os.listdir', 'os.listdir', (['best_model_path'], {}), '(best_model_path)\n', (7258, 7275), False, 'import os, sys\n'), ((8509, 8533), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (8531, 8533), True, 'import tensorflow as tf\n'), ((773, 821), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1, length, 4]'], {}), '(tf.float32, [None, 1, length, 4])\n', (787, 821), True, 'import tensorflow as tf\n'), ((852, 889), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]'], {}), '(tf.float32, [None, 1])\n', (866, 889), True, 'import tensorflow as tf\n'), ((920, 943), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {}), '(tf.bool)\n', (934, 943), True, 'import tensorflow as tf\n'), ((5660, 5674), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (5668, 5674), True, 'import numpy as np\n'), ((8543, 8566), 'tensorflow.Session', 'tf.Session', ([], {'config': 'conf'}), '(config=conf)\n', (8553, 8566), True, 'import tensorflow as tf\n'), ((8722, 8738), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (8736, 8738), True, 'import tensorflow as tf\n'), ((1619, 1683), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['input_data', 'weights', '[1, 1, 1, 1]'], {'padding': '"""VALID"""'}), "(input_data, weights, [1, 1, 1, 1], padding='VALID')\n", (1631, 1683), True, 'import tensorflow as tf\n'), ((2041, 2112), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['out_layer'], {'ksize': 'ksize', 'strides': 
'strides', 'padding': '"""SAME"""'}), "(out_layer, ksize=ksize, strides=strides, padding='SAME')\n", (2055, 2112), True, 'import tensorflow as tf\n'), ((2580, 2623), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Fully_Connected_Layer1"""'], {}), "('Fully_Connected_Layer1')\n", (2597, 2623), True, 'import tensorflow as tf\n'), ((2994, 3032), 'tensorflow.reshape', 'tf.reshape', (['L_pool_0', '[-1, node_num_0]'], {}), '(L_pool_0, [-1, node_num_0])\n', (3004, 3032), True, 'import tensorflow as tf\n'), ((3060, 3098), 'tensorflow.reshape', 'tf.reshape', (['L_pool_1', '[-1, node_num_1]'], {}), '(L_pool_1, [-1, node_num_1])\n', (3070, 3098), True, 'import tensorflow as tf\n'), ((3126, 3164), 'tensorflow.reshape', 'tf.reshape', (['L_pool_2', '[-1, node_num_2]'], {}), '(L_pool_2, [-1, node_num_2])\n', (3136, 3164), True, 'import tensorflow as tf\n'), ((3192, 3260), 'tensorflow.concat', 'tf.concat', (['[L_flatten_0, L_flatten_1, L_flatten_2]', '(1)'], {'name': '"""concat"""'}), "([L_flatten_0, L_flatten_1, L_flatten_2], 1, name='concat')\n", (3201, 3260), True, 'import tensorflow as tf\n'), ((3352, 3403), 'tensorflow.get_variable', 'tf.get_variable', (['"""W_fcl1"""'], {'shape': '[node_num, node_1]'}), "('W_fcl1', shape=[node_num, node_1])\n", (3367, 3403), True, 'import tensorflow as tf\n'), ((3431, 3472), 'tensorflow.get_variable', 'tf.get_variable', (['"""B_fcl1"""'], {'shape': '[node_1]'}), "('B_fcl1', shape=[node_1])\n", (3446, 3472), True, 'import tensorflow as tf\n'), ((3580, 3602), 'tensorflow.nn.relu', 'tf.nn.relu', (['L_fcl1_pre'], {}), '(L_fcl1_pre)\n', (3590, 3602), True, 'import tensorflow as tf\n'), ((3630, 3678), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['L_fcl1', '(0.3)', 'self.is_training'], {}), '(L_fcl1, 0.3, self.is_training)\n', (3647, 3678), True, 'import tensorflow as tf\n'), ((3693, 3736), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Fully_Connected_Layer2"""'], {}), "('Fully_Connected_Layer2')\n", (3710, 3736), True, 
'import tensorflow as tf\n'), ((3765, 3814), 'tensorflow.get_variable', 'tf.get_variable', (['"""W_fcl2"""'], {'shape': '[node_1, node_2]'}), "('W_fcl2', shape=[node_1, node_2])\n", (3780, 3814), True, 'import tensorflow as tf\n'), ((3842, 3883), 'tensorflow.get_variable', 'tf.get_variable', (['"""B_fcl2"""'], {'shape': '[node_2]'}), "('B_fcl2', shape=[node_2])\n", (3857, 3883), True, 'import tensorflow as tf\n'), ((3993, 4015), 'tensorflow.nn.relu', 'tf.nn.relu', (['L_fcl2_pre'], {}), '(L_fcl2_pre)\n', (4003, 4015), True, 'import tensorflow as tf\n'), ((4043, 4091), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['L_fcl2', '(0.3)', 'self.is_training'], {}), '(L_fcl2, 0.3, self.is_training)\n', (4060, 4091), True, 'import tensorflow as tf\n'), ((4106, 4139), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Output_Layer"""'], {}), "('Output_Layer')\n", (4123, 4139), True, 'import tensorflow as tf\n'), ((4168, 4211), 'tensorflow.get_variable', 'tf.get_variable', (['"""W_out"""'], {'shape': '[node_2, 1]'}), "('W_out', shape=[node_2, 1])\n", (4183, 4211), True, 'import tensorflow as tf\n'), ((4293, 4328), 'tensorflow.get_variable', 'tf.get_variable', (['"""B_out"""'], {'shape': '[1]'}), "('B_out', shape=[1])\n", (4308, 4328), True, 'import tensorflow as tf\n'), ((4551, 4589), 'tensorflow.square', 'tf.square', (['(self.targets - self.outputs)'], {}), '(self.targets - self.outputs)\n', (4560, 4589), True, 'import tensorflow as tf\n'), ((8593, 8626), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8624, 8626), True, 'import tensorflow as tf\n'), ((1339, 1388), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['conv_filt_shape'], {'stddev': '(0.03)'}), '(conv_filt_shape, stddev=0.03)\n', (1358, 1388), True, 'import tensorflow as tf\n'), ((1488, 1522), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[num_filters]'], {}), '([num_filters])\n', (1507, 1522), True, 'import tensorflow as tf\n'), ((1834, 
1855), 'tensorflow.nn.relu', 'tf.nn.relu', (['out_layer'], {}), '(out_layer)\n', (1844, 1855), True, 'import tensorflow as tf\n'), ((3515, 3543), 'tensorflow.matmul', 'tf.matmul', (['L_flatten', 'W_fcl1'], {}), '(L_flatten, W_fcl1)\n', (3524, 3543), True, 'import tensorflow as tf\n'), ((3926, 3956), 'tensorflow.matmul', 'tf.matmul', (['L_fcl1_drop', 'W_fcl2'], {}), '(L_fcl1_drop, W_fcl2)\n', (3935, 3956), True, 'import tensorflow as tf\n'), ((4425, 4454), 'tensorflow.matmul', 'tf.matmul', (['L_fcl2_drop', 'W_out'], {}), '(L_fcl2_drop, W_out)\n', (4434, 4454), True, 'import tensorflow as tf\n'), ((4618, 4648), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['l_rate'], {}), '(l_rate)\n', (4640, 4648), True, 'import tensorflow as tf\n'), ((5395, 5411), 'numpy.shape', 'np.shape', (['TEST_Z'], {}), '(TEST_Z)\n', (5403, 5411), True, 'import numpy as np\n'), ((6293, 6303), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6301, 6303), False, 'import os, sys\n')] |
import numpy as np
from scipy.stats import norm
def make_grid(xx, yy):
    """
    Build a square mesh from `xx` and `yy`.

    Returns two n-by-n matrices (n = len(xx)): the first holds the x values
    and the second the y values of the cartesian product of `xx` and `yy`.
    """
    n = len(xx)
    mesh_x, mesh_y = np.meshgrid(xx, yy)
    pairs = np.array([mesh_x.ravel(), mesh_y.ravel()]).T
    return pairs[:, 0].reshape(n, n), pairs[:, 1].reshape(n, n)
def plot(model, X, Y, axes, cmap, N_plot=100):
    """Draw the mixture model's posterior density and component means.

    axes[0] receives a log-density contour plot overlaid with the training
    data; axes[1] shows each component mean, sized by its mixing weight.
    """
    grid_x = np.linspace(X.min() - 1, X.max() + 1, N_plot)[:, None]
    grid_y = np.linspace(Y.min() - 1, Y.max() + 1, N_plot)
    pis, mus, sigmas = [t.numpy() for t in model.eval_network(grid_x)]
    # Mixture density p(y | x): per-component normal pdf, weighted and summed.
    component_pdf = norm.pdf(grid_y[:, None, None], loc=mus[None, :, :],
                             scale=sigmas[None, :, :])
    density = np.sum(component_pdf * pis[None, :, :], axis=-1)
    mesh_x, mesh_y = make_grid(grid_x, grid_y)
    axes[0].set_title("Posterior density and trainings data")
    axes[0].contourf(mesh_x, mesh_y, np.log(density), 500, cmap=cmap, vmin=-5, vmax=5)
    axes[0].plot(X, Y, 'ro', alpha=0.2, ms=3, label="data")
    axes[0].legend(loc=4)
    axes[1].set_title(r"$\mu_m(x)$ and their relative contribution shown by size")
    axes[1].scatter(np.repeat(grid_x.flatten(), repeats=mus.shape[1]),
                    mus.flatten(),
                    s=pis.flatten() * 20)
| [
"numpy.meshgrid",
"numpy.log",
"scipy.stats.norm.pdf",
"numpy.sum"
] | [((275, 294), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'yy'], {}), '(xx, yy)\n', (286, 294), True, 'import numpy as np\n'), ((673, 747), 'scipy.stats.norm.pdf', 'norm.pdf', (['yy[:, None, None]'], {'loc': 'mus[None, :, :]', 'scale': 'sigmas[None, :, :]'}), '(yy[:, None, None], loc=mus[None, :, :], scale=sigmas[None, :, :])\n', (681, 747), False, 'from scipy.stats import norm\n'), ((760, 800), 'numpy.sum', 'np.sum', (['(probs * pis[None, :, :])'], {'axis': '(-1)'}), '(probs * pis[None, :, :], axis=-1)\n', (766, 800), True, 'import numpy as np\n'), ((939, 952), 'numpy.log', 'np.log', (['probs'], {}), '(probs)\n', (945, 952), True, 'import numpy as np\n')] |
#import torch.nn as nn
import torch
from torch.nn import functional as F
#from PIL import Image
import numpy as np
import pandas as pd
#import os
import os.path as osp
import shutil
#import math
def save_checkpoint(state, best_pred, epoch, is_best, checkpoint_path, filename='./checkpoint/checkpoint.pth.tar'):
    """Serialize `state` to `filename`; when `is_best`, additionally copy it
    into `checkpoint_path` under a name embedding the epoch and score."""
    torch.save(state, filename)
    if not is_best:
        return
    best_name = 'model_{:03d}_{:.4f}.pth.tar'.format(epoch + 1, best_pred)
    shutil.copyfile(filename, osp.join(checkpoint_path, best_name))
def adjust_learning_rate(opt, optimizer, epoch):
    """
    Sets the learning rate to the initial LR decayed by 10 every 30 epochs(step = 30)
    """
    if opt.lr_mode == 'step':
        # Step decay: divide by 10 every opt.step epochs.
        new_lr = opt.lr * 0.1 ** (epoch // opt.step)
    elif opt.lr_mode == 'poly':
        # Polynomial decay towards zero over opt.num_epochs.
        new_lr = opt.lr * (1 - epoch / opt.num_epochs) ** 0.9
    else:
        raise ValueError('Unknown lr mode {}'.format(opt.lr_mode))
    for group in optimizer.param_groups:
        group['lr'] = new_lr
    return new_lr
def one_hot_it(label, label_info):
    """Convert an RGB label image [H, W, 3] into a boolean semantic map
    [H, W, num_classes], one channel per entry of `label_info`."""
    channels = []
    for class_name in label_info:
        rgb = label_info[class_name]
        # A pixel belongs to this class when all colour components match.
        channels.append(np.all(np.equal(label, rgb), axis=-1))
    return np.stack(channels, axis=-1)
def compute_score(predict, target, forground=1, smooth=1):
    """Binary segmentation metrics for one predicted/ground-truth mask pair.

    Unlike the previous implementation, the inputs are NOT modified in
    place; pixels different from `forground` are simply treated as
    background when building the masks.

    Args:
        predict: 2-D predicted mask.
        target: 2-D ground-truth mask of the same shape.
        forground: label value counted as foreground.
        smooth: additive smoothing term that avoids division by zero.

    Returns:
        Tuple (dice, precsion, jaccard, Sensitivity, Specificity).
    """
    predict = np.asarray(predict)
    target = np.asarray(target)
    assert predict.shape == target.shape
    pred_fg = predict == forground
    targ_fg = target == forground
    overlap = (pred_fg & targ_fg).sum()              # TP
    union = pred_fg.sum() + targ_fg.sum() - overlap  # TP + FP + FN
    FP = pred_fg.sum() - overlap                     # FP
    TN = target.shape[0] * target.shape[1] - union   # TN
    dice = (2 * overlap + smooth) / (union + overlap + smooth)
    # Pixel agreement on the binarized masks (both foreground or both background);
    # equivalent to the original comparison after zeroing non-foreground pixels.
    precsion = ((pred_fg == targ_fg).sum() + smooth) / (target.shape[0] * target.shape[1] + smooth)
    jaccard = (overlap + smooth) / (union + smooth)
    Sensitivity = (overlap + smooth) / (targ_fg.sum() + smooth)
    Specificity = (TN + smooth) / (FP + TN + smooth)
    return dice, precsion, jaccard, Sensitivity, Specificity
def eval_multi_seg(predict, target, num_classes):
    """Per-class smoothed Dice over a batch of multi-class predictions.

    Args:
        predict: tensor of predicted class indices (already argmax-ed).
        target: tensor of ground-truth class indices, same shape.
        num_classes: total class count; class 0 (background) is skipped.

    Returns:
        List of Dice scores for classes 1..num_classes-1.
    """
    pred_seg = predict.data.cpu().numpy()
    # np.int was removed in NumPy 1.24; use the explicit 64-bit alias.
    label_seg = target.data.cpu().numpy().astype(dtype=np.int64)
    assert pred_seg.shape == label_seg.shape
    Dice = []
    for classes in range(1, num_classes):
        overlap = ((pred_seg == classes) * (label_seg == classes)).sum()
        union = (pred_seg == classes).sum() + (label_seg == classes).sum()
        # 0.1 smoothing keeps the score defined when a class is absent.
        Dice.append((2 * overlap + 0.1) / (union + 0.1))
    return Dice
def eval_seg(predict, target, forground=1):
    """Per-sample binary segmentation metrics for a batch of logits.

    Args:
        predict: tensor of raw logits; thresholded via sigmoid at 0.5.
        target: ground-truth tensor, same shape as `predict`.
        forground: label value counted as foreground (now actually
            forwarded to compute_score; previously it was ignored).

    Returns:
        Tuple of lists (Dice, Precsion, Jaccard), one entry per sample.
    """
    pred_seg = torch.round(torch.sigmoid(predict)).int()
    pred_seg = pred_seg.data.cpu().numpy()
    # np.int was removed in NumPy 1.24; use the explicit 64-bit alias.
    label_seg = target.data.cpu().numpy().astype(dtype=np.int64)
    assert pred_seg.shape == label_seg.shape
    Dice = []
    Precsion = []
    Jaccard = []
    for i in range(pred_seg.shape[0]):
        # compute_score returns 5 metrics; the original 3-way unpack here
        # raised ValueError. Only the first three are reported.
        dice, precsion, jaccard, _, _ = compute_score(pred_seg[i], label_seg[i], forground)
        Dice.append(dice)
        Precsion.append(precsion)
        Jaccard.append(jaccard)
    return Dice, Precsion, Jaccard
def batch_pix_accuracy(pred, label, nclass=1):
    """Pixel-accuracy counts for a batch of predictions.

    Args:
        pred: raw network output; logits for nclass==1, or per-class
            scores [B, C, ...] for nclass>1.
        label: ground-truth label tensor with matching spatial shape.
        nclass: number of classes (1 means binary sigmoid output).

    Returns:
        Tuple (pixel_correct, pixel_labeled).
    """
    if nclass == 1:
        # Binary case: threshold the sigmoid output at 0.5.
        pred = torch.round(torch.sigmoid(pred)).int()
    else:
        # Multi-class: take the argmax over the class dimension.
        # (The original kept torch.max's (values, indices) namedtuple,
        # which crashed on the following .cpu() call.)
        pred = torch.argmax(pred, dim=1)
    pred = pred.cpu().numpy()
    label = label.cpu().numpy()
    pixel_labeled = np.sum(label >= 0)
    pixel_correct = np.sum(pred == label)
    assert pixel_correct <= pixel_labeled, \
        "Correct area should be smaller than Labeled"
    return pixel_correct, pixel_labeled
def batch_intersection_union(predict, target, nclass):
    """Batch Intersection of Union
    Args:
        predict: input 4D tensor (raw scores; argmax-ed here for nclass>1)
        target: label 3D tensor
        nclass: number of categories (int),note: not include background
    Returns:
        (area_inter, area_union): scalars for nclass==1, per-class arrays
        for nclass>1. Nothing is returned when nclass < 1.
    """
    if nclass==1:
        # Binary case: threshold sigmoid at 0.5, then count overlap directly.
        pred=torch.round(torch.sigmoid(predict)).int()
        pred=pred.cpu().numpy()
        target = target.cpu().numpy()
        area_inter=np.sum(pred*target)
        area_union=np.sum(pred)+np.sum(target)-area_inter
        return area_inter,area_union
    if nclass>1:
        _, predict = torch.max(predict, 1)
        mini = 1
        maxi = nclass
        nbins = nclass
        # Shift labels by +1 so class ids start at 1 and 0 can mark "ignore".
        predict = predict.cpu().numpy() + 1
        target = target.cpu().numpy() + 1
        # target = target + 1
        # Zero predictions on unlabeled pixels so they fall outside the bins.
        predict = predict * (target > 0).astype(predict.dtype)
        intersection = predict * (predict == target)
        # areas of intersection and union
        # Histogram range starts at mini+1 so the background class (shifted
        # id 1) is excluded from the per-class counts.
        area_inter, _ = np.histogram(intersection, bins=nbins-1, range=(mini+1, maxi))
        area_pred, _ = np.histogram(predict, bins=nbins-1, range=(mini+1, maxi))
        area_lab, _ = np.histogram(target, bins=nbins-1, range=(mini+1, maxi))
        area_union = area_pred + area_lab - area_inter
        assert (area_inter <= area_union).all(), \
            "Intersection area should be smaller than Union area"
        return area_inter, area_union
def pixel_accuracy(im_pred, im_lab):
    """Count correctly classified pixels, ignoring unlabeled (0) pixels.

    Returns (pixel_correct, pixel_labeled); divide them for the accuracy.
    """
    pred = np.asarray(im_pred)
    lab = np.asarray(im_lab)
    # Pixels labeled 0 carry no ground truth and must not be penalized.
    labeled_mask = lab > 0
    pixel_labeled = np.sum(labeled_mask)
    pixel_correct = np.sum((pred == lab) & labeled_mask)
    return pixel_correct, pixel_labeled
def reverse_one_hot(image):
    """
    Collapse a one-hot tensor (depth is num_classes) into a single-channel
    map where every pixel holds the index of its classified class.
    # Arguments
        image: one-hot tensor shaped [num_classes, H, W]
    # Returns
        A [H, W] tensor of class keys.
    """
    channels_last = image.permute(1, 2, 0)
    return torch.argmax(channels_last, dim=-1)
def colour_code_segmentation(image, label_values):
    """
    Map a single-channel array of class keys to RGB colours.
    # Arguments
        image: single channel array where each value represents the class key.
        label_values: mapping from class name to its colour triple.
    # Returns
        Colour coded image for segmentation visualization
    """
    # Build the palette in the dict's insertion order, then index it
    # directly with the class keys (fancy indexing).
    palette = np.array([label_values[name] for name in label_values])
    return palette[image.astype(int)]
#def compute_global_accuracy(pred, label):
# pred = pred.flatten()
# label = label.flatten()
# total = len(label)
# count = 0.0
# for i in range(total):
# if pred[i] == label[i]:
# count = count + 1.0
# return float(count) / float(total) | [
"numpy.histogram",
"torch.max",
"numpy.asarray",
"torch.sigmoid",
"numpy.equal",
"numpy.stack",
"numpy.sum",
"numpy.array",
"torch.save",
"numpy.all",
"torch.argmax"
] | [((312, 339), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (322, 339), False, 'import torch\n'), ((1371, 1402), 'numpy.stack', 'np.stack', (['semantic_map'], {'axis': '(-1)'}), '(semantic_map, axis=-1)\n', (1379, 1402), True, 'import numpy as np\n'), ((4205, 4223), 'numpy.sum', 'np.sum', (['(label >= 0)'], {}), '(label >= 0)\n', (4211, 4223), True, 'import numpy as np\n'), ((4241, 4262), 'numpy.sum', 'np.sum', (['(pred == label)'], {}), '(pred == label)\n', (4247, 4262), True, 'import numpy as np\n'), ((5854, 5873), 'numpy.asarray', 'np.asarray', (['im_pred'], {}), '(im_pred)\n', (5864, 5873), True, 'import numpy as np\n'), ((5887, 5905), 'numpy.asarray', 'np.asarray', (['im_lab'], {}), '(im_lab)\n', (5897, 5905), True, 'import numpy as np\n'), ((6060, 6078), 'numpy.sum', 'np.sum', (['(im_lab > 0)'], {}), '(im_lab > 0)\n', (6066, 6078), True, 'import numpy as np\n'), ((6099, 6141), 'numpy.sum', 'np.sum', (['((im_pred == im_lab) * (im_lab > 0))'], {}), '((im_pred == im_lab) * (im_lab > 0))\n', (6105, 6141), True, 'import numpy as np\n'), ((6907, 6934), 'torch.argmax', 'torch.argmax', (['image'], {'dim': '(-1)'}), '(image, dim=-1)\n', (6919, 6934), False, 'import torch\n'), ((7568, 7590), 'numpy.array', 'np.array', (['label_values'], {}), '(label_values)\n', (7576, 7590), True, 'import numpy as np\n'), ((1259, 1281), 'numpy.equal', 'np.equal', (['label', 'color'], {}), '(label, color)\n', (1267, 1281), True, 'import numpy as np\n'), ((1296, 1321), 'numpy.all', 'np.all', (['equality'], {'axis': '(-1)'}), '(equality, axis=-1)\n', (1302, 1321), True, 'import numpy as np\n'), ((4101, 4123), 'torch.max', 'torch.max', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (4110, 4123), False, 'import torch\n'), ((4818, 4839), 'numpy.sum', 'np.sum', (['(pred * target)'], {}), '(pred * target)\n', (4824, 4839), True, 'import numpy as np\n'), ((4976, 4997), 'torch.max', 'torch.max', (['predict', '(1)'], {}), '(predict, 1)\n', (4985, 4997), 
False, 'import torch\n'), ((5367, 5433), 'numpy.histogram', 'np.histogram', (['intersection'], {'bins': '(nbins - 1)', 'range': '(mini + 1, maxi)'}), '(intersection, bins=nbins - 1, range=(mini + 1, maxi))\n', (5379, 5433), True, 'import numpy as np\n'), ((5453, 5514), 'numpy.histogram', 'np.histogram', (['predict'], {'bins': '(nbins - 1)', 'range': '(mini + 1, maxi)'}), '(predict, bins=nbins - 1, range=(mini + 1, maxi))\n', (5465, 5514), True, 'import numpy as np\n'), ((5533, 5593), 'numpy.histogram', 'np.histogram', (['target'], {'bins': '(nbins - 1)', 'range': '(mini + 1, maxi)'}), '(target, bins=nbins - 1, range=(mini + 1, maxi))\n', (5545, 5593), True, 'import numpy as np\n'), ((3439, 3461), 'torch.sigmoid', 'torch.sigmoid', (['predict'], {}), '(predict)\n', (3452, 3461), False, 'import torch\n'), ((4857, 4869), 'numpy.sum', 'np.sum', (['pred'], {}), '(pred)\n', (4863, 4869), True, 'import numpy as np\n'), ((4870, 4884), 'numpy.sum', 'np.sum', (['target'], {}), '(target)\n', (4876, 4884), True, 'import numpy as np\n'), ((4019, 4038), 'torch.sigmoid', 'torch.sigmoid', (['pred'], {}), '(pred)\n', (4032, 4038), False, 'import torch\n'), ((4699, 4721), 'torch.sigmoid', 'torch.sigmoid', (['predict'], {}), '(predict)\n', (4712, 4721), False, 'import torch\n')] |
import json
import math
import multiprocessing
import os
import tempfile
from pathlib import Path
import gym
import numpy as np
import pandas as pd
from ray import tune
from ray.rllib.models import ModelCatalog
from ray.rllib.utils import try_import_tf
from examples.rllib_agent import TrainingModel
from smarts.core.agent import Agent, AgentSpec
from smarts.env.custom_observations import lane_ttc_observation_adapter
from smarts.env.rllib_hiway_env import RLlibHiWayEnv
from smarts.core.agent_interface import AgentInterface, AgentType
# Maximum steps per RLlib rollout episode (also used as the tune "horizon").
HORIZON = 5000
# Lazily imported TensorFlow module (None if TF is unavailable).
tf = try_import_tf()
class RLlibTFSavedModelAgent(Agent):
    """Agent that runs inference through a TF SavedModel exported by RLlib."""
    def __init__(self, path_to_model, observation_space):
        # Preprocessor matching the space the policy was trained with.
        self._prep = ModelCatalog.get_preprocessor_for_space(observation_space)
        self._path_to_model = path_to_model
    def setup(self):
        # Open a dedicated TF1-style session and load the SavedModel into it.
        self._sess = tf.compat.v1.Session(graph=tf.Graph())
        self._sess.__enter__()
        tf.compat.v1.saved_model.load(
            self._sess, export_dir=self._path_to_model, tags=["serve"]
        )
    def act(self, obs):
        """Preprocess `obs` and run one forward pass; returns the action row."""
        obs = self._prep.transform(obs)
        graph = tf.compat.v1.get_default_graph()
        # These tensor names were found by inspecting the trained model
        output_node = graph.get_tensor_by_name("default_policy/add:0")
        input_node = graph.get_tensor_by_name("default_policy/observation:0")
        # Feed a batch of one observation; take the single resulting action.
        res = self._sess.run(output_node, feed_dict={input_node: [obs]})
        action = res[0]
        return action
# Continuous action: [throttle, brake, steering]; steering spans [-1, 1].
ACTION_SPACE = gym.spaces.Box(
    low=np.array([0.0, 0.0, -1.0]), high=np.array([1.0, 1.0, 1.0]), dtype=np.float32
)
# Observations use the shared lane-TTC adapter's space.
OBSERVATION_SPACE = lane_ttc_observation_adapter.space
def observation_adapter(env_observation):
    # Delegate to the shared lane-TTC transform so the output matches
    # OBSERVATION_SPACE above.
    return lane_ttc_observation_adapter.transform(env_observation)
def reward_adapter(env_obs, env_reward):
    # Identity adapter: pass the environment reward through unchanged.
    return env_reward
def action_adapter(model_action):
    """Convert the model's 3-element action into the env's numpy vector
    [throttle, brake, steering]."""
    throttle_val, brake_val, steering_val = model_action
    return np.array([throttle_val, brake_val, steering_val])
def run_experiment(log_path, experiment_name, training_iteration=100):
    """Run one PPO training session on the loop scenario with the saved-model
    agent and return the ray.tune ExperimentAnalysis object.

    Args:
        log_path: directory tune writes its results into.
        experiment_name: tune experiment name (subdirectory under log_path).
        training_iteration: number of training iterations before stopping.
    """
    # The exported SavedModel is expected next to this file, under "model".
    model_path = Path(__file__).parent / "model"
    agent_spec = AgentSpec(
        interface=AgentInterface.from_type(AgentType.Standard, max_episode_steps=5000),
        policy=RLlibTFSavedModelAgent(
            model_path.absolute(),
            OBSERVATION_SPACE,
        ),
        observation_adapter=observation_adapter,
        reward_adapter=reward_adapter,
        action_adapter=action_adapter,
    )
    # Single shared RLlib policy; all agents map onto it below.
    rllib_policies = {
        "policy": (
            None,
            OBSERVATION_SPACE,
            ACTION_SPACE,
            {"model": {"custom_model": TrainingModel.NAME}},
        )
    }
    scenario_path = Path(__file__).parent / "../../scenarios/loop"
    scenario_path = str(scenario_path.absolute())
    tune_confg = {
        "env": RLlibHiWayEnv,
        "env_config": {
            "scenarios": [scenario_path],
            "seed": 42,
            "headless": True,
            "agent_specs": {"Agent-007": agent_spec},
        },
        "multiagent": {
            "policies": rllib_policies,
            "policy_mapping_fn": lambda _: "policy",
        },
        "log_level": "WARN",
        # Leave one core free for the driver process.
        "num_workers": multiprocessing.cpu_count() - 1,
        "horizon": HORIZON,
    }
    analysis = tune.run(
        "PPO",
        name=experiment_name,
        stop={"training_iteration": training_iteration},
        max_failures=10,
        local_dir=log_path,
        config=tune_confg,
    )
    return analysis
def create_df(file_path):
    """Load a JSON-lines result file into a DataFrame indexed by line number."""
    with open(file_path, encoding="utf-8", errors="ignore") as json_data:
        records = {idx: json.loads(line)
                   for idx, line in enumerate(json_data.readlines())}
    return pd.DataFrame.from_dict(records, orient="index")
def main():
    """Run repeated training experiments and record the CI95 lower bound of
    the mean episode reward at the final iteration, for regression testing."""
    with tempfile.TemporaryDirectory() as tmpdirname:
        # Change these consts if needed
        experiments_count = 10
        iteration_times = 100
        experiment_name = "learning_regression_test"
        for i in range(experiments_count):
            run_experiment(tmpdirname, experiment_name, iteration_times)
        p = Path(os.path.join(tmpdirname, experiment_name))
        data_frames = []  # data frame objects of these experiments
        # Each tune trial directory contains a result.json (JSON lines).
        for d in p.iterdir():
            if d.is_dir():
                f = d / "result.json"
                if f.exists():
                    data_frames.append(create_df(f.absolute()))
        # Group by iteration index across all experiments.
        df_experiments = pd.concat(tuple(data_frames)).groupby(level=0)
        mean_reward_stats = df_experiments["episode_reward_mean"].agg(
            ["mean", "count", "std"]
        )
        # Only ci95_lo will be used
        ci95_hi = []
        ci95_lo = []
        # 95% confidence interval of the mean: mean ± 1.96 * std / sqrt(n).
        for i in mean_reward_stats.index:
            m, c, s = mean_reward_stats.loc[i]
            ci95_hi.append(m + 1.96 * s / math.sqrt(c))
            ci95_lo.append(m - 1.96 * s / math.sqrt(c))
        mean_reward_stats["ci95_hi"] = ci95_hi
        mean_reward_stats["ci95_lo"] = ci95_lo
        print("CI95_REWARD_MEAN:", ci95_lo[iteration_times - 1])
        # Persist the final-iteration lower bound for the regression test.
        ci95_file = Path(__file__).parent / "../../smarts/env/tests/ci95_reward_lo"
        with ci95_file.open("w+") as f:
            f.write(str(ci95_lo[iteration_times - 1]))
# Script entry point.
if __name__ == "__main__":
    main()
| [
"tempfile.TemporaryDirectory",
"smarts.core.agent_interface.AgentInterface.from_type",
"json.loads",
"smarts.env.custom_observations.lane_ttc_observation_adapter.transform",
"pathlib.Path",
"ray.rllib.models.ModelCatalog.get_preprocessor_for_space",
"os.path.join",
"math.sqrt",
"pandas.DataFrame.fro... | [((562, 577), 'ray.rllib.utils.try_import_tf', 'try_import_tf', ([], {}), '()\n', (575, 577), False, 'from ray.rllib.utils import try_import_tf\n'), ((1718, 1773), 'smarts.env.custom_observations.lane_ttc_observation_adapter.transform', 'lane_ttc_observation_adapter.transform', (['env_observation'], {}), '(env_observation)\n', (1756, 1773), False, 'from smarts.env.custom_observations import lane_ttc_observation_adapter\n'), ((1931, 1968), 'numpy.array', 'np.array', (['[throttle, brake, steering]'], {}), '([throttle, brake, steering])\n', (1939, 1968), True, 'import numpy as np\n'), ((3268, 3419), 'ray.tune.run', 'tune.run', (['"""PPO"""'], {'name': 'experiment_name', 'stop': "{'training_iteration': training_iteration}", 'max_failures': '(10)', 'local_dir': 'log_path', 'config': 'tune_confg'}), "('PPO', name=experiment_name, stop={'training_iteration':\n training_iteration}, max_failures=10, local_dir=log_path, config=tune_confg\n )\n", (3276, 3419), False, 'from ray import tune\n'), ((3702, 3746), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data'], {'orient': '"""index"""'}), "(data, orient='index')\n", (3724, 3746), True, 'import pandas as pd\n'), ((696, 754), 'ray.rllib.models.ModelCatalog.get_preprocessor_for_space', 'ModelCatalog.get_preprocessor_for_space', (['observation_space'], {}), '(observation_space)\n', (735, 754), False, 'from ray.rllib.models import ModelCatalog\n'), ((1527, 1553), 'numpy.array', 'np.array', (['[0.0, 0.0, -1.0]'], {}), '([0.0, 0.0, -1.0])\n', (1535, 1553), True, 'import numpy as np\n'), ((1560, 1585), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (1568, 1585), True, 'import numpy as np\n'), ((3784, 3813), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (3811, 3813), False, 'import tempfile\n'), ((2059, 2073), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2063, 2073), False, 'from pathlib import Path\n'), ((2137, 
2205), 'smarts.core.agent_interface.AgentInterface.from_type', 'AgentInterface.from_type', (['AgentType.Standard'], {'max_episode_steps': '(5000)'}), '(AgentType.Standard, max_episode_steps=5000)\n', (2161, 2205), False, 'from smarts.core.agent_interface import AgentInterface, AgentType\n'), ((2673, 2687), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2677, 2687), False, 'from pathlib import Path\n'), ((3185, 3212), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (3210, 3212), False, 'import multiprocessing\n'), ((3679, 3692), 'json.loads', 'json.loads', (['r'], {}), '(r)\n', (3689, 3692), False, 'import json\n'), ((4118, 4159), 'os.path.join', 'os.path.join', (['tmpdirname', 'experiment_name'], {}), '(tmpdirname, experiment_name)\n', (4130, 4159), False, 'import os\n'), ((5071, 5085), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5075, 5085), False, 'from pathlib import Path\n'), ((4821, 4833), 'math.sqrt', 'math.sqrt', (['c'], {}), '(c)\n', (4830, 4833), False, 'import math\n'), ((4877, 4889), 'math.sqrt', 'math.sqrt', (['c'], {}), '(c)\n', (4886, 4889), False, 'import math\n')] |
"""
MIT License
Copyright (c) 2017 s0hvaperuna
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
import asyncio
import logging
import os
import sys
from collections import OrderedDict
from functools import partial
from io import BytesIO
from itertools import zip_longest
from threading import Lock
import discord
import numpy as np
from PIL import Image, ImageFont
from colour import Color
from discord.ext.commands import BucketType
from matplotlib import pyplot as plt
from matplotlib.patches import Polygon, Circle
from numpy import pi, random
from bot.bot import command, cooldown, bot_has_permissions
from cogs.cog import Cog
from utils.imagetools import (create_shadow, create_text,
create_geopattern_background, shift_color,
remove_background,
resize_keep_aspect_ratio, get_color,
IMAGES_PATH, image_from_url, GeoPattern,
color_distance, MAX_COLOR_DIFF)
from utils.utilities import (get_picture_from_msg, y_n_check,
check_negative, normalize_text,
get_image, basic_check, test_url)
# Logger used for console/terminal diagnostics.
terminal = logging.getLogger('terminal')
# Translation table mapping halfwidth ASCII characters to their fullwidth
# (zenkaku) counterparts, used for JoJo-style text rendering.
HALFWIDTH_TO_FULLWIDTH = str.maketrans(
    '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&()*+,-./:;<=>?@[]^_`{|}~ ',
    '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!゛#$%&()*+、ー。/:;〈=〉?@[]^_‘{|}~ ')
# Stand stat grades A-E mapped to indices 0-4 (and back).
LETTERS_TO_INT = {k: idx for idx, k in enumerate(['A', 'B', 'C', 'D', 'E'])}
INT_TO_LETTER = ['A', 'B', 'C', 'D', 'E']
# The six JoJo stand stats, in the order they appear on the stat wheel.
POWERS = ['power', 'speed', 'range', 'durability', 'precision', 'potential']
class ArgumentParser(argparse.ArgumentParser):
    """argparse subclass that raises the underlying ArgumentError (annotated
    with its Action) instead of printing usage and exiting the process."""
    def _get_action_from_name(self, name):
        """Given a name, get the Action instance registered with this parser.
        If only it were made available in the ArgumentError object. It is
        passed as its first arg...
        """
        container = self._actions
        if name is None:
            return None
        # Match by option string, metavar, or dest, in that order.
        for action in container:
            if '/'.join(action.option_strings) == name:
                return action
            elif action.metavar == name:
                return action
            elif action.dest == name:
                return action
    def error(self, message):
        """Re-raise the in-flight ArgumentError (if any) rather than exiting.

        Falls back to argparse's default error behavior when called outside
        an exception context.
        """
        exc = sys.exc_info()[1]
        if exc:
            # Attach the resolved Action so callers can inspect which
            # argument failed.
            exc.argument = self._get_action_from_name(exc.argument_name)
            raise exc
        super(ArgumentParser, self).error(message)
class JoJo(Cog):
    def __init__(self, bot):
        """Set up stat-wheel state and the argument parser for image options."""
        super().__init__(bot)
        self.stat_lock = Lock()
        # Maps each stat name to its matplotlib line (filled in when the
        # empty wheel is drawn).
        self.stats = OrderedDict.fromkeys(POWERS, None)
        self.stat_spread_figure = plt.figure()
        # Radial tick positions along each stat axis (1.0 down to 0.0).
        self.line_points = [1 - 0.2*i for i in range(6)]
        self.parser = ArgumentParser()
        # Optional tuning knobs for the background-removal pipeline.
        args = ['-blur', '-canny_thresh_1', '-canny_thresh_2', '-mask_dilate_iter', '-mask_erode_iter']
        for arg in args:
            self.parser.add_argument(arg, type=int, default=argparse.SUPPRESS,
                                     required=False)
    def cog_unload(self):  # skipcq: PYL-R0201
        # Release all matplotlib figures when the cog is unloaded.
        plt.close('all')
    def create_empty_stats_circle(self, color='k'):
        """Draw the six bare stat axes (one per POWERS entry) and the A-E
        grade letters on the first axis; stores each axis line in
        self.stats and returns (figure, axes)."""
        fig = plt.figure()
        ax = fig.add_subplot(111)
        for i in range(6):
            power = POWERS[i]
            rot = 60 * i / 180 * pi # Lines every 60 degrees
            # Rotate the points in the line rot degrees
            x = list(map(lambda x: x * np.sin(rot), self.line_points))
            y = list(map(lambda y: y * np.cos(rot), self.line_points))
            # Tick marker angle is kept perpendicular to the rotated axis.
            line = ax.plot(x, y, '-', color=color, alpha=0.6, markersize=6,
                           marker=(2, 0, 360 - 90 - 60 * i))
            if i == 0:
                x, y = line[0].get_data()
                # Shift the letters so they are not on top of the line
                correctionx = 0.15
                correctiony = 0.05
                for l, idx in LETTERS_TO_INT.items():
                    ax.text(x[idx] + correctionx, y[idx] - correctiony, l,
                            horizontalalignment='right', color=color, alpha=0.65,
                            fontsize=10)
            self.stats[power] = line
        return fig, ax
def create_stats_circle(self, color='b', bg_color=None, **kwargs):
c = 'black'
if color_distance(Color(c), bg_color) < (MAX_COLOR_DIFF/2):
c = 'white'
inner_circle = Circle((0, 0), radius=1.1, fc='none', ec=c)
outer_circle = Circle((0, 0), radius=1.55, fc='none', ec=c)
outest_circle = Circle((0, 0), radius=1.65, fc='none', ec=c)
fig, ax = self.create_empty_stats_circle(c)
stat_spread = []
for idx, line in enumerate(self.stats.values()):
x, y = line[0].get_data()
power = POWERS[idx]
power_value = kwargs.get(power, 'E')
if power_value is None:
power_value = 'E'
power_value = power_value.upper()
power_int = LETTERS_TO_INT.get(power_value, 0)
# Small correction to the text position
correction = 0.03
r = 60 * idx / 180 * pi
sinr = np.round(np.sin(r), 5)
cosr = np.round(np.cos(r), 5)
if sinr < 0:
lx = 1.25 * sinr - correction
else:
lx = 1.25 * sinr + correction
if cosr < 0:
ly = 1.25 * cosr - correction
else:
ly = 1.25 * cosr + correction
rot = (0 + min(check_negative(cosr) * 180, 0)) - 60 * idx
if sinr == 0:
rot = 0
ax.text(lx, ly, power_value, color=c, alpha=0.9, fontsize=14,
weight='bold', ha='center', va='center')
ax.text(lx * 1.50, ly * 1.50, power, color=c, fontsize=17,
ha='center', rotation=rot, va='center')
x = x[power_int]
y = y[power_int]
stat_spread.append([x, y])
r1 = outer_circle.radius
r2 = outest_circle.radius
w = 3.0
for r in range(0, 360, 15):
sinr = np.round(np.sin(np.deg2rad(r)), 5)
cosr = np.round(np.cos(np.deg2rad(r)), 5)
x = (r1*sinr, r2*sinr)
y = (r1*cosr, r2*cosr)
ax.plot(x, y, '-', color=c, linewidth=w)
pol = Polygon(stat_spread, fc='y', alpha=0.7)
pol.set_color(color)
fig.gca().add_patch(inner_circle)
fig.gca().add_patch(outer_circle)
fig.gca().add_patch(outest_circle)
fig.gca().add_patch(pol)
fig.gca().autoscale(True)
fig.gca().set_axis_off()
ax.axis('scaled')
fig.canvas.draw()
return fig, ax
@staticmethod
def _standify_text(s, type_=0):
types = ['『』', '「」', '']
bracket = types[type_]
s = normalize_text(s)
s = s.translate(HALFWIDTH_TO_FULLWIDTH)
if type_ > 1:
return s
s = bracket[0] + s + bracket[1]
return s
@staticmethod
def pattern_check(msg):
return msg.content.lower() in GeoPattern.available_generators
@command(aliases=['stand'])
async def standify(self, ctx, *, stand):
"""Standify text using these brackets 『』"""
stand = self._standify_text(stand)
await ctx.send(stand)
@command(aliases=['stand2'])
async def standify2(self, ctx, *, stand):
"""Standify text using these brackets 「」"""
stand = self._standify_text(stand, 1)
await ctx.send(stand)
@command(aliases=['stand3'])
async def standify3(self, ctx, *, stand):
"""Standify text using no brackets"""
stand = self._standify_text(stand, 2)
await ctx.send(stand)
async def subcommand(self, ctx, content, delete_after=60, author=None, channel=None, check=None, del_msg=True):
m = await ctx.send(content, delete_after=delete_after)
if callable(check):
def _check(msg):
return check(msg) and basic_check(author, channel)
else:
_check = basic_check(author, channel)
try:
msg = await self.bot.wait_for('message', check=_check, timeout=delete_after)
except asyncio.TimeoutError:
msg = None
if del_msg:
try:
await m.delete()
except discord.HTTPException:
pass
return msg
@command(aliases=['stand_generator', 'standgen'])
@cooldown(1, 10, BucketType.user)
@bot_has_permissions(attach_files=True)
async def stand_gen(self, ctx, stand, user, image=None, *, params=None):
"""Generate a stand card. Arguments are stand name, user name and an image
Image can be an attachment or a link. Passing -advanced as the last argument
will enable advanced mode which gives the ability to tune some numbers.
Use quotes for names that have spaces e.g.
`{prefix}{name} "Star Platinum" "Jotaro Kujo" [image]`
You can also answer all parameters in one command by adding the parameters on their own line
It works something like this
```
{prefix}{name} "stand" "user"
A A A A A A
backround_image.png
triangles
n
```
You can make the bot ask you the parameters by setting it just as an empty line. e.g.
```
A A A A A A
triangles
```
Would make the bot sak the background from you.
Here is a tree of the questions being asked.
If some answers lead to extra question they are indented to make it clear
1. Stats from A to E in the order power, speed, range, durability, precision, potential
2. Link to the background. Leave empty if you want a randomized pattern
2.......a) The geopattern and bg color separated by space. If either is left out it will be selected randomly
3. Do you want automatic background removal. Recommended answer is no. Typing y or yes will use it
"""
author = ctx.author
channel = ctx.channel
stand = self._standify_text(stand, 2)
user = '[STAND MASTER]\n' + user
stand = '[STAND NAME]\n' + stand
size = (1100, 700)
shift = 800
advanced = False
if image is None:
pass
elif test_url(image):
pass
else:
params = params if params else ''
params = image + params
image = None
if params:
params = params.strip().split('\n')
advanced = params[-1].strip()
if advanced.endswith('-advanced'):
advanced = advanced[:-9].strip()
if advanced:
params[-1] = advanced
else:
params.pop(-1)
advanced = True
else:
advanced = False
else:
params = []
if advanced:
await ctx.send(f'{author} Advanced mode activated', delete_after=20)
image = await get_image(ctx, image)
if not image:
return
def get_next_param():
try:
return params.pop(0)
except IndexError:
return
stats = get_next_param()
if not stats:
msg = await self.subcommand(ctx,
'{} Give the stand **stats** in the given order ranging from **A** to **E** '
'separated by **spaces**.\nDefault value is E\n`{}`'.format(author, '` `'.join(POWERS)),
delete_after=120, author=author, channel=channel)
if msg is None:
await ctx.send(f'Timed out. {author} cancelling stand generation')
return
stats = msg.content
stats = stats.split(' ')
stats = dict(zip_longest(POWERS, stats[:6]))
bg = get_next_param()
if not bg:
msg = await self.subcommand(ctx,
f'{author}`Use a custom background by uploading a **picture** or using a **link**. '
'Posting something other than an image will use the **generated background**',
delete_after=120, author=author, channel=channel)
bg = get_picture_from_msg(msg)
else:
if not test_url(bg):
bg = None
color = None
if bg is not None:
try:
bg = bg.strip()
bg = await image_from_url(bg, self.bot.aiohttp_client)
def process_bg():
nonlocal bg, color
bg = bg.convert('RGB')
dominant_color = get_color(bg)
color = Color(rgb=list(map(lambda c: c/255, dominant_color)))
bg = resize_keep_aspect_ratio(bg, size, crop_to_size=True)
return color, bg
color, bg = await self.bot.loop.run_in_executor(self.bot.threadpool, process_bg)
except Exception:
terminal.exception('Failed to get background')
await ctx.send(f'{author} Failed to use custom background. Using generated one',
delete_after=60)
bg = None
if bg is None:
pattern = random.choice(GeoPattern.available_generators)
msg = get_next_param()
if not msg:
msg = await self.subcommand(ctx,
"{} Generating background. Select a **pattern** and **color** separated by space. "
"Otherwise they'll will be randomly chosen. Available patterns:\n"
'{}'.format(author, '\n'.join(GeoPattern.available_generators)),
delete_after=120, channel=channel, author=author)
msg = msg.content if msg else ''
if not msg:
await ctx.send('{} Selecting randomly'.format(author), delete_after=20)
else:
msg = msg.split(' ')
# Temporary containers for pattern and color
_pattern, _color = None, None
if len(msg) == 1:
_pattern = msg[0]
elif len(msg) > 1:
_pattern, _color = msg[:2]
if _pattern in GeoPattern.available_generators:
pattern = _pattern
else:
await ctx.send('{} Pattern {} not found. Selecting randomly'.format(author, _pattern),
delete_after=20)
if _color:
try:
color = Color(_color)
except (ValueError, AttributeError):
await ctx.send('{} {} not an available color'.format(author, _color),
delete_after=20)
def do_bg():
return create_geopattern_background(size, stand + user,
generator=pattern, color=color)
bg, color = await self.bot.loop.run_in_executor(self.bot.threadpool, do_bg)
if advanced:
msg = get_next_param()
if not msg:
msg = await self.subcommand(ctx,
'{} Input color value change as an **integer**. Default is {}. '
'You can also input a **color** instead of the change value. '
'The resulting color will be used in the stats circle'.format(author, shift),
delete_after=120, channel=channel, author=author)
msg = msg.content if msg else ''
try:
shift = int(msg.strip())
except ValueError:
try:
color = Color(msg)
shift = 0
except (ValueError, AttributeError):
await ctx.send(f'{author} Could not set color or color change int. Using default values',
delete_after=15)
try:
if not isinstance(color, str):
color = Color(color.get_hex_l())
bg_color = Color(color)
except (AttributeError, ValueError):
terminal.exception(f'Failed to set bg color from {color}')
return await ctx.send('Failed to set bg color')
def do_stuff():
# Shift color hue and saturation so it's not the same as the bg
shift_color(color, shift)
fig, _ = self.create_stats_circle(color=color.get_hex_l(), bg_color=bg_color, **stats)
path = os.path.join(IMAGES_PATH, 'stats.png')
with self.stat_lock:
try:
fig.savefig(path, transparent=True)
stat_img = Image.open(path)
except:
terminal.exception('Could not create image')
return '{} Could not create picture because of an error.'.format(author)
plt.close(fig)
stat_img = stat_img.resize((int(stat_img.width * 0.85),
int(stat_img.height * 0.85)),
Image.BILINEAR)
full = Image.new('RGBA', size)
# Coords for stat circle
x, y = (-60, full.height - stat_img.height)
stat_corner = (x + stat_img.width, y + stat_img.height)
full.paste(stat_img, (x, y, *stat_corner))
font = ImageFont.truetype(os.path.join('M-1c', 'mplus-1c-bold.ttf'), 40)
# Small glow blur can be created with create_glow and setting amount to 1 or lower
text = create_text(stand, font, '#FFFFFF', (int(full.width*0.75), int(y*0.8)), (10, 10))
text = create_shadow(text, 80, 3, 2, 4).convert('RGBA')
full.paste(text, (20, 20), text)
text2 = create_text(user, font, '#FFFFFF', (int((full.width - stat_corner[0])*0.8), int(full.height*0.7)), (10, 10))
text2 = create_shadow(text2, 80, 3, 2, 4).convert('RGBA')
text2.load()
return full, stat_corner, text2
res = await self.bot.loop.run_in_executor(self.bot.threadpool, do_stuff)
if isinstance(res, str):
return await ctx.send(res)
full, stat_corner, text2 = res
if image is not None:
# No clue what this does so leaving it out
#im = trim_image(image)
im = image
msg = get_next_param()
if not msg:
msg = await self.subcommand(ctx,
f'{author} Try to automatically remove background (y/n)? '
'This might fuck the picture up and will take a moment',
author=author, channel=channel, delete_after=120, check=y_n_check)
msg = msg.content if msg else ''
if msg and msg.lower() in ['y', 'yes']:
kwargs = {}
if advanced:
msg = get_next_param()
if not msg:
msg = await self.subcommand(ctx,
f'{author} Change the arguments of background removing. Available'
' arguments are `blur`, `canny_thresh_1`, `canny_thresh_2`, '
'`mask_dilate_iter`, `mask_erode_iter`. '
'Accepted values are integers.\nArguments are added like this '
'`-blur 30 -canny_thresh_2 50`. All arguments are optional',
channel=channel, author=author, delete_after=140)
msg = msg.content if msg else ''
await channel.trigger_typing()
if msg is not None:
try:
kwargs = self.parser.parse_known_args(msg.split(' '))[0].__dict__
except (SystemExit, IndexError, AttributeError):
await ctx.send(f'{author} Could not get arguments from {msg}',
delete_after=20)
try:
im = await self.bot.loop.run_in_executor(self.bot.threadpool, partial(remove_background, im, **kwargs))
except Exception:
terminal.exception('Failed to remove bg from image')
await ctx.send(f'{author} Could not remove background because of an error',
delete_after=30)
def resize_image():
nonlocal im
# Size of user pic
box = (500, 600)
im = resize_keep_aspect_ratio(im, box, can_be_bigger=False, resample=Image.BICUBIC)
im = create_shadow(im, 70, 3, -22, -7).convert('RGBA')
full.paste(im, (full.width - im.width, int((full.height - im.height)/2)), im)
await self.bot.loop.run_in_executor(self.bot.threadpool, resize_image)
await channel.trigger_typing()
def finalize_image():
full.paste(text2, (int((full.width - stat_corner[0]) * 0.9), int(full.height * 0.7)), text2)
bg.paste(full, (0, 0), full)
file = BytesIO()
bg.save(file, format='PNG')
file.seek(0)
return file
file = await self.bot.loop.run_in_executor(self.bot.threadpool, finalize_image)
await ctx.send(file=discord.File(file, filename='stand_card.png'))
def setup(bot):
    """discord.py extension entry point: attach the JoJo cog to *bot*."""
    cog = JoJo(bot)
    bot.add_cog(cog)
| [
"logging.getLogger",
"utils.utilities.get_image",
"bot.bot.command",
"PIL.Image.new",
"bot.bot.bot_has_permissions",
"io.BytesIO",
"sys.exc_info",
"utils.imagetools.create_shadow",
"numpy.sin",
"utils.imagetools.get_color",
"collections.OrderedDict.fromkeys",
"utils.utilities.get_picture_from_... | [((2206, 2235), 'logging.getLogger', 'logging.getLogger', (['"""terminal"""'], {}), "('terminal')\n", (2223, 2235), False, 'import logging\n'), ((8207, 8233), 'bot.bot.command', 'command', ([], {'aliases': "['stand']"}), "(aliases=['stand'])\n", (8214, 8233), False, 'from bot.bot import command, cooldown, bot_has_permissions\n'), ((8410, 8437), 'bot.bot.command', 'command', ([], {'aliases': "['stand2']"}), "(aliases=['stand2'])\n", (8417, 8437), False, 'from bot.bot import command, cooldown, bot_has_permissions\n'), ((8618, 8645), 'bot.bot.command', 'command', ([], {'aliases': "['stand3']"}), "(aliases=['stand3'])\n", (8625, 8645), False, 'from bot.bot import command, cooldown, bot_has_permissions\n'), ((9504, 9552), 'bot.bot.command', 'command', ([], {'aliases': "['stand_generator', 'standgen']"}), "(aliases=['stand_generator', 'standgen'])\n", (9511, 9552), False, 'from bot.bot import command, cooldown, bot_has_permissions\n'), ((9558, 9590), 'bot.bot.cooldown', 'cooldown', (['(1)', '(10)', 'BucketType.user'], {}), '(1, 10, BucketType.user)\n', (9566, 9590), False, 'from bot.bot import command, cooldown, bot_has_permissions\n'), ((9596, 9634), 'bot.bot.bot_has_permissions', 'bot_has_permissions', ([], {'attach_files': '(True)'}), '(attach_files=True)\n', (9615, 9634), False, 'from bot.bot import command, cooldown, bot_has_permissions\n'), ((3636, 3642), 'threading.Lock', 'Lock', ([], {}), '()\n', (3640, 3642), False, 'from threading import Lock\n'), ((3664, 3698), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['POWERS', 'None'], {}), '(POWERS, None)\n', (3684, 3698), False, 'from collections import OrderedDict\n'), ((3733, 3745), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3743, 3745), True, 'from matplotlib import pyplot as plt\n'), ((4160, 4176), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4169, 4176), True, 'from matplotlib import pyplot as plt\n'), ((4244, 
4256), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4254, 4256), True, 'from matplotlib import pyplot as plt\n'), ((5475, 5518), 'matplotlib.patches.Circle', 'Circle', (['(0, 0)'], {'radius': '(1.1)', 'fc': '"""none"""', 'ec': 'c'}), "((0, 0), radius=1.1, fc='none', ec=c)\n", (5481, 5518), False, 'from matplotlib.patches import Polygon, Circle\n'), ((5542, 5586), 'matplotlib.patches.Circle', 'Circle', (['(0, 0)'], {'radius': '(1.55)', 'fc': '"""none"""', 'ec': 'c'}), "((0, 0), radius=1.55, fc='none', ec=c)\n", (5548, 5586), False, 'from matplotlib.patches import Polygon, Circle\n'), ((5611, 5655), 'matplotlib.patches.Circle', 'Circle', (['(0, 0)'], {'radius': '(1.65)', 'fc': '"""none"""', 'ec': 'c'}), "((0, 0), radius=1.65, fc='none', ec=c)\n", (5617, 5655), False, 'from matplotlib.patches import Polygon, Circle\n'), ((7411, 7450), 'matplotlib.patches.Polygon', 'Polygon', (['stat_spread'], {'fc': '"""y"""', 'alpha': '(0.7)'}), "(stat_spread, fc='y', alpha=0.7)\n", (7418, 7450), False, 'from matplotlib.patches import Polygon, Circle\n'), ((7917, 7934), 'utils.utilities.normalize_text', 'normalize_text', (['s'], {}), '(s)\n', (7931, 7934), False, 'from utils.utilities import get_picture_from_msg, y_n_check, check_negative, normalize_text, get_image, basic_check, test_url\n'), ((3353, 3367), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3365, 3367), False, 'import sys\n'), ((9153, 9181), 'utils.utilities.basic_check', 'basic_check', (['author', 'channel'], {}), '(author, channel)\n', (9164, 9181), False, 'from utils.utilities import get_picture_from_msg, y_n_check, check_negative, normalize_text, get_image, basic_check, test_url\n'), ((11432, 11447), 'utils.utilities.test_url', 'test_url', (['image'], {}), '(image)\n', (11440, 11447), False, 'from utils.utilities import get_picture_from_msg, y_n_check, check_negative, normalize_text, get_image, basic_check, test_url\n'), ((12170, 12191), 'utils.utilities.get_image', 'get_image', (['ctx', 'image'], 
{}), '(ctx, image)\n', (12179, 12191), False, 'from utils.utilities import get_picture_from_msg, y_n_check, check_negative, normalize_text, get_image, basic_check, test_url\n'), ((12962, 12992), 'itertools.zip_longest', 'zip_longest', (['POWERS', 'stats[:6]'], {}), '(POWERS, stats[:6])\n', (12973, 12992), False, 'from itertools import zip_longest\n'), ((13370, 13395), 'utils.utilities.get_picture_from_msg', 'get_picture_from_msg', (['msg'], {}), '(msg)\n', (13390, 13395), False, 'from utils.utilities import get_picture_from_msg, y_n_check, check_negative, normalize_text, get_image, basic_check, test_url\n'), ((14413, 14459), 'numpy.random.choice', 'random.choice', (['GeoPattern.available_generators'], {}), '(GeoPattern.available_generators)\n', (14426, 14459), False, 'from numpy import pi, random\n'), ((17297, 17309), 'colour.Color', 'Color', (['color'], {}), '(color)\n', (17302, 17309), False, 'from colour import Color\n'), ((17599, 17624), 'utils.imagetools.shift_color', 'shift_color', (['color', 'shift'], {}), '(color, shift)\n', (17610, 17624), False, 'from utils.imagetools import create_shadow, create_text, create_geopattern_background, shift_color, remove_background, resize_keep_aspect_ratio, get_color, IMAGES_PATH, image_from_url, GeoPattern, color_distance, MAX_COLOR_DIFF\n'), ((17744, 17782), 'os.path.join', 'os.path.join', (['IMAGES_PATH', '"""stats.png"""'], {}), "(IMAGES_PATH, 'stats.png')\n", (17756, 17782), False, 'import os\n'), ((18136, 18150), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (18145, 18150), True, 'from matplotlib import pyplot as plt\n'), ((18364, 18387), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'size'], {}), "('RGBA', size)\n", (18373, 18387), False, 'from PIL import Image, ImageFont\n'), ((22381, 22390), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (22388, 22390), False, 'from io import BytesIO\n'), ((5385, 5393), 'colour.Color', 'Color', (['c'], {}), '(c)\n', (5390, 5393), False, 'from colour import 
Color\n'), ((6232, 6241), 'numpy.sin', 'np.sin', (['r'], {}), '(r)\n', (6238, 6241), True, 'import numpy as np\n'), ((6274, 6283), 'numpy.cos', 'np.cos', (['r'], {}), '(r)\n', (6280, 6283), True, 'import numpy as np\n'), ((13429, 13441), 'utils.utilities.test_url', 'test_url', (['bg'], {}), '(bg)\n', (13437, 13441), False, 'from utils.utilities import get_picture_from_msg, y_n_check, check_negative, normalize_text, get_image, basic_check, test_url\n'), ((16034, 16119), 'utils.imagetools.create_geopattern_background', 'create_geopattern_background', (['size', '(stand + user)'], {'generator': 'pattern', 'color': 'color'}), '(size, stand + user, generator=pattern, color=color\n )\n', (16062, 16119), False, 'from utils.imagetools import create_shadow, create_text, create_geopattern_background, shift_color, remove_background, resize_keep_aspect_ratio, get_color, IMAGES_PATH, image_from_url, GeoPattern, color_distance, MAX_COLOR_DIFF\n'), ((18642, 18683), 'os.path.join', 'os.path.join', (['"""M-1c"""', '"""mplus-1c-bold.ttf"""'], {}), "('M-1c', 'mplus-1c-bold.ttf')\n", (18654, 18683), False, 'import os\n'), ((21816, 21894), 'utils.imagetools.resize_keep_aspect_ratio', 'resize_keep_aspect_ratio', (['im', 'box'], {'can_be_bigger': '(False)', 'resample': 'Image.BICUBIC'}), '(im, box, can_be_bigger=False, resample=Image.BICUBIC)\n', (21840, 21894), False, 'from utils.imagetools import create_shadow, create_text, create_geopattern_background, shift_color, remove_background, resize_keep_aspect_ratio, get_color, IMAGES_PATH, image_from_url, GeoPattern, color_distance, MAX_COLOR_DIFF\n'), ((7200, 7213), 'numpy.deg2rad', 'np.deg2rad', (['r'], {}), '(r)\n', (7210, 7213), True, 'import numpy as np\n'), ((7254, 7267), 'numpy.deg2rad', 'np.deg2rad', (['r'], {}), '(r)\n', (7264, 7267), True, 'import numpy as np\n'), ((9089, 9117), 'utils.utilities.basic_check', 'basic_check', (['author', 'channel'], {}), '(author, channel)\n', (9100, 9117), False, 'from utils.utilities import 
get_picture_from_msg, y_n_check, check_negative, normalize_text, get_image, basic_check, test_url\n'), ((13595, 13638), 'utils.imagetools.image_from_url', 'image_from_url', (['bg', 'self.bot.aiohttp_client'], {}), '(bg, self.bot.aiohttp_client)\n', (13609, 13638), False, 'from utils.imagetools import create_shadow, create_text, create_geopattern_background, shift_color, remove_background, resize_keep_aspect_ratio, get_color, IMAGES_PATH, image_from_url, GeoPattern, color_distance, MAX_COLOR_DIFF\n'), ((13793, 13806), 'utils.imagetools.get_color', 'get_color', (['bg'], {}), '(bg)\n', (13802, 13806), False, 'from utils.imagetools import create_shadow, create_text, create_geopattern_background, shift_color, remove_background, resize_keep_aspect_ratio, get_color, IMAGES_PATH, image_from_url, GeoPattern, color_distance, MAX_COLOR_DIFF\n'), ((13914, 13967), 'utils.imagetools.resize_keep_aspect_ratio', 'resize_keep_aspect_ratio', (['bg', 'size'], {'crop_to_size': '(True)'}), '(bg, size, crop_to_size=True)\n', (13938, 13967), False, 'from utils.imagetools import create_shadow, create_text, create_geopattern_background, shift_color, remove_background, resize_keep_aspect_ratio, get_color, IMAGES_PATH, image_from_url, GeoPattern, color_distance, MAX_COLOR_DIFF\n'), ((17924, 17940), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (17934, 17940), False, 'from PIL import Image, ImageFont\n'), ((18905, 18937), 'utils.imagetools.create_shadow', 'create_shadow', (['text', '(80)', '(3)', '(2)', '(4)'], {}), '(text, 80, 3, 2, 4)\n', (18918, 18937), False, 'from utils.imagetools import create_shadow, create_text, create_geopattern_background, shift_color, remove_background, resize_keep_aspect_ratio, get_color, IMAGES_PATH, image_from_url, GeoPattern, color_distance, MAX_COLOR_DIFF\n'), ((19149, 19182), 'utils.imagetools.create_shadow', 'create_shadow', (['text2', '(80)', '(3)', '(2)', '(4)'], {}), '(text2, 80, 3, 2, 4)\n', (19162, 19182), False, 'from utils.imagetools 
import create_shadow, create_text, create_geopattern_background, shift_color, remove_background, resize_keep_aspect_ratio, get_color, IMAGES_PATH, image_from_url, GeoPattern, color_distance, MAX_COLOR_DIFF\n'), ((22597, 22642), 'discord.File', 'discord.File', (['file'], {'filename': '"""stand_card.png"""'}), "(file, filename='stand_card.png')\n", (22609, 22642), False, 'import discord\n'), ((15764, 15777), 'colour.Color', 'Color', (['_color'], {}), '(_color)\n', (15769, 15777), False, 'from colour import Color\n'), ((16911, 16921), 'colour.Color', 'Color', (['msg'], {}), '(msg)\n', (16916, 16921), False, 'from colour import Color\n'), ((21916, 21949), 'utils.imagetools.create_shadow', 'create_shadow', (['im', '(70)', '(3)', '(-22)', '(-7)'], {}), '(im, 70, 3, -22, -7)\n', (21929, 21949), False, 'from utils.imagetools import create_shadow, create_text, create_geopattern_background, shift_color, remove_background, resize_keep_aspect_ratio, get_color, IMAGES_PATH, image_from_url, GeoPattern, color_distance, MAX_COLOR_DIFF\n'), ((4506, 4517), 'numpy.sin', 'np.sin', (['rot'], {}), '(rot)\n', (4512, 4517), True, 'import numpy as np\n'), ((4577, 4588), 'numpy.cos', 'np.cos', (['rot'], {}), '(rot)\n', (4583, 4588), True, 'import numpy as np\n'), ((6587, 6607), 'utils.utilities.check_negative', 'check_negative', (['cosr'], {}), '(cosr)\n', (6601, 6607), False, 'from utils.utilities import get_picture_from_msg, y_n_check, check_negative, normalize_text, get_image, basic_check, test_url\n'), ((21369, 21409), 'functools.partial', 'partial', (['remove_background', 'im'], {}), '(remove_background, im, **kwargs)\n', (21376, 21409), False, 'from functools import partial\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 30 09:08:26 2017
@author: hp
"""
'''
SeriousDlqin2yrsY/N超过90天或更糟的逾期拖欠
RevolvingUtilizationOfUnsecuredLines
无担保放款的循环利用:除了不动产和像车贷那样除以信用额度总和的无分期付款债务的信用卡和个人信用额度总额
NumberOfTime30-59DaysPastDueNotWorse35-59天逾期但不糟糕次数
DebtRatio负债比率
NumberOfOpenCreditLinesAndLoans
开放式信贷和贷款数量,开放式贷款(分期付款如汽车贷款或抵押贷款)和信贷(如信用卡)的数量
NumberOfTimes90DaysLate
90天逾期次数:借款者有90天或更高逾期的次数
NumberRealEstateLoansOrLines
不动产贷款或额度数量:抵押贷款和不动产放款包括房屋净值信贷额度
NumberOfTime60-89DaysPastDueNotWorse
60-89天逾期但不糟糕次数:借款人在在过去两年内有60-89天逾期还款但不糟糕的次数
NumberOfDependents
家属数量:不包括本人在内的家属数量
'''
import re
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.preprocessing as preprocessing
from sklearn.ensemble import RandomForestRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.grid_search import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn import linear_model
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegressionCV
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.cross_validation import cross_val_score
from sklearn import cross_validation
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.metrics import roc_auc_score
from sklearn import tree
from sklearn.cluster import KMeans
from sklearn.tree import export_graphviz
import pydotplus
from IPython.display import Image
from sklearn.neighbors import NearestNeighbors
import math
from scipy import stats
from sklearn.utils.multiclass import type_of_target
from sklearn.cross_validation import train_test_split
# --- Load the train/test CSVs and stack them into one frame ---
# NOTE: the '...' path prefixes are placeholders left by the author.
data_train = pd.read_csv('...cs-training.csv')
data_test = pd.read_csv('...cs-test.csv')
# Drop the first (unnamed row-id) column. .iloc replaces the .ix indexer,
# which was deprecated in pandas 0.20 and later removed.
data_train = data_train.iloc[:, 1:]
data_test = data_test.iloc[:, 1:]
data = pd.concat([data_train, data_test])
# Renumber rows; drop=True avoids creating (and then dropping) an 'index' column.
data.reset_index(drop=True, inplace=True)
# Keep the training set's column order. .reindex(columns=...) replaces the
# removed .reindex_axis API.
data = data.reindex(columns=data_train.columns)
#missing-value imputation: list how many NaNs each affected test column has
data_test[data_test.columns[data_test.isnull().any()].tolist()].isnull().sum()
# --- Impute missing MonthlyIncome with a gradient-boosted regression ---
# Features exclude the label and NumberOfDependents (the other NaN-bearing
# column); rows with known income form the training set.
data_nul = data.drop(['SeriousDlqin2yrs', 'NumberOfDependents'], axis=1)
train = data_nul[data_nul['MonthlyIncome'].notnull()]
test = data_nul[data_nul['MonthlyIncome'].isnull()]
train_x = train.drop(['MonthlyIncome'], axis=1)
train_y = train['MonthlyIncome']
test_x = test.drop(['MonthlyIncome'], axis=1)
gbMod = GradientBoostingRegressor(loss='ls', learning_rate=0.1, n_estimators=30, subsample=1.0, min_samples_split=2,
                                  min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_depth=3, init=None,
                                  random_state=None, max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
                                  warm_start=False)
gbMod.fit(train_x, train_y)
m = gbMod.predict(test_x)
gbMod.feature_importances_  # inspected interactively; no effect on the pipeline
# Income cannot be negative: floor predictions at 0 (replaces the manual
# append loop with np.clip — identical output, vectorized).
new = np.clip(m, 0, None)
data.loc[data['MonthlyIncome'].isnull(), 'MonthlyIncome'] = new
# --- Impute missing NumberOfDependents the same way ---
# MonthlyIncome is already filled at this point, so it can serve as a feature.
data_nul = data.drop(['SeriousDlqin2yrs'], axis=1)
train = data_nul[data_nul['NumberOfDependents'].notnull()]
test = data_nul[data_nul['NumberOfDependents'].isnull()]
train_x = train.drop(['NumberOfDependents'], axis=1)
train_y = train['NumberOfDependents']
test_x = test.drop(['NumberOfDependents'], axis=1)
gbMod.fit(train_x, train_y)  # refit the estimator built for the income imputation
m = gbMod.predict(test_x)
# Dependent counts cannot be negative: floor predictions at 0 (replaces the
# manual append loop with np.clip — identical output, vectorized).
new = np.clip(m, 0, None)
data.loc[data['NumberOfDependents'].isnull(), 'NumberOfDependents'] = new
# --- Derived features ---
# fuzhaijine (pinyin, roughly "debt amount"): DebtRatio is debt/income, so
# multiplying by MonthlyIncome recovers an absolute monthly debt figure.
data['fuzhaijine'] = data['DebtRatio'] * data['MonthlyIncome']
# shifouweiyue (roughly "ever delinquent?"): 1 when any past-due count is
# non-zero. Replaces the original s/s trick, which produced NaN (plus a
# divide warning) for 0/0 and then mapped it back to 0 with a loop — the
# direct comparison gives the same result for these non-negative counts.
total_past_due = (data['NumberOfTime60-89DaysPastDueNotWorse']
                  + data['NumberOfTimes90DaysLate']
                  + data['NumberOfTime30-59DaysPastDueNotWorse'])
data['shifouweiyue'] = (total_past_due > 0).astype(int)
# Rows with a missing label form the held-out (competition test) set.
data_test = data[data['SeriousDlqin2yrs'].isnull()]
#采样
class Smote:
    """Basic SMOTE oversampler.

    Generates N% synthetic minority-class rows by interpolating each input
    row towards one of its k nearest neighbours (Euclidean, via sklearn's
    NearestNeighbors).
    """

    def __init__(self, samples, N=10, k=3):
        self.n_samples, self.n_attrs = samples.shape
        self.N = N                # oversampling amount in percent
        self.k = k                # neighbourhood size
        self.samples = samples
        self.newindex = 0         # write cursor into self.synthetic

    def over_sampling(self):
        """Return an array of n_samples * (N // 100) synthetic rows."""
        per_sample = int(self.N / 100)  # synthetic rows per original row
        self.synthetic = np.zeros((self.n_samples * per_sample, self.n_attrs))
        neighbors = NearestNeighbors(n_neighbors=self.k).fit(self.samples)
        print('neighbors', neighbors)
        for row in range(len(self.samples)):
            nnarray = neighbors.kneighbors(self.samples[row].reshape(1, -1),
                                           return_distance=False)[0]
            self._populate(per_sample, row, nnarray)
        return self.synthetic

    def _populate(self, N, i, nnarray):
        """Write N interpolated copies of sample i into self.synthetic."""
        for _ in range(N):
            pick = random.randint(0, self.k - 1)
            delta = self.samples[nnarray[pick]] - self.samples[i]
            step = random.random()  # one shared gap for all attributes
            self.synthetic[self.newindex] = self.samples[i] + step * delta
            self.newindex += 1
# --- Build a rebalanced training set with SMOTE + undersampling ---
# NOTE(review): the label column SeriousDlqin2yrs is part of the array fed to
# SMOTE, so labels are interpolated along with the features (they are rounded
# back to integers below) — confirm this is intended.
a=np.array(data.iloc[:len(data_train),:][data['SeriousDlqin2yrs']==1])
s=Smote(a,N=500)  # 500% oversampling: five synthetic rows per minority row
data_train_sampling=s.over_sampling()
data_train_sampling=pd.DataFrame(data_train_sampling,columns=list(data.columns))
# Randomly undersample the negative (non-delinquent) class to 60000 rows.
data_train_samplingz=(data.iloc[:len(data_train),:][data_train['SeriousDlqin2yrs']==0]).sample(n=60000)
# Final frame: synthetic minority + sampled majority + original minority rows.
train_data_sampling=pd.concat([data_train_sampling,data_train_samplingz,data.iloc[:len(data_train),:][data_train['SeriousDlqin2yrs']==1]])
# Count-valued columns must be integers again after SMOTE interpolation.
train_data_sampling[['SeriousDlqin2yrs','age','NumberOfTime30-59DaysPastDueNotWorse','NumberOfOpenCreditLinesAndLoans','NumberOfTimes90DaysLate',
                   'NumberRealEstateLoansOrLines','NumberOfTime60-89DaysPastDueNotWorse','NumberOfDependents']] = train_data_sampling[['SeriousDlqin2yrs','age','NumberOfTime30-59DaysPastDueNotWorse','NumberOfOpenCreditLinesAndLoans','NumberOfTimes90DaysLate',
                   'NumberRealEstateLoansOrLines','NumberOfTime60-89DaysPastDueNotWorse','NumberOfDependents']].round()
train_data_sampling['SeriousDlqin2yrs'].value_counts()
train_data_sampling.to_csv('C:/Users/hp/Desktop/在家学习/信用评分/sampling.csv')
cut_data=pd.DataFrame()  # collector frame for the binned variables
# --- Age: drop invalid zero ages, then bin along KMeans-suggested cut points ---
# Bug fix: the original dropped data[data['age']==0].index, but those labels
# belong to `data`, while train_data_sampling is a resampled, reconcatenated
# frame whose (overlapping, partly duplicated) index need not line up — so the
# wrong rows could be removed. Filter on this frame's own values instead.
train_data_sampling = train_data_sampling[train_data_sampling['age'] != 0]
train_data_sampling.reset_index(drop=True, inplace=True)
# Cluster ages only to suggest cut points; the centres feed the manual bins below.
k_age = KMeans(n_clusters=9, random_state=4, init='random')
k_age.fit_transform(train_data_sampling[['age']])
k_age.cluster_centers_  # centres observed: 28 35 40 47 54 61 68 77 86
cut_age = pd.cut(train_data_sampling.age, bins=[0, 28, 35, 40, 47, 54, 61, 68, 77, 86, 110])
pd.crosstab(train_data_sampling.loc[:, 'age'], train_data_sampling.loc[:, 'SeriousDlqin2yrs'])
# RevolvingUtilizationOfUnsecuredLines: kept as a raw variable (its
# correlation is low); the string block below is abandoned KMeans exploration.
'''
k_Rev=KMeans(n_clusters=4,random_state=4,init='random')
k_Rev.fit_transform(data[['RevolvingUtilizationOfUnsecuredLines']])
k_Rev.cluster_centers_
'''
# Equal-frequency binning into 5 quantile buckets.
cut_Rev=pd.qcut(train_data_sampling.RevolvingUtilizationOfUnsecuredLines,q=5)
cut_Rev.value_counts()
# --- NumberOfTime30-59DaysPastDueNotWorse ---
# The values 96 and 98 are excluded when computing the column maximum and then
# replaced by it (the script treats them as out-of-range codes, not counts).
# The local was renamed from `max`, which shadowed the builtin.
col_3059 = 'NumberOfTime30-59DaysPastDueNotWorse'
cap_3059 = train_data_sampling.loc[(train_data_sampling[col_3059] != 98) &
                                   (train_data_sampling[col_3059] != 96), col_3059].max()
# Cap the coded values (comprehension replaces the manual append loop).
train_data_sampling[col_3059] = [cap_3059 if (val == 98) | (val == 96) else val
                                 for val in train_data_sampling[col_3059]]
# Fixed count bins: 0 / 1 / 2 / 3-4 / 5+.
cut_NumberOf3059Time = pd.cut(train_data_sampling[col_3059], bins=[-np.inf, 0, 1, 2, 4, np.inf])
#cut_NumberOf3059Time=pd.qcut(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse'],q=5)
cut_NumberOf3059Time.value_counts()
#DebtRatio
# Equal-frequency quantile bins for the continuous features.
cut_ratio=pd.qcut(train_data_sampling.DebtRatio,q=5)
cut_ratio.value_counts()
#MonthlyIncome
cut_income=pd.qcut(train_data_sampling.MonthlyIncome,q=10)
cut_income.value_counts()
#NumberOfOpenCreditLinesAndLoans
train_data_sampling['NumberOfOpenCreditLinesAndLoans'].value_counts()
cut_loans=pd.qcut(train_data_sampling['NumberOfOpenCreditLinesAndLoans'],q=10)
cut_loans.value_counts()
#NumberOfTimes90DaysLate
# Same sentinel treatment as the 30-59 days column: 96/98 are coded values and
# are replaced with the largest genuine observation.
# FIX: the original rebound the builtin name `max` and used an append loop.
cap_90 = train_data_sampling.loc[(train_data_sampling['NumberOfTimes90DaysLate']!=98)&(train_data_sampling['NumberOfTimes90DaysLate']!=96),'NumberOfTimes90DaysLate'].max()
train_data_sampling['NumberOfTimes90DaysLate'] = [cap_90 if ((val == 98) | (val == 96)) else val for val in train_data_sampling['NumberOfTimes90DaysLate']]
cut_NumberOf90time=pd.cut(train_data_sampling['NumberOfTimes90DaysLate'],bins=[-np.inf,0,1,2,4,np.inf])
cut_NumberOf90time.value_counts()
#NumberRealEstateLoansOrLines
# Fixed-edge bins for the remaining discrete features; fuzhaijine gets quantile bins.
cut_EstateLoansOrLines=pd.cut(train_data_sampling['NumberRealEstateLoansOrLines'],bins=[-np.inf,0,1,2,4,np.inf])
cut_EstateLoansOrLines.value_counts()
#NumberOfTime60-89DaysPastDueNotWorse
cut_NumberOfTime6089Days=pd.cut(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse'],bins=[-np.inf,0,1,2,4,np.inf])
cut_NumberOfTime6089Days.value_counts()
#NumberOfDependents
cut_Dependents=pd.cut(train_data_sampling['NumberOfDependents'],bins=[-np.inf,0,1,2,np.inf])
cut_Dependents.value_counts()
#fuzhaijine
cut_fuzhaijine=pd.qcut(train_data_sampling['fuzhaijine'],q=5)
cut_fuzhaijine.value_counts()
#shifouweiyue
# Binarize the (presumably interpolated) shifouweiyue column at the 0.5 threshold:
# values below 0.5 become 0, everything else becomes 1.
train_data_sampling.shifouweiyue = [0 if value < 0.5 else 1 for value in train_data_sampling.shifouweiyue]
# --- Assemble a parallel frame holding the binned (categorical) version of each feature ---
train_data_sampling_cut=train_data_sampling.copy()
train_data_sampling_cut['age']=cut_age
train_data_sampling_cut['RevolvingUtilizationOfUnsecuredLines']=cut_Rev
train_data_sampling_cut['NumberOfTime30-59DaysPastDueNotWorse']=cut_NumberOf3059Time
train_data_sampling_cut['DebtRatio']=cut_ratio
train_data_sampling_cut['MonthlyIncome']=cut_income
train_data_sampling_cut['NumberOfOpenCreditLinesAndLoans']=cut_loans
train_data_sampling_cut['NumberOfTimes90DaysLate']=cut_NumberOf90time
train_data_sampling_cut['NumberRealEstateLoansOrLines']=cut_EstateLoansOrLines
train_data_sampling_cut['NumberOfTime60-89DaysPastDueNotWorse']=cut_NumberOfTime6089Days
train_data_sampling_cut['NumberOfDependents']=cut_Dependents
train_data_sampling_cut['fuzhaijine']=cut_fuzhaijine
train_data_sampling_cut['shifouweiyue']=train_data_sampling['shifouweiyue']
train_data_sampling_cut['SeriousDlqin2yrs'].value_counts()
'''
tree1=tree.DecisionTreeClassifier(max_depth=6,min_samples_split=1000)
#data_tree=pd.concat([train_data_sampling['age'],train_data_sampling['SeriousDlqin2yrs']],axis=1)
tree2=tree1.fit(train_data_sampling[['MonthlyIncome']],train_data_sampling['SeriousDlqin2yrs'])
dot_data = tree.export_graphviz(tree2, out_file=None,
feature_names='MonthlyIncome',
class_names='SeriousDlqin2yrs',
filled=True, rounded=True,
special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data)
#Image(graph.create_png())
graph.write_pdf('C:/Users/hp/Desktop/在家学习/信用评分/w.pdf')
'''
# Compute WOE: totals of good (label 0) and bad (label 1) samples used by the helpers below.
totalgood = len(train_data_sampling_cut[train_data_sampling_cut['SeriousDlqin2yrs']==0])
totalbad = len(train_data_sampling_cut[train_data_sampling_cut['SeriousDlqin2yrs']==1])
def getwoe(a, p, q):
    """Weight of Evidence for the bin p < a <= q of feature series *a*.

    Uses the module-level train_data_sampling frame and totalgood/totalbad counts.
    """
    in_bin = (a > p) & (a <= q)
    good = len(train_data_sampling[in_bin & (train_data_sampling['SeriousDlqin2yrs'] == 0)])
    bad = len(train_data_sampling[in_bin & (train_data_sampling['SeriousDlqin2yrs'] == 1)])
    return np.log((bad / totalbad) / (good / totalgood))
def getgoodlen(a, p, q):
    """Fraction of all good (label 0) samples whose value of *a* falls in (p, q]."""
    in_bin = (a > p) & (a <= q)
    good = len(train_data_sampling[in_bin & (train_data_sampling['SeriousDlqin2yrs'] == 0)])
    return good / totalgood
def getbadlen(a,p,q):
    """Fraction of all bad (label 1) samples whose value of *a* falls in (p, q]."""
    bad=len(train_data_sampling[(a>p)&(a<=q)&(train_data_sampling['SeriousDlqin2yrs']==1)])
    # BUG FIX: the bad count was previously divided by totalgood; the bad-rate
    # share must be taken over totalbad (mirroring getgoodlen and getwoe).
    badlen=bad/totalbad
    return badlen
#data.loc[(data1[data1['MonthlyIncome']>9000]).index,'MonthlyIncome']
# --- age: compute per-bin WOE, map raw ages to their bin's WOE, then accumulate IV ---
woe_train_data=train_data_sampling.copy()
getwoe(train_data_sampling['age'],0,28)
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['age'])
woe_age1=getwoe(train_data_sampling['age'],0,28)
woe_age2=getwoe(train_data_sampling['age'],28,35)
woe_age3=getwoe(train_data_sampling['age'],35,40)
woe_age4=getwoe(train_data_sampling['age'],40,47)
woe_age5=getwoe(train_data_sampling['age'],47,54)
woe_age6=getwoe(train_data_sampling['age'],54,61)
woe_age7=getwoe(train_data_sampling['age'],61,68)
woe_age8=getwoe(train_data_sampling['age'],68,77)
woe_age9=getwoe(train_data_sampling['age'],77,86)
woe_age10=getwoe(train_data_sampling['age'],86,110)
woe_age=[woe_age1,woe_age2,woe_age3,woe_age4,woe_age5,woe_age6,woe_age7,woe_age8,woe_age9,woe_age10]
woe_train_data.loc[train_data_sampling['age']<=28,'age']=woe_age1
woe_train_data.loc[(train_data_sampling['age']>28)&(train_data_sampling['age']<=35),'age']=woe_age2
woe_train_data.loc[(train_data_sampling['age']>35)&(train_data_sampling['age']<=40),'age']=woe_age3
woe_train_data.loc[(train_data_sampling['age']>40)&(train_data_sampling['age']<=47),'age']=woe_age4
woe_train_data.loc[(train_data_sampling['age']>47)&(train_data_sampling['age']<=54),'age']=woe_age5
woe_train_data.loc[(train_data_sampling['age']>54)&(train_data_sampling['age']<=61),'age']=woe_age6
woe_train_data.loc[(train_data_sampling['age']>61)&(train_data_sampling['age']<=68),'age']=woe_age7
woe_train_data.loc[(train_data_sampling['age']>68)&(train_data_sampling['age']<=77),'age']=woe_age8
woe_train_data.loc[(train_data_sampling['age']>77)&(train_data_sampling['age']<=86),'age']=woe_age9
# NOTE(review): upper bound 111 here vs 110 in cut_age / woe_age10 above — confirm intended.
woe_train_data.loc[(train_data_sampling['age']>86)&(train_data_sampling['age']<=111),'age']=woe_age10
woe_train_data.age.value_counts()
# IV contribution per bin: (bad share - good share) * WOE.
iv_age1=(getbadlen(train_data_sampling['age'],0,28)-getgoodlen(train_data_sampling['age'],0,28))*woe_age1
iv_age2=(getbadlen(train_data_sampling['age'],28,35)-getgoodlen(train_data_sampling['age'],28,35))*woe_age2
iv_age3=(getbadlen(train_data_sampling['age'],35,40)-getgoodlen(train_data_sampling['age'],35,40))*woe_age3
iv_age4=(getbadlen(train_data_sampling['age'],40,47)-getgoodlen(train_data_sampling['age'],40,47))*woe_age4
iv_age5=(getbadlen(train_data_sampling['age'],47,54)-getgoodlen(train_data_sampling['age'],47,54))*woe_age5
iv_age6=(getbadlen(train_data_sampling['age'],54,61)-getgoodlen(train_data_sampling['age'],54,61))*woe_age6
iv_age7=(getbadlen(train_data_sampling['age'],61,68)-getgoodlen(train_data_sampling['age'],61,68))*woe_age7
iv_age8=(getbadlen(train_data_sampling['age'],68,77)-getgoodlen(train_data_sampling['age'],68,77))*woe_age8
iv_age9=(getbadlen(train_data_sampling['age'],77,86)-getgoodlen(train_data_sampling['age'],77,86))*woe_age9
iv_age10=(getbadlen(train_data_sampling['age'],86,110)-getgoodlen(train_data_sampling['age'],86,110))*woe_age10
iv_age=iv_age1+iv_age2+iv_age3+iv_age4+iv_age5+iv_age6+iv_age7+iv_age8+iv_age9+iv_age10#0.25819490968759973
#RevolvingUtilizationOfUnsecuredLines
# WOE computed from hard-coded per-bin bad/good counts; these counts are read off
# the crosstab below and are valid only for this exact sample — TODO confirm they match.
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['RevolvingUtilizationOfUnsecuredLines'])
woe_Revolving1=np.log((3198/totalbad)/(20834/totalgood))
woe_Revolving2=np.log((6745/totalbad)/(17285/totalgood))
woe_Revolving3=np.log((13531/totalbad)/(10500/totalgood))
woe_Revolving4=np.log((18043/totalbad)/(5989/totalgood))
woe_Revolving5=np.log((18639/totalbad)/(5391/totalgood))
woe_train_data['RevolvingUtilizationOfUnsecuredLines'].max()
# Map raw values to the WOE of the quantile bin they fall into (edges from cut_Rev).
woe_train_data.loc[(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']<=0.0535)&(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']>=0),'RevolvingUtilizationOfUnsecuredLines']=woe_Revolving1
woe_train_data.loc[(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']>0.0535)&(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']<=0.281),'RevolvingUtilizationOfUnsecuredLines']=woe_Revolving2
woe_train_data.loc[(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']>0.281)&(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']<=0.652),'RevolvingUtilizationOfUnsecuredLines']=woe_Revolving3
woe_train_data.loc[(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']>0.652)&(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']<=0.967),'RevolvingUtilizationOfUnsecuredLines']=woe_Revolving4
woe_train_data.loc[(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']>0.967)&(train_data_sampling['RevolvingUtilizationOfUnsecuredLines']<=60000),'RevolvingUtilizationOfUnsecuredLines']=woe_Revolving5
woe_train_data['RevolvingUtilizationOfUnsecuredLines'].value_counts()
# IV from the same hard-coded counts.
iv_Revolv1=(3198/totalbad-20834/totalgood)*woe_Revolving1
iv_Revolv2=(6745/totalbad-17285/totalgood)*woe_Revolving2
iv_Revolv3=(13531/totalbad-10500/totalgood)*woe_Revolving3
iv_Revolv4=(18043/totalbad-5989/totalgood)*woe_Revolving4
iv_Revolv5=(18639/totalbad-5391/totalgood)*woe_Revolving5
iv_Revolv=iv_Revolv1+iv_Revolv2+iv_Revolv3+iv_Revolv4+iv_Revolv5#1.2229730587073095
#NumberOfTime30-59DaysPastDueNotWorse
# WOE/IV for the capped 30-59-days-past-due counts, bins (-inf,0],(0,1],(1,2],(2,4],(4,97].
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['NumberOfTime30-59DaysPastDueNotWorse'])
woe_30591=np.log((28490/totalbad)/(51935/totalgood))
woe_30592=np.log((16626/totalbad)/(5743/totalgood))
woe_30593=np.log((7862/totalbad)/(1460/totalgood))
woe_30594=np.log((5133/totalbad)/(670/totalgood))
woe_30595=np.log((2045/totalbad)/(191/totalgood))
woe_train_data['NumberOfTime30-59DaysPastDueNotWorse'].max()
woe_train_data.loc[train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']==0,'NumberOfTime30-59DaysPastDueNotWorse']=woe_30591
woe_train_data.loc[(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']==1),'NumberOfTime30-59DaysPastDueNotWorse']=woe_30592
woe_train_data.loc[(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']>1)&(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']<=2),'NumberOfTime30-59DaysPastDueNotWorse']=woe_30593
woe_train_data.loc[(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']>2)&(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']<=4),'NumberOfTime30-59DaysPastDueNotWorse']=woe_30594
woe_train_data.loc[(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']>4)&(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']<=97),'NumberOfTime30-59DaysPastDueNotWorse']=woe_30595
woe_train_data['NumberOfTime30-59DaysPastDueNotWorse'].value_counts()
iv_30591=(28490/totalbad-51935/totalgood)*woe_30591
iv_30592=(16626/totalbad-5743/totalgood)*woe_30592
iv_30593=(7862/totalbad-1460/totalgood)*woe_30593
iv_30594=(5133/totalbad-670/totalgood)*woe_30594
iv_30595=(2045/totalbad-191/totalgood)*woe_30595
iv_3059=iv_30591+iv_30592+iv_30593+iv_30594+iv_30595#0.83053544388188838
#DebtRatio
# WOE/IV for DebtRatio with hard-coded per-bin counts from the crosstab below.
woe_train_data['DebtRatio'].max()
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['DebtRatio'])
woe_Ratio1=np.log((10577/totalbad)/(13454/totalgood))
woe_Ratio2=np.log((11320/totalbad)/(12711/totalgood))
woe_Ratio3=np.log((12385/totalbad)/(11646/totalgood))
woe_Ratio4=np.log((14783/totalbad)/(9251/totalgood))
woe_Ratio5=np.log((11091/totalbad)/(12937/totalgood))
# NOTE(review): `=-woe_Ratio1` negates the first bin's WOE, unlike every other
# feature; the same negation appears in the test-set mapping below, so it may be
# deliberate — confirm before changing.
woe_train_data.loc[train_data_sampling['DebtRatio']<=0.153,'DebtRatio']=-woe_Ratio1
woe_train_data.loc[(train_data_sampling['DebtRatio']>0.153)&(train_data_sampling['DebtRatio']<=0.311),'DebtRatio']=woe_Ratio2
woe_train_data.loc[(train_data_sampling['DebtRatio']>0.311)&(train_data_sampling['DebtRatio']<=0.5),'DebtRatio']=woe_Ratio3
woe_train_data.loc[(train_data_sampling['DebtRatio']>0.5)&(train_data_sampling['DebtRatio']<=1.49),'DebtRatio']=woe_Ratio4
woe_train_data.loc[(train_data_sampling['DebtRatio']>1.49)&(train_data_sampling['DebtRatio']<=400000),'DebtRatio']=woe_Ratio5
woe_train_data['DebtRatio'].value_counts()
iv_Ratio1=(10577/totalbad-13454/totalgood)*woe_Ratio1
iv_Ratio2=(11320/totalbad-12711/totalgood)*woe_Ratio2
iv_Ratio3=(12385/totalbad-11646/totalgood)*woe_Ratio3
iv_Ratio4=(14783/totalbad-9251/totalgood)*woe_Ratio4
iv_Ratio5=(11091/totalbad-12937/totalgood)*woe_Ratio5
iv_Ratio=iv_Ratio1+iv_Ratio2+iv_Ratio3+iv_Ratio4+iv_Ratio5#0.062844824089719628
#MonthlyIncome
# WOE/IV for MonthlyIncome over the 10 quantile bins (edges from cut_income).
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['MonthlyIncome'])
woe_incom1=np.log((6134/totalbad)/(5886/totalgood))
woe_incom2=np.log((5942/totalbad)/(6185/totalgood))
woe_incom3=np.log((7055/totalbad)/(5243/totalgood))
woe_incom4=np.log((7016/totalbad)/(5605/totalgood))
woe_incom5=np.log((6120/totalbad)/(4898/totalgood))
woe_incom6=np.log((6384/totalbad)/(5626/totalgood))
woe_incom7=np.log((6167/totalbad)/(5860/totalgood))
woe_incom8=np.log((5555/totalbad)/(6452/totalgood))
woe_incom9=np.log((5145/totalbad)/(6868/totalgood))
woe_incom10=np.log((4638/totalbad)/(7376/totalgood))
woe_train_data.loc[train_data_sampling['MonthlyIncome']<=1140.342,'MonthlyIncome']=woe_incom1
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>1140.342)&(train_data_sampling['MonthlyIncome']<=1943.438),'MonthlyIncome']=woe_incom2
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>1943.438)&(train_data_sampling['MonthlyIncome']<=2800.0),'MonthlyIncome']=woe_incom3
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>2800.0)&(train_data_sampling['MonthlyIncome']<=3500.0),'MonthlyIncome']=woe_incom4
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>3500.0)&(train_data_sampling['MonthlyIncome']<=4225.0),'MonthlyIncome']=woe_incom5
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>4225.0)&(train_data_sampling['MonthlyIncome']<=5125.153),'MonthlyIncome']=woe_incom6
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>5125.153)&(train_data_sampling['MonthlyIncome']<=6184.002),'MonthlyIncome']=woe_incom7
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>6184.002)&(train_data_sampling['MonthlyIncome']<=7675.0),'MonthlyIncome']=woe_incom8
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>7675.0)&(train_data_sampling['MonthlyIncome']<=10166.0),'MonthlyIncome']=woe_incom9
woe_train_data.loc[(train_data_sampling['MonthlyIncome']>10166.0),'MonthlyIncome']=woe_incom10
woe_train_data.MonthlyIncome.value_counts()
iv_incom1=(6134/totalbad-5886/totalgood)*woe_incom1
iv_incom2=(5942/totalbad-6185/totalgood)*woe_incom2
iv_incom3=(7055/totalbad-5243/totalgood)*woe_incom3
iv_incom4=(7016/totalbad-5605/totalgood)*woe_incom4
iv_incom5=(6120/totalbad-4898/totalgood)*woe_incom5
iv_incom6=(6384/totalbad-5626/totalgood)*woe_incom6
iv_incom7=(6167/totalbad-5860/totalgood)*woe_incom7
iv_incom8=(5555/totalbad-6452/totalgood)*woe_incom8
iv_incom9=(5145/totalbad-6868/totalgood)*woe_incom9
iv_incom10=(4638/totalbad-7376/totalgood)*woe_incom10
iv_incom=iv_incom1+iv_incom2+iv_incom3+iv_incom4+iv_incom5+iv_incom6+iv_incom7+iv_incom8+iv_incom9+iv_incom10#0.05260337229962106
#NumberOfOpenCreditLinesAndLoans
# WOE/IV for NumberOfOpenCreditLinesAndLoans (10 quantile bins).
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['NumberOfOpenCreditLinesAndLoans'])
woe_Loans1=np.log((9379/totalbad)/(4883/totalgood))
woe_Loans2=np.log((8800/totalbad)/(8259/totalgood))
woe_Loans3=np.log((5067/totalbad)/(5146/totalgood))
woe_Loans4=np.log((4660/totalbad)/(5509/totalgood))
woe_Loans5=np.log((4522/totalbad)/(5302/totalgood))
woe_Loans6=np.log((8005/totalbad)/(9696/totalgood))
woe_Loans7=np.log((3590/totalbad)/(3916/totalgood))
woe_Loans8=np.log((5650/totalbad)/(6123/totalgood))
woe_Loans9=np.log((5409/totalbad)/(5627/totalgood))
woe_Loans10=np.log((5074/totalbad)/(5538/totalgood))
# NOTE(review): these conditions test woe_train_data rather than train_data_sampling
# (as the other features do); equivalent only because this column has not been
# overwritten in woe_train_data before this point.
woe_train_data.loc[woe_train_data['NumberOfOpenCreditLinesAndLoans']<=2.0,'NumberOfOpenCreditLinesAndLoans']=woe_Loans1
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>2.0)&(woe_train_data['NumberOfOpenCreditLinesAndLoans']<=4.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans2
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>4.0)&(woe_train_data['NumberOfOpenCreditLinesAndLoans']<=5.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans3
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>5.0)&(woe_train_data['NumberOfOpenCreditLinesAndLoans']<=6.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans4
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>6.0)&(woe_train_data['NumberOfOpenCreditLinesAndLoans']<=7.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans5
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>7.0)&(woe_train_data['NumberOfOpenCreditLinesAndLoans']<=9.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans6
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>9.0)&(woe_train_data['NumberOfOpenCreditLinesAndLoans']<=10.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans7
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>10.0)&(woe_train_data['NumberOfOpenCreditLinesAndLoans']<=12.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans8
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>12.0)&(woe_train_data['NumberOfOpenCreditLinesAndLoans']<=15.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans9
woe_train_data.loc[(woe_train_data['NumberOfOpenCreditLinesAndLoans']>15.0),'NumberOfOpenCreditLinesAndLoans']=woe_Loans10
woe_train_data.NumberOfOpenCreditLinesAndLoans.value_counts()
iv_Loans1=(9379/totalbad-4883/totalgood)*woe_Loans1
iv_Loans2=(8800/totalbad-8259/totalgood)*woe_Loans2
iv_Loans3=(5067/totalbad-5146/totalgood)*woe_Loans3
iv_Loans4=(4660/totalbad-5509/totalgood)*woe_Loans4
iv_Loans5=(4522/totalbad-5302/totalgood)*woe_Loans5
iv_Loans6=(8005/totalbad-9696/totalgood)*woe_Loans6
iv_Loans7=(3590/totalbad-3916/totalgood)*woe_Loans7
iv_Loans8=(5650/totalbad-6123/totalgood)*woe_Loans8
iv_Loans9=(5409/totalbad-5627/totalgood)*woe_Loans9
iv_Loans10=(5074/totalbad-5538/totalgood)*woe_Loans10
iv_Loans=iv_Loans1+iv_Loans2+iv_Loans3+iv_Loans4+iv_Loans5+iv_Loans6+iv_Loans7+iv_Loans8+iv_Loans9+iv_Loans10#0.061174706202253015
#NumberOfTimes90DaysLate
# WOE/IV for the capped 90-days-late counts, bins (-inf,0],(0,1],(1,2],(2,4],(4,97].
woe_train_data['NumberOfTimes90DaysLate'].max()
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['NumberOfTimes90DaysLate'])
# NOTE(review): woe_901 uses 38146 for both the bad and the good count — verify
# against the crosstab above that both bin counts really are 38146.
woe_901=np.log((38146/totalbad)/(38146/totalgood))
woe_902=np.log((12389/totalbad)/(1521/totalgood))
woe_903=np.log((4774/totalbad)/(344/totalgood))
woe_904=np.log((3085/totalbad)/(179/totalgood))
woe_905=np.log((1762/totalbad)/(95/totalgood))
woe_train_data.loc[train_data_sampling['NumberOfTimes90DaysLate']==0.0,'NumberOfTimes90DaysLate']=woe_901
woe_train_data.loc[(train_data_sampling['NumberOfTimes90DaysLate']==1.0),'NumberOfTimes90DaysLate']=woe_902
woe_train_data.loc[(train_data_sampling['NumberOfTimes90DaysLate']>1.0)&(train_data_sampling['NumberOfTimes90DaysLate']<=2.0),'NumberOfTimes90DaysLate']=woe_903
woe_train_data.loc[(train_data_sampling['NumberOfTimes90DaysLate']>2.0)&(train_data_sampling['NumberOfTimes90DaysLate']<=4.0),'NumberOfTimes90DaysLate']=woe_904
woe_train_data.loc[(train_data_sampling['NumberOfTimes90DaysLate']>4.0)&(train_data_sampling['NumberOfTimes90DaysLate']<=97),'NumberOfTimes90DaysLate']=woe_905
woe_train_data.NumberOfTimes90DaysLate.value_counts()
# BUG FIX: iv_901 previously used 4883 as the good count — that number belongs to
# the first NumberOfOpenCreditLinesAndLoans bin; this bin's own good count (as
# used in woe_901) is 38146. The trailing IV comment predates the fix.
iv_901=(38146/totalbad-38146/totalgood)*woe_901
iv_902=(12389/totalbad-1521/totalgood)*woe_902
iv_903=(4774/totalbad-344/totalgood)*woe_903
iv_904=(3085/totalbad-179/totalgood)*woe_904
iv_905=(1762/totalbad-95/totalgood)*woe_905
iv_90=iv_901+iv_902+iv_903+iv_904+iv_905#0.55829418354740168
#NumberRealEstateLoansOrLines
# WOE/IV for NumberRealEstateLoansOrLines, bins (-inf,0],(0,1],(1,2],(2,4],(4,54].
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['NumberRealEstateLoansOrLines'])
woe_Lines1=np.log((26932/totalbad)/(22100/totalgood))
woe_Lines2=np.log((17936/totalbad)/(21270/totalgood))
woe_Lines3=np.log((10526/totalbad)/(12656/totalgood))
woe_Lines4=np.log((3621/totalbad)/(3429/totalgood))
woe_Lines5=np.log((1141/totalbad)/(544/totalgood))
woe_train_data.loc[train_data_sampling['NumberRealEstateLoansOrLines']<=0.0,'NumberRealEstateLoansOrLines']=woe_Lines1
woe_train_data.loc[(train_data_sampling['NumberRealEstateLoansOrLines']>0.0)&(train_data_sampling['NumberRealEstateLoansOrLines']<=1.0),'NumberRealEstateLoansOrLines']=woe_Lines2
woe_train_data.loc[(train_data_sampling['NumberRealEstateLoansOrLines']>1.0)&(train_data_sampling['NumberRealEstateLoansOrLines']<=2.0),'NumberRealEstateLoansOrLines']=woe_Lines3
woe_train_data.loc[(train_data_sampling['NumberRealEstateLoansOrLines']>2.0)&(train_data_sampling['NumberRealEstateLoansOrLines']<=4.0),'NumberRealEstateLoansOrLines']=woe_Lines4
woe_train_data.loc[(train_data_sampling['NumberRealEstateLoansOrLines']>4.0)&(train_data_sampling['NumberRealEstateLoansOrLines']<=54),'NumberRealEstateLoansOrLines']=woe_Lines5
woe_train_data.NumberRealEstateLoansOrLines.value_counts()
iv_Lines1=(26932/totalbad-22100/totalgood)*woe_Lines1
iv_Lines2=(17936/totalbad-21270/totalgood)*woe_Lines2
iv_Lines3=(10526/totalbad-12656/totalgood)*woe_Lines3
iv_Lines4=(3621/totalbad-3429/totalgood)*woe_Lines4
iv_Lines5=(1141/totalbad-544/totalgood)*woe_Lines5
iv_Lines=iv_Lines1+iv_Lines2+iv_Lines3+iv_Lines4+iv_Lines5#0.039425418770289836
woe_train_data['NumberRealEstateLoansOrLines'].max()
#NumberOfTime60-89DaysPastDueNotWorse
# WOE/IV for 60-89-days-past-due counts, bins (-inf,0],(0,1],(1,2],(2,4],(4,98].
woe_train_data['NumberOfTime60-89DaysPastDueNotWorse'].min()
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['NumberOfTime60-89DaysPastDueNotWorse'])
woe_60891=np.log((42678/totalbad)/(57972/totalgood))
woe_60892=np.log((12210/totalbad)/(1653/totalgood))
woe_60893=np.log((3103/totalbad)/(248/totalgood))
woe_60894=np.log((1117/totalbad)/(77/totalgood))
woe_60895=np.log((1048/totalbad)/(49/totalgood))
woe_train_data.loc[(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']<=0.0),'NumberOfTime60-89DaysPastDueNotWorse']=woe_60891
woe_train_data.loc[(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']>0.0)&(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']<=1.0),'NumberOfTime60-89DaysPastDueNotWorse']=woe_60892
woe_train_data.loc[(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']>1.0)&(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']<=2.0),'NumberOfTime60-89DaysPastDueNotWorse']=woe_60893
woe_train_data.loc[(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']>2.0)&(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']<=4.0),'NumberOfTime60-89DaysPastDueNotWorse']=woe_60894
woe_train_data.loc[(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']>4.0)&(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']<=98),'NumberOfTime60-89DaysPastDueNotWorse']=woe_60895
woe_train_data['NumberOfTime60-89DaysPastDueNotWorse'].value_counts()
# BUG FIX: iv_60891/iv_60892 previously used the good counts 22100/21270, which
# belong to the NumberRealEstateLoansOrLines bins; each IV term must use the same
# bin counts as its WOE (57972 and 1653). The trailing IV comment predates the fix.
iv_60891=(42678/totalbad-57972/totalgood)*woe_60891
iv_60892=(12210/totalbad-1653/totalgood)*woe_60892
iv_60893=(3103/totalbad-248/totalgood)*woe_60893
iv_60894=(1117/totalbad-77/totalgood)*woe_60894
iv_60895=(1048/totalbad-49/totalgood)*woe_60895
iv_6089=iv_60891+iv_60892+iv_60893+iv_60894+iv_60895#-0.19122287642712696
#NumberOfDependents
# WOE/IV for NumberOfDependents: bins {0}, {1}, {2}, (2,20].
woe_train_data['NumberOfDependents'].max()
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['NumberOfDependents'])
woe_Dependents1=np.log((29464/totalbad)/(36205/totalgood))
woe_Dependents2=np.log((14313/totalbad)/(10825/totalgood))
woe_Dependents3=np.log((9926/totalbad)/(7763/totalgood))
woe_Dependents4=np.log((6453/totalbad)/(5206/totalgood))
woe_train_data.loc[(train_data_sampling['NumberOfDependents']==0.0),'NumberOfDependents']=woe_Dependents1
woe_train_data.loc[(train_data_sampling['NumberOfDependents']==1.0),'NumberOfDependents']=woe_Dependents2
woe_train_data.loc[(train_data_sampling['NumberOfDependents']==2.0),'NumberOfDependents']=woe_Dependents3
woe_train_data.loc[(train_data_sampling['NumberOfDependents']>2.0)&(train_data_sampling['NumberOfDependents']<=20),'NumberOfDependents']=woe_Dependents4
woe_train_data['NumberOfDependents'].value_counts()
iv_Dependents1=(29464/totalbad-36205/totalgood)*woe_Dependents1
iv_Dependents2=(14313/totalbad-10825/totalgood)*woe_Dependents2
iv_Dependents3=(9926/totalbad-7763/totalgood)*woe_Dependents3
iv_Dependents4=(6453/totalbad-5206/totalgood)*woe_Dependents4
iv_Dependents=iv_Dependents1+iv_Dependents2+iv_Dependents3+iv_Dependents4# 0.05263266442133803
#fuzhaijine
# WOE/IV for the derived debt-amount feature over its 5 quantile bins.
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['fuzhaijine'])
woe_fuzhaijine1=getwoe(train_data_sampling['fuzhaijine'],-0.001,538.43)
woe_fuzhaijine2=getwoe(train_data_sampling['fuzhaijine'],538.43,1495.849)
woe_fuzhaijine3=getwoe(train_data_sampling['fuzhaijine'],1495.849,2752.647)
woe_fuzhaijine4=getwoe(train_data_sampling['fuzhaijine'],2752.647,6402.004)
woe_fuzhaijine5=getwoe(train_data_sampling['fuzhaijine'],6402.004,1539561248.52)
woe_train_data.loc[(train_data_sampling['fuzhaijine']>-0.001)&(train_data_sampling['fuzhaijine']<=538.43),'fuzhaijine']=woe_fuzhaijine1
woe_train_data.loc[(train_data_sampling['fuzhaijine']>538.43)&(train_data_sampling['fuzhaijine']<=1495.849),'fuzhaijine']=woe_fuzhaijine2
woe_train_data.loc[(train_data_sampling['fuzhaijine']>1495.849)&(train_data_sampling['fuzhaijine']<=2752.647),'fuzhaijine']=woe_fuzhaijine3
woe_train_data.loc[(train_data_sampling['fuzhaijine']>2752.647)&(train_data_sampling['fuzhaijine']<=6402.004),'fuzhaijine']=woe_fuzhaijine4
woe_train_data.loc[(train_data_sampling['fuzhaijine']>6402.004)&(train_data_sampling['fuzhaijine']<=1539561248.52),'fuzhaijine']=woe_fuzhaijine5
# NOTE(review): the line below inspects 'NumberOfDependents', probably a
# copy-paste leftover — 'fuzhaijine' was likely intended; harmless display only.
woe_train_data['NumberOfDependents'].value_counts()
iv_fuzhaijine1=(getbadlen(train_data_sampling['fuzhaijine'],-0.001,538.43)-getgoodlen(train_data_sampling['fuzhaijine'],-0.001,538.43))*woe_fuzhaijine1
iv_fuzhaijine2=(getbadlen(train_data_sampling['fuzhaijine'],538.43,1495.849)-getgoodlen(train_data_sampling['fuzhaijine'],538.43,1495.849))*woe_fuzhaijine2
iv_fuzhaijine3=(getbadlen(train_data_sampling['fuzhaijine'],1495.849,2752.647)-getgoodlen(train_data_sampling['fuzhaijine'],1495.849,2752.647))*woe_fuzhaijine3
# BUG FIX: the IV terms for bins 4 and 5 previously used bounds 56402.004 and
# 2029810649.54, which do not match the WOE bin edges (6402.004, 1539561248.52)
# used above; each IV term must integrate over the same bin as its WOE.
iv_fuzhaijine4=(getbadlen(train_data_sampling['fuzhaijine'],2752.647,6402.004)-getgoodlen(train_data_sampling['fuzhaijine'],2752.647,6402.004))*woe_fuzhaijine4
iv_fuzhaijine5=(getbadlen(train_data_sampling['fuzhaijine'],6402.004,1539561248.52)-getgoodlen(train_data_sampling['fuzhaijine'],6402.004,1539561248.52))*woe_fuzhaijine5
iv_fuzhaijine=iv_fuzhaijine1+iv_fuzhaijine2+iv_fuzhaijine3+iv_fuzhaijine4+iv_fuzhaijine5# 0.0086596257811806399
#shifouweiyue
# WOE/IV for the binary shifouweiyue flag (bins {0} and {1}).
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'],train_data_sampling_cut['shifouweiyue'])
woe_shifou1=getwoe(train_data_sampling['shifouweiyue'],-1,0)
woe_shifou2=getwoe(train_data_sampling['shifouweiyue'],0,1)
woe_train_data.loc[(train_data_sampling['shifouweiyue']==0.0),'shifouweiyue']=woe_shifou1
woe_train_data.loc[(train_data_sampling['shifouweiyue']==1.0),'shifouweiyue']=woe_shifou2
woe_train_data['shifouweiyue'].value_counts()
# BUG FIX: the IV terms previously multiplied by woe_fuzhaijine1/woe_fuzhaijine2
# (copy-paste from the fuzhaijine section); each bin's IV must use its own WOE.
# The trailing IV comment predates the fix.
iv_shifou1=(getbadlen(train_data_sampling['shifouweiyue'],-1,0)-getgoodlen(train_data_sampling['shifouweiyue'],-1,0))*woe_shifou1
iv_shifou2=(getbadlen(train_data_sampling['shifouweiyue'],0,1)-getgoodlen(train_data_sampling['shifouweiyue'],0,1))*woe_shifou2
iv_shifou=iv_shifou1+iv_shifou2#0.050769156098225278
# --- Modeling: random-forest feature importances on the WOE-encoded features ---
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# modern code imports ShuffleSplit from sklearn.model_selection.
from sklearn.cross_validation import ShuffleSplit
clf_bl=RandomForestClassifier()
names=woe_train_data.columns
# Fit on columns 1-12 (all features, target excluded).
clf_bl.fit(woe_train_data.iloc[:,[1,2,3,4,5,6,7,8,9,10,11,12]],woe_train_data['SeriousDlqin2yrs'])
clf_bl.feature_importances_
# NOTE(review): `names` here is the full column list including the target, so the
# zip pairs importances with shifted column names — verify before trusting labels.
print (sorted(zip(map(lambda x: round(x, 4), clf_bl.feature_importances_), names),reverse=True))
'''
基尼不纯度
这里特征得分实际上采用的是 Gini Importance 。使用基于不纯度的方法的时候,要记住:1、这种方法存在 偏向 ,对具有更多类别的变量会更有利;2、对于存在关联的多个特征,其中任意一个都可以作为指示器(优秀的特征),并且一旦某个特征被选择之后,其他特征的重要度就会急剧下降,因为不纯度已经被选中的那个特征降下来了,其他的特征就很难再降低那么多不纯度了,这样一来,只有先被选中的那个特征重要度很高,其他的关联特征重要度往往较低。在理解数据时,这就会造成误解,导致错误的认为先被选中的特征是很重要的,而其余的特征是不重要的,但实际上这些特征对响应变量的作用确实非常接近的(这跟Lasso是很像的)。
[(0.17549999999999999, 'SeriousDlqin2yrs'),
(0.1221, 'MonthlyIncome'),
(0.1076, 'RevolvingUtilizationOfUnsecuredLines'),
(0.1055, 'DebtRatio'),
(0.1042, 'NumberOfOpenCreditLinesAndLoans'),
(0.10059999999999999, 'fuzhaijine'),
(0.077899999999999997, 'age'),
(0.048500000000000001, 'NumberOfTime60-89DaysPastDueNotWorse'),
(0.045600000000000002, 'NumberOfTimes90DaysLate'),
(0.045100000000000001, 'NumberOfTime30-59DaysPastDueNotWorse'),
(0.037400000000000003, 'NumberOfDependents'),
(0.029999999999999999, 'NumberRealEstateLoansOrLines')]
'''
# Cross-validated score with column 6 dropped (see coefficient note below).
scores=cross_val_score(clf_bl,woe_train_data.iloc[:,[1,2,3,4,5,7,8,9,10,11,12]],woe_train_data['SeriousDlqin2yrs'])
scores.mean()# 0.80926299767756171
names=woe_train_data.iloc[:,[1,2,3,4,5,6,7,8,9,10,11,12]].columns
# Per-feature ROC-AUC via single-feature cross-validation.
scores=[]
for i in range(woe_train_data.iloc[:,[1,2,3,4,5,6,7,8,9,10,11,12]].shape[1]):
    score=cross_val_score(clf_bl,woe_train_data.iloc[:,i:i+1],woe_train_data['SeriousDlqin2yrs'],
                          scoring='roc_auc',cv=ShuffleSplit(len(woe_train_data.iloc[:,[1,2,3,4,5,7,8,9,10,11,12]]),n_iter=3,test_size=0.3))
    scores.append((round(np.mean(score),3),names[i]))
print (sorted(scores))
'''
[(0.54900000000000004, 'NumberRealEstateLoansOrLines'),
(0.55600000000000005, 'MonthlyIncome'),
(0.56299999999999994, 'NumberOfDependents'),
(0.56399999999999995, 'NumberOfTimes90DaysLate'),
(0.56499999999999995, 'shifouweiyue'),
(0.63400000000000001, 'fuzhaijine'),
(0.63500000000000001, 'NumberOfTime30-59DaysPastDueNotWorse'),
(0.67400000000000004, 'NumberOfTime60-89DaysPastDueNotWorse'),
(0.70699999999999996, 'DebtRatio'),
(0.78300000000000003, 'age'),
(1.0, 'RevolvingUtilizationOfUnsecuredLines')]
'''
# --- Logistic regression: tune C by grid search, then fit and evaluate ---
param_test1={'C':np.arange(1,3,0.5)}
gsearch1=GridSearchCV(estimator=LogisticRegression(),param_grid=param_test1,cv=10)
gsearch1.fit(woe_train_data.iloc[:,[1,2,3,4,5,7,8,9,10,11,12]],woe_train_data['SeriousDlqin2yrs'])
gsearch1.grid_scores_, gsearch1.best_params_, gsearch1.best_score_#650
clf=LogisticRegression(penalty='l2',C=2.5)
clf.fit(woe_train_data.iloc[:,[1,2,3,4,5,7,8,9,10,11,12]],woe_train_data['SeriousDlqin2yrs'])
clf.coef_# column 6's coefficient is negative — possibly multicollinearity
# Hold-out split (40% test) on the same feature subset (column 6 dropped).
train_x,test_x,train_y,test_y = train_test_split(woe_train_data.iloc[:,[1,2,3,4,5,7,8,9,10,11,12]],woe_train_data.iloc[:,0],test_size=0.4)
clf.fit(train_x,train_y)
y_pred=clf.predict(test_x)
# Positive-class probabilities for thresholding below.
answer=clf.predict_proba(test_x)[:,1]
# Binarize predicted probabilities and hold-out labels at the 0.5 threshold.
y_pred_ = [1 if prob > 0.5 else 0 for prob in answer]
y_test_ = [1 if label > 0.5 else 0 for label in test_y]
#confusion_matrix=confusion_matrix(np.array(y_test_),np.array(y_pred_))
#pd.DataFrame(y_pred).describe()
roc_auc_score(test_y,y_pred)#0.78
#0.79367391796888187
#0.79728808571478227
#0.79570582284625913去掉6
data_test=data.iloc[len(data_train):,:]
data_test_woe=data_test.copy()
data_test_woe.loc[data_test['age']<=28,'age']=woe_age1
data_test_woe.loc[(data_test['age']>28)&(data_test['age']<=35),'age']=woe_age2
data_test_woe.loc[(data_test['age']>35)&(data_test['age']<=40),'age']=woe_age3
data_test_woe.loc[(data_test['age']>40)&(data_test['age']<=47),'age']=woe_age4
data_test_woe.loc[(data_test['age']>47)&(data_test['age']<=54),'age']=woe_age5
data_test_woe.loc[(data_test['age']>54)&(data_test['age']<=61),'age']=woe_age6
data_test_woe.loc[(data_test['age']>61)&(data_test['age']<=68),'age']=woe_age7
data_test_woe.loc[(data_test['age']>68)&(data_test['age']<=77),'age']=woe_age8
data_test_woe.loc[(data_test['age']>77)&(data_test['age']<=86),'age']=woe_age9
data_test_woe.loc[(data_test['age']>86)&(data_test['age']<=111),'age']=woe_age10
data_test_woe.age.value_counts()
#RevolvingUtilizationOfUnsecuredLines保留变量,因为相关性不高
data_test_woe.loc[(data_test['RevolvingUtilizationOfUnsecuredLines']<=0.0535)&(data_test['RevolvingUtilizationOfUnsecuredLines']>=0),'RevolvingUtilizationOfUnsecuredLines']=woe_Revolving1
data_test_woe.loc[(data_test['RevolvingUtilizationOfUnsecuredLines']>0.0535)&(data_test['RevolvingUtilizationOfUnsecuredLines']<=0.281),'RevolvingUtilizationOfUnsecuredLines']=woe_Revolving2
data_test_woe.loc[(data_test['RevolvingUtilizationOfUnsecuredLines']>0.281)&(data_test['RevolvingUtilizationOfUnsecuredLines']<=0.652),'RevolvingUtilizationOfUnsecuredLines']=woe_Revolving3
data_test_woe.loc[(data_test['RevolvingUtilizationOfUnsecuredLines']>0.652)&(data_test['RevolvingUtilizationOfUnsecuredLines']<=0.967),'RevolvingUtilizationOfUnsecuredLines']=woe_Revolving4
data_test_woe.loc[(data_test['RevolvingUtilizationOfUnsecuredLines']>0.967)&(data_test['RevolvingUtilizationOfUnsecuredLines']<=60000),'RevolvingUtilizationOfUnsecuredLines']=woe_Revolving5
data_test_woe['RevolvingUtilizationOfUnsecuredLines'].value_counts()
# NumberOfTime30-59DaysPastDueNotWorse: WOE-encode the delinquency count.
_c3059 = 'NumberOfTime30-59DaysPastDueNotWorse'
for _mask, _woe in [
    (data_test[_c3059] == 0, woe_30591),
    (data_test[_c3059] == 1, woe_30592),
    ((data_test[_c3059] > 1) & (data_test[_c3059] <= 2), woe_30593),
    ((data_test[_c3059] > 2) & (data_test[_c3059] <= 4), woe_30594),
    ((data_test[_c3059] > 4) & (data_test[_c3059] <= 98), woe_30595),
]:
    data_test_woe.loc[_mask, _c3059] = _woe
data_test_woe[_c3059].value_counts()
# DebtRatio: WOE-encode each bin of the raw ratio.
# BUG FIX: the first bin previously assigned -woe_Ratio1 (stray unary
# minus), while every other WOE assignment in this script uses the value
# unnegated.  This assumes the training-side encoding also used
# +woe_Ratio1 -- TODO confirm against the (not visible) training transform.
data_test_woe.loc[data_test['DebtRatio']<=0.153,'DebtRatio']=woe_Ratio1
data_test_woe.loc[(data_test['DebtRatio']>0.153)&(data_test['DebtRatio']<=0.311),'DebtRatio']=woe_Ratio2
data_test_woe.loc[(data_test['DebtRatio']>0.311)&(data_test['DebtRatio']<=0.5),'DebtRatio']=woe_Ratio3
data_test_woe.loc[(data_test['DebtRatio']>0.5)&(data_test['DebtRatio']<=1.49),'DebtRatio']=woe_Ratio4
data_test_woe.loc[(data_test['DebtRatio']>1.49)&(data_test['DebtRatio']<=400000),'DebtRatio']=woe_Ratio5
data_test_woe['DebtRatio'].value_counts()
# MonthlyIncome: WOE-encode; bins are (lo, hi], the first is left-open and
# the last is open-ended (> 10166).
_inc = 'MonthlyIncome'
_income_bins = [
    (None, 1140.342, woe_incom1),
    (1140.342, 1943.438, woe_incom2),
    (1943.438, 2800.0, woe_incom3),
    (2800.0, 3500.0, woe_incom4),
    (3500.0, 4225.0, woe_incom5),
    (4225.0, 5125.153, woe_incom6),
    (5125.153, 6184.002, woe_incom7),
    (6184.002, 7675.0, woe_incom8),
    (7675.0, 10166.0, woe_incom9),
]
for _lo, _hi, _woe in _income_bins:
    _mask = data_test[_inc] <= _hi
    if _lo is not None:
        _mask &= data_test[_inc] > _lo
    data_test_woe.loc[_mask, _inc] = _woe
data_test_woe.loc[data_test[_inc] > 10166.0, _inc] = woe_incom10
data_test_woe.MonthlyIncome.value_counts()
# NumberOfOpenCreditLinesAndLoans: WOE-encode; last bin is open-ended (> 15).
_loans = 'NumberOfOpenCreditLinesAndLoans'
_loans_bins = [
    (None, 2.0, woe_Loans1),
    (2.0, 4.0, woe_Loans2),
    (4.0, 5.0, woe_Loans3),
    (5.0, 6.0, woe_Loans4),
    (6.0, 7.0, woe_Loans5),
    (7.0, 9.0, woe_Loans6),
    (9.0, 10.0, woe_Loans7),
    (10.0, 12.0, woe_Loans8),
    (12.0, 15.0, woe_Loans9),
]
for _lo, _hi, _woe in _loans_bins:
    _mask = data_test[_loans] <= _hi
    if _lo is not None:
        _mask &= data_test[_loans] > _lo
    data_test_woe.loc[_mask, _loans] = _woe
data_test_woe.loc[data_test[_loans] > 15.0, _loans] = woe_Loans10
data_test_woe.NumberOfOpenCreditLinesAndLoans.value_counts()
# NumberOfTimes90DaysLate: WOE-encode the count.
_c90 = 'NumberOfTimes90DaysLate'
for _mask, _woe in [
    (data_test[_c90] == 0.0, woe_901),
    (data_test[_c90] == 1.0, woe_902),
    ((data_test[_c90] > 1.0) & (data_test[_c90] <= 2.0), woe_903),
    ((data_test[_c90] > 2.0) & (data_test[_c90] <= 4.0), woe_904),
    ((data_test[_c90] > 4.0) & (data_test[_c90] <= 98), woe_905),
]:
    data_test_woe.loc[_mask, _c90] = _woe
data_test_woe.NumberOfTimes90DaysLate.value_counts()
# NumberRealEstateLoansOrLines: WOE-encode the count.
_re = 'NumberRealEstateLoansOrLines'
for _mask, _woe in [
    (data_test[_re] <= 0.0, woe_Lines1),
    ((data_test[_re] > 0.0) & (data_test[_re] <= 1.0), woe_Lines2),
    ((data_test[_re] > 1.0) & (data_test[_re] <= 2.0), woe_Lines3),
    ((data_test[_re] > 2.0) & (data_test[_re] <= 4.0), woe_Lines4),
    ((data_test[_re] > 4.0) & (data_test[_re] <= 37), woe_Lines5),
]:
    data_test_woe.loc[_mask, _re] = _woe
data_test_woe.NumberRealEstateLoansOrLines.value_counts()
# NumberOfTime60-89DaysPastDueNotWorse: WOE-encode the count.
_c6089 = 'NumberOfTime60-89DaysPastDueNotWorse'
for _mask, _woe in [
    (data_test[_c6089] <= 0.0, woe_60891),
    ((data_test[_c6089] > 0.0) & (data_test[_c6089] <= 1.0), woe_60892),
    ((data_test[_c6089] > 1.0) & (data_test[_c6089] <= 2.0), woe_60893),
    ((data_test[_c6089] > 2.0) & (data_test[_c6089] <= 4.0), woe_60894),
    ((data_test[_c6089] > 4.0) & (data_test[_c6089] <= 98), woe_60895),
]:
    data_test_woe.loc[_mask, _c6089] = _woe
data_test_woe['NumberOfTime60-89DaysPastDueNotWorse'].value_counts()
#NumberOfDependents
# Two-pass encoding: first collapse the raw dependent counts into the
# integer bin labels {0, 1, 2, 4}, then replace those labels with WOE
# values.  All masks except the last read the untouched `data_test`, so
# the first pass does not disturb the second.
data_test_woe.loc[data_test['NumberOfDependents']<=0.5,'NumberOfDependents']=0
# NOTE(review): exactly 0.5 satisfies both this mask and the previous
# one, so it ends up labelled 1; harmless if the column only holds whole
# numbers -- confirm upstream.
data_test_woe.loc[(data_test['NumberOfDependents']<=1.5)&(data_test['NumberOfDependents']>=0.5),'NumberOfDependents']=1
data_test_woe.loc[(data_test['NumberOfDependents']<=2.5)&(data_test['NumberOfDependents']>=1.5),'NumberOfDependents']=2
data_test_woe.loc[(data_test['NumberOfDependents']>2.5),'NumberOfDependents']=4
data_test_woe.loc[(data_test['NumberOfDependents']==0.0),'NumberOfDependents']=woe_Dependents1
data_test_woe.loc[(data_test['NumberOfDependents']==1.0),'NumberOfDependents']=woe_Dependents2
data_test_woe.loc[(data_test['NumberOfDependents']==2.0),'NumberOfDependents']=woe_Dependents3
# NOTE(review): this mask mixes data_test (raw counts) with data_test_woe
# (already relabelled).  It works because the first pass set the >2.5
# rows to 4 (which is <= 20) -- revisit if the binning changes.
data_test_woe.loc[(data_test['NumberOfDependents']>2.0)&(data_test_woe['NumberOfDependents']<=20),'NumberOfDependents']=woe_Dependents4
data_test_woe['NumberOfDependents'].value_counts()
# fuzhaijine (a derived debt-amount feature; its construction is not
# visible in this chunk): WOE-encode each (lo, hi] bin.
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'], train_data_sampling_cut['fuzhaijine'])
_fz = 'fuzhaijine'
_fz_bins = [
    (-0.001, 538.43, woe_fuzhaijine1),
    (538.43, 1495.849, woe_fuzhaijine2),
    (1495.849, 2752.647, woe_fuzhaijine3),
    (2752.647, 6402.004, woe_fuzhaijine4),
    (6402.004, 1539561248.52, woe_fuzhaijine5),
]
for _lo, _hi, _woe in _fz_bins:
    data_test_woe.loc[(data_test[_fz] > _lo) & (data_test[_fz] <= _hi), _fz] = _woe
data_test_woe[_fz].value_counts()
# shifouweiyue (a binary default-flag feature): map its two levels to WOE.
pd.crosstab(train_data_sampling_cut['SeriousDlqin2yrs'], train_data_sampling_cut['shifouweiyue'])
_sf = 'shifouweiyue'
data_test_woe.loc[data_test[_sf] == 0.0, _sf] = woe_shifou1
data_test_woe.loc[data_test[_sf] == 1.0, _sf] = woe_shifou2
data_test_woe[_sf].value_counts()
# Renumber the test frame 0..n-1, drop the stale 'index' column, then
# score it with the already-fitted classifier `clf`.
data_test_woe.reset_index(inplace=True)
data_test_woe.drop('index', axis=1, inplace=True)
# Column positions of the features the model was trained on.
_feat_cols = [1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12]
_features = data_test_woe.iloc[:, _feat_cols]
test_y_pred = clf.predict(_features)
answer1 = clf.predict_proba(_features)[:, 1]  # probability of class 1
y_submission = answer1
# Kaggle-style submission: 1-based Id plus predicted default probability.
result = pd.DataFrame({"Id": data_test_woe.index + 1, "Probability": y_submission})
result.to_csv('C:/Users/hp/Desktop/在家学习/信用评分/stack_result.csv', index=False)  # scored 0.847352
#0.859255
# Correlation heatmap of the WOE-encoded training features.
corrmat=woe_train_data.corr()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(corrmat, vmax=.8, square=True)#0.847518
# ---- Cross-validated ROC curves ----------------------------------------
# BUG FIX: the original loop enumerated the StratifiedKFold splits but
# ignored the (train, test) index arrays -- every "fold" re-fit on the
# same train_x/train_y and scored the same test_x, so all six curves were
# identical and the cross-validation was meaningless.  Fit and score on
# the per-fold indices instead (the standard sklearn ROC-with-CV recipe).
from sklearn.cross_validation import StratifiedKFold
from scipy import interp
# Same feature columns / target used for training elsewhere in this script.
X_cv = woe_train_data.iloc[:, [1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12]]
y_cv = woe_train_data['SeriousDlqin2yrs']
cv = StratifiedKFold(y_cv, n_folds=6)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
    probas_ = clf.fit(X_cv.iloc[train], y_cv.iloc[train]).predict_proba(X_cv.iloc[test])
    fpr, tpr, thresholds = roc_curve(y_cv.iloc[test], probas_[:, 1])
    # Interpolate this fold's TPR onto the shared 100-point FPR grid.
    mean_tpr += interp(mean_fpr, fpr, tpr)
    mean_tpr[0] = 0.0  # every curve starts at the origin
    roc_auc = auc(fpr, tpr)
    plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
# Chance diagonal.
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)  # average the interpolated TPRs over the folds
mean_tpr[-1] = 1.0   # force the averaged curve to end at (1, 1)
mean_auc = auc(mean_fpr, mean_tpr)  # AUC of the averaged curve
# Mean ROC curve.
plt.plot(mean_fpr, mean_tpr, 'k--',
         label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
# KS statistic of the LAST fold only (fpr/tpr are loop leftovers).
ks = (tpr - fpr).max()
print(ks)  # previously printed 0.595559905785 (best cut point)
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"numpy.log",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.roc_curve",
"numpy.arange",
"numpy.mean",
"scipy.interp",
"pandas.qcut",
"sklearn.cross_validation.cross_val_score",
"matplotlib.pyplot.xlabel",
"matplotlib.py... | [((1955, 1988), 'pandas.read_csv', 'pd.read_csv', (['"""...cs-training.csv"""'], {}), "('...cs-training.csv')\n", (1966, 1988), True, 'import pandas as pd\n'), ((1999, 2028), 'pandas.read_csv', 'pd.read_csv', (['"""...cs-test.csv"""'], {}), "('...cs-test.csv')\n", (2010, 2028), True, 'import pandas as pd\n'), ((2094, 2128), 'pandas.concat', 'pd.concat', (['[data_train, data_test]'], {}), '([data_train, data_test])\n', (2103, 2128), True, 'import pandas as pd\n'), ((2653, 2950), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'loss': '"""ls"""', 'learning_rate': '(0.1)', 'n_estimators': '(30)', 'subsample': '(1.0)', 'min_samples_split': '(2)', 'min_samples_leaf': '(1)', 'min_weight_fraction_leaf': '(0.0)', 'max_depth': '(3)', 'init': 'None', 'random_state': 'None', 'max_features': 'None', 'alpha': '(0.9)', 'verbose': '(0)', 'max_leaf_nodes': 'None', 'warm_start': '(False)'}), "(loss='ls', learning_rate=0.1, n_estimators=30,\n subsample=1.0, min_samples_split=2, min_samples_leaf=1,\n min_weight_fraction_leaf=0.0, max_depth=3, init=None, random_state=None,\n max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,\n warm_start=False)\n", (2678, 2950), False, 'from sklearn.ensemble import GradientBoostingRegressor\n'), ((6539, 6553), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6551, 6553), True, 'import pandas as pd\n'), ((6739, 6790), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(9)', 'random_state': '(4)', 'init': '"""random"""'}), "(n_clusters=9, random_state=4, init='random')\n", (6745, 6790), False, 'from sklearn.cluster import KMeans\n'), ((6897, 6983), 'pandas.cut', 'pd.cut', (['train_data_sampling.age'], {'bins': '[0, 28, 35, 40, 47, 54, 61, 68, 77, 86, 110]'}), '(train_data_sampling.age, bins=[0, 28, 35, 40, 47, 54, 61, 68, 77, 86,\n 110])\n', (6903, 6983), True, 'import pandas as pd\n'), ((6969, 7067), 'pandas.crosstab', 'pd.crosstab', (["train_data_sampling.loc[:, 'age']", 
"train_data_sampling.loc[:, 'SeriousDlqin2yrs']"], {}), "(train_data_sampling.loc[:, 'age'], train_data_sampling.loc[:,\n 'SeriousDlqin2yrs'])\n", (6980, 7067), True, 'import pandas as pd\n'), ((7274, 7344), 'pandas.qcut', 'pd.qcut', (['train_data_sampling.RevolvingUtilizationOfUnsecuredLines'], {'q': '(5)'}), '(train_data_sampling.RevolvingUtilizationOfUnsecuredLines, q=5)\n', (7281, 7344), True, 'import pandas as pd\n'), ((7870, 7978), 'pandas.cut', 'pd.cut', (["train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse']"], {'bins': '[-np.inf, 0, 1, 2, 4, np.inf]'}), "(train_data_sampling['NumberOfTime30-59DaysPastDueNotWorse'], bins=[-\n np.inf, 0, 1, 2, 4, np.inf])\n", (7876, 7978), True, 'import pandas as pd\n'), ((8120, 8163), 'pandas.qcut', 'pd.qcut', (['train_data_sampling.DebtRatio'], {'q': '(5)'}), '(train_data_sampling.DebtRatio, q=5)\n', (8127, 8163), True, 'import pandas as pd\n'), ((8214, 8262), 'pandas.qcut', 'pd.qcut', (['train_data_sampling.MonthlyIncome'], {'q': '(10)'}), '(train_data_sampling.MonthlyIncome, q=10)\n', (8221, 8262), True, 'import pandas as pd\n'), ((8401, 8470), 'pandas.qcut', 'pd.qcut', (["train_data_sampling['NumberOfOpenCreditLinesAndLoans']"], {'q': '(10)'}), "(train_data_sampling['NumberOfOpenCreditLinesAndLoans'], q=10)\n", (8408, 8470), True, 'import pandas as pd\n'), ((8917, 9011), 'pandas.cut', 'pd.cut', (["train_data_sampling['NumberOfTimes90DaysLate']"], {'bins': '[-np.inf, 0, 1, 2, 4, np.inf]'}), "(train_data_sampling['NumberOfTimes90DaysLate'], bins=[-np.inf, 0, 1,\n 2, 4, np.inf])\n", (8923, 9011), True, 'import pandas as pd\n'), ((9089, 9189), 'pandas.cut', 'pd.cut', (["train_data_sampling['NumberRealEstateLoansOrLines']"], {'bins': '[-np.inf, 0, 1, 2, 4, np.inf]'}), "(train_data_sampling['NumberRealEstateLoansOrLines'], bins=[-np.inf, \n 0, 1, 2, 4, np.inf])\n", (9095, 9189), True, 'import pandas as pd\n'), ((9280, 9388), 'pandas.cut', 'pd.cut', (["train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse']"], 
{'bins': '[-np.inf, 0, 1, 2, 4, np.inf]'}), "(train_data_sampling['NumberOfTime60-89DaysPastDueNotWorse'], bins=[-\n np.inf, 0, 1, 2, 4, np.inf])\n", (9286, 9388), True, 'import pandas as pd\n'), ((9453, 9539), 'pandas.cut', 'pd.cut', (["train_data_sampling['NumberOfDependents']"], {'bins': '[-np.inf, 0, 1, 2, np.inf]'}), "(train_data_sampling['NumberOfDependents'], bins=[-np.inf, 0, 1, 2,\n np.inf])\n", (9459, 9539), True, 'import pandas as pd\n'), ((9588, 9635), 'pandas.qcut', 'pd.qcut', (["train_data_sampling['fuzhaijine']"], {'q': '(5)'}), "(train_data_sampling['fuzhaijine'], q=5)\n", (9595, 9635), True, 'import pandas as pd\n'), ((12351, 12443), 'pandas.crosstab', 'pd.crosstab', (["train_data_sampling_cut['SeriousDlqin2yrs']", "train_data_sampling_cut['age']"], {}), "(train_data_sampling_cut['SeriousDlqin2yrs'],\n train_data_sampling_cut['age'])\n", (12362, 12443), True, 'import pandas as pd\n'), ((15271, 15396), 'pandas.crosstab', 'pd.crosstab', (["train_data_sampling_cut['SeriousDlqin2yrs']", "train_data_sampling_cut['RevolvingUtilizationOfUnsecuredLines']"], {}), "(train_data_sampling_cut['SeriousDlqin2yrs'],\n train_data_sampling_cut['RevolvingUtilizationOfUnsecuredLines'])\n", (15282, 15396), True, 'import pandas as pd\n'), ((15407, 15452), 'numpy.log', 'np.log', (['(3198 / totalbad / (20834 / totalgood))'], {}), '(3198 / totalbad / (20834 / totalgood))\n', (15413, 15452), True, 'import numpy as np\n'), ((15464, 15509), 'numpy.log', 'np.log', (['(6745 / totalbad / (17285 / totalgood))'], {}), '(6745 / totalbad / (17285 / totalgood))\n', (15470, 15509), True, 'import numpy as np\n'), ((15521, 15567), 'numpy.log', 'np.log', (['(13531 / totalbad / (10500 / totalgood))'], {}), '(13531 / totalbad / (10500 / totalgood))\n', (15527, 15567), True, 'import numpy as np\n'), ((15579, 15624), 'numpy.log', 'np.log', (['(18043 / totalbad / (5989 / totalgood))'], {}), '(18043 / totalbad / (5989 / totalgood))\n', (15585, 15624), True, 'import numpy as np\n'), ((15636, 
15681), 'numpy.log', 'np.log', (['(18639 / totalbad / (5391 / totalgood))'], {}), '(18639 / totalbad / (5391 / totalgood))\n', (15642, 15681), True, 'import numpy as np\n'), ((17278, 17403), 'pandas.crosstab', 'pd.crosstab', (["train_data_sampling_cut['SeriousDlqin2yrs']", "train_data_sampling_cut['NumberOfTime30-59DaysPastDueNotWorse']"], {}), "(train_data_sampling_cut['SeriousDlqin2yrs'],\n train_data_sampling_cut['NumberOfTime30-59DaysPastDueNotWorse'])\n", (17289, 17403), True, 'import pandas as pd\n'), ((17410, 17456), 'numpy.log', 'np.log', (['(28490 / totalbad / (51935 / totalgood))'], {}), '(28490 / totalbad / (51935 / totalgood))\n', (17416, 17456), True, 'import numpy as np\n'), ((17463, 17508), 'numpy.log', 'np.log', (['(16626 / totalbad / (5743 / totalgood))'], {}), '(16626 / totalbad / (5743 / totalgood))\n', (17469, 17508), True, 'import numpy as np\n'), ((17515, 17559), 'numpy.log', 'np.log', (['(7862 / totalbad / (1460 / totalgood))'], {}), '(7862 / totalbad / (1460 / totalgood))\n', (17521, 17559), True, 'import numpy as np\n'), ((17566, 17609), 'numpy.log', 'np.log', (['(5133 / totalbad / (670 / totalgood))'], {}), '(5133 / totalbad / (670 / totalgood))\n', (17572, 17609), True, 'import numpy as np\n'), ((17616, 17659), 'numpy.log', 'np.log', (['(2045 / totalbad / (191 / totalgood))'], {}), '(2045 / totalbad / (191 / totalgood))\n', (17622, 17659), True, 'import numpy as np\n'), ((19019, 19117), 'pandas.crosstab', 'pd.crosstab', (["train_data_sampling_cut['SeriousDlqin2yrs']", "train_data_sampling_cut['DebtRatio']"], {}), "(train_data_sampling_cut['SeriousDlqin2yrs'],\n train_data_sampling_cut['DebtRatio'])\n", (19030, 19117), True, 'import pandas as pd\n'), ((19124, 19170), 'numpy.log', 'np.log', (['(10577 / totalbad / (13454 / totalgood))'], {}), '(10577 / totalbad / (13454 / totalgood))\n', (19130, 19170), True, 'import numpy as np\n'), ((19178, 19224), 'numpy.log', 'np.log', (['(11320 / totalbad / (12711 / totalgood))'], {}), '(11320 / 
totalbad / (12711 / totalgood))\n', (19184, 19224), True, 'import numpy as np\n'), ((19232, 19278), 'numpy.log', 'np.log', (['(12385 / totalbad / (11646 / totalgood))'], {}), '(12385 / totalbad / (11646 / totalgood))\n', (19238, 19278), True, 'import numpy as np\n'), ((19286, 19331), 'numpy.log', 'np.log', (['(14783 / totalbad / (9251 / totalgood))'], {}), '(14783 / totalbad / (9251 / totalgood))\n', (19292, 19331), True, 'import numpy as np\n'), ((19339, 19385), 'numpy.log', 'np.log', (['(11091 / totalbad / (12937 / totalgood))'], {}), '(11091 / totalbad / (12937 / totalgood))\n', (19345, 19385), True, 'import numpy as np\n'), ((20390, 20492), 'pandas.crosstab', 'pd.crosstab', (["train_data_sampling_cut['SeriousDlqin2yrs']", "train_data_sampling_cut['MonthlyIncome']"], {}), "(train_data_sampling_cut['SeriousDlqin2yrs'],\n train_data_sampling_cut['MonthlyIncome'])\n", (20401, 20492), True, 'import pandas as pd\n'), ((20499, 20543), 'numpy.log', 'np.log', (['(6134 / totalbad / (5886 / totalgood))'], {}), '(6134 / totalbad / (5886 / totalgood))\n', (20505, 20543), True, 'import numpy as np\n'), ((20551, 20595), 'numpy.log', 'np.log', (['(5942 / totalbad / (6185 / totalgood))'], {}), '(5942 / totalbad / (6185 / totalgood))\n', (20557, 20595), True, 'import numpy as np\n'), ((20603, 20647), 'numpy.log', 'np.log', (['(7055 / totalbad / (5243 / totalgood))'], {}), '(7055 / totalbad / (5243 / totalgood))\n', (20609, 20647), True, 'import numpy as np\n'), ((20655, 20699), 'numpy.log', 'np.log', (['(7016 / totalbad / (5605 / totalgood))'], {}), '(7016 / totalbad / (5605 / totalgood))\n', (20661, 20699), True, 'import numpy as np\n'), ((20707, 20751), 'numpy.log', 'np.log', (['(6120 / totalbad / (4898 / totalgood))'], {}), '(6120 / totalbad / (4898 / totalgood))\n', (20713, 20751), True, 'import numpy as np\n'), ((20759, 20803), 'numpy.log', 'np.log', (['(6384 / totalbad / (5626 / totalgood))'], {}), '(6384 / totalbad / (5626 / totalgood))\n', (20765, 20803), True, 'import 
numpy as np\n'), ((20811, 20855), 'numpy.log', 'np.log', (['(6167 / totalbad / (5860 / totalgood))'], {}), '(6167 / totalbad / (5860 / totalgood))\n', (20817, 20855), True, 'import numpy as np\n'), ((20863, 20907), 'numpy.log', 'np.log', (['(5555 / totalbad / (6452 / totalgood))'], {}), '(5555 / totalbad / (6452 / totalgood))\n', (20869, 20907), True, 'import numpy as np\n'), ((20915, 20959), 'numpy.log', 'np.log', (['(5145 / totalbad / (6868 / totalgood))'], {}), '(5145 / totalbad / (6868 / totalgood))\n', (20921, 20959), True, 'import numpy as np\n'), ((20968, 21012), 'numpy.log', 'np.log', (['(4638 / totalbad / (7376 / totalgood))'], {}), '(4638 / totalbad / (7376 / totalgood))\n', (20974, 21012), True, 'import numpy as np\n'), ((23063, 23183), 'pandas.crosstab', 'pd.crosstab', (["train_data_sampling_cut['SeriousDlqin2yrs']", "train_data_sampling_cut['NumberOfOpenCreditLinesAndLoans']"], {}), "(train_data_sampling_cut['SeriousDlqin2yrs'],\n train_data_sampling_cut['NumberOfOpenCreditLinesAndLoans'])\n", (23074, 23183), True, 'import pandas as pd\n'), ((23190, 23234), 'numpy.log', 'np.log', (['(9379 / totalbad / (4883 / totalgood))'], {}), '(9379 / totalbad / (4883 / totalgood))\n', (23196, 23234), True, 'import numpy as np\n'), ((23242, 23286), 'numpy.log', 'np.log', (['(8800 / totalbad / (8259 / totalgood))'], {}), '(8800 / totalbad / (8259 / totalgood))\n', (23248, 23286), True, 'import numpy as np\n'), ((23294, 23338), 'numpy.log', 'np.log', (['(5067 / totalbad / (5146 / totalgood))'], {}), '(5067 / totalbad / (5146 / totalgood))\n', (23300, 23338), True, 'import numpy as np\n'), ((23346, 23390), 'numpy.log', 'np.log', (['(4660 / totalbad / (5509 / totalgood))'], {}), '(4660 / totalbad / (5509 / totalgood))\n', (23352, 23390), True, 'import numpy as np\n'), ((23398, 23442), 'numpy.log', 'np.log', (['(4522 / totalbad / (5302 / totalgood))'], {}), '(4522 / totalbad / (5302 / totalgood))\n', (23404, 23442), True, 'import numpy as np\n'), ((23450, 23494), 
'numpy.log', 'np.log', (['(8005 / totalbad / (9696 / totalgood))'], {}), '(8005 / totalbad / (9696 / totalgood))\n', (23456, 23494), True, 'import numpy as np\n'), ((23502, 23546), 'numpy.log', 'np.log', (['(3590 / totalbad / (3916 / totalgood))'], {}), '(3590 / totalbad / (3916 / totalgood))\n', (23508, 23546), True, 'import numpy as np\n'), ((23554, 23598), 'numpy.log', 'np.log', (['(5650 / totalbad / (6123 / totalgood))'], {}), '(5650 / totalbad / (6123 / totalgood))\n', (23560, 23598), True, 'import numpy as np\n'), ((23606, 23650), 'numpy.log', 'np.log', (['(5409 / totalbad / (5627 / totalgood))'], {}), '(5409 / totalbad / (5627 / totalgood))\n', (23612, 23650), True, 'import numpy as np\n'), ((23659, 23703), 'numpy.log', 'np.log', (['(5074 / totalbad / (5538 / totalgood))'], {}), '(5074 / totalbad / (5538 / totalgood))\n', (23665, 23703), True, 'import numpy as np\n'), ((26161, 26273), 'pandas.crosstab', 'pd.crosstab', (["train_data_sampling_cut['SeriousDlqin2yrs']", "train_data_sampling_cut['NumberOfTimes90DaysLate']"], {}), "(train_data_sampling_cut['SeriousDlqin2yrs'],\n train_data_sampling_cut['NumberOfTimes90DaysLate'])\n", (26172, 26273), True, 'import pandas as pd\n'), ((26277, 26323), 'numpy.log', 'np.log', (['(38146 / totalbad / (38146 / totalgood))'], {}), '(38146 / totalbad / (38146 / totalgood))\n', (26283, 26323), True, 'import numpy as np\n'), ((26328, 26373), 'numpy.log', 'np.log', (['(12389 / totalbad / (1521 / totalgood))'], {}), '(12389 / totalbad / (1521 / totalgood))\n', (26334, 26373), True, 'import numpy as np\n'), ((26378, 26421), 'numpy.log', 'np.log', (['(4774 / totalbad / (344 / totalgood))'], {}), '(4774 / totalbad / (344 / totalgood))\n', (26384, 26421), True, 'import numpy as np\n'), ((26426, 26469), 'numpy.log', 'np.log', (['(3085 / totalbad / (179 / totalgood))'], {}), '(3085 / totalbad / (179 / totalgood))\n', (26432, 26469), True, 'import numpy as np\n'), ((26474, 26516), 'numpy.log', 'np.log', (['(1762 / totalbad / (95 / 
totalgood))'], {}), '(1762 / totalbad / (95 / totalgood))\n', (26480, 26516), True, 'import numpy as np\n'), ((27582, 27699), 'pandas.crosstab', 'pd.crosstab', (["train_data_sampling_cut['SeriousDlqin2yrs']", "train_data_sampling_cut['NumberRealEstateLoansOrLines']"], {}), "(train_data_sampling_cut['SeriousDlqin2yrs'],\n train_data_sampling_cut['NumberRealEstateLoansOrLines'])\n", (27593, 27699), True, 'import pandas as pd\n'), ((27706, 27752), 'numpy.log', 'np.log', (['(26932 / totalbad / (22100 / totalgood))'], {}), '(26932 / totalbad / (22100 / totalgood))\n', (27712, 27752), True, 'import numpy as np\n'), ((27760, 27806), 'numpy.log', 'np.log', (['(17936 / totalbad / (21270 / totalgood))'], {}), '(17936 / totalbad / (21270 / totalgood))\n', (27766, 27806), True, 'import numpy as np\n'), ((27814, 27860), 'numpy.log', 'np.log', (['(10526 / totalbad / (12656 / totalgood))'], {}), '(10526 / totalbad / (12656 / totalgood))\n', (27820, 27860), True, 'import numpy as np\n'), ((27868, 27912), 'numpy.log', 'np.log', (['(3621 / totalbad / (3429 / totalgood))'], {}), '(3621 / totalbad / (3429 / totalgood))\n', (27874, 27912), True, 'import numpy as np\n'), ((27920, 27963), 'numpy.log', 'np.log', (['(1141 / totalbad / (544 / totalgood))'], {}), '(1141 / totalbad / (544 / totalgood))\n', (27926, 27963), True, 'import numpy as np\n'), ((29351, 29476), 'pandas.crosstab', 'pd.crosstab', (["train_data_sampling_cut['SeriousDlqin2yrs']", "train_data_sampling_cut['NumberOfTime60-89DaysPastDueNotWorse']"], {}), "(train_data_sampling_cut['SeriousDlqin2yrs'],\n train_data_sampling_cut['NumberOfTime60-89DaysPastDueNotWorse'])\n", (29362, 29476), True, 'import pandas as pd\n'), ((29482, 29528), 'numpy.log', 'np.log', (['(42678 / totalbad / (57972 / totalgood))'], {}), '(42678 / totalbad / (57972 / totalgood))\n', (29488, 29528), True, 'import numpy as np\n'), ((29535, 29580), 'numpy.log', 'np.log', (['(12210 / totalbad / (1653 / totalgood))'], {}), '(12210 / totalbad / (1653 / 
totalgood))\n', (29541, 29580), True, 'import numpy as np\n'), ((29587, 29630), 'numpy.log', 'np.log', (['(3103 / totalbad / (248 / totalgood))'], {}), '(3103 / totalbad / (248 / totalgood))\n', (29593, 29630), True, 'import numpy as np\n'), ((29637, 29679), 'numpy.log', 'np.log', (['(1117 / totalbad / (77 / totalgood))'], {}), '(1117 / totalbad / (77 / totalgood))\n', (29643, 29679), True, 'import numpy as np\n'), ((29686, 29728), 'numpy.log', 'np.log', (['(1048 / totalbad / (49 / totalgood))'], {}), '(1048 / totalbad / (49 / totalgood))\n', (29692, 29728), True, 'import numpy as np\n'), ((31126, 31233), 'pandas.crosstab', 'pd.crosstab', (["train_data_sampling_cut['SeriousDlqin2yrs']", "train_data_sampling_cut['NumberOfDependents']"], {}), "(train_data_sampling_cut['SeriousDlqin2yrs'],\n train_data_sampling_cut['NumberOfDependents'])\n", (31137, 31233), True, 'import pandas as pd\n'), ((31245, 31291), 'numpy.log', 'np.log', (['(29464 / totalbad / (36205 / totalgood))'], {}), '(29464 / totalbad / (36205 / totalgood))\n', (31251, 31291), True, 'import numpy as np\n'), ((31304, 31350), 'numpy.log', 'np.log', (['(14313 / totalbad / (10825 / totalgood))'], {}), '(14313 / totalbad / (10825 / totalgood))\n', (31310, 31350), True, 'import numpy as np\n'), ((31363, 31407), 'numpy.log', 'np.log', (['(9926 / totalbad / (7763 / totalgood))'], {}), '(9926 / totalbad / (7763 / totalgood))\n', (31369, 31407), True, 'import numpy as np\n'), ((31420, 31464), 'numpy.log', 'np.log', (['(6453 / totalbad / (5206 / totalgood))'], {}), '(6453 / totalbad / (5206 / totalgood))\n', (31426, 31464), True, 'import numpy as np\n'), ((32343, 32442), 'pandas.crosstab', 'pd.crosstab', (["train_data_sampling_cut['SeriousDlqin2yrs']", "train_data_sampling_cut['fuzhaijine']"], {}), "(train_data_sampling_cut['SeriousDlqin2yrs'],\n train_data_sampling_cut['fuzhaijine'])\n", (32354, 32442), True, 'import pandas as pd\n'), ((34496, 34597), 'pandas.crosstab', 'pd.crosstab', 
(["train_data_sampling_cut['SeriousDlqin2yrs']", "train_data_sampling_cut['shifouweiyue']"], {}), "(train_data_sampling_cut['SeriousDlqin2yrs'],\n train_data_sampling_cut['shifouweiyue'])\n", (34507, 34597), True, 'import pandas as pd\n'), ((35322, 35346), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (35344, 35346), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((36480, 36605), 'sklearn.cross_validation.cross_val_score', 'cross_val_score', (['clf_bl', 'woe_train_data.iloc[:, [1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12]]', "woe_train_data['SeriousDlqin2yrs']"], {}), "(clf_bl, woe_train_data.iloc[:, [1, 2, 3, 4, 5, 7, 8, 9, 10,\n 11, 12]], woe_train_data['SeriousDlqin2yrs'])\n", (36495, 36605), False, 'from sklearn.cross_validation import cross_val_score\n'), ((37924, 37963), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l2"""', 'C': '(2.5)'}), "(penalty='l2', C=2.5)\n", (37942, 37963), False, 'from sklearn.linear_model import LogisticRegression\n'), ((38114, 38239), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['woe_train_data.iloc[:, [1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12]]', 'woe_train_data.iloc[:, 0]'], {'test_size': '(0.4)'}), '(woe_train_data.iloc[:, [1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12\n ]], woe_train_data.iloc[:, 0], test_size=0.4)\n', (38130, 38239), False, 'from sklearn.cross_validation import train_test_split\n'), ((38633, 38662), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['test_y', 'y_pred'], {}), '(test_y, y_pred)\n', (38646, 38662), False, 'from sklearn.metrics import roc_auc_score\n'), ((48356, 48455), 'pandas.crosstab', 'pd.crosstab', (["train_data_sampling_cut['SeriousDlqin2yrs']", "train_data_sampling_cut['fuzhaijine']"], {}), "(train_data_sampling_cut['SeriousDlqin2yrs'],\n train_data_sampling_cut['fuzhaijine'])\n", (48367, 48455), True, 'import pandas as pd\n'), ((49102, 49203), 'pandas.crosstab', 'pd.crosstab', 
(["train_data_sampling_cut['SeriousDlqin2yrs']", "train_data_sampling_cut['shifouweiyue']"], {}), "(train_data_sampling_cut['SeriousDlqin2yrs'],\n train_data_sampling_cut['shifouweiyue'])\n", (49113, 49203), True, 'import pandas as pd\n'), ((49679, 49753), 'pandas.DataFrame', 'pd.DataFrame', (["{'Id': data_test_woe.index + 1, 'Probability': y_submission}"], {}), "({'Id': data_test_woe.index + 1, 'Probability': y_submission})\n", (49691, 49753), True, 'import pandas as pd\n'), ((49888, 49917), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 9)'}), '(figsize=(12, 9))\n', (49900, 49917), True, 'import matplotlib.pyplot as plt\n'), ((49918, 49961), 'seaborn.heatmap', 'sns.heatmap', (['corrmat'], {'vmax': '(0.8)', 'square': '(True)'}), '(corrmat, vmax=0.8, square=True)\n', (49929, 49961), True, 'import seaborn as sns\n'), ((50068, 50130), 'sklearn.cross_validation.StratifiedKFold', 'StratifiedKFold', (["woe_train_data['SeriousDlqin2yrs']"], {'n_folds': '(6)'}), "(woe_train_data['SeriousDlqin2yrs'], n_folds=6)\n", (50083, 50130), False, 'from sklearn.cross_validation import StratifiedKFold\n'), ((50160, 50182), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (50171, 50182), True, 'import numpy as np\n'), ((50681, 50748), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""--"""'], {'color': '(0.6, 0.6, 0.6)', 'label': '"""Luck"""'}), "([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')\n", (50689, 50748), True, 'import matplotlib.pyplot as plt\n'), ((50894, 50917), 'sklearn.metrics.auc', 'auc', (['mean_fpr', 'mean_tpr'], {}), '(mean_fpr, mean_tpr)\n', (50897, 50917), False, 'from sklearn.metrics import precision_recall_curve, roc_curve, auc\n'), ((50997, 51086), 'matplotlib.pyplot.plot', 'plt.plot', (['mean_fpr', 'mean_tpr', '"""k--"""'], {'label': "('Mean ROC (area = %0.2f)' % mean_auc)", 'lw': '(2)'}), "(mean_fpr, mean_tpr, 'k--', label='Mean ROC (area = %0.2f)' %\n mean_auc, lw=2)\n", (51005, 
51086), True, 'import matplotlib.pyplot as plt\n'), ((51099, 51122), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (51107, 51122), True, 'import matplotlib.pyplot as plt\n'), ((51125, 51148), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (51133, 51148), True, 'import matplotlib.pyplot as plt\n'), ((51151, 51184), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (51161, 51184), True, 'import matplotlib.pyplot as plt\n'), ((51187, 51219), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (51197, 51219), True, 'import matplotlib.pyplot as plt\n'), ((51222, 51276), 'matplotlib.pyplot.title', 'plt.title', (['"""Receiver operating characteristic example"""'], {}), "('Receiver operating characteristic example')\n", (51231, 51276), True, 'import matplotlib.pyplot as plt\n'), ((51279, 51308), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (51289, 51308), True, 'import matplotlib.pyplot as plt\n'), ((51311, 51321), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (51319, 51321), True, 'import matplotlib.pyplot as plt\n'), ((11825, 11868), 'numpy.log', 'np.log', (['(bad / totalbad / (good / totalgood))'], {}), '(bad / totalbad / (good / totalgood))\n', (11831, 11868), True, 'import numpy as np\n'), ((37646, 37666), 'numpy.arange', 'np.arange', (['(1)', '(3)', '(0.5)'], {}), '(1, 3, 0.5)\n', (37655, 37666), True, 'import numpy as np\n'), ((50362, 50394), 'sklearn.metrics.roc_curve', 'roc_curve', (['test_y', 'probas_[:, 1]'], {}), '(test_y, probas_[:, 1])\n', (50371, 50394), False, 'from sklearn.metrics import precision_recall_curve, roc_curve, auc\n'), ((50413, 50439), 'scipy.interp', 'interp', (['mean_fpr', 'fpr', 'tpr'], {}), '(mean_fpr, fpr, tpr)\n', (50419, 50439), False, 'from scipy import interp\n'), ((50572, 50585), 
'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (50575, 50585), False, 'from sklearn.metrics import precision_recall_curve, roc_curve, auc\n'), ((50592, 50667), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'lw': '(1)', 'label': "('ROC fold %d (area = %0.2f)' % (i, roc_auc))"}), "(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))\n", (50600, 50667), True, 'import matplotlib.pyplot as plt\n'), ((4623, 4667), 'numpy.zeros', 'np.zeros', (['(self.n_samples * N, self.n_attrs)'], {}), '((self.n_samples * N, self.n_attrs))\n', (4631, 4667), True, 'import numpy as np\n'), ((37698, 37718), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (37716, 37718), False, 'from sklearn.linear_model import LogisticRegression\n'), ((5203, 5232), 'random.randint', 'random.randint', (['(0)', '(self.k - 1)'], {}), '(0, self.k - 1)\n', (5217, 5232), False, 'import random\n'), ((5304, 5319), 'random.random', 'random.random', ([], {}), '()\n', (5317, 5319), False, 'import random\n'), ((4686, 4722), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'self.k'}), '(n_neighbors=self.k)\n', (4702, 4722), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((37041, 37055), 'numpy.mean', 'np.mean', (['score'], {}), '(score)\n', (37048, 37055), True, 'import numpy as np\n')] |
import cdd
import numpy as np
class Cone_on_Plane(object):
"""Attributes:
Quadratic_Form (list of lists, 2x2 matrix): The quadratic form of the cone.
Linear_Form (list, 2x1 vector): The linear form of the matrix.
"""
def __init__(self, a, b, plane_flag):
self.Quadratic_Form = [[-2*np.sin(a)*np.sin(b), np.sin(a+b)], [np.sin(a+b), -2*np.cos(a)*np.cos(b)]]
if (plane_flag):
self.Linear_Form = [np.cos(a),np.sin(a)]
else:
self.Linear_Form = [-np.cos(a),-np.sin(a)]
class Polyhedral_Cone(object):
"""Attributes:
vertices (list of lists): vertices defining the cone.
A (list of lists, matrix): the A-matrix of the halfspace representation.
b (list): the b-matrix of the halfspace representation.
"""
def __init__(self, vertices, A, b):
self.vertices = vertices
self.A = A
self.b = b
def polyhedral_cone_vertices2hrep(vertices):
"""INPUTS: vertices (list of lists)
Given the vertices, returns the b and A matrix of the halfspace rep.
of a polyhedral cone. Uses the cdd library.
"""
cdd_vertices = []
# =============================================================================
# for i in range(0,np.shape(vertices)[0]): #put a 1 in front of all vertices
# #this constructs the vertices of the vertex rep for cdd
# temp=vertices[i][:]
# temp.append(1)
# temp.insert(0,1)
# cdd_vertices.append(temp)
# =============================================================================
for i in range(0,np.shape(vertices)[0]): #put a 0 in front of all vertices, to get a polyhedral cone
#this constructs the rays of the vertex rep for cdd
temp=vertices[i][:]
#temp.append(1)
temp.insert(0,0)
cdd_vertices.append(temp)
mat = cdd.Matrix(cdd_vertices,number_type='float')
mat.rep_type = cdd.RepType.GENERATOR
poly = cdd.Polyhedron(mat)
ext = poly.get_inequalities()
b = np.zeros(np.shape(ext)[0])
A = np.zeros([np.shape(ext)[0], np.shape(ext)[1]-1])
for i in range(0,np.shape(ext)[0]):
b[i] = ext[i][0]
for j in range(0,np.shape(ext)[1]-1):
A[i,j] = -ext[i][j+1]
return [b,A]
def polytope_vrep2hrep(vertices):
"""INPUTS: vertices (list of lists)
Given the vertices, returns the b and A matrix of the halfspace rep.
of a polytope. Uses the cdd library.
"""
cdd_vertices = []
for i in range(0,np.shape(vertices)[0]):#put a 1 in front of all vertices, to get a polytope
#this constructs the vertices of the vertex rep for cdd
temp=vertices[i][:]
temp.insert(0,1)
cdd_vertices.append(temp[:])
mat = cdd.Matrix(cdd_vertices,number_type='float')
mat.rep_type = cdd.RepType.GENERATOR
poly = cdd.Polyhedron(mat)
ext = poly.get_inequalities()
b = np.zeros(np.shape(ext)[0])
A = np.zeros([np.shape(ext)[0], np.shape(ext)[1]-1])
for i in range(0,np.shape(ext)[0]):
b[i] = ext[i][0]
for j in range(0,np.shape(ext)[1]-1):
A[i,j] = -ext[i][j+1]
return [b,A]
| [
"numpy.cos",
"numpy.sin",
"numpy.shape",
"cdd.Polyhedron",
"cdd.Matrix"
] | [((1886, 1931), 'cdd.Matrix', 'cdd.Matrix', (['cdd_vertices'], {'number_type': '"""float"""'}), "(cdd_vertices, number_type='float')\n", (1896, 1931), False, 'import cdd\n'), ((1983, 2002), 'cdd.Polyhedron', 'cdd.Polyhedron', (['mat'], {}), '(mat)\n', (1997, 2002), False, 'import cdd\n'), ((2791, 2836), 'cdd.Matrix', 'cdd.Matrix', (['cdd_vertices'], {'number_type': '"""float"""'}), "(cdd_vertices, number_type='float')\n", (2801, 2836), False, 'import cdd\n'), ((2888, 2907), 'cdd.Polyhedron', 'cdd.Polyhedron', (['mat'], {}), '(mat)\n', (2902, 2907), False, 'import cdd\n'), ((1621, 1639), 'numpy.shape', 'np.shape', (['vertices'], {}), '(vertices)\n', (1629, 1639), True, 'import numpy as np\n'), ((2054, 2067), 'numpy.shape', 'np.shape', (['ext'], {}), '(ext)\n', (2062, 2067), True, 'import numpy as np\n'), ((2150, 2163), 'numpy.shape', 'np.shape', (['ext'], {}), '(ext)\n', (2158, 2163), True, 'import numpy as np\n'), ((2551, 2569), 'numpy.shape', 'np.shape', (['vertices'], {}), '(vertices)\n', (2559, 2569), True, 'import numpy as np\n'), ((2959, 2972), 'numpy.shape', 'np.shape', (['ext'], {}), '(ext)\n', (2967, 2972), True, 'import numpy as np\n'), ((3055, 3068), 'numpy.shape', 'np.shape', (['ext'], {}), '(ext)\n', (3063, 3068), True, 'import numpy as np\n'), ((340, 353), 'numpy.sin', 'np.sin', (['(a + b)'], {}), '(a + b)\n', (346, 353), True, 'import numpy as np\n'), ((355, 368), 'numpy.sin', 'np.sin', (['(a + b)'], {}), '(a + b)\n', (361, 368), True, 'import numpy as np\n'), ((450, 459), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (456, 459), True, 'import numpy as np\n'), ((460, 469), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (466, 469), True, 'import numpy as np\n'), ((2090, 2103), 'numpy.shape', 'np.shape', (['ext'], {}), '(ext)\n', (2098, 2103), True, 'import numpy as np\n'), ((2995, 3008), 'numpy.shape', 'np.shape', (['ext'], {}), '(ext)\n', (3003, 3008), True, 'import numpy as np\n'), ((329, 338), 'numpy.sin', 'np.sin', (['b'], {}), '(b)\n', (335, 
338), True, 'import numpy as np\n'), ((381, 390), 'numpy.cos', 'np.cos', (['b'], {}), '(b)\n', (387, 390), True, 'import numpy as np\n'), ((518, 527), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (524, 527), True, 'import numpy as np\n'), ((529, 538), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (535, 538), True, 'import numpy as np\n'), ((2108, 2121), 'numpy.shape', 'np.shape', (['ext'], {}), '(ext)\n', (2116, 2121), True, 'import numpy as np\n'), ((2219, 2232), 'numpy.shape', 'np.shape', (['ext'], {}), '(ext)\n', (2227, 2232), True, 'import numpy as np\n'), ((3013, 3026), 'numpy.shape', 'np.shape', (['ext'], {}), '(ext)\n', (3021, 3026), True, 'import numpy as np\n'), ((3124, 3137), 'numpy.shape', 'np.shape', (['ext'], {}), '(ext)\n', (3132, 3137), True, 'import numpy as np\n'), ((319, 328), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (325, 328), True, 'import numpy as np\n'), ((371, 380), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (377, 380), True, 'import numpy as np\n')] |
# coding: utf-8
# import sys,os,os.path
# os.environ['CUDA_VISIBLE_DEVICES']=''
import json
import os
import sys
from collections import Counter
import tqdm
import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.python.ops import array_ops
import random
import pickle
import editdistance
import soundex
import jellyfish
import string
import math
import requests
from requests.auth import HTTPBasicAuth
def conv1D(x, inp, out, kernel, stride, dilation_rate, name, use_bias=True, activation=None, batch_norm=False):
try:
with tf.variable_scope("conv"):
W = tf.get_variable(name+"W", shape=[kernel, inp, out],
initializer=tf.contrib.layers.xavier_initializer())
except:
with tf.variable_scope("conv", reuse=True):
W = tf.get_variable(name+"W", shape=[kernel, inp, out],
initializer=tf.contrib.layers.xavier_initializer())
x = tf.nn.convolution(
input=x,
filter=W,
strides=(stride,),
dilation_rate=(dilation_rate,),
padding="SAME",
data_format="NWC")
if use_bias:
x = layers.bias_add(x)
if batch_norm:
x = layers.batch_norm(x)
if activation is not None:
x = activation(x)
return x
def residual_block(inputs, dim, dilation_rate, name):
x = conv1D(inputs, dim, dim, 3, 1, dilation_rate, name+'.conv1', batch_norm=True, activation=tf.nn.relu)
x = conv1D(x, dim, dim, 3, 1, dilation_rate, name+'.conv2', batch_norm=True)
return tf.nn.relu(inputs + x)
def upsampling_1d(x, scale):
original_shape = tf.shape(x)
x = tf.image.resize_nearest_neighbor(tf.reshape(x, [-1, original_shape[1], 1, original_shape[2]]),
[original_shape[1]*scale, 1])
x = tf.reshape(x, [-1, original_shape[1]*2, original_shape[2]])
return x
def crop_and_concat(x1,x2):
x1_shape = tf.shape(x1)
x2_shape = tf.shape(x2)
x1_shape_int = tuple([i.__int__() for i in x1.get_shape()])
# offsets for the top left corner of the crop
offsets = [0, (x1_shape[1] - x2_shape[1]) // 2, 0]
size = [-1, x2_shape[1], -1]
x1_crop = tf.slice(x1, offsets, size)
return tf.concat([x1_crop, x2], 2)
if len(sys.argv) != 3:
print ('usage: unet_prediction_2.py input.json output.json')
exit()
input_filename = sys.argv[1]
output_filename = sys.argv[2]
with open(input_filename) as f:
test_data = json.load(f)
max_num_user_mention_per_twit = 126
max_num_hashtag_per_twit = 28
with open('gen_data_2/input_vocab.hkl', 'rb') as f:
input_vocab = pickle.load(f)
with open('gen_data_2/word_list.hkl', 'rb') as f:
word_list = pickle.load(f)
with open('gen_data_2/users.hkl', 'rb') as f:
users = pickle.load(f)
with open('gen_data_2/hashtag_list.hkl', 'rb') as f:
hashtag_list = pickle.load(f)
with open('gen_data_2/user_mention_list.hkl', 'rb') as f:
user_mention_list = pickle.load(f)
input_vocab_set = set(input_vocab)
inp_item_to_index = {w: i for i, w in enumerate(input_vocab)}
users_to_index = {x:i for i, x in enumerate(users)}
hashtag_to_index = {w: i for i, w in enumerate(hashtag_list)}
user_mention_to_index = {w: i for i, w in enumerate(user_mention_list)}
emb_dim = 64
vocab_size = len(input_vocab)
user_emb_dim = 64
input_seq = tf.placeholder(tf.int32, shape=[None, None])
output_seq = tf.placeholder(tf.int32, shape=[None, None])
hashtag_input = tf.placeholder(tf.int32, shape=[None, max_num_hashtag_per_twit])
user_mention_input = tf.placeholder(tf.int32, shape=[None, max_num_user_mention_per_twit])
input_user = tf.placeholder(tf.int32, shape=[None, 1])
# Embedding: [vocab_size, emb_dim]
init_width = 0.5 / emb_dim
emb_weights = tf.Variable(
tf.random_uniform(
[vocab_size, emb_dim], -init_width, init_width),
name="embed_weights")
embedding = tf.nn.embedding_lookup(emb_weights, input_seq)
init_width = 0.5 / user_emb_dim
users_emb_weights = tf.Variable(
tf.random_uniform(
[len(users), user_emb_dim], -init_width, init_width),
name="users_embed_weights")
users_embedding = tf.nn.embedding_lookup(users_emb_weights, input_user)
hashtag_emb_dim = 64
init_width = 0.5 / hashtag_emb_dim
hashtag_emb_weights = tf.Variable(
tf.random_uniform(
[len(hashtag_list), hashtag_emb_dim], -init_width, init_width),
name="hashtag_embed_weights")
hashtag_embedding = tf.nn.embedding_lookup(hashtag_emb_weights, hashtag_input)
hashtag_embedding = tf.reduce_mean(hashtag_embedding, axis=1)
hashtag_embedding = tf.reshape(hashtag_embedding, (-1, 1, hashtag_emb_dim))
user_mention_emb_dim = 64
init_width = 0.5 / user_mention_emb_dim
user_mention_emb_weights = tf.Variable(
tf.random_uniform(
[len(user_mention_list), user_mention_emb_dim], -init_width, init_width),
name="user_mention_embed_weights")
user_mention_embedding = tf.nn.embedding_lookup(user_mention_emb_weights, user_mention_input)
user_mention_embedding = tf.reduce_mean(user_mention_embedding, axis=1)
user_mention_embedding = tf.reshape(user_mention_embedding, (-1, 1, user_mention_emb_dim))
x = embedding
conv1 = conv1D(x, 64, 64, 3, 1, 1, 'conv1_1', batch_norm=False, activation=tf.nn.relu)
conv1 = conv1D(conv1, 64, 64, 3, 1, 1, 'conv1_2', batch_norm=False, activation=tf.nn.relu)
pool1 = tf.nn.pool(conv1, [2,], "MAX", "SAME", strides=[2,])
conv2 = conv1D(pool1, 64, 128, 3, 1, 1, 'conv2_1', batch_norm=False, activation=tf.nn.relu)
conv2 = conv1D(conv2, 128, 128, 3, 1, 1, 'conv2_2', batch_norm=False, activation=tf.nn.relu)
pool2 = tf.nn.pool(conv2, [2,], "MAX", "SAME", strides=[2,])
conv3 = conv1D(pool2, 128, 256, 3, 1, 1, 'conv3_1_', batch_norm=False, activation=tf.nn.relu)
conv3 = conv1D(conv3, 256, 256, 3, 1, 1, 'conv3_2', batch_norm=False, activation=tf.nn.relu)
conv3 = conv1D(conv3, 256, 256, 3, 1, 1, 'conv3_3', batch_norm=False, activation=tf.nn.relu)
conv3 = tf.concat([tf.tile(users_embedding, [1, tf.shape(conv3)[1], 1]),
tf.tile(user_mention_embedding, [1, tf.shape(conv3)[1], 1]),
tf.tile(hashtag_embedding, [1, tf.shape(conv3)[1], 1]),
conv3], axis=2)
upsample1 = upsampling_1d(conv3, 2)
upsample1 = crop_and_concat(upsample1, conv2)
conv4 = conv1D(upsample1, 384+user_mention_emb_dim+user_emb_dim+hashtag_emb_dim, 128, 3, 1, 1, 'conv4_1', batch_norm=False, activation=tf.nn.relu)
conv4 = conv1D(conv4, 128, 128, 3, 1, 1, 'conv4_2', batch_norm=False, activation=tf.nn.relu)
upsample2 = upsampling_1d(conv4, 2)
upsample2 = crop_and_concat(upsample2, conv1)
conv5 = conv1D(upsample2, 192, 64, 3, 1, 1, 'conv5_1', batch_norm=False, activation=tf.nn.relu)
conv5 = conv1D(conv5, 64, 64, 3, 1, 1, 'conv5_2', batch_norm=False, activation=tf.nn.relu)
net = conv1D(conv5, 64, len(word_list), 1, 1, 1, 'conv_final')
pred_max = tf.argmax(net, 2)
out = tf.reshape(net, (-1, len(word_list)))
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.reshape(output_seq, (-1,)), logits=out)
loss = tf.reduce_mean(loss)
loss_summary_update = tf.summary.scalar("loss", loss)
summary_op = tf.summary.merge_all()
train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9, epsilon=0.01).minimize(loss)
sess = tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=8))
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver = tf.train.Saver(max_to_keep=0)
#saver.restore(sess, './checkpoints/res_cnn_hashtag_unet_v1_2_2573000_3.855930.ckpt')
saver.restore(sess, './checkpoints/res_cnn_hashtag_unet_v1_2_1003301_3.733280.ckpt')
def entity_shortened_preprocess_test(x):
if x['type'] == 'url':
return '__url__'
elif x['type'] == 'userMention':
return '__userMention__'
elif x['type'] == 'hashtag':
return '__hashtag__'
else:
if '__'+x['type']+'__'+x['value'] in input_vocab_set:
return '__'+x['type']+'__'+x['value']
else:
return '__'+x['type']+'__'
def get_prediction(item):
inp_indexes = np.array([inp_item_to_index[entity_shortened_preprocess_test(x)] for x in item['entitiesShortened']])
hashtag_indexes = np.zeros((1, max_num_hashtag_per_twit), dtype=np.int32)
user_mention_indexes = np.zeros((1, max_num_user_mention_per_twit), dtype=np.int32)
hashtag_tmp = np.array([hashtag_to_index[x['value'].lower()]
if x['value'].lower() in hashtag_to_index else 0
for x in item['entitiesShortened'] if x['type'] == 'hashtag'])
hashtag_indexes[0, :len(hashtag_tmp)] = hashtag_tmp
user_mention_tmp = np.array([user_mention_to_index[x['value'].lower()]
if x['value'].lower() in user_mention_to_index else 0
for x in item['entitiesShortened'] if x['type'] == 'userMention'])
user_mention_indexes[0, :len(user_mention_tmp)] = user_mention_tmp
u = users_to_index[item['user']]
p = sess.run(net, feed_dict={input_seq: inp_indexes.reshape((1, -1)),
input_user: np.array(u).reshape((1, -1)),
hashtag_input:hashtag_indexes, user_mention_input:user_mention_indexes})
pred_seq = []
for i, x in enumerate(item['entitiesShortened']):
count = 0
if x['type'] != 'letter':
continue
for j, k in enumerate(np.argsort(p[0][i])[::-1]):
if word_list[k][0] == x['value'].lower():
count += 1
if count == 1:
break
pred_seq.append(x['value']+word_list[k][1:])
return pred_seq
submission = {}
for item in tqdm.tqdm(test_data):
submission[item['id']] = get_prediction(item)
with open(output_filename, 'w') as f:
json.dump(submission, f) | [
"tensorflow.local_variables_initializer",
"tensorflow.shape",
"numpy.argsort",
"numpy.array",
"tensorflow.contrib.layers.bias_add",
"tensorflow.reduce_mean",
"tensorflow.nn.embedding_lookup",
"tensorflow.slice",
"tensorflow.placeholder",
"tensorflow.concat",
"tensorflow.nn.pool",
"tensorflow.C... | [((3406, 3450), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]'}), '(tf.int32, shape=[None, None])\n', (3420, 3450), True, 'import tensorflow as tf\n'), ((3464, 3508), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]'}), '(tf.int32, shape=[None, None])\n', (3478, 3508), True, 'import tensorflow as tf\n'), ((3526, 3590), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, max_num_hashtag_per_twit]'}), '(tf.int32, shape=[None, max_num_hashtag_per_twit])\n', (3540, 3590), True, 'import tensorflow as tf\n'), ((3612, 3681), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, max_num_user_mention_per_twit]'}), '(tf.int32, shape=[None, max_num_user_mention_per_twit])\n', (3626, 3681), True, 'import tensorflow as tf\n'), ((3695, 3736), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, 1]'}), '(tf.int32, shape=[None, 1])\n', (3709, 3736), True, 'import tensorflow as tf\n'), ((3946, 3992), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['emb_weights', 'input_seq'], {}), '(emb_weights, input_seq)\n', (3968, 3992), True, 'import tensorflow as tf\n'), ((4195, 4248), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['users_emb_weights', 'input_user'], {}), '(users_emb_weights, input_user)\n', (4217, 4248), True, 'import tensorflow as tf\n'), ((4492, 4550), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['hashtag_emb_weights', 'hashtag_input'], {}), '(hashtag_emb_weights, hashtag_input)\n', (4514, 4550), True, 'import tensorflow as tf\n'), ((4571, 4612), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['hashtag_embedding'], {'axis': '(1)'}), '(hashtag_embedding, axis=1)\n', (4585, 4612), True, 'import tensorflow as tf\n'), ((4633, 4688), 'tensorflow.reshape', 'tf.reshape', (['hashtag_embedding', '(-1, 1, hashtag_emb_dim)'], {}), '(hashtag_embedding, (-1, 1, hashtag_emb_dim))\n', 
(4643, 4688), True, 'import tensorflow as tf\n'), ((4967, 5035), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['user_mention_emb_weights', 'user_mention_input'], {}), '(user_mention_emb_weights, user_mention_input)\n', (4989, 5035), True, 'import tensorflow as tf\n'), ((5061, 5107), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['user_mention_embedding'], {'axis': '(1)'}), '(user_mention_embedding, axis=1)\n', (5075, 5107), True, 'import tensorflow as tf\n'), ((5133, 5198), 'tensorflow.reshape', 'tf.reshape', (['user_mention_embedding', '(-1, 1, user_mention_emb_dim)'], {}), '(user_mention_embedding, (-1, 1, user_mention_emb_dim))\n', (5143, 5198), True, 'import tensorflow as tf\n'), ((5401, 5451), 'tensorflow.nn.pool', 'tf.nn.pool', (['conv1', '[2]', '"""MAX"""', '"""SAME"""'], {'strides': '[2]'}), "(conv1, [2], 'MAX', 'SAME', strides=[2])\n", (5411, 5451), True, 'import tensorflow as tf\n'), ((5647, 5697), 'tensorflow.nn.pool', 'tf.nn.pool', (['conv2', '[2]', '"""MAX"""', '"""SAME"""'], {'strides': '[2]'}), "(conv2, [2], 'MAX', 'SAME', strides=[2])\n", (5657, 5697), True, 'import tensorflow as tf\n'), ((6904, 6921), 'tensorflow.argmax', 'tf.argmax', (['net', '(2)'], {}), '(net, 2)\n', (6913, 6921), True, 'import tensorflow as tf\n'), ((7078, 7098), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (7092, 7098), True, 'import tensorflow as tf\n'), ((7122, 7153), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (7139, 7153), True, 'import tensorflow as tf\n'), ((7167, 7189), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (7187, 7189), True, 'import tensorflow as tf\n'), ((7468, 7497), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(0)'}), '(max_to_keep=0)\n', (7482, 7497), True, 'import tensorflow as tf\n'), ((9669, 9689), 'tqdm.tqdm', 'tqdm.tqdm', (['test_data'], {}), '(test_data)\n', (9678, 9689), False, 'import tqdm\n'), ((958, 
1085), 'tensorflow.nn.convolution', 'tf.nn.convolution', ([], {'input': 'x', 'filter': 'W', 'strides': '(stride,)', 'dilation_rate': '(dilation_rate,)', 'padding': '"""SAME"""', 'data_format': '"""NWC"""'}), "(input=x, filter=W, strides=(stride,), dilation_rate=(\n dilation_rate,), padding='SAME', data_format='NWC')\n", (975, 1085), True, 'import tensorflow as tf\n'), ((1628, 1650), 'tensorflow.nn.relu', 'tf.nn.relu', (['(inputs + x)'], {}), '(inputs + x)\n', (1638, 1650), True, 'import tensorflow as tf\n'), ((1702, 1713), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (1710, 1713), True, 'import tensorflow as tf\n'), ((1887, 1948), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, original_shape[1] * 2, original_shape[2]]'], {}), '(x, [-1, original_shape[1] * 2, original_shape[2]])\n', (1897, 1948), True, 'import tensorflow as tf\n'), ((2004, 2016), 'tensorflow.shape', 'tf.shape', (['x1'], {}), '(x1)\n', (2012, 2016), True, 'import tensorflow as tf\n'), ((2032, 2044), 'tensorflow.shape', 'tf.shape', (['x2'], {}), '(x2)\n', (2040, 2044), True, 'import tensorflow as tf\n'), ((2266, 2293), 'tensorflow.slice', 'tf.slice', (['x1', 'offsets', 'size'], {}), '(x1, offsets, size)\n', (2274, 2293), True, 'import tensorflow as tf\n'), ((2305, 2332), 'tensorflow.concat', 'tf.concat', (['[x1_crop, x2]', '(2)'], {}), '([x1_crop, x2], 2)\n', (2314, 2332), True, 'import tensorflow as tf\n'), ((2542, 2554), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2551, 2554), False, 'import json\n'), ((2693, 2707), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2704, 2707), False, 'import pickle\n'), ((2774, 2788), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2785, 2788), False, 'import pickle\n'), ((2847, 2861), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2858, 2861), False, 'import pickle\n'), ((2935, 2949), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2946, 2949), False, 'import pickle\n'), ((3032, 3046), 'pickle.load', 'pickle.load', (['f'], {}), 
'(f)\n', (3043, 3046), False, 'import pickle\n'), ((3831, 3896), 'tensorflow.random_uniform', 'tf.random_uniform', (['[vocab_size, emb_dim]', '(-init_width)', 'init_width'], {}), '([vocab_size, emb_dim], -init_width, init_width)\n', (3848, 3896), True, 'import tensorflow as tf\n'), ((7380, 7413), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7411, 7413), True, 'import tensorflow as tf\n'), ((7424, 7456), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (7454, 7456), True, 'import tensorflow as tf\n'), ((8247, 8302), 'numpy.zeros', 'np.zeros', (['(1, max_num_hashtag_per_twit)'], {'dtype': 'np.int32'}), '((1, max_num_hashtag_per_twit), dtype=np.int32)\n', (8255, 8302), True, 'import numpy as np\n'), ((8330, 8390), 'numpy.zeros', 'np.zeros', (['(1, max_num_user_mention_per_twit)'], {'dtype': 'np.int32'}), '((1, max_num_user_mention_per_twit), dtype=np.int32)\n', (8338, 8390), True, 'import numpy as np\n'), ((9784, 9808), 'json.dump', 'json.dump', (['submission', 'f'], {}), '(submission, f)\n', (9793, 9808), False, 'import json\n'), ((1231, 1249), 'tensorflow.contrib.layers.bias_add', 'layers.bias_add', (['x'], {}), '(x)\n', (1246, 1249), False, 'from tensorflow.contrib import layers\n'), ((1281, 1301), 'tensorflow.contrib.layers.batch_norm', 'layers.batch_norm', (['x'], {}), '(x)\n', (1298, 1301), False, 'from tensorflow.contrib import layers\n'), ((1755, 1815), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, original_shape[1], 1, original_shape[2]]'], {}), '(x, [-1, original_shape[1], 1, original_shape[2]])\n', (1765, 1815), True, 'import tensorflow as tf\n'), ((7028, 7057), 'tensorflow.reshape', 'tf.reshape', (['output_seq', '(-1,)'], {}), '(output_seq, (-1,))\n', (7038, 7057), True, 'import tensorflow as tf\n'), ((7202, 7287), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.0001)', 'beta1': '(0.5)', 'beta2': '(0.9)', 'epsilon': 
'(0.01)'}), '(learning_rate=0.0001, beta1=0.5, beta2=0.9, epsilon=0.01\n )\n', (7224, 7287), True, 'import tensorflow as tf\n'), ((7323, 7369), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'intra_op_parallelism_threads': '(8)'}), '(intra_op_parallelism_threads=8)\n', (7337, 7369), True, 'import tensorflow as tf\n'), ((581, 606), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv"""'], {}), "('conv')\n", (598, 606), True, 'import tensorflow as tf\n'), ((772, 809), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv"""'], {'reuse': '(True)'}), "('conv', reuse=True)\n", (789, 809), True, 'import tensorflow as tf\n'), ((9398, 9417), 'numpy.argsort', 'np.argsort', (['p[0][i]'], {}), '(p[0][i])\n', (9408, 9417), True, 'import numpy as np\n'), ((707, 745), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (743, 745), True, 'import tensorflow as tf\n'), ((6028, 6043), 'tensorflow.shape', 'tf.shape', (['conv3'], {}), '(conv3)\n', (6036, 6043), True, 'import tensorflow as tf\n'), ((6107, 6122), 'tensorflow.shape', 'tf.shape', (['conv3'], {}), '(conv3)\n', (6115, 6122), True, 'import tensorflow as tf\n'), ((6180, 6195), 'tensorflow.shape', 'tf.shape', (['conv3'], {}), '(conv3)\n', (6188, 6195), True, 'import tensorflow as tf\n'), ((910, 948), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (946, 948), True, 'import tensorflow as tf\n'), ((9089, 9100), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (9097, 9100), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from docarray import DocumentArray, Document
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
@pytest.fixture(scope='function')
def docs():
return [
Document(text='hello'),
Document(text='hello world'),
Document(text='goodbye world!'),
]
@pytest.mark.parametrize('min_freq', [1, 2, 3])
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
],
)
def test_da_vocabulary(da_cls, config, docs, min_freq, start_storage):
if config:
da = da_cls(docs, config=config)
else:
da = da_cls(docs)
vocab = da.get_vocabulary(min_freq)
if min_freq <= 1:
assert set(vocab.values()) == {2, 3, 4} # 0,1 are reserved
assert set(vocab.keys()) == {'hello', 'world', 'goodbye'}
elif min_freq == 2:
assert set(vocab.values()) == {2, 3} # 0,1 are reserved
assert set(vocab.keys()) == {'hello', 'world'}
elif min_freq == 3:
assert not vocab.values()
assert not vocab.keys()
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
],
)
def test_da_text_to_tensor_non_max_len(docs, da_cls, config, start_storage):
if config:
test_docs = da_cls(docs, config=config)
else:
test_docs = da_cls(docs)
vocab = test_docs.get_vocabulary()
test_docs.apply(lambda d: d.convert_text_to_tensor(vocab))
np.testing.assert_array_equal(test_docs[0].tensor, [2])
np.testing.assert_array_equal(test_docs[1].tensor, [2, 3])
np.testing.assert_array_equal(test_docs[2].tensor, [4, 3])
test_docs.apply(lambda d: d.convert_tensor_to_text(vocab))
assert test_docs[0].text == 'hello'
assert test_docs[1].text == 'hello world'
assert test_docs[2].text == 'goodbye world'
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
],
)
def test_da_text_to_tensor_max_len_3(docs, da_cls, config, start_storage):
if config:
test_docs = da_cls(docs, config=config)
else:
test_docs = da_cls(docs)
vocab = test_docs.get_vocabulary()
test_docs.apply(lambda d: d.convert_text_to_tensor(vocab, max_length=3))
np.testing.assert_array_equal(test_docs[0].tensor, [0, 0, 2])
np.testing.assert_array_equal(test_docs[1].tensor, [0, 2, 3])
np.testing.assert_array_equal(test_docs[2].tensor, [0, 4, 3])
test_docs.apply(lambda d: d.convert_tensor_to_text(vocab))
assert test_docs[0].text == 'hello'
assert test_docs[1].text == 'hello world'
assert test_docs[2].text == 'goodbye world'
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
],
)
def test_da_text_to_tensor_max_len_1(docs, da_cls, config, start_storage):
if config:
test_docs = da_cls(docs, config=config)
else:
test_docs = da_cls(docs)
vocab = test_docs.get_vocabulary()
test_docs.apply(lambda d: d.convert_text_to_tensor(vocab, max_length=1))
np.testing.assert_array_equal(test_docs[0].tensor, [2])
np.testing.assert_array_equal(test_docs[1].tensor, [3])
np.testing.assert_array_equal(test_docs[2].tensor, [3])
test_docs.apply(lambda d: d.convert_tensor_to_text(vocab))
assert test_docs[0].text == 'hello'
assert test_docs[1].text == 'world'
assert test_docs[2].text == 'world'
@pytest.mark.parametrize(
    'da_cls,config',
    [
        (DocumentArray, None),
        (DocumentArraySqlite, None),
        (DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
        (DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
        (DocumentArrayQdrant, QdrantConfig(n_dim=128)),
    ],
)
def test_convert_text_tensor_random_text(da_cls, docs, config, start_storage):
    """Encoding then decoding arbitrary sentences reproduces them exactly."""
    da = da_cls(docs, config=config) if config else da_cls(docs)
    texts = ['a short phrase', 'word', 'this is a much longer sentence']
    # replace the fixture contents with our own sentences
    da.clear()
    da.extend(Document(text=t) for t in texts)
    vocab = da.get_vocabulary()
    # encoding (max_length large enough for every sentence)
    da.apply(lambda d: d.convert_text_to_tensor(vocab, max_length=10))
    # decoding
    da.apply(lambda d: d.convert_tensor_to_text(vocab))
    assert texts
    assert da.texts == texts
| [
"docarray.array.annlite.AnnliteConfig",
"docarray.array.storage.qdrant.QdrantConfig",
"pytest.mark.parametrize",
"docarray.Document",
"pytest.fixture",
"docarray.array.storage.weaviate.WeaviateConfig",
"numpy.testing.assert_array_equal"
] | [((433, 465), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (447, 465), False, 'import pytest\n'), ((611, 657), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_freq"""', '[1, 2, 3]'], {}), "('min_freq', [1, 2, 3])\n", (634, 657), False, 'import pytest\n'), ((2150, 2205), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['test_docs[0].tensor', '[2]'], {}), '(test_docs[0].tensor, [2])\n', (2179, 2205), True, 'import numpy as np\n'), ((2210, 2268), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['test_docs[1].tensor', '[2, 3]'], {}), '(test_docs[1].tensor, [2, 3])\n', (2239, 2268), True, 'import numpy as np\n'), ((2273, 2331), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['test_docs[2].tensor', '[4, 3]'], {}), '(test_docs[2].tensor, [4, 3])\n', (2302, 2331), True, 'import numpy as np\n'), ((3138, 3199), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['test_docs[0].tensor', '[0, 0, 2]'], {}), '(test_docs[0].tensor, [0, 0, 2])\n', (3167, 3199), True, 'import numpy as np\n'), ((3204, 3265), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['test_docs[1].tensor', '[0, 2, 3]'], {}), '(test_docs[1].tensor, [0, 2, 3])\n', (3233, 3265), True, 'import numpy as np\n'), ((3270, 3331), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['test_docs[2].tensor', '[0, 4, 3]'], {}), '(test_docs[2].tensor, [0, 4, 3])\n', (3299, 3331), True, 'import numpy as np\n'), ((4139, 4194), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['test_docs[0].tensor', '[2]'], {}), '(test_docs[0].tensor, [2])\n', (4168, 4194), True, 'import numpy as np\n'), ((4199, 4254), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['test_docs[1].tensor', '[3]'], {}), '(test_docs[1].tensor, [3])\n', (4228, 4254), True, 'import numpy as np\n'), ((4259, 4314), 
'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['test_docs[2].tensor', '[3]'], {}), '(test_docs[2].tensor, [3])\n', (4288, 4314), True, 'import numpy as np\n'), ((499, 521), 'docarray.Document', 'Document', ([], {'text': '"""hello"""'}), "(text='hello')\n", (507, 521), False, 'from docarray import DocumentArray, Document\n'), ((531, 559), 'docarray.Document', 'Document', ([], {'text': '"""hello world"""'}), "(text='hello world')\n", (539, 559), False, 'from docarray import DocumentArray, Document\n'), ((569, 600), 'docarray.Document', 'Document', ([], {'text': '"""goodbye world!"""'}), "(text='goodbye world!')\n", (577, 600), False, 'from docarray import DocumentArray, Document\n'), ((810, 834), 'docarray.array.annlite.AnnliteConfig', 'AnnliteConfig', ([], {'n_dim': '(128)'}), '(n_dim=128)\n', (823, 834), False, 'from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig\n'), ((869, 894), 'docarray.array.storage.weaviate.WeaviateConfig', 'WeaviateConfig', ([], {'n_dim': '(128)'}), '(n_dim=128)\n', (883, 894), False, 'from docarray.array.storage.weaviate import WeaviateConfig\n'), ((927, 950), 'docarray.array.storage.qdrant.QdrantConfig', 'QdrantConfig', ([], {'n_dim': '(128)'}), '(n_dim=128)\n', (939, 950), False, 'from docarray.array.storage.qdrant import QdrantConfig\n'), ((1709, 1733), 'docarray.array.annlite.AnnliteConfig', 'AnnliteConfig', ([], {'n_dim': '(128)'}), '(n_dim=128)\n', (1722, 1733), False, 'from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig\n'), ((1768, 1793), 'docarray.array.storage.weaviate.WeaviateConfig', 'WeaviateConfig', ([], {'n_dim': '(128)'}), '(n_dim=128)\n', (1782, 1793), False, 'from docarray.array.storage.weaviate import WeaviateConfig\n'), ((1826, 1849), 'docarray.array.storage.qdrant.QdrantConfig', 'QdrantConfig', ([], {'n_dim': '(128)'}), '(n_dim=128)\n', (1838, 1849), False, 'from docarray.array.storage.qdrant import QdrantConfig\n'), ((2684, 2708), 
'docarray.array.annlite.AnnliteConfig', 'AnnliteConfig', ([], {'n_dim': '(128)'}), '(n_dim=128)\n', (2697, 2708), False, 'from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig\n'), ((2743, 2768), 'docarray.array.storage.weaviate.WeaviateConfig', 'WeaviateConfig', ([], {'n_dim': '(128)'}), '(n_dim=128)\n', (2757, 2768), False, 'from docarray.array.storage.weaviate import WeaviateConfig\n'), ((2801, 2824), 'docarray.array.storage.qdrant.QdrantConfig', 'QdrantConfig', ([], {'n_dim': '(128)'}), '(n_dim=128)\n', (2813, 2824), False, 'from docarray.array.storage.qdrant import QdrantConfig\n'), ((3685, 3709), 'docarray.array.annlite.AnnliteConfig', 'AnnliteConfig', ([], {'n_dim': '(128)'}), '(n_dim=128)\n', (3698, 3709), False, 'from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig\n'), ((3744, 3769), 'docarray.array.storage.weaviate.WeaviateConfig', 'WeaviateConfig', ([], {'n_dim': '(128)'}), '(n_dim=128)\n', (3758, 3769), False, 'from docarray.array.storage.weaviate import WeaviateConfig\n'), ((3802, 3825), 'docarray.array.storage.qdrant.QdrantConfig', 'QdrantConfig', ([], {'n_dim': '(128)'}), '(n_dim=128)\n', (3814, 3825), False, 'from docarray.array.storage.qdrant import QdrantConfig\n'), ((5079, 5095), 'docarray.Document', 'Document', ([], {'text': 't'}), '(text=t)\n', (5087, 5095), False, 'from docarray import DocumentArray, Document\n'), ((4654, 4678), 'docarray.array.annlite.AnnliteConfig', 'AnnliteConfig', ([], {'n_dim': '(128)'}), '(n_dim=128)\n', (4667, 4678), False, 'from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig\n'), ((4713, 4738), 'docarray.array.storage.weaviate.WeaviateConfig', 'WeaviateConfig', ([], {'n_dim': '(128)'}), '(n_dim=128)\n', (4727, 4738), False, 'from docarray.array.storage.weaviate import WeaviateConfig\n'), ((4771, 4794), 'docarray.array.storage.qdrant.QdrantConfig', 'QdrantConfig', ([], {'n_dim': '(128)'}), '(n_dim=128)\n', (4783, 4794), False, 'from docarray.array.storage.qdrant 
import QdrantConfig\n')] |
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import os
class Memory(object):
    """
    A uniform-sampling replay memory. This is essential when dealing with DRL
    algorithms that are not multi-threaded as in A3C.

    Transitions are kept in pre-allocated numpy ring buffers and the oldest
    entry is overwritten once ``memory_size`` transitions have been stored.
    """

    def __init__(self, memory_size, state_dim, action_dim, batch_size):
        """
        :param memory_size: maximum number of transitions kept.
        :param state_dim: state shape, either an int or a tuple of ints.
        :param action_dim: number of action components; 1 means scalar actions.
        :param batch_size: number of transitions returned by ``sample``.
        """
        self.memory_size = memory_size
        self.batch_size = batch_size
        if not isinstance(state_dim, tuple):
            state_dim = (state_dim,)
        # pre-allocated ring buffers
        self.curr_state = np.empty(shape=(memory_size,) + state_dim)
        self.next_state = np.empty(shape=(memory_size,) + state_dim)
        self.rewards = np.empty(memory_size)
        self.terminals = np.empty(memory_size)
        self.actions = np.empty((memory_size, action_dim) if action_dim > 1 else memory_size)
        # next slot to write, and number of valid entries so far
        self.current = 0
        self.count = 0

    def add(self, curr_state, next_state, reward, terminal, action):
        """Store one transition, overwriting the oldest once the buffer is full."""
        self.curr_state[self.current, ...] = curr_state
        self.next_state[self.current, ...] = next_state
        self.rewards[self.current] = reward
        self.terminals[self.current] = terminal
        self.actions[self.current] = action
        self.current += 1
        self.count = max(self.count, self.current)
        # Bug fix: wrap only after the last slot (memory_size - 1) has been
        # written; the previous `>= memory_size - 1` test reset one add too
        # early, so the final slot was never used and capacity was size - 1.
        if self.current >= self.memory_size:
            self.current = 0

    def sample(self):
        """Return ``batch_size`` transitions drawn uniformly (with replacement)."""
        indexes = np.random.randint(0, self.count, self.batch_size)
        curr_state = self.curr_state[indexes, ...]
        next_state = self.next_state[indexes, ...]
        rewards = self.rewards[indexes]
        terminals = self.terminals[indexes]
        actions = self.actions[indexes]
        return curr_state, next_state, rewards, terminals, actions

    def save(self, save_dir):
        """Dump every buffer as a .npy file under ``save_dir/<ClassName>/``."""
        path = os.path.join(save_dir, type(self).__name__)
        os.makedirs(path, exist_ok=True)
        print("Saving memory...")
        for name in ("curr_state", "next_state", "rewards", "terminals", "actions"):
            np.save(os.path.join(path, name), arr=getattr(self, name))

    def restore(self, save_dir):
        """
        Restore the buffers previously written by ``save``.

        NOTE(review): ``current`` and ``count`` are not persisted, so a
        restored memory starts sampling only after new ``add`` calls — confirm
        this is intended by the callers.
        """
        path = os.path.join(save_dir, type(self).__name__)
        for name in ("curr_state", "next_state", "rewards", "terminals", "actions"):
            setattr(self, name, np.load(os.path.join(path, "%s.npy" % name)))

    def size(self):
        """Print the shape of every underlying buffer (debug helper)."""
        for name in ("curr_state", "next_state", "rewards", "terminals", "actions"):
            print("%s size is %s" % (name, getattr(self, name).shape))
| [
"os.path.exists",
"os.makedirs",
"os.path.join",
"numpy.random.randint",
"numpy.empty"
] | [((721, 763), 'numpy.empty', 'np.empty', ([], {'shape': '((memory_size,) + state_dim)'}), '(shape=(memory_size,) + state_dim)\n', (729, 763), True, 'import numpy as np\n'), ((812, 854), 'numpy.empty', 'np.empty', ([], {'shape': '((memory_size,) + state_dim)'}), '(shape=(memory_size,) + state_dim)\n', (820, 854), True, 'import numpy as np\n'), ((896, 917), 'numpy.empty', 'np.empty', (['memory_size'], {}), '(memory_size)\n', (904, 917), True, 'import numpy as np\n'), ((962, 983), 'numpy.empty', 'np.empty', (['memory_size'], {}), '(memory_size)\n', (970, 983), True, 'import numpy as np\n'), ((1025, 1095), 'numpy.empty', 'np.empty', (['((memory_size, action_dim) if action_dim > 1 else memory_size)'], {}), '((memory_size, action_dim) if action_dim > 1 else memory_size)\n', (1033, 1095), True, 'import numpy as np\n'), ((1660, 1709), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.count', 'self.batch_size'], {}), '(0, self.count, self.batch_size)\n', (1677, 1709), True, 'import numpy as np\n'), ((2109, 2129), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2123, 2129), False, 'import os\n'), ((2143, 2160), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2154, 2160), False, 'import os\n'), ((2300, 2324), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (2312, 2324), False, 'import os\n'), ((2621, 2656), 'os.path.join', 'os.path.join', (['path', "('%s.npy' % name)"], {}), "(path, '%s.npy' % name)\n", (2633, 2656), False, 'import os\n')] |
import numpy as np
from proteus import Domain, Context, Comm
from proteus.mprans import SpatialTools as st
import proteus.TwoPhaseFlow.TwoPhaseFlowProblem as TpFlow
from proteus import WaveTools as wt
from proteus.Profiling import logEvent
from proteus.mbd import CouplingFSI as fsi
import os
import pychrono
# fluid properties (SI units)
rho_0 = 998.2  # water density [kg/m^3]
nu_0 = 1.004e-6  # water kinematic viscosity [m^2/s]
rho_1 = 1.205  # air density [kg/m^3]
nu_1 = 1.5e-5  # air kinematic viscosity [m^2/s]
sigma_01 = 0.  # surface tension coefficient (disabled)
he = 0.2  # characteristic mesh element size [m]
tank_dim = [1., 1., 1.]  # tank extents in x, y, z [m]
water_level = 0.5  # still-water level above the tank floor [m]
genMesh = False  # False: reuse the pre-generated mesh files set up below
rhor = 0.5  # caisson density relative to water (floats since < 1)
# ____ _
# | _ \ ___ _ __ ___ __ _(_)_ __
# | | | |/ _ \| '_ ` _ \ / _` | | '_ \
# | |_| | (_) | | | | | | (_| | | | | |
# |____/ \___/|_| |_| |_|\__,_|_|_| |_|
# Domain
# All geometrical options go here (but not mesh options)
domain = Domain.PiecewiseLinearComplexDomain()
# ----- SHAPES ----- #
# TANK
tank = st.Tank3D(domain, tank_dim)
# CAISSON
radius = 0.1  # half edge length of the cubic caisson [m]
caisson = st.Cuboid(domain,
                    dim=[2*radius, 2*radius, 2*radius],
                    coords=(tank_dim[0]/2., tank_dim[1]/2., water_level+radius/10.),
                    barycenter=(tank_dim[0]/2., tank_dim[1]/2., water_level+radius/10.))
# the caisson interior is a hole: it is not meshed as fluid
caisson.setHoles([caisson.barycenter])
caisson.holes_ind = np.array([0])
# let gmsh know that the caisson is IN the tank
tank.setChildShape(caisson, 0)
# ____ _ ____ _ _ _ _
# | __ ) ___ _ _ _ __ __| | __ _ _ __ _ _ / ___|___ _ __ __| (_) |_(_) ___ _ __ ___
# | _ \ / _ \| | | | '_ \ / _` |/ _` | '__| | | | | / _ \| '_ \ / _` | | __| |/ _ \| '_ \/ __|
# | |_) | (_) | |_| | | | | (_| | (_| | | | |_| | |__| (_) | | | | (_| | | |_| | (_) | | | \__ \
# |____/ \___/ \__,_|_| |_|\__,_|\__,_|_| \__, |\____\___/|_| |_|\__,_|_|\__|_|\___/|_| |_|___/
# |___/
# Boundary Conditions
# tank walls: open top, free-slip everywhere else
tank.BC['z+'].setAtmosphere()
tank.BC['z-'].setFreeSlip()
tank.BC['y+'].setFreeSlip()
tank.BC['y-'].setFreeSlip()
tank.BC['x+'].setFreeSlip()
tank.BC['x-'].setFreeSlip()
tank.BC['sponge'].setNonMaterial()
# no-slip on every face of the floating caisson
# (iterate .values(): the tag was read and discarded before)
for bc in caisson.BC.values():
    bc.setNoSlip()
# tank boundaries do not follow the deforming mesh
for bc in tank.BC.values():
    bc.setFixedNodes()
# ___ _ _ _ _ ____ _ _ _ _
# |_ _|_ __ (_) |_(_) __ _| | / ___|___ _ __ __| (_) |_(_) ___ _ __ ___
# | || '_ \| | __| |/ _` | | | | / _ \| '_ \ / _` | | __| |/ _ \| '_ \/ __|
# | || | | | | |_| | (_| | | | |__| (_) | | | | (_| | | |_| | (_) | | | \__ \
# |___|_| |_|_|\__|_|\__,_|_| \____\___/|_| |_|\__,_|_|\__|_|\___/|_| |_|___/
# Initial Conditions
from proteus.ctransportCoefficients import smoothedHeaviside
from proteus.ctransportCoefficients import smoothedHeaviside_integral
# interface smoothing length scale used by all initial-condition classes below
smoothing = 1.5 * he
nd = domain.nd  # number of space dimensions
class P_IC:
    """Hydrostatic initial pressure field, zero at the tank lid."""

    def uOfXT(self, x, t):
        # signed distances to the still-water surface: tank lid and point x
        phi_top = tank_dim[nd - 1] - water_level
        phi_x = x[nd - 1] - water_level
        # water-column contribution plus a smoothed correction that blends
        # the two phase densities across the interface
        column = rho_0 * (phi_top - phi_x)
        blend = (smoothedHeaviside_integral(smoothing, phi_top)
                 - smoothedHeaviside_integral(smoothing, phi_x))
        return 0.0 - g[nd - 1] * (column + (rho_1 - rho_0) * blend)
class U_IC:
    """Initial x-velocity: the fluid starts at rest."""

    def uOfXT(self, x, t):
        return 0.0
class V_IC:
    """Initial y-velocity: the fluid starts at rest."""

    def uOfXT(self, x, t):
        return 0.0
class W_IC:
    """Initial z-velocity: the fluid starts at rest."""

    def uOfXT(self, x, t):
        return 0.0
class VF_IC:
    """Initial volume-of-fluid fraction: smoothed 0-to-1 step across the free surface."""

    def uOfXT(self, x, t):
        signed_distance = x[nd - 1] - water_level
        return smoothedHeaviside(smoothing, signed_distance)
class PHI_IC:
    """Initial level-set field: signed distance above the still-water surface."""

    def uOfXT(self, x, t):
        return x[nd - 1] - water_level
# instantiating the initial-condition classes for the *_p.py files
initialConditions = {'pressure': P_IC(),
                     'vel_u': U_IC(),
                     'vel_v': V_IC(),
                     'vel_w': W_IC(),
                     'vof': VF_IC(),
                     'ncls': PHI_IC(),
                     'rdls': PHI_IC()}
# ____ _
# / ___| |__ _ __ ___ _ __ ___
# | | | '_ \| '__/ _ \| '_ \ / _ \
# | |___| | | | | | (_) | | | | (_) |
# \____|_| |_|_| \___/|_| |_|\___/
# Chrono
# System
# gravity vector [m/s^2]
g = np.array([0., 0., -9.81])
system = fsi.ProtChSystem()
system.ChSystem.Set_G_acc(pychrono.ChVectorD(g[0], g[1], g[2]))
# Chrono inner time step (much smaller than the fluid step)
system.setTimeStep(1e-5)
#system.setCouplingScheme("CSS", prediction="backwardEuler")
# Body
body = fsi.ProtChBody(system=system)
body.attachShape(caisson)
#body.Aij_factor = 1/width
chbod = body.ChBody
x, y, z = caisson.barycenter
pos = pychrono.ChVectorD(x, y, z)
# mass = cube volume * water density * relative density (rhor)
mass = (2.*radius)**3*rho_0*rhor
inertia = pychrono.ChVectorD(1., 1., 1.)
chbod.SetPos(pos)
chbod.SetMass(mass)
chbod.SetInertiaXX(inertia)
#chbod.SetBodyFixed(True)
# body free in all translations (free_x) and all rotations (free_r)
body.setConstraints(free_x=np.array([1.,1.,1.]), free_r=np.array([1.,1.,1.]))
# body.setInitialRot(rotation_init)
# body.rotation_init=np.array([np.cos(ang/2.), 0., 0., np.sin(ang/2.)*1.])
body.setRecordValues(all_values=True)
# __ __ _ ___ _ _
# | \/ | ___ ___| |__ / _ \ _ __ | |_(_) ___ _ __ ___
# | |\/| |/ _ \/ __| '_ \ | | | | '_ \| __| |/ _ \| '_ \/ __|
# | | | | __/\__ \ | | | | |_| | |_) | |_| | (_) | | | \__ \
# |_| |_|\___||___/_| |_| \___/| .__/ \__|_|\___/|_| |_|___/
# |_|
# mesh generation options; gmsh is only used when genMesh is True,
# otherwise the pre-generated files under mesh_fileprefix are reused
domain.MeshOptions.use_gmsh = genMesh
domain.MeshOptions.genMesh = genMesh
# (removed a no-op `he = he` self-assignment that was here)
domain.MeshOptions.he = he
modulepath = os.path.dirname(os.path.abspath(__file__))
mesh_fileprefix = modulepath + '/meshFloatingCube'
domain.MeshOptions.setOutputFiles(mesh_fileprefix)
st.assembleDomain(domain)
domain.use_gmsh = False
domain.geofile = mesh_fileprefix
# _ _ _
# | \ | |_ _ _ __ ___ ___ _ __(_) ___ ___
# | \| | | | | '_ ` _ \ / _ \ '__| |/ __/ __|
# | |\ | |_| | | | | | | __/ | | | (__\__ \
# |_| \_|\__,_|_| |_| |_|\___|_| |_|\___|___/
# Numerics
# time stepping: very short run (0.1 s) with a fixed 0.01 s step
outputStepping = TpFlow.OutputStepping(
    final_time=0.1,
    dt_init=0.01,
    dt_output=0.1,
    nDTout=None,
    dt_fixed=0.01,
)
# unstructured two-phase flow problem on the assembled domain
myTpFlowProblem = TpFlow.TwoPhaseFlowProblem(
    ns_model=None,
    ls_model=None,
    nd=domain.nd,
    cfl=0.9,
    outputStepping=outputStepping,
    structured=False,
    he=he,
    nnx=None,
    nny=None,
    nnz=None,
    domain=domain,
    initialConditions=initialConditions,
    boundaryConditions=None, # set with SpatialTools,
    useSuperlu=False,
)
# line below needed for relaxation zones
# (!) hack
m = myTpFlowProblem.Parameters.Models
m.rans2p.auxiliaryVariables += domain.auxiliaryVariables['twp']
myTpFlowProblem.archiveAllSteps = True
myTpFlowProblem.movingDomain = True
params = myTpFlowProblem.Parameters
# MESH PARAMETERS
params.mesh.genMesh = genMesh
params.mesh.he = he
# PHYSICAL PARAMETERS
params.physical.densityA = rho_0  # water
params.physical.densityB = rho_1  # air
params.physical.kinematicViscosityA = nu_0  # water
params.physical.kinematicViscosityB = nu_1  # air
params.physical.gravity = np.array(g)
params.physical.surf_tension_coeff = sigma_01
# MODEL PARAMETERS
# assign consecutive solver indices: moveMeshElastic=0, rans2p=1, vof=2,
# ncls=3, rdls=4, mcorr=5, addedMass=6
ind = -1
m.moveMeshElastic.index = ind+1
ind += 1
m.rans2p.index = ind+1
ind += 1
m.vof.index = ind+1
ind += 1
m.ncls.index = ind+1
ind += 1
m.rdls.index = ind+1
ind += 1
m.mcorr.index = ind+1
ind += 1
m.addedMass.index = ind+1
ind += 1
# couple the Chrono system and its added-mass model to the flow solver
m.rans2p.auxiliaryVariables += [system]
m.rans2p.p.coefficients.eb_bc_penalty_constant = 10.#/nu_0#Re
m.addedMass.auxiliaryVariables += [system.ProtChAddedMass]
m.rans2p.p.coefficients.NONCONSERVATIVE_FORM=0.0
# highest boundary flag used anywhere in the domain
# (removed a dead `max_flag = 0` assignment that was immediately overwritten)
max_flag = max(domain.vertexFlags)
max_flag = max(domain.segmentFlags+[max_flag])
max_flag = max(domain.facetFlags+[max_flag])
# 1 for every boundary flag belonging to a rigid body, 0 otherwise
flags_rigidbody = np.zeros(max_flag+1, dtype='int32')
for s in system.subcomponents:
    # isinstance instead of `type(s) is`: subclasses of ProtChBody are
    # rigid bodies too and must be flagged for the added-mass model
    if isinstance(s, fsi.ProtChBody):
        for i in s.boundaryFlags:
            flags_rigidbody[i] = 1
m.addedMass.p.coefficients.flags_rigidbody = flags_rigidbody
| [
"proteus.TwoPhaseFlow.TwoPhaseFlowProblem.OutputStepping",
"proteus.mbd.CouplingFSI.ProtChSystem",
"proteus.ctransportCoefficients.smoothedHeaviside_integral",
"proteus.TwoPhaseFlow.TwoPhaseFlowProblem.TwoPhaseFlowProblem",
"proteus.ctransportCoefficients.smoothedHeaviside",
"proteus.mprans.SpatialTools.T... | [((728, 765), 'proteus.Domain.PiecewiseLinearComplexDomain', 'Domain.PiecewiseLinearComplexDomain', ([], {}), '()\n', (763, 765), False, 'from proteus import Domain, Context, Comm\n'), ((805, 832), 'proteus.mprans.SpatialTools.Tank3D', 'st.Tank3D', (['domain', 'tank_dim'], {}), '(domain, tank_dim)\n', (814, 832), True, 'from proteus.mprans import SpatialTools as st\n'), ((867, 1095), 'proteus.mprans.SpatialTools.Cuboid', 'st.Cuboid', (['domain'], {'dim': '[2 * radius, 2 * radius, 2 * radius]', 'coords': '(tank_dim[0] / 2.0, tank_dim[1] / 2.0, water_level + radius / 10.0)', 'barycenter': '(tank_dim[0] / 2.0, tank_dim[1] / 2.0, water_level + radius / 10.0)'}), '(domain, dim=[2 * radius, 2 * radius, 2 * radius], coords=(\n tank_dim[0] / 2.0, tank_dim[1] / 2.0, water_level + radius / 10.0),\n barycenter=(tank_dim[0] / 2.0, tank_dim[1] / 2.0, water_level + radius /\n 10.0))\n', (876, 1095), True, 'from proteus.mprans import SpatialTools as st\n'), ((1174, 1187), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1182, 1187), True, 'import numpy as np\n'), ((3935, 3962), 'numpy.array', 'np.array', (['[0.0, 0.0, -9.81]'], {}), '([0.0, 0.0, -9.81])\n', (3943, 3962), True, 'import numpy as np\n'), ((3970, 3988), 'proteus.mbd.CouplingFSI.ProtChSystem', 'fsi.ProtChSystem', ([], {}), '()\n', (3986, 3988), True, 'from proteus.mbd import CouplingFSI as fsi\n'), ((4153, 4182), 'proteus.mbd.CouplingFSI.ProtChBody', 'fsi.ProtChBody', ([], {'system': 'system'}), '(system=system)\n', (4167, 4182), True, 'from proteus.mbd import CouplingFSI as fsi\n'), ((4291, 4318), 'pychrono.ChVectorD', 'pychrono.ChVectorD', (['x', 'y', 'z'], {}), '(x, y, z)\n', (4309, 4318), False, 'import pychrono\n'), ((4362, 4395), 'pychrono.ChVectorD', 'pychrono.ChVectorD', (['(1.0)', '(1.0)', '(1.0)'], {}), '(1.0, 1.0, 1.0)\n', (4380, 4395), False, 'import pychrono\n'), ((5314, 5339), 'proteus.mprans.SpatialTools.assembleDomain', 'st.assembleDomain', (['domain'], {}), 
'(domain)\n', (5331, 5339), True, 'from proteus.mprans import SpatialTools as st\n'), ((5652, 5751), 'proteus.TwoPhaseFlow.TwoPhaseFlowProblem.OutputStepping', 'TpFlow.OutputStepping', ([], {'final_time': '(0.1)', 'dt_init': '(0.01)', 'dt_output': '(0.1)', 'nDTout': 'None', 'dt_fixed': '(0.01)'}), '(final_time=0.1, dt_init=0.01, dt_output=0.1, nDTout=\n None, dt_fixed=0.01)\n', (5673, 5751), True, 'import proteus.TwoPhaseFlow.TwoPhaseFlowProblem as TpFlow\n'), ((5789, 6062), 'proteus.TwoPhaseFlow.TwoPhaseFlowProblem.TwoPhaseFlowProblem', 'TpFlow.TwoPhaseFlowProblem', ([], {'ns_model': 'None', 'ls_model': 'None', 'nd': 'domain.nd', 'cfl': '(0.9)', 'outputStepping': 'outputStepping', 'structured': '(False)', 'he': 'he', 'nnx': 'None', 'nny': 'None', 'nnz': 'None', 'domain': 'domain', 'initialConditions': 'initialConditions', 'boundaryConditions': 'None', 'useSuperlu': '(False)'}), '(ns_model=None, ls_model=None, nd=domain.nd, cfl=\n 0.9, outputStepping=outputStepping, structured=False, he=he, nnx=None,\n nny=None, nnz=None, domain=domain, initialConditions=initialConditions,\n boundaryConditions=None, useSuperlu=False)\n', (5815, 6062), True, 'import proteus.TwoPhaseFlow.TwoPhaseFlowProblem as TpFlow\n'), ((6704, 6715), 'numpy.array', 'np.array', (['g'], {}), '(g)\n', (6712, 6715), True, 'import numpy as np\n'), ((7388, 7425), 'numpy.zeros', 'np.zeros', (['(max_flag + 1)'], {'dtype': '"""int32"""'}), "(max_flag + 1, dtype='int32')\n", (7396, 7425), True, 'import numpy as np\n'), ((4015, 4051), 'pychrono.ChVectorD', 'pychrono.ChVectorD', (['g[0]', 'g[1]', 'g[2]'], {}), '(g[0], g[1], g[2])\n', (4033, 4051), False, 'import pychrono\n'), ((5189, 5214), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (5204, 5214), False, 'import os\n'), ((3309, 3362), 'proteus.ctransportCoefficients.smoothedHeaviside', 'smoothedHeaviside', (['smoothing', '(x[nd - 1] - water_level)'], {}), '(smoothing, x[nd - 1] - water_level)\n', (3326, 3362), False, 'from 
proteus.ctransportCoefficients import smoothedHeaviside\n'), ((4512, 4537), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (4520, 4537), True, 'import numpy as np\n'), ((4541, 4566), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (4549, 4566), True, 'import numpy as np\n'), ((2926, 2970), 'proteus.ctransportCoefficients.smoothedHeaviside_integral', 'smoothedHeaviside_integral', (['smoothing', 'phi_L'], {}), '(smoothing, phi_L)\n', (2952, 2970), False, 'from proteus.ctransportCoefficients import smoothedHeaviside_integral\n'), ((3019, 3061), 'proteus.ctransportCoefficients.smoothedHeaviside_integral', 'smoothedHeaviside_integral', (['smoothing', 'phi'], {}), '(smoothing, phi)\n', (3045, 3061), False, 'from proteus.ctransportCoefficients import smoothedHeaviside_integral\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.