code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import copy
import math
try:
from transformers.modeling_bert import BertConfig, BertEncoder, BertModel
except:
from transformers.models.bert.modeling_bert import BertConfig, BertEncoder, BertModel
class LSTM(nn.Module):
    """Plain LSTM over concatenated categorical embeddings + continuous features.

    forward() expects ``input`` to be a dict with LongTensors "interaction",
    "testId", "assessmentItemID", "KnowledgeTag" of shape (batch, seq) plus
    one FloatTensor of shape (batch, seq) per name in ``args.cont_col``.
    Returns sigmoid scores of shape (batch, seq).
    """
    def __init__(self, args):
        super(LSTM, self).__init__()
        self.args = args
        self.device = args.device
        self.hidden_dim = self.args.hidden_dim
        self.n_layers = self.args.n_layers
        # Embedding
        # "interaction" encodes correctness: correct (1, 2) + padding (0)
        self.embedding_interaction = nn.Embedding(3, self.hidden_dim//3)
        self.embedding_test = nn.Embedding(self.args.n_test + 1, self.hidden_dim//3)
        self.embedding_question = nn.Embedding(self.args.n_questions + 1, self.hidden_dim//3)
        self.embedding_tag = nn.Embedding(self.args.n_tag + 1, self.hidden_dim//3)
        # BUG FIX: this line used to assign to `self.embedding_tag` again,
        # silently replacing the tag table with one sized n_grade + 1 and
        # crashing KnowledgeTag lookups whose index exceeds n_grade.  Keep it
        # as a separate grade embedding (consistent with LSTMATTN / Bert).
        self.embedding_grade = nn.Embedding(self.args.n_grade + 1, self.hidden_dim//3)
        # embedding combination projection (4 categorical lookups concatenated)
        self.cate_proj = nn.Sequential(nn.Linear((self.hidden_dim//3)*4, self.hidden_dim), nn.LayerNorm(self.hidden_dim))
        self.embedding_cont = nn.Sequential(nn.Linear(self.args.n_cont, self.hidden_dim), nn.LayerNorm(self.hidden_dim))
        self.comb_proj = nn.Sequential(nn.ReLU(),
                                       nn.Linear(self.args.hidden_dim*2, self.args.hidden_dim),
                                       nn.LayerNorm(self.args.hidden_dim))
        self.lstm = nn.LSTM(self.hidden_dim,
                            self.hidden_dim,
                            self.n_layers,
                            batch_first=True)
        # Fully connected layer
        self.fc = nn.Linear(self.hidden_dim, 1)
        self.activation = nn.Sigmoid()

    def init_hidden(self, batch_size):
        # Fresh zero (h, c) state on the model's device for each forward pass.
        h = torch.zeros(
            self.n_layers,
            batch_size,
            self.hidden_dim)
        h = h.to(self.device)
        c = torch.zeros(
            self.n_layers,
            batch_size,
            self.hidden_dim)
        c = c.to(self.device)
        return (h, c)

    def forward(self, input):
        # TODO: revisit this input handling
        batch_size = input["interaction"].size(0)
        # Embedding
        embed_interaction = self.embedding_interaction(input["interaction"])
        embed_test = self.embedding_test(input["testId"])
        embed_question = self.embedding_question(input["assessmentItemID"])
        embed_tag = self.embedding_tag(input["KnowledgeTag"])
        embed_cate = torch.cat([embed_interaction,
                                embed_test,
                                embed_question,
                                embed_tag,], 2)
        embed_cate = self.cate_proj(embed_cate)
        # TODO: find a cleaner way to stack all continuous features at once
        cont = torch.cat([input[c].unsqueeze(2) for c in self.args.cont_col], 2)
        embed_cont = self.embedding_cont(cont)
        X = self.comb_proj(torch.cat([embed_cate, embed_cont],2))
        hidden = self.init_hidden(batch_size)
        out, hidden = self.lstm(X, hidden)
        out = out.contiguous().view(batch_size, -1, self.hidden_dim)
        out = self.fc(out)
        preds = self.activation(out).view(batch_size, -1)
        return preds
class LSTMATTN(nn.Module):
    """LSTM followed by a single-layer BERT self-attention encoder.

    Categorical features are embedded feature-by-feature (only the keys
    actually present in ``input`` are used), projected to ``hidden_dim``,
    fused with an embedding of the continuous features, run through an LSTM,
    and re-weighted by a one-layer BertEncoder.  forward() returns sigmoid
    scores of shape (batch, seq), or raw last-step features when
    ``args.loss_type == 'arcface'``.
    """
    def __init__(self, args):
        super(LSTMATTN, self).__init__()
        self.args = args
        self.device = args.device
        self.hidden_dim = self.args.hidden_dim
        self.n_layers = self.args.n_layers
        self.n_heads = self.args.n_heads
        self.drop_out = self.args.drop_out
        # Embedding
        # "interaction" encodes correctness: correct (1, 2) + padding (0)
        self.embedding_interaction = nn.Embedding(3, self.hidden_dim//3)
        # 13 problem numbers x 2 responses + padding.
        # NOTE(review): the 13 is hard-coded — confirm it matches the maximum
        # number of problems per test in the data.
        self.embedding_problem_interaction = nn.Embedding(13*2+1, self.hidden_dim//3)
        self.embedding_test = nn.Embedding(self.args.n_test + 1, self.hidden_dim//3)
        self.embedding_question = nn.Embedding(self.args.n_questions + 1, self.hidden_dim//3)
        self.embedding_tag = nn.Embedding(self.args.n_tag + 1, self.hidden_dim//3)
        self.embedding_grade = nn.Embedding(self.args.n_grade + 1, self.hidden_dim//3)
        # Shared embedding table for any extra categorical column in args.cate_col.
        self.embedding_other = nn.Embedding(self.args.n_other + 1, self.hidden_dim//3)
        # embedding combination projection
        # len(cate_col)+1 because the interaction embedding is appended on
        # top of the configured categorical columns.
        self.cate_proj = nn.Sequential(nn.Linear((self.hidden_dim//3)*(len(self.args.cate_col)+1), self.hidden_dim), nn.LayerNorm(self.hidden_dim))
        self.bn_cont = nn.BatchNorm1d(self.args.n_cont)
        self.embedding_cont = nn.Sequential(
            nn.Linear(self.args.n_cont, self.hidden_dim),
            nn.LayerNorm(self.hidden_dim))
        self.comb_proj = nn.Sequential(
            nn.Dropout(0.3),
            nn.Linear(self.hidden_dim*2, self.hidden_dim),
            nn.LayerNorm(self.hidden_dim))
        self.lstm = nn.LSTM(self.hidden_dim,
                            self.hidden_dim,
                            self.n_layers,
                            batch_first=True)
        # One-layer BERT encoder used purely as a self-attention block.
        self.config = BertConfig(
            3, # not used
            hidden_size=self.hidden_dim,
            num_hidden_layers=1,
            num_attention_heads=self.n_heads,
            intermediate_size=self.hidden_dim,
            hidden_dropout_prob=self.drop_out,
            attention_probs_dropout_prob=self.drop_out,
        )
        self.attn = BertEncoder(self.config)
        # Fully connected layer
        self.fc = nn.Linear(self.hidden_dim, 1)
        self.activation = nn.Sigmoid()

    def init_hidden(self, batch_size):
        # Fresh zero (h, c) state on the model's device for each forward pass.
        h = torch.zeros(
            self.n_layers,
            batch_size,
            self.hidden_dim)
        h = h.to(self.device)
        c = torch.zeros(
            self.n_layers,
            batch_size,
            self.hidden_dim)
        c = c.to(self.device)
        return (h, c)

    def forward(self, input):
        # Categorical Variable Embedding
        # NOTE(review): batch_size is only assigned when "interaction" or
        # "problem_interaction" is present in input; any other configuration
        # would raise NameError further down — confirm against the loaders.
        be_concat = []
        if "interaction" in input :
            embed_interaction = self.embedding_interaction(input["interaction"])
            be_concat.append(embed_interaction)
            batch_size = input["interaction"].size(0)
        if "problem_interaction" in input :
            embed_problem_interaction = self.embedding_problem_interaction(input["problem_interaction"])
            be_concat.append(embed_problem_interaction)
            batch_size = input["problem_interaction"].size(0)
        if "testId" in input :
            embed_test = self.embedding_test(input["testId"])
            be_concat.append(embed_test)
        if "assessmentItemID" in input :
            embed_question = self.embedding_question(input["assessmentItemID"])
            be_concat.append(embed_question)
        if "KnowledgeTag" in input :
            embed_tag = self.embedding_tag(input["KnowledgeTag"])
            be_concat.append(embed_tag)
        if "grade" in input :
            embed_grade = self.embedding_grade(input["grade"])
            be_concat.append(embed_grade)
        # Categorical Variable Embedding (other embedding at one embedding function)
        for c in self.args.cate_col :
            if c not in ['assessmentItemID', 'testId', 'KnowledgeTag', 'grade']:
                be_concat.append(self.embedding_other(input[c]))
        embed_cate = torch.cat(be_concat, 2)
        embed = self.cate_proj(embed_cate)
        # continuous variable embedding
        # batch normalization over the flattened (batch*seq, features) view
        # NOTE(review): if n_cont == 0, `embed` stays a tensor and the
        # torch.cat below would fail — this path assumes n_cont > 0
        # (comb_proj's hidden_dim*2 input also requires it).
        if self.args.n_cont > 0 :
            cont = torch.cat([input[c].unsqueeze(2) for c in self.args.cont_col], 2)
            cont = self.bn_cont(cont.view(-1,cont.size(-1))).view(batch_size,-1,cont.size(-1))
            embed_cont = self.embedding_cont(cont)
            embed = [embed, embed_cont]
        # Running LSTM
        X = self.comb_proj(torch.cat(embed,2))
        hidden = self.init_hidden(batch_size)
        out, hidden = self.lstm(X, hidden)
        out = out.contiguous().view(batch_size, -1, self.hidden_dim)
        # Standard additive attention mask: 0 for real tokens, -10000 for padding.
        extended_attention_mask = input["mask"].unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(dtype=torch.float32)
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        head_mask = [None] * self.n_layers
        encoded_layers = self.attn(out, extended_attention_mask, head_mask=head_mask)
        sequence_output = encoded_layers[-1]
        # For arcface training return the raw last-step features —
        # presumably the margin loss is applied by the caller; verify.
        if self.args.loss_type == 'arcface' :
            sequence_output = sequence_output[:,-1].contiguous().view(batch_size, -1)
            return sequence_output
        out = self.fc(sequence_output)
        preds = self.activation(out).view(batch_size, -1)
        return preds
class Bert(nn.Module):
    """Pure BERT encoder over fused categorical + continuous embeddings.

    forward() expects LongTensors "interaction", "testId",
    "assessmentItemID", "KnowledgeTag", "grade" of shape (batch, seq), one
    FloatTensor per entry of ``args.cont_col``, and an attention "mask";
    it returns sigmoid scores of shape (batch, seq).
    """
    def __init__(self, args):
        super(Bert, self).__init__()
        self.args = args
        self.device = args.device
        # Model dimensions
        self.hidden_dim = self.args.hidden_dim
        self.n_layers = self.args.n_layers
        quarter = self.hidden_dim // 4
        # Categorical lookups; "interaction" encodes correctness:
        # correct (1, 2) + padding (0).
        self.embedding_interaction = nn.Embedding(3, quarter)
        self.embedding_test = nn.Embedding(self.args.n_test + 1, quarter)
        self.embedding_question = nn.Embedding(self.args.n_questions + 1, quarter)
        self.embedding_tag = nn.Embedding(self.args.n_tag + 1, quarter)
        self.embedding_grade = nn.Embedding(self.args.n_grade + 1, quarter)
        # Project the five concatenated lookups down to hidden_dim.
        self.cate_proj = nn.Sequential(nn.Linear(quarter * 5, self.hidden_dim), nn.LayerNorm(self.hidden_dim))
        self.embedding_cont = nn.Sequential(nn.Linear(self.args.n_cont, self.hidden_dim), nn.LayerNorm(self.hidden_dim))
        self.comb_proj = nn.Sequential(nn.ReLU(),
                                       nn.Linear(self.args.hidden_dim * 2, self.args.hidden_dim),
                                       nn.LayerNorm(self.args.hidden_dim))
        # BERT configuration; the vocab size (3) is unused because we feed
        # pre-computed embeddings via inputs_embeds.
        self.config = BertConfig(
            3, # not used
            hidden_size=self.hidden_dim,
            num_hidden_layers=self.args.n_layers,
            num_attention_heads=self.args.n_heads,
            max_position_embeddings=self.args.max_seq_len
        )
        # BERT encoder stack
        self.encoder = BertModel(self.config)
        # Prediction head
        self.fc = nn.Linear(self.args.hidden_dim, 1)
        self.activation = nn.Sigmoid()

    def forward(self, input):
        batch_size = input["interaction"].size(0)
        # Look up every categorical feature and fuse them into one vector.
        cate_embeds = torch.cat(
            [
                self.embedding_interaction(input["interaction"]),
                self.embedding_test(input["testId"]),
                self.embedding_question(input["assessmentItemID"]),
                self.embedding_tag(input["KnowledgeTag"]),
                self.embedding_grade(input["grade"]),
            ],
            2,
        )
        cate_embeds = self.cate_proj(cate_embeds)
        # Stack each continuous column along a new trailing axis and embed.
        cont_feats = torch.cat([input[name].unsqueeze(2) for name in self.args.cont_col], 2)
        cont_embeds = self.embedding_cont(cont_feats)
        hidden_states = self.comb_proj(torch.cat([cate_embeds, cont_embeds], 2))
        # Run the BERT encoder on the fused sequence representation.
        encoded = self.encoder(inputs_embeds=hidden_states, attention_mask=input["mask"])
        out = encoded[0].contiguous().view(batch_size, -1, self.hidden_dim)
        return self.activation(self.fc(out)).view(batch_size, -1)
class FFN(nn.Module):
    """Two-layer position-wise feed-forward block.

    Computes ``dropout(lr2(relu(lr1(x))))`` with p=0.2 dropout.
    """
    def __init__(self, state_size=200):
        super(FFN, self).__init__()
        self.state_size = state_size
        self.lr1 = nn.Linear(state_size, state_size)
        self.relu = nn.ReLU()
        self.lr2 = nn.Linear(state_size, state_size)
        self.dropout = nn.Dropout(0.2)

    def forward(self, x):
        hidden = self.relu(self.lr1(x))
        return self.dropout(self.lr2(hidden))
def future_mask(seq_length):
    """Return a (seq_length, seq_length) bool tensor that is True strictly
    above the diagonal, i.e. marks future positions to be masked out."""
    upper = np.triu(np.ones((seq_length, seq_length)), k=1)
    return torch.from_numpy(upper.astype("bool"))
class SAKT(nn.Module):
    """Self-Attentive Knowledge Tracing (SAKT) model.

    BUG FIX: ``super()`` was called with the undefined name ``SAKTModel``
    (the class is named ``SAKT``), so instantiation raised NameError; it now
    references this class.
    """
    def __init__(self, n_skill, max_seq=400, embed_dim=256): # HDKIM 100
        super(SAKT, self).__init__()
        self.n_skill = n_skill
        self.embed_dim = embed_dim
        # Interaction embedding: 2 responses per skill + padding.
        self.embedding = nn.Embedding(2 * n_skill + 1, embed_dim)
        self.pos_embedding = nn.Embedding(max_seq - 1, embed_dim)
        self.e_embedding = nn.Embedding(n_skill + 1, embed_dim)
        self.multi_att = nn.MultiheadAttention(
            embed_dim=embed_dim, num_heads=8, dropout=0.2
        )
        self.dropout = nn.Dropout(0.2)
        self.layer_normal = nn.LayerNorm(embed_dim)
        self.ffn = FFN(embed_dim)
        self.pred = nn.Linear(embed_dim, 1)
        self.activation = nn.Sigmoid()

    def forward(self, x, question_ids):
        """x: past interaction ids (bs, s_len); question_ids: exercise ids.
        Returns (predictions (bs, s_len), attention weights)."""
        device = x.device
        x = self.embedding(x)
        # Learned positional embedding, broadcast over the batch.
        pos_id = torch.arange(x.size(1)).unsqueeze(0).to(device)
        pos_x = self.pos_embedding(pos_id)
        x = x + pos_x
        e = self.e_embedding(question_ids)
        # nn.MultiheadAttention expects (s_len, bs, embed)
        x = x.permute(1, 0, 2)  # x: [bs, s_len, embed] => [s_len, bs, embed]
        e = e.permute(1, 0, 2)
        # Causal mask so each step only attends to past interactions.
        att_mask = future_mask(x.size(0)).to(device)
        # Queries are the exercises; keys/values the past interactions.
        att_output, att_weight = self.multi_att(e, x, x, attn_mask=att_mask)
        att_output = self.layer_normal(att_output + e)
        att_output = att_output.permute(
            1, 0, 2
        ) # att_output: [s_len, bs, embed] => [bs, s_len, embed]
        x = self.ffn(att_output)
        x = self.layer_normal(x + att_output)
        x = self.pred(x)
        x = self.activation(x)
        return x.squeeze(-1), att_weight
class Feed_Forward_block(nn.Module):
    """Position-wise feed-forward block.

    out = Relu( M_out*w1 + b1) *w2 + b2
    """
    def __init__(self, dim_ff):
        super().__init__()
        self.layer1 = nn.Linear(in_features=dim_ff, out_features=dim_ff)
        self.layer2 = nn.Linear(in_features=dim_ff, out_features=dim_ff)

    def forward(self, ffn_in):
        hidden = F.relu(self.layer1(ffn_in))
        return self.layer2(hidden)
class LastQuery(nn.Module):
    """Last-query transformer (keetar-style): the attention query is only the
    final time step, followed by residual/FFN blocks, an LSTM and a linear
    head producing per-step probabilities of shape (batch, seq).

    BUG FIX in forward(): ``seq_len`` and ``mask`` were referenced without
    ever being defined (NameError at runtime); they are now derived from the
    projected embedding and ``input["mask"]``.
    """
    def __init__(self, args):
        super(LastQuery, self).__init__()
        self.args = args
        self.device = args.device
        self.hidden_dim = self.args.hidden_dim
        # Embedding
        # "interaction" encodes correctness: correct (1, 2) + padding (0)
        self.embedding_interaction = nn.Embedding(3, self.hidden_dim//3)
        # 13 problem numbers x 2 responses + padding (hard-coded; see LSTMATTN)
        self.embedding_problem_interaction = nn.Embedding(13*2+1, self.hidden_dim//3)
        self.embedding_test = nn.Embedding(self.args.n_test + 1, self.hidden_dim//3)
        self.embedding_question = nn.Embedding(self.args.n_questions + 1, self.hidden_dim//3)
        self.embedding_tag = nn.Embedding(self.args.n_tag + 1, self.hidden_dim//3)
        self.embedding_grade = nn.Embedding(self.args.n_grade + 1, self.hidden_dim//3)
        # Shared embedding table for any extra categorical column in args.cate_col.
        self.embedding_other = nn.Embedding(self.args.n_other + 1, self.hidden_dim//3)
        self.embedding_position = nn.Embedding(self.args.max_seq_len, self.hidden_dim)
        # len(cate_col)+1: the interaction embedding is appended on top of
        # the configured categorical columns.
        self.cate_proj = nn.Sequential(
            nn.Linear((self.hidden_dim//3)*(len(self.args.cate_col)+1), self.hidden_dim),
            nn.LayerNorm(self.hidden_dim))
        self.bn_cont = nn.BatchNorm1d(self.args.n_cont)
        self.embedding_cont = nn.Sequential(
            nn.Linear(self.args.n_cont, self.hidden_dim),
            nn.LayerNorm(self.hidden_dim))
        # embedding combination projection
        self.comb_proj = nn.Sequential(
            nn.Linear(self.hidden_dim*2, self.hidden_dim),
            nn.LayerNorm(self.hidden_dim))
        # The original keetar solution does not use positional embeddings;
        # enabling them is left to the experimenter.
        # Encoder
        self.query = nn.Linear(
            in_features=self.hidden_dim, out_features=self.hidden_dim
        )
        self.key = nn.Linear(
            in_features=self.hidden_dim, out_features=self.hidden_dim
        )
        self.value = nn.Linear(
            in_features=self.hidden_dim, out_features=self.hidden_dim
        )
        self.attn = nn.MultiheadAttention(
            embed_dim=self.hidden_dim, num_heads=self.args.n_heads
        )
        self.mask = None # not needed for last-query attention; kept in case of future changes
        self.ffn = Feed_Forward_block(self.hidden_dim)
        self.ln1 = nn.LayerNorm(self.hidden_dim)
        self.ln2 = nn.LayerNorm(self.hidden_dim)
        # LSTM
        self.lstm = nn.LSTM(
            self.hidden_dim, self.hidden_dim, self.args.n_layers, batch_first=True
        )
        # Fully connected layer
        self.fc = nn.Linear(self.hidden_dim, 1)
        self.activation = nn.Sigmoid()

    def get_pos(self, seq_len):
        # use sine positional embeddinds
        return torch.arange(seq_len).unsqueeze(0)

    def init_hidden(self, batch_size):
        # Fresh zero (h, c) LSTM state on the model's device.
        h = torch.zeros(self.args.n_layers, batch_size, self.args.hidden_dim)
        h = h.to(self.device)
        c = torch.zeros(self.args.n_layers, batch_size, self.args.hidden_dim)
        c = c.to(self.device)
        return (h, c)

    def get_mask(self, seq_len, mask, batch_size):
        # Invert the padding mask: 1 marks padding, 0 marks real tokens.
        new_mask = torch.zeros_like(mask)
        new_mask[mask == 0] = 1
        new_mask[mask != 0] = 0
        mask = new_mask
        # Repeat each mask batch_size * n_heads times, as required by
        # nn.MultiheadAttention's attn_mask layout.
        mask = mask.repeat(1, self.args.n_heads).view(batch_size*self.args.n_heads, -1, seq_len)
        return mask.masked_fill(mask==1, float('-inf'))

    def forward(self, input):
        # Categorical Variable Embedding
        # NOTE(review): batch_size is only assigned when "interaction" or
        # "problem_interaction" is present in input — confirm the loaders
        # always supply one of them.
        be_concat = []
        if "interaction" in input :
            embed_interaction = self.embedding_interaction(input["interaction"])
            be_concat.append(embed_interaction)
            batch_size = input["interaction"].size(0)
        if "problem_interaction" in input :
            embed_problem_interaction = self.embedding_problem_interaction(input["problem_interaction"])
            be_concat.append(embed_problem_interaction)
            batch_size = input["problem_interaction"].size(0)
        if "testId" in input :
            embed_test = self.embedding_test(input["testId"])
            be_concat.append(embed_test)
        if "assessmentItemID" in input :
            embed_question = self.embedding_question(input["assessmentItemID"])
            be_concat.append(embed_question)
        if "KnowledgeTag" in input :
            embed_tag = self.embedding_tag(input["KnowledgeTag"])
            be_concat.append(embed_tag)
        if "grade" in input :
            embed_grade = self.embedding_grade(input["grade"])
            be_concat.append(embed_grade)
        # Categorical Variable Embedding (other embedding at one embedding function)
        for c in self.args.cate_col :
            if c not in ['assessmentItemID', 'testId', 'KnowledgeTag', 'grade']:
                be_concat.append(self.embedding_other(input[c]))
        embed_cate = torch.cat(be_concat, 2)
        embed = self.cate_proj(embed_cate)
        # continuous variable embedding
        # batch normalization over the flattened (batch*seq, features) view
        # NOTE(review): this path assumes n_cont > 0 (comb_proj expects
        # hidden_dim*2 features).
        if self.args.n_cont > 0 :
            cont = torch.cat([input[c].unsqueeze(2) for c in self.args.cont_col], 2)
            cont = self.bn_cont(cont.view(-1,cont.size(-1))).view(batch_size,-1,cont.size(-1))
            embed_cont = self.embedding_cont(cont)
            embed = [embed, embed_cont]
        embed = self.comb_proj(torch.cat(embed,2))
        # Positional Embedding
        # intentionally skipped for last-query attention
        # position = self.get_pos(seq_len).to('cuda')
        # embed_pos = self.embedding_position(position)
        # embed = embed + embed_pos
        ####################### ENCODER #####################
        # BUG FIX: seq_len and mask were previously used without being defined.
        seq_len = embed.size(1)
        mask = input["mask"]
        q = self.query(embed)[:, -1:, :].permute(1, 0, 2)
        k = self.key(embed).permute(1, 0, 2)
        v = self.value(embed).permute(1, 0, 2)
        ## attention
        # last query only
        # NOTE(review): the computed mask is stored but not passed to
        # self.attn below — confirm whether that is intentional.
        self.mask = self.get_mask(seq_len, mask, batch_size).to(self.device)
        out, _ = self.attn(q, k, v)
        ## residual + layer norm
        out = out.permute(1, 0, 2)
        out = embed + out
        out = self.ln1(out)
        ## feed forward network
        out = self.ffn(out)
        ## residual + layer norm
        out = embed + out
        out = self.ln2(out)
        ###################### LSTM #####################
        hidden = self.init_hidden(batch_size)
        out, hidden = self.lstm(out, hidden)
        ###################### DNN #####################
        out = out.contiguous().view(batch_size, -1, self.hidden_dim)
        out = self.fc(out)
        preds = self.activation(out).view(batch_size, -1)
        return preds
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding with a learnable scalar scale.

    The precomputed table has shape (max_len, 1, d_model); forward() adds
    ``scale * pe[:seq_len]`` to an input of shape (seq_len, batch, d_model)
    and applies dropout.
    """
    def __init__(self, d_model, dropout=0.1, max_len=1000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.scale = nn.Parameter(torch.ones(1))
        # Classic transformer frequencies: 10000^(-2i/d_model) per even dim.
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        inv_freq = torch.exp(torch.arange(
            0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        angles = positions * inv_freq
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(angles)  # even dims get sine
        pe[:, 1::2] = torch.cos(angles)  # odd dims get cosine
        self.register_buffer('pe', pe.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        x = x + self.scale * self.pe[:x.size(0), :]
        return self.dropout(x)
class Saint(nn.Module):
    """SAINT-style encoder-decoder transformer over exercise features.

    The encoder consumes the categorical/continuous exercise features listed
    in ``args.cate_col_e`` / ``args.cont_col_e``; the decoder additionally
    consumes the response interactions (``args.cate_col_d`` /
    ``args.cont_col_d``).  forward() returns sigmoid scores (batch, seq).

    BUG FIXES in forward():
      * the decoder's "KnowledgeTag" and "grade" branches tested
        ``"assessmentItemID" in self.args.cate_col_d`` (copy-paste), gating
        those embeddings on the wrong column;
      * ``batch_size``/``seq_len`` were only assigned inside the encoder's
        KnowledgeTag branch (NameError otherwise); they are now always
        derived from ``input["mask"]``.
    """
    def __init__(self, args):
        super(Saint, self).__init__()
        self.args = args
        self.device = args.device
        self.hidden_dim = self.args.hidden_dim
        # self.dropout = self.args.dropout
        self.dropout = 0.
        ### Embedding
        # ENCODER embedding
        self.embedding_test = nn.Embedding(self.args.n_test + 1, self.hidden_dim//3)
        self.embedding_question = nn.Embedding(self.args.n_questions + 1, self.hidden_dim//3)
        self.embedding_tag = nn.Embedding(self.args.n_tag + 1, self.hidden_dim//3)
        self.embedding_grade = nn.Embedding(self.args.n_grade + 1, self.hidden_dim//3)
        # max(..., 1) keeps BatchNorm1d/Linear constructible when there are
        # no continuous encoder features.
        self.bn_cont_e = nn.BatchNorm1d(max(self.args.n_cont_e,1))
        self.embedding_cont_e = nn.Sequential(
            nn.Linear(max(self.args.n_cont_e,1), self.hidden_dim),
            nn.LayerNorm(self.hidden_dim))
        # c is 1 when continuous encoder features exist, else 0.
        c = min(self.args.n_cont_e,1)
        # encoder combination projection
        self.enc_comb_proj = nn.Sequential(
            nn.Linear(self.hidden_dim * c+(self.hidden_dim//3)*len(self.args.cate_col_e),
                      self.hidden_dim),
            nn.LayerNorm(self.hidden_dim))
        # DECODER embedding
        # "interaction" encodes correctness: correct (1, 2) + padding (0)
        self.embedding_interaction = nn.Embedding(3, self.hidden_dim//3)
        self.embedding_problem_interaction = nn.Embedding(13*2+1, self.hidden_dim//3)
        self.embedding_other = nn.Embedding(self.args.n_other + 1, self.hidden_dim//3)
        self.bn_cont_d = nn.BatchNorm1d(max(self.args.n_cont_d,1))
        self.embedding_cont_d = nn.Sequential(
            nn.Linear(max(self.args.n_cont_d,1), self.hidden_dim),
            nn.LayerNorm(self.hidden_dim))
        # decoder combination projection
        # (len(cate_col_d)+1 because the interaction embedding is appended)
        c = min(self.args.n_cont_d,1)
        self.dec_comb_proj = nn.Linear(self.hidden_dim*c+(self.hidden_dim//3)*(len(self.args.cate_col_d)+1),
                                       self.hidden_dim)
        # Positional encoding
        self.pos_encoder = PositionalEncoding(self.hidden_dim, self.dropout, self.args.max_seq_len)
        self.pos_decoder = PositionalEncoding(self.hidden_dim, self.dropout, self.args.max_seq_len)
        self.transformer = nn.Transformer(
            d_model=self.hidden_dim,
            nhead=self.args.n_heads,
            num_encoder_layers=self.args.n_layers,
            num_decoder_layers=self.args.n_layers,
            dim_feedforward=self.hidden_dim,
            dropout=self.dropout,
            activation='relu')
        self.fc = nn.Linear(self.hidden_dim, 1)
        self.activation = nn.Sigmoid()
        # Cached causal masks, rebuilt whenever the sequence length changes.
        self.enc_mask = None
        self.dec_mask = None
        self.enc_dec_mask = None

    def get_mask(self, seq_len):
        # Upper-triangular causal mask: -inf strictly above the diagonal.
        mask = torch.from_numpy(np.triu(np.ones((seq_len, seq_len)), k=1))
        return mask.masked_fill(mask==1, float('-inf'))

    def forward(self, input):
        # FIX: shapes are always derivable from the padding mask; they were
        # previously only set inside the encoder's KnowledgeTag branch.
        batch_size = input["mask"].size(0)
        seq_len = input["mask"].size(1)
        # ENCODER features
        be_concat = []
        if "testId" in input and "testId" in self.args.cate_col_e:
            embed_test = self.embedding_test(input["testId"])
            be_concat.append(embed_test)
        if "assessmentItemID" in input and "assessmentItemID" in self.args.cate_col_e:
            embed_question = self.embedding_question(input["assessmentItemID"])
            be_concat.append(embed_question)
        if "KnowledgeTag" in input and "KnowledgeTag" in self.args.cate_col_e:
            embed_tag = self.embedding_tag(input["KnowledgeTag"])
            be_concat.append(embed_tag)
        if "grade" in input and "grade" in self.args.cate_col_e:
            embed_grade = self.embedding_grade(input["grade"])
            be_concat.append(embed_grade)
        # Extra categorical encoder columns share one embedding table.
        for c in self.args.cate_col_e :
            if c not in ['assessmentItemID', 'testId', 'KnowledgeTag', 'grade']:
                be_concat.append(self.embedding_other(input[c]))
        if self.args.n_cont_e > 0 :
            cont = torch.cat([input[c].unsqueeze(2) for c in self.args.cont_col_e], 2)
            # BatchNorm1d over the flattened (batch*seq, features) view.
            cont = self.bn_cont_e(cont.view(-1,cont.size(-1))).view(batch_size,-1,cont.size(-1))
            embed_cont_e = self.embedding_cont_e(cont)
            be_concat.append(embed_cont_e)
        embed_enc = torch.cat(be_concat, 2)
        embed_enc = self.enc_comb_proj(embed_enc)
        # DECODER features
        be_concat = []
        if "testId" in input and "testId" in self.args.cate_col_d:
            embed_test = self.embedding_test(input["testId"])
            be_concat.append(embed_test)
        if "assessmentItemID" in input and "assessmentItemID" in self.args.cate_col_d:
            embed_question = self.embedding_question(input["assessmentItemID"])
            be_concat.append(embed_question)
        # FIX: the next two branches previously tested "assessmentItemID".
        if "KnowledgeTag" in input and "KnowledgeTag" in self.args.cate_col_d:
            embed_tag = self.embedding_tag(input["KnowledgeTag"])
            be_concat.append(embed_tag)
        if "grade" in input and "grade" in self.args.cate_col_d:
            embed_grade = self.embedding_grade(input["grade"])
            be_concat.append(embed_grade)
        if "interaction" in input :
            embed_interaction = self.embedding_interaction(input["interaction"])
            be_concat.append(embed_interaction)
        if "problem_interaction" in input :
            embed_problem_interaction = self.embedding_problem_interaction(input["problem_interaction"])
            be_concat.append(embed_problem_interaction)
        for c in self.args.cate_col_d :
            if c not in ['assessmentItemID', 'testId', 'KnowledgeTag', 'grade']:
                be_concat.append(self.embedding_other(input[c]))
        if self.args.n_cont_d > 0 :
            cont = torch.cat([input[c].unsqueeze(2) for c in self.args.cont_col_d], 2)
            cont = self.bn_cont_d(cont.view(-1,cont.size(-1))).view(batch_size,-1,cont.size(-1))
            embed_cont_d = self.embedding_cont_d(cont)
            be_concat.append(embed_cont_d)
        embed_dec = torch.cat(be_concat, 2)
        embed_dec = self.dec_comb_proj(embed_dec)
        # ATTENTION MASKS
        # All three masks share the same square shape, so a single cached
        # mask would suffice; kept separate for clarity.
        if self.enc_mask is None or self.enc_mask.size(0) != seq_len:
            self.enc_mask = self.get_mask(seq_len).to(self.device)
        if self.dec_mask is None or self.dec_mask.size(0) != seq_len:
            self.dec_mask = self.get_mask(seq_len).to(self.device)
        if self.enc_dec_mask is None or self.enc_dec_mask.size(0) != seq_len:
            self.enc_dec_mask = self.get_mask(seq_len).to(self.device)
        # nn.Transformer expects (seq, batch, feature).
        embed_enc = embed_enc.permute(1, 0, 2)
        embed_dec = embed_dec.permute(1, 0, 2)
        # Positional encoding
        embed_enc = self.pos_encoder(embed_enc)
        embed_dec = self.pos_decoder(embed_dec)
        # Currently unused; kept for the commented-out key-padding masks below.
        mask = input["mask"]
        mask = mask.eq(0)
        out = self.transformer(embed_enc, embed_dec,
                               src_mask = self.enc_mask,
                               tgt_mask = self.dec_mask,
                               memory_mask = self.enc_dec_mask,
                               # src_key_padding_mask = mask,
                               # tgt_key_padding_mask = mask,
                               # memory_key_padding_mask = mask,
                               )
        out = out.permute(1, 0, 2)
        out = out.contiguous().view(batch_size, -1, self.hidden_dim)
        out = self.fc(out)
        preds = self.activation(out).view(batch_size, -1)
        return preds
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.sin",
"torch.from_numpy",
"math.log",
"torch.nn.BatchNorm1d",
"torch.cos",
"torch.arange",
"torch.nn.Sigmoid",
"transformers.models.bert.modeling_bert.BertModel",
"torch.nn.LSTM",
"torch.nn.LayerNorm",
"transformers.models.bert.modeling_bert.BertCo... | [((12488, 12517), 'torch.from_numpy', 'torch.from_numpy', (['future_mask'], {}), '(future_mask)\n', (12504, 12517), False, 'import torch\n'), ((675, 712), 'torch.nn.Embedding', 'nn.Embedding', (['(3)', '(self.hidden_dim // 3)'], {}), '(3, self.hidden_dim // 3)\n', (687, 712), True, 'import torch.nn as nn\n'), ((741, 797), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_test + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_test + 1, self.hidden_dim // 3)\n', (753, 797), True, 'import torch.nn as nn\n'), ((830, 891), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_questions + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_questions + 1, self.hidden_dim // 3)\n', (842, 891), True, 'import torch.nn as nn\n'), ((919, 974), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_tag + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_tag + 1, self.hidden_dim // 3)\n', (931, 974), True, 'import torch.nn as nn\n'), ((1002, 1059), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_grade + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_grade + 1, self.hidden_dim // 3)\n', (1014, 1059), True, 'import torch.nn as nn\n'), ((1558, 1632), 'torch.nn.LSTM', 'nn.LSTM', (['self.hidden_dim', 'self.hidden_dim', 'self.n_layers'], {'batch_first': '(True)'}), '(self.hidden_dim, self.hidden_dim, self.n_layers, batch_first=True)\n', (1565, 1632), True, 'import torch.nn as nn\n'), ((1776, 1805), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_dim', '(1)'], {}), '(self.hidden_dim, 1)\n', (1785, 1805), True, 'import torch.nn as nn\n'), ((1833, 1845), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1843, 1845), True, 'import torch.nn as nn\n'), ((1898, 1953), 'torch.zeros', 'torch.zeros', (['self.n_layers', 'batch_size', 'self.hidden_dim'], {}), '(self.n_layers, batch_size, self.hidden_dim)\n', (1909, 1953), False, 'import torch\n'), ((2034, 2089), 'torch.zeros', 'torch.zeros', (['self.n_layers', 'batch_size', 
'self.hidden_dim'], {}), '(self.n_layers, batch_size, self.hidden_dim)\n', (2045, 2089), False, 'import torch\n'), ((2600, 2672), 'torch.cat', 'torch.cat', (['[embed_interaction, embed_test, embed_question, embed_tag]', '(2)'], {}), '([embed_interaction, embed_test, embed_question, embed_tag], 2)\n', (2609, 2672), False, 'import torch\n'), ((3763, 3800), 'torch.nn.Embedding', 'nn.Embedding', (['(3)', '(self.hidden_dim // 3)'], {}), '(3, self.hidden_dim // 3)\n', (3775, 3800), True, 'import torch.nn as nn\n'), ((3844, 3890), 'torch.nn.Embedding', 'nn.Embedding', (['(13 * 2 + 1)', '(self.hidden_dim // 3)'], {}), '(13 * 2 + 1, self.hidden_dim // 3)\n', (3856, 3890), True, 'import torch.nn as nn\n'), ((3915, 3971), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_test + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_test + 1, self.hidden_dim // 3)\n', (3927, 3971), True, 'import torch.nn as nn\n'), ((4004, 4065), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_questions + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_questions + 1, self.hidden_dim // 3)\n', (4016, 4065), True, 'import torch.nn as nn\n'), ((4093, 4148), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_tag + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_tag + 1, self.hidden_dim // 3)\n', (4105, 4148), True, 'import torch.nn as nn\n'), ((4178, 4235), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_grade + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_grade + 1, self.hidden_dim // 3)\n', (4190, 4235), True, 'import torch.nn as nn\n'), ((4265, 4322), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_other + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_other + 1, self.hidden_dim // 3)\n', (4277, 4322), True, 'import torch.nn as nn\n'), ((4544, 4576), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['self.args.n_cont'], {}), '(self.args.n_cont)\n', (4558, 4576), True, 'import torch.nn as nn\n'), ((5005, 5079), 'torch.nn.LSTM', 'nn.LSTM', (['self.hidden_dim', 
'self.hidden_dim', 'self.n_layers'], {'batch_first': '(True)'}), '(self.hidden_dim, self.hidden_dim, self.n_layers, batch_first=True)\n', (5012, 5079), True, 'import torch.nn as nn\n'), ((5195, 5419), 'transformers.models.bert.modeling_bert.BertConfig', 'BertConfig', (['(3)'], {'hidden_size': 'self.hidden_dim', 'num_hidden_layers': '(1)', 'num_attention_heads': 'self.n_heads', 'intermediate_size': 'self.hidden_dim', 'hidden_dropout_prob': 'self.drop_out', 'attention_probs_dropout_prob': 'self.drop_out'}), '(3, hidden_size=self.hidden_dim, num_hidden_layers=1,\n num_attention_heads=self.n_heads, intermediate_size=self.hidden_dim,\n hidden_dropout_prob=self.drop_out, attention_probs_dropout_prob=self.\n drop_out)\n', (5205, 5419), False, 'from transformers.models.bert.modeling_bert import BertConfig, BertEncoder, BertModel\n'), ((5534, 5558), 'transformers.models.bert.modeling_bert.BertEncoder', 'BertEncoder', (['self.config'], {}), '(self.config)\n', (5545, 5558), False, 'from transformers.models.bert.modeling_bert import BertConfig, BertEncoder, BertModel\n'), ((5621, 5650), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_dim', '(1)'], {}), '(self.hidden_dim, 1)\n', (5630, 5650), True, 'import torch.nn as nn\n'), ((5678, 5690), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (5688, 5690), True, 'import torch.nn as nn\n'), ((5743, 5798), 'torch.zeros', 'torch.zeros', (['self.n_layers', 'batch_size', 'self.hidden_dim'], {}), '(self.n_layers, batch_size, self.hidden_dim)\n', (5754, 5798), False, 'import torch\n'), ((5879, 5934), 'torch.zeros', 'torch.zeros', (['self.n_layers', 'batch_size', 'self.hidden_dim'], {}), '(self.n_layers, batch_size, self.hidden_dim)\n', (5890, 5934), False, 'import torch\n'), ((7499, 7522), 'torch.cat', 'torch.cat', (['be_concat', '(2)'], {}), '(be_concat, 2)\n', (7508, 7522), False, 'import torch\n'), ((9334, 9371), 'torch.nn.Embedding', 'nn.Embedding', (['(3)', '(self.hidden_dim // 4)'], {}), '(3, self.hidden_dim // 4)\n', (9346, 
9371), True, 'import torch.nn as nn\n'), ((9400, 9456), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_test + 1)', '(self.hidden_dim // 4)'], {}), '(self.args.n_test + 1, self.hidden_dim // 4)\n', (9412, 9456), True, 'import torch.nn as nn\n'), ((9489, 9550), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_questions + 1)', '(self.hidden_dim // 4)'], {}), '(self.args.n_questions + 1, self.hidden_dim // 4)\n', (9501, 9550), True, 'import torch.nn as nn\n'), ((9578, 9633), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_tag + 1)', '(self.hidden_dim // 4)'], {}), '(self.args.n_tag + 1, self.hidden_dim // 4)\n', (9590, 9633), True, 'import torch.nn as nn\n'), ((9663, 9720), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_grade + 1)', '(self.hidden_dim // 4)'], {}), '(self.args.n_grade + 1, self.hidden_dim // 4)\n', (9675, 9720), True, 'import torch.nn as nn\n'), ((10242, 10417), 'transformers.models.bert.modeling_bert.BertConfig', 'BertConfig', (['(3)'], {'hidden_size': 'self.hidden_dim', 'num_hidden_layers': 'self.args.n_layers', 'num_attention_heads': 'self.args.n_heads', 'max_position_embeddings': 'self.args.max_seq_len'}), '(3, hidden_size=self.hidden_dim, num_hidden_layers=self.args.\n n_layers, num_attention_heads=self.args.n_heads,\n max_position_embeddings=self.args.max_seq_len)\n', (10252, 10417), False, 'from transformers.models.bert.modeling_bert import BertConfig, BertEncoder, BertModel\n'), ((10576, 10598), 'transformers.models.bert.modeling_bert.BertModel', 'BertModel', (['self.config'], {}), '(self.config)\n', (10585, 10598), False, 'from transformers.models.bert.modeling_bert import BertConfig, BertEncoder, BertModel\n'), ((10652, 10686), 'torch.nn.Linear', 'nn.Linear', (['self.args.hidden_dim', '(1)'], {}), '(self.args.hidden_dim, 1)\n', (10661, 10686), True, 'import torch.nn as nn\n'), ((10714, 10726), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (10724, 10726), True, 'import torch.nn as nn\n'), ((11186, 11275), 
'torch.cat', 'torch.cat', (['[embed_interaction, embed_test, embed_question, embed_tag, embed_grade]', '(2)'], {}), '([embed_interaction, embed_test, embed_question, embed_tag,\n embed_grade], 2)\n', (11195, 11275), False, 'import torch\n'), ((12078, 12111), 'torch.nn.Linear', 'nn.Linear', (['state_size', 'state_size'], {}), '(state_size, state_size)\n', (12087, 12111), True, 'import torch.nn as nn\n'), ((12132, 12141), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (12139, 12141), True, 'import torch.nn as nn\n'), ((12161, 12194), 'torch.nn.Linear', 'nn.Linear', (['state_size', 'state_size'], {}), '(state_size, state_size)\n', (12170, 12194), True, 'import torch.nn as nn\n'), ((12218, 12233), 'torch.nn.Dropout', 'nn.Dropout', (['(0.2)'], {}), '(0.2)\n', (12228, 12233), True, 'import torch.nn as nn\n'), ((12751, 12791), 'torch.nn.Embedding', 'nn.Embedding', (['(2 * n_skill + 1)', 'embed_dim'], {}), '(2 * n_skill + 1, embed_dim)\n', (12763, 12791), True, 'import torch.nn as nn\n'), ((12821, 12857), 'torch.nn.Embedding', 'nn.Embedding', (['(max_seq - 1)', 'embed_dim'], {}), '(max_seq - 1, embed_dim)\n', (12833, 12857), True, 'import torch.nn as nn\n'), ((12885, 12921), 'torch.nn.Embedding', 'nn.Embedding', (['(n_skill + 1)', 'embed_dim'], {}), '(n_skill + 1, embed_dim)\n', (12897, 12921), True, 'import torch.nn as nn\n'), ((12948, 13016), 'torch.nn.MultiheadAttention', 'nn.MultiheadAttention', ([], {'embed_dim': 'embed_dim', 'num_heads': '(8)', 'dropout': '(0.2)'}), '(embed_dim=embed_dim, num_heads=8, dropout=0.2)\n', (12969, 13016), True, 'import torch.nn as nn\n'), ((13063, 13078), 'torch.nn.Dropout', 'nn.Dropout', (['(0.2)'], {}), '(0.2)\n', (13073, 13078), True, 'import torch.nn as nn\n'), ((13107, 13130), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['embed_dim'], {}), '(embed_dim)\n', (13119, 13130), True, 'import torch.nn as nn\n'), ((13186, 13209), 'torch.nn.Linear', 'nn.Linear', (['embed_dim', '(1)'], {}), '(embed_dim, 1)\n', (13195, 13209), True, 'import torch.nn 
as nn\n'), ((13236, 13248), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (13246, 13248), True, 'import torch.nn as nn\n'), ((14299, 14349), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'dim_ff', 'out_features': 'dim_ff'}), '(in_features=dim_ff, out_features=dim_ff)\n', (14308, 14349), True, 'import torch.nn as nn\n'), ((14372, 14422), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'dim_ff', 'out_features': 'dim_ff'}), '(in_features=dim_ff, out_features=dim_ff)\n', (14381, 14422), True, 'import torch.nn as nn\n'), ((14848, 14885), 'torch.nn.Embedding', 'nn.Embedding', (['(3)', '(self.hidden_dim // 3)'], {}), '(3, self.hidden_dim // 3)\n', (14860, 14885), True, 'import torch.nn as nn\n'), ((14929, 14975), 'torch.nn.Embedding', 'nn.Embedding', (['(13 * 2 + 1)', '(self.hidden_dim // 3)'], {}), '(13 * 2 + 1, self.hidden_dim // 3)\n', (14941, 14975), True, 'import torch.nn as nn\n'), ((15000, 15056), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_test + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_test + 1, self.hidden_dim // 3)\n', (15012, 15056), True, 'import torch.nn as nn\n'), ((15089, 15150), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_questions + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_questions + 1, self.hidden_dim // 3)\n', (15101, 15150), True, 'import torch.nn as nn\n'), ((15178, 15233), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_tag + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_tag + 1, self.hidden_dim // 3)\n', (15190, 15233), True, 'import torch.nn as nn\n'), ((15263, 15320), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_grade + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_grade + 1, self.hidden_dim // 3)\n', (15275, 15320), True, 'import torch.nn as nn\n'), ((15350, 15407), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_other + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_other + 1, self.hidden_dim // 3)\n', (15362, 15407), True, 'import torch.nn as nn\n'), 
((15440, 15492), 'torch.nn.Embedding', 'nn.Embedding', (['self.args.max_seq_len', 'self.hidden_dim'], {}), '(self.args.max_seq_len, self.hidden_dim)\n', (15452, 15492), True, 'import torch.nn as nn\n'), ((15732, 15764), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['self.args.n_cont'], {}), '(self.args.n_cont)\n', (15746, 15764), True, 'import torch.nn as nn\n'), ((16307, 16375), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'self.hidden_dim', 'out_features': 'self.hidden_dim'}), '(in_features=self.hidden_dim, out_features=self.hidden_dim)\n', (16316, 16375), True, 'import torch.nn as nn\n'), ((16417, 16485), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'self.hidden_dim', 'out_features': 'self.hidden_dim'}), '(in_features=self.hidden_dim, out_features=self.hidden_dim)\n', (16426, 16485), True, 'import torch.nn as nn\n'), ((16529, 16597), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'self.hidden_dim', 'out_features': 'self.hidden_dim'}), '(in_features=self.hidden_dim, out_features=self.hidden_dim)\n', (16538, 16597), True, 'import torch.nn as nn\n'), ((16641, 16718), 'torch.nn.MultiheadAttention', 'nn.MultiheadAttention', ([], {'embed_dim': 'self.hidden_dim', 'num_heads': 'self.args.n_heads'}), '(embed_dim=self.hidden_dim, num_heads=self.args.n_heads)\n', (16662, 16718), True, 'import torch.nn as nn\n'), ((16880, 16909), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (16892, 16909), True, 'import torch.nn as nn\n'), ((16929, 16958), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (16941, 16958), True, 'import torch.nn as nn\n'), ((16995, 17074), 'torch.nn.LSTM', 'nn.LSTM', (['self.hidden_dim', 'self.hidden_dim', 'self.args.n_layers'], {'batch_first': '(True)'}), '(self.hidden_dim, self.hidden_dim, self.args.n_layers, batch_first=True)\n', (17002, 17074), True, 'import torch.nn as nn\n'), ((17148, 17177), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_dim', '(1)'], 
{}), '(self.hidden_dim, 1)\n', (17157, 17177), True, 'import torch.nn as nn\n'), ((17205, 17217), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (17215, 17217), True, 'import torch.nn as nn\n'), ((17394, 17459), 'torch.zeros', 'torch.zeros', (['self.args.n_layers', 'batch_size', 'self.args.hidden_dim'], {}), '(self.args.n_layers, batch_size, self.args.hidden_dim)\n', (17405, 17459), False, 'import torch\n'), ((17503, 17568), 'torch.zeros', 'torch.zeros', (['self.args.n_layers', 'batch_size', 'self.args.hidden_dim'], {}), '(self.args.n_layers, batch_size, self.args.hidden_dim)\n', (17514, 17568), False, 'import torch\n'), ((17697, 17719), 'torch.zeros_like', 'torch.zeros_like', (['mask'], {}), '(mask)\n', (17713, 17719), False, 'import torch\n'), ((19491, 19514), 'torch.cat', 'torch.cat', (['be_concat', '(2)'], {}), '(be_concat, 2)\n', (19500, 19514), False, 'import torch\n'), ((21446, 21467), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (21456, 21467), True, 'import torch.nn as nn\n'), ((21531, 21560), 'torch.zeros', 'torch.zeros', (['max_len', 'd_model'], {}), '(max_len, d_model)\n', (21542, 21560), False, 'import torch\n'), ((21771, 21801), 'torch.sin', 'torch.sin', (['(position * div_term)'], {}), '(position * div_term)\n', (21780, 21801), False, 'import torch\n'), ((21824, 21854), 'torch.cos', 'torch.cos', (['(position * div_term)'], {}), '(position * div_term)\n', (21833, 21854), False, 'import torch\n'), ((22413, 22469), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_test + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_test + 1, self.hidden_dim // 3)\n', (22425, 22469), True, 'import torch.nn as nn\n'), ((22502, 22563), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_questions + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_questions + 1, self.hidden_dim // 3)\n', (22514, 22563), True, 'import torch.nn as nn\n'), ((22591, 22646), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_tag + 1)', 
'(self.hidden_dim // 3)'], {}), '(self.args.n_tag + 1, self.hidden_dim // 3)\n', (22603, 22646), True, 'import torch.nn as nn\n'), ((22676, 22733), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_grade + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_grade + 1, self.hidden_dim // 3)\n', (22688, 22733), True, 'import torch.nn as nn\n'), ((23481, 23518), 'torch.nn.Embedding', 'nn.Embedding', (['(3)', '(self.hidden_dim // 3)'], {}), '(3, self.hidden_dim // 3)\n', (23493, 23518), True, 'import torch.nn as nn\n'), ((23562, 23608), 'torch.nn.Embedding', 'nn.Embedding', (['(13 * 2 + 1)', '(self.hidden_dim // 3)'], {}), '(13 * 2 + 1, self.hidden_dim // 3)\n', (23574, 23608), True, 'import torch.nn as nn\n'), ((23634, 23691), 'torch.nn.Embedding', 'nn.Embedding', (['(self.args.n_other + 1)', '(self.hidden_dim // 3)'], {}), '(self.args.n_other + 1, self.hidden_dim // 3)\n', (23646, 23691), True, 'import torch.nn as nn\n'), ((24469, 24698), 'torch.nn.Transformer', 'nn.Transformer', ([], {'d_model': 'self.hidden_dim', 'nhead': 'self.args.n_heads', 'num_encoder_layers': 'self.args.n_layers', 'num_decoder_layers': 'self.args.n_layers', 'dim_feedforward': 'self.hidden_dim', 'dropout': 'self.dropout', 'activation': '"""relu"""'}), "(d_model=self.hidden_dim, nhead=self.args.n_heads,\n num_encoder_layers=self.args.n_layers, num_decoder_layers=self.args.\n n_layers, dim_feedforward=self.hidden_dim, dropout=self.dropout,\n activation='relu')\n", (24483, 24698), True, 'import torch.nn as nn\n'), ((24795, 24824), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_dim', '(1)'], {}), '(self.hidden_dim, 1)\n', (24804, 24824), True, 'import torch.nn as nn\n'), ((24851, 24863), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (24861, 24863), True, 'import torch.nn as nn\n'), ((26642, 26665), 'torch.cat', 'torch.cat', (['be_concat', '(2)'], {}), '(be_concat, 2)\n', (26651, 26665), False, 'import torch\n'), ((28484, 28507), 'torch.cat', 'torch.cat', (['be_concat', '(2)'], {}), 
'(be_concat, 2)\n', (28493, 28507), False, 'import torch\n'), ((1141, 1193), 'torch.nn.Linear', 'nn.Linear', (['(self.hidden_dim // 3 * 4)', 'self.hidden_dim'], {}), '(self.hidden_dim // 3 * 4, self.hidden_dim)\n', (1150, 1193), True, 'import torch.nn as nn\n'), ((1193, 1222), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (1205, 1222), True, 'import torch.nn as nn\n'), ((1268, 1312), 'torch.nn.Linear', 'nn.Linear', (['self.args.n_cont', 'self.hidden_dim'], {}), '(self.args.n_cont, self.hidden_dim)\n', (1277, 1312), True, 'import torch.nn as nn\n'), ((1314, 1343), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (1326, 1343), True, 'import torch.nn as nn\n'), ((1385, 1394), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1392, 1394), True, 'import torch.nn as nn\n'), ((1420, 1477), 'torch.nn.Linear', 'nn.Linear', (['(self.args.hidden_dim * 2)', 'self.args.hidden_dim'], {}), '(self.args.hidden_dim * 2, self.args.hidden_dim)\n', (1429, 1477), True, 'import torch.nn as nn\n'), ((1501, 1535), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.args.hidden_dim'], {}), '(self.args.hidden_dim)\n', (1513, 1535), True, 'import torch.nn as nn\n'), ((2993, 3031), 'torch.cat', 'torch.cat', (['[embed_cate, embed_cont]', '(2)'], {}), '([embed_cate, embed_cont], 2)\n', (3002, 3031), False, 'import torch\n'), ((4490, 4519), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (4502, 4519), True, 'import torch.nn as nn\n'), ((4654, 4698), 'torch.nn.Linear', 'nn.Linear', (['self.args.n_cont', 'self.hidden_dim'], {}), '(self.args.n_cont, self.hidden_dim)\n', (4663, 4698), True, 'import torch.nn as nn\n'), ((4733, 4762), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (4745, 4762), True, 'import torch.nn as nn\n'), ((4833, 4848), 'torch.nn.Dropout', 'nn.Dropout', (['(0.3)'], {}), '(0.3)\n', (4843, 4848), True, 'import torch.nn as nn\n'), 
((4878, 4925), 'torch.nn.Linear', 'nn.Linear', (['(self.hidden_dim * 2)', 'self.hidden_dim'], {}), '(self.hidden_dim * 2, self.hidden_dim)\n', (4887, 4925), True, 'import torch.nn as nn\n'), ((4953, 4982), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (4965, 4982), True, 'import torch.nn as nn\n'), ((7993, 8012), 'torch.cat', 'torch.cat', (['embed', '(2)'], {}), '(embed, 2)\n', (8002, 8012), False, 'import torch\n'), ((9801, 9853), 'torch.nn.Linear', 'nn.Linear', (['(self.hidden_dim // 4 * 5)', 'self.hidden_dim'], {}), '(self.hidden_dim // 4 * 5, self.hidden_dim)\n', (9810, 9853), True, 'import torch.nn as nn\n'), ((9853, 9882), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (9865, 9882), True, 'import torch.nn as nn\n'), ((9928, 9972), 'torch.nn.Linear', 'nn.Linear', (['self.args.n_cont', 'self.hidden_dim'], {}), '(self.args.n_cont, self.hidden_dim)\n', (9937, 9972), True, 'import torch.nn as nn\n'), ((9974, 10003), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (9986, 10003), True, 'import torch.nn as nn\n'), ((10045, 10054), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (10052, 10054), True, 'import torch.nn as nn\n'), ((10080, 10137), 'torch.nn.Linear', 'nn.Linear', (['(self.args.hidden_dim * 2)', 'self.args.hidden_dim'], {}), '(self.args.hidden_dim * 2, self.args.hidden_dim)\n', (10089, 10137), True, 'import torch.nn as nn\n'), ((10161, 10195), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.args.hidden_dim'], {}), '(self.args.hidden_dim)\n', (10173, 10195), True, 'import torch.nn as nn\n'), ((11573, 11611), 'torch.cat', 'torch.cat', (['[embed_cate, embed_cont]', '(2)'], {}), '([embed_cate, embed_cont], 2)\n', (11582, 11611), False, 'import torch\n'), ((15669, 15698), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (15681, 15698), True, 'import torch.nn as nn\n'), ((15842, 15886), 'torch.nn.Linear', 
'nn.Linear', (['self.args.n_cont', 'self.hidden_dim'], {}), '(self.args.n_cont, self.hidden_dim)\n', (15851, 15886), True, 'import torch.nn as nn\n'), ((15921, 15950), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (15933, 15950), True, 'import torch.nn as nn\n'), ((16064, 16111), 'torch.nn.Linear', 'nn.Linear', (['(self.hidden_dim * 2)', 'self.hidden_dim'], {}), '(self.hidden_dim * 2, self.hidden_dim)\n', (16073, 16111), True, 'import torch.nn as nn\n'), ((16139, 16168), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (16151, 16168), True, 'import torch.nn as nn\n'), ((19989, 20008), 'torch.cat', 'torch.cat', (['embed', '(2)'], {}), '(embed, 2)\n', (19998, 20008), False, 'import torch\n'), ((21502, 21515), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (21512, 21515), False, 'import torch\n'), ((22966, 22995), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (22978, 22995), True, 'import torch.nn as nn\n'), ((23313, 23342), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (23325, 23342), True, 'import torch.nn as nn\n'), ((23924, 23953), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (23936, 23953), True, 'import torch.nn as nn\n'), ((12422, 12455), 'numpy.ones', 'np.ones', (['(seq_length, seq_length)'], {}), '((seq_length, seq_length))\n', (12429, 12455), True, 'import numpy as np\n'), ((17307, 17328), 'torch.arange', 'torch.arange', (['seq_len'], {}), '(seq_len)\n', (17319, 17328), False, 'import torch\n'), ((21580, 21623), 'torch.arange', 'torch.arange', (['(0)', 'max_len'], {'dtype': 'torch.float'}), '(0, max_len, dtype=torch.float)\n', (21592, 21623), False, 'import torch\n'), ((25034, 25061), 'numpy.ones', 'np.ones', (['(seq_len, seq_len)'], {}), '((seq_len, seq_len))\n', (25041, 25061), True, 'import numpy as np\n'), ((21666, 21693), 'torch.arange', 
'torch.arange', (['(0)', 'd_model', '(2)'], {}), '(0, d_model, 2)\n', (21678, 21693), False, 'import torch\n'), ((21719, 21736), 'math.log', 'math.log', (['(10000.0)'], {}), '(10000.0)\n', (21727, 21736), False, 'import math\n')] |
# Copyright 2016, 2017 California Institute of Technology
# Users must agree to abide by the restrictions listed in the
# file "LegalStuff.txt" in the PROPER library directory.
#
# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology
# Original IDL version by <NAME>
# Python translation by <NAME>, with <NAME> and <NAME>
import numpy as np
class WaveFront(object):
    """Wavefront of an optical system.

    Holds the complex-valued wavefront array together with the
    Gaussian-beam bookkeeping (waist, Rayleigh range, curvature radius,
    propagation state) used by the PROPER propagators.
    """

    # Class variables
    nlist = 1500

    def __init__(self, beam_diam, ndiam, wavelength, ngrid, w0, z_ray):
        """WaveFront object constructor.

        Parameters
        ----------
        beam_diam : float
            Initial diameter of beam in meters.
        ndiam : float
            Beam diameter in pixels; together with ``beam_diam`` it fixes
            the grid sampling (meters/pixel).
        wavelength : float
            Wavelength in meters.
        ngrid : int
            Wavefront grid size in pixels (n by n).
        w0 : float
            Beam waist radius in meters.
        z_ray : float
            Rayleigh distance for the current beam in meters.
        """
        # Wavefront starts as a unit-amplitude plane wave.
        self._wfarr = np.ones([ngrid, ngrid], dtype=np.complex128)
        self._lamda = float(wavelength)      # wavelength in meters
        self._dx = beam_diam / ndiam         # grid sampling (meters/pixel)
        self._beam_type_old = "INSIDE_"      # beam location (inside or outside beam waist)
        self._reference_surface = "PLANAR"   # reference surface type
        self._R_beam = 0.                    # beam radius of curvature
        self._R_beam_inf = 1                 # flag: beam starts with infinite curvature radius
        self._z = 0.                         # current location along propagation direction (meters)
        self._z_w0 = 0.                      # beam waist location (meters)
        self._w0 = w0                        # beam waist radius (meters)
        self._z_Rayleigh = z_ray             # Rayleigh distance for current beam
        # inside_to_outside (or vice-versa) or inside_to_inside
        self._propagator_type = "INSIDE__TO_INSIDE_"
        self._current_fratio = 1.e9          # current F-ratio
        self._diam = beam_diam               # initial beam diameter in meters
        self._ngrid = ngrid                  # grid size in pixels

    @property
    def wfarr(self):
        """numpy.ndarray: 2D complex-valued wavefront array centered in the grid."""
        return self._wfarr

    @wfarr.setter
    def wfarr(self, value):
        self._wfarr = value

    @property
    def lamda(self):
        """float: Wavelength in meters."""
        return self._lamda

    @lamda.setter
    def lamda(self, value):
        self._lamda = float(value)

    @property
    def dx(self):
        """float: Grid sampling in meters/pixel."""
        return self._dx

    @dx.setter
    def dx(self, value):
        self._dx = float(value)

    @property
    def beam_type_old(self):
        """str: Beam location (inside or outside the beam waist)."""
        return self._beam_type_old

    @beam_type_old.setter
    def beam_type_old(self, value):
        self._beam_type_old = value

    @property
    def reference_surface(self):
        """str: Reference surface type."""
        return self._reference_surface

    @reference_surface.setter
    def reference_surface(self, value):
        self._reference_surface = value

    @property
    def R_beam(self):
        """float: Beam radius of curvature."""
        return self._R_beam

    @R_beam.setter
    def R_beam(self, value):
        self._R_beam = float(value)

    @property
    def R_beam_inf(self):
        """float: Flag indicating an infinite beam radius of curvature."""
        return self._R_beam_inf

    @R_beam_inf.setter
    def R_beam_inf(self, value):
        self._R_beam_inf = float(value)

    @property
    def z(self):
        """float: Current location along the propagation direction (meters)."""
        return self._z

    @z.setter
    def z(self, value):
        self._z = float(value)

    @property
    def z_w0(self):
        """float: Beam waist location (meters)."""
        return self._z_w0

    @z_w0.setter
    def z_w0(self, value):
        self._z_w0 = float(value)

    @property
    def w0(self):
        """float: Beam waist radius (meters)."""
        return self._w0

    @w0.setter
    def w0(self, value):
        self._w0 = float(value)

    @property
    def z_Rayleigh(self):
        """float: Rayleigh distance for the current beam."""
        return self._z_Rayleigh

    @z_Rayleigh.setter
    def z_Rayleigh(self, value):
        self._z_Rayleigh = float(value)

    @property
    def propagator_type(self):
        """str: Propagator type (inside_to_outside, outside_to_inside, or inside_to_inside)."""
        return self._propagator_type

    @propagator_type.setter
    def propagator_type(self, value):
        self._propagator_type = value

    @property
    def current_fratio(self):
        """float: Current F-ratio."""
        return self._current_fratio

    @current_fratio.setter
    def current_fratio(self, value):
        self._current_fratio = float(value)

    @property
    def diam(self):
        """float: Beam diameter in meters."""
        return self._diam

    @diam.setter
    def diam(self, value):
        self._diam = float(value)

    @property
    def ngrid(self):
        """int: Grid size in pixels (read-only)."""
        return self._ngrid
| [
"numpy.ones"
] | [((1332, 1376), 'numpy.ones', 'np.ones', (['[ngrid, ngrid]'], {'dtype': 'np.complex128'}), '([ngrid, ngrid], dtype=np.complex128)\n', (1339, 1376), True, 'import numpy as np\n')] |
from __future__ import print_function
from scipy import misc
import numpy as np
import os
def to_npy(paths, dir):
    """Load every image listed in *paths* (relative to *dir*) into one array.

    Each file is read as RGB, resized to 224x224, and stacked into a
    float array of shape (len(paths), 224, 224, 3).
    """
    stacked = np.zeros([len(paths), 224, 224, 3])
    for idx, fname in enumerate(paths):
        img = misc.imread(dir + fname, mode='RGB')
        img = misc.imresize(img, [224, 224])
        stacked[idx] = img
    return stacked
def path_to_image(dir):
    """Load consecutive frame pairs from the sequence directory *dir*/img.

    Frame i is paired with frame i+1: the first returned array holds the
    "previous" frames (all but the last), the second the "current"
    frames (all but the first).
    """
    img_dir = dir + '/img/'
    file_names = os.listdir(img_dir)
    # NOTE(review): os.listdir order is not guaranteed to be sorted;
    # the frame pairing relies on it — confirm the file names sort naturally.
    pre_names = file_names[:-1]   # drop last frame -> "previous" inputs
    now_names = file_names[1:]    # drop first frame -> "current" inputs
    print(len(pre_names))
    print(len(now_names))
    pre_data1 = to_npy(pre_names, img_dir)
    now_data2 = to_npy(now_names, img_dir)
    print(pre_data1.shape, now_data2.shape)
    return pre_data1, now_data2
def file2list(filename):
    """Parse a ground-truth bounding-box file into an (N, 4) array.

    Each line of *filename* holds comma-separated integers; the first
    four values of every line become one row of the result.

    Parameters
    ----------
    filename : str
        Path to the annotation text file.

    Returns
    -------
    numpy.ndarray
        Array of shape (num_lines, 4), dtype float.
    """
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(filename) as fr:
        lines = fr.readlines()
    returnMat = np.zeros((len(lines), 4))
    for i, line in enumerate(lines):
        # Strip the trailing newline, split on commas, convert to ints.
        values = [int(v) for v in line.strip().split(',')]
        returnMat[i, :] = values[0:4]
    return returnMat
def path_to_lable(path):
    """Load and normalise the ground-truth boxes for the sequence at *path*.

    Box coordinates are divided by the first frame's size so they fall
    in [0, 1]; returns (boxes for frames 1..N-1, boxes for frames 0..N-2).
    """
    first_frame = misc.imread(path + '/img/0001.jpg', mode='RGB')
    w, h, _ = first_frame.shape
    print('image_size:', w, h)
    boxes = file2list(path + '/groundtruth_rect.txt')
    boxes = np.divide(boxes, np.array([w, h, w, h]))
    pre_lable = np.delete(boxes, 0, axis=0)    # drop first row
    now_lable = np.delete(boxes, -1, axis=0)   # drop last row
    print(pre_lable.shape, now_lable.shape)
    return pre_lable, now_lable
def main():
    """Convert every tracking sequence under the data root into .npy files."""
    root = 'F:/object_track/data/'
    pre_imgs, now_imgs, pre_boxes, now_boxes = [], [], [], []
    for seq_name in os.listdir(root):
        seq_path = root + seq_name
        print(seq_path)
        d1, d2 = path_to_image(seq_path)
        y1, y2 = path_to_lable(seq_path)
        pre_imgs.append(d1)
        now_imgs.append(d2)
        pre_boxes.append(y1)
        now_boxes.append(y2)
    # Merge per-sequence arrays along the sample axis and persist them.
    np.save('npy_data/pre.npy', np.concatenate(pre_imgs, axis=0))
    np.save('npy_data/now.npy', np.concatenate(now_imgs, axis=0))
    np.save('npy_data/pre_lable.npy', np.concatenate(pre_boxes, axis=0))
    np.save('npy_data/now_lable.npy', np.concatenate(now_boxes, axis=0))
# Entry point: build the .npy training arrays when run as a script.
if __name__ == '__main__':
    main()
| [
"os.listdir",
"numpy.delete",
"numpy.array",
"numpy.zeros",
"scipy.misc.imread",
"numpy.concatenate",
"scipy.misc.imresize",
"numpy.expand_dims",
"numpy.save",
"numpy.divide"
] | [((146, 172), 'numpy.zeros', 'np.zeros', (['[m, 224, 224, 3]'], {}), '([m, 224, 224, 3])\n', (154, 172), True, 'import numpy as np\n'), ((473, 488), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (483, 488), False, 'import os\n'), ((998, 1016), 'numpy.zeros', 'np.zeros', (['(num, 4)'], {}), '((num, 4))\n', (1006, 1016), True, 'import numpy as np\n'), ((1397, 1430), 'scipy.misc.imread', 'misc.imread', (['img_path'], {'mode': '"""RGB"""'}), "(img_path, mode='RGB')\n", (1408, 1430), False, 'from scipy import misc\n'), ((1559, 1581), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (1567, 1581), True, 'import numpy as np\n'), ((1590, 1611), 'numpy.divide', 'np.divide', (['data', 'size'], {}), '(data, size)\n', (1599, 1611), True, 'import numpy as np\n'), ((1627, 1653), 'numpy.delete', 'np.delete', (['data', '(0)'], {'axis': '(0)'}), '(data, 0, axis=0)\n', (1636, 1653), True, 'import numpy as np\n'), ((1668, 1695), 'numpy.delete', 'np.delete', (['data', '(-1)'], {'axis': '(0)'}), '(data, -1, axis=0)\n', (1677, 1695), True, 'import numpy as np\n'), ((1830, 1845), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (1840, 1845), False, 'import os\n'), ((2217, 2249), 'numpy.concatenate', 'np.concatenate', (['pre_data'], {'axis': '(0)'}), '(pre_data, axis=0)\n', (2231, 2249), True, 'import numpy as np\n'), ((2264, 2296), 'numpy.concatenate', 'np.concatenate', (['now_data'], {'axis': '(0)'}), '(now_data, axis=0)\n', (2278, 2296), True, 'import numpy as np\n'), ((2312, 2345), 'numpy.concatenate', 'np.concatenate', (['pre_lable'], {'axis': '(0)'}), '(pre_lable, axis=0)\n', (2326, 2345), True, 'import numpy as np\n'), ((2361, 2394), 'numpy.concatenate', 'np.concatenate', (['now_lable'], {'axis': '(0)'}), '(now_lable, axis=0)\n', (2375, 2394), True, 'import numpy as np\n'), ((2399, 2436), 'numpy.save', 'np.save', (['"""npy_data/pre.npy"""', 'pre_data'], {}), "('npy_data/pre.npy', pre_data)\n", (2406, 2436), True, 'import numpy as np\n'), 
((2440, 2477), 'numpy.save', 'np.save', (['"""npy_data/now.npy"""', 'now_data'], {}), "('npy_data/now.npy', now_data)\n", (2447, 2477), True, 'import numpy as np\n'), ((2481, 2525), 'numpy.save', 'np.save', (['"""npy_data/pre_lable.npy"""', 'pre_lable'], {}), "('npy_data/pre_lable.npy', pre_lable)\n", (2488, 2525), True, 'import numpy as np\n'), ((2529, 2573), 'numpy.save', 'np.save', (['"""npy_data/now_lable.npy"""', 'now_lable'], {}), "('npy_data/now_lable.npy', now_lable)\n", (2536, 2573), True, 'import numpy as np\n'), ((249, 278), 'scipy.misc.imread', 'misc.imread', (['name'], {'mode': '"""RGB"""'}), "(name, mode='RGB')\n", (260, 278), False, 'from scipy import misc\n'), ((293, 324), 'scipy.misc.imresize', 'misc.imresize', (['temp', '[224, 224]'], {}), '(temp, [224, 224])\n', (306, 324), False, 'from scipy import misc\n'), ((338, 366), 'numpy.expand_dims', 'np.expand_dims', (['temp'], {'axis': '(0)'}), '(temp, axis=0)\n', (352, 366), True, 'import numpy as np\n')] |
import argparse
import os
import shutil
import random
from datetime import datetime
import numpy as np
from mediaio.audio_io import AudioSignal, AudioMixer
from mediaio.dsp.spectrogram import MelConverter
from dataset import AudioVisualDataset
def enhance_speech(speaker_file_path, noise_file_path, speech_prediction_path, speech_profile):
    """Mix clean speech with noise, then mask the mixture with a predicted spectrogram.

    A per-frequency threshold (85th percentile of the speaker's speech
    profile) decides which time-frequency bins of the mixture are kept.

    Returns
    -------
    tuple
        (mixed_signal, enhanced_speech_signal): the noisy mixture and
        the masked (enhanced) reconstruction.
    """
    print("enhancing mix of %s, %s" % (speaker_file_path, noise_file_path))
    speaker_signal = AudioSignal.from_wav_file(speaker_file_path)
    noise_signal = AudioSignal.from_wav_file(noise_file_path)
    # Tile the noise until it is at least as long as the speech, then trim.
    while noise_signal.get_number_of_samples() < speaker_signal.get_number_of_samples():
        noise_signal = AudioSignal.concat([noise_signal, noise_signal])
    noise_signal = noise_signal.slice(0, speaker_signal.get_number_of_samples())
    mixed_signal = AudioMixer.mix([speaker_signal, noise_signal])
    predicted_signal = AudioSignal.from_wav_file(speech_prediction_path)
    # Zero-pad both signals to a common length before the mel transform.
    longest = max(sig.get_number_of_samples() for sig in (mixed_signal, predicted_signal))
    for sig in (mixed_signal, predicted_signal):
        sig.pad_with_zeros(longest)
    mel_converter = MelConverter(mixed_signal.get_sample_rate(), n_mel_freqs=128, freq_min_hz=0, freq_max_hz=4000)
    mixed_spectrogram, original_phase = mel_converter.signal_to_mel_spectrogram(mixed_signal, get_phase=True)
    predicted_spectrogram = mel_converter.signal_to_mel_spectrogram(predicted_signal)
    # Binary mask: keep a bin when the predicted energy exceeds the
    # per-frequency 85th-percentile threshold of the speech profile.
    thresholds = np.percentile(speech_profile, 85, axis=1)
    speech_enhancement_mask = (predicted_spectrogram > thresholds[:, np.newaxis]).astype(float)
    enhanced_spectrogram = mixed_spectrogram * speech_enhancement_mask
    enhanced_speech_signal = mel_converter.reconstruct_signal_from_mel_spectrogram(enhanced_spectrogram, original_phase)
    return mixed_signal, enhanced_speech_signal
def build_speech_profile(speaker_speech_dir, max_files=50):
    """Concatenate mel spectrograms of up to *max_files* speaker wavs into one profile."""
    print("building speech profile...")
    wav_paths = [os.path.join(speaker_speech_dir, name) for name in os.listdir(speaker_speech_dir)][:max_files]
    signals = [AudioSignal.from_wav_file(p) for p in wav_paths]
    converter = MelConverter(signals[0].get_sample_rate(), n_mel_freqs=128, freq_min_hz=0, freq_max_hz=4000)
    # Stack spectrograms along the time axis: shape (n_mel_freqs, total_frames).
    return np.concatenate([converter.signal_to_mel_spectrogram(s) for s in signals], axis=1)
return speech_profile
def apply_speech_enhancement(dataset_dir, speaker_id, noise_dir, prediction_input_dir, enhancement_output_dir):
    """Run speech enhancement over speaker/noise pairs, writing results per pair.

    For each pair a timestamped sub-directory gets three files:
    source.wav (clean), mixture.wav (noisy), enhanced.wav (masked).
    Failures on individual pairs are logged and skipped.
    """
    out_dir = os.path.join(enhancement_output_dir, '{:%Y-%m-%d_%H-%M-%S}'.format(datetime.now()))
    os.mkdir(out_dir)
    speech_profile = build_speech_profile(os.path.join(prediction_input_dir, speaker_id))
    for speaker_path, noise_path in list_source_pairs(dataset_dir, speaker_id, noise_dir):
        try:
            speaker_name = os.path.splitext(os.path.basename(speaker_path))[0]
            noise_name = os.path.splitext(os.path.basename(noise_path))[0]
            pair_dir = os.path.join(out_dir, speaker_name + "_" + noise_name)
            os.mkdir(pair_dir)
            prediction_path = os.path.join(prediction_input_dir, speaker_id, speaker_name + ".wav")
            mixed_signal, enhanced_signal = enhance_speech(
                speaker_path, noise_path, prediction_path, speech_profile
            )
            shutil.copy(speaker_path, os.path.join(pair_dir, "source.wav"))
            enhanced_signal.save_to_wav_file(os.path.join(pair_dir, "enhanced.wav"))
            mixed_signal.save_to_wav_file(os.path.join(pair_dir, "mixture.wav"))
        except Exception as e:
            # Best-effort batch: report the failing pair and carry on.
            print("failed to enhance (%s). skipping" % e)
def list_source_pairs(dataset_dir, speaker_id, noise_dir):
    """Pair shuffled speaker audio files with shuffled noise files."""
    dataset = AudioVisualDataset(dataset_dir)
    speaker_paths = dataset.subset([speaker_id], max_files=20, shuffle=True).audio_paths()
    noise_paths = [os.path.join(noise_dir, name) for name in os.listdir(noise_dir)]
    random.shuffle(speaker_paths)
    random.shuffle(noise_paths)
    # zip truncates to the shorter list, pairing each speaker file with one noise file.
    return zip(speaker_paths, noise_paths)
def main():
    """Parse command-line arguments and run the enhancement pipeline."""
    parser = argparse.ArgumentParser()
    for arg_name in ("dataset_dir", "speaker", "noise_dir",
                     "prediction_input_dir", "enhancement_output_dir"):
        parser.add_argument(arg_name, type=str)
    args = parser.parse_args()
    apply_speech_enhancement(
        args.dataset_dir, args.speaker, args.noise_dir,
        args.prediction_input_dir, args.enhancement_output_dir
    )
# Entry point: run the speech-enhancement pipeline when executed as a script.
if __name__ == "__main__":
    main()
| [
"mediaio.audio_io.AudioMixer.mix",
"os.listdir",
"random.shuffle",
"argparse.ArgumentParser",
"dataset.AudioVisualDataset",
"os.path.join",
"datetime.datetime.now",
"numpy.zeros",
"os.mkdir",
"numpy.concatenate",
"mediaio.audio_io.AudioSignal.concat",
"os.path.basename",
"numpy.percentile",
... | [((443, 487), 'mediaio.audio_io.AudioSignal.from_wav_file', 'AudioSignal.from_wav_file', (['speaker_file_path'], {}), '(speaker_file_path)\n', (468, 487), False, 'from mediaio.audio_io import AudioSignal, AudioMixer\n'), ((511, 553), 'mediaio.audio_io.AudioSignal.from_wav_file', 'AudioSignal.from_wav_file', (['noise_file_path'], {}), '(noise_file_path)\n', (536, 553), False, 'from mediaio.audio_io import AudioSignal, AudioMixer\n'), ((858, 918), 'mediaio.audio_io.AudioMixer.mix', 'AudioMixer.mix', (['[speaker_source_signal, noise_source_signal]'], {}), '([speaker_source_signal, noise_source_signal])\n', (872, 918), False, 'from mediaio.audio_io import AudioSignal, AudioMixer\n'), ((947, 996), 'mediaio.audio_io.AudioSignal.from_wav_file', 'AudioSignal.from_wav_file', (['speech_prediction_path'], {}), '(speech_prediction_path)\n', (972, 996), False, 'from mediaio.audio_io import AudioSignal, AudioMixer\n'), ((1528, 1567), 'numpy.zeros', 'np.zeros', ([], {'shape': 'mixed_spectrogram.shape'}), '(shape=mixed_spectrogram.shape)\n', (1536, 1567), True, 'import numpy as np\n'), ((1583, 1631), 'numpy.zeros', 'np.zeros', ([], {'shape': 'speech_enhancement_mask.shape[0]'}), '(shape=speech_enhancement_mask.shape[0])\n', (1591, 1631), True, 'import numpy as np\n'), ((2729, 2772), 'numpy.concatenate', 'np.concatenate', (['speech_spectrograms'], {'axis': '(1)'}), '(speech_spectrograms, axis=1)\n', (2743, 2772), True, 'import numpy as np\n'), ((3021, 3053), 'os.mkdir', 'os.mkdir', (['enhancement_output_dir'], {}), '(enhancement_output_dir)\n', (3029, 3053), False, 'import os\n'), ((4237, 4268), 'dataset.AudioVisualDataset', 'AudioVisualDataset', (['dataset_dir'], {}), '(dataset_dir)\n', (4255, 4268), False, 'from dataset import AudioVisualDataset\n'), ((4444, 4478), 'random.shuffle', 'random.shuffle', (['speaker_file_paths'], {}), '(speaker_file_paths)\n', (4458, 4478), False, 'import random\n'), ((4480, 4512), 'random.shuffle', 'random.shuffle', (['noise_file_paths'], {}), 
'(noise_file_paths)\n', (4494, 4512), False, 'import random\n'), ((4588, 4613), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4611, 4613), False, 'import argparse\n'), ((679, 741), 'mediaio.audio_io.AudioSignal.concat', 'AudioSignal.concat', (['[noise_source_signal, noise_source_signal]'], {}), '([noise_source_signal, noise_source_signal])\n', (697, 741), False, 'from mediaio.audio_io import AudioSignal, AudioMixer\n'), ((1703, 1742), 'numpy.percentile', 'np.percentile', (['speech_profile[f, :]', '(85)'], {}), '(speech_profile[f, :], 85)\n', (1716, 1742), True, 'import numpy as np\n'), ((2433, 2461), 'mediaio.audio_io.AudioSignal.from_wav_file', 'AudioSignal.from_wav_file', (['f'], {}), '(f)\n', (2458, 2461), False, 'from mediaio.audio_io import AudioSignal, AudioMixer\n'), ((3094, 3140), 'os.path.join', 'os.path.join', (['prediction_input_dir', 'speaker_id'], {}), '(prediction_input_dir, speaker_id)\n', (3106, 3140), False, 'import os\n'), ((4383, 4409), 'os.path.join', 'os.path.join', (['noise_dir', 'f'], {}), '(noise_dir, f)\n', (4395, 4409), False, 'import os\n'), ((2325, 2360), 'os.path.join', 'os.path.join', (['speaker_speech_dir', 'f'], {}), '(speaker_speech_dir, f)\n', (2337, 2360), False, 'import os\n'), ((3003, 3017), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3015, 3017), False, 'from datetime import datetime\n'), ((3438, 3517), 'os.path.join', 'os.path.join', (['enhancement_output_dir', "(speaker_file_name + '_' + noise_file_name)"], {}), "(enhancement_output_dir, speaker_file_name + '_' + noise_file_name)\n", (3450, 3517), False, 'import os\n'), ((3521, 3558), 'os.mkdir', 'os.mkdir', (['speech_enhancement_dir_path'], {}), '(speech_enhancement_dir_path)\n', (3529, 3558), False, 'import os\n'), ((3588, 3662), 'os.path.join', 'os.path.join', (['prediction_input_dir', 'speaker_id', "(speaker_file_name + '.wav')"], {}), "(prediction_input_dir, speaker_id, speaker_file_name + '.wav')\n", (3600, 3662), False, 
'import os\n'), ((4419, 4440), 'os.listdir', 'os.listdir', (['noise_dir'], {}), '(noise_dir)\n', (4429, 4440), False, 'import os\n'), ((2370, 2400), 'os.listdir', 'os.listdir', (['speaker_speech_dir'], {}), '(speaker_speech_dir)\n', (2380, 2400), False, 'import os\n'), ((3840, 3895), 'os.path.join', 'os.path.join', (['speech_enhancement_dir_path', '"""source.wav"""'], {}), "(speech_enhancement_dir_path, 'source.wav')\n", (3852, 3895), False, 'import os\n'), ((3940, 3997), 'os.path.join', 'os.path.join', (['speech_enhancement_dir_path', '"""enhanced.wav"""'], {}), "(speech_enhancement_dir_path, 'enhanced.wav')\n", (3952, 3997), False, 'import os\n'), ((4032, 4088), 'os.path.join', 'os.path.join', (['speech_enhancement_dir_path', '"""mixture.wav"""'], {}), "(speech_enhancement_dir_path, 'mixture.wav')\n", (4044, 4088), False, 'import os\n'), ((3288, 3323), 'os.path.basename', 'os.path.basename', (['speaker_file_path'], {}), '(speaker_file_path)\n', (3304, 3323), False, 'import os\n'), ((3366, 3399), 'os.path.basename', 'os.path.basename', (['noise_file_path'], {}), '(noise_file_path)\n', (3382, 3399), False, 'import os\n')] |
import os
from typing import Optional

import matplotlib.pyplot as plt
import numpy as np
# Absolute path of the directory containing this script.
# NOTE(review): IMG_PATH is not used anywhere in this file — possibly
# intended as a save location for the figures; confirm before removing.
IMG_PATH = os.path.dirname(os.path.abspath(__file__))
def plot_function(
    input_signal: np.ndarray,
    output_signal: np.ndarray,
    name: Optional[str] = None
) -> None:
    """Plot an activation function as a step curve and show it.

    Parameters
    ----------
    input_signal:
        The x-values (pre-activation values `a`).
    output_signal:
        The function values f(a), one per entry of ``input_signal``.
    name:
        Optional name of the activation function, used in the title.
        (Fixed: previously annotated as plain ``str`` despite the
        ``None`` default.)
    """
    plt.step(input_signal, output_signal)
    plt.xlabel('a')
    plt.ylabel('f(a)')
    # Pad the limits by 0.2 so the curve does not touch the plot border.
    plt.xlim(np.min(input_signal) - 0.2, np.max(input_signal) + 0.2)
    plt.ylim(np.min(output_signal) - 0.2, np.max(output_signal) + 0.2)
    if name:
        plt.title(f"Activation function: {name}")
    plt.show()
if __name__ == "__main__":
    input_signal = np.linspace(start=-10, stop=10, num=1000)
    # Each activation maps one pre-activation value a to f(a).
    activation_functions = {
        # f(a) = 0 if a <= 0 else 1
        'step': lambda a: 0 if a <= 0 else 1,
        # f(a) = tanh(a) = 2 / (1 + e^(-2a)) - 1
        'tanh': lambda a: 2 / (1 + np.exp(-2 * a)) - 1,
        # sigmoid(a) = 1 / (1 + e^-a)
        'sigmoid': lambda a: 1 / (1 + np.exp(-a)),
        # f(a) = max(0, a)
        'relu': lambda a: max(0, a),
    }
    # Plot every activation in turn (dict preserves insertion order).
    for fn_name, fn in activation_functions.items():
        output_signal = [fn(a) for a in input_signal]
        plot_function(input_signal, output_signal, name=fn_name)
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.max",
"numpy.exp",
"numpy.linspace",
"numpy.min",
"os.path.abspath",
"matplotlib.pyplot.title",
"matplotlib.pyplot.step",
"matplotlib.pyplot.show"
] | [((89, 114), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (104, 114), False, 'import os\n'), ((234, 271), 'matplotlib.pyplot.step', 'plt.step', (['input_signal', 'output_signal'], {}), '(input_signal, output_signal)\n', (242, 271), True, 'import matplotlib.pyplot as plt\n'), ((276, 291), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""a"""'], {}), "('a')\n", (286, 291), True, 'import matplotlib.pyplot as plt\n'), ((296, 314), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""f(a)"""'], {}), "('f(a)')\n", (306, 314), True, 'import matplotlib.pyplot as plt\n'), ((522, 532), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (530, 532), True, 'import matplotlib.pyplot as plt\n'), ((581, 622), 'numpy.linspace', 'np.linspace', ([], {'start': '(-10)', 'stop': '(10)', 'num': '(1000)'}), '(start=-10, stop=10, num=1000)\n', (592, 622), True, 'import numpy as np\n'), ((476, 517), 'matplotlib.pyplot.title', 'plt.title', (['f"""Activation function: {name}"""'], {}), "(f'Activation function: {name}')\n", (485, 517), True, 'import matplotlib.pyplot as plt\n'), ((328, 348), 'numpy.min', 'np.min', (['input_signal'], {}), '(input_signal)\n', (334, 348), True, 'import numpy as np\n'), ((356, 376), 'numpy.max', 'np.max', (['input_signal'], {}), '(input_signal)\n', (362, 376), True, 'import numpy as np\n'), ((397, 418), 'numpy.min', 'np.min', (['output_signal'], {}), '(output_signal)\n', (403, 418), True, 'import numpy as np\n'), ((426, 447), 'numpy.max', 'np.max', (['output_signal'], {}), '(output_signal)\n', (432, 447), True, 'import numpy as np\n'), ((1067, 1077), 'numpy.exp', 'np.exp', (['(-a)'], {}), '(-a)\n', (1073, 1077), True, 'import numpy as np\n'), ((885, 899), 'numpy.exp', 'np.exp', (['(-2 * a)'], {}), '(-2 * a)\n', (891, 899), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os
import numpy as np
def _import_networkx():
    """Return the networkx module, raising a helpful ImportError otherwise."""
    try:
        import networkx
    except Exception as e:
        message = ('Cannot import networkx. Use graph-tool or try to '
                   'install it with pip (or conda) install networkx. '
                   'Original exception: {}'.format(e))
        raise ImportError(message)
    return networkx
def _import_graphtool():
    """Return the graph_tool module, raising a helpful ImportError otherwise."""
    try:
        import graph_tool
    except Exception as e:
        message = ('Cannot import graph-tool. Use networkx or try to '
                   'install it. Original exception: {}'.format(e))
        raise ImportError(message)
    return graph_tool
class IOMixIn(object):
def _break_signals(self):
r"""Break N-dimensional signals into N 1D signals."""
for name in list(self.signals.keys()):
if self.signals[name].ndim == 2:
for i, signal_1d in enumerate(self.signals[name].T):
self.signals[name + '_' + str(i)] = signal_1d
del self.signals[name]
def _join_signals(self):
r"""Join N 1D signals into one N-dimensional signal."""
joined = dict()
for name in self.signals:
name_base = name.rsplit('_', 1)[0]
names = joined.get(name_base, list())
names.append(name)
joined[name_base] = names
for name_base, names in joined.items():
if len(names) > 1:
names = sorted(names) # ensure dim ordering (_0, _1, etc.)
signal_nd = np.stack([self.signals[n] for n in names], axis=1)
self.signals[name_base] = signal_nd
for name in names:
del self.signals[name]
def to_networkx(self):
r"""Export the graph to NetworkX.
Edge weights are stored as an edge attribute,
under the name "weight".
Signals are stored as node attributes,
under their name in the :attr:`signals` dictionary.
`N`-dimensional signals are broken into `N` 1-dimensional signals.
They will eventually be joined back together on import.
Returns
-------
graph : :class:`networkx.Graph`
A NetworkX graph object.
See Also
--------
to_graphtool : export to graph-tool
save : save to a file
Examples
--------
>>> import networkx as nx
>>> from matplotlib import pyplot as plt
>>> graph = graphs.Path(4, directed=True)
>>> graph.set_signal(np.full(4, 2.3), 'signal')
>>> graph = graph.to_networkx()
>>> print(nx.info(graph))
DiGraph named 'Path' with 4 nodes and 3 edges
>>> nx.is_directed(graph)
True
>>> graph.nodes()
NodeView((0, 1, 2, 3))
>>> graph.edges()
OutEdgeView([(0, 1), (1, 2), (2, 3)])
>>> graph.nodes()[2]
{'signal': 2.3}
>>> graph.edges()[(0, 1)]
{'weight': 1.0}
>>> # nx.draw(graph, with_labels=True)
Another common goal is to use NetworkX to compute some properties to be
be imported back in the PyGSP as signals.
>>> import networkx as nx
>>> from matplotlib import pyplot as plt
>>> graph = graphs.Sensor(100, seed=42)
>>> graph.set_signal(graph.coords, 'coords')
>>> graph = graph.to_networkx()
>>> betweenness = nx.betweenness_centrality(graph, weight='weight')
>>> nx.set_node_attributes(graph, betweenness, 'betweenness')
>>> graph = graphs.Graph.from_networkx(graph)
>>> graph.compute_fourier_basis()
>>> graph.set_coordinates(graph.signals['coords'])
>>> fig, axes = plt.subplots(1, 2)
>>> _ = graph.plot(graph.signals['betweenness'], ax=axes[0])
>>> _ = axes[1].plot(graph.e, graph.gft(graph.signals['betweenness']))
"""
nx = _import_networkx()
def convert(number):
# NetworkX accepts arbitrary python objects as attributes, but:
# * the GEXF writer does not accept any NumPy types (on signals),
# * the GraphML writer does not accept NumPy ints.
if issubclass(number.dtype.type, (np.integer, np.bool_)):
return int(number)
else:
return float(number)
def edges():
for source, target, weight in zip(*self.get_edge_list()):
yield int(source), int(target), {'weight': convert(weight)}
def nodes():
for vertex in range(self.n_vertices):
signals = {name: convert(signal[vertex])
for name, signal in self.signals.items()}
yield vertex, signals
self._break_signals()
graph = nx.DiGraph() if self.is_directed() else nx.Graph()
graph.add_nodes_from(nodes())
graph.add_edges_from(edges())
graph.name = self.__class__.__name__
return graph
def to_graphtool(self):
r"""Export the graph to graph-tool.
Edge weights are stored as an edge property map,
under the name "weight".
Signals are stored as vertex property maps,
under their name in the :attr:`signals` dictionary.
`N`-dimensional signals are broken into `N` 1-dimensional signals.
They will eventually be joined back together on import.
Returns
-------
graph : :class:`graph_tool.Graph`
A graph-tool graph object.
See Also
--------
to_networkx : export to NetworkX
save : save to a file
Examples
--------
>>> import graph_tool as gt
>>> import graph_tool.draw
>>> from matplotlib import pyplot as plt
>>> graph = graphs.Path(4, directed=True)
>>> graph.set_signal(np.full(4, 2.3), 'signal')
>>> graph = graph.to_graphtool()
>>> graph.is_directed()
True
>>> graph.vertex_properties['signal'][2]
2.3
>>> graph.edge_properties['weight'][graph.edge(0, 1)]
1.0
>>> # gt.draw.graph_draw(graph, vertex_text=graph.vertex_index)
Another common goal is to use graph-tool to compute some properties to
be imported back in the PyGSP as signals.
>>> import graph_tool as gt
>>> import graph_tool.centrality
>>> from matplotlib import pyplot as plt
>>> graph = graphs.Sensor(100, seed=42)
>>> graph.set_signal(graph.coords, 'coords')
>>> graph = graph.to_graphtool()
>>> vprop, eprop = gt.centrality.betweenness(
... graph, weight=graph.edge_properties['weight'])
>>> graph.vertex_properties['betweenness'] = vprop
>>> graph = graphs.Graph.from_graphtool(graph)
>>> graph.compute_fourier_basis()
>>> graph.set_coordinates(graph.signals['coords'])
>>> fig, axes = plt.subplots(1, 2)
>>> _ = graph.plot(graph.signals['betweenness'], ax=axes[0])
>>> _ = axes[1].plot(graph.e, graph.gft(graph.signals['betweenness']))
"""
gt = _import_graphtool()
graph = gt.Graph(directed=self.is_directed())
sources, targets, weights = self.get_edge_list()
graph.add_edge_list(zip(sources, targets))
prop = graph.new_edge_property(gt._gt_type(weights.dtype))
prop.get_array()[:] = weights
graph.edge_properties['weight'] = prop
self._break_signals()
for name, signal in self.signals.items():
prop = graph.new_vertex_property(gt._gt_type(signal.dtype))
prop.get_array()[:] = signal
graph.vertex_properties[name] = prop
return graph
@classmethod
def from_networkx(cls, graph, weight='weight'):
r"""Import a graph from NetworkX.
Edge weights are retrieved as an edge attribute,
under the name specified by the ``weight`` parameter.
Signals are retrieved from node attributes,
and stored in the :attr:`signals` dictionary under the attribute name.
`N`-dimensional signals that were broken during export are joined.
Parameters
----------
graph : :class:`networkx.Graph`
A NetworkX graph object.
weight : string or None, optional
The edge attribute that holds the numerical values used as the edge
weights. All edge weights are set to 1 if None, or not found.
Returns
-------
graph : :class:`~pygsp.graphs.Graph`
A PyGSP graph object.
Notes
-----
The nodes are ordered according to :meth:`networkx.Graph.nodes`.
In NetworkX, node attributes need not be set for every node.
If a node attribute is not set for a node, a NaN is assigned to the
corresponding signal for that node.
If the graph is a :class:`networkx.MultiGraph`, multiedges are
aggregated by summation.
See Also
--------
from_graphtool : import from graph-tool
load : load from a file
Examples
--------
>>> import networkx as nx
>>> graph = nx.Graph()
>>> graph.add_edge(1, 2, weight=0.2)
>>> graph.add_edge(2, 3, weight=0.9)
>>> graph.add_node(4, sig=3.1416)
>>> graph.nodes()
NodeView((1, 2, 3, 4))
>>> graph = graphs.Graph.from_networkx(graph)
>>> graph.W.toarray()
array([[0. , 0.2, 0. , 0. ],
[0.2, 0. , 0.9, 0. ],
[0. , 0.9, 0. , 0. ],
[0. , 0. , 0. , 0. ]])
>>> graph.signals
{'sig': array([ nan, nan, nan, 3.1416])}
"""
nx = _import_networkx()
from .graph import Graph
adjacency = nx.to_scipy_sparse_matrix(graph, weight=weight)
graph_pg = Graph(adjacency)
for i, node in enumerate(graph.nodes()):
for name in graph.nodes[node].keys():
try:
signal = graph_pg.signals[name]
except KeyError:
signal = np.full(graph_pg.n_vertices, np.nan)
graph_pg.set_signal(signal, name)
try:
signal[i] = graph.nodes[node][name]
except KeyError:
pass # attribute not set for node
graph_pg._join_signals()
return graph_pg
@classmethod
def from_graphtool(cls, graph, weight='weight'):
r"""Import a graph from graph-tool.
Edge weights are retrieved as an edge property,
under the name specified by the ``weight`` parameter.
Signals are retrieved from node properties,
and stored in the :attr:`signals` dictionary under the property name.
`N`-dimensional signals that were broken during export are joined.
Parameters
----------
graph : :class:`graph_tool.Graph`
A graph-tool graph object.
weight : string
The edge property that holds the numerical values used as the edge
weights. All edge weights are set to 1 if None, or not found.
Returns
-------
graph : :class:`~pygsp.graphs.Graph`
A PyGSP graph object.
Notes
-----
If the graph has multiple edge connecting the same two nodes, a sum
over the edges is taken to merge them.
See Also
--------
from_networkx : import from NetworkX
load : load from a file
Examples
--------
>>> import graph_tool as gt
>>> graph = gt.Graph(directed=False)
>>> e1 = graph.add_edge(0, 1)
>>> e2 = graph.add_edge(1, 2)
>>> v = graph.add_vertex()
>>> eprop = graph.new_edge_property("double")
>>> eprop[e1] = 0.2
>>> eprop[graph.edge(1, 2)] = 0.9
>>> graph.edge_properties["weight"] = eprop
>>> vprop = graph.new_vertex_property("double", val=np.nan)
>>> vprop[3] = 3.1416
>>> graph.vertex_properties["sig"] = vprop
>>> graph = graphs.Graph.from_graphtool(graph)
>>> graph.W.toarray()
array([[0. , 0.2, 0. , 0. ],
[0.2, 0. , 0.9, 0. ],
[0. , 0.9, 0. , 0. ],
[0. , 0. , 0. , 0. ]])
>>> graph.signals
{'sig': PropertyArray([ nan, nan, nan, 3.1416])}
"""
gt = _import_graphtool()
import graph_tool.spectral
from .graph import Graph
weight = graph.edge_properties.get(weight, None)
adjacency = gt.spectral.adjacency(graph, weight=weight)
graph_pg = Graph(adjacency.T)
for name, signal in graph.vertex_properties.items():
graph_pg.set_signal(signal.get_array(), name)
graph_pg._join_signals()
return graph_pg
@classmethod
def load(cls, path, fmt=None, backend=None):
r"""Load a graph from a file.
Edge weights are retrieved as an edge attribute named "weight".
Signals are retrieved from node attributes,
and stored in the :attr:`signals` dictionary under the attribute name.
`N`-dimensional signals that were broken during export are joined.
Parameters
----------
path : string
Path to the file from which to load the graph.
fmt : {'graphml', 'gml', 'gexf', None}, optional
Format in which the graph is saved.
Guessed from the filename extension if None.
backend : {'networkx', 'graph-tool', None}, optional
Library used to load the graph. Automatically chosen if None.
Returns
-------
graph : :class:`Graph`
The loaded graph.
See Also
--------
save : save a graph to a file
from_networkx : load with NetworkX then import in the PyGSP
from_graphtool : load with graph-tool then import in the PyGSP
Notes
-----
A lossless round-trip is only guaranteed if the graph (and its signals)
is saved and loaded with the same backend.
Loading from other formats is possible by loading in NetworkX or
graph-tool, and importing to the PyGSP.
The proposed formats are however tested for faithful round-trips.
Examples
--------
>>> graph = graphs.Logo()
>>> graph.save('logo.graphml')
>>> graph = graphs.Graph.load('logo.graphml')
>>> import os
>>> os.remove('logo.graphml')
"""
if fmt is None:
fmt = os.path.splitext(path)[1][1:]
if fmt not in ['graphml', 'gml', 'gexf']:
raise ValueError('Unsupported format {}.'.format(fmt))
def load_networkx(path, fmt):
nx = _import_networkx()
load = getattr(nx, 'read_' + fmt)
graph = load(path)
return cls.from_networkx(graph)
def load_graphtool(path, fmt):
gt = _import_graphtool()
graph = gt.load_graph(path, fmt=fmt)
return cls.from_graphtool(graph)
if backend == 'networkx':
return load_networkx(path, fmt)
elif backend == 'graph-tool':
return load_graphtool(path, fmt)
elif backend is None:
try:
return load_networkx(path, fmt)
except ImportError:
try:
return load_graphtool(path, fmt)
except ImportError:
raise ImportError('Cannot import networkx nor graph-tool.')
else:
raise ValueError('Unknown backend {}.'.format(backend))
def save(self, path, fmt=None, backend=None):
r"""Save the graph to a file.
Edge weights are stored as an edge attribute,
under the name "weight".
Signals are stored as node attributes,
under their name in the :attr:`signals` dictionary.
`N`-dimensional signals are broken into `N` 1-dimensional signals.
They will eventually be joined back together on import.
Supported formats are:
* GraphML_, a comprehensive XML format.
Supported by NetworkX_, graph-tool_, NetworKit_, igraph_, Gephi_,
Cytoscape_, SocNetV_.
* GML_ (Graph Modelling Language), a simple non-XML format.
Supported by NetworkX_, graph-tool_, NetworKit_, igraph_, Gephi_,
Cytoscape_, SocNetV_, Tulip_.
* GEXF_ (Graph Exchange XML Format), Gephi's XML format.
Supported by NetworkX_, NetworKit_, Gephi_, Tulip_, ngraph_.
If unsure, we recommend GraphML_.
.. _GraphML: https://en.wikipedia.org/wiki/GraphML
.. _GML: https://en.wikipedia.org/wiki/Graph_Modelling_Language
.. _GEXF: https://gephi.org/gexf/format
.. _NetworkX: https://networkx.org
.. _graph-tool: https://graph-tool.skewed.de
.. _NetworKit: https://networkit.github.io
.. _igraph: https://igraph.org
.. _ngraph: https://github.com/anvaka/ngraph
.. _Gephi: https://gephi.org
.. _Cytoscape: https://cytoscape.org
.. _SocNetV: https://socnetv.org
.. _Tulip: https://tulip.labri.fr
Parameters
----------
path : string
Path to the file where the graph is to be saved.
fmt : {'graphml', 'gml', 'gexf', None}, optional
Format in which to save the graph.
Guessed from the filename extension if None.
backend : {'networkx', 'graph-tool', None}, optional
Library used to load the graph. Automatically chosen if None.
See Also
--------
load : load a graph from a file
to_networkx : export as a NetworkX graph, and save with NetworkX
to_graphtool : export as a graph-tool graph, and save with graph-tool
Notes
-----
A lossless round-trip is only guaranteed if the graph (and its signals)
is saved and loaded with the same backend.
Saving in other formats is possible by exporting to NetworkX or
graph-tool, and using their respective saving functionality.
The proposed formats are however tested for faithful round-trips.
Edge weights and signal values are rounded at the sixth decimal when
saving in ``fmt='gml'`` with ``backend='graph-tool'``.
Examples
--------
>>> graph = graphs.Logo()
>>> graph.save('logo.graphml')
>>> graph = graphs.Graph.load('logo.graphml')
>>> import os
>>> os.remove('logo.graphml')
"""
if fmt is None:
fmt = os.path.splitext(path)[1][1:]
if fmt not in ['graphml', 'gml', 'gexf']:
raise ValueError('Unsupported format {}.'.format(fmt))
def save_networkx(graph, path, fmt):
nx = _import_networkx()
graph = graph.to_networkx()
save = getattr(nx, 'write_' + fmt)
save(graph, path)
def save_graphtool(graph, path, fmt):
graph = graph.to_graphtool()
graph.save(path, fmt=fmt)
if backend == 'networkx':
save_networkx(self, path, fmt)
elif backend == 'graph-tool':
save_graphtool(self, path, fmt)
elif backend is None:
try:
save_networkx(self, path, fmt)
except ImportError:
try:
save_graphtool(self, path, fmt)
except ImportError:
raise ImportError('Cannot import networkx nor graph-tool.')
else:
raise ValueError('Unknown backend {}.'.format(backend))
| [
"graph_tool.load_graph",
"networkx.DiGraph",
"os.path.splitext",
"networkx.Graph",
"numpy.stack",
"networkx.to_scipy_sparse_matrix",
"graph_tool.spectral.adjacency",
"numpy.full",
"graph_tool._gt_type"
] | [((9779, 9826), 'networkx.to_scipy_sparse_matrix', 'nx.to_scipy_sparse_matrix', (['graph'], {'weight': 'weight'}), '(graph, weight=weight)\n', (9804, 9826), True, 'import networkx as nx\n'), ((12595, 12638), 'graph_tool.spectral.adjacency', 'gt.spectral.adjacency', (['graph'], {'weight': 'weight'}), '(graph, weight=weight)\n', (12616, 12638), True, 'import graph_tool as gt\n'), ((4767, 4779), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (4777, 4779), True, 'import networkx as nx\n'), ((4807, 4817), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (4815, 4817), True, 'import networkx as nx\n'), ((7321, 7347), 'graph_tool._gt_type', 'gt._gt_type', (['weights.dtype'], {}), '(weights.dtype)\n', (7332, 7347), True, 'import graph_tool as gt\n'), ((15038, 15066), 'graph_tool.load_graph', 'gt.load_graph', (['path'], {'fmt': 'fmt'}), '(path, fmt=fmt)\n', (15051, 15066), True, 'import graph_tool as gt\n'), ((1525, 1575), 'numpy.stack', 'np.stack', (['[self.signals[n] for n in names]'], {'axis': '(1)'}), '([self.signals[n] for n in names], axis=1)\n', (1533, 1575), True, 'import numpy as np\n'), ((7560, 7585), 'graph_tool._gt_type', 'gt._gt_type', (['signal.dtype'], {}), '(signal.dtype)\n', (7571, 7585), True, 'import graph_tool as gt\n'), ((14598, 14620), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (14614, 14620), False, 'import os\n'), ((18673, 18695), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (18689, 18695), False, 'import os\n'), ((10098, 10134), 'numpy.full', 'np.full', (['graph_pg.n_vertices', 'np.nan'], {}), '(graph_pg.n_vertices, np.nan)\n', (10105, 10134), True, 'import numpy as np\n')] |
import time
import cv2
import numpy as np
import tensorflow as tf
from keras import Model
def vgg16_cnn(df, h = 800, w = 800, c = 3):
    '''
    Extract VGG16 feature maps for a set of images.

    Uses an ImageNet-pretrained VGG16 truncated at layer 17 (i.e. without
    the classifier head) to compute the convolutional feature map of every
    image listed in the dataframe.

    Params:
        df: dataframe whose column 0 holds the image file paths.
        h, w, c: image height, width and channel count fed to the model.

    Returns:
        - df with additional columns:
            n_anchor: number of potential anchors per image (w_fm * h_fm)
            w_fm: width of the feature map
            h_fm: height of the feature map
        - the truncated VGG16 Keras model
        - feature map predicted for the whole (normalized) image batch
    '''
    # show execution time
    start_time = time.time()
    # --------------- vgg16 model -------------
    # NOTE(review): input_shape is passed as (w, h, c) while the signature
    # orders the defaults as (h, w) — harmless while both are 800, but
    # confirm the intended order before using non-square inputs.
    vgg16 = tf.keras.applications.VGG16(
        include_top=False,
        weights='imagenet',
        input_shape = (w, h, c)
    )
    # Unfreeze all convolutional layers for fine-tuning.
    for layer in vgg16.layers:
        layer.trainable = True
    vgg16_model = Model(inputs= [vgg16.layers[0].input], outputs= [vgg16.layers[17].output])
    # train data
    train_images = []
    for i in df.iloc:
        img = cv2.imread(str(i[0]))
        train_images.append(img)
    train_images = np.array(train_images)
    train_images = train_images/255 # normalize images
    feature_map = vgg16_model.predict(train_images)
    # feature map
    # NOTE(review): this loop re-reads and re-predicts every image one by
    # one even though `feature_map` above already holds the batch result;
    # the per-image pass is only used to record feature-map sizes.
    anchors = []
    w_fms = []
    h_fms = []
    features = []  # NOTE(review): never used below
    for i in df.iloc:
        img = cv2.imread(str(i[0]))
        fm = vgg16_model.predict(np.expand_dims(img, 0))
        _, w_fm, h_fm, _ = fm.shape
        n_anchor = w_fm * h_fm
        anchors.append(n_anchor)
        w_fms.append(w_fm)
        h_fms.append(h_fm)
    df['n_anchor'] = anchors
    df['w_fm'] = w_fms
    df['h_fm'] = h_fms
    print(f"\n------- Execution time: {(time.time() - start_time)/60:.2f} minutes -------\n")
    # n_anchor here is the value computed for the last image in the loop.
    print('Number of anchors needed: ', n_anchor)
    # model.summary() prints its own report and returns None.
    print('\n', vgg16_model.summary())
    return df, vgg16_model, feature_map | [
"tensorflow.keras.applications.VGG16",
"keras.Model",
"numpy.array",
"numpy.expand_dims",
"time.time"
] | [((523, 534), 'time.time', 'time.time', ([], {}), '()\n', (532, 534), False, 'import time\n'), ((598, 691), 'tensorflow.keras.applications.VGG16', 'tf.keras.applications.VGG16', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'input_shape': '(w, h, c)'}), "(include_top=False, weights='imagenet',\n input_shape=(w, h, c))\n", (625, 691), True, 'import tensorflow as tf\n'), ((783, 855), 'keras.Model', 'Model', ([], {'inputs': '[vgg16.layers[0].input]', 'outputs': '[vgg16.layers[17].output]'}), '(inputs=[vgg16.layers[0].input], outputs=[vgg16.layers[17].output])\n', (788, 855), False, 'from keras import Model\n'), ((997, 1019), 'numpy.array', 'np.array', (['train_images'], {}), '(train_images)\n', (1005, 1019), True, 'import numpy as np\n'), ((1281, 1303), 'numpy.expand_dims', 'np.expand_dims', (['img', '(0)'], {}), '(img, 0)\n', (1295, 1303), True, 'import numpy as np\n'), ((1555, 1566), 'time.time', 'time.time', ([], {}), '()\n', (1564, 1566), False, 'import time\n')] |
"""
This module implements training and evaluation of a Convolutional Neural Network in PyTorch.
You should fill in code into indicated sections.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import os
# import cifar10_utilsLisa
import cifar10_utils
import matplotlib.pyplot as plt
import time
import torch
import torch.nn as nn
from torch.optim import lr_scheduler
import torchvision
# Default constants
LEARNING_RATE_DEFAULT = 1e-4  # Adam learning rate
BATCH_SIZE_DEFAULT = 32  # samples per training batch
MAX_STEPS_DEFAULT = 5000  # total number of training steps
EVAL_FREQ_DEFAULT = 500  # evaluate on the test set every this many steps
OPTIMIZER_DEFAULT = 'ADAM'
# Directory in which cifar data is saved
DATA_DIR_DEFAULT = './cifar10/cifar-10-batches-py'
FLAGS = None  # populated from the command line in the __main__ block
def accuracy(predictions, targets):
"""
Computes the prediction accuracy, i.e. the average of correct predictions
of the network.
Args:
predictions: 2D float array of size [batch_size, n_classes]
labels: 2D int array of size [batch_size, n_classes]
with one-hot encoding. Ground truth labels for
each sample in the batch
Returns:
accuracy: scalar float, the accuracy of predictions,
i.e. the average correct predictions over the whole batch
TODO:
Implement accuracy computation.
"""
########################
# PUT YOUR CODE HERE #
#######################
# find argmax of prediction
batch_size, _ = predictions.shape
predictions_argmax = predictions.argmax(dim=1)
targets_argmax = targets.argmax(dim=1)
correct = torch.sum(predictions_argmax == targets_argmax)
accuracy = correct.item() / float(batch_size)
########################
# END OF YOUR CODE #
#######################
return accuracy
def train():
    """
    Performs training and evaluation of the ConvNet model.

    Trains a pretrained ResNet-34 with a replaced 10-class output layer on
    CIFAR-10, evaluates on the whole test set every ``FLAGS.eval_freq``
    steps, and plots/saves the loss and accuracy curves.
    """
    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)
    torch.manual_seed(42)

    start_time = time.time()
    data_dict = cifar10_utils.get_cifar10(
        data_dir=FLAGS.data_dir, validation_size=0)
    trainset = data_dict["train"]
    validationset = data_dict["validation"]  # empty (validation_size=0)
    testset = data_dict["test"]

    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    if torch.cuda.is_available():
        torch.cuda.manual_seed(42)
        torch.cuda.manual_seed_all(42)

    # create Conv Net: pretrained ResNet-34 with a fresh 10-class head
    model = torchvision.models.resnet34(pretrained=True)
    model.fc = nn.Linear(512, 10)
    model.to(device)

    # BUG FIX: the original mixed a printf-style placeholder ("%d") with
    # str.format, so it printed the literal text and dropped the count.
    n_parameters = sum(np.prod(p.shape) for p in model.parameters())
    print("Model has {} parameters".format(n_parameters))

    # loss function, optimizer and learning-rate schedule
    loss_module = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), FLAGS.learning_rate)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=1500, gamma=0.1)

    #######################
    ###### training #######
    #######################
    model.train()
    training_loss = []
    testset_loss = []
    training_accuracy = []
    testset_accuracy = []
    running_loss = 0.0
    running_acc = 0.0
    train_batch_size = FLAGS.batch_size
    eval_frequency = FLAGS.eval_freq
    for iter_step in range(FLAGS.max_steps):
        # get next batch and move it to the device
        train_images, train_labels = trainset.next_batch(train_batch_size)
        train_images = torch.from_numpy(train_images)
        train_labels = torch.from_numpy(train_labels)
        train_images, train_labels = train_images.to(device), train_labels.to(device)

        # forward pass; CrossEntropyLoss expects class indices, not one-hot
        train_output = model(train_images)
        train_labels_argmax = torch.argmax(train_labels, dim=1)
        loss = loss_module(train_output, train_labels_argmax)

        # backpropagation and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        running_acc += accuracy(train_output, train_labels)

        if iter_step % eval_frequency == eval_frequency - 1:
            # record mean training loss/accuracy since the last evaluation
            training_loss.append(running_loss / eval_frequency)
            running_loss = 0.0
            training_accuracy.append(running_acc / eval_frequency)
            running_acc = 0.0
            # evaluate on the whole test set
            model.eval()
            with torch.no_grad():
                running_test_loss = 0.0
                running_test_acc = 0.0
                test_step_iter = 0.0
                test_batch_size = FLAGS.batch_size
                test_set_img_counter = 0.0
                num_examples_test = testset.num_examples
                while test_set_img_counter < num_examples_test:
                    test_images, test_labels = testset.next_batch(test_batch_size)
                    test_images = torch.from_numpy(test_images)
                    test_labels = torch.from_numpy(test_labels)
                    test_images, test_labels = test_images.to(device), test_labels.to(device)
                    test_output = model(test_images)
                    test_labels_argmax = torch.argmax(test_labels, dim=1)
                    test_loss = loss_module(test_output, test_labels_argmax)
                    running_test_loss += test_loss.item()
                    running_test_acc += accuracy(test_output, test_labels)
                    test_step_iter += 1
                    test_set_img_counter += test_batch_size
                testset_loss.append(running_test_loss / test_step_iter)
                testset_accuracy.append(running_test_acc / test_step_iter)
            # set model back to training mode
            model.train()
        # learning-rate schedule step (once per training step)
        scheduler.step()

    # plot the train and test loss/accuracy curves
    fig = plt.figure(figsize=(12, 4))
    ax1 = fig.add_subplot(121)
    ax1.plot(np.arange(len(training_loss)) * eval_frequency + eval_frequency, training_loss, label="Training")
    ax1.plot(np.arange(len(testset_loss)) * eval_frequency + eval_frequency, testset_loss, label="Test")
    ax1.set_xlabel("Step Number")
    ax1.set_ylabel("Cross Entropy Loss")
    ax1.set_title("Training and Test Loss")
    ax1.legend()
    ax2 = fig.add_subplot(122)
    ax2.plot(np.arange(len(training_accuracy)) * eval_frequency + eval_frequency, training_accuracy, label="Training")
    ax2.plot(np.arange(len(testset_accuracy)) * eval_frequency + eval_frequency, testset_accuracy, label="Test")
    ax2.set_xlabel("Step Number")
    ax2.set_ylabel("Accuracy")
    ax2.set_title("Training and Test Accuracy")
    ax2.legend()
    plt.show()
    # NOTE(review): assumes ./results exists — savefig fails otherwise.
    fig.savefig("./results/pytorchCNN.png")

    print("Final Accuracy")
    print(testset_accuracy[-1])
    print(time.time() - start_time)
def print_flags():
    """
    Prints all entries in FLAGS variable.
    """
    for flag_name, flag_value in vars(FLAGS).items():
        print('{} : {}'.format(flag_name, flag_value))
def main():
    """Print the configured flags, ensure the data directory exists,
    and launch training."""
    print_flags()
    data_dir = FLAGS.data_dir
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)
    train()
if __name__ == '__main__':
    # Command line arguments, declared as (flag, type, default, help).
    parser = argparse.ArgumentParser()
    flag_specs = [
        ('--learning_rate', float, LEARNING_RATE_DEFAULT, 'Learning rate'),
        ('--max_steps', int, MAX_STEPS_DEFAULT, 'Number of steps to run trainer.'),
        ('--batch_size', int, BATCH_SIZE_DEFAULT, 'Batch size to run trainer.'),
        ('--eval_freq', int, EVAL_FREQ_DEFAULT, 'Frequency of evaluation on the test set'),
        ('--data_dir', str, DATA_DIR_DEFAULT, 'Directory for storing input data'),
    ]
    for flag, flag_type, default, description in flag_specs:
        parser.add_argument(flag, type=flag_type, default=default, help=description)
    FLAGS, unparsed = parser.parse_known_args()
    main()
| [
"numpy.prod",
"torch.nn.CrossEntropyLoss",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.sum",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.random.seed",
"torch.argmax",
"time.time",
"matplotlib.pyplot.show",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.device",... | [((1611, 1658), 'torch.sum', 'torch.sum', (['(predictions_argmax == targets_argmax)'], {}), '(predictions_argmax == targets_argmax)\n', (1620, 1658), False, 'import torch\n'), ((2130, 2148), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (2144, 2148), True, 'import numpy as np\n'), ((2153, 2174), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (2170, 2174), False, 'import torch\n'), ((2282, 2293), 'time.time', 'time.time', ([], {}), '()\n', (2291, 2293), False, 'import time\n'), ((2415, 2484), 'cifar10_utils.get_cifar10', 'cifar10_utils.get_cifar10', ([], {'data_dir': 'FLAGS.data_dir', 'validation_size': '(0)'}), '(data_dir=FLAGS.data_dir, validation_size=0)\n', (2440, 2484), False, 'import cifar10_utils\n'), ((2700, 2725), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2723, 2725), False, 'import torch\n'), ((2836, 2880), 'torchvision.models.resnet34', 'torchvision.models.resnet34', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2863, 2880), False, 'import torchvision\n'), ((2897, 2915), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(10)'], {}), '(512, 10)\n', (2906, 2915), True, 'import torch.nn as nn\n'), ((3110, 3131), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3129, 3131), True, 'import torch.nn as nn\n'), ((3267, 3324), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizer'], {'step_size': '(1500)', 'gamma': '(0.1)'}), '(optimizer, step_size=1500, gamma=0.1)\n', (3286, 3324), False, 'from torch.optim import lr_scheduler\n'), ((6968, 6995), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (6978, 6995), True, 'import matplotlib.pyplot as plt\n'), ((7781, 7791), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7789, 7791), True, 'import matplotlib.pyplot as plt\n'), ((8503, 8528), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8526, 8528), False, 
'import argparse\n'), ((2642, 2667), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2665, 2667), False, 'import torch\n'), ((2618, 2638), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2630, 2638), False, 'import torch\n'), ((2673, 2692), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2685, 2692), False, 'import torch\n'), ((2735, 2761), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(42)'], {}), '(42)\n', (2757, 2761), False, 'import torch\n'), ((2770, 2800), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(42)'], {}), '(42)\n', (2796, 2800), False, 'import torch\n'), ((3909, 3939), 'torch.from_numpy', 'torch.from_numpy', (['train_images'], {}), '(train_images)\n', (3925, 3939), False, 'import torch\n'), ((3963, 3993), 'torch.from_numpy', 'torch.from_numpy', (['train_labels'], {}), '(train_labels)\n', (3979, 3993), False, 'import torch\n'), ((4296, 4329), 'torch.argmax', 'torch.argmax', (['train_labels'], {'dim': '(1)'}), '(train_labels, dim=1)\n', (4308, 4329), False, 'import torch\n'), ((8314, 8344), 'os.path.exists', 'os.path.exists', (['FLAGS.data_dir'], {}), '(FLAGS.data_dir)\n', (8328, 8344), False, 'import os\n'), ((8354, 8381), 'os.makedirs', 'os.makedirs', (['FLAGS.data_dir'], {}), '(FLAGS.data_dir)\n', (8365, 8381), False, 'import os\n'), ((7907, 7918), 'time.time', 'time.time', ([], {}), '()\n', (7916, 7918), False, 'import time\n'), ((5197, 5212), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5210, 5212), False, 'import torch\n'), ((3011, 3027), 'numpy.prod', 'np.prod', (['p.shape'], {}), '(p.shape)\n', (3018, 3027), True, 'import numpy as np\n'), ((5812, 5841), 'torch.from_numpy', 'torch.from_numpy', (['test_images'], {}), '(test_images)\n', (5828, 5841), False, 'import torch\n'), ((5876, 5905), 'torch.from_numpy', 'torch.from_numpy', (['test_labels'], {}), '(test_labels)\n', (5892, 5905), False, 'import torch\n'), ((6205, 6237), 'torch.argmax', 
'torch.argmax', (['test_labels'], {'dim': '(1)'}), '(test_labels, dim=1)\n', (6217, 6237), False, 'import torch\n')] |
from sklearn.decomposition import TruncatedSVD
from sklearn.decomposition import PCA
from sklearn.decomposition import LatentDirichletAllocation
import numpy as np
class Math:
    """Dimensionality-reduction wrappers (sklearn) plus basic vector math."""

    def svd(self, data, k):
        """Reduce `data` to `k` components with truncated SVD.

        Returns (transformed data, components, explained-variance ratios).
        """
        s = TruncatedSVD(n_components=k, n_iter=7, random_state=42)
        d = s.fit(data)
        components = d.components_
        ev = d.explained_variance_ratio_
        data = s.transform(data)
        return data, components, ev

    def pca(self, data, k):
        """Reduce `data` to its first `k` principal components.

        Returns (transformed data, components, explained-variance ratios).
        """
        p = PCA(n_components=k)
        d = p.fit(data)
        components = d.components_
        ev = d.explained_variance_ratio_
        data = p.transform(data)
        return data, components, ev

    def lda(self, data, k):
        """Fit a `k`-topic Latent Dirichlet Allocation model on `data`.

        Returns (topic distributions, components, explained variance).
        The explained-variance slot is zero-filled because LDA does not
        provide one; it is kept so all three reducers share one interface.
        """
        l = LatentDirichletAllocation(n_components=k, random_state=0,
                                      learning_method='batch', learning_decay=0.0)
        d = l.fit(data)
        components = d.components_
        ev = np.zeros((k,))  # placeholder: LDA has no explained_variance_ratio_
        data = l.transform(data)
        return data, components, ev

    def dot_product(self, v1, v2):
        """Return the dot product of equal-length vectors `v1` and `v2`."""
        return sum(a * b for a, b in zip(v1, v2))

    def length(self, v):
        """Return the Euclidean (L2) norm of vector `v`."""
        return sum(e ** 2 for e in v) ** 0.5

    def euclidean_distance(self, v1, v2):
        """Return the Euclidean distance between vectors `v1` and `v2`."""
        return sum((a - b) ** 2 for a, b in zip(v1, v2)) ** 0.5

    def manhattan_distance(self, v1, v2):
        """Return the Manhattan (L1) distance between vectors `v1` and `v2`."""
        return sum(abs(a - b) for a, b in zip(v1, v2))

    def cosine_similarity(self, v1, v2):
        """Return the cosine similarity of `v1` and `v2`.

        Raises ZeroDivisionError when either vector has zero length,
        matching the original behavior.
        """
        numerator = self.dot_product(v1, v2)
        denominator = self.length(v1) * self.length(v2)
        return numerator / denominator
"sklearn.decomposition.PCA",
"numpy.zeros",
"sklearn.decomposition.LatentDirichletAllocation",
"sklearn.decomposition.TruncatedSVD"
] | [((209, 264), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': 'k', 'n_iter': '(7)', 'random_state': '(42)'}), '(n_components=k, n_iter=7, random_state=42)\n', (221, 264), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((437, 456), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'k'}), '(n_components=k)\n', (440, 456), False, 'from sklearn.decomposition import PCA\n'), ((629, 736), 'sklearn.decomposition.LatentDirichletAllocation', 'LatentDirichletAllocation', ([], {'n_components': 'k', 'random_state': '(0)', 'learning_method': '"""batch"""', 'learning_decay': '(0.0)'}), "(n_components=k, random_state=0, learning_method=\n 'batch', learning_decay=0.0)\n", (654, 736), False, 'from sklearn.decomposition import LatentDirichletAllocation\n'), ((786, 800), 'numpy.zeros', 'np.zeros', (['(k,)'], {}), '((k,))\n', (794, 800), True, 'import numpy as np\n')] |
import numpy as np
import rospy
from ros_rl.msg import EnvAct, EnvObs, EnvDescMsg
from ros_rl.srv import GetEnvDesc, GetEnvDescResponse
from std_srvs.srv import Empty, EmptyResponse
from ros_rl.utils.thing import ThingDesc, ThingfromDesc, ThingDescfromMsg, ThingfromMsg
# Lifecycle states for the RosEnv.run() state machine.
INACTIVE = 0
ACTIVE = 1
FINISHED = 2
# Human-readable state names, used for logging/debugging.
state_map = {INACTIVE:'INACTIVE', ACTIVE:'ACTIVE', FINISHED:'FINISHED'}
class EnvDesc(object):
    """Static description of an RL environment: task type, horizons,
    reward/return bounds, and the action/observation space specs."""

    # Scalar attributes copied one-to-one onto EnvDescMsg by toMsg().
    _SCALAR_FIELDS = (
        "episodic", "suggestedMaxT", "suggestedMaxEps", "gamma",
        "minReward", "maxReward", "minReturn", "maxReturn",
        "suggestedPlotMinPerformance", "suggestedPlotMaxPerformance",
    )

    def __init__(self):
        inf = float("inf")
        # Episodic or continuing? None until a concrete env decides.
        self.episodic = None
        # If episodes are cut, when to cut them.
        self.suggestedMaxT = 1
        # Suggested number of episodes an agent gets to learn this env.
        self.suggestedMaxEps = 1
        # Reward discount; only used for episodic (not continuing) tasks.
        self.gamma = 1.
        # Action / observation space descriptions.
        self.actDesc = ThingDesc()
        self.obsDesc = ThingDesc()
        # Reward and return bounds; +/-inf means unknown or unbounded.
        self.minReward = -inf
        self.maxReward = inf
        self.minReturn = -inf
        self.maxReturn = inf
        # Recommended plot y-axis limits.
        self.suggestedPlotMinPerformance = 0.
        self.suggestedPlotMaxPerformance = 1.

    def toMsg(self):
        """Serialize this description into an EnvDescMsg."""
        msg = EnvDescMsg()
        for name in self._SCALAR_FIELDS:
            setattr(msg, name, getattr(self, name))
        # Nested Thing descriptions serialize themselves.
        msg.actDesc = self.actDesc.toMsg()
        msg.obsDesc = self.obsDesc.toMsg()
        return msg
def EnvDescfromMsg(msg):
    """Build an EnvDesc from an EnvDescMsg (inverse of EnvDesc.toMsg)."""
    desc = EnvDesc()
    # Scalar fields transfer verbatim.
    for name in ("episodic", "suggestedMaxT", "suggestedMaxEps", "gamma",
                 "minReward", "maxReward", "minReturn", "maxReturn",
                 "suggestedPlotMinPerformance", "suggestedPlotMaxPerformance"):
        setattr(desc, name, getattr(msg, name))
    # Nested Thing descriptions need their own deserializer.
    desc.actDesc = ThingDescfromMsg(msg.actDesc)
    desc.obsDesc = ThingDescfromMsg(msg.obsDesc)
    return desc
class RosEnv(object):
    """Base class wiring an RL environment into ROS.

    Publishes observations on /RL/env/<env_name>/obs, receives actions on
    /RL/env/<env_name>/act, and exposes start/reset/stop/getDesc services.
    Subclasses implement the abstract world/controller hooks (resetWorld,
    inTerminalState, newEpisode, send_action, stop_controllers,
    compute_obs, compute_reward) and drive run() from their main loop.
    """
    def __init__(self, env_name, desc, rate=100, act_wait=0.005):
        """Set up topics, services, and the INACTIVE/ACTIVE/FINISHED state.

        env_name: topic/service namespace suffix under /RL/env/.
        desc:     EnvDesc describing this environment.
        rate:     loop rate in Hz for stepping between observations.
        act_wait: seconds to sleep before sending the action each step.
        """
        self.env_name = env_name
        self.desc = desc
        self.act_wait = act_wait
        # setup obs publisher
        self.obs_pub = rospy.Publisher("/RL/env/"+self.env_name+"/obs", EnvObs, queue_size=1)
        # setup action subscriber
        self.act_sub = rospy.Subscriber("/RL/env/"+self.env_name+"/act", EnvAct, self.act_callback, queue_size=1)
        # setup environment services
        rospy.Service("/RL/env/"+self.env_name+"/start", Empty, self.handle_start_request)
        rospy.Service("/RL/env/"+self.env_name+"/reset", Empty, self.handle_reset_request)
        rospy.Service("/RL/env/"+self.env_name+"/stop", Empty, self.handle_stop_request)
        rospy.Service("/RL/env/"+self.env_name+"/getDesc", GetEnvDesc, self.handle_envDesc_request)
        # initalize obs, act, and reward
        self.obs = ThingfromDesc(self.desc.obsDesc)
        self.action = ThingfromDesc(self.desc.actDesc)
        self.reward = 0
        self.terminal = False
        # set rate for looping between observations
        self.rate = rospy.Rate(rate) # in hz
        # Control flags flipped asynchronously by the service callbacks.
        self.start_flag = False
        self.reset_flag = False
        self.stop_flag = False
        self.is_reset = False
        # Set True by act_callback when a fresh action message arrives.
        self.new_action = False
        self.act_cmds = 0
        self.state = INACTIVE
        self.time_step = 0
    def act_callback(self, msg):
        """Subscriber callback: store the latest action, clamped to bounds."""
        act = ThingfromMsg(msg.act)
        self.action = self.constrain_actions(act)
        self.new_action = True
    def handle_envDesc_request(self, req):
        """Service handler: return this environment's description."""
        resp = GetEnvDescResponse()
        resp.envDesc = self.desc.toMsg()
        return resp
    def handle_start_request(self, req):
        """Service handler: request the INACTIVE -> ACTIVE transition."""
        self.start_flag = True
        return EmptyResponse()
    def handle_stop_request(self, req):
        """Service handler: stop controllers and go INACTIVE immediately."""
        self.stop_flag = True
        self.stop_controllers()
        self.state = INACTIVE
        return EmptyResponse()
    def handle_reset_request(self, req):
        """Service handler: stop controllers and mark the episode FINISHED."""
        self.reset_flag = True
        self.stop_controllers()
        self.state = FINISHED
        return EmptyResponse()
    def constrain_actions(self, action):
        """Clamp an action to the bounds declared by the action description."""
        if self.desc.actDesc.numDisc > 0:
            # Discrete component clamped to [0, numDisc].
            action.disc = max(min(action.disc, self.desc.actDesc.numDisc), 0)
        if self.desc.actDesc.contDim > 0:
            # Continuous components clipped per-dimension to contRange.
            action.cont = np.clip(action.cont, self.desc.actDesc.contRange[:, 0], self.desc.actDesc.contRange[:, 1])
        return action
    def publish_obs(self):
        """Publish the current observation, reward, and terminal flag."""
        msg = EnvObs()
        msg.stamp = rospy.Time.now()
        msg.obs = self.obs.toMsg()
        msg.reward = self.reward
        msg.terminal = self.terminal
        self.obs_pub.publish(msg)
    def reset(self):
        """Reset the world and begin a new episode."""
        self.resetWorld()
        self.newEpisode()
        self.is_reset = True
    def resetWorld(self):
        # Subclass hook: restore the world to its initial configuration.
        raise NotImplementedError
    def inTerminalState(self):
        # Subclass hook: update self.terminal from the current state.
        raise NotImplementedError
    def newEpisode(self):
        # Subclass hook: per-episode (re)initialization.
        raise NotImplementedError
    def send_action(self):
        # Subclass hook: forward self.action to the actuators/controllers.
        raise NotImplementedError
    def stop_controllers(self):
        # Subclass hook: halt all actuation.
        raise NotImplementedError
    def compute_obs(self):
        # Subclass hook: refresh self.obs from sensors/simulation.
        raise NotImplementedError
    def compute_reward(self):
        # Subclass hook: refresh self.reward.
        raise NotImplementedError
    def run(self):
        """Advance the environment state machine by one tick.

        INACTIVE: wait for start; force a reset first if not yet reset.
        ACTIVE:   observe -> publish -> (terminal ? finish : act).
        FINISHED: report action statistics, reset, go INACTIVE.
        """
        # print('state {0}\t start {1}\t reset {2}\t reset_flag {3}\t action {4}'.format(state_map[self.state], self.start_flag, self.is_reset, self.reset_flag, self.new_action))
        if self.state == INACTIVE:
            if self.start_flag:
                if self.is_reset:
                    self.start_flag = False
                    self.state = ACTIVE
                else:
                    # Cannot start before a reset; trigger one and retry.
                    self.reset_flag = True
            if self.reset_flag:
                self.reset()
                self.reset_flag = False
        elif self.state == ACTIVE:
            self.is_reset = False
            self.compute_obs()
            self.inTerminalState()
            self.compute_reward()
            self.publish_obs()
            if self.terminal:
                self.stop_controllers()
                self.state = FINISHED
            else:
                # Give the agent act_wait seconds to respond before acting.
                rospy.sleep(self.act_wait)
                self.send_action()
        elif self.state == FINISHED:
            print("received {0:d}/{1:d} actions".format(self.act_cmds, self.time_step-1))
            self.reset()
            self.state = INACTIVE
        else:
            print('unknown state')
            self.stop_controllers()
            self.state = INACTIVE
| [
"numpy.clip",
"ros_rl.srv.GetEnvDescResponse",
"std_srvs.srv.EmptyResponse",
"rospy.Subscriber",
"ros_rl.msg.EnvObs",
"rospy.Service",
"rospy.sleep",
"rospy.Time.now",
"rospy.Rate",
"ros_rl.utils.thing.ThingfromDesc",
"ros_rl.msg.EnvDescMsg",
"ros_rl.utils.thing.ThingfromMsg",
"ros_rl.utils.... | [((2261, 2290), 'ros_rl.utils.thing.ThingDescfromMsg', 'ThingDescfromMsg', (['msg.actDesc'], {}), '(msg.actDesc)\n', (2277, 2290), False, 'from ros_rl.utils.thing import ThingDesc, ThingfromDesc, ThingDescfromMsg, ThingfromMsg\n'), ((2310, 2339), 'ros_rl.utils.thing.ThingDescfromMsg', 'ThingDescfromMsg', (['msg.obsDesc'], {}), '(msg.obsDesc)\n', (2326, 2339), False, 'from ros_rl.utils.thing import ThingDesc, ThingfromDesc, ThingDescfromMsg, ThingfromMsg\n'), ((875, 886), 'ros_rl.utils.thing.ThingDesc', 'ThingDesc', ([], {}), '()\n', (884, 886), False, 'from ros_rl.utils.thing import ThingDesc, ThingfromDesc, ThingDescfromMsg, ThingfromMsg\n'), ((953, 964), 'ros_rl.utils.thing.ThingDesc', 'ThingDesc', ([], {}), '()\n', (962, 964), False, 'from ros_rl.utils.thing import ThingDesc, ThingfromDesc, ThingDescfromMsg, ThingfromMsg\n'), ((1455, 1467), 'ros_rl.msg.EnvDescMsg', 'EnvDescMsg', ([], {}), '()\n', (1465, 1467), False, 'from ros_rl.msg import EnvAct, EnvObs, EnvDescMsg\n'), ((2871, 2945), 'rospy.Publisher', 'rospy.Publisher', (["('/RL/env/' + self.env_name + '/obs')", 'EnvObs'], {'queue_size': '(1)'}), "('/RL/env/' + self.env_name + '/obs', EnvObs, queue_size=1)\n", (2886, 2945), False, 'import rospy\n'), ((2999, 3098), 'rospy.Subscriber', 'rospy.Subscriber', (["('/RL/env/' + self.env_name + '/act')", 'EnvAct', 'self.act_callback'], {'queue_size': '(1)'}), "('/RL/env/' + self.env_name + '/act', EnvAct, self.\n act_callback, queue_size=1)\n", (3015, 3098), False, 'import rospy\n'), ((3136, 3227), 'rospy.Service', 'rospy.Service', (["('/RL/env/' + self.env_name + '/start')", 'Empty', 'self.handle_start_request'], {}), "('/RL/env/' + self.env_name + '/start', Empty, self.\n handle_start_request)\n", (3149, 3227), False, 'import rospy\n'), ((3227, 3318), 'rospy.Service', 'rospy.Service', (["('/RL/env/' + self.env_name + '/reset')", 'Empty', 'self.handle_reset_request'], {}), "('/RL/env/' + self.env_name + '/reset', Empty, self.\n 
handle_reset_request)\n", (3240, 3318), False, 'import rospy\n'), ((3318, 3407), 'rospy.Service', 'rospy.Service', (["('/RL/env/' + self.env_name + '/stop')", 'Empty', 'self.handle_stop_request'], {}), "('/RL/env/' + self.env_name + '/stop', Empty, self.\n handle_stop_request)\n", (3331, 3407), False, 'import rospy\n'), ((3407, 3507), 'rospy.Service', 'rospy.Service', (["('/RL/env/' + self.env_name + '/getDesc')", 'GetEnvDesc', 'self.handle_envDesc_request'], {}), "('/RL/env/' + self.env_name + '/getDesc', GetEnvDesc, self.\n handle_envDesc_request)\n", (3420, 3507), False, 'import rospy\n'), ((3560, 3592), 'ros_rl.utils.thing.ThingfromDesc', 'ThingfromDesc', (['self.desc.obsDesc'], {}), '(self.desc.obsDesc)\n', (3573, 3592), False, 'from ros_rl.utils.thing import ThingDesc, ThingfromDesc, ThingDescfromMsg, ThingfromMsg\n'), ((3615, 3647), 'ros_rl.utils.thing.ThingfromDesc', 'ThingfromDesc', (['self.desc.actDesc'], {}), '(self.desc.actDesc)\n', (3628, 3647), False, 'from ros_rl.utils.thing import ThingDesc, ThingfromDesc, ThingDescfromMsg, ThingfromMsg\n'), ((3775, 3791), 'rospy.Rate', 'rospy.Rate', (['rate'], {}), '(rate)\n', (3785, 3791), False, 'import rospy\n'), ((4090, 4111), 'ros_rl.utils.thing.ThingfromMsg', 'ThingfromMsg', (['msg.act'], {}), '(msg.act)\n', (4102, 4111), False, 'from ros_rl.utils.thing import ThingDesc, ThingfromDesc, ThingDescfromMsg, ThingfromMsg\n'), ((4252, 4272), 'ros_rl.srv.GetEnvDescResponse', 'GetEnvDescResponse', ([], {}), '()\n', (4270, 4272), False, 'from ros_rl.srv import GetEnvDesc, GetEnvDescResponse\n'), ((4422, 4437), 'std_srvs.srv.EmptyResponse', 'EmptyResponse', ([], {}), '()\n', (4435, 4437), False, 'from std_srvs.srv import Empty, EmptyResponse\n'), ((4586, 4601), 'std_srvs.srv.EmptyResponse', 'EmptyResponse', ([], {}), '()\n', (4599, 4601), False, 'from std_srvs.srv import Empty, EmptyResponse\n'), ((4752, 4767), 'std_srvs.srv.EmptyResponse', 'EmptyResponse', ([], {}), '()\n', (4765, 4767), False, 'from std_srvs.srv 
import Empty, EmptyResponse\n'), ((5154, 5162), 'ros_rl.msg.EnvObs', 'EnvObs', ([], {}), '()\n', (5160, 5162), False, 'from ros_rl.msg import EnvAct, EnvObs, EnvDescMsg\n'), ((5183, 5199), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (5197, 5199), False, 'import rospy\n'), ((4998, 5093), 'numpy.clip', 'np.clip', (['action.cont', 'self.desc.actDesc.contRange[:, 0]', 'self.desc.actDesc.contRange[:, 1]'], {}), '(action.cont, self.desc.actDesc.contRange[:, 0], self.desc.actDesc.\n contRange[:, 1])\n', (5005, 5093), True, 'import numpy as np\n'), ((6781, 6807), 'rospy.sleep', 'rospy.sleep', (['self.act_wait'], {}), '(self.act_wait)\n', (6792, 6807), False, 'import rospy\n')] |
# encoding: utf-8
"""Unit test suite for `cr.cube.stripe.assembler` module."""
import numpy as np
import pytest
from cr.cube.cube import Cube
from cr.cube.dimension import Dimension, _Element, _OrderSpec, _Subtotal
from cr.cube.enums import COLLATION_METHOD as CM
from cr.cube.stripe.assembler import (
StripeAssembler,
_BaseOrderHelper,
_BaseSortByValueHelper,
_OrderHelper,
_SortByLabelHelper,
_SortByMeasureHelper,
)
from cr.cube.stripe.measure import (
StripeMeasures,
_BaseSecondOrderMeasure,
_Means,
_PopulationProportions,
_PopulationProportionStderrs,
_ScaledCounts,
_TableProportions,
_TableProportionStddevs,
_TableProportionStderrs,
_UnweightedBases,
_UnweightedCounts,
_WeightedBases,
_WeightedCounts,
)
from ...unitutil import class_mock, instance_mock, method_mock, property_mock
class DescribeStripeAssembler:
    """Unit test suite for `cr.cube.stripe.assembler.StripeAssembler` object."""
    @pytest.mark.parametrize(
        "measure_prop_name, MeasureCls",
        (
            ("means", _Means),
            ("population_proportions", _PopulationProportions),
            ("population_proportion_stderrs", _PopulationProportionStderrs),
            ("table_proportion_stddevs", _TableProportionStddevs),
            ("table_proportion_stderrs", _TableProportionStderrs),
            ("table_proportions", _TableProportions),
            ("unweighted_bases", _UnweightedBases),
            ("unweighted_counts", _UnweightedCounts),
            ("weighted_bases", _WeightedBases),
            ("weighted_counts", _WeightedCounts),
        ),
    )
    def it_assembles_various_measures(
        self,
        request,
        _measures_prop_,
        measures_,
        _assemble_vector_,
        measure_prop_name,
        MeasureCls,
    ):
        # --- each measure property delegates to _assemble_vector() with the
        # --- corresponding measure object's blocks ---
        _measures_prop_.return_value = measures_
        setattr(
            measures_,
            measure_prop_name,
            instance_mock(request, MeasureCls, blocks=("A", "B")),
        )
        _assemble_vector_.return_value = np.array([1, 2, 3, 4, 5])
        assembler = StripeAssembler(None, None, None, None)
        value = getattr(assembler, measure_prop_name)
        _assemble_vector_.assert_called_once_with(assembler, ("A", "B"))
        assert value.tolist() == [1, 2, 3, 4, 5]
    def it_knows_the_inserted_row_idxs(self, _row_order_prop_):
        # --- negative positions in row-order mark inserted (subtotal) rows ---
        _row_order_prop_.return_value = np.array([-1, 0, 3, -2, 4, 1])
        assembler = StripeAssembler(None, None, None, None)
        assert assembler.inserted_row_idxs == (0, 3)
    def it_knows_the_row_count(self, _row_order_prop_):
        _row_order_prop_.return_value = np.array([1, 2, 3, 4, 5])
        assembler = StripeAssembler(None, None, None, None)
        assert assembler.row_count == 5
    def it_knows_the_row_labels(self, rows_dimension_, _row_order_prop_):
        # --- labels come out in row order; negative idxs index subtotals ---
        rows_dimension_.element_labels = ("baz", "foo", "bar")
        rows_dimension_.subtotal_labels = ("bing", "bada")
        _row_order_prop_.return_value = np.array([1, 2, 0, -1, -2])
        assembler = StripeAssembler(None, rows_dimension_, None, None)
        assert assembler.row_labels.tolist() == ["foo", "bar", "baz", "bada", "bing"]
    @pytest.mark.parametrize(
        "order, expected_fills",
        (
            ((2, -2, 0, -1), ("#f00ba5", "STF1", "#000000", "STF2")),
            ((0, 1, 2, -2, -1), ("#000000", "#111111", "#f00ba5", "STF1", "STF2")),
            ((-2, -1, 0, 1, 2), ("STF1", "STF2", "#000000", "#111111", "#f00ba5")),
            ((-1, -2, 2, 1, 0), ("STF2", "STF1", "#f00ba5", "#111111", "#000000")),
        ),
    )
    def it_knows_the_rows_dimension_fills(
        self, request, rows_dimension_, _row_order_prop_, order, expected_fills
    ):
        element_fills = ("#000000", "#111111", "#f00ba5")
        subtotal_fills = ("STF1", "STF2")
        rows_dimension_.valid_elements = tuple(
            instance_mock(request, _Element, fill=fill) for fill in element_fills
        )
        rows_dimension_.subtotals = tuple(
            instance_mock(request, _Subtotal, fill=fill) for fill in subtotal_fills
        )
        _row_order_prop_.return_value = order
        assembler = StripeAssembler(None, rows_dimension_, None, None)
        assert assembler.rows_dimension_fills == expected_fills
    def it_knows_the_scale_mean(self, _measures_prop_, measures_, scaled_counts_):
        # --- scale statistics are simple pass-throughs to scaled_counts ---
        scaled_counts_.scale_mean = 3
        measures_.scaled_counts = scaled_counts_
        _measures_prop_.return_value = measures_
        assembler = StripeAssembler(None, None, None, None)
        assert assembler.scale_mean == 3
    def it_knows_the_scale_median(self, _measures_prop_, measures_, scaled_counts_):
        scaled_counts_.scale_median = 4
        measures_.scaled_counts = scaled_counts_
        _measures_prop_.return_value = measures_
        assembler = StripeAssembler(None, None, None, None)
        assert assembler.scale_median == 4
    def it_knows_the_scale_stddev(self, _measures_prop_, measures_, scaled_counts_):
        scaled_counts_.scale_stddev = 5
        measures_.scaled_counts = scaled_counts_
        _measures_prop_.return_value = measures_
        assembler = StripeAssembler(None, None, None, None)
        assert assembler.scale_stddev == 5
    def it_knows_the_scale_stderr(self, _measures_prop_, measures_, scaled_counts_):
        scaled_counts_.scale_stderr = 6
        measures_.scaled_counts = scaled_counts_
        _measures_prop_.return_value = measures_
        assembler = StripeAssembler(None, None, None, None)
        assert assembler.scale_stderr == 6
    def it_knows_the_table_base_range(self, request, _measures_prop_, measures_):
        measures_.unweighted_bases = instance_mock(
            request, _UnweightedBases, table_base_range=np.array([50, 100])
        )
        _measures_prop_.return_value = measures_
        assembler = StripeAssembler(None, None, None, None)
        assert assembler.table_base_range.tolist() == [50, 100]
    def it_knows_the_table_margin_range(self, request, _measures_prop_, measures_):
        measures_.weighted_bases = instance_mock(
            request, _WeightedBases, table_margin_range=np.array([50.5, 100.1])
        )
        _measures_prop_.return_value = measures_
        assembler = StripeAssembler(None, None, None, None)
        assert assembler.table_margin_range.tolist() == [50.5, 100.1]
    def it_can_assemble_a_vector_to_help(self, _row_order_prop_):
        # --- _assemble_vector interleaves base and subtotal values per order ---
        base_values = np.array([1, 2, 3, 4])
        subtotal_values = (3, 5, 7)
        blocks = (base_values, subtotal_values)
        _row_order_prop_.return_value = np.array([-3, 1, 0, -2, 3, 2, -1])
        assembler = StripeAssembler(None, None, None, None)
        assert assembler._assemble_vector(blocks).tolist() == [3, 2, 1, 5, 4, 3, 7]
    def it_constructs_its_measures_collaborator_object_to_help(
        self, request, cube_, rows_dimension_, measures_
    ):
        StripeMeasures_ = class_mock(
            request,
            "cr.cube.stripe.assembler.StripeMeasures",
            return_value=measures_,
        )
        assembler = StripeAssembler(
            cube_, rows_dimension_, ca_as_0th=False, slice_idx=7
        )
        measures = assembler._measures
        StripeMeasures_.assert_called_once_with(cube_, rows_dimension_, False, 7)
        assert measures is measures_
    def it_knows_the_row_order_to_help(
        self, request, rows_dimension_, _measures_prop_, measures_
    ):
        _measures_prop_.return_value = measures_
        _BaseOrderHelper_ = class_mock(
            request, "cr.cube.stripe.assembler._BaseOrderHelper"
        )
        _BaseOrderHelper_.display_order.return_value = (-1, 1, -2, 2, -3, 3)
        assembler = StripeAssembler(None, rows_dimension_, None, None)
        row_order = assembler._row_order
        _BaseOrderHelper_.display_order.assert_called_once_with(
            rows_dimension_, measures_
        )
        assert row_order.tolist() == [-1, 1, -2, 2, -3, 3]
    # fixture components ---------------------------------------------
    @pytest.fixture
    def _assemble_vector_(self, request):
        return method_mock(request, StripeAssembler, "_assemble_vector")
    @pytest.fixture
    def cube_(self, request):
        return instance_mock(request, Cube)
    @pytest.fixture
    def measures_(self, request):
        return instance_mock(request, StripeMeasures)
    @pytest.fixture
    def _measures_prop_(self, request):
        return property_mock(request, StripeAssembler, "_measures")
    @pytest.fixture
    def _row_order_prop_(self, request):
        return property_mock(request, StripeAssembler, "_row_order")
    @pytest.fixture
    def rows_dimension_(self, request):
        return instance_mock(request, Dimension)
    @pytest.fixture
    def scaled_counts_(self, request):
        return instance_mock(request, _ScaledCounts)
class Describe_BaseOrderHelper:
    """Unit-test suite for `cr.cube.stripe.assembler._BaseOrderHelper` object."""
    @pytest.mark.parametrize(
        "collation_method, HelperCls",
        (
            (CM.UNIVARIATE_MEASURE, _SortByMeasureHelper),
            (CM.LABEL, _SortByLabelHelper),
            (CM.EXPLICIT_ORDER, _OrderHelper),
            (CM.PAYLOAD_ORDER, _OrderHelper),
        ),
    )
    def it_dispatches_to_the_right_order_helper(
        self, request, measures_, collation_method, HelperCls
    ):
        # --- display_order() picks the helper class by collation method ---
        rows_dimension_ = instance_mock(
            request,
            Dimension,
            order_spec=instance_mock(
                request, _OrderSpec, collation_method=collation_method
            ),
        )
        order_helper_ = instance_mock(
            request, HelperCls, _display_order=np.array([-2, 1, -1, 2])
        )
        HelperCls_ = class_mock(
            request,
            "cr.cube.stripe.assembler.%s" % HelperCls.__name__,
            return_value=order_helper_,
        )
        display_order = _BaseOrderHelper.display_order(rows_dimension_, measures_)
        HelperCls_.assert_called_once_with(rows_dimension_, measures_)
        assert display_order.tolist() == [-2, 1, -1, 2]
    @pytest.mark.parametrize(
        "pruning_base, expected_value",
        (([1, 1, 1], ()), ([1, 0, 1], (1,)), ([0, 0, 0], (0, 1, 2))),
    )
    def it_knows_the_empty_row_idxs_to_help(
        self, measures_, pruning_base, expected_value
    ):
        # --- rows whose pruning base is zero count as empty ---
        measures_.pruning_base = np.array(pruning_base)
        order_helper = _BaseOrderHelper(None, measures_)
        assert order_helper._empty_row_idxs == expected_value
    # fixture components ---------------------------------------------
    @pytest.fixture
    def measures_(self, request):
        return instance_mock(request, StripeMeasures)
class Describe_OrderHelper:
    """Unit test suite for `cr.cube.stripe.assembler._OrderHelper` object."""
    @pytest.mark.parametrize(
        "collation_method, collator_class_name",
        (
            (CM.PAYLOAD_ORDER, "PayloadOrderCollator"),
            (CM.EXPLICIT_ORDER, "ExplicitOrderCollator"),
        ),
    )
    def it_computes_the_order_of_a_rows_dimension_to_help(
        self, request, collation_method, collator_class_name
    ):
        # --- _display_order delegates to the collator matching the
        # --- dimension's collation method, passing the empty-row idxs ---
        rows_dimension_ = instance_mock(
            request,
            Dimension,
            order_spec=instance_mock(
                request, _OrderSpec, collation_method=collation_method
            ),
        )
        CollatorCls_ = class_mock(
            request, "cr.cube.stripe.assembler.%s" % collator_class_name
        )
        CollatorCls_.display_order.return_value = (1, -2, 3, 5, -1)
        property_mock(request, _OrderHelper, "_empty_row_idxs", return_value=(2, 4, 6))
        order_helper = _OrderHelper(rows_dimension_, None)
        display_order = order_helper._display_order
        CollatorCls_.display_order.assert_called_once_with(rows_dimension_, (2, 4, 6))
        assert display_order == (1, -2, 3, 5, -1)
class Describe_BaseSortByValueHelper:
    """Unit test suite for `cr.cube.strip.assembler._BaseSortByValueHelper`."""
    def it_computes_the_display_order_to_help(
        self,
        dimension_,
        _element_values_prop_,
        _subtotal_values_prop_,
        _empty_row_idxs_prop_,
        SortByValueCollator_,
    ):
        # --- return type of first two is ndarray in real life, but
        # --- assert_called_once_with() won't match on those, so use list instead.
        _element_values_prop_.return_value = [16, 3, 12]
        _subtotal_values_prop_.return_value = [15, 19]
        _empty_row_idxs_prop_.return_value = ()
        SortByValueCollator_.display_order.return_value = (-1, -2, 0, 2, 1)
        order_helper = _BaseSortByValueHelper(dimension_, None)
        order = order_helper._display_order
        SortByValueCollator_.display_order.assert_called_once_with(
            dimension_, [16, 3, 12], [15, 19], ()
        )
        assert order == (-1, -2, 0, 2, 1)
    def but_it_falls_back_to_payload_order_on_value_error(
        self,
        request,
        dimension_,
        _element_values_prop_,
        _subtotal_values_prop_,
        _empty_row_idxs_prop_,
        SortByValueCollator_,
    ):
        # --- when sort-by-value fails, payload order is used instead ---
        _element_values_prop_.return_value = None
        _subtotal_values_prop_.return_value = None
        _empty_row_idxs_prop_.return_value = (4, 2)
        SortByValueCollator_.display_order.side_effect = ValueError
        PayloadOrderCollator_ = class_mock(
            request, "cr.cube.stripe.assembler.PayloadOrderCollator"
        )
        PayloadOrderCollator_.display_order.return_value = (1, 2, 3, 4)
        order_helper = _BaseSortByValueHelper(dimension_, None)
        order = order_helper._display_order
        PayloadOrderCollator_.display_order.assert_called_once_with(dimension_, (4, 2))
        assert order == (1, 2, 3, 4)
    # fixture components ---------------------------------------------
    @pytest.fixture
    def dimension_(self, request):
        return instance_mock(request, Dimension)
    @pytest.fixture
    def _element_values_prop_(self, request):
        return property_mock(request, _BaseSortByValueHelper, "_element_values")
    @pytest.fixture
    def _empty_row_idxs_prop_(self, request):
        return property_mock(request, _BaseSortByValueHelper, "_empty_row_idxs")
    @pytest.fixture
    def SortByValueCollator_(self, request):
        return class_mock(request, "cr.cube.stripe.assembler.SortByValueCollator")
    @pytest.fixture
    def _subtotal_values_prop_(self, request):
        return property_mock(request, _BaseSortByValueHelper, "_subtotal_values")
class Describe_SortByLabelHelper:
    """Unit test suite for `cr.cube.strip.assembler._SortByLabelHelper`."""
    def it_extracts_the_element_values_to_help(self, dimension_):
        # element values are simply the dimension's element labels
        dimension_.element_labels = ["b", "a", "c"]
        helper = _SortByLabelHelper(dimension_, None)
        assert helper._element_values.tolist() == ["b", "a", "c"]
    def it_extracts_the_subtotal_values_to_help(self, dimension_):
        # subtotal values are simply the dimension's subtotal labels
        dimension_.subtotal_labels = ["b", "a", "c"]
        helper = _SortByLabelHelper(dimension_, None)
        assert helper._subtotal_values.tolist() == ["b", "a", "c"]
    # fixture components ---------------------------------------------
    @pytest.fixture
    def dimension_(self, request):
        return instance_mock(request, Dimension)
class Describe_SortByMeasureHelper:
    """Unit test suite for `cr.cube.strip.assembler._SortByMeasureHelper`."""
    def it_extracts_the_element_values_to_help(self, _measure_prop_, measure_):
        # --- element values come from the base block (first of blocks) ---
        _measure_prop_.return_value = measure_
        measure_.blocks = [np.arange(5), None]
        order_helper = _SortByMeasureHelper(None, None)
        assert order_helper._element_values.tolist() == [0, 1, 2, 3, 4]
    @pytest.mark.parametrize(
        "json_name, internal_name",
        (
            ("base_unweighted", "unweighted_bases"),
            ("base_weighted", "weighted_bases"),
            ("count_unweighted", "unweighted_counts"),
            ("count_weighted", "weighted_counts"),
            ("mean", "means"),
            ("percent", "table_proportions"),
            ("percent_stddev", "table_proportion_stddevs"),
            ("percent_moe", "table_proportion_stderrs"),
            ("population", "population_proportions"),
            ("population_moe", "population_proportion_stderrs"),
            ("sum", "sums"),
        ),
    )
    def it_retrieves_the_right_measure_object_to_help(
        self,
        request,
        _order_spec_prop_,
        order_spec_,
        measure_,
        json_name,
        internal_name,
    ):
        # --- the JSON measure keyname maps to the internal measure attr ---
        measures_ = instance_mock(request, StripeMeasures)
        setattr(measures_, internal_name, measure_)
        _order_spec_prop_.return_value = order_spec_
        order_spec_.measure_keyname = json_name
        order_helper = _SortByMeasureHelper(None, measures_)
        assert order_helper._measure is measure_
    def but_it_raises_when_the_sort_measure_is_not_supported(
        self, _order_spec_prop_, order_spec_
    ):
        _order_spec_prop_.return_value = order_spec_
        order_spec_.measure_keyname = "foobar"
        order_helper = _SortByMeasureHelper(None, None)
        with pytest.raises(ValueError) as e:
            order_helper._measure
        assert str(e.value) == "sort-by-value for measure 'foobar' is not yet supported"
    def it_extracts_the_subtotal_values_to_help(self, _measure_prop_, measure_):
        # --- subtotal values come from the subtotal block (second of blocks) ---
        _measure_prop_.return_value = measure_
        measure_.blocks = [None, np.arange(3)]
        order_helper = _SortByMeasureHelper(None, None)
        assert order_helper._subtotal_values.tolist() == [0, 1, 2]
    # fixture components ---------------------------------------------
    @pytest.fixture
    def measure_(self, request):
        return instance_mock(request, _BaseSecondOrderMeasure)
    @pytest.fixture
    def _measure_prop_(self, request):
        return property_mock(request, _SortByMeasureHelper, "_measure")
    @pytest.fixture
    def order_spec_(self, request):
        return instance_mock(request, _OrderSpec)
    @pytest.fixture
    def _order_spec_prop_(self, request):
        return property_mock(request, _SortByMeasureHelper, "_order_spec")
| [
"cr.cube.stripe.assembler._SortByLabelHelper",
"cr.cube.stripe.assembler._BaseOrderHelper.display_order",
"cr.cube.stripe.assembler._OrderHelper",
"cr.cube.stripe.assembler.StripeAssembler",
"cr.cube.stripe.assembler._SortByMeasureHelper",
"cr.cube.stripe.assembler._BaseSortByValueHelper",
"pytest.mark.... | [((994, 1529), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""measure_prop_name, MeasureCls"""', "(('means', _Means), ('population_proportions', _PopulationProportions), (\n 'population_proportion_stderrs', _PopulationProportionStderrs), (\n 'table_proportion_stddevs', _TableProportionStddevs), (\n 'table_proportion_stderrs', _TableProportionStderrs), (\n 'table_proportions', _TableProportions), ('unweighted_bases',\n _UnweightedBases), ('unweighted_counts', _UnweightedCounts), (\n 'weighted_bases', _WeightedBases), ('weighted_counts', _WeightedCounts))"], {}), "('measure_prop_name, MeasureCls', (('means', _Means),\n ('population_proportions', _PopulationProportions), (\n 'population_proportion_stderrs', _PopulationProportionStderrs), (\n 'table_proportion_stddevs', _TableProportionStddevs), (\n 'table_proportion_stderrs', _TableProportionStderrs), (\n 'table_proportions', _TableProportions), ('unweighted_bases',\n _UnweightedBases), ('unweighted_counts', _UnweightedCounts), (\n 'weighted_bases', _WeightedBases), ('weighted_counts', _WeightedCounts)))\n", (1017, 1529), False, 'import pytest\n'), ((3251, 3592), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order, expected_fills"""', "(((2, -2, 0, -1), ('#f00ba5', 'STF1', '#000000', 'STF2')), ((0, 1, 2, -2, -\n 1), ('#000000', '#111111', '#f00ba5', 'STF1', 'STF2')), ((-2, -1, 0, 1,\n 2), ('STF1', 'STF2', '#000000', '#111111', '#f00ba5')), ((-1, -2, 2, 1,\n 0), ('STF2', 'STF1', '#f00ba5', '#111111', '#000000')))"], {}), "('order, expected_fills', (((2, -2, 0, -1), (\n '#f00ba5', 'STF1', '#000000', 'STF2')), ((0, 1, 2, -2, -1), ('#000000',\n '#111111', '#f00ba5', 'STF1', 'STF2')), ((-2, -1, 0, 1, 2), ('STF1',\n 'STF2', '#000000', '#111111', '#f00ba5')), ((-1, -2, 2, 1, 0), ('STF2',\n 'STF1', '#f00ba5', '#111111', '#000000'))))\n", (3274, 3592), False, 'import pytest\n'), ((9090, 9308), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""collation_method, 
HelperCls"""', '((CM.UNIVARIATE_MEASURE, _SortByMeasureHelper), (CM.LABEL,\n _SortByLabelHelper), (CM.EXPLICIT_ORDER, _OrderHelper), (CM.\n PAYLOAD_ORDER, _OrderHelper))'], {}), "('collation_method, HelperCls', ((CM.\n UNIVARIATE_MEASURE, _SortByMeasureHelper), (CM.LABEL,\n _SortByLabelHelper), (CM.EXPLICIT_ORDER, _OrderHelper), (CM.\n PAYLOAD_ORDER, _OrderHelper)))\n", (9113, 9308), False, 'import pytest\n'), ((10221, 10343), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pruning_base, expected_value"""', '(([1, 1, 1], ()), ([1, 0, 1], (1,)), ([0, 0, 0], (0, 1, 2)))'], {}), "('pruning_base, expected_value', (([1, 1, 1], ()), (\n [1, 0, 1], (1,)), ([0, 0, 0], (0, 1, 2))))\n", (10244, 10343), False, 'import pytest\n'), ((10939, 11104), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""collation_method, collator_class_name"""', "((CM.PAYLOAD_ORDER, 'PayloadOrderCollator'), (CM.EXPLICIT_ORDER,\n 'ExplicitOrderCollator'))"], {}), "('collation_method, collator_class_name', ((CM.\n PAYLOAD_ORDER, 'PayloadOrderCollator'), (CM.EXPLICIT_ORDER,\n 'ExplicitOrderCollator')))\n", (10962, 11104), False, 'import pytest\n'), ((15911, 16407), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""json_name, internal_name"""', "(('base_unweighted', 'unweighted_bases'), ('base_weighted',\n 'weighted_bases'), ('count_unweighted', 'unweighted_counts'), (\n 'count_weighted', 'weighted_counts'), ('mean', 'means'), ('percent',\n 'table_proportions'), ('percent_stddev', 'table_proportion_stddevs'), (\n 'percent_moe', 'table_proportion_stderrs'), ('population',\n 'population_proportions'), ('population_moe',\n 'population_proportion_stderrs'), ('sum', 'sums'))"], {}), "('json_name, internal_name', (('base_unweighted',\n 'unweighted_bases'), ('base_weighted', 'weighted_bases'), (\n 'count_unweighted', 'unweighted_counts'), ('count_weighted',\n 'weighted_counts'), ('mean', 'means'), ('percent', 'table_proportions'),\n ('percent_stddev', 'table_proportion_stddevs'), 
('percent_moe',\n 'table_proportion_stderrs'), ('population', 'population_proportions'),\n ('population_moe', 'population_proportion_stderrs'), ('sum', 'sums')))\n", (15934, 16407), False, 'import pytest\n'), ((2084, 2109), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2092, 2109), True, 'import numpy as np\n'), ((2130, 2169), 'cr.cube.stripe.assembler.StripeAssembler', 'StripeAssembler', (['None', 'None', 'None', 'None'], {}), '(None, None, None, None)\n', (2145, 2169), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((2453, 2483), 'numpy.array', 'np.array', (['[-1, 0, 3, -2, 4, 1]'], {}), '([-1, 0, 3, -2, 4, 1])\n', (2461, 2483), True, 'import numpy as np\n'), ((2504, 2543), 'cr.cube.stripe.assembler.StripeAssembler', 'StripeAssembler', (['None', 'None', 'None', 'None'], {}), '(None, None, None, None)\n', (2519, 2543), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((2695, 2720), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2703, 2720), True, 'import numpy as np\n'), ((2741, 2780), 'cr.cube.stripe.assembler.StripeAssembler', 'StripeAssembler', (['None', 'None', 'None', 'None'], {}), '(None, None, None, None)\n', (2756, 2780), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((3059, 3086), 'numpy.array', 'np.array', (['[1, 2, 0, -1, -2]'], {}), '([1, 2, 0, -1, -2])\n', (3067, 3086), True, 'import numpy as np\n'), ((3107, 3157), 'cr.cube.stripe.assembler.StripeAssembler', 'StripeAssembler', (['None', 'rows_dimension_', 'None', 'None'], {}), '(None, rows_dimension_, None, None)\n', (3122, 3157), False, 'from cr.cube.stripe.assembler import StripeAssembler, 
_BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((4231, 4281), 'cr.cube.stripe.assembler.StripeAssembler', 'StripeAssembler', (['None', 'rows_dimension_', 'None', 'None'], {}), '(None, rows_dimension_, None, None)\n', (4246, 4281), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((4587, 4626), 'cr.cube.stripe.assembler.StripeAssembler', 'StripeAssembler', (['None', 'None', 'None', 'None'], {}), '(None, None, None, None)\n', (4602, 4626), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((4913, 4952), 'cr.cube.stripe.assembler.StripeAssembler', 'StripeAssembler', (['None', 'None', 'None', 'None'], {}), '(None, None, None, None)\n', (4928, 4952), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((5241, 5280), 'cr.cube.stripe.assembler.StripeAssembler', 'StripeAssembler', (['None', 'None', 'None', 'None'], {}), '(None, None, None, None)\n', (5256, 5280), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((5569, 5608), 'cr.cube.stripe.assembler.StripeAssembler', 'StripeAssembler', (['None', 'None', 'None', 'None'], {}), '(None, None, None, None)\n', (5584, 5608), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((5943, 5982), 'cr.cube.stripe.assembler.StripeAssembler', 'StripeAssembler', (['None', 'None', 'None', 'None'], {}), '(None, None, None, None)\n', (5958, 5982), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, 
_BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((6342, 6381), 'cr.cube.stripe.assembler.StripeAssembler', 'StripeAssembler', (['None', 'None', 'None', 'None'], {}), '(None, None, None, None)\n', (6357, 6381), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((6542, 6564), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (6550, 6564), True, 'import numpy as np\n'), ((6689, 6723), 'numpy.array', 'np.array', (['[-3, 1, 0, -2, 3, 2, -1]'], {}), '([-3, 1, 0, -2, 3, 2, -1])\n', (6697, 6723), True, 'import numpy as np\n'), ((6744, 6783), 'cr.cube.stripe.assembler.StripeAssembler', 'StripeAssembler', (['None', 'None', 'None', 'None'], {}), '(None, None, None, None)\n', (6759, 6783), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((7178, 7247), 'cr.cube.stripe.assembler.StripeAssembler', 'StripeAssembler', (['cube_', 'rows_dimension_'], {'ca_as_0th': '(False)', 'slice_idx': '(7)'}), '(cube_, rows_dimension_, ca_as_0th=False, slice_idx=7)\n', (7193, 7247), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((7806, 7856), 'cr.cube.stripe.assembler.StripeAssembler', 'StripeAssembler', (['None', 'rows_dimension_', 'None', 'None'], {}), '(None, rows_dimension_, None, None)\n', (7821, 7856), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((10028, 10086), 'cr.cube.stripe.assembler._BaseOrderHelper.display_order', '_BaseOrderHelper.display_order', (['rows_dimension_', 'measures_'], {}), '(rows_dimension_, measures_)\n', (10058, 10086), False, 'from 
cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((10501, 10523), 'numpy.array', 'np.array', (['pruning_base'], {}), '(pruning_base)\n', (10509, 10523), True, 'import numpy as np\n'), ((10547, 10580), 'cr.cube.stripe.assembler._BaseOrderHelper', '_BaseOrderHelper', (['None', 'measures_'], {}), '(None, measures_)\n', (10563, 10580), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((11797, 11832), 'cr.cube.stripe.assembler._OrderHelper', '_OrderHelper', (['rows_dimension_', 'None'], {}), '(rows_dimension_, None)\n', (11809, 11832), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((12767, 12807), 'cr.cube.stripe.assembler._BaseSortByValueHelper', '_BaseSortByValueHelper', (['dimension_', 'None'], {}), '(dimension_, None)\n', (12789, 12807), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((13705, 13745), 'cr.cube.stripe.assembler._BaseSortByValueHelper', '_BaseSortByValueHelper', (['dimension_', 'None'], {}), '(dimension_, None)\n', (13727, 13745), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((14943, 14979), 'cr.cube.stripe.assembler._SortByLabelHelper', '_SortByLabelHelper', (['dimension_', 'None'], {}), '(dimension_, None)\n', (14961, 14979), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((15197, 15233), 'cr.cube.stripe.assembler._SortByLabelHelper', '_SortByLabelHelper', (['dimension_', 
'None'], {}), '(dimension_, None)\n', (15215, 15233), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((15799, 15831), 'cr.cube.stripe.assembler._SortByMeasureHelper', '_SortByMeasureHelper', (['None', 'None'], {}), '(None, None)\n', (15819, 15831), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((16985, 17022), 'cr.cube.stripe.assembler._SortByMeasureHelper', '_SortByMeasureHelper', (['None', 'measures_'], {}), '(None, measures_)\n', (17005, 17022), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((17311, 17343), 'cr.cube.stripe.assembler._SortByMeasureHelper', '_SortByMeasureHelper', (['None', 'None'], {}), '(None, None)\n', (17331, 17343), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((17713, 17745), 'cr.cube.stripe.assembler._SortByMeasureHelper', '_SortByMeasureHelper', (['None', 'None'], {}), '(None, None)\n', (17733, 17745), False, 'from cr.cube.stripe.assembler import StripeAssembler, _BaseOrderHelper, _BaseSortByValueHelper, _OrderHelper, _SortByLabelHelper, _SortByMeasureHelper\n'), ((15756, 15768), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (15765, 15768), True, 'import numpy as np\n'), ((17358, 17383), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17371, 17383), False, 'import pytest\n'), ((17676, 17688), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (17685, 17688), True, 'import numpy as np\n'), ((5844, 5863), 'numpy.array', 'np.array', (['[50, 100]'], {}), '([50, 100])\n', (5852, 5863), True, 'import numpy as np\n'), ((6239, 6262), 'numpy.array', 
'np.array', (['[50.5, 100.1]'], {}), '([50.5, 100.1])\n', (6247, 6262), True, 'import numpy as np\n'), ((9800, 9824), 'numpy.array', 'np.array', (['[-2, 1, -1, 2]'], {}), '([-2, 1, -1, 2])\n', (9808, 9824), True, 'import numpy as np\n')] |
import os
import subprocess
from Chaos import Chaos
import random
from numba.core.decorators import jit
import numpy as np
from anim import *
from numba import njit
from functools import partial
OUTPUT_DIR = "gifs"  # root folder under which numbered run directories are created (see Main.outDir)
def randomCoef():
    """Draw one polynomial coefficient: +/- a random value in [0, 3), or 0 with 3-in-5 odds."""
    magnitude = round(random.random() * 3, 6)
    return random.choice([-magnitude, magnitude, 0, 0, 0])
def genCoefs(numCoefs):
    """Return numCoefs random coefficients, redrawing until their sum is nonzero."""
    while True:
        candidate = [randomCoef() for _ in range(numCoefs)]
        if np.sum(candidate) != 0:
            return candidate
# Random coefficient sets for the three coupled quadratic maps dx/dy/dz below.
xcoefs = genCoefs(10)
ycoefs = genCoefs(10)
zcoefs = genCoefs(10)
# numba @jit left disabled upstream; plain-Python evaluation.
def dx(x, y, z):
    """Evaluate the x-map: a full quadratic polynomial in (x, y, z) using the global xcoefs."""
    terms = (x * x, y * y, z * z, x * y, x * z, y * z, x, y, z, 1.0)
    total = 0.0
    for coef, term in zip(xcoefs, terms):
        total += coef * term
    return total
# numba @jit left disabled upstream; plain-Python evaluation.
def dy(x, y, z):
    """Evaluate the y-map: a full quadratic polynomial in (x, y, z) using the global ycoefs."""
    terms = (x * x, y * y, z * z, x * y, x * z, y * z, x, y, z, 1.0)
    total = 0.0
    for coef, term in zip(ycoefs, terms):
        total += coef * term
    return total
# numba @jit left disabled upstream; plain-Python evaluation.
def dz(x, y, z):
    """Evaluate the z-map: a full quadratic polynomial in (x, y, z) using the global zcoefs."""
    terms = (x * x, y * y, z * z, x * y, x * z, y * z, x, y, z, 1.0)
    total = 0.0
    for coef, term in zip(zcoefs, terms):
        total += coef * term
    return total
# numba @jit left disabled upstream.
def fillNextState(states, idx, dt):
    """Write column `idx` of every particle track from the state `dt` steps earlier.

    Values can blow up to inf/NaN for divergent coefficient sets; a clamping
    pass existed here once and was commented out upstream.
    """
    source = max(0, idx - dt)
    for particle in range(states.shape[0]):
        x, y, z = states[particle][source]
        states[particle][idx] = (dx(x, y, z), dy(x, y, z), dz(x, y, z))
    return states
# numba @jit left disabled upstream.
def fillFirstStates(states, dt):
    """Seed the first `dt` columns of each track by linearly interpolating
    from the initial point to its first mapped image (dx, dy, dz)."""
    for particle in range(states.shape[0]):
        x0, y0, z0 = states[particle][0]
        x1 = dx(x0, y0, z0)
        y1 = dy(x0, y0, z0)
        z1 = dz(x0, y0, z0)
        ramp_x = np.linspace(x0, x1, dt)
        ramp_y = np.linspace(y0, y1, dt)
        ramp_z = np.linspace(z0, z1, dt)
        for step in range(ramp_x.shape[0]):
            states[particle][step] = np.array([ramp_x[step], ramp_y[step], ramp_z[step]])
    return states
# numba @jit left disabled upstream.
def fillAllStates(states, dt):
    """Populate every time step: interpolated warm-up, then iterated mapping."""
    states = fillFirstStates(states, dt)
    for column in range(dt, states.shape[1]):
        fillNextState(states, column, dt - 1)
    return states
class Main(object):
    """Reserve an output directory, build the Scene, and run the particle simulation.

    NOTE(review): `Scene` and `BLACK` come from `anim` via star import — their
    signatures are not visible here; confirm against anim.py.
    """
    def __init__(self, sceneSize=1000, sceneRange=20, trailLength=10):
        # Reserve a fresh numbered run directory first so log() has a target.
        self.dir = self.outDir()
        # Square canvas with symmetric x/y ranges centred on the origin.
        self.scene = Scene(sceneSize, sceneSize, (-sceneRange, sceneRange), (-sceneRange, sceneRange), BLACK, trailLength)
        # scene.createPartials(states)
        # self.createGif()
    def outDir(self):
        """Create and return the first unused OUTPUT_DIR/<n> directory.

        NOTE(review): scan-then-mkdir is racy if two runs start concurrently —
        confirm single-process use.
        """
        dirNum = 0
        while os.path.isdir(OUTPUT_DIR+f"/{dirNum}"):
            dirNum += 1
        os.mkdir(OUTPUT_DIR+f"/{dirNum}")
        return OUTPUT_DIR+f"/{dirNum}"
    def log(self, xcoefs, ycoefs, zcoefs):
        """Append the dx/dy/dz polynomial equations and raw coefficients to log.log."""
        with open(self.dir+'/log.log', 'a') as f:
            f.write(f"""
dx = {xcoefs[0]}x^2 + {xcoefs[1]}y^2 + {xcoefs[2]}z^2 + {xcoefs[3]}xy + {xcoefs[4]}xz + {xcoefs[5]}yz + {xcoefs[6]}x + {xcoefs[7]}y + {xcoefs[8]}z + {xcoefs[9]}
xcoefs = {", ".join([ str(c) for c in xcoefs ])}
dy = {ycoefs[0]}x^2 + {ycoefs[1]}y^2 + {ycoefs[2]}z^2 + {ycoefs[3]}xy + {ycoefs[4]}xz + {ycoefs[5]}yz + {ycoefs[6]}x + {ycoefs[7]}y + {ycoefs[8]}z + {ycoefs[9]}
ycoefs = {", ".join([ str(c) for c in ycoefs ])}
dz = {zcoefs[0]}x^2 + {zcoefs[1]}y^2 + {zcoefs[2]}z^2 + {zcoefs[3]}xy + {zcoefs[4]}xz + {zcoefs[5]}yz + {zcoefs[6]}x + {zcoefs[7]}y + {zcoefs[8]}z + {zcoefs[9]}
zcoefs = {", ".join([ str(c) for c in zcoefs ])}
--------------------------------------------------
            """)
    def run(self, numParts, dt, t):
        """Log the coefficients, then simulate `numParts` particles over
        t*dt+dt columns and return the filled state array."""
        self.log(xcoefs, ycoefs, zcoefs)
        # One extra block of dt columns so the warm-up interpolation fits.
        states = np.zeros((numParts, t*dt+dt, 3))
        print("States:", states.shape)
        # Random initial points with coordinates in [0, 1.5).
        states[:,0] = np.array([
            (round(1.5*random.random(), 6), round(1.5*random.random(), 6), round(1.5*random.random(), 6))
            for _ in range(numParts)
        ])
        states = fillAllStates(states, dt)
        return states
    # def initStates(self):
    #     return [ (x, y, z)
    #         for x in np.arange(-1.5, 1.5, 0.1)
    #         for y in np.arange(-1.5, 1.5, 0.1)
    #         for z in np.arange(-1.5, 1.5, 1)
    #     ]
    # def setupStates(self):
    #     states = np.empty((len(self.state0s), self.dt*self.totalTime, 3))
    #     states[:,1] = self.state0s
    #     return states
    # @jit
    # def genStates(self, states, numParts, duration):
    #     for i in range(1, numParts):
    #         for p in range(duration):
    #             states[p][i][0] = self.dx(states[p][i-1][0])
    #             states[p][i][1] = self.dx(states[p][i-1][1])
    #             states[p][i][2] = self.dx(states[p][i-1][2])
    #     return states
    # def createGif(self):
    #     gifNum = 0
    #     for i in range(1000):
    #         if not os.path.isfile(f"chaos{i}.gif"):
    #             gifNum = i
    #             break
    #     subprocess.run(["./pngToGif.sh", "partials/", f"{self.dir}/chaos{i}.gif"])
if __name__ == "__main__":
    # 1000 particles, 100 sub-steps per time unit, 10 time units.
    states = Main().run(1000, 100, 10)
    print(states)
print(states.shape) | [
"random.choice",
"numpy.sum",
"numpy.linspace",
"os.path.isdir",
"numpy.zeros",
"os.mkdir",
"numpy.array",
"random.random"
] | [((284, 315), 'random.choice', 'random.choice', (['[-c, c, 0, 0, 0]'], {}), '([-c, c, 0, 0, 0])\n', (297, 315), False, 'import random\n'), ((407, 420), 'numpy.sum', 'np.sum', (['coefs'], {}), '(coefs)\n', (413, 420), True, 'import numpy as np\n'), ((2277, 2300), 'numpy.linspace', 'np.linspace', (['x0', 'x1', 'dt'], {}), '(x0, x1, dt)\n', (2288, 2300), True, 'import numpy as np\n'), ((2314, 2337), 'numpy.linspace', 'np.linspace', (['y0', 'y1', 'dt'], {}), '(y0, y1, dt)\n', (2325, 2337), True, 'import numpy as np\n'), ((2351, 2374), 'numpy.linspace', 'np.linspace', (['z0', 'z1', 'dt'], {}), '(z0, z1, dt)\n', (2362, 2374), True, 'import numpy as np\n'), ((3043, 3083), 'os.path.isdir', 'os.path.isdir', (["(OUTPUT_DIR + f'/{dirNum}')"], {}), "(OUTPUT_DIR + f'/{dirNum}')\n", (3056, 3083), False, 'import os\n'), ((3115, 3150), 'os.mkdir', 'os.mkdir', (["(OUTPUT_DIR + f'/{dirNum}')"], {}), "(OUTPUT_DIR + f'/{dirNum}')\n", (3123, 3150), False, 'import os\n'), ((4184, 4220), 'numpy.zeros', 'np.zeros', (['(numParts, t * dt + dt, 3)'], {}), '((numParts, t * dt + dt, 3))\n', (4192, 4220), True, 'import numpy as np\n'), ((249, 264), 'random.random', 'random.random', ([], {}), '()\n', (262, 264), False, 'import random\n'), ((2439, 2470), 'numpy.array', 'np.array', (['[xs[j], ys[j], zs[j]]'], {}), '([xs[j], ys[j], zs[j]])\n', (2447, 2470), True, 'import numpy as np\n'), ((4313, 4328), 'random.random', 'random.random', ([], {}), '()\n', (4326, 4328), False, 'import random\n'), ((4344, 4359), 'random.random', 'random.random', ([], {}), '()\n', (4357, 4359), False, 'import random\n'), ((4375, 4390), 'random.random', 'random.random', ([], {}), '()\n', (4388, 4390), False, 'import random\n')] |
from mlpractice.stats.stats_utils import print_stats, _update_stats
from mlpractice.utils import ExceptionInterception
try:
from mlpractice_solutions.\
mlpractice_solutions.linear_classifier_solution import softmax
except ImportError:
softmax = None
from scipy.special import softmax as softmax_sample
import numpy as np
def test_all(softmax=softmax):
    """Run the whole softmax suite and record the result in the practice stats."""
    for check in (test_interface, test_public, test_default, test_normalization):
        check(softmax)
    test_random(softmax, 100)
    print('All tests passed!')
    _update_stats('linear_classifier', 'softmax')
    print_stats('linear_classifier')
def test_interface(softmax=softmax):
    """softmax must hand back an ndarray whose shape matches its input."""
    with ExceptionInterception():
        samples = [
            np.array([1, 2, 3]),
            np.array([[1, 2, 3],
                      [1, 2, 3]]),
        ]
        # Call softmax on every sample first, then run the checks.
        results = [(sample, softmax(sample)) for sample in samples]
        for sample, out in results:
            assert isinstance(out, np.ndarray), \
                "softmax must return an ndarray"
            assert sample.shape == out.shape, \
                "The output shape must match the input shape"
def test_public(softmax=softmax):
    """Match the scipy reference softmax on a small 1-D vector."""
    with ExceptionInterception():
        vec = np.array([1, 2, 3])
        reference = softmax_sample(vec)
        produced = softmax(vec)
        assert np.abs(produced - reference).max() < 10 ** -8
def test_default(softmax=softmax):
    """On a 2-D input, softmax must act row-wise (axis=1) by default."""
    with ExceptionInterception():
        mat = np.array([[1, 0.5, 0.2, 3],
                        [1, -1, 7, 3],
                        [2, 12, 13, 3]])
        reference = softmax_sample(mat, axis=1)
        produced = softmax(mat)
        assert np.abs(produced - reference).max() < 10 ** -8
def test_normalization(softmax=softmax):
    """A huge input component must not overflow (the implementation has to shift by the max)."""
    with ExceptionInterception():
        vec = np.array([10000, 0, 0])
        reference = softmax_sample(vec)
        produced = softmax(vec)
        assert np.abs(produced - reference).max() < 10 ** -8
def test_random(softmax=softmax, iterations=1):
    """Fuzz softmax against the scipy reference on seeded random 3x4 matrices."""
    with ExceptionInterception():
        np.random.seed(42)
        for _ in range(iterations):
            mat = np.random.rand(3, 4)
            reference = softmax_sample(mat, axis=1)
            produced = softmax(mat)
            assert np.abs(produced - reference).max() < 10 ** -8
| [
"numpy.abs",
"numpy.random.rand",
"mlpractice.utils.ExceptionInterception",
"mlpractice_solutions.mlpractice_solutions.linear_classifier_solution.softmax",
"numpy.array",
"mlpractice.stats.stats_utils.print_stats",
"mlpractice.stats.stats_utils._update_stats",
"numpy.random.seed",
"scipy.special.sof... | [((548, 593), 'mlpractice.stats.stats_utils._update_stats', '_update_stats', (['"""linear_classifier"""', '"""softmax"""'], {}), "('linear_classifier', 'softmax')\n", (561, 593), False, 'from mlpractice.stats.stats_utils import print_stats, _update_stats\n'), ((598, 630), 'mlpractice.stats.stats_utils.print_stats', 'print_stats', (['"""linear_classifier"""'], {}), "('linear_classifier')\n", (609, 630), False, 'from mlpractice.stats.stats_utils import print_stats, _update_stats\n'), ((679, 702), 'mlpractice.utils.ExceptionInterception', 'ExceptionInterception', ([], {}), '()\n', (700, 702), False, 'from mlpractice.utils import ExceptionInterception\n'), ((717, 736), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (725, 736), True, 'import numpy as np\n'), ((750, 782), 'numpy.array', 'np.array', (['[[1, 2, 3], [1, 2, 3]]'], {}), '([[1, 2, 3], [1, 2, 3]])\n', (758, 782), True, 'import numpy as np\n'), ((820, 831), 'mlpractice_solutions.mlpractice_solutions.linear_classifier_solution.softmax', 'softmax', (['x1'], {}), '(x1)\n', (827, 831), False, 'from mlpractice_solutions.mlpractice_solutions.linear_classifier_solution import softmax\n'), ((845, 856), 'mlpractice_solutions.mlpractice_solutions.linear_classifier_solution.softmax', 'softmax', (['x2'], {}), '(x2)\n', (852, 856), False, 'from mlpractice_solutions.mlpractice_solutions.linear_classifier_solution import softmax\n'), ((1277, 1300), 'mlpractice.utils.ExceptionInterception', 'ExceptionInterception', ([], {}), '()\n', (1298, 1300), False, 'from mlpractice.utils import ExceptionInterception\n'), ((1314, 1333), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1322, 1333), True, 'import numpy as np\n'), ((1354, 1371), 'scipy.special.softmax', 'softmax_sample', (['x'], {}), '(x)\n', (1368, 1371), True, 'from scipy.special import softmax as softmax_sample\n'), ((1384, 1394), 'mlpractice_solutions.mlpractice_solutions.linear_classifier_solution.softmax', 
'softmax', (['x'], {}), '(x)\n', (1391, 1394), False, 'from mlpractice_solutions.mlpractice_solutions.linear_classifier_solution import softmax\n'), ((1497, 1520), 'mlpractice.utils.ExceptionInterception', 'ExceptionInterception', ([], {}), '()\n', (1518, 1520), False, 'from mlpractice.utils import ExceptionInterception\n'), ((1534, 1593), 'numpy.array', 'np.array', (['[[1, 0.5, 0.2, 3], [1, -1, 7, 3], [2, 12, 13, 3]]'], {}), '([[1, 0.5, 0.2, 3], [1, -1, 7, 3], [2, 12, 13, 3]])\n', (1542, 1593), True, 'import numpy as np\n'), ((1658, 1683), 'scipy.special.softmax', 'softmax_sample', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (1672, 1683), True, 'from scipy.special import softmax as softmax_sample\n'), ((1696, 1706), 'mlpractice_solutions.mlpractice_solutions.linear_classifier_solution.softmax', 'softmax', (['x'], {}), '(x)\n', (1703, 1706), False, 'from mlpractice_solutions.mlpractice_solutions.linear_classifier_solution import softmax\n'), ((1815, 1838), 'mlpractice.utils.ExceptionInterception', 'ExceptionInterception', ([], {}), '()\n', (1836, 1838), False, 'from mlpractice.utils import ExceptionInterception\n'), ((1852, 1875), 'numpy.array', 'np.array', (['[10000, 0, 0]'], {}), '([10000, 0, 0])\n', (1860, 1875), True, 'import numpy as np\n'), ((1896, 1913), 'scipy.special.softmax', 'softmax_sample', (['x'], {}), '(x)\n', (1910, 1913), True, 'from scipy.special import softmax as softmax_sample\n'), ((1926, 1936), 'mlpractice_solutions.mlpractice_solutions.linear_classifier_solution.softmax', 'softmax', (['x'], {}), '(x)\n', (1933, 1936), False, 'from mlpractice_solutions.mlpractice_solutions.linear_classifier_solution import softmax\n'), ((2052, 2075), 'mlpractice.utils.ExceptionInterception', 'ExceptionInterception', ([], {}), '()\n', (2073, 2075), False, 'from mlpractice.utils import ExceptionInterception\n'), ((2085, 2103), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (2099, 2103), True, 'import numpy as np\n'), ((2157, 2177), 
'numpy.random.rand', 'np.random.rand', (['(3)', '(4)'], {}), '(3, 4)\n', (2171, 2177), True, 'import numpy as np\n'), ((2202, 2227), 'scipy.special.softmax', 'softmax_sample', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (2216, 2227), True, 'from scipy.special import softmax as softmax_sample\n'), ((2244, 2254), 'mlpractice_solutions.mlpractice_solutions.linear_classifier_solution.softmax', 'softmax', (['x'], {}), '(x)\n', (2251, 2254), False, 'from mlpractice_solutions.mlpractice_solutions.linear_classifier_solution import softmax\n'), ((1418, 1438), 'numpy.abs', 'np.abs', (['(y - y_sample)'], {}), '(y - y_sample)\n', (1424, 1438), True, 'import numpy as np\n'), ((1730, 1750), 'numpy.abs', 'np.abs', (['(y - y_sample)'], {}), '(y - y_sample)\n', (1736, 1750), True, 'import numpy as np\n'), ((1960, 1980), 'numpy.abs', 'np.abs', (['(y - y_sample)'], {}), '(y - y_sample)\n', (1966, 1980), True, 'import numpy as np\n'), ((2282, 2302), 'numpy.abs', 'np.abs', (['(y - y_sample)'], {}), '(y - y_sample)\n', (2288, 2302), True, 'import numpy as np\n')] |
import numpy as np
def mortality_lognormal(r, s):
    """Calculate mortality from a cumulative log-normal distribution.

    Keyword arguments:
    :param r: ratio of body burdens to cbr, summed (dimensionless)
    :param s: dose-response slope (dimensionless)
    :returns: mortality fraction (0.0 for r <= 0, up to 1.0)
    """
    # Bug fix: `erf` was referenced but never imported (the module only
    # imports numpy), so every call with r > 0 raised NameError.
    from math import erf

    if r > 0:
        mean = 0.0
        # Standardize log10(r) for the cumulative normal via erf.
        x = (np.log10(r) - mean) / (s * np.sqrt(2))
        return 0.5 * (1 + erf(x))
    else:
        return 0.0
def mortality_loglogistic(conc, alpha, beta):
    """Calculate mortality from a cumulative log-logistic distribution.

    Keyword arguments:
    :param conc: internal concentration ()
    :param alpha: threshold level ()
    :param beta: shape parameter ()
    :returns: F, the cumulative log-logistic value (0.0 for conc <= 0)
    """
    if not conc > 0:
        return 0.0
    scaled = conc / alpha
    return 1.0 / (1 + scaled ** -beta)
| [
"numpy.log10",
"numpy.sqrt"
] | [((355, 366), 'numpy.log10', 'np.log10', (['r'], {}), '(r)\n', (363, 366), True, 'import numpy as np\n'), ((382, 392), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (389, 392), True, 'import numpy as np\n')] |
import pygame
import random
from enum import Enum
from collections import namedtuple
import numpy as np
# Module-level pygame setup: initialise the library and load the score font.
pygame.init()
font = pygame.font.Font('arial.ttf', 25)  # requires arial.ttf next to this script
#font = pygame.font.SysFont('arial', 25)
class Direction(Enum):
    """Absolute heading of the snake on the grid."""
    RIGHT = 1
    LEFT = 2
    UP = 3
    DOWN = 4
# A pixel position on the board; also used for grid cells.
Point = namedtuple('Point', 'x, y')

# rgb colors
WHITE = (255, 255, 255)
RED = (200,0,0)
BLUE1 = (0, 0, 255)
BLUE2 = (0, 100, 255)
BLACK = (0,0,0)

BLOCK_SIZE = 20  # pixels per grid cell
SPEED = 60  # frame-rate cap passed to clock.tick
class SnakeGameAI:
    """Snake environment driven by an external agent through play_step(action)."""

    def __init__(self, w=640, h=480, game_n=1):
        self.w = w
        self.h = h
        # init display
        self.display = pygame.display.set_mode((self.w, self.h))
        pygame.display.set_caption('Snake')
        self.clock = pygame.time.Clock()
        self.game_n = game_n
        self.reset()

    def reset(self):
        '''Reset the game (snake, food and score)'''
        self.direction = Direction.RIGHT
        self.head = Point(self.w/2, self.h/2)
        self.snake = [self.head,
                      Point(self.head.x-BLOCK_SIZE, self.head.y),
                      Point(self.head.x-(2*BLOCK_SIZE), self.head.y)]
        self.score = 0
        # NOTE(review): incremented on every reset, including the one in
        # __init__, so the first displayed game number is game_n + 1.
        self.game_n += 1
        self.food = None
        self._place_food()
        self.frame_iteration = 0

    def _place_food(self):
        '''Place food inside the screen randomly. If the food lands already in the snake it replace it'''
        x = random.randint(0, (self.w-BLOCK_SIZE )//BLOCK_SIZE )*BLOCK_SIZE
        y = random.randint(0, (self.h-BLOCK_SIZE )//BLOCK_SIZE )*BLOCK_SIZE
        self.food = Point(x, y)
        if self.food in self.snake:
            self._place_food()

    def snake_vision(self):
        ''' return an array with -1 where there is a border or the snake, 1 where there is food 0 anywhere'''
        # 5x5 window of cells centred on the head, flattened row-major.
        left_upper_corner = Point(self.snake[0].x - 2*BLOCK_SIZE, self.snake[0].y - 2*BLOCK_SIZE)
        vision_grid = [0 for _ in range(25)]
        for row in range(5):
            point_x = left_upper_corner.x+row*BLOCK_SIZE
            for col in range(5):
                point_y = left_upper_corner.y+col*BLOCK_SIZE
                point = Point(point_x,point_y)
                index = row*5+col
                # if I collide with myself or a wall
                if self.is_collision(point):
                    vision_grid[index] = -1
                # Bug fix: the original compared the food to the head
                # (`self.food == self.head`), so food was never marked in
                # the grid; compare against the cell being scanned instead.
                if self.food == point:
                    vision_grid[index] = 1
        return vision_grid

    def play_step(self, action):
        '''Advance one frame using `action`; returns (reward, game_over, score).'''
        self.frame_iteration += 1
        # 1. collect user input (only the window close button is handled)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        # 2. move
        self._move(action)  # update the head
        self.snake.insert(0, self.head)
        # 3. check if game over and calculate the reward
        reward = 0
        game_over = False
        # end the episode on collision, or when the snake stalls too long
        # without eating (budget scales with its length)
        if self.is_collision() or self.frame_iteration > 100*len(self.snake):
            game_over = True
            reward -= 10  # the snake has died
            return reward, game_over, self.score
        # 4. place new food or just move
        if self.head == self.food:
            self.score += 1
            reward += 10  # snake ate food
            self._place_food()
        else:
            self.snake.pop()
        # 5. update ui and clock
        self._update_ui()
        self.clock.tick(SPEED)
        # 6. return game over and score
        return reward, game_over, self.score

    def is_collision(self, point:Point=None)->bool:
        '''True when `point` (default: the head) is off the board or on the body.'''
        if point is None:
            point = self.head
        # hits boundary
        if point.x > self.w - BLOCK_SIZE or point.x < 0 or point.y > self.h - BLOCK_SIZE or point.y < 0:
            return True
        # hits itself
        if point in self.snake[1:]:
            return True
        return False

    def _update_ui(self):
        '''Redraw the board: snake, vision window, food and score text.'''
        self.display.fill(BLACK)
        # draw snake
        for pt in self.snake:
            pygame.draw.rect(self.display, BLUE1, pygame.Rect(pt.x, pt.y, BLOCK_SIZE, BLOCK_SIZE))
            pygame.draw.rect(self.display, BLUE2, pygame.Rect(pt.x+4, pt.y+4, 12, 12))
        # outline the 5x5 snake-vision window around the head
        pygame.draw.rect(self.display, WHITE, pygame.Rect(self.snake[0].x - 2*BLOCK_SIZE, self.snake[0].y - 2*BLOCK_SIZE, 5*BLOCK_SIZE, 5*BLOCK_SIZE), 3)
        # draw food
        pygame.draw.rect(self.display, RED, pygame.Rect(self.food.x, self.food.y, BLOCK_SIZE, BLOCK_SIZE))
        # draw score
        text = font.render("Score: {} Game: {}".format(str(self.score),str(self.game_n)), True, WHITE)
        # update screen
        self.display.blit(text, [0, 0])
        pygame.display.flip()

    def _move(self, action):
        ''' |1,0,0| -> straight
            |0,1,0| -> right turn
            |0,0,1| -> left turn
        '''
        clock_wise = [Direction.RIGHT, Direction.DOWN, Direction.LEFT, Direction.UP]
        idx = clock_wise.index(self.direction)
        if np.array_equal(action,[1,0,0]):
            # no change
            new_dir = clock_wise[idx]
        elif np.array_equal(action,[0,1,0]):
            # right turn right -> down -> left -> up
            next_idx = (idx + 1) % 4
            new_dir = clock_wise[next_idx]
        elif np.array_equal(action,[0,0,1]):
            # left turn right -> up -> left -> down
            next_idx = (idx - 1) % 4
            new_dir = clock_wise[next_idx]
        # NOTE: any other action pattern leaves new_dir unbound and raises
        # NameError on the next line.
        self.direction = new_dir
        x = self.head.x
        y = self.head.y
        if self.direction == Direction.RIGHT:
            x += BLOCK_SIZE
        elif self.direction == Direction.LEFT:
            x -= BLOCK_SIZE
        elif self.direction == Direction.DOWN:
            y += BLOCK_SIZE
        elif self.direction == Direction.UP:
            y -= BLOCK_SIZE
        self.head = Point(x, y)
| [
"collections.namedtuple",
"pygame.init",
"pygame.quit",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.display.flip",
"pygame.time.Clock",
"pygame.Rect",
"numpy.array_equal",
"pygame.display.set_caption",
"pygame.font.Font",
"random.randint"
] | [((105, 118), 'pygame.init', 'pygame.init', ([], {}), '()\n', (116, 118), False, 'import pygame\n'), ((126, 159), 'pygame.font.Font', 'pygame.font.Font', (['"""arial.ttf"""', '(25)'], {}), "('arial.ttf', 25)\n", (142, 159), False, 'import pygame\n'), ((273, 300), 'collections.namedtuple', 'namedtuple', (['"""Point"""', '"""x, y"""'], {}), "('Point', 'x, y')\n", (283, 300), False, 'from collections import namedtuple\n'), ((567, 608), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(self.w, self.h)'], {}), '((self.w, self.h))\n', (590, 608), False, 'import pygame\n'), ((611, 646), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Snake"""'], {}), "('Snake')\n", (637, 646), False, 'import pygame\n'), ((662, 681), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (679, 681), False, 'import pygame\n'), ((2293, 2311), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2309, 2311), False, 'import pygame\n'), ((4095, 4116), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (4114, 4116), False, 'import pygame\n'), ((4351, 4384), 'numpy.array_equal', 'np.array_equal', (['action', '[1, 0, 0]'], {}), '(action, [1, 0, 0])\n', (4365, 4384), True, 'import numpy as np\n'), ((1232, 1286), 'random.randint', 'random.randint', (['(0)', '((self.w - BLOCK_SIZE) // BLOCK_SIZE)'], {}), '(0, (self.w - BLOCK_SIZE) // BLOCK_SIZE)\n', (1246, 1286), False, 'import random\n'), ((1302, 1356), 'random.randint', 'random.randint', (['(0)', '((self.h - BLOCK_SIZE) // BLOCK_SIZE)'], {}), '(0, (self.h - BLOCK_SIZE) // BLOCK_SIZE)\n', (1316, 1356), False, 'import random\n'), ((3701, 3816), 'pygame.Rect', 'pygame.Rect', (['(self.snake[0].x - 2 * BLOCK_SIZE)', '(self.snake[0].y - 2 * BLOCK_SIZE)', '(5 * BLOCK_SIZE)', '(5 * BLOCK_SIZE)'], {}), '(self.snake[0].x - 2 * BLOCK_SIZE, self.snake[0].y - 2 *\n BLOCK_SIZE, 5 * BLOCK_SIZE, 5 * BLOCK_SIZE)\n', (3712, 3816), False, 'import pygame\n'), ((3862, 3923), 'pygame.Rect', 'pygame.Rect', 
(['self.food.x', 'self.food.y', 'BLOCK_SIZE', 'BLOCK_SIZE'], {}), '(self.food.x, self.food.y, BLOCK_SIZE, BLOCK_SIZE)\n', (3873, 3923), False, 'import pygame\n'), ((4435, 4468), 'numpy.array_equal', 'np.array_equal', (['action', '[0, 1, 0]'], {}), '(action, [0, 1, 0])\n', (4449, 4468), True, 'import numpy as np\n'), ((2350, 2363), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2361, 2363), False, 'import pygame\n'), ((3514, 3561), 'pygame.Rect', 'pygame.Rect', (['pt.x', 'pt.y', 'BLOCK_SIZE', 'BLOCK_SIZE'], {}), '(pt.x, pt.y, BLOCK_SIZE, BLOCK_SIZE)\n', (3525, 3561), False, 'import pygame\n'), ((3604, 3643), 'pygame.Rect', 'pygame.Rect', (['(pt.x + 4)', '(pt.y + 4)', '(12)', '(12)'], {}), '(pt.x + 4, pt.y + 4, 12, 12)\n', (3615, 3643), False, 'import pygame\n'), ((4580, 4613), 'numpy.array_equal', 'np.array_equal', (['action', '[0, 0, 1]'], {}), '(action, [0, 0, 1])\n', (4594, 4613), True, 'import numpy as np\n')] |
"""
File Name: UnoPytorch/drug_qed_func.py
Author: <NAME> (xduan7)
Email: <EMAIL>
Date: 9/4/18
Python Version: 3.6.6
File Description:
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import r2_score
def train_drug_qed(device: torch.device,
                   drug_qed_net: nn.Module,
                   data_loader: torch.utils.data.DataLoader,
                   max_num_batches: int,
                   loss_func: callable,
                   optimizer: torch.optim, ):
    """Run one training pass over at most ``max_num_batches`` batches.

    Trains the drug weighted-QED regression network in place and prints the
    sample-weighted average training loss at the end.

    Args:
        device: device that each batch is moved to before the forward pass.
        drug_qed_net: network mapping drug features to a weighted QED score.
        data_loader: yields ``(drug_feature, target)`` batches.
        max_num_batches: hard cap on the number of batches consumed.
        loss_func: callable ``loss_func(prediction, target)`` returning a
            scalar loss tensor.
        optimizer: optimizer updating ``drug_qed_net``'s parameters.
    """
    drug_qed_net.train()

    running_loss = 0.
    seen_samples = 0

    for batch_idx, (drug_feature, target) in enumerate(data_loader):
        # Stop early once the batch budget is exhausted.
        if batch_idx >= max_num_batches:
            break

        drug_feature = drug_feature.to(device)
        target = target.to(device)

        drug_qed_net.zero_grad()
        loss = loss_func(drug_qed_net(drug_feature), target)
        loss.backward()
        optimizer.step()

        # Weight the running loss by batch size so the printed average is
        # per-sample rather than per-batch.
        batch_size = target.shape[0]
        seen_samples += batch_size
        running_loss += loss.item() * batch_size

    print('\tDrug Weighted QED Regression Loss: %8.6f'
          % (running_loss / seen_samples))
def valid_drug_qed(device: torch.device,
                   drug_qed_net: nn.Module,
                   data_loader: torch.utils.data.DataLoader, ):
    """Evaluate the drug weighted-QED regression network.

    Runs the network over the whole loader without gradients, prints MSE,
    MAE and R2, and returns them.

    Args:
        device: device that each batch is moved to before the forward pass.
        drug_qed_net: network mapping drug features to a weighted QED score.
        data_loader: yields ``(drug_feature, target)`` batches.

    Returns:
        Tuple ``(mse, mae, r2)`` over the full validation dataset.
    """
    drug_qed_net.eval()

    total_mse = 0.
    total_mae = 0.
    target_array = np.array([])
    pred_array = np.array([])

    with torch.no_grad():
        for drug_feature, target in data_loader:
            drug_feature = drug_feature.to(device)
            target = target.to(device)
            pred_target = drug_qed_net(drug_feature)

            # Accumulate sample-weighted errors so the final division by the
            # dataset size gives exact per-sample averages regardless of
            # uneven batch sizes.
            batch_size = target.shape[0]
            total_mse += F.mse_loss(pred_target, target).item() * batch_size
            total_mae += F.l1_loss(pred_target, target).item() * batch_size

            # Collect flattened targets/predictions for the R2 computation.
            target_array = np.concatenate(
                (target_array, target.cpu().numpy().flatten()))
            pred_array = np.concatenate(
                (pred_array, pred_target.cpu().numpy().flatten()))

    mse = total_mse / len(data_loader.dataset)
    mae = total_mae / len(data_loader.dataset)
    r2 = r2_score(y_pred=pred_array, y_true=target_array)

    print('\tDrug Weighted QED Regression\n'
          '\t\tMSE: %8.6f \t MAE: %8.6f \t R2: %+4.2f' % (mse, mae, r2))

    return mse, mae, r2
| [
"torch.nn.functional.mse_loss",
"torch.nn.functional.l1_loss",
"numpy.array",
"torch.no_grad",
"sklearn.metrics.r2_score"
] | [((1491, 1503), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1499, 1503), True, 'import numpy as np\n'), ((1505, 1517), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1513, 1517), True, 'import numpy as np\n'), ((1528, 1543), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1541, 1543), False, 'import torch\n'), ((2223, 2271), 'sklearn.metrics.r2_score', 'r2_score', ([], {'y_pred': 'pred_array', 'y_true': 'target_array'}), '(y_pred=pred_array, y_true=target_array)\n', (2231, 2271), False, 'from sklearn.metrics import r2_score\n'), ((1789, 1820), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['pred_target', 'target'], {}), '(pred_target, target)\n', (1799, 1820), True, 'import torch.nn.functional as F\n'), ((1861, 1891), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['pred_target', 'target'], {}), '(pred_target, target)\n', (1870, 1891), True, 'import torch.nn.functional as F\n')] |
#!/usr/bin/env python
import numpy as np
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from scipy.stats import pearsonr, spearmanr
#===============================================================================
#===============================================================================
class Metrics:
    """Regression metrics plus helpers for model selection / early stopping.

    Metric methods take ``true`` and ``pred`` as numpy arrays of the same
    shape. For 2-D targets whose last dimension is greater than 1, the
    correlation metrics return one score per output dimension.
    """

    @staticmethod
    def r2(true, pred):
        """Coefficient of determination (R^2)."""
        return r2_score(true, pred)

    @staticmethod
    def rmse(true, pred):
        """Root mean squared error."""
        return np.sqrt(mean_squared_error(true, pred))

    @staticmethod
    def mae(true, pred):
        """Mean absolute error."""
        return mean_absolute_error(true, pred)

    @staticmethod
    def pearson(true, pred):
        """Pearson correlation (scalar, or a list per output dimension)."""
        if true.shape[-1] == 1:
            true, pred = np.squeeze(true), np.squeeze(pred)
            pearson_coeff, p_value = pearsonr(true, pred)
            return pearson_coeff
        else:
            pearsons = []
            for dim in range(true.shape[-1]):
                pearson_coeff, p_value = pearsonr(true[:, dim], pred[:, dim])
                pearsons.append(pearson_coeff)
            return pearsons

    @staticmethod
    def spearman(true, pred):
        """Spearman rank correlation (scalar, or a list per output dimension)."""
        if true.shape[-1] == 1:
            true, pred = np.squeeze(true), np.squeeze(pred)
            spearman_coeff, p_value = spearmanr(true, pred)
            return spearman_coeff
        else:
            spearmans = []
            for dim in range(true.shape[-1]):
                spearman_coeff, p_value = spearmanr(true[:, dim], pred[:, dim])
                spearmans.append(spearman_coeff)
            return spearmans

    def __call__(self, true, pred, kinds):
        """Compute every metric named in ``kinds``; unknown names are skipped.

        BUG FIX: ``getattr`` raises ``AttributeError`` (not ``NameError``)
        for an unknown metric name. The original caught the wrong exception
        class and then crashed on the undefined ``fn``; unknown kinds are
        now reported and skipped.
        """
        metrics = {}
        for kind in kinds:
            try:
                fn = getattr(self, kind)
            except AttributeError as e:
                print(e)
                continue
            metrics[kind] = fn(true, pred)
        return metrics

    @staticmethod
    def get_best_metric(kind, metric_list):
        '''
        Retrieve the dictionary for which the metric is the best.

        Delegates to ``get_best_index`` so the "which direction is better"
        logic lives in exactly one place.
        '''
        return metric_list[Metrics.get_best_index(kind, metric_list)]

    @staticmethod
    def get_all_best_metric(types, metrics_list):
        '''
        Retrieve all the best metric values, one per metric name in ``types``.
        '''
        best = {}
        for metric in types:
            met = [d[metric] for d in metrics_list]
            # Higher is better for correlation/R2; lower is better for errors.
            if metric in ['r2', 'spearman', 'pearson']:
                b = np.amax(met)
            elif metric in ['rmse', 'mae']:
                b = np.amin(met)
            else:
                # BUG FIX: previously fell through with `b` unbound
                # (UnboundLocalError); raise explicitly like the siblings.
                raise NotImplementedError
            best[metric] = b
        return best

    @staticmethod
    def get_best_index(kind, metric_list):
        '''
        Retrieve the training index at which the best metric is reported.
        '''
        if kind == 'r2':
            r2s = [d['r2'] for d in metric_list]
            return np.argmax(r2s)
        elif kind == 'rmse':
            rmses = [d['rmse'] for d in metric_list]
            return np.argmin(rmses)
        elif kind == 'mae':
            maes = [d['mae'] for d in metric_list]
            return np.argmin(maes)
        elif kind == 'pearson':
            pearsons = [d['pearson'] for d in metric_list]
            return np.argmax(pearsons)
        elif kind == 'spearman':
            spearmans = [d['spearman'] for d in metric_list]
            return np.argmax(spearmans)
        else:
            raise NotImplementedError

    @staticmethod
    def early_stopping(kind, metrics_list, patience):
        '''
        Decide whether training should stop early.

        If ``patience`` is None, never stop early (train for the maximum
        number of epochs). Otherwise stop once more than ``patience`` epochs
        have passed since the best value of the monitored metric. Only 'r2'
        and 'rmse' are monitored; other kinds never trigger stopping
        (unchanged behavior).
        '''
        stop = False
        if patience is None:  # fixed: identity check, not `== None`
            return stop
        if kind == 'r2':
            r2s = [d['r2'] for d in metrics_list]
            best_ix = np.argmax(r2s)
            if len(metrics_list) - best_ix > patience:
                stop = True
        elif kind == 'rmse':
            # fixed: list was misleadingly named `r2s` in the original
            rmses = [d['rmse'] for d in metrics_list]
            best_ix = np.argmin(rmses)
            if len(metrics_list) - best_ix > patience:
                stop = True
        return stop
| [
"numpy.amin",
"numpy.argmax",
"sklearn.metrics.mean_squared_error",
"numpy.squeeze",
"scipy.stats.pearsonr",
"numpy.argmin",
"sklearn.metrics.mean_absolute_error",
"scipy.stats.spearmanr",
"sklearn.metrics.r2_score",
"numpy.amax"
] | [((407, 427), 'sklearn.metrics.r2_score', 'r2_score', (['true', 'pred'], {}), '(true, pred)\n', (415, 427), False, 'from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error\n'), ((595, 626), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['true', 'pred'], {}), '(true, pred)\n', (614, 626), False, 'from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error\n'), ((500, 530), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['true', 'pred'], {}), '(true, pred)\n', (518, 530), False, 'from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error\n'), ((804, 824), 'scipy.stats.pearsonr', 'pearsonr', (['true', 'pred'], {}), '(true, pred)\n', (812, 824), False, 'from scipy.stats import pearsonr, spearmanr\n'), ((1276, 1297), 'scipy.stats.spearmanr', 'spearmanr', (['true', 'pred'], {}), '(true, pred)\n', (1285, 1297), False, 'from scipy.stats import pearsonr, spearmanr\n'), ((2240, 2254), 'numpy.argmax', 'np.argmax', (['r2s'], {}), '(r2s)\n', (2249, 2254), True, 'import numpy as np\n'), ((3710, 3724), 'numpy.argmax', 'np.argmax', (['r2s'], {}), '(r2s)\n', (3719, 3724), True, 'import numpy as np\n'), ((732, 748), 'numpy.squeeze', 'np.squeeze', (['true'], {}), '(true)\n', (742, 748), True, 'import numpy as np\n'), ((750, 766), 'numpy.squeeze', 'np.squeeze', (['pred'], {}), '(pred)\n', (760, 766), True, 'import numpy as np\n'), ((985, 1021), 'scipy.stats.pearsonr', 'pearsonr', (['true[:, dim]', 'pred[:, dim]'], {}), '(true[:, dim], pred[:, dim])\n', (993, 1021), False, 'from scipy.stats import pearsonr, spearmanr\n'), ((1203, 1219), 'numpy.squeeze', 'np.squeeze', (['true'], {}), '(true)\n', (1213, 1219), True, 'import numpy as np\n'), ((1221, 1237), 'numpy.squeeze', 'np.squeeze', (['pred'], {}), '(pred)\n', (1231, 1237), True, 'import numpy as np\n'), ((1461, 1498), 'scipy.stats.spearmanr', 'spearmanr', (['true[:, dim]', 'pred[:, dim]'], {}), '(true[:, dim], pred[:, dim])\n', (1470, 1498), 
False, 'from scipy.stats import pearsonr, spearmanr\n'), ((2397, 2413), 'numpy.argmin', 'np.argmin', (['rmses'], {}), '(rmses)\n', (2406, 2413), True, 'import numpy as np\n'), ((3312, 3324), 'numpy.amax', 'np.amax', (['met'], {}), '(met)\n', (3319, 3324), True, 'import numpy as np\n'), ((3854, 3870), 'numpy.argmin', 'np.argmin', (['rmses'], {}), '(rmses)\n', (3863, 3870), True, 'import numpy as np\n'), ((4816, 4830), 'numpy.argmax', 'np.argmax', (['r2s'], {}), '(r2s)\n', (4825, 4830), True, 'import numpy as np\n'), ((2553, 2568), 'numpy.argmin', 'np.argmin', (['maes'], {}), '(maes)\n', (2562, 2568), True, 'import numpy as np\n'), ((3389, 3401), 'numpy.amin', 'np.amin', (['met'], {}), '(met)\n', (3396, 3401), True, 'import numpy as np\n'), ((3997, 4012), 'numpy.argmin', 'np.argmin', (['maes'], {}), '(maes)\n', (4006, 4012), True, 'import numpy as np\n'), ((5037, 5051), 'numpy.argmin', 'np.argmin', (['r2s'], {}), '(r2s)\n', (5046, 5051), True, 'import numpy as np\n'), ((2720, 2739), 'numpy.argmax', 'np.argmax', (['pearsons'], {}), '(pearsons)\n', (2729, 2739), True, 'import numpy as np\n'), ((4151, 4170), 'numpy.argmax', 'np.argmax', (['pearsons'], {}), '(pearsons)\n', (4160, 4170), True, 'import numpy as np\n'), ((2894, 2914), 'numpy.argmax', 'np.argmax', (['spearmans'], {}), '(spearmans)\n', (2903, 2914), True, 'import numpy as np\n'), ((4312, 4332), 'numpy.argmax', 'np.argmax', (['spearmans'], {}), '(spearmans)\n', (4321, 4332), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from typing import Text, List, Dict, Any, Union, Optional, Tuple, Callable
from rasa.shared.nlu.constants import TEXT
from rasa.utils.tensorflow.model_data import FeatureSignature
from rasa.utils.tensorflow.constants import (
REGULARIZATION_CONSTANT,
CONNECTION_DENSITY,
NUM_TRANSFORMER_LAYERS,
TRANSFORMER_SIZE,
NUM_HEADS,
UNIDIRECTIONAL_ENCODER,
KEY_RELATIVE_ATTENTION,
VALUE_RELATIVE_ATTENTION,
MAX_RELATIVE_POSITION,
MASKED_LM,
HIDDEN_LAYERS_SIZES,
DROP_RATE,
SPARSE_INPUT_DROPOUT,
DENSE_INPUT_DROPOUT,
DENSE_DIMENSION,
CONCAT_DIMENSION,
DROP_RATE_ATTENTION,
SEQUENCE,
SENTENCE,
)
from rasa.utils.tensorflow import layers
from rasa.utils.tensorflow.exceptions import TFLayerConfigException
from rasa.utils.tensorflow.transformer import TransformerEncoder
from rasa.nlu.constants import DEFAULT_TRANSFORMER_SIZE
class RasaCustomLayer(tf.keras.layers.Layer):
    """Parent class for all classes in `rasa_layers.py`.

    Allows a shared implementation for adjusting `DenseForSparse`
    layers during incremental training.

    During fine-tuning, sparse feature sizes might change due to addition of new data.
    If this happens, we need to adjust our `DenseForSparse` layers to a new size.
    `ConcatenateSparseDenseFeatures`, `RasaSequenceLayer` and
    `RasaFeatureCombiningLayer` all inherit from `RasaCustomLayer` and thus can
    change their own `DenseForSparse` layers if it's needed.
    """

    def adjust_sparse_layers_for_incremental_training(
        self,
        new_sparse_feature_sizes: Dict[Text, Dict[Text, List[int]]],
        old_sparse_feature_sizes: Dict[Text, Dict[Text, List[int]]],
        reg_lambda: float,
    ) -> None:
        """Finds and adjusts `DenseForSparse` layers during incremental training.

        Recursively looks through the layers until it finds all the `DenseForSparse`
        ones and adjusts those which have their sparse feature sizes increased.

        This function heavily relies on the name of `DenseForSparse` layer being
        in the following format - f"sparse_to_dense.{attribute}_{feature_type}" -
        in order to correctly extract the attribute and feature type.

        New and old sparse feature sizes could look like this:
        {TEXT: {FEATURE_TYPE_SEQUENCE: [4, 24, 128], FEATURE_TYPE_SENTENCE: [4, 128]}}

        Args:
            new_sparse_feature_sizes: sizes of current sparse features.
            old_sparse_feature_sizes: sizes of sparse features the model was
                previously trained on.
            reg_lambda: regularization constant.
        """
        for name, layer in self._tf_layers.items():
            if isinstance(layer, RasaCustomLayer):
                # Composite sub-layers may hold their own `DenseForSparse`
                # layers; descend recursively so each of them can adjust too.
                layer.adjust_sparse_layers_for_incremental_training(
                    new_sparse_feature_sizes=new_sparse_feature_sizes,
                    old_sparse_feature_sizes=old_sparse_feature_sizes,
                    reg_lambda=reg_lambda,
                )
            elif isinstance(layer, layers.DenseForSparse):
                attribute = layer.get_attribute()
                feature_type = layer.get_feature_type()
                if (
                    attribute in new_sparse_feature_sizes
                    and feature_type in new_sparse_feature_sizes[attribute]
                ):
                    new_feature_sizes = new_sparse_feature_sizes[attribute][
                        feature_type
                    ]
                    old_feature_sizes = old_sparse_feature_sizes[attribute][
                        feature_type
                    ]
                    # Only replace the layer when the total sparse input size
                    # grew; equal sizes need no change, and shrinking is not
                    # handled here.
                    if sum(new_feature_sizes) > sum(old_feature_sizes):
                        self._tf_layers[name] = self._replace_dense_for_sparse_layer(
                            layer_to_replace=layer,
                            new_sparse_feature_sizes=new_feature_sizes,
                            old_sparse_feature_sizes=old_feature_sizes,
                            attribute=attribute,
                            feature_type=feature_type,
                            reg_lambda=reg_lambda,
                        )

    @staticmethod
    def _replace_dense_for_sparse_layer(
        layer_to_replace: layers.DenseForSparse,
        new_sparse_feature_sizes: List[int],
        old_sparse_feature_sizes: List[int],
        attribute: Text,
        feature_type: Text,
        reg_lambda: float,
    ) -> layers.DenseForSparse:
        """Replaces a `DenseForSparse` layer with a new one.

        Replaces an existing `DenseForSparse` layer with a new one
        in order to adapt it to incremental training. The old layer's trained
        kernel rows (and bias, if any) are carried over; rows for the newly
        added sparse inputs are freshly initialized.

        Args:
            layer_to_replace: a `DenseForSparse` layer that is used to create a new one.
            new_sparse_feature_sizes: sizes of sparse features that will be
                the input of the layer.
            old_sparse_feature_sizes: sizes of sparse features that used to be
                the input of the layer.
            attribute: an attribute of the data fed to the layer.
            feature_type: a feature type of the data fed to the layer.
            reg_lambda: regularization constant.

        Returns:
            New `DenseForSparse` layer.
        """
        kernel = layer_to_replace.get_kernel().numpy()
        bias = layer_to_replace.get_bias()
        if bias is not None:
            bias = bias.numpy()
        units = layer_to_replace.get_units()
        # split kernel by feature sizes to update the layer accordingly
        # (each chunk of rows corresponds to one original sparse feature block,
        # so new rows can be appended per block independently)
        kernel_splits = []
        splitting_index = 0
        for size in old_sparse_feature_sizes:
            kernel_splits.append(kernel[splitting_index : splitting_index + size, :])
            splitting_index += size
        # Number of extra input rows each feature block gained.
        additional_sizes = [
            new_size - old_size
            for new_size, old_size in zip(
                new_sparse_feature_sizes, old_sparse_feature_sizes
            )
        ]
        # New rows are drawn from a normal distribution matching the mean/std
        # of the already-trained kernel.
        std, mean = np.std(kernel), np.mean(kernel)
        additional_weights = [
            np.random.normal(mean, std, size=(num_rows, units)).astype(np.float32)
            for num_rows in additional_sizes
        ]
        merged_weights = [
            np.vstack((existing, new))
            for existing, new in zip(kernel_splits, additional_weights)
        ]
        # stack each merged weight to form a new weight tensor
        new_weights = np.vstack(merged_weights)
        kernel_init = tf.constant_initializer(new_weights)
        bias_init = tf.constant_initializer(bias) if bias is not None else None
        # NOTE: the name format below is relied upon when extracting attribute
        # and feature type during incremental training (see the class docs).
        new_layer = layers.DenseForSparse(
            name=f"sparse_to_dense.{attribute}_{feature_type}",
            reg_lambda=reg_lambda,
            units=units,
            use_bias=bias is not None,
            kernel_initializer=kernel_init,
            bias_initializer=bias_init,
        )
        return new_layer
class ConcatenateSparseDenseFeatures(RasaCustomLayer):
    """Combines multiple sparse and dense feature tensors into one dense tensor.

    This layer combines features from various featurisers into a single feature array
    per input example. All features must be of the same feature type, i.e. sentence-
    level or sequence-level (token-level).

    The layer combines a given list of tensors (whether sparse or dense) by:
    1. converting sparse tensors into dense ones
    2. optionally, applying dropout to sparse tensors before and/or after the conversion
    3. concatenating all tensors along the last dimension

    Arguments:
        attribute: Name of attribute (e.g. `text` or `label`) whose features will be
            processed.
        feature_type: Feature type to be processed -- `sequence` or `sentence`.
        feature_type_signature: A list of signatures for the given attribute and feature
            type.
        config: A model config for correctly parametrising the layer.

    Input shape:
        Tuple containing one list of N-D tensors, each with shape: `(batch_size, ...,
        input_dim)`.
        All dense tensors must have the same shape, except possibly the last dimension.
        All sparse tensors must have the same shape, including the last dimension.

    Output shape:
        N-D tensor with shape: `(batch_size, ..., units)` where `units` is the sum of
        the last dimension sizes across all input tensors, with sparse tensors instead
        contributing `config[DENSE_DIMENSION][attribute]` units each.

    Raises:
        A `TFLayerConfigException` if no feature signatures are provided.

    Attributes:
        output_units: The last dimension size of the layer's output.
    """

    # Keys under which the optional sub-layers are stored in `self._tf_layers`.
    SPARSE_DROPOUT = "sparse_dropout"
    SPARSE_TO_DENSE = "sparse_to_dense"
    DENSE_DROPOUT = "dense_dropout"

    def __init__(
        self,
        attribute: Text,
        feature_type: Text,
        feature_type_signature: List[FeatureSignature],
        config: Dict[Text, Any],
    ) -> None:
        """Creates a new `ConcatenateSparseDenseFeatures` object."""
        if not feature_type_signature:
            raise TFLayerConfigException(
                "The feature type signature must contain some feature signatures."
            )

        super().__init__(
            name=f"concatenate_sparse_dense_features_{attribute}_{feature_type}"
        )

        self._check_sparse_input_units(feature_type_signature)

        # Fixed at construction time from the signatures and config; callers
        # read this to know the size of the concatenated output.
        self.output_units = self._calculate_output_units(
            attribute, feature_type_signature, config
        )

        # Prepare dropout and sparse-to-dense layers if any sparse tensors are expected
        self._tf_layers: Dict[Text, tf.keras.layers.Layer] = {}
        if any([signature.is_sparse for signature in feature_type_signature]):
            self._prepare_layers_for_sparse_tensors(attribute, feature_type, config)

    def _check_sparse_input_units(
        self, feature_type_signature: List[FeatureSignature]
    ) -> None:
        """Checks that all sparse features have the same last dimension size."""
        sparse_units = [
            feature_sig.units
            for feature_sig in feature_type_signature
            if feature_sig.is_sparse
        ]
        if len(set(sparse_units)) > 1:
            raise TFLayerConfigException(
                f"All sparse features must have the same last dimension size but found "
                f"different sizes: {set(sparse_units)}."
            )

    def _prepare_layers_for_sparse_tensors(
        self, attribute: Text, feature_type: Text, config: Dict[Text, Any]
    ) -> None:
        """Sets up sparse tensor pre-processing before combining with dense ones."""
        # For optionally applying dropout to sparse tensors
        if config[SPARSE_INPUT_DROPOUT]:
            self._tf_layers[self.SPARSE_DROPOUT] = layers.SparseDropout(
                rate=config[DROP_RATE]
            )
        # For converting sparse tensors to dense.
        # NOTE: the layer name format "sparse_to_dense.{attribute}_{feature_type}"
        # is relied upon elsewhere (see `RasaCustomLayer`) -- do not change it.
        self._tf_layers[self.SPARSE_TO_DENSE] = layers.DenseForSparse(
            name=f"sparse_to_dense.{attribute}_{feature_type}",
            units=config[DENSE_DIMENSION][attribute],
            reg_lambda=config[REGULARIZATION_CONSTANT],
        )
        # For optionally apply dropout to sparse tensors after they're converted to
        # dense tensors.
        if config[DENSE_INPUT_DROPOUT]:
            self._tf_layers[self.DENSE_DROPOUT] = tf.keras.layers.Dropout(
                rate=config[DROP_RATE]
            )

    @staticmethod
    def _calculate_output_units(
        attribute: Text,
        feature_type_signature: List[FeatureSignature],
        config: Dict[Text, Any],
    ) -> int:
        """Determines the output units from the provided feature signatures.

        Sparse features will be turned into dense ones, hence they each contribute with
        their future dense number of units.
        """
        return sum(
            [
                config[DENSE_DIMENSION][attribute]
                if signature.is_sparse
                else signature.units
                for signature in feature_type_signature
            ]
        )

    def _process_sparse_feature(
        self, feature: tf.SparseTensor, training: bool
    ) -> tf.Tensor:
        """Turns sparse tensor into dense, possibly adds dropout before and/or after."""
        # Dropout sub-layers only exist when enabled in the config, hence the
        # membership checks.
        if self.SPARSE_DROPOUT in self._tf_layers:
            feature = self._tf_layers[self.SPARSE_DROPOUT](feature, training)
        feature = self._tf_layers[self.SPARSE_TO_DENSE](feature)
        if self.DENSE_DROPOUT in self._tf_layers:
            feature = self._tf_layers[self.DENSE_DROPOUT](feature, training)
        return feature

    def call(
        self,
        inputs: Tuple[List[Union[tf.Tensor, tf.SparseTensor]]],
        training: bool = False,
    ) -> tf.Tensor:
        """Combines sparse and dense feature tensors into one tensor.

        Arguments:
            inputs: Contains the input tensors, all of the same rank.
            training: A flag indicating whether the layer should behave in training mode
                (applying dropout to sparse tensors if applicable) or in inference mode
                (not applying dropout).

        Returns:
            Single tensor with all input tensors combined along the last dimension.
        """
        features = inputs[0]
        dense_features = []
        for f in features:
            if isinstance(f, tf.SparseTensor):
                f = self._process_sparse_feature(f, training)
            dense_features.append(f)
        # Now that all features are made dense, concatenate them along the last (units)
        # dimension.
        return tf.concat(dense_features, axis=-1)
class RasaFeatureCombiningLayer(RasaCustomLayer):
"""Combines multiple dense or sparse feature tensors into one.
This layer combines features by following these steps:
1. Apply a `ConcatenateSparseDenseFeatures` layer separately to sequence- and
sentence-level features, yielding two tensors (one for each feature type).
2. Concatenate the sequence- and sentence-level tensors along the sequence dimension
by appending sentence-level features at the first available token position after
the sequence-level (token-level) features.
Arguments:
attribute: Name of attribute (e.g. `text` or `label`) whose features will be
processed.
attribute_signature: A dictionary containing two lists of feature signatures,
one for each feature type (`sequence` or `sentence`) of the given attribute.
config: A model config used for correctly parameterising the layer and the
`ConcatenateSparseDenseFeatures` layer it uses internally.
Input shape:
Tuple of three input tensors:
sequence_features: List of 3-D dense or sparse tensors, each with shape
`(batch_size, max_seq_length, input_dim)` where `input_dim` can be
different for sparse vs dense tensors. See the input shape of
`ConcatenateSparseDenseFeatures` for more information.
sentence_features: List of 3-D dense or sparse tensors, each with shape
`(batch_size, 1, input_dim)` where `input_dim` can be different for
sparse vs dense tensors, and can differ from that in
`sequence_features`. See the input shape of
`ConcatenateSparseDenseFeatures` for more information.
sequence_feature_lengths: Dense tensor of shape `(batch_size, )`.
Output shape:
combined_features: A 3-D tensor with shape `(batch_size, sequence_length,
units)` where `units` is completely determined by the internally applied
`ConcatenateSparseDenseFeatures` layer and `sequence_length` is the combined
length of sequence- and sentence-level features: `max_seq_length + 1` if
both feature types are present, `max_seq_length` if only sequence-level
features are present, and 1 if only sentence-level features are present).
mask_combined_sequence_sentence: A 3-D tensor with shape
`(batch_size, sequence_length, 1)`.
Raises:
A `TFLayerConfigException` if no feature signatures are provided.
Attributes:
output_units: The last dimension size of the layer's `combined_features` output.
"""
def __init__(
    self,
    attribute: Text,
    attribute_signature: Dict[Text, List[FeatureSignature]],
    config: Dict[Text, Any],
) -> None:
    """Creates a new `RasaFeatureCombiningLayer` object."""
    # At least one feature type (sequence- or sentence-level) must have
    # feature signatures, otherwise the layer has nothing to combine.
    if not attribute_signature or not (
        attribute_signature.get(SENTENCE, [])
        or attribute_signature.get(SEQUENCE, [])
    ):
        raise TFLayerConfigException(
            "The attribute signature must contain some feature signatures."
        )

    super().__init__(name=f"rasa_feature_combining_layer_{attribute}")

    self._tf_layers: Dict[Text, tf.keras.layers.Layer] = {}

    # Prepare sparse-dense combining layers for each present feature type
    self._feature_types_present = self._get_present_feature_types(
        attribute_signature
    )
    self._prepare_sparse_dense_concat_layers(attribute, attribute_signature, config)

    # Prepare components for combining sequence- and sentence-level features
    self._prepare_sequence_sentence_concat(attribute, config)

    # Final output size; depends on whether dimension unification is used.
    self.output_units = self._calculate_output_units(attribute, config)
@staticmethod
def _get_present_feature_types(
    attribute_signature: Dict[Text, List[FeatureSignature]]
) -> Dict[Text, bool]:
    """Determines feature types that are present.

    Knowing which feature types are present is important because many downstream
    operations depend on it, e.g. combining sequence- and sentence-level features
    is only done if both feature types are present.
    """
    present = {}
    for feature_type in [SEQUENCE, SENTENCE]:
        has_signatures = (
            feature_type in attribute_signature
            and len(attribute_signature[feature_type]) > 0
        )
        present[feature_type] = has_signatures
    return present
def _prepare_sparse_dense_concat_layers(
    self,
    attribute: Text,
    attribute_signature: Dict[Text, List[FeatureSignature]],
    config: Dict[Text, Any],
) -> None:
    """Prepares sparse-dense combining layers for all present feature types."""
    # One `ConcatenateSparseDenseFeatures` sub-layer per feature type that
    # actually has feature signatures; absent feature types get no layer.
    for feature_type, present in self._feature_types_present.items():
        if not present:
            continue
        self._tf_layers[
            f"sparse_dense.{feature_type}"
        ] = ConcatenateSparseDenseFeatures(
            attribute=attribute,
            feature_type=feature_type,
            feature_type_signature=attribute_signature[feature_type],
            config=config,
        )
def _prepare_sequence_sentence_concat(
    self, attribute: Text, config: Dict[Text, Any]
) -> None:
    """Sets up combining sentence- and sequence-level features (if needed).

    This boils down to preparing for unifying the units of the sequence- and
    sentence-level features if they differ -- the same number of units is required
    for combining the features.
    """
    if (
        self._feature_types_present[SEQUENCE]
        and self._feature_types_present[SENTENCE]
    ):
        # The output units of this layer will be based on the output sizes of the
        # sparse+dense combining layers that are internally applied to all features.
        sequence_units = self._tf_layers[f"sparse_dense.{SEQUENCE}"].output_units
        sentence_units = self._tf_layers[f"sparse_dense.{SENTENCE}"].output_units

        # Last dimension needs to be unified if sequence- and sentence-level
        # features have different sizes, e.g. due to being produced by different
        # featurizers.
        if sequence_units != sentence_units:
            for feature_type in [SEQUENCE, SENTENCE]:
                # One feed-forward "unifier" per feature type, projecting to
                # the configured concat dimension for this attribute.
                self._tf_layers[
                    f"unify_dims_before_seq_sent_concat.{feature_type}"
                ] = layers.Ffnn(
                    layer_name_suffix=f"unify_dims.{attribute}_{feature_type}",
                    layer_sizes=[config[CONCAT_DIMENSION][attribute]],
                    dropout_rate=config[DROP_RATE],
                    reg_lambda=config[REGULARIZATION_CONSTANT],
                    density=config[CONNECTION_DENSITY],
                )
def _calculate_output_units(self, attribute: Text, config: Dict[Text, Any]) -> int:
    """Calculates the number of output units for this layer class.

    The number depends mainly on whether dimension unification is used or not.
    """
    # If dimension unification is used, output units are determined by the
    # unifying layers (both are created together, so checking either works).
    unify_layer_keys = (
        f"unify_dims_before_seq_sent_concat.{SEQUENCE}",
        f"unify_dims_before_seq_sent_concat.{SENTENCE}",
    )
    if any(key in self._tf_layers for key in unify_layer_keys):
        return config[CONCAT_DIMENSION][attribute]

    # Without dimension unification, the units from the underlying sparse_dense
    # layers are carried over and should be the same for sequence-level features
    # (if present) as for sentence-level features.
    if self._feature_types_present[SEQUENCE]:
        return self._tf_layers[f"sparse_dense.{SEQUENCE}"].output_units
    return self._tf_layers[f"sparse_dense.{SENTENCE}"].output_units
def _concat_sequence_sentence_features(
    self,
    sequence_tensor: tf.Tensor,
    sentence_tensor: tf.Tensor,
    mask_combined_sequence_sentence: tf.Tensor,
) -> tf.Tensor:
    """Concatenates sequence- & sentence-level features along sequence dimension."""
    # If needed, pass both feature types through a dense layer to bring them to the
    # same shape.
    if f"unify_dims_before_seq_sent_concat.{SEQUENCE}" in self._tf_layers:
        sequence_tensor = self._tf_layers[
            f"unify_dims_before_seq_sent_concat.{SEQUENCE}"
        ](sequence_tensor)
    if f"unify_dims_before_seq_sent_concat.{SENTENCE}" in self._tf_layers:
        sentence_tensor = self._tf_layers[
            f"unify_dims_before_seq_sent_concat.{SENTENCE}"
        ](sentence_tensor)

    # mask_combined_sequence_sentence has for each input example a sequence of 1s of
    # the length seq_length+1, where seq_length is the number of real tokens. The
    # rest is 0s which form a padding up to the max. sequence length + 1 (max.
    # number of real tokens + 1). Here the mask is turned into a mask that has 0s
    # everywhere and 1 only at the immediate next position after the last real
    # token's position for a given input example. Example (batch size = 2, sequence
    # lengths = [1, 2]):
    # [[[1], [0], [0]], ___\  [[[0], [1], [0]],
    #  [[1], [1], [0]]]    /   [[0], [0], [1]]]
    # (the reversed exclusive cumprod is 1 exactly while all later mask
    # entries are 0, i.e. only at the first padding position)
    sentence_feature_positions_mask = (
        mask_combined_sequence_sentence
        * tf.math.cumprod(
            1 - mask_combined_sequence_sentence,
            axis=1,
            exclusive=True,
            reverse=True,
        )
    )

    # The new mask is used to distribute the sentence features at the sequence
    # positions marked by 1s. The sentence features' dimensionality effectively
    # changes from `(batch_size, 1, feature_dim)` to `(batch_size, max_seq_length+1,
    # feature_dim)`, but the array is sparse, with real features present only at
    # positions determined by 1s in the mask.
    sentence_tensor = sentence_feature_positions_mask * sentence_tensor

    # Padding of sequence-level features is increased by 1 in the sequence
    # dimension to match the shape of modified sentence-level features.
    sequence_tensor = tf.pad(sequence_tensor, [[0, 0], [0, 1], [0, 0]])

    # Sequence- and sentence-level features effectively get concatenated by
    # summing the two padded feature arrays like this (batch size = 1):
    # [[seq1, seq2, seq3, 0, 0]] + [[0, 0, 0, sent1, 0]] =
    # = [[seq1, seq2, seq3, sent1, 0]]
    return sequence_tensor + sentence_tensor
def _combine_sequence_level_features(
    self,
    sequence_features: List[Union[tf.Tensor, tf.SparseTensor]],
    mask_sequence: tf.Tensor,
    training: bool,
) -> Optional[tf.Tensor]:
    """Processes & combines sequence-level features, or returns `None` if absent."""
    if not self._feature_types_present[SEQUENCE]:
        return None

    combined = self._tf_layers[f"sparse_dense.{SEQUENCE}"](
        (sequence_features,), training=training
    )

    # Re-apply the token mask (1s at real-token positions, 0s at padding):
    # the sparse+dense combining layer may have turned padded zero features
    # into non-zero values, and those must be zeroed out again. Sentence-level
    # features never need this step -- their effective sequence length is
    # always 1, so they carry no padding.
    return combined * mask_sequence
def _combine_sentence_level_features(
    self,
    sentence_features: List[Union[tf.Tensor, tf.SparseTensor]],
    sequence_feature_lengths: tf.Tensor,
    training: bool,
) -> Tuple[Optional[tf.Tensor], Optional[tf.Tensor]]:
    """Processes & combines sentence-level features if any are present.

    Returns the combined sentence-level feature tensor (or `None` when the
    attribute has no sentence-level features) together with the per-example
    feature lengths adjusted for the sentence features.
    """
    if not self._feature_types_present[SENTENCE]:
        # Without sentence-level features, the feature sequence lengths are
        # completely determined by sequence-level features.
        return None, sequence_feature_lengths

    combined = self._tf_layers[f"sparse_dense.{SENTENCE}"](
        (sentence_features,), training=training
    )

    # Sentence-level features have sequence dimension of length 1, so each
    # example's combined feature length grows by exactly one position.
    return combined, sequence_feature_lengths + 1
def call(
self,
inputs: Tuple[
List[Union[tf.Tensor, tf.SparseTensor]],
List[Union[tf.Tensor, tf.SparseTensor]],
tf.Tensor,
],
training: bool = False,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Combines multiple 3-D dense/sparse feature tensors into one.
Arguments:
inputs: Tuple containing:
sequence_features: Dense or sparse tensors representing different
token-level features.
sentence_features: Dense or sparse tensors representing sentence-level
features.
sequence_feature_lengths: A tensor containing the real sequence length
(the number of real -- not padding -- tokens) for each example in
the batch.
training: A flag indicating whether the layer should behave in training mode
(applying dropout to sparse tensors if applicable) or in inference mode
(not applying dropout).
Returns:
combined features: A tensor containing all the features combined.
mask_combined_sequence_sentence: A binary mask with 1s in place of real
features in the combined feature tensor, and 0s in padded positions with
fake features.
"""
sequence_features = inputs[0]
sentence_features = inputs[1]
sequence_feature_lengths = inputs[2]
# This mask is specifically for sequence-level features.
mask_sequence = compute_mask(sequence_feature_lengths)
sequence_features_combined = self._combine_sequence_level_features(
sequence_features, mask_sequence, training
)
(
sentence_features_combined,
combined_sequence_sentence_feature_lengths,
) = self._combine_sentence_level_features(
sentence_features, sequence_feature_lengths, training
)
mask_combined_sequence_sentence = compute_mask(
combined_sequence_sentence_feature_lengths
)
# If both feature types are present, combine them. Otherwise, just the present
# feature type will be returned.
if (
sequence_features_combined is not None
and sentence_features_combined is not None
):
features_to_return = self._concat_sequence_sentence_features(
sequence_features_combined,
sentence_features_combined,
mask_combined_sequence_sentence,
)
elif sequence_features_combined is not None:
features_to_return = sequence_features_combined
else:
features_to_return = sentence_features_combined
return features_to_return, mask_combined_sequence_sentence
class RasaSequenceLayer(RasaCustomLayer):
    """Creates an embedding from all features for a sequence attribute; facilitates MLM.
    This layer combines all features for an attribute and embeds them using a
    transformer, optionally doing masked language modeling. The layer is meant only for
    attributes with sequence-level features, such as `text`, `response` and
    `action_text`.
    Internally, this layer applies the following steps:
    1. Combine features using `RasaFeatureCombiningLayer`.
    2. Apply a dense layer(s) to the combined features.
    3. Optionally, and only during training for the `text` attribute, apply masking to
    the features and create further helper variables for masked language modeling.
    4. Embed the features using a transformer, effectively reducing variable-length
    sequences of features to fixed-size embeddings.
    Arguments:
        attribute: Name of attribute (e.g. `text` or `label`) whose features will be
            processed.
        attribute_signature: A dictionary containing two lists of feature signatures,
            one for each feature type (`sentence` or `sequence`) of the given attribute.
        config: A model config used for correctly parameterising the underlying layers.
    Input shape:
        Tuple of three input tensors:
            sequence_features: List of 3-D dense or sparse tensors, each with shape
                `(batch_size, max_seq_length, input_dim)` where `input_dim` can be
                different for sparse vs dense tensors. See the input shape of
                `ConcatenateSparseDenseFeatures` for more information.
            sentence_features: List of 3-D dense or sparse tensors, each with shape
                `(batch_size, 1, input_dim)` where `input_dim` can be different for
                sparse vs dense tensors, and can differ from that in
                `sequence_features`. See the input shape of
                `ConcatenateSparseDenseFeatures` for more information.
            sequence_feature_lengths: Dense tensor of shape `(batch_size, )`.
    Output shape:
        outputs: `(batch_size, seq_length, units)` where `units` matches the underlying
            transformer's output size (if present), otherwise it matches the output size
            of the `Ffnn` block applied to the combined features, or it's the output
            size of the underlying `RasaFeatureCombiningLayer` if the `Ffnn` block has 0
            layers. `seq_length` is the sum of the sequence dimension
            sizes of sequence- and sentence-level features (for details, see the output
            shape of `RasaFeatureCombiningLayer`). If both feature types are present,
            then `seq_length` will be 1 + the length of the longest sequence of real
            tokens across all examples in the given batch.
        seq_sent_features: `(batch_size, seq_length, hidden_dim)`, where `hidden_dim` is
            the output size of the underlying `Ffnn` block, or the output size of the
            underlying `RasaFeatureCombiningLayer` if the `Ffnn` block has 0 layers.
        mask_combined_sequence_sentence: `(batch_size, seq_length, 1)`
        token_ids: `(batch_size, seq_length, id_dim)`. `id_dim` is 2 when no dense
            sequence-level features are present. Otherwise, it's arbitrarily chosen to
            match the last dimension size of the first dense sequence-level feature in
            the input list of features.
        mlm_boolean_mask: `(batch_size, seq_length, 1)`, empty tensor if not doing MLM.
        attention_weights: `(transformer_layers, batch_size, num_transformer_heads,
            seq_length, seq_length)`, empty tensor if the transformer has 0 layers.
    Raises:
        A `TFLayerConfigException` if no feature signatures for sequence-level features
        are provided.
    Attributes:
        output_units: The last dimension size of the layer's first output (`outputs`).
    """

    # Keys under which the sub-layers are registered in `self._tf_layers`.
    FEATURE_COMBINING = "feature_combining"
    FFNN = "ffnn"
    TRANSFORMER = "transformer"
    MLM_INPUT_MASK = "mlm_input_mask"
    SPARSE_TO_DENSE_FOR_TOKEN_IDS = "sparse_to_dense_for_token_ids"
    def __init__(
        self,
        attribute: Text,
        attribute_signature: Dict[Text, List[FeatureSignature]],
        config: Dict[Text, Any],
    ) -> None:
        """Creates a new `RasaSequenceLayer` object."""
        if not attribute_signature or not attribute_signature.get(SEQUENCE, []):
            # NOTE(review): implicit string concatenation here produces
            # "...feature" + "signatures..." = "featuresignatures" -- a space seems
            # to be missing at the join; confirm and fix the message if so.
            raise TFLayerConfigException(
                "The attribute signature must contain some sequence-level feature"
                "signatures but none were found."
            )
        super().__init__(name=f"rasa_sequence_layer_{attribute}")
        self._tf_layers: Dict[Text, Any] = {
            self.FEATURE_COMBINING: RasaFeatureCombiningLayer(
                attribute, attribute_signature, config
            ),
            self.FFNN: layers.Ffnn(
                config[HIDDEN_LAYERS_SIZES][attribute],
                config[DROP_RATE],
                config[REGULARIZATION_CONSTANT],
                config[CONNECTION_DENSITY],
                layer_name_suffix=attribute,
            ),
        }
        self._enables_mlm = False
        # Note: Within TED, masked language modeling becomes just input dropout,
        # since there is no loss term associated with predicting the masked tokens.
        self._prepare_masked_language_modeling(attribute, attribute_signature, config)
        transformer_layers, transformer_units = self._prepare_transformer(
            attribute, config
        )
        # A transformer with 0 layers is a pass-through lambda (see
        # `prepare_transformer_layer`), hence this flag.
        self._has_transformer = transformer_layers > 0
        self.output_units = self._calculate_output_units(
            attribute, transformer_layers, transformer_units, config
        )
    @staticmethod
    def _get_transformer_dimensions(
        attribute: Text, config: Dict[Text, Any]
    ) -> Tuple[int, int]:
        """Determines # of transformer layers & output size from the model config.
        The config can contain these directly (same for all attributes) or specified
        separately for each attribute.
        If a transformer is used (i.e. if `number_of_transformer_layers` is positive),
        the default `transformer_size` which is `None` breaks things. Thus,
        we need to set a reasonable default value so that the model works fine.
        """
        # Both settings may be given per-attribute as dicts keyed by attribute name.
        transformer_layers = config[NUM_TRANSFORMER_LAYERS]
        if isinstance(transformer_layers, dict):
            transformer_layers = transformer_layers[attribute]
        transformer_units = config[TRANSFORMER_SIZE]
        if isinstance(transformer_units, dict):
            transformer_units = transformer_units[attribute]
        if transformer_layers > 0 and (not transformer_units or transformer_units < 1):
            transformer_units = DEFAULT_TRANSFORMER_SIZE
        return transformer_layers, transformer_units
    def _prepare_transformer(
        self, attribute: Text, config: Dict[Text, Any]
    ) -> Tuple[int, int]:
        """Creates a transformer & returns its number of layers and output units."""
        transformer_layers, transformer_units = self._get_transformer_dimensions(
            attribute, config
        )
        self._tf_layers[self.TRANSFORMER] = prepare_transformer_layer(
            attribute_name=attribute,
            config=config,
            num_layers=transformer_layers,
            units=transformer_units,
            drop_rate=config[DROP_RATE],
            unidirectional=config[UNIDIRECTIONAL_ENCODER],
        )
        return transformer_layers, transformer_units
    def _prepare_masked_language_modeling(
        self,
        attribute: Text,
        attribute_signature: Dict[Text, List[FeatureSignature]],
        config: Dict[Text, Any],
    ) -> None:
        """Prepares masking and computing helper variables for masked language modeling.
        Only done for the text attribute and only if sequence-level (token-level)
        features are present (MLM requires token-level information).
        """
        if attribute == TEXT and SEQUENCE in attribute_signature and config[MASKED_LM]:
            self._enables_mlm = True
            self._tf_layers[self.MLM_INPUT_MASK] = layers.InputMask()
            # Unique IDs of different token types are needed to construct the possible
            # label space for MLM. If dense features are present, they're used as such
            # IDs, otherwise sparse features are embedded by a non-trainable
            # DenseForSparse layer to create small embeddings that serve as IDs.
            expect_dense_seq_features = any(
                [not signature.is_sparse for signature in attribute_signature[SEQUENCE]]
            )
            if not expect_dense_seq_features:
                self._tf_layers[
                    self.SPARSE_TO_DENSE_FOR_TOKEN_IDS
                ] = layers.DenseForSparse(
                    units=2,
                    use_bias=False,
                    trainable=False,
                    name=f"{self.SPARSE_TO_DENSE_FOR_TOKEN_IDS}.{attribute}",
                )
    def _calculate_output_units(
        self,
        attribute: Text,
        transformer_layers: int,
        transformer_units: int,
        config: Dict[Text, Any],
    ) -> int:
        """Determines the output units based on what layer components are present.
        The size depends on which component is the last created one in the internal
        pipeline that is `RasaFeatureCombiningLayer` -> `Ffnn` -> `Transformer`, since
        not all the components are always created.
        """
        # transformer is the last component
        if transformer_layers > 0:
            return transformer_units
        # the Ffnn block is the last component
        if len(config[HIDDEN_LAYERS_SIZES][attribute]) > 0:
            # this is the output size of the last layer of the Ffnn block
            return config[HIDDEN_LAYERS_SIZES][attribute][-1]
        # only the RasaFeatureCombiningLayer is present
        return self._tf_layers[self.FEATURE_COMBINING].output_units
    def _features_as_token_ids(
        self, features: List[Union[tf.Tensor, tf.SparseTensor]]
    ) -> Optional[tf.Tensor]:
        """Creates dense labels (token IDs) used for negative sampling in MLM."""
        # If there are dense features, we use them as labels - taking the first dense
        # feature in the list, but any other dense feature would do the job.
        for f in features:
            if not isinstance(f, tf.SparseTensor):
                # stop_gradient: these IDs act as fixed labels, not trainable inputs.
                return tf.stop_gradient(f)
        # If no dense features are found, use a sparse feature but convert it into
        # a dense one first.
        for f in features:
            if isinstance(f, tf.SparseTensor):
                return tf.stop_gradient(
                    self._tf_layers[self.SPARSE_TO_DENSE_FOR_TOKEN_IDS](f)
                )
        # Reached only if `features` is empty or contains no usable tensors.
        return None
    def _create_mlm_tensors(
        self,
        sequence_features: List[Union[tf.Tensor, tf.SparseTensor]],
        seq_sent_features: tf.Tensor,
        mask_sequence: tf.Tensor,
        sentence_features_present: bool,
        training: bool,
    ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
        """Produces helper variables for masked language modelling (only in training).
        The `token_ids` embeddings can be viewed as token-level labels/unique IDs of all
        input tokens (to be used later in the MLM loss) because these embeddings aren't
        affected by dropout or masking and are effectively always unique for different
        input tokens (and same for the same tokens).
        `token_ids` share the batch and sequence dimension with the combined sequence-
        and sentence-level features, the last dimension is unimportant and mimics the
        first dense sequence-level feature in the list of features, or alternatively the
        last dimension will have size 2 if there are only sparse sequence features
        present.
        """
        token_ids = self._features_as_token_ids(sequence_features)
        # Pad in the sequence dimension to match the shape of combined sequence- and
        # sentence-level features. This means padding by 1 if sentence-level features
        # are present (those effectively have sequence length of 1) and not padding
        # otherwise.
        if sentence_features_present:
            token_ids = tf.pad(token_ids, [[0, 0], [0, 1], [0, 0]])
            mask_sequence = tf.pad(mask_sequence, [[0, 0], [0, 1], [0, 0]])
        # mlm_boolean_mask has the same shape as the tensor with all combined features
        # (except the last dimension), with True meaning tokens that are masked and
        # False meaning tokens that aren't masked or that are fake (padded) tokens.
        # Note that only sequence-level features are masked, nothing happens to the
        # sentence-level features in the combined features tensor.
        seq_sent_features, mlm_boolean_mask = self._tf_layers[self.MLM_INPUT_MASK](
            seq_sent_features, mask_sequence, training
        )
        return seq_sent_features, token_ids, mlm_boolean_mask
    def call(
        self,
        inputs: Tuple[
            List[Union[tf.Tensor, tf.SparseTensor]],
            List[Union[tf.Tensor, tf.SparseTensor]],
            tf.Tensor,
        ],
        training: bool = False,
    ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
        """Combines all of an attribute's features and embeds using a transformer.
        Arguments:
            inputs: Tuple containing:
                sequence_features: Dense or sparse tensors representing different
                    token-level features.
                sentence_features: Dense or sparse tensors representing different
                    sentence-level features.
                sequence_feature_lengths: A tensor containing the real sequence length
                    (the number of real -- not padding -- tokens) for each example in
                    the batch.
            training: A flag indicating whether the layer should behave in training mode
                (applying dropout to sparse tensors if applicable) or in inference mode
                (not applying dropout).
        Returns:
            outputs: Tensor with all features combined, masked (if doing MLM) and
                embedded with a transformer.
            seq_sent_features: Tensor with all features combined from just before the
                masking and transformer is applied
            mask_combined_sequence_sentence: A binary mask with 1s in place of real
                features in the combined feature tensor, and 0s in padded positions with
                fake features.
            token_ids: Tensor with dense token-level features which can serve as
                IDs (unique embeddings) of all the different tokens found in the batch.
                Empty tensor if not doing MLM.
            mlm_boolean_mask: A boolean mask with `True` where real tokens in `outputs`
                were masked and `False` elsewhere. Empty tensor if not doing MLM.
            attention_weights: Tensor containing self-attention weights received
                from the underlying transformer. Empty tensor if the transformer has 0
                layers.
        """
        sequence_features = inputs[0]
        sentence_features = inputs[1]
        sequence_feature_lengths = inputs[2]
        # Combine all features (sparse/dense, sequence-/sentence-level) into one tensor,
        # also get a binary mask that has 1s at positions with real features and 0s at
        # padded positions.
        seq_sent_features, mask_combined_sequence_sentence = self._tf_layers[
            self.FEATURE_COMBINING
        ]((sequence_features, sentence_features, sequence_feature_lengths))
        # Apply one or more dense layers.
        seq_sent_features = self._tf_layers[self.FFNN](seq_sent_features, training)
        # If using masked language modeling, mask the transformer inputs and get labels
        # for the masked tokens and a boolean mask. Note that TED does not use MLM loss,
        # hence using masked language modeling (if enabled) becomes just input dropout.
        if self._enables_mlm and training:
            mask_sequence = compute_mask(sequence_feature_lengths)
            (
                seq_sent_features_masked,
                token_ids,
                mlm_boolean_mask,
            ) = self._create_mlm_tensors(
                sequence_features,
                seq_sent_features,
                mask_sequence,
                sentence_features_present=len(sentence_features) > 0,
                training=training,
            )
        else:
            # tf.zeros((0,)) is an alternative to None
            token_ids = tf.zeros((0,))
            mlm_boolean_mask = tf.zeros((0,))
            seq_sent_features_masked = seq_sent_features
        # Apply the transformer (if present), hence reducing a sequences of features per
        # input example into a simple fixed-size embedding.
        if self._has_transformer:
            # Transformer expects a padding mask (1 = padded), i.e. the inverse of
            # the "real feature" mask computed above.
            mask_padding = 1 - mask_combined_sequence_sentence
            outputs, attention_weights = self._tf_layers[self.TRANSFORMER](
                seq_sent_features_masked, mask_padding, training
            )
            outputs = tf.nn.gelu(outputs)
        else:
            # tf.zeros((0,)) is an alternative to None
            outputs, attention_weights = seq_sent_features_masked, tf.zeros((0,))
        return (
            outputs,
            seq_sent_features,
            mask_combined_sequence_sentence,
            token_ids,
            mlm_boolean_mask,
            attention_weights,
        )
def compute_mask(sequence_lengths: tf.Tensor) -> tf.Tensor:
    """Builds a binary float mask from real sequence lengths.

    Given a 1-D `(batch_size,)` tensor with the number of real (non-padding)
    tokens per example, returns a `(batch_size, max_seq_length, 1)` tensor
    holding 1s at real-token positions and 0s everywhere else.
    """
    binary_mask = tf.sequence_mask(sequence_lengths, dtype=tf.float32)
    # Append a trailing singleton dimension so the mask broadcasts over features.
    return binary_mask[..., tf.newaxis]
def prepare_transformer_layer(
    attribute_name: Text,
    config: Dict[Text, Any],
    num_layers: int,
    units: int,
    drop_rate: float,
    unidirectional: bool,
) -> Union[
    TransformerEncoder,
    Callable[
        [tf.Tensor, Optional[tf.Tensor], Optional[Union[tf.Tensor, bool]]],
        Tuple[tf.Tensor, Optional[tf.Tensor]],
    ],
]:
    """Builds a transformer encoder, or a no-op stand-in when 0 layers are requested."""
    if num_layers <= 0:
        # No transformer configured: hand back a callable with the encoder's
        # signature that passes the input through and yields no attention weights,
        # so callers never need to special-case the 0-layer setup.
        return lambda x, mask, training: (x, None)
    return TransformerEncoder(
        num_layers,
        units,
        config[NUM_HEADS],
        units * 4,
        config[REGULARIZATION_CONSTANT],
        dropout_rate=drop_rate,
        attention_dropout_rate=config[DROP_RATE_ATTENTION],
        density=config[CONNECTION_DENSITY],
        unidirectional=unidirectional,
        use_key_relative_position=config[KEY_RELATIVE_ATTENTION],
        use_value_relative_position=config[VALUE_RELATIVE_ATTENTION],
        max_relative_position=config[MAX_RELATIVE_POSITION],
        name=f"{attribute_name}_encoder",
    )
| [
"tensorflow.pad",
"rasa.utils.tensorflow.layers.Ffnn",
"rasa.utils.tensorflow.layers.InputMask",
"numpy.mean",
"rasa.utils.tensorflow.exceptions.TFLayerConfigException",
"tensorflow.concat",
"numpy.vstack",
"rasa.utils.tensorflow.layers.DenseForSparse",
"tensorflow.zeros",
"numpy.random.normal",
... | [((47773, 47825), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['sequence_lengths'], {'dtype': 'tf.float32'}), '(sequence_lengths, dtype=tf.float32)\n', (47789, 47825), True, 'import tensorflow as tf\n'), ((47837, 47861), 'tensorflow.expand_dims', 'tf.expand_dims', (['mask', '(-1)'], {}), '(mask, -1)\n', (47851, 47861), True, 'import tensorflow as tf\n'), ((6499, 6524), 'numpy.vstack', 'np.vstack', (['merged_weights'], {}), '(merged_weights)\n', (6508, 6524), True, 'import numpy as np\n'), ((6547, 6583), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['new_weights'], {}), '(new_weights)\n', (6570, 6583), True, 'import tensorflow as tf\n'), ((6684, 6888), 'rasa.utils.tensorflow.layers.DenseForSparse', 'layers.DenseForSparse', ([], {'name': 'f"""sparse_to_dense.{attribute}_{feature_type}"""', 'reg_lambda': 'reg_lambda', 'units': 'units', 'use_bias': '(bias is not None)', 'kernel_initializer': 'kernel_init', 'bias_initializer': 'bias_init'}), "(name=f'sparse_to_dense.{attribute}_{feature_type}',\n reg_lambda=reg_lambda, units=units, use_bias=bias is not None,\n kernel_initializer=kernel_init, bias_initializer=bias_init)\n", (6705, 6888), False, 'from rasa.utils.tensorflow import layers\n'), ((11045, 11213), 'rasa.utils.tensorflow.layers.DenseForSparse', 'layers.DenseForSparse', ([], {'name': 'f"""sparse_to_dense.{attribute}_{feature_type}"""', 'units': 'config[DENSE_DIMENSION][attribute]', 'reg_lambda': 'config[REGULARIZATION_CONSTANT]'}), "(name=f'sparse_to_dense.{attribute}_{feature_type}',\n units=config[DENSE_DIMENSION][attribute], reg_lambda=config[\n REGULARIZATION_CONSTANT])\n", (11066, 11213), False, 'from rasa.utils.tensorflow import layers\n'), ((13710, 13744), 'tensorflow.concat', 'tf.concat', (['dense_features'], {'axis': '(-1)'}), '(dense_features, axis=-1)\n', (13719, 13744), True, 'import tensorflow as tf\n'), ((24183, 24232), 'tensorflow.pad', 'tf.pad', (['sequence_tensor', '[[0, 0], [0, 1], [0, 0]]'], {}), '(sequence_tensor, 
[[0, 0], [0, 1], [0, 0]])\n', (24189, 24232), True, 'import tensorflow as tf\n'), ((48334, 48815), 'rasa.utils.tensorflow.transformer.TransformerEncoder', 'TransformerEncoder', (['num_layers', 'units', 'config[NUM_HEADS]', '(units * 4)', 'config[REGULARIZATION_CONSTANT]'], {'dropout_rate': 'drop_rate', 'attention_dropout_rate': 'config[DROP_RATE_ATTENTION]', 'density': 'config[CONNECTION_DENSITY]', 'unidirectional': 'unidirectional', 'use_key_relative_position': 'config[KEY_RELATIVE_ATTENTION]', 'use_value_relative_position': 'config[VALUE_RELATIVE_ATTENTION]', 'max_relative_position': 'config[MAX_RELATIVE_POSITION]', 'name': 'f"""{attribute_name}_encoder"""'}), "(num_layers, units, config[NUM_HEADS], units * 4, config[\n REGULARIZATION_CONSTANT], dropout_rate=drop_rate,\n attention_dropout_rate=config[DROP_RATE_ATTENTION], density=config[\n CONNECTION_DENSITY], unidirectional=unidirectional,\n use_key_relative_position=config[KEY_RELATIVE_ATTENTION],\n use_value_relative_position=config[VALUE_RELATIVE_ATTENTION],\n max_relative_position=config[MAX_RELATIVE_POSITION], name=\n f'{attribute_name}_encoder')\n", (48352, 48815), False, 'from rasa.utils.tensorflow.transformer import TransformerEncoder\n'), ((6065, 6079), 'numpy.std', 'np.std', (['kernel'], {}), '(kernel)\n', (6071, 6079), True, 'import numpy as np\n'), ((6081, 6096), 'numpy.mean', 'np.mean', (['kernel'], {}), '(kernel)\n', (6088, 6096), True, 'import numpy as np\n'), ((6305, 6331), 'numpy.vstack', 'np.vstack', (['(existing, new)'], {}), '((existing, new))\n', (6314, 6331), True, 'import numpy as np\n'), ((6604, 6633), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['bias'], {}), '(bias)\n', (6627, 6633), True, 'import tensorflow as tf\n'), ((9167, 9262), 'rasa.utils.tensorflow.exceptions.TFLayerConfigException', 'TFLayerConfigException', (['"""The feature type signature must contain some feature signatures."""'], {}), "(\n 'The feature type signature must contain some feature 
signatures.')\n", (9189, 9262), False, 'from rasa.utils.tensorflow.exceptions import TFLayerConfigException\n'), ((10872, 10916), 'rasa.utils.tensorflow.layers.SparseDropout', 'layers.SparseDropout', ([], {'rate': 'config[DROP_RATE]'}), '(rate=config[DROP_RATE])\n', (10892, 10916), False, 'from rasa.utils.tensorflow import layers\n'), ((11452, 11499), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'config[DROP_RATE]'}), '(rate=config[DROP_RATE])\n', (11475, 11499), True, 'import tensorflow as tf\n'), ((16846, 16938), 'rasa.utils.tensorflow.exceptions.TFLayerConfigException', 'TFLayerConfigException', (['"""The attribute signature must contain some feature signatures."""'], {}), "(\n 'The attribute signature must contain some feature signatures.')\n", (16868, 16938), False, 'from rasa.utils.tensorflow.exceptions import TFLayerConfigException\n'), ((23353, 23447), 'tensorflow.math.cumprod', 'tf.math.cumprod', (['(1 - mask_combined_sequence_sentence)'], {'axis': '(1)', 'exclusive': '(True)', 'reverse': '(True)'}), '(1 - mask_combined_sequence_sentence, axis=1, exclusive=True,\n reverse=True)\n', (23368, 23447), True, 'import tensorflow as tf\n'), ((34092, 34223), 'rasa.utils.tensorflow.exceptions.TFLayerConfigException', 'TFLayerConfigException', (['"""The attribute signature must contain some sequence-level featuresignatures but none were found."""'], {}), "(\n 'The attribute signature must contain some sequence-level featuresignatures but none were found.'\n )\n", (34114, 34223), False, 'from rasa.utils.tensorflow.exceptions import TFLayerConfigException\n'), ((34532, 34700), 'rasa.utils.tensorflow.layers.Ffnn', 'layers.Ffnn', (['config[HIDDEN_LAYERS_SIZES][attribute]', 'config[DROP_RATE]', 'config[REGULARIZATION_CONSTANT]', 'config[CONNECTION_DENSITY]'], {'layer_name_suffix': 'attribute'}), '(config[HIDDEN_LAYERS_SIZES][attribute], config[DROP_RATE],\n config[REGULARIZATION_CONSTANT], config[CONNECTION_DENSITY],\n 
layer_name_suffix=attribute)\n', (34543, 34700), False, 'from rasa.utils.tensorflow import layers\n'), ((37845, 37863), 'rasa.utils.tensorflow.layers.InputMask', 'layers.InputMask', ([], {}), '()\n', (37861, 37863), False, 'from rasa.utils.tensorflow import layers\n'), ((42026, 42069), 'tensorflow.pad', 'tf.pad', (['token_ids', '[[0, 0], [0, 1], [0, 0]]'], {}), '(token_ids, [[0, 0], [0, 1], [0, 0]])\n', (42032, 42069), True, 'import tensorflow as tf\n'), ((42098, 42145), 'tensorflow.pad', 'tf.pad', (['mask_sequence', '[[0, 0], [0, 1], [0, 0]]'], {}), '(mask_sequence, [[0, 0], [0, 1], [0, 0]])\n', (42104, 42145), True, 'import tensorflow as tf\n'), ((46450, 46464), 'tensorflow.zeros', 'tf.zeros', (['(0,)'], {}), '((0,))\n', (46458, 46464), True, 'import tensorflow as tf\n'), ((46496, 46510), 'tensorflow.zeros', 'tf.zeros', (['(0,)'], {}), '((0,))\n', (46504, 46510), True, 'import tensorflow as tf\n'), ((46992, 47011), 'tensorflow.nn.gelu', 'tf.nn.gelu', (['outputs'], {}), '(outputs)\n', (47002, 47011), True, 'import tensorflow as tf\n'), ((38499, 38625), 'rasa.utils.tensorflow.layers.DenseForSparse', 'layers.DenseForSparse', ([], {'units': '(2)', 'use_bias': '(False)', 'trainable': '(False)', 'name': 'f"""{self.SPARSE_TO_DENSE_FOR_TOKEN_IDS}.{attribute}"""'}), "(units=2, use_bias=False, trainable=False, name=\n f'{self.SPARSE_TO_DENSE_FOR_TOKEN_IDS}.{attribute}')\n", (38520, 38625), False, 'from rasa.utils.tensorflow import layers\n'), ((40181, 40200), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['f'], {}), '(f)\n', (40197, 40200), True, 'import tensorflow as tf\n'), ((47148, 47162), 'tensorflow.zeros', 'tf.zeros', (['(0,)'], {}), '((0,))\n', (47156, 47162), True, 'import tensorflow as tf\n'), ((6140, 6191), 'numpy.random.normal', 'np.random.normal', (['mean', 'std'], {'size': '(num_rows, units)'}), '(mean, std, size=(num_rows, units))\n', (6156, 6191), True, 'import numpy as np\n'), ((20344, 20592), 'rasa.utils.tensorflow.layers.Ffnn', 'layers.Ffnn', ([], 
{'layer_name_suffix': 'f"""unify_dims.{attribute}_{feature_type}"""', 'layer_sizes': '[config[CONCAT_DIMENSION][attribute]]', 'dropout_rate': 'config[DROP_RATE]', 'reg_lambda': 'config[REGULARIZATION_CONSTANT]', 'density': 'config[CONNECTION_DENSITY]'}), "(layer_name_suffix=f'unify_dims.{attribute}_{feature_type}',\n layer_sizes=[config[CONCAT_DIMENSION][attribute]], dropout_rate=config[\n DROP_RATE], reg_lambda=config[REGULARIZATION_CONSTANT], density=config[\n CONNECTION_DENSITY])\n", (20355, 20592), False, 'from rasa.utils.tensorflow import layers\n')] |
'''
Created on Aug 28, 2015
@author: wirkert
'''
import numpy as np
def collapse_image(img):
    """Flatten an n x m x nr_wavelengths image into (n*m) x nr_wavelengths.

    Note: this operates on the raw array (msi.get_image()), not on an Msi
    object.
    """
    nr_wavelengths = img.shape[-1]
    return np.reshape(img, (-1, nr_wavelengths))
def remove_masked_elements(img):
    """Drop all masked pixels from the image.

    Note: applying this method loses the original spatial shape of the image.
    """
    flat = collapse_image(img)
    # By convention, a masked msi has all reflectances of a pixel masked, so
    # inspecting the first wavelength column is sufficient.
    first_band = flat[:, 0]
    if isinstance(first_band, np.ma.masked_array):
        flat = np.delete(flat, np.where(first_band.mask), 0)
    return flat
def select_n_reflectances(img, n):
    """Draw n reflectance spectra from the image uniformly at random.

    The output has shape n x nr_wavelengths.
    """
    flat = collapse_image(img)
    shuffled_rows = np.random.permutation(flat.shape[0])
    return flat[shuffled_rows[:n], :]
def get_bands(img, bands):
    """Pick the bands bands (np.array) from the multispectral image.

    Example: for a 2048x2048x8 image, get_bands(img, [0, 3]) returns
    img[:, :, [0, 3]]. The advantage of this function is that the image does
    not need to be 2d + wavelength.
    """
    # A scalar band index yields one output band; a sequence yields len(bands).
    nr_selected = len(bands) if hasattr(bands, "__len__") else 1
    selected = collapse_image(img)[:, bands]
    # Restore the original spatial dimensions with the reduced band axis.
    target_shape = img.shape[:-1] + (nr_selected,)
    return np.reshape(selected, target_shape)
def sortout_bands(img, bands):
    """Delete the bands bands (np.array) from the multispectral image.

    Example: for a 2048x2048x8 image, sortout_bands(img, [0, 3]) returns
    img[:, :, [1, 2, 4, 5, 6, 7]]. The advantage of this function is that the
    image does not need to be 2d + wavelength.

    TODO SW: Test
    """
    bands_to_keep = np.setdiff1d(np.arange(img.shape[-1]), bands)
    return get_bands(img, bands_to_keep)
| [
"numpy.reshape",
"numpy.where",
"numpy.delete",
"numpy.setdiff1d",
"numpy.arange",
"numpy.random.permutation"
] | [((1098, 1145), 'numpy.random.permutation', 'np.random.permutation', (['collapsed_image.shape[0]'], {}), '(collapsed_image.shape[0])\n', (1119, 1145), True, 'import numpy as np\n'), ((1770, 1802), 'numpy.reshape', 'np.reshape', (['img_bands', 'new_shape'], {}), '(img_bands, new_shape)\n', (1780, 1802), True, 'import numpy as np\n'), ((2131, 2155), 'numpy.arange', 'np.arange', (['img.shape[-1]'], {}), '(img.shape[-1])\n', (2140, 2155), True, 'import numpy as np\n'), ((2175, 2205), 'numpy.setdiff1d', 'np.setdiff1d', (['all_bands', 'bands'], {}), '(all_bands, bands)\n', (2187, 2205), True, 'import numpy as np\n'), ((783, 808), 'numpy.where', 'np.where', (['one_column.mask'], {}), '(one_column.mask)\n', (791, 808), True, 'import numpy as np\n'), ((835, 878), 'numpy.delete', 'np.delete', (['collapsed_image', 'masked_elems', '(0)'], {}), '(collapsed_image, masked_elems, 0)\n', (844, 878), True, 'import numpy as np\n')] |
"""MHD rotor test script
"""
import numpy as np
from scipy.constants import pi as PI
from gawain.main import run_gawain
# --- Run configuration -------------------------------------------------------
run_name = "mhd_rotor"
output_dir = "."
cfl = 0.25  # CFL number passed to the solver
with_mhd = True
t_max = 0.15
integrator = "euler"
# "base", "lax-wendroff", "lax-friedrichs", "vanleer", "hll"
fluxer = "hll"
################ MESH #####################
nx, ny, nz = 128, 128, 1  # effectively 2-D: a single cell in z
mesh_shape = (nx, ny, nz)
n_outputs = 100
lx, ly, lz = 1, 1, 0.001
mesh_size = (lx, ly, lz)
x = np.linspace(0.0, lx, num=nx)
y = np.linspace(0.0, ly, num=ny)
z = np.linspace(0.0, lz, num=nz)
X, Y, Z = np.meshgrid(x, y, z, indexing="ij")
############ INITIAL CONDITION #################
adiabatic_idx = 1.4
# Distance of every cell from the domain center (0.5, 0.5).
R = np.sqrt((X - 0.5) ** 2 + (Y - 0.5) ** 2)
R0 = 0.1  # rotor radius
R1 = 0.115  # outer edge of the taper ring
# Taper factor between the rotor (R0) and the ambient medium (R1).
# NOTE(review): the commonly used rotor taper is (R1 - R) / (R1 - R0); this
# form has a zero denominator at R == R0 (divide warning, inf/nan values) and
# is negative for R < R0 -- confirm this is intended.
FR = (R1 - R) / (R - R0)
U0 = 2  # rotation speed scale
rho_mid_vals = 1 + 9 * FR  # density tapering from 10 (rotor) to 1 (ambient)
# NOTE(review): the inner (R <= R0) velocities below are also scaled by FR;
# a rigidly rotating rotor would usually use a factor of 1 there -- verify.
vx_in_vals = -FR * U0 * (Y - 0.5) / R0
vx_mid_vals = -FR * U0 * (Y - 0.5) / R
vy_in_vals = FR * U0 * (X - 0.5) / R0
vy_mid_vals = FR * U0 * (X - 0.5) / R
# Cell index sets for the rotor interior and the taper ring.
inner_mask = np.where(R <= R0)
middle_mask = np.where(np.logical_and(R > R0, R < R1))
# Density: 1 in the ambient medium, 10 inside the rotor, tapered in the ring.
rho = np.ones(mesh_shape)
rho[inner_mask] = 10.0
rho[middle_mask] = rho_mid_vals[middle_mask]
vx = np.zeros(mesh_shape)
vx[inner_mask] = vx_in_vals[inner_mask]
vx[middle_mask] = vx_mid_vals[middle_mask]
vy = np.zeros(mesh_shape)
vy[inner_mask] = vy_in_vals[inner_mask]
vy[middle_mask] = vy_mid_vals[middle_mask]
vz = np.zeros(mesh_shape)
# Momentum density components.
mx = rho * vx
my = rho * vy
mz = rho * vz
# Uniform magnetic field along x, normalized by sqrt(4*pi).
bx = 5 * np.ones(mesh_shape) / np.sqrt(4 * PI)
by = np.zeros(mesh_shape)
bz = np.zeros(mesh_shape)
pressure = np.ones(mesh_shape)
mag_pressure = 0.5 * (bx ** 2 + by ** 2 + bz ** 2)
# Total energy density: thermal + kinetic + magnetic.
e = (
    pressure / (adiabatic_idx - 1)
    + 0.5 * (mx * mx + my * my + mz * mz) / rho
    + mag_pressure
)
# Conserved-variable stack expected by the solver.
initial_condition = np.array([rho, mx, my, mz, e, bx, by, bz])
############## BOUNDARY CONDITION ######################
# available types: periodic, fixed
boundary_conditions = ["periodic", "periodic", "periodic"]
############## DO NOT EDIT BELOW ############################
config = {
    "run_name": run_name,
    "cfl": cfl,
    "mesh_shape": mesh_shape,
    "mesh_size": mesh_size,
    "t_max": t_max,
    "n_dumps": n_outputs,
    "initial_condition": initial_condition,
    "boundary_type": boundary_conditions,
    "adi_idx": adiabatic_idx,
    "integrator": integrator,
    "fluxer": fluxer,
    "output_dir": output_dir,
    "with_mhd": with_mhd,
}
run_gawain(config)
| [
"numpy.sqrt",
"numpy.ones",
"numpy.logical_and",
"numpy.where",
"gawain.main.run_gawain",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"numpy.meshgrid"
] | [((474, 502), 'numpy.linspace', 'np.linspace', (['(0.0)', 'lx'], {'num': 'nx'}), '(0.0, lx, num=nx)\n', (485, 502), True, 'import numpy as np\n'), ((507, 535), 'numpy.linspace', 'np.linspace', (['(0.0)', 'ly'], {'num': 'ny'}), '(0.0, ly, num=ny)\n', (518, 535), True, 'import numpy as np\n'), ((540, 568), 'numpy.linspace', 'np.linspace', (['(0.0)', 'lz'], {'num': 'nz'}), '(0.0, lz, num=nz)\n', (551, 568), True, 'import numpy as np\n'), ((579, 614), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y', 'z'], {'indexing': '"""ij"""'}), "(x, y, z, indexing='ij')\n", (590, 614), True, 'import numpy as np\n'), ((691, 731), 'numpy.sqrt', 'np.sqrt', (['((X - 0.5) ** 2 + (Y - 0.5) ** 2)'], {}), '((X - 0.5) ** 2 + (Y - 0.5) ** 2)\n', (698, 731), True, 'import numpy as np\n'), ((981, 998), 'numpy.where', 'np.where', (['(R <= R0)'], {}), '(R <= R0)\n', (989, 998), True, 'import numpy as np\n'), ((1061, 1080), 'numpy.ones', 'np.ones', (['mesh_shape'], {}), '(mesh_shape)\n', (1068, 1080), True, 'import numpy as np\n'), ((1155, 1175), 'numpy.zeros', 'np.zeros', (['mesh_shape'], {}), '(mesh_shape)\n', (1163, 1175), True, 'import numpy as np\n'), ((1265, 1285), 'numpy.zeros', 'np.zeros', (['mesh_shape'], {}), '(mesh_shape)\n', (1273, 1285), True, 'import numpy as np\n'), ((1375, 1395), 'numpy.zeros', 'np.zeros', (['mesh_shape'], {}), '(mesh_shape)\n', (1383, 1395), True, 'import numpy as np\n'), ((1492, 1512), 'numpy.zeros', 'np.zeros', (['mesh_shape'], {}), '(mesh_shape)\n', (1500, 1512), True, 'import numpy as np\n'), ((1518, 1538), 'numpy.zeros', 'np.zeros', (['mesh_shape'], {}), '(mesh_shape)\n', (1526, 1538), True, 'import numpy as np\n'), ((1551, 1570), 'numpy.ones', 'np.ones', (['mesh_shape'], {}), '(mesh_shape)\n', (1558, 1570), True, 'import numpy as np\n'), ((1756, 1798), 'numpy.array', 'np.array', (['[rho, mx, my, mz, e, bx, by, bz]'], {}), '([rho, mx, my, mz, e, bx, by, bz])\n', (1764, 1798), True, 'import numpy as np\n'), ((2398, 2416), 'gawain.main.run_gawain', 
'run_gawain', (['config'], {}), '(config)\n', (2408, 2416), False, 'from gawain.main import run_gawain\n'), ((1022, 1052), 'numpy.logical_and', 'np.logical_and', (['(R > R0)', '(R < R1)'], {}), '(R > R0, R < R1)\n', (1036, 1052), True, 'import numpy as np\n'), ((1471, 1486), 'numpy.sqrt', 'np.sqrt', (['(4 * PI)'], {}), '(4 * PI)\n', (1478, 1486), True, 'import numpy as np\n'), ((1449, 1468), 'numpy.ones', 'np.ones', (['mesh_shape'], {}), '(mesh_shape)\n', (1456, 1468), True, 'import numpy as np\n')] |
import itertools
import numba as nb
import numpy as np
import pandas as pd
import pytest
from sid.contacts import _consolidate_reason_of_infection
from sid.contacts import _numpy_replace
from sid.contacts import calculate_infections_by_contacts
from sid.contacts import create_group_indexer
@pytest.mark.unit
@pytest.mark.parametrize(
    "states, group_code_name, expected",
    [
        (
            pd.DataFrame({"a": [1] * 7 + [0] * 8}),
            "a",
            [list(range(7, 15)), list(range(7))],
        ),
        (
            pd.DataFrame({"a": pd.Series([0, 1, 2, 3, 0, 1, 2, 3]).astype("category")}),
            "a",
            [[0, 4], [1, 5], [2, 6], [3, 7]],
        ),
        (
            pd.DataFrame({"a": pd.Series([0, 1, 2, 3, 0, 1, 2, -1])}),
            "a",
            [[0, 4], [1, 5], [2, 6], [3]],
        ),
    ],
)
def test_create_group_indexer(states, group_code_name, expected):
    """The indexer lists, per group code, the row positions in that group."""
    indexer = create_group_indexer(states, group_code_name)
    assert [positions.tolist() for positions in indexer] == expected
@pytest.fixture()
def households_w_one_infected():
    """Two households of four people each; only person 0 is infectious."""
    household_codes = [0, 0, 0, 0, 1, 1, 1, 1]
    states = pd.DataFrame(
        {
            "infectious": [True] + [False] * 7,
            "cd_infectious_true": [-1] * 8,
            "immunity": [1.0] + [0.0] * 7,
            "group_codes_households": household_codes,
            "households": household_codes,
            "group_codes_non_rec": household_codes,
            "n_has_infected": 0,
            "virus_strain": pd.Series(["base_strain"] + [pd.NA] * 7, dtype="category"),
        }
    )
    params = pd.DataFrame(
        data=1,
        columns=["value"],
        index=pd.MultiIndex.from_tuples(
            [("infection_prob", "households", "households")]
        ),
    )
    # One recurrent contact model: everybody meets their household.
    recurrent_indexer = nb.typed.List()
    recurrent_indexer.append(create_group_indexer(states, ["households"]))
    cum_probs = nb.typed.List()
    cum_probs.append(np.zeros((0, 0)))
    virus_strains = {
        "names": ["base_strain"],
        "contagiousness_factor": np.ones(1),
        "immunity_resistance_factor": np.zeros(1),
    }
    return {
        "states": states,
        "recurrent_contacts": np.ones((len(states), 1), dtype=bool),
        "random_contacts": None,
        "params": params,
        "indexers": {"recurrent": recurrent_indexer},
        "assortative_matching_cum_probs": cum_probs,
        "group_codes_info": {"households": {"name": "group_codes_households"}},
        "susceptibility_factor": np.ones(len(states)),
        "virus_strains": virus_strains,
        "seasonality_factor": pd.Series([1], index=["households"]),
    }
@pytest.mark.integration
def test_calculate_infections_only_recurrent_all_participate(
    households_w_one_infected,
):
    """The single infectious person infects the rest of their household."""
    (
        infected,
        n_additionally_infected,
        missed_contacts,
        _,
    ) = calculate_infections_by_contacts(
        **households_w_one_infected,
        contact_models={"households": {"is_recurrent": True}},
        seed=itertools.count(),
    )
    states = households_w_one_infected["states"]
    assert infected.equals(pd.Series([-1] + [0] * 3 + [-1] * 4, dtype="int8"))
    total_infected = (states["n_has_infected"] + n_additionally_infected).astype(
        np.int32
    )
    assert total_infected.equals(pd.Series([3] + [0] * 7, dtype="int32"))
    assert missed_contacts is None
@pytest.mark.integration
def test_calculate_infections_only_recurrent_sick_skips(
    households_w_one_infected,
):
    """If the infectious person skips the meeting, nobody gets infected."""
    households_w_one_infected["recurrent_contacts"][0] = 0
    (
        infected,
        n_additionally_infected,
        missed_contacts,
        _,
    ) = calculate_infections_by_contacts(
        **households_w_one_infected,
        contact_models={"households": {"is_recurrent": True}},
        seed=itertools.count(),
    )
    assert infected.equals(pd.Series([-1] * 8, dtype="int8"))
    assert n_additionally_infected.astype(np.int32).equals(
        pd.Series([0] * 8, dtype="int32")
    )
    assert missed_contacts is None
@pytest.mark.integration
def test_calculate_infections_only_recurrent_one_skips(
    households_w_one_infected,
):
    """A household member who skips the household meeting is not infected."""
    # 2nd person does not participate in household meeting
    households_w_one_infected["recurrent_contacts"][1] = 0
    (
        infected,
        n_additionally_infected,
        missed_contacts,
        _,
    ) = calculate_infections_by_contacts(
        **households_w_one_infected,
        contact_models={"households": {"is_recurrent": True}},
        seed=itertools.count(),
    )
    assert infected.equals(pd.Series([-1, -1] + [0] * 2 + [-1] * 4, dtype="int8"))
    assert n_additionally_infected.astype(np.int32).equals(
        pd.Series([2] + [0] * 7, dtype="int32")
    )
    assert missed_contacts is None
@pytest.mark.integration
def test_calculate_infections_only_recurrent_one_immune(
    households_w_one_infected,
):
    """A fully immune household member is not infected by the meeting."""
    households_w_one_infected["states"].loc[1, "immunity"] = 1.0
    (
        infected,
        n_additionally_infected,
        missed_contacts,
        _,
    ) = calculate_infections_by_contacts(
        **households_w_one_infected,
        contact_models={"households": {"is_recurrent": True}},
        seed=itertools.count(),
    )
    assert infected.equals(pd.Series([-1, -1] + [0] * 2 + [-1] * 4, dtype="int8"))
    assert n_additionally_infected.astype(np.int32).equals(
        pd.Series([2] + [0] * 7, dtype="int32")
    )
    assert missed_contacts is None
@pytest.mark.integration
def test_calculate_infections_only_non_recurrent(households_w_one_infected):
    """With random contacts only, person 0 infects exactly one other person."""
    random_contacts = households_w_one_infected.pop("recurrent_contacts")
    random_contacts[0] = 1
    params = pd.DataFrame(
        data=1,
        columns=["value"],
        index=pd.MultiIndex.from_tuples([("infection_prob", "non_rec", "non_rec")]),
    )
    states = households_w_one_infected["states"]
    random_indexer = nb.typed.List()
    random_indexer.append(create_group_indexer(states, ["group_codes_non_rec"]))
    cum_probs = nb.typed.List()
    cum_probs.append(np.array([[0.8, 1], [0.2, 1]]))
    (
        infected,
        n_additionally_infected,
        missed_contacts,
        _,
    ) = calculate_infections_by_contacts(
        states=states,
        random_contacts=random_contacts,
        recurrent_contacts=None,
        params=params,
        indexers={"random": random_indexer},
        assortative_matching_cum_probs=cum_probs,
        contact_models={"non_rec": {"is_recurrent": False}},
        group_codes_info={"non_rec": {"name": "group_codes_non_rec"}},
        susceptibility_factor=households_w_one_infected["susceptibility_factor"],
        virus_strains=households_w_one_infected["virus_strains"],
        seasonality_factor=pd.Series([1], index=["non_rec"]),
        seed=itertools.count(),
    )
    assert infected.equals(pd.Series([-1, -1, 0, -1, -1, -1, -1, -1], dtype="int8"))
    assert n_additionally_infected.astype(np.int32).equals(
        pd.Series([1] + [0] * 7, dtype="int32")
    )
    assert not np.any(missed_contacts)
@pytest.mark.unit
def test_consolidate_reason_of_infection():
    """Recurrent and random infection channels are merged into one label."""
    by_recurrent = np.array([0, 1, 1, -1, -1, -1, 0, -1])
    by_random = np.array([-1, -1, -1, 0, 0, 1, 0, -1])
    contact_models = {
        "a": {"is_recurrent": True},
        "b": {"is_recurrent": True},
        "c": {"is_recurrent": False},
        "d": {"is_recurrent": False},
    }
    result = _consolidate_reason_of_infection(by_recurrent, by_random, contact_models)
    expected = pd.Series(
        pd.Categorical(
            ["a", "b", "b", "c", "c", "d", "a", "not_infected_by_contact"],
            categories=["not_infected_by_contact", "a", "b", "c", "d"],
        )
    )
    assert result.equals(expected)
@pytest.mark.unit
def test_numpy_replace():
    """Values 4 and 5 are mapped to 6 and 7; all other values are untouched."""
    values = np.arange(6)
    mapping = {4: 6, 5: 7}
    replaced = _numpy_replace(values, mapping)
    assert (replaced == np.array([0, 1, 2, 3, 6, 7])).all()
| [
"pandas.Series",
"sid.contacts._consolidate_reason_of_infection",
"numpy.ones",
"pandas.DataFrame",
"numba.typed.List",
"numpy.any",
"sid.contacts.create_group_indexer",
"pandas.Categorical",
"numpy.array",
"numpy.zeros",
"itertools.count",
"pytest.fixture",
"pandas.MultiIndex.from_tuples",
... | [((1059, 1075), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1073, 1075), False, 'import pytest\n'), ((938, 983), 'sid.contacts.create_group_indexer', 'create_group_indexer', (['states', 'group_code_name'], {}), '(states, group_code_name)\n', (958, 983), False, 'from sid.contacts import create_group_indexer\n'), ((1928, 1943), 'numba.typed.List', 'nb.typed.List', ([], {}), '()\n', (1941, 1943), True, 'import numba as nb\n'), ((3223, 3273), 'pandas.Series', 'pd.Series', (['([-1] + [0] * 3 + [-1] * 4)'], {'dtype': '"""int8"""'}), "([-1] + [0] * 3 + [-1] * 4, dtype='int8')\n", (3232, 3273), True, 'import pandas as pd\n'), ((3302, 3341), 'pandas.Series', 'pd.Series', (['([3] + [0] * 7)'], {'dtype': '"""int32"""'}), "([3] + [0] * 7, dtype='int32')\n", (3311, 3341), True, 'import pandas as pd\n'), ((4086, 4119), 'pandas.Series', 'pd.Series', (['([-1] * 8)'], {'dtype': '"""int8"""'}), "([-1] * 8, dtype='int8')\n", (4095, 4119), True, 'import pandas as pd\n'), ((4148, 4181), 'pandas.Series', 'pd.Series', (['([0] * 8)'], {'dtype': '"""int32"""'}), "([0] * 8, dtype='int32')\n", (4157, 4181), True, 'import pandas as pd\n'), ((4936, 4990), 'pandas.Series', 'pd.Series', (['([-1, -1] + [0] * 2 + [-1] * 4)'], {'dtype': '"""int8"""'}), "([-1, -1] + [0] * 2 + [-1] * 4, dtype='int8')\n", (4945, 4990), True, 'import pandas as pd\n'), ((5019, 5058), 'pandas.Series', 'pd.Series', (['([2] + [0] * 7)'], {'dtype': '"""int32"""'}), "([2] + [0] * 7, dtype='int32')\n", (5028, 5058), True, 'import pandas as pd\n'), ((5761, 5815), 'pandas.Series', 'pd.Series', (['([-1, -1] + [0] * 2 + [-1] * 4)'], {'dtype': '"""int8"""'}), "([-1, -1] + [0] * 2 + [-1] * 4, dtype='int8')\n", (5770, 5815), True, 'import pandas as pd\n'), ((5844, 5883), 'pandas.Series', 'pd.Series', (['([2] + [0] * 7)'], {'dtype': '"""int32"""'}), "([2] + [0] * 7, dtype='int32')\n", (5853, 5883), True, 'import pandas as pd\n'), ((6656, 6671), 'numba.typed.List', 'nb.typed.List', ([], {}), '()\n', (6669, 6671), 
True, 'import numba as nb\n'), ((7562, 7618), 'pandas.Series', 'pd.Series', (['[-1, -1, 0, -1, -1, -1, -1, -1]'], {'dtype': '"""int8"""'}), "([-1, -1, 0, -1, -1, -1, -1, -1], dtype='int8')\n", (7571, 7618), True, 'import pandas as pd\n'), ((7647, 7686), 'pandas.Series', 'pd.Series', (['([1] + [0] * 7)'], {'dtype': '"""int32"""'}), "([1] + [0] * 7, dtype='int32')\n", (7656, 7686), True, 'import pandas as pd\n'), ((7978, 8016), 'numpy.array', 'np.array', (['[0, 1, 1, -1, -1, -1, 0, -1]'], {}), '([0, 1, 1, -1, -1, -1, 0, -1])\n', (7986, 8016), True, 'import numpy as np\n'), ((8046, 8084), 'numpy.array', 'np.array', (['[-1, -1, -1, 0, 0, 1, 0, -1]'], {}), '([-1, -1, -1, 0, 0, 1, 0, -1])\n', (8054, 8084), True, 'import numpy as np\n'), ((8279, 8382), 'sid.contacts._consolidate_reason_of_infection', '_consolidate_reason_of_infection', (['was_infected_by_recurrent', 'was_infected_by_random', 'contact_models'], {}), '(was_infected_by_recurrent,\n was_infected_by_random, contact_models)\n', (8311, 8382), False, 'from sid.contacts import _consolidate_reason_of_infection\n'), ((8698, 8710), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (8707, 8710), True, 'import numpy as np\n'), ((8755, 8784), 'sid.contacts._numpy_replace', '_numpy_replace', (['x', 'replace_to'], {}), '(x, replace_to)\n', (8769, 8784), False, 'from sid.contacts import _numpy_replace\n'), ((1794, 1809), 'numba.typed.List', 'nb.typed.List', ([], {}), '()\n', (1807, 1809), True, 'import numba as nb\n'), ((1844, 1888), 'sid.contacts.create_group_indexer', 'create_group_indexer', (['states', "['households']"], {}), "(states, ['households'])\n", (1864, 1888), False, 'from sid.contacts import create_group_indexer\n'), ((1986, 2002), 'numpy.zeros', 'np.zeros', (['(0, 0)'], {}), '((0, 0))\n', (1994, 2002), True, 'import numpy as np\n'), ((2169, 2179), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (2176, 2179), True, 'import numpy as np\n'), ((2219, 2230), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', 
(2227, 2230), True, 'import numpy as np\n'), ((2681, 2717), 'pandas.Series', 'pd.Series', (['[1]'], {'index': "['households']"}), "([1], index=['households'])\n", (2690, 2717), True, 'import pandas as pd\n'), ((6517, 6532), 'numba.typed.List', 'nb.typed.List', ([], {}), '()\n', (6530, 6532), True, 'import numba as nb\n'), ((6564, 6617), 'sid.contacts.create_group_indexer', 'create_group_indexer', (['states', "['group_codes_non_rec']"], {}), "(states, ['group_codes_non_rec'])\n", (6584, 6617), False, 'from sid.contacts import create_group_indexer\n'), ((6714, 6744), 'numpy.array', 'np.array', (['[[0.8, 1], [0.2, 1]]'], {}), '([[0.8, 1], [0.2, 1]])\n', (6722, 6744), True, 'import numpy as np\n'), ((7853, 7881), 'numpy.any', 'np.any', (['calc_missed_contacts'], {}), '(calc_missed_contacts)\n', (7859, 7881), True, 'import numpy as np\n'), ((8428, 8574), 'pandas.Categorical', 'pd.Categorical', (["['a', 'b', 'b', 'c', 'c', 'd', 'a', 'not_infected_by_contact']"], {'categories': "['not_infected_by_contact', 'a', 'b', 'c', 'd']"}), "(['a', 'b', 'b', 'c', 'c', 'd', 'a',\n 'not_infected_by_contact'], categories=['not_infected_by_contact', 'a',\n 'b', 'c', 'd'])\n", (8442, 8574), True, 'import pandas as pd\n'), ((407, 445), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1] * 7 + [0] * 8}"], {}), "({'a': [1] * 7 + [0] * 8})\n", (419, 445), True, 'import pandas as pd\n'), ((1498, 1556), 'pandas.Series', 'pd.Series', (["(['base_strain'] + [pd.NA] * 7)"], {'dtype': '"""category"""'}), "(['base_strain'] + [pd.NA] * 7, dtype='category')\n", (1507, 1556), True, 'import pandas as pd\n'), ((1659, 1734), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (["[('infection_prob', 'households', 'households')]"], {}), "([('infection_prob', 'households', 'households')])\n", (1684, 1734), True, 'import pandas as pd\n'), ((3129, 3146), 'itertools.count', 'itertools.count', ([], {}), '()\n', (3144, 3146), False, 'import itertools\n'), ((4041, 4058), 'itertools.count', 
'itertools.count', ([], {}), '()\n', (4056, 4058), False, 'import itertools\n'), ((4891, 4908), 'itertools.count', 'itertools.count', ([], {}), '()\n', (4906, 4908), False, 'import itertools\n'), ((5716, 5733), 'itertools.count', 'itertools.count', ([], {}), '()\n', (5731, 5733), False, 'import itertools\n'), ((6365, 6434), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (["[('infection_prob', 'non_rec', 'non_rec')]"], {}), "([('infection_prob', 'non_rec', 'non_rec')])\n", (6390, 6434), True, 'import pandas as pd\n'), ((7469, 7502), 'pandas.Series', 'pd.Series', (['[1]'], {'index': "['non_rec']"}), "([1], index=['non_rec'])\n", (7478, 7502), True, 'import pandas as pd\n'), ((7517, 7534), 'itertools.count', 'itertools.count', ([], {}), '()\n', (7532, 7534), False, 'import itertools\n'), ((8808, 8836), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 6, 7]'], {}), '([0, 1, 2, 3, 6, 7])\n', (8816, 8836), True, 'import numpy as np\n'), ((739, 775), 'pandas.Series', 'pd.Series', (['[0, 1, 2, 3, 0, 1, 2, -1]'], {}), '([0, 1, 2, 3, 0, 1, 2, -1])\n', (748, 775), True, 'import pandas as pd\n'), ((566, 601), 'pandas.Series', 'pd.Series', (['[0, 1, 2, 3, 0, 1, 2, 3]'], {}), '([0, 1, 2, 3, 0, 1, 2, 3])\n', (575, 601), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
This module allows to convert standard data representations
(e.g., a spike train stored as Neo SpikeTrain object)
into other representations useful to perform calculations on the data.
An example is the representation of a spike train as a sequence of 0-1 values
(binned spike train).
.. autosummary::
:toctree: _toctree/conversion
BinnedSpikeTrain
BinnedSpikeTrainView
binarize
Examples
********
>>> import neo
>>> import quantities as pq
>>> from elephant.conversion import BinnedSpikeTrain
>>> spiketrains = [
... neo.SpikeTrain([0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7], t_stop=9, units='s'),
... neo.SpikeTrain([0.1, 0.7, 1.2, 2.2, 4.3, 5.5, 8.0], t_stop=9, units='s')
... ]
>>> bst = BinnedSpikeTrain(spiketrains, bin_size=1 * pq.s)
>>> bst
BinnedSpikeTrain(t_start=0.0 s, t_stop=9.0 s, bin_size=1.0 s; shape=(2, 9))
>>> bst.to_array()
array([[2, 1, 0, 1, 1, 1, 1, 0, 0],
[2, 1, 1, 0, 1, 1, 0, 0, 1]], dtype=int32)
Binarizing the binned matrix.
>>> bst.to_bool_array()
array([[ True, True, False, True, True, True, True, False, False],
[ True, True, True, False, True, True, False, False, True]])
>>> bst_binary = bst.binarize()
>>> bst_binary
BinnedSpikeTrainView(t_start=0.0 s, t_stop=9.0 s, bin_size=1.0 s; shape=(2, 9))
>>> bst_binary.to_array()
array([[1, 1, 0, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 0, 1, 1, 0, 0, 1]], dtype=int32)
Slicing.
>>> bst.time_slice(t_stop=3.5 * pq.s)
BinnedSpikeTrainView(t_start=0.0 s, t_stop=3.0 s, bin_size=1.0 s; shape=(2, 3))
>>> bst[0, 1:-3]
BinnedSpikeTrainView(t_start=1.0 s, t_stop=6.0 s, bin_size=1.0 s; shape=(1, 5))
Generate a realisation of spike trains from the binned version.
>>> bst.to_spike_trains(spikes='center')
[<SpikeTrain(array([0.33333333, 0.66666667, 1.5 , 3.5 , 4.5 ,
5.5 , 6.5 ]) * s, [0.0 s, 9.0 s])>,
<SpikeTrain(array([0.33333333, 0.66666667, 1.5 , 2.5 , 4.5 ,
5.5 , 8.5 ]) * s, [0.0 s, 9.0 s])>]
Check the correctness of a spike train realisation.
>>> BinnedSpikeTrain(bst.to_spike_trains(), bin_size=bst.bin_size) == bst
True
Rescale the units of a binned spike train without changing the data.
>>> bst.rescale('ms')
>>> bst
BinnedSpikeTrain(t_start=0.0 ms, t_stop=9000.0 ms, bin_size=1000.0 ms;
shape=(2, 9))
:copyright: Copyright 2014-2020 by the Elephant team, see `doc/authors.rst`.
:license: BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function, unicode_literals
import math
import warnings
import neo
import numpy as np
import quantities as pq
import scipy.sparse as sps
from elephant.utils import is_binary, deprecated_alias, is_time_quantity, \
check_neo_consistency, round_binning_errors
__all__ = [
"binarize",
"BinnedSpikeTrain"
]
def binarize(spiketrain, sampling_rate=None, t_start=None, t_stop=None,
             return_times=False):
    """
    Return a boolean array marking the time bins in which spikes occurred.

    Time points are spaced ``1 / sampling_rate`` apart and cover the
    interval [`t_start`, `t_stop`].  Each spike is assigned to the closest
    time point (a spike exactly between two points goes to the later one),
    and an entry is ``True`` when at least one spike falls onto the
    corresponding time point.  The upper edge `t_stop` is inclusive.

    Parameters
    ----------
    spiketrain : neo.SpikeTrain or pq.Quantity or np.ndarray
        The spike times; they do not need to be sorted.
    sampling_rate : float or pq.Quantity, optional
        Sampling rate defining the spacing of the time points.  If None,
        it is taken from ``spiketrain.sampling_rate``.
        Default: None
    t_start : float or pq.Quantity, optional
        First time point.  If None, taken from ``spiketrain.t_start``,
        falling back to 0.  Spikes below `t_start` are ignored.
        Default: None
    t_stop : float or pq.Quantity, optional
        Last time point.  If None, taken from ``spiketrain.t_stop``,
        falling back to the maximum spike time.  Spikes above `t_stop`
        are ignored.
        Default: None
    return_times : bool, optional
        If True, also return the time points themselves.
        Default: False

    Returns
    -------
    values : np.ndarray of bool
        True at the time points where one or more spikes occurred.
    times : np.ndarray or pq.Quantity, optional
        The time points, carrying the units of `spiketrain` if any.
        Only returned when `return_times` is True.

    Raises
    ------
    TypeError
        If `spiketrain` carries no units but `t_start`, `t_stop`, or
        `sampling_rate` is a `pq.Quantity`.
    ValueError
        If `sampling_rate` is neither given explicitly nor available as
        an attribute of `spiketrain`.

    Notes
    -----
    If `spiketrain` is a `pq.Quantity` or `neo.SpikeTrain` and `t_start`,
    `t_stop` or `sampling_rate` is not, the plain numbers are assumed to
    be expressed in the units of `spiketrain`.
    """
    # Fall back to the spiketrain's own attributes for missing parameters.
    if sampling_rate is None:
        sampling_rate = getattr(spiketrain, 'sampling_rate', None)
        if sampling_rate is None:
            raise ValueError('sampling_rate must either be explicitly defined '
                             'or must be an attribute of spiketrain')
    if t_start is None:
        t_start = getattr(spiketrain, 't_start', 0)
    if t_stop is None:
        t_stop = getattr(spiketrain, 't_stop', np.max(spiketrain))

    # The bin spacing is the sampling period, not the rate itself.
    sampling_period = 1. / sampling_rate

    # Strip the units from the spike times, remembering them for the
    # optional time axis of the output.
    if hasattr(spiketrain, 'units'):
        units = spiketrain.units
        spiketrain = spiketrain.magnitude
    else:
        units = None

    def _to_magnitude(value, name):
        # Convert a possibly unit-carrying parameter to a plain magnitude
        # expressed in the spiketrain's units.
        if not hasattr(value, 'units'):
            return value
        if units is None:
            raise TypeError('{} cannot be a Quantity if '
                            'spiketrain is not a quantity'.format(name))
        return value.rescale(units).magnitude

    sampling_period = _to_magnitude(sampling_period, 'sampling_period')
    t_start = _to_magnitude(t_start, 't_start')
    t_stop = _to_magnitude(t_stop, 't_stop')

    # Bin edges are centered around the time points, so a spike exactly
    # between two points lands in the later bin.
    edges = np.arange(t_start - sampling_period / 2,
                      t_stop + sampling_period * 3 / 2,
                      sampling_period)
    # Drop edges that would count spikes outside [t_start, t_stop] ...
    if edges[-2] > t_stop:
        edges = edges[:-1]
    if edges[1] < t_start:
        edges = edges[1:]
    # ... and clip the outermost edges to the interval itself.
    edges[0] = t_start
    edges[-1] = t_stop

    binarized = np.histogram(spiketrain, edges)[0].astype('bool')

    if not return_times:
        return binarized
    times = np.arange(t_start, t_stop + sampling_period, sampling_period)
    if units is None:
        return binarized, times
    return binarized, pq.Quantity(times, units=units)
###########################################################################
#
# Methods to calculate parameters, t_start, t_stop, bin size,
# number of bins
#
###########################################################################
class BinnedSpikeTrain(object):
"""
Class which calculates a binned spike train and provides methods to
transform the binned spike train to a boolean matrix or a matrix with
counted time points.
A binned spike train represents the occurrence of spikes in a certain time
frame.
I.e., a time series like [0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] is
represented as [0, 0, 1, 3, 4, 5, 6]. The outcome is dependent on given
parameter such as size of bins, number of bins, start and stop points.
A boolean matrix represents the binned spike train in a binary (True/False)
manner. Its rows represent the number of spike trains and the columns
represent the binned index position of a spike in a spike train.
The calculated matrix entry containing `True` indicates a spike.
A matrix with counted time points is calculated the same way, but its
entries contain the number of spikes that occurred in the given bin of the
given spike train.
Note that with most common parameter combinations spike times can end up
on bin edges. This makes the binning susceptible to rounding errors which
is accounted for by moving spikes which are within tolerance of the next
bin edge into the following bin. This can be adjusted using the tolerance
parameter and turned off by setting `tolerance=None`.
Parameters
----------
spiketrains : neo.SpikeTrain or list of neo.SpikeTrain or np.ndarray
Spike train(s) to be binned.
bin_size : pq.Quantity, optional
Width of a time bin.
Default: None
n_bins : int, optional
Number of bins of the binned spike train.
Default: None
t_start : pq.Quantity, optional
Time of the left edge of the first bin (left extreme; included).
Default: None
t_stop : pq.Quantity, optional
Time of the right edge of the last bin (right extreme; excluded).
Default: None
tolerance : float, optional
Tolerance for rounding errors in the binning process and in the input
data
Default: 1e-8
sparse_format : {'csr', 'csc'}, optional
The sparse matrix format. By default, CSR format is used to perform
slicing and computations efficiently.
Default: 'csr'
Raises
------
AttributeError
If less than 3 optional parameters are `None`.
TypeError
If `spiketrains` is an np.ndarray with dimensionality different than
NxM or
if type of `n_bins` is not an `int` or `n_bins` < 0.
ValueError
When number of bins calculated from `t_start`, `t_stop` and `bin_size`
differs from provided `n_bins` or
if `t_stop` of any spike train is smaller than any `t_start` or
if any spike train does not cover the full [`t_start`, t_stop`] range.
Warns
-----
UserWarning
If some spikes fall outside of [`t_start`, `t_stop`] range
Notes
-----
There are four minimal configurations of the optional parameters which have
to be provided, otherwise a `ValueError` will be raised:
* t_start, n_bins, bin_size
* t_start, n_bins, t_stop
* t_start, bin_size, t_stop
* t_stop, n_bins, bin_size
If `spiketrains` is a `neo.SpikeTrain` or a list thereof, it is enough to
explicitly provide only one parameter: `n_bins` or `bin_size`. The
`t_start` and `t_stop` will be calculated from given `spiketrains` (max
`t_start` and min `t_stop` of `neo.SpikeTrain`s).
Missing parameter will be calculated automatically.
All parameters will be checked for consistency. A corresponding error will
be raised, if one of the four parameters does not match the consistency
requirements.
"""
    @deprecated_alias(binsize='bin_size', num_bins='n_bins')
    def __init__(self, spiketrains, bin_size=None, n_bins=None, t_start=None,
                 t_stop=None, tolerance=1e-8, sparse_format="csr"):
        """
        Bin the given spike train(s) and store the result as a sparse
        matrix in :attr:`sparse_matrix`.

        See the class docstring for the parameter descriptions and the
        allowed parameter combinations.
        """
        if sparse_format not in ("csr", "csc"):
            raise ValueError(f"Invalid 'sparse_format': {sparse_format}. "
                             "Available: 'csr' and 'csc'")

        # Converting spiketrains to a list, if spiketrains is one
        # SpikeTrain object
        if isinstance(spiketrains, neo.SpikeTrain):
            spiketrains = [spiketrains]

        # The input params will be rescaled later to unit-less floats
        self.tolerance = tolerance
        self._t_start = t_start
        self._t_stop = t_stop
        self.n_bins = n_bins
        self._bin_size = bin_size
        self.units = None  # will be set later

        # Check all parameter, set also missing values
        self._resolve_input_parameters(spiketrains)

        # Now create the sparse matrix
        self.sparse_matrix = self._create_sparse_matrix(
            spiketrains, sparse_format=sparse_format)
@property
def shape(self):
"""
The shape of the sparse matrix.
"""
return self.sparse_matrix.shape
@property
def bin_size(self):
"""
Bin size quantity.
"""
return pq.Quantity(self._bin_size, units=self.units, copy=False)
@property
def t_start(self):
"""
t_start quantity; spike times below this value have been ignored.
"""
return pq.Quantity(self._t_start, units=self.units, copy=False)
@property
def t_stop(self):
"""
t_stop quantity; spike times above this value have been ignored.
"""
return pq.Quantity(self._t_stop, units=self.units, copy=False)
@property
def binsize(self):
"""
Deprecated in favor of :attr:`bin_size`.
"""
warnings.warn("'.binsize' is deprecated; use '.bin_size'",
DeprecationWarning)
return self._bin_size
@property
def num_bins(self):
"""
Deprecated in favor of :attr:`n_bins`.
"""
warnings.warn("'.num_bins' is deprecated; use '.n_bins'",
DeprecationWarning)
return self.n_bins
def __repr__(self):
return f"{type(self).__name__}(t_start={str(self.t_start)}, " \
f"t_stop={str(self.t_stop)}, bin_size={str(self.bin_size)}; " \
f"shape={self.shape}, " \
f"format={self.sparse_matrix.__class__.__name__})"
def rescale(self, units):
"""
Inplace rescaling to the new quantity units.
Parameters
----------
units : pq.Quantity or str
New quantity units.
Raises
------
TypeError
If the input units are not quantities.
"""
if isinstance(units, str):
units = pq.Quantity(1, units=units)
if units == self.units:
# do nothing
return
if not isinstance(units, pq.Quantity):
raise TypeError("The input units must be quantities or string")
scale = self.units.rescale(units).item()
self._t_stop *= scale
self._t_start *= scale
self._bin_size *= scale
self.units = units
    def __resolve_binned(self, spiketrains):
        """
        Interpret `spiketrains` as an already binned MxN count matrix and
        infer `n_bins`, `bin_size`, `t_start` and `t_stop` from its shape
        and the parameters that were supplied to the constructor.

        Raises
        ------
        ValueError
            If the input is not a 2D numeric matrix, if `n_bins` was
            explicitly given, or if too few of `bin_size`, `t_start` and
            `t_stop` are set to derive the missing ones.
        """
        spiketrains = np.asarray(spiketrains)
        # Object dtype means ragged rows or raw spike-time arrays, which
        # cannot be a binned matrix.
        if spiketrains.ndim != 2 or spiketrains.dtype == np.dtype('O'):
            raise ValueError("If the input is not a spiketrain(s), it "
                             "must be an MxN numpy array, each cell of "
                             "which represents the number of (binned) "
                             "spikes that fall in an interval - not "
                             "raw spike times.")
        if self.n_bins is not None:
            raise ValueError("When the input is a binned matrix, 'n_bins' "
                             "must be set to None - it's extracted from the "
                             "input shape.")
        # The number of bins is the number of columns of the matrix.
        self.n_bins = spiketrains.shape[1]
        if self._bin_size is None:
            if self._t_start is None or self._t_stop is None:
                raise ValueError("To determine the bin size, both 't_start' "
                                 "and 't_stop' must be set")
            self._bin_size = (self._t_stop - self._t_start) / self.n_bins
        if self._t_start is None and self._t_stop is None:
            raise ValueError("Either 't_start' or 't_stop' must be set")
        # Derive the missing end of the interval from the other end.
        if self._t_start is None:
            self._t_start = self._t_stop - self._bin_size * self.n_bins
        if self._t_stop is None:
            self._t_stop = self._t_start + self._bin_size * self.n_bins
    def _resolve_input_parameters(self, spiketrains):
        """
        Calculates `t_start`, `t_stop` from given spike trains.
        The start and stop points are calculated from given spike trains only
        if they are not calculable from given parameters or the number of
        parameters is less than three.

        After this call, `self._t_start`, `self._t_stop` and
        `self._bin_size` are unit-less floats expressed in `self.units`.

        Parameters
        ----------
        spiketrains : neo.SpikeTrain or list or np.ndarray of neo.SpikeTrain

        Raises
        ------
        ValueError
            If the parameters are mutually inconsistent or the requested
            interval is not covered by all spike trains.
        TypeError
            If the resolved number of bins is not a positive integer.
        """
        def get_n_bins():
            # Number of bins implied by t_start, t_stop and bin_size,
            # with float rounding errors corrected within self.tolerance.
            n_bins = (self._t_stop - self._t_start) / self._bin_size
            if isinstance(n_bins, pq.Quantity):
                n_bins = n_bins.simplified.item()
            n_bins = round_binning_errors(n_bins, tolerance=self.tolerance)
            return n_bins

        def check_n_bins_consistency():
            if self.n_bins != get_n_bins():
                raise ValueError(
                    "Inconsistent arguments: t_start ({t_start}), "
                    "t_stop ({t_stop}), bin_size ({bin_size}), and "
                    "n_bins ({n_bins})".format(
                        t_start=self.t_start, t_stop=self.t_stop,
                        bin_size=self.bin_size, n_bins=self.n_bins))

        def check_consistency():
            if self.t_start >= self.t_stop:
                raise ValueError("t_start must be smaller than t_stop")
            if not isinstance(self.n_bins, int) or self.n_bins <= 0:
                raise TypeError("The number of bins ({}) must be a positive "
                                "integer".format(self.n_bins))

        if not _check_neo_spiketrain(spiketrains):
            # a binned numpy matrix
            self.__resolve_binned(spiketrains)
            self.units = self._bin_size.units
            check_n_bins_consistency()
            check_consistency()
            self._t_start = self._t_start.rescale(self.units).item()
            self._t_stop = self._t_stop.rescale(self.units).item()
            self._bin_size = self._bin_size.rescale(self.units).item()
            return

        if self._bin_size is None and self.n_bins is None:
            raise ValueError("Either 'bin_size' or 'n_bins' must be given")

        try:
            check_neo_consistency(spiketrains,
                                  object_type=neo.SpikeTrain,
                                  t_start=self._t_start,
                                  t_stop=self._t_stop,
                                  tolerance=self.tolerance)
        except ValueError as er:
            # different t_start/t_stop
            raise ValueError(er, "If you want to bin over the shared "
                                 "[t_start, t_stop] interval, provide "
                                 "shared t_start and t_stop explicitly, "
                                 "which can be obtained like so: "
                                 "t_start, t_stop = elephant.utils."
                                 "get_common_start_stop_times(spiketrains)"
                             )

        if self._t_start is None:
            self._t_start = spiketrains[0].t_start
        if self._t_stop is None:
            self._t_stop = spiketrains[0].t_stop
        # At this point, all spiketrains share the same units.
        self.units = spiketrains[0].units

        # t_start and t_stop are checked to be time quantities in the
        # check_neo_consistency call.
        self._t_start = self._t_start.rescale(self.units).item()
        self._t_stop = self._t_stop.rescale(self.units).item()

        # The binned interval must lie inside the interval covered by
        # every spike train (up to the rounding tolerance).
        start_shared = max(st.t_start.rescale(self.units).item()
                           for st in spiketrains)
        stop_shared = min(st.t_stop.rescale(self.units).item()
                          for st in spiketrains)

        tolerance = self.tolerance
        if tolerance is None:
            tolerance = 0

        if self._t_start < start_shared - tolerance \
                or self._t_stop > stop_shared + tolerance:
            raise ValueError("'t_start' ({t_start}) or 't_stop' ({t_stop}) is "
                             "outside of the shared [{start_shared}, "
                             "{stop_shared}] interval".format(
                                 t_start=self.t_start, t_stop=self.t_stop,
                                 start_shared=start_shared,
                                 stop_shared=stop_shared))

        if self.n_bins is None:
            # bin_size is provided
            self._bin_size = self._bin_size.rescale(self.units).item()
            self.n_bins = get_n_bins()
        elif self._bin_size is None:
            # n_bins is provided
            self._bin_size = (self._t_stop - self._t_start) / self.n_bins
        else:
            # both n_bins and bin_size are given
            self._bin_size = self._bin_size.rescale(self.units).item()
            check_n_bins_consistency()

        check_consistency()
@property
def bin_edges(self):
"""
Returns all time edges as a quantity array with :attr:`n_bins` bins.
The borders of all time steps between :attr:`t_start` and
:attr:`t_stop` with a step :attr:`bin_size`. It is crucial for many
analyses that all bins have the same size, so if
:attr:`t_stop` - :attr:`t_start` is not divisible by :attr:`bin_size`,
there will be some leftover time at the end
(see https://github.com/NeuralEnsemble/elephant/issues/255).
The length of the returned array should match :attr:`n_bins`.
Returns
-------
bin_edges : pq.Quantity
All edges in interval [:attr:`t_start`, :attr:`t_stop`] with
:attr:`n_bins` bins are returned as a quantity array.
"""
bin_edges = np.linspace(self._t_start, self._t_start + self.n_bins *
self._bin_size,
num=self.n_bins + 1, endpoint=True)
return pq.Quantity(bin_edges, units=self.units, copy=False)
@property
def bin_centers(self):
"""
Returns each center time point of all bins between :attr:`t_start` and
:attr:`t_stop` points.
The center of each bin of all time steps between start and stop.
Returns
-------
bin_edges : pq.Quantity
All center edges in interval (:attr:`start`, :attr:`stop`).
"""
start = self._t_start + self._bin_size / 2
stop = start + (self.n_bins - 1) * self._bin_size
bin_centers = np.linspace(start=start,
stop=stop,
num=self.n_bins, endpoint=True)
bin_centers = pq.Quantity(bin_centers, units=self.units, copy=False)
return bin_centers
def to_sparse_array(self):
"""
Getter for sparse matrix with time points. Deprecated in favor of
:attr:`sparse_matrix`.
Returns
-------
scipy.sparse.csr_matrix or scipy.sparse.csc_matrix
Sparse matrix, version with spike counts.
See also
--------
scipy.sparse.csr_matrix
to_array
"""
warnings.warn("'.to_sparse_array()' function is deprecated; "
"use '.sparse_matrix' attribute directly",
DeprecationWarning)
return self.sparse_matrix
def to_sparse_bool_array(self):
"""
Getter for boolean version of the sparse matrix, calculated from
sparse matrix with counted time points.
Returns
-------
scipy.sparse.csr_matrix or scipy.sparse.csc_matrix
Sparse matrix, binary, boolean version.
See also
--------
scipy.sparse.csr_matrix
to_bool_array
"""
# Return sparse Matrix as a copy
spmat_copy = self.sparse_matrix.copy()
spmat_copy.data = spmat_copy.data.astype(bool)
return spmat_copy
def __eq__(self, other):
if not isinstance(other, BinnedSpikeTrain):
return False
if self.n_bins != other.n_bins:
return
dt_start = other.t_start.rescale(self.units).item() - self._t_start
dt_stop = other.t_stop.rescale(self.units).item() - self._t_stop
dbin_size = other.bin_size.rescale(self.units).item() - self._bin_size
tol = 0 if self.tolerance is None else self.tolerance
if any(abs(diff) > tol for diff in [dt_start, dt_stop, dbin_size]):
return False
sp1 = self.sparse_matrix
sp2 = other.sparse_matrix
if sp1.__class__ is not sp2.__class__ or sp1.shape != sp2.shape \
or sp1.data.shape != sp2.data.shape:
return False
return (sp1.data == sp2.data).all() and \
(sp1.indptr == sp2.indptr).all() and \
(sp1.indices == sp2.indices).all()
def copy(self):
"""
Copies the binned sparse matrix and returns a view. Any changes to
the copied object won't affect the original object.
Returns
-------
BinnedSpikeTrainView
A copied view of itself.
"""
return BinnedSpikeTrainView(t_start=self._t_start,
t_stop=self._t_stop,
bin_size=self._bin_size,
units=self.units,
sparse_matrix=self.sparse_matrix.copy(),
tolerance=self.tolerance)
def __iter_sparse_matrix(self):
spmat = self.sparse_matrix
if isinstance(spmat, sps.csc_matrix):
warnings.warn("The sparse matrix format is CSC. For better "
"performance, specify the CSR format while "
"constructing a "
"BinnedSpikeTrain(sparse_format='csr')")
spmat = spmat.tocsr()
# taken from csr_matrix.__iter__()
i0 = 0
for i1 in spmat.indptr[1:]:
indices = spmat.indices[i0:i1]
data = spmat.data[i0:i1]
yield indices, data
i0 = i1
    def __getitem__(self, item):
        """
        Returns a binned slice view of itself; `t_start` and `t_stop` will be
        set accordingly to the second slicing argument, if any.

        Parameters
        ----------
        item : int or slice or tuple
            Spike train and bin index slicing, passed to
            ``self.sparse_matrix``.

        Returns
        -------
        BinnedSpikeTrainView
            A slice of itself that carry the original data. Any changes to
            the returned binned sparse matrix will affect the original data.

        Raises
        ------
        TypeError
            If the second (bin) index is neither an int nor a slice.
        """
        # taken from csr_matrix.__getitem__
        # NOTE(review): `_validate_indices` is a private scipy API -
        # verify it still exists when upgrading scipy.
        row, col = self.sparse_matrix._validate_indices(item)
        spmat = self.sparse_matrix[item]
        if np.isscalar(spmat):
            # data with one element
            # Re-wrap a scalar result as a 1x1 sparse matrix so the view
            # below always receives a matrix.
            spmat = sps.csr_matrix(([spmat], ([0], [0])), dtype=spmat.dtype)
        if isinstance(col, (int, np.integer)):
            # A single bin was selected: treat as a one-bin slice.
            start, stop, stride = col, col + 1, 1
        elif isinstance(col, slice):
            start, stop, stride = col.indices(self.n_bins)
        else:
            raise TypeError(f"The second slice argument ({col}), which "
                            "corresponds to bin indices, must be either int "
                            "or slice.")
        # Recompute the time window and bin size of the sliced view from
        # the unit-less internal representation.
        t_start = self._t_start + start * self._bin_size
        t_stop = self._t_start + stop * self._bin_size
        bin_size = stride * self._bin_size
        bst = BinnedSpikeTrainView(t_start=t_start,
                                   t_stop=t_stop,
                                   bin_size=bin_size,
                                   units=self.units,
                                   sparse_matrix=spmat,
                                   tolerance=self.tolerance)
        return bst
def __setitem__(self, key, value):
"""
Changes the values of ``self.sparse_matrix`` according to `key` and
`value`. A shortcut to ``self.sparse_matrix[key] = value``.
Parameters
----------
key : int or list or tuple or slice
The binned sparse matrix keys (axes slice) to change.
value : int or list or tuple or slice
New values of the sparse matrix selection.
"""
self.sparse_matrix[key] = value
def time_slice(self, t_start=None, t_stop=None, copy=False):
"""
Returns a view or a copied view of currently binned spike trains with
``(t_start, t_stop)`` time slice. Only valid (fully overlapping) bins
are sliced.
Parameters
----------
t_start, t_stop : pq.Quantity or None, optional
Start and stop times or Nones.
Default: None
copy : bool, optional
Copy the sparse matrix or not.
Default: False
Returns
-------
BinnedSpikeTrainView
A time slice of itself.
"""
if not is_time_quantity(t_start, t_stop, allow_none=True):
raise TypeError("t_start and t_stop must be quantities")
if t_start is None and t_stop is None and not copy:
return self
if t_start is None:
start_index = 0
else:
t_start = t_start.rescale(self.units).item()
start_index = (t_start - self._t_start) / self._bin_size
start_index = math.ceil(start_index)
start_index = max(start_index, 0)
if t_stop is None:
stop_index = self.n_bins
else:
t_stop = t_stop.rescale(self.units).item()
stop_index = (t_stop - self._t_start) / self._bin_size
stop_index = round_binning_errors(stop_index,
tolerance=self.tolerance)
stop_index = min(stop_index, self.n_bins)
stop_index = max(stop_index, start_index)
spmat = self.sparse_matrix[:, start_index: stop_index]
if copy:
spmat = spmat.copy()
t_start = self._t_start + start_index * self._bin_size
t_stop = self._t_start + stop_index * self._bin_size
bst = BinnedSpikeTrainView(t_start=t_start,
t_stop=t_stop,
bin_size=self._bin_size,
units=self.units,
sparse_matrix=spmat,
tolerance=self.tolerance)
return bst
    def to_spike_trains(self, spikes="random", as_array=False,
                        annotate_bins=False):
        """
        Generate spike trains from the binned spike train object. This function
        is inverse to binning such that

        .. code-block:: python

            BinnedSpikeTrain(binned_st.to_spike_trains()) == binned_st

        The object bin size is stored in resulting
        ``spiketrain.annotations['bin_size']``.

        Parameters
        ----------
        spikes : {"left", "center", "random"}, optional
            Specifies how to generate spikes inside bins.

            * "left": align spikes from left to right to have equal inter-
              spike interval;
            * "center": align spikes around center to have equal inter-spike
              interval;
            * "random": generate spikes from a homogenous Poisson process;
              it's the fastest mode.

            Default: "random"
        as_array : bool, optional
            If True, numpy arrays are returned; otherwise, wrap the arrays in
            `neo.SpikeTrain`.
            Default: False
        annotate_bins : bool, optional
            If `as_array` is False, this flag allows to include the bin index
            in resulting ``spiketrain.array_annotations['bins']``.
            Default: False

        Returns
        -------
        spiketrains : list of neo.SpikeTrain
            A list of spike trains - one possible realisation of spiketrains
            that could have been used as the input to `BinnedSpikeTrain`.

        Raises
        ------
        ValueError
            If `spikes` is not one of "left", "center", or "random".
        """
        description = f"generated from {self.__class__.__name__}"
        shift = 0
        if spikes == "center":
            # "center" is realised as "left" alignment shifted by one slot.
            shift = 1
            spikes = "left"
        spiketrains = []
        for indices, spike_count in self.__iter_sparse_matrix():
            # One bin index per individual spike (a bin holding k spikes
            # contributes k entries).
            bin_indices = np.repeat(indices, spike_count)
            t_starts = self._t_start + bin_indices * self._bin_size
            if spikes == "random":
                # Uniform positions within each bin, then offset by the
                # bin's start time and sorted.
                spiketrain = np.random.uniform(low=0, high=self._bin_size,
                                               size=spike_count.sum())
                spiketrain += t_starts
                spiketrain.sort()
            elif spikes == "left":
                # Evenly spaced fractions in (0, 1] per bin, scaled to the
                # bin width; `shift` moves them toward the bin center.
                spiketrain = [np.arange(shift, count + shift) / (count + shift)
                              for count in spike_count]
                spiketrain = np.hstack(spiketrain) * self._bin_size
                spiketrain += t_starts
            else:
                raise ValueError(f"Invalid 'spikes' mode: '{spikes}'")
            # account for the last bin
            spiketrain = spiketrain[spiketrain <= self._t_stop]
            if not as_array:
                array_ants = None
                if annotate_bins:
                    array_ants = dict(bins=bin_indices)
                spiketrain = neo.SpikeTrain(spiketrain, t_start=self._t_start,
                                            t_stop=self._t_stop,
                                            units=self.units, copy=False,
                                            description=description,
                                            array_annotations=array_ants,
                                            bin_size=self.bin_size)
            spiketrains.append(spiketrain)
        return spiketrains
def get_num_of_spikes(self, axis=None):
"""
Compute the number of binned spikes.
Parameters
----------
axis : int, optional
If `None`, compute the total num. of spikes.
Otherwise, compute num. of spikes along axis.
If axis is `1`, compute num. of spikes per spike train (row).
Default is `None`.
Returns
-------
n_spikes_per_row : int or np.ndarray
The number of binned spikes.
"""
if axis is None:
return self.sparse_matrix.sum(axis=axis)
n_spikes_per_row = self.sparse_matrix.sum(axis=axis)
n_spikes_per_row = np.ravel(n_spikes_per_row)
return n_spikes_per_row
@property
def spike_indices(self):
"""
A list of lists for each spike train (i.e., rows of the binned matrix),
that in turn contains for each spike the index into the binned matrix
where this spike enters.
In contrast to `self.sparse_matrix.nonzero()`, this function will
report two spikes falling in the same bin as two entries.
Examples
--------
>>> import elephant.conversion as conv
>>> import neo as n
>>> import quantities as pq
>>> st = n.SpikeTrain([0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s,
... t_stop=10.0 * pq.s)
>>> x = conv.BinnedSpikeTrain(st, n_bins=10, bin_size=1 * pq.s,
... t_start=0 * pq.s)
>>> print(x.spike_indices)
[[0, 0, 1, 3, 4, 5, 6]]
>>> print(x.sparse_matrix.nonzero()[1])
[0 1 3 4 5 6]
>>> print(x.to_array())
[[2, 1, 0, 1, 1, 1, 1, 0, 0, 0]]
"""
spike_idx = []
for indices, spike_count in self.__iter_sparse_matrix():
# Extract each non-zeros column index and how often it exists,
# i.e., how many spikes fall in this column
n_cols = np.repeat(indices, spike_count)
spike_idx.append(n_cols)
return spike_idx
@property
def is_binary(self):
"""
Returns True if the sparse matrix contains binary values only.
Beware, that the function does not know if the input was binary
because e.g `to_bool_array()` was used before or if the input is just
sparse (i.e. only one spike per bin at maximum).
Returns
-------
bool
True for binary input, False otherwise.
"""
return is_binary(self.sparse_matrix.data)
def to_bool_array(self):
"""
Returns a matrix, in which the rows correspond to the spike trains and
the columns correspond to the bins in the `BinnedSpikeTrain`.
`True` indicates a spike in given bin of given spike train and
`False` indicates lack of spikes.
Returns
-------
numpy.ndarray
Returns a dense matrix representation of the sparse matrix,
with `True` indicating a spike and `False` indicating a no-spike.
The columns represent the index position of the bins and rows
represent the number of spike trains.
See also
--------
scipy.sparse.csr_matrix
scipy.sparse.csr_matrix.toarray
Examples
--------
>>> import elephant.conversion as conv
>>> import neo as n
>>> import quantities as pq
>>> a = n.SpikeTrain([0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s,
... t_stop=10.0 * pq.s)
>>> x = conv.BinnedSpikeTrain(a, n_bins=10, bin_size=1 * pq.s,
... t_start=0 * pq.s)
>>> print(x.to_bool_array())
[[ True True False True True True True False False False]]
"""
return self.to_array(dtype=bool)
def to_array(self, dtype=None):
"""
Returns a dense matrix, calculated from the sparse matrix, with counted
time points of spikes. The rows correspond to spike trains and the
columns correspond to bins in a `BinnedSpikeTrain`.
Entries contain the count of spikes that occurred in the given bin of
the given spike train.
Returns
-------
matrix : np.ndarray
Matrix with spike counts. Columns represent the index positions of
the binned spikes and rows represent the spike trains.
Examples
--------
>>> import elephant.conversion as conv
>>> import neo as n
>>> import quantities as pq
>>> a = n.SpikeTrain([0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s,
... t_stop=10.0 * pq.s)
>>> x = conv.BinnedSpikeTrain(a, n_bins=10, bin_size=1 * pq.s,
... t_start=0 * pq.s)
>>> print(x.to_array())
[[2 1 0 1 1 1 1 0 0 0]]
See also
--------
scipy.sparse.csr_matrix
scipy.sparse.csr_matrix.toarray
"""
array = self.sparse_matrix.toarray()
if dtype is not None:
array = array.astype(dtype)
return array
def binarize(self, copy=True):
"""
Clip the internal array (no. of spikes in a bin) to `0` (no spikes) or
`1` (at least one spike) values only.
Parameters
----------
copy : bool, optional
If True, a **shallow** copy - a view of `BinnedSpikeTrain` - is
returned with the data array filled with zeros and ones. Otherwise,
the binarization (clipping) is done in-place. A shallow copy
means that :attr:`indices` and :attr:`indptr` of a sparse matrix
is shared with the original sparse matrix. Only the data is copied.
If you want to perform a deep copy, call
:func:`BinnedSpikeTrain.copy` prior to binarizing.
Default: True
Returns
-------
bst : BinnedSpikeTrain or BinnedSpikeTrainView
A (view of) `BinnedSpikeTrain` with the sparse matrix data clipped
to zeros and ones.
"""
spmat = self.sparse_matrix
if copy:
data = np.ones(len(spmat.data), dtype=spmat.data.dtype)
spmat = spmat.__class__(
(data, spmat.indices, spmat.indptr),
shape=spmat.shape, copy=False)
bst = BinnedSpikeTrainView(t_start=self._t_start,
t_stop=self._t_stop,
bin_size=self._bin_size,
units=self.units,
sparse_matrix=spmat,
tolerance=self.tolerance)
else:
spmat.data[:] = 1
bst = self
return bst
@property
def sparsity(self):
"""
The sparsity of the sparse matrix computed as the no. of nonzero
elements divided by the matrix size.
Returns
-------
float
"""
num_nonzero = self.sparse_matrix.data.shape[0]
shape = self.sparse_matrix.shape
size = shape[0] * shape[1]
return num_nonzero / size
    def _create_sparse_matrix(self, spiketrains, sparse_format):
        """
        Converts `neo.SpikeTrain` objects to a scipy sparse matrix, which
        contains the binned spike times, and
        stores it in :attr:`sparse_matrix`.

        Parameters
        ----------
        spiketrains : neo.SpikeTrain or list of neo.SpikeTrain
            Spike trains to bin. A plain (already binned) array-like is
            also accepted and wrapped directly.
        sparse_format : {'csr', 'csc'}
            Output sparse matrix format; anything other than 'csr' is
            treated as 'csc'.

        Returns
        -------
        sparse_matrix : scipy.sparse.csr_matrix or scipy.sparse.csc_matrix
            Matrix of shape ``(len(spiketrains), n_bins)`` with int32
            spike counts.
        """
        # The data type for numeric values
        data_dtype = np.int32
        if sparse_format == 'csr':
            sparse_format = sps.csr_matrix
        else:
            # csc
            sparse_format = sps.csc_matrix
        if not _check_neo_spiketrain(spiketrains):
            # a binned numpy array
            sparse_matrix = sparse_format(spiketrains, dtype=data_dtype)
            return sparse_matrix
        # Get index dtype that can accomodate the largest index
        # (this is the same dtype that will be used for the index arrays of the
        # sparse matrix, so already using it here avoids array duplication)
        shape = (len(spiketrains), self.n_bins)
        numtype = np.int32
        if max(shape) > np.iinfo(numtype).max:
            numtype = np.int64
        row_ids, column_ids = [], []
        # data
        counts = []
        n_discarded = 0
        # all spiketrains carry the same units
        scale_units = 1 / self._bin_size
        for idx, st in enumerate(spiketrains):
            times = st.magnitude
            # Keep spikes inside [t_start, t_stop] and shift to a 0 origin.
            times = times[(times >= self._t_start) & (
                times <= self._t_stop)] - self._t_start
            bins = times * scale_units
            # shift spikes that are very close
            # to the right edge into the next bin
            bins = round_binning_errors(bins, tolerance=self.tolerance)
            # Spikes landing at or past the last edge are discarded.
            valid_bins = bins[bins < self.n_bins]
            n_discarded += len(bins) - len(valid_bins)
            f, c = np.unique(valid_bins, return_counts=True)
            # f inherits the dtype np.int32 from bins, but c is created in
            # np.unique with the default int dtype (usually np.int64)
            c = c.astype(data_dtype)
            column_ids.append(f)
            counts.append(c)
            row_ids.append(np.repeat(idx, repeats=len(f)).astype(numtype))
        if n_discarded > 0:
            warnings.warn("Binning discarded {} last spike(s) of the "
                          "input spiketrain".format(n_discarded))
        # Stacking preserves the data type. In any case, while creating
        # the sparse matrix, a copy is performed even if we set 'copy' to False
        # explicitly (however, this might change in future scipy versions -
        # this depends on scipy csr matrix initialization implementation).
        counts = np.hstack(counts)
        column_ids = np.hstack(column_ids)
        row_ids = np.hstack(row_ids)
        sparse_matrix = sparse_format((counts, (row_ids, column_ids)),
                                      shape=shape, dtype=data_dtype,
                                      copy=False)
        return sparse_matrix
class BinnedSpikeTrainView(BinnedSpikeTrain):
    """
    A lightweight view of :class:`BinnedSpikeTrain`.

    Used to avoid deep copies in several functions of a binned spike
    train object, such as :meth:`BinnedSpikeTrain.binarize` and
    :meth:`BinnedSpikeTrain.time_slice`: the constructor stores the
    already unit-less times and the (possibly shared) sparse matrix
    directly, without re-binning.

    Parameters
    ----------
    t_start, t_stop : float
        Unit-less start and stop times that share the same units.
    bin_size : float
        Unit-less bin size that was used used in binning the
        `sparse_matrix`.
    units : pq.Quantity
        The units of input spike trains.
    sparse_matrix : scipy.sparse.csr_matrix
        Binned sparse matrix.
    tolerance : float or None, optional
        The tolerance property of the original `BinnedSpikeTrain`.
        Default: 1e-8

    Warnings
    --------
    This class is an experimental feature.
    """

    def __init__(self, t_start, t_stop, bin_size, units, sparse_matrix,
                 tolerance=1e-8):
        self.sparse_matrix = sparse_matrix
        # The number of bins is implied by the matrix width.
        self.n_bins = sparse_matrix.shape[1]
        self._t_start = t_start
        self._t_stop = t_stop
        self._bin_size = bin_size
        # Keep an independent copy of the units quantity.
        self.units = units.copy()
        self.tolerance = tolerance
def _check_neo_spiketrain(matrix):
    """
    Check whether the input consists of neo.SpikeTrain objects.

    Parameters
    ----------
    matrix
        Object to test for `neo.SpikeTrain`s.

    Returns
    -------
    bool
        True if `matrix` is a neo.SpikeTrain or a (nested) list or
        tuple thereof, otherwise False.
    """
    if isinstance(matrix, neo.SpikeTrain):
        # Single spike train.
        return True
    if isinstance(matrix, (list, tuple)):
        # Recurse into containers; an empty container yields True, as
        # all() does on an empty iterable.
        return all(_check_neo_spiketrain(element) for element in matrix)
    return False
| [
"numpy.hstack",
"quantities.Quantity",
"numpy.iinfo",
"elephant.utils.is_time_quantity",
"elephant.utils.is_binary",
"numpy.arange",
"numpy.histogram",
"numpy.repeat",
"numpy.isscalar",
"numpy.asarray",
"numpy.max",
"numpy.linspace",
"warnings.warn",
"scipy.sparse.csr_matrix",
"numpy.dty... | [((7770, 7865), 'numpy.arange', 'np.arange', (['(t_start - sampling_period / 2)', '(t_stop + sampling_period * 3 / 2)', 'sampling_period'], {}), '(t_start - sampling_period / 2, t_stop + sampling_period * 3 / 2,\n sampling_period)\n', (7779, 7865), True, 'import numpy as np\n'), ((12604, 12659), 'elephant.utils.deprecated_alias', 'deprecated_alias', ([], {'binsize': '"""bin_size"""', 'num_bins': '"""n_bins"""'}), "(binsize='bin_size', num_bins='n_bins')\n", (12620, 12659), False, 'from elephant.utils import is_binary, deprecated_alias, is_time_quantity, check_neo_consistency, round_binning_errors\n'), ((13955, 14012), 'quantities.Quantity', 'pq.Quantity', (['self._bin_size'], {'units': 'self.units', 'copy': '(False)'}), '(self._bin_size, units=self.units, copy=False)\n', (13966, 14012), True, 'import quantities as pq\n'), ((14164, 14220), 'quantities.Quantity', 'pq.Quantity', (['self._t_start'], {'units': 'self.units', 'copy': '(False)'}), '(self._t_start, units=self.units, copy=False)\n', (14175, 14220), True, 'import quantities as pq\n'), ((14370, 14425), 'quantities.Quantity', 'pq.Quantity', (['self._t_stop'], {'units': 'self.units', 'copy': '(False)'}), '(self._t_stop, units=self.units, copy=False)\n', (14381, 14425), True, 'import quantities as pq\n'), ((14545, 14623), 'warnings.warn', 'warnings.warn', (['"""\'.binsize\' is deprecated; use \'.bin_size\'"""', 'DeprecationWarning'], {}), '("\'.binsize\' is deprecated; use \'.bin_size\'", DeprecationWarning)\n', (14558, 14623), False, 'import warnings\n'), ((14794, 14871), 'warnings.warn', 'warnings.warn', (['"""\'.num_bins\' is deprecated; use \'.n_bins\'"""', 'DeprecationWarning'], {}), '("\'.num_bins\' is deprecated; use \'.n_bins\'", DeprecationWarning)\n', (14807, 14871), False, 'import warnings\n'), ((16038, 16061), 'numpy.asarray', 'np.asarray', (['spiketrains'], {}), '(spiketrains)\n', (16048, 16061), True, 'import numpy as np\n'), ((23098, 23210), 'numpy.linspace', 'np.linspace', 
(['self._t_start', '(self._t_start + self.n_bins * self._bin_size)'], {'num': '(self.n_bins + 1)', 'endpoint': '(True)'}), '(self._t_start, self._t_start + self.n_bins * self._bin_size,\n num=self.n_bins + 1, endpoint=True)\n', (23109, 23210), True, 'import numpy as np\n'), ((23286, 23338), 'quantities.Quantity', 'pq.Quantity', (['bin_edges'], {'units': 'self.units', 'copy': '(False)'}), '(bin_edges, units=self.units, copy=False)\n', (23297, 23338), True, 'import quantities as pq\n'), ((23858, 23925), 'numpy.linspace', 'np.linspace', ([], {'start': 'start', 'stop': 'stop', 'num': 'self.n_bins', 'endpoint': '(True)'}), '(start=start, stop=stop, num=self.n_bins, endpoint=True)\n', (23869, 23925), True, 'import numpy as np\n'), ((24016, 24070), 'quantities.Quantity', 'pq.Quantity', (['bin_centers'], {'units': 'self.units', 'copy': '(False)'}), '(bin_centers, units=self.units, copy=False)\n', (24027, 24070), True, 'import quantities as pq\n'), ((24498, 24629), 'warnings.warn', 'warnings.warn', (['"""\'.to_sparse_array()\' function is deprecated; use \'.sparse_matrix\' attribute directly"""', 'DeprecationWarning'], {}), '(\n "\'.to_sparse_array()\' function is deprecated; use \'.sparse_matrix\' attribute directly"\n , DeprecationWarning)\n', (24511, 24629), False, 'import warnings\n'), ((28233, 28251), 'numpy.isscalar', 'np.isscalar', (['spmat'], {}), '(spmat)\n', (28244, 28251), True, 'import numpy as np\n'), ((35947, 35973), 'numpy.ravel', 'np.ravel', (['n_spikes_per_row'], {}), '(n_spikes_per_row)\n', (35955, 35973), True, 'import numpy as np\n'), ((37805, 37839), 'elephant.utils.is_binary', 'is_binary', (['self.sparse_matrix.data'], {}), '(self.sparse_matrix.data)\n', (37814, 37839), False, 'from elephant.utils import is_binary, deprecated_alias, is_time_quantity, check_neo_consistency, round_binning_errors\n'), ((45262, 45279), 'numpy.hstack', 'np.hstack', (['counts'], {}), '(counts)\n', (45271, 45279), True, 'import numpy as np\n'), ((45301, 45322), 
'numpy.hstack', 'np.hstack', (['column_ids'], {}), '(column_ids)\n', (45310, 45322), True, 'import numpy as np\n'), ((45341, 45359), 'numpy.hstack', 'np.hstack', (['row_ids'], {}), '(row_ids)\n', (45350, 45359), True, 'import numpy as np\n'), ((6589, 6607), 'numpy.max', 'np.max', (['spiketrain'], {}), '(spiketrain)\n', (6595, 6607), True, 'import numpy as np\n'), ((8372, 8433), 'numpy.arange', 'np.arange', (['t_start', '(t_stop + sampling_period)', 'sampling_period'], {}), '(t_start, t_stop + sampling_period, sampling_period)\n', (8381, 8433), True, 'import numpy as np\n'), ((8492, 8553), 'numpy.arange', 'np.arange', (['t_start', '(t_stop + sampling_period)', 'sampling_period'], {}), '(t_start, t_stop + sampling_period, sampling_period)\n', (8501, 8553), True, 'import numpy as np\n'), ((15574, 15601), 'quantities.Quantity', 'pq.Quantity', (['(1)'], {'units': 'units'}), '(1, units=units)\n', (15585, 15601), True, 'import quantities as pq\n'), ((18067, 18121), 'elephant.utils.round_binning_errors', 'round_binning_errors', (['n_bins'], {'tolerance': 'self.tolerance'}), '(n_bins, tolerance=self.tolerance)\n', (18087, 18121), False, 'from elephant.utils import is_binary, deprecated_alias, is_time_quantity, check_neo_consistency, round_binning_errors\n'), ((19587, 19724), 'elephant.utils.check_neo_consistency', 'check_neo_consistency', (['spiketrains'], {'object_type': 'neo.SpikeTrain', 't_start': 'self._t_start', 't_stop': 'self._t_stop', 'tolerance': 'self.tolerance'}), '(spiketrains, object_type=neo.SpikeTrain, t_start=self\n ._t_start, t_stop=self._t_stop, tolerance=self.tolerance)\n', (19608, 19724), False, 'from elephant.utils import is_binary, deprecated_alias, is_time_quantity, check_neo_consistency, round_binning_errors\n'), ((26989, 27154), 'warnings.warn', 'warnings.warn', (['"""The sparse matrix format is CSC. 
For better performance, specify the CSR format while constructing a BinnedSpikeTrain(sparse_format=\'csr\')"""'], {}), '(\n "The sparse matrix format is CSC. For better performance, specify the CSR format while constructing a BinnedSpikeTrain(sparse_format=\'csr\')"\n )\n', (27002, 27154), False, 'import warnings\n'), ((28309, 28365), 'scipy.sparse.csr_matrix', 'sps.csr_matrix', (['([spmat], ([0], [0]))'], {'dtype': 'spmat.dtype'}), '(([spmat], ([0], [0])), dtype=spmat.dtype)\n', (28323, 28365), True, 'import scipy.sparse as sps\n'), ((30407, 30457), 'elephant.utils.is_time_quantity', 'is_time_quantity', (['t_start', 't_stop'], {'allow_none': '(True)'}), '(t_start, t_stop, allow_none=True)\n', (30423, 30457), False, 'from elephant.utils import is_binary, deprecated_alias, is_time_quantity, check_neo_consistency, round_binning_errors\n'), ((30834, 30856), 'math.ceil', 'math.ceil', (['start_index'], {}), '(start_index)\n', (30843, 30856), False, 'import math\n'), ((31128, 31186), 'elephant.utils.round_binning_errors', 'round_binning_errors', (['stop_index'], {'tolerance': 'self.tolerance'}), '(stop_index, tolerance=self.tolerance)\n', (31148, 31186), False, 'from elephant.utils import is_binary, deprecated_alias, is_time_quantity, check_neo_consistency, round_binning_errors\n'), ((33783, 33814), 'numpy.repeat', 'np.repeat', (['indices', 'spike_count'], {}), '(indices, spike_count)\n', (33792, 33814), True, 'import numpy as np\n'), ((37255, 37286), 'numpy.repeat', 'np.repeat', (['indices', 'spike_count'], {}), '(indices, spike_count)\n', (37264, 37286), True, 'import numpy as np\n'), ((44237, 44289), 'elephant.utils.round_binning_errors', 'round_binning_errors', (['bins'], {'tolerance': 'self.tolerance'}), '(bins, tolerance=self.tolerance)\n', (44257, 44289), False, 'from elephant.utils import is_binary, deprecated_alias, is_time_quantity, check_neo_consistency, round_binning_errors\n'), ((44414, 44455), 'numpy.unique', 'np.unique', (['valid_bins'], {'return_counts': 
'(True)'}), '(valid_bins, return_counts=True)\n', (44423, 44455), True, 'import numpy as np\n'), ((8203, 8234), 'numpy.histogram', 'np.histogram', (['spiketrain', 'edges'], {}), '(spiketrain, edges)\n', (8215, 8234), True, 'import numpy as np\n'), ((16119, 16132), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (16127, 16132), True, 'import numpy as np\n'), ((34789, 34976), 'neo.SpikeTrain', 'neo.SpikeTrain', (['spiketrain'], {'t_start': 'self._t_start', 't_stop': 'self._t_stop', 'units': 'self.units', 'copy': '(False)', 'description': 'description', 'array_annotations': 'array_ants', 'bin_size': 'self.bin_size'}), '(spiketrain, t_start=self._t_start, t_stop=self._t_stop,\n units=self.units, copy=False, description=description,\n array_annotations=array_ants, bin_size=self.bin_size)\n', (34803, 34976), False, 'import neo\n'), ((43650, 43667), 'numpy.iinfo', 'np.iinfo', (['numtype'], {}), '(numtype)\n', (43658, 43667), True, 'import numpy as np\n'), ((34337, 34358), 'numpy.hstack', 'np.hstack', (['spiketrain'], {}), '(spiketrain)\n', (34346, 34358), True, 'import numpy as np\n'), ((34202, 34233), 'numpy.arange', 'np.arange', (['shift', '(count + shift)'], {}), '(shift, count + shift)\n', (34211, 34233), True, 'import numpy as np\n')] |
import numpy as np
import skimage
from skimage import transform
from PIL import Image
from constants import scale_fact
def float_im(img):
    """Convert 8-bit pixel values (0-255) to floats in the [0, 1] range."""
    return np.divide(img, 255.0)
# Adapted from: https://stackoverflow.com/a/39382475/9768291
def crop_center(img, crop_x, crop_y):
    """
    Crop a (crop_y, crop_x) window from the center of an image.

    :param img: the image, as a (height, width, channels) array
    :param crop_x: width of the crop, in pixels
    :param crop_y: height of the crop, in pixels
    :return: cropped image, floated (values between 0 and 1)
    """
    height, width, _ = img.shape
    left = width // 2 - crop_x // 2
    top = height // 2 - crop_y // 2
    window = img[top:top + crop_y, left:left + crop_x]
    return float_im(window)
def save_np_img(np_img, path, name):
    """
    Save a numpy-array image to disk.

    Float images (values in [0, 1]) are rescaled to 8-bit unsigned
    integers before saving, because PIL's ``Image.fromarray`` cannot
    handle plain int32/int64 (or float) arrays and raises
    "Cannot handle this data type".

    :param np_img: numpy_array type image
    :param path: string type of the existing path where to save the image
    :param name: string type that includes the format (ex:"bob.png")
    :return: the (possibly converted) numpy array that was saved
    """
    assert isinstance(path, str), 'Path of wrong type! (Must be String)'
    assert isinstance(name, str), 'Name of wrong type! (Must be String)'

    if np.issubdtype(np.asarray(np_img).dtype, np.floating):
        # Bug fix: the previous `.astype(int)` produced int32/int64,
        # which PIL rejects ("Cannot handle this data type" - see the
        # tracebacks that used to be pasted here); uint8 is the dtype
        # PIL expects for 8-bit RGB data.
        np_img = np.multiply(np_img, 255.).astype(np.uint8)

    im = Image.fromarray(np_img)
    im.save(path + name)
    return np_img
def single_downscale(img, width, height):
    """
    Downscales an image by the factor set in the 'constants'

    :param img: the image, as a Numpy Array
    :param width: width to be downscaled
    :param height: height to be downscaled
    :return: returns a float-type numpy by default (values between 0 and 1)
    """
    # TODO: look into `skimage.transform.downscale_local_mean()`
    target_shape = (width // scale_fact, height // scale_fact)
    return skimage.transform.resize(img,
                                    target_shape,
                                    mode='reflect',
                                    anti_aliasing=True)
| [
"PIL.Image.fromarray",
"skimage.transform.resize",
"numpy.multiply",
"numpy.divide"
] | [((153, 174), 'numpy.divide', 'np.divide', (['img', '(255.0)'], {}), '(img, 255.0)\n', (162, 174), True, 'import numpy as np\n'), ((1864, 1887), 'PIL.Image.fromarray', 'Image.fromarray', (['np_img'], {}), '(np_img)\n', (1879, 1887), False, 'from PIL import Image\n'), ((2338, 2452), 'skimage.transform.resize', 'skimage.transform.resize', (['img', '(width // scale_fact, height // scale_fact)'], {'mode': '"""reflect"""', 'anti_aliasing': '(True)'}), "(img, (width // scale_fact, height // scale_fact),\n mode='reflect', anti_aliasing=True)\n", (2362, 2452), False, 'import skimage\n'), ((1387, 1411), 'numpy.multiply', 'np.multiply', (['np_img', '(255)'], {}), '(np_img, 255)\n', (1398, 1411), True, 'import numpy as np\n')] |
from multimds import compartment_analysis as ca
from multimds import data_tools as dt
from scipy import stats as st
from matplotlib import pyplot as plt
import numpy as np
from multimds import linear_algebra as la
from scipy import signal as sg
from multimds import multimds as mm
# Chromosome and resolution (kb) used throughout.
# Bug fix: `chrom` and `res_kb` were referenced below without ever being
# defined, which raised a NameError at runtime; they are now defined to
# match the hard-coded file names that were already in use.
chrom = 19
res_kb = 100

path1 = "hic_data/GM12878_combined_{}_{}kb.bed".format(chrom, res_kb)
path2 = "hic_data/K562_{}_{}kb.bed".format(chrom, res_kb)

# Joint 3D reconstruction of both cell types.
struct1, struct2 = mm.full_mds(path1, path2, prefix="test_")

# A/B compartment eigenvectors for each cell type.
mat1 = dt.matFromBed(path1, struct1)
comps1 = ca.get_compartments(mat1, struct1)
mat2 = dt.matFromBed(path2, struct2)
comps2 = ca.get_compartments(mat2, struct2)

# The sign of a compartment eigenvector is arbitrary; flip one copy if
# the two are anti-correlated so that their difference is meaningful.
r, p = st.pearsonr(comps1, comps2)
if r < 0:
    comps1 = -comps1
comp_diffs = np.abs(comps1 - comps2)

# Per-locus relocalization: distance between matched 3D coordinates.
dists = np.array([la.calcDistance(coord1, coord2)
                  for coord1, coord2 in zip(struct1.getCoords(),
                                            struct2.getCoords())])
dist_peaks = sg.find_peaks_cwt(dists, np.arange(1, 10))

plt.subplot2grid((10, 10), (0, 0), 9, 10, frameon=False)
gen_coords = struct1.getGenCoords()
plt.plot(gen_coords, comp_diffs/max(comp_diffs), lw=2, color=(0.75, 0, 0),
         label="Compartment score change", zorder=1)
plt.plot(gen_coords, dists/max(dists), lw=2, color=(0, 0, 0.75),
         label="Relocalization", zorder=1)

# Mark relocalization peaks: blue when the compartment change is small
# (< 0.2), grey otherwise.
for dist_peak in dist_peaks:
    if comp_diffs[dist_peak] < 0.2:
        plt.scatter([gen_coords[dist_peak]], [0.1], color=(0, 0, 1), s=40,
                    zorder=2)
    else:
        plt.scatter([gen_coords[dist_peak]], [0.1], color=(0.25, 0.25, 0.25),
                    s=40, zorder=2)

plt.xlabel("Genomic coordinate", fontsize=20)
plt.ylabel("Normalized change", fontsize=20)

# define offsets
xmin = min(gen_coords)
xmax = max(gen_coords)
x_range = xmax - xmin
x_start = xmin - x_range/25.
x_end = xmax + x_range/25.

ymin = 0
ymax = 1
y_range = ymax - ymin
y_start = ymin - y_range/25.
y_end = ymax + y_range/25.

# define axes with offsets
# NOTE(review): `frameon` may not be accepted by plt.axis in newer
# matplotlib versions - verify against the pinned matplotlib.
plt.axis([x_start, x_end, y_start, y_end], frameon=False)

# plot axes (black with line width of 4)
plt.axvline(x=x_start, color="k", lw=2)
plt.axhline(y=y_start, color="k", lw=4)

# plot ticks
plt.tick_params(direction="out", top=False, right=False, length=12, width=3,
                pad=1, labelsize=18)

# Threshold guide line at 0.2 (the compartment-change cutoff used above).
plt.plot([x_start, x_end], [0.2, 0.2], color=(0.5, 0.5, 0.5), linestyle="--")

plt.legend(frameon=False, loc=2, fontsize=16)
plt.savefig("sup20.svg")
| [
"numpy.abs",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.plot",
"multimds.multimds.full_mds",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.scatter",
"sci... | [((388, 429), 'multimds.multimds.full_mds', 'mm.full_mds', (['path1', 'path2'], {'prefix': '"""test_"""'}), "(path1, path2, prefix='test_')\n", (399, 429), True, 'from multimds import multimds as mm\n'), ((533, 567), 'multimds.compartment_analysis.get_compartments', 'ca.get_compartments', (['mat1', 'struct1'], {}), '(mat1, struct1)\n', (552, 567), True, 'from multimds import compartment_analysis as ca\n'), ((658, 692), 'multimds.compartment_analysis.get_compartments', 'ca.get_compartments', (['mat2', 'struct2'], {}), '(mat2, struct2)\n', (677, 692), True, 'from multimds import compartment_analysis as ca\n'), ((701, 728), 'scipy.stats.pearsonr', 'st.pearsonr', (['comps1', 'comps2'], {}), '(comps1, comps2)\n', (712, 728), True, 'from scipy import stats as st\n'), ((771, 794), 'numpy.abs', 'np.abs', (['(comps1 - comps2)'], {}), '(comps1 - comps2)\n', (777, 794), True, 'import numpy as np\n'), ((972, 1028), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(10, 10)', '(0, 0)', '(9)', '(10)'], {'frameon': '(False)'}), '((10, 10), (0, 0), 9, 10, frameon=False)\n', (988, 1028), True, 'from matplotlib import pyplot as plt\n'), ((1512, 1557), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Genomic coordinate"""'], {'fontsize': '(20)'}), "('Genomic coordinate', fontsize=20)\n", (1522, 1557), True, 'from matplotlib import pyplot as plt\n'), ((1558, 1602), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Normalized change"""'], {'fontsize': '(20)'}), "('Normalized change', fontsize=20)\n", (1568, 1602), True, 'from matplotlib import pyplot as plt\n'), ((1868, 1925), 'matplotlib.pyplot.axis', 'plt.axis', (['[x_start, x_end, y_start, y_end]'], {'frameon': '(False)'}), '([x_start, x_end, y_start, y_end], frameon=False)\n', (1876, 1925), True, 'from matplotlib import pyplot as plt\n'), ((1967, 2006), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'x_start', 'color': '"""k"""', 'lw': '(2)'}), "(x=x_start, color='k', lw=2)\n", (1978, 2006), True, 'from 
matplotlib import pyplot as plt\n'), ((2007, 2046), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': 'y_start', 'color': '"""k"""', 'lw': '(4)'}), "(y=y_start, color='k', lw=4)\n", (2018, 2046), True, 'from matplotlib import pyplot as plt\n'), ((2060, 2161), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'direction': '"""out"""', 'top': '(False)', 'right': '(False)', 'length': '(12)', 'width': '(3)', 'pad': '(1)', 'labelsize': '(18)'}), "(direction='out', top=False, right=False, length=12, width=3,\n pad=1, labelsize=18)\n", (2075, 2161), True, 'from matplotlib import pyplot as plt\n'), ((2159, 2236), 'matplotlib.pyplot.plot', 'plt.plot', (['[x_start, x_end]', '[0.2, 0.2]'], {'color': '(0.5, 0.5, 0.5)', 'linestyle': '"""--"""'}), "([x_start, x_end], [0.2, 0.2], color=(0.5, 0.5, 0.5), linestyle='--')\n", (2167, 2236), True, 'from matplotlib import pyplot as plt\n'), ((2236, 2281), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)', 'loc': '(2)', 'fontsize': '(16)'}), '(frameon=False, loc=2, fontsize=16)\n', (2246, 2281), True, 'from matplotlib import pyplot as plt\n'), ((2283, 2307), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""sup20.svg"""'], {}), "('sup20.svg')\n", (2294, 2307), True, 'from matplotlib import pyplot as plt\n'), ((954, 970), 'numpy.arange', 'np.arange', (['(1)', '(10)'], {}), '(1, 10)\n', (963, 970), True, 'import numpy as np\n'), ((814, 845), 'multimds.linear_algebra.calcDistance', 'la.calcDistance', (['coord1', 'coord2'], {}), '(coord1, coord2)\n', (829, 845), True, 'from multimds import linear_algebra as la\n'), ((1342, 1418), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[gen_coords[dist_peak]]', '[0.1]'], {'color': '(0, 0, 1)', 's': '(40)', 'zorder': '(2)'}), '([gen_coords[dist_peak]], [0.1], color=(0, 0, 1), s=40, zorder=2)\n', (1353, 1418), True, 'from matplotlib import pyplot as plt\n'), ((1426, 1515), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[gen_coords[dist_peak]]', '[0.1]'], {'color': '(0.25, 
0.25, 0.25)', 's': '(40)', 'zorder': '(2)'}), '([gen_coords[dist_peak]], [0.1], color=(0.25, 0.25, 0.25), s=40,\n zorder=2)\n', (1437, 1515), True, 'from matplotlib import pyplot as plt\n')] |
import os.path as op
import subprocess
import sys
import numpy as np
import pandas as pd
def test_compartment_cli(request, tmpdir):
in_cool = op.join(request.fspath.dirname, 'data/sin_eigs_mat.cool')
out_eig_prefix = op.join(tmpdir, 'test.eigs')
try:
result = subprocess.check_output(
f'python -m cooltools call-compartments -o {out_eig_prefix} {in_cool}',
shell=True
).decode('ascii')
except subprocess.CalledProcessError as e:
print(e.output)
print(sys.exc_info())
raise e
test_eigs = pd.read_table(out_eig_prefix+'.cis.vecs.tsv', sep='\t')
gb = test_eigs.groupby('chrom')
for chrom in gb.groups:
chrom_eigs = gb.get_group(chrom)
r = np.abs(np.corrcoef(chrom_eigs.E1.values,
np.sin(chrom_eigs.start * 2 * np.pi / 500))[0,1])
assert r>0.95
def test_saddle_cli(request, tmpdir):
in_cool = op.join(request.fspath.dirname, 'data/sin_eigs_mat.cool')
out_eig_prefix = op.join(tmpdir, 'test.eigs')
out_expected = op.join(tmpdir, 'test.expected')
out_saddle_prefix = op.join(tmpdir, 'test.saddle')
try:
result = subprocess.check_output(
f'python -m cooltools call-compartments -o {out_eig_prefix} {in_cool}',
shell=True
).decode('ascii')
except subprocess.CalledProcessError as e:
print(e.output)
print(sys.exc_info())
raise e
try:
result = subprocess.check_output(
f'python -m cooltools compute-expected {in_cool} > {out_expected}',
shell=True
).decode('ascii')
except subprocess.CalledProcessError as e:
print(e.output)
print(sys.exc_info())
raise e
try:
result = subprocess.check_output(
f'python -m cooltools compute-saddle -o {out_saddle_prefix} --range -0.5 0.5 '
+f'--n-bins 30 --scale log2 {in_cool} {out_eig_prefix}.cis.vecs.tsv {out_expected}',
shell=True
).decode('ascii')
except subprocess.CalledProcessError as e:
print(e.output)
print(sys.exc_info())
raise e
log2_sad = np.load(out_saddle_prefix + '.saddledump.npz')['saddledata']
bins = np.load(out_saddle_prefix + '.saddledump.npz')['binedges']
binmids = (bins[:-1] + bins[1:]) / 2
log2_theor_sad = np.log2(1 + binmids[None,:] * binmids[:,None])
log2_sad_flat = log2_sad[1:-1, 1:-1].flatten()
log2_theor_sad_flat = log2_theor_sad.flatten()
mask = np.isfinite(log2_sad_flat) & np.isfinite(log2_theor_sad_flat)
cc = np.abs(np.corrcoef(log2_sad_flat[mask], log2_theor_sad_flat[mask])[0][1])
assert cc > 0.9
# def test_digitize_track(request):
# pass
# def test_make_saddle(request):
# pass
# def test_saddleplot(request):
# pass
# def test_saddlestrength(request):
# pass
| [
"subprocess.check_output",
"numpy.corrcoef",
"os.path.join",
"sys.exc_info",
"numpy.isfinite",
"pandas.read_table",
"numpy.sin",
"numpy.log2",
"numpy.load"
] | [((149, 206), 'os.path.join', 'op.join', (['request.fspath.dirname', '"""data/sin_eigs_mat.cool"""'], {}), "(request.fspath.dirname, 'data/sin_eigs_mat.cool')\n", (156, 206), True, 'import os.path as op\n'), ((228, 256), 'os.path.join', 'op.join', (['tmpdir', '"""test.eigs"""'], {}), "(tmpdir, 'test.eigs')\n", (235, 256), True, 'import os.path as op\n'), ((578, 635), 'pandas.read_table', 'pd.read_table', (["(out_eig_prefix + '.cis.vecs.tsv')"], {'sep': '"""\t"""'}), "(out_eig_prefix + '.cis.vecs.tsv', sep='\\t')\n", (591, 635), True, 'import pandas as pd\n'), ((949, 1006), 'os.path.join', 'op.join', (['request.fspath.dirname', '"""data/sin_eigs_mat.cool"""'], {}), "(request.fspath.dirname, 'data/sin_eigs_mat.cool')\n", (956, 1006), True, 'import os.path as op\n'), ((1028, 1056), 'os.path.join', 'op.join', (['tmpdir', '"""test.eigs"""'], {}), "(tmpdir, 'test.eigs')\n", (1035, 1056), True, 'import os.path as op\n'), ((1076, 1108), 'os.path.join', 'op.join', (['tmpdir', '"""test.expected"""'], {}), "(tmpdir, 'test.expected')\n", (1083, 1108), True, 'import os.path as op\n'), ((1133, 1163), 'os.path.join', 'op.join', (['tmpdir', '"""test.saddle"""'], {}), "(tmpdir, 'test.saddle')\n", (1140, 1163), True, 'import os.path as op\n'), ((2387, 2435), 'numpy.log2', 'np.log2', (['(1 + binmids[None, :] * binmids[:, None])'], {}), '(1 + binmids[None, :] * binmids[:, None])\n', (2394, 2435), True, 'import numpy as np\n'), ((2194, 2240), 'numpy.load', 'np.load', (["(out_saddle_prefix + '.saddledump.npz')"], {}), "(out_saddle_prefix + '.saddledump.npz')\n", (2201, 2240), True, 'import numpy as np\n'), ((2266, 2312), 'numpy.load', 'np.load', (["(out_saddle_prefix + '.saddledump.npz')"], {}), "(out_saddle_prefix + '.saddledump.npz')\n", (2273, 2312), True, 'import numpy as np\n'), ((2549, 2575), 'numpy.isfinite', 'np.isfinite', (['log2_sad_flat'], {}), '(log2_sad_flat)\n', (2560, 2575), True, 'import numpy as np\n'), ((2578, 2610), 'numpy.isfinite', 'np.isfinite', 
(['log2_theor_sad_flat'], {}), '(log2_theor_sad_flat)\n', (2589, 2610), True, 'import numpy as np\n'), ((283, 399), 'subprocess.check_output', 'subprocess.check_output', (['f"""python -m cooltools call-compartments -o {out_eig_prefix} {in_cool}"""'], {'shell': '(True)'}), "(\n f'python -m cooltools call-compartments -o {out_eig_prefix} {in_cool}',\n shell=True)\n", (306, 399), False, 'import subprocess\n'), ((530, 544), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (542, 544), False, 'import sys\n'), ((1191, 1307), 'subprocess.check_output', 'subprocess.check_output', (['f"""python -m cooltools call-compartments -o {out_eig_prefix} {in_cool}"""'], {'shell': '(True)'}), "(\n f'python -m cooltools call-compartments -o {out_eig_prefix} {in_cool}',\n shell=True)\n", (1214, 1307), False, 'import subprocess\n'), ((1438, 1452), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1450, 1452), False, 'import sys\n'), ((1497, 1609), 'subprocess.check_output', 'subprocess.check_output', (['f"""python -m cooltools compute-expected {in_cool} > {out_expected}"""'], {'shell': '(True)'}), "(\n f'python -m cooltools compute-expected {in_cool} > {out_expected}',\n shell=True)\n", (1520, 1609), False, 'import subprocess\n'), ((1740, 1754), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1752, 1754), False, 'import sys\n'), ((1799, 2018), 'subprocess.check_output', 'subprocess.check_output', (["(f'python -m cooltools compute-saddle -o {out_saddle_prefix} --range -0.5 0.5 '\n +\n f'--n-bins 30 --scale log2 {in_cool} {out_eig_prefix}.cis.vecs.tsv {out_expected}'\n )"], {'shell': '(True)'}), "(\n f'python -m cooltools compute-saddle -o {out_saddle_prefix} --range -0.5 0.5 '\n +\n f'--n-bins 30 --scale log2 {in_cool} {out_eig_prefix}.cis.vecs.tsv {out_expected}'\n , shell=True)\n", (1822, 2018), False, 'import subprocess\n'), ((2146, 2160), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2158, 2160), False, 'import sys\n'), ((2628, 2687), 'numpy.corrcoef', 'np.corrcoef', 
(['log2_sad_flat[mask]', 'log2_theor_sad_flat[mask]'], {}), '(log2_sad_flat[mask], log2_theor_sad_flat[mask])\n', (2639, 2687), True, 'import numpy as np\n'), ((823, 865), 'numpy.sin', 'np.sin', (['(chrom_eigs.start * 2 * np.pi / 500)'], {}), '(chrom_eigs.start * 2 * np.pi / 500)\n', (829, 865), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import rospy
from cv_bridge import CvBridge, CvBridgeError
import cv2
import numpy as np
from sensor_msgs.msg import CompressedImage,Image
import time
class ImageAverageNode(object):
def __init__(self):
self.bridge = CvBridge()
self.publisher = rospy.Publisher("~topic_out",Image,queue_size=1)
# Create subscriber
self.subscriber = rospy.Subscriber("~topic_in", CompressedImage, self.callback)
self.avg = None
self.numFrames = 0.0
# Define Timer callback
def callback(self, msg):
np_arr = np.fromstring(msg.data, np.uint8)
cv_image = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
cv_image = cv_image.astype('float32')
if self.numFrames == 0:
self.avg = cv_image
else:
cv2.addWeighted(self.avg, self.numFrames/(self.numFrames+1),cv_image, 1/(self.numFrames+1), 0.0, self.avg)
self.numFrames+=1
img_msg = self.bridge.cv2_to_imgmsg(self.avg.astype('uint8'), "bgr8")
img_msg.header.stamp = msg.header.stamp
img_msg.header.frame_id = msg.header.frame_id
self.publisher.publish(img_msg)
if __name__ == '__main__':
rospy.init_node('image_average_node')
node = ImageAverageNode()
# spin to keep the script for exiting
rospy.spin()
| [
"rospy.Subscriber",
"rospy.init_node",
"numpy.fromstring",
"cv_bridge.CvBridge",
"cv2.addWeighted",
"rospy.spin",
"cv2.imdecode",
"rospy.Publisher"
] | [((1204, 1241), 'rospy.init_node', 'rospy.init_node', (['"""image_average_node"""'], {}), "('image_average_node')\n", (1219, 1241), False, 'import rospy\n'), ((1318, 1330), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1328, 1330), False, 'import rospy\n'), ((253, 263), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (261, 263), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((289, 339), 'rospy.Publisher', 'rospy.Publisher', (['"""~topic_out"""', 'Image'], {'queue_size': '(1)'}), "('~topic_out', Image, queue_size=1)\n", (304, 339), False, 'import rospy\n'), ((392, 453), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~topic_in"""', 'CompressedImage', 'self.callback'], {}), "('~topic_in', CompressedImage, self.callback)\n", (408, 453), False, 'import rospy\n'), ((582, 615), 'numpy.fromstring', 'np.fromstring', (['msg.data', 'np.uint8'], {}), '(msg.data, np.uint8)\n', (595, 615), True, 'import numpy as np\n'), ((635, 680), 'cv2.imdecode', 'cv2.imdecode', (['np_arr', 'cv2.CV_LOAD_IMAGE_COLOR'], {}), '(np_arr, cv2.CV_LOAD_IMAGE_COLOR)\n', (647, 680), False, 'import cv2\n'), ((817, 937), 'cv2.addWeighted', 'cv2.addWeighted', (['self.avg', '(self.numFrames / (self.numFrames + 1))', 'cv_image', '(1 / (self.numFrames + 1))', '(0.0)', 'self.avg'], {}), '(self.avg, self.numFrames / (self.numFrames + 1), cv_image, \n 1 / (self.numFrames + 1), 0.0, self.avg)\n', (832, 937), False, 'import cv2\n')] |
# For GUI
import tkinter as tk
# To delay time between blocks while visualizing
import time
# For handling arrays in more efficient manner
import numpy as np
# Node for each block containing values to help calculate the shortest path
class Node:
def __init__(self, parent=None, position=None):
# The Node's it's adjacent to
self.parent = parent
# It's position, (row, column)
self.position = position
# g, f, h values as per algorithm
self.g = 0
self.f = 0
self.h = 0
# Overwriting equivalent operator for this class
def __eq__(self, other):
return self.position == other.position
# Class App contains the main GUI and also the algorithm
class App:
def __init__(self, canvas_height=800, canvas_width=800, side_length=16):
self.root = tk.Tk() # The master window of GUI
self.canvas_height = canvas_height
self.canvas_width = canvas_width
self.start_point = self.end_point = None
self.rows = self.canvas_height // side_length
self.cols = self.canvas_width // side_length
# The first row and first column are obstacles by default for this App
self.maze = np.array(
[[1] + [1] * (self.cols - 1)] + [[1] + [0] * (self.cols - 1) for _ in range(self.rows - 1)])
self.visStarted = False # To make sure no changes occur while algorithm is running
self.canvas = tk.Canvas(self.root, width=self.canvas_width, height=self.canvas_height,
bg="white") # The canvas for all blocks
self.side_length = side_length # Side length of square block
self.grid()
# This function initializes the GUI and packs all widgets and buttons
def start(self):
self.root.title('A* Algorithm Path Visualiser')
self.root.geometry(f"{self.canvas_height}x{self.canvas_width + 100}")
self.text_box = tk.Label(self.root, text="Select starting point")
self.text_box.config(font=("Courier", 14), bg="white")
self.text_box.pack(side=tk.TOP, anchor=tk.CENTER)
self.allow_diagonal_mov = tk.IntVar()
self.allow_diagonal_mov.set(0)
dchkbtn = tk.Checkbutton(self.root, text="Allow Diagonal Movement", variable=self.allow_diagonal_mov)
dchkbtn.place(x=self.canvas_height * 0.1, y=25)
self.delaytime = tk.DoubleVar()
self.delaytime.set(0.06) # Changing this value and offvalue below will affect animation speed
tchkbtn = tk.Checkbutton(self.root, text="Show Solution Instantly", variable=self.delaytime, onvalue=0,
offvalue=0.06)
tchkbtn.place(x=self.canvas_height * 0.6, y=25)
sbtn = tk.Button(self.root, text="Start", command=self.find_path)
sbtn.place(x=self.canvas_height * 0.40, y=self.canvas_width + 70, anchor=tk.CENTER)
rbtn = tk.Button(self.root, text="Reset all", command=self.reset)
rbtn.place(x=self.canvas_height * 0.60, y=self.canvas_width + 70, anchor=tk.CENTER)
self.canvas.place(x=0, y=50)
self.canvas.bind('<B1-Motion>', self.draw)
self.canvas.bind('<Button-1>', self.draw)
self.canvas.bind('<Button-3>', self.reset_block)
self.canvas.bind('<Button3-Motion>', self.reset_block)
self.root.bind("<space>", self.find_path)
self.root.mainloop()
# This function is called when 'Reset All' button is clicked. It resets the canvas and required variables
def reset(self):
self.visStarted = False
self.maze = np.array(
[[1] + [1] * (self.cols - 1)] + [[1] + [0] * (self.cols - 1) for _ in range(self.rows - 1)])
self.canvas.delete("all")
self.grid()
self.start_point = self.end_point = None
self.show_text("Select starting point")
# This function is called when 'Start' button is clicked or <space> bar is pressed.
# It checks if start and end point are initialized and then calls the 'Astar' function which is the main algorithm.
def find_path(self, event=None):
if self.start_point and self.end_point:
self.Astar()
elif self.start_point:
self.show_text("Select destination point", "red")
else:
self.show_text("Select starting point", "red")
# It's to change the text in instruction box to help user with running GUI
def show_text(self, text, text_color="black"):
self.text_box.config(text=text, fg=text_color)
# It's to change the text in instruction box to help user with running GUI
def show_block_text(self):
if not self.start_point:
self.show_text("Select starting point")
elif not self.end_point:
self.show_text("Select destination point")
else:
self.show_text("Make obstacles or Right click on any block to remove it.")
# It's to get the position of block in terms of row and column depending upon the canvas co-ordinates
def get_pos(self, side_length, coordinates):
for i in range(1, self.rows):
for j in range(1, self.cols):
if coordinates[0] // side_length <= i and coordinates[1] // side_length <= j:
return (i, j)
return None
# It draws the grid and black blocks at boundary
def grid(self):
pad = self.side_length // 2
self.canvas.create_rectangle(-pad, -pad, pad, pad, fill="black", outline="grey")
for i in range(pad, self.canvas_height, self.side_length):
self.canvas.create_line([(i, 0), (i, self.canvas_width)], tag='grid_line', fill="grey")
self.canvas.create_rectangle(i, -pad, i + self.side_length, pad, fill="black", outline="grey")
self.canvas.create_rectangle(-pad, i, pad, i + self.side_length, fill="black", outline="grey")
new_pad = self.canvas_height - ((self.canvas_height // self.side_length) * self.side_length - pad)
for i in range(pad, self.canvas_width, self.side_length):
self.canvas.create_line([(0, i), (self.canvas_height, i)], tag='grid_line', fill="grey")
self.canvas.create_rectangle(self.canvas_height - new_pad, i, self.canvas_height, i + self.side_length,
fill="black", outline="grey")
self.canvas.create_rectangle(i, self.canvas_height - new_pad, i + self.side_length, self.canvas_height,
fill="black", outline="grey")
# Function to draw square at given row and column
def draw_sqaure(self, xa, ya, color):
x, y = xa * self.side_length, ya * self.side_length
x1, y1 = (x - self.side_length / 2), (y - self.side_length / 2)
x2, y2 = (x + self.side_length / 2), (y + self.side_length / 2)
self.canvas.create_rectangle(x1, y1, x2, y2, fill=color, outline="grey")
# This function is called when algorithm finds a path.
# It retraces the path by following the Node's parent and so on till it reaches start point.
def draw_path(self, current_node):
self.show_text("Path Found!", "green")
self.visStarted = False
path = []
while current_node is not None:
self.draw_sqaure(current_node.position[0], current_node.position[1], "green")
time.sleep(0.05)
self.canvas.update()
path.append(current_node.position)
current_node = current_node.parent
self.show_text(f"Path Found! Number of blocks required to reach destination is {len(path)}", "green")
# This function is for drawing square blocks on user command.
# It's called when Left Mouse button is clicked.
def draw(self, event):
if self.visStarted == True:
return
pos = self.get_pos(self.side_length, (event.x, event.y))
if pos == None:
return
else:
xa, ya = pos
# It is to check which color of block should be drawn if start is None or end is None or if start and end
# points overlap.
if not self.start_point:
self.start_point = Node(None, (xa, ya))
color = "orange"
if self.end_point:
if self.start_point.position == self.end_point.position:
self.end_point = None
elif self.start_point.position == (xa, ya):
self.start_point = None
if not self.end_point:
self.end_point = Node(None, (xa, ya))
color = "cyan"
else:
color = "black"
elif not self.end_point:
self.end_point = Node(None, (xa, ya))
color = "cyan"
elif self.end_point.position == (xa, ya):
self.end_point = None
if not self.start_point:
self.start_point = Node(None, (xa, ya))
color = "orange"
else:
color = "black"
else:
self.maze[xa][ya] = 1
color = "black"
self.draw_sqaure(xa, ya, color)
self.show_block_text()
# It resets the selected block.
def reset_block(self, event=None):
if self.visStarted == True:
return
pos = self.get_pos(self.side_length, (event.x, event.y))
if pos == None:
return
else:
xa, ya = pos
if self.start_point:
if self.start_point.position == (xa, ya):
self.start_point = None
if self.end_point:
if self.end_point.position == (xa, ya):
self.end_point = None
self.maze[xa][ya] = 0
self.draw_sqaure(xa, ya, "white")
self.show_block_text()
# It's the algorithm function which handles all operations for finding path.
def Astar(self):
self.show_text("A* Algorithm running")
self.visStarted = True
to_visit = np.array([]) # Nodes yet to be visited
visited = np.array([]) # Nodes which are visited
to_visit = np.append(to_visit, [self.start_point]) # Starting point is appended in to_visit
# Checks if diagonal movement is allowed and changes allowed adjacent cells range accordingly.
if self.allow_diagonal_mov.get():
adj_cells = [(0, 1), (1, 0), (-1, 0), (0, -1), (1, 1), (-1, -1), (-1, 1), (1, -1)]
else:
adj_cells = [(0, 1), (1, 0), (-1, 0), (0, -1)]
counter = 0
max_count = (len(self.maze) // 2) ** 10 # A reasonable amount of max iterations.
# loop will be run until there are nodes to visit or until path is found
while to_visit.size > 0:
# If reset all button is clicked then it will immediately stop the algorithm
if not self.visStarted:
return
counter += 1 # Keeping track of number of iterations
time.sleep(self.delaytime.get()) # Delay to visualize in smooth manner
self.canvas.update()
current_node = to_visit[0]
idx = 0
for index, item in enumerate(to_visit):
# If another nodes f value is lesser then it is prioritized.
if item.f < current_node.f:
current_node = item
idx = index
# Break loop if limit exceeds
if counter > max_count:
break
# Deleting current node from to_visit array and adding it to visited one
to_visit = np.delete(to_visit, idx)
self.draw_sqaure(current_node.position[0], current_node.position[1], "red")
visited = np.append(visited, [current_node])
# If current node is end point then we have found the shortest path
if current_node == self.end_point:
return self.draw_path(current_node)
# Children is array containing adjacent cell of current node
children = np.array([])
for new_position in adj_cells:
node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])
# To make sure it is within range of length of rows and columns
if node_position[0] > (len(self.maze) - 1) or node_position[0] < 0 or node_position[1] > (
len(self.maze[len(self.maze) - 1]) - 1) or node_position[1] < 0:
continue
# To check if it's an obstacle or not. Obstacle has value 1
if self.maze[node_position[0]][node_position[1]] != 0:
continue
# Instance of Node Class is made with parent being current node and it's then appended to children array.
new_node = Node(current_node, node_position)
children = np.append(children, [new_node])
# This loop calculates the f, g and h and checks if these nodes are previously visited.
for child in children:
# To check if it has been visited before
if len([i for i in visited if child == i]) > 0:
continue
# Here, cost of all edges is 1
child.g = current_node.g + 1
child.h = ((child.position[0] - self.end_point.position[0]) ** 2 +
(child.position[1] - self.end_point.position[1]) ** 2)
child.f = child.g + child.h
if len([j for j in to_visit if child == j and child.g > j.g]) > 0:
continue
self.draw_sqaure(child.position[0], child.position[1], "yellow")
to_visit = np.append(to_visit, [child]) # It is then added to_visit array so we can check it's adjacent cell and so forth.
self.show_text("Wouldn't find path", "red")
self.visStarted = False
# Here, you can set canvas height, canvas width and side length of block
def main():
app = App(800, 800, 16)
app.start()
if __name__ == '__main__':
main()
| [
"tkinter.IntVar",
"tkinter.Checkbutton",
"numpy.delete",
"tkinter.Button",
"time.sleep",
"numpy.append",
"tkinter.Canvas",
"numpy.array",
"tkinter.Tk",
"tkinter.Label",
"tkinter.DoubleVar"
] | [((836, 843), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (841, 843), True, 'import tkinter as tk\n'), ((1441, 1530), 'tkinter.Canvas', 'tk.Canvas', (['self.root'], {'width': 'self.canvas_width', 'height': 'self.canvas_height', 'bg': '"""white"""'}), "(self.root, width=self.canvas_width, height=self.canvas_height, bg\n ='white')\n", (1450, 1530), True, 'import tkinter as tk\n'), ((1932, 1981), 'tkinter.Label', 'tk.Label', (['self.root'], {'text': '"""Select starting point"""'}), "(self.root, text='Select starting point')\n", (1940, 1981), True, 'import tkinter as tk\n'), ((2138, 2149), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (2147, 2149), True, 'import tkinter as tk\n'), ((2207, 2303), 'tkinter.Checkbutton', 'tk.Checkbutton', (['self.root'], {'text': '"""Allow Diagonal Movement"""', 'variable': 'self.allow_diagonal_mov'}), "(self.root, text='Allow Diagonal Movement', variable=self.\n allow_diagonal_mov)\n", (2221, 2303), True, 'import tkinter as tk\n'), ((2381, 2395), 'tkinter.DoubleVar', 'tk.DoubleVar', ([], {}), '()\n', (2393, 2395), True, 'import tkinter as tk\n'), ((2517, 2630), 'tkinter.Checkbutton', 'tk.Checkbutton', (['self.root'], {'text': '"""Show Solution Instantly"""', 'variable': 'self.delaytime', 'onvalue': '(0)', 'offvalue': '(0.06)'}), "(self.root, text='Show Solution Instantly', variable=self.\n delaytime, onvalue=0, offvalue=0.06)\n", (2531, 2630), True, 'import tkinter as tk\n'), ((2731, 2789), 'tkinter.Button', 'tk.Button', (['self.root'], {'text': '"""Start"""', 'command': 'self.find_path'}), "(self.root, text='Start', command=self.find_path)\n", (2740, 2789), True, 'import tkinter as tk\n'), ((2898, 2956), 'tkinter.Button', 'tk.Button', (['self.root'], {'text': '"""Reset all"""', 'command': 'self.reset'}), "(self.root, text='Reset all', command=self.reset)\n", (2907, 2956), True, 'import tkinter as tk\n'), ((9939, 9951), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9947, 9951), True, 'import numpy as np\n'), ((9997, 10009), 
'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10005, 10009), True, 'import numpy as np\n'), ((10056, 10095), 'numpy.append', 'np.append', (['to_visit', '[self.start_point]'], {}), '(to_visit, [self.start_point])\n', (10065, 10095), True, 'import numpy as np\n'), ((7325, 7341), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (7335, 7341), False, 'import time\n'), ((11523, 11547), 'numpy.delete', 'np.delete', (['to_visit', 'idx'], {}), '(to_visit, idx)\n', (11532, 11547), True, 'import numpy as np\n'), ((11658, 11692), 'numpy.append', 'np.append', (['visited', '[current_node]'], {}), '(visited, [current_node])\n', (11667, 11692), True, 'import numpy as np\n'), ((11970, 11982), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (11978, 11982), True, 'import numpy as np\n'), ((12841, 12872), 'numpy.append', 'np.append', (['children', '[new_node]'], {}), '(children, [new_node])\n', (12850, 12872), True, 'import numpy as np\n'), ((13683, 13711), 'numpy.append', 'np.append', (['to_visit', '[child]'], {}), '(to_visit, [child])\n', (13692, 13711), True, 'import numpy as np\n')] |
import os
import sys
import csv
import numpy as np
from __main__ import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import CompareVolumes
moduleDir = os.path.dirname(__file__)
codeDir = os.path.abspath(os.path.join(moduleDir, os.pardir))
sys.path.insert(0, codeDir) # So that it comes first in the list
# Epiloc imports
import PatientModelEpilepsy
import ElectrodesIO
import epiloc_constants as const
import atlaslabels
CENTER = 0x84 # http://doc.qt.io/qt-4.8/qt.html#AlignmentFlag-enum
FIXED = qt.QSizePolicy.Fixed
DIR, FILE = 0, 1
NORMALIZED = 'Normalized'
CT_POST_NODE = 'Postoperative CT'
T1_PRE_NODE = 'Preoperative T1 MRI'
T1_POST_NODE = 'Postoperative T1 MRI'
class EpilocVisualization(ScriptedLoadableModule):
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "Epiloc visualization"
self.parent.categories = ["Epiloc"]
self.parent.dependencies = []
self.parent.contributors = ["<NAME> (<EMAIL>)"]
self.parent.helpText = """
This is a scripted module used to visualize the results of the Epiloc pipeline.
"""
self.parent.acknowledgementText = """
PF-STIM
"""
class EpilocVisualizationWidget(ScriptedLoadableModuleWidget):
  """GUI widget for the Epiloc visualization module.

  Builds the Qt interface, loads a patient's imaging data either in the
  patient's native (ACPC-aligned) space or in MNI space, and manages the
  display of implanted electrodes and their contact plots.
  """
  def setup(self):
    """Build the GUI and initialize widget state."""
    ScriptedLoadableModuleWidget.setup(self)
    self.setPaths()
    self.makeGUI()
    self.loadHistory()
    self.setCustomSlicerSettings()
    self.electrodes = []          # electrode objects currently loaded in the scene
    self.activeElectrode = None   # electrode whose axis the slices are reformatted to
    self.atlasReader = atlaslabels.AtlasReader()
    self.mniScene = False         # True after loadPatientMNIData() populated the scene
  def setPaths(self):
    """Compute the default patients directory and the history-file paths."""
    self.patientsDir = os.path.join(codeDir, os.pardir, 'patients')
    self.historyDir = os.path.join(moduleDir, 'History')
    self.historyPath = os.path.join(self.historyDir, 'history.txt')
  def setPatientPaths(self):
    """Derive patient dir/id from the line edit and build the patient model."""
    self.patientDir = self.patiendFolderLineEdit.text
    self.patientsDir = os.path.dirname(self.patientDir)
    # Last path component is used as the patient identifier.
    patientId = os.path.split(self.patientDir)[1]
    self.model = PatientModelEpilepsy.PatientModelEpilepsy(patientId, rootDir=self.patientsDir)
  def makeGUI(self):
    """Create the collapsible sections: data loading, visualization, electrodes."""
    logic = EpilocVisualizationLogic()
    self.loadDataCollapsibleButton = ctk.ctkCollapsibleButton()
    self.loadDataCollapsibleButton.setChecked(True)
    self.loadDataCollapsibleButton.text = 'Load data'
    self.loadDataCollapsibleButton.setLayout(qt.QVBoxLayout())
    self.parent.layout().addWidget(self.loadDataCollapsibleButton)
    patientFolderLayout, self.patiendFolderLineEdit, browsePatientFolderButton = logic.getBrowseLayout('Patient folder:')
    browsePatientFolderButton.clicked.connect(self.onBrowsePatientDirectory)
    self.loadDataCollapsibleButton.layout().addLayout(patientFolderLayout)
    logic.addCenteredPushButtonToLayout(self.loadDataCollapsibleButton.layout(), 'Load data in patient\'s space', self.onLoadPatientData, styleSheet='QPushButton {font: bold}')
    logic.addCenteredPushButtonToLayout(self.loadDataCollapsibleButton.layout(), 'Load data in MNI space', self.onLoadPatientMNIData, styleSheet='QPushButton {font: bold}')
    # Visualization section stays hidden until data has been loaded.
    self.visualizationCollapsibleButton = ctk.ctkCollapsibleButton()
    self.visualizationCollapsibleButton.setVisible(False)
    self.visualizationCollapsibleButton.text = 'Visualization'
    self.visualizationCollapsibleButton.setLayout(qt.QVBoxLayout())
    self.parent.layout().addWidget(self.visualizationCollapsibleButton)
    logic.addCenteredPushButtonToLayout(self.visualizationCollapsibleButton.layout(), 'Reset slice views', self.onResetViews, styleSheet='QPushButton {font: bold}')
    self.makeVolumesWidget()
    self.electrodesCollapsibleButton = ctk.ctkCollapsibleButton()
    self.electrodesCollapsibleButton.text = 'Electrodes'
    self.electrodesCollapsibleButton.setLayout(qt.QVBoxLayout())
    self.visualizationCollapsibleButton.layout().addWidget(self.electrodesCollapsibleButton)
    self.reformatModeCheckBox = qt.QCheckBox('View electrode axis')
    self.reformatModeCheckBox.setChecked(True)
    self.reformatModeCheckBox.toggled.connect(self.onToggleReformatModeCheckBox)
    self.electrodesCollapsibleButton.layout().addWidget(self.reformatModeCheckBox)
    self.electrodesAndPlotsLayout = qt.QHBoxLayout()
    self.electrodesCollapsibleButton.layout().addLayout(self.electrodesAndPlotsLayout)
    self.electrodesGroupBox = qt.QGroupBox('Select an electrode')
    self.electrodesGroupBox.setLayout(qt.QVBoxLayout())
    self.electrodesAndPlotsLayout.addWidget(self.electrodesGroupBox)
    if self.developerMode:
      self.reloadButton.clicked.connect(self.onReload)
    self.layout.addStretch()
  def makeVolumesWidget(self):
    """Create the 'Volumes' section: fg/bg selectors, opacity, blending modes."""
    from SurfaceToolbox import numericInputFrame
    self.volumesCollapsibleButton = ctk.ctkCollapsibleButton()
    self.volumesCollapsibleButton.text = 'Volumes'
    self.volumesCollapsibleButton.setLayout(qt.QVBoxLayout())
    # self.volumesCollapsibleButton.setChecked(False)
    self.visualizationCollapsibleButton.layout().addWidget(self.volumesCollapsibleButton)
    def getSelectorFrame(parent, label, tooltip, nodeType):
      # Local helper: labeled MRML node combo box, restricted to nodeType.
      layout = qt.QHBoxLayout()
      parent.layout().addLayout(layout)
      selectorLabel = qt.QLabel(label)
      selectorLabel.setToolTip(tooltip)
      layout.addWidget(selectorLabel)
      selector = slicer.qMRMLNodeComboBox()
      selector.nodeTypes = [nodeType]
      selector.selectNodeUponCreation = False
      selector.addEnabled = False
      selector.removeEnabled = False
      selector.noneEnabled = True
      selector.showHidden = False
      selector.showChildNodeTypes = False
      selector.setMRMLScene(slicer.mrmlScene)
      layout.addWidget(selector)
      return selector
    self.fgSelector = getSelectorFrame(self.volumesCollapsibleButton, 'Foreground volume:', '', 'vtkMRMLScalarVolumeNode')
    self.bgSelector = getSelectorFrame(self.volumesCollapsibleButton, 'Background volume:', '', 'vtkMRMLScalarVolumeNode')
    self.fgSelector.currentNodeChanged.connect(self.updateVolumesFromSelectors)
    self.bgSelector.currentNodeChanged.connect(self.updateVolumesFromSelectors)
    opacityFrame, self.opacitySlider, opacitySpinBox = numericInputFrame(self.parent, 'Foreground opacity:', 'Change the opacity of the foreground volume.',0.0,1.0,0.01,2)
    self.opacitySlider.valueChanged.connect(self.updateVolumesFromSelectors)
    self.opacitySlider.setValue(1/4.)
    opacitySpinBox.hide()
    self.volumesCollapsibleButton.layout().addWidget(opacityFrame)
    toggleVolumesButton = qt.QPushButton('Toggle volumes')
    toggleVolumesButton.clicked.connect(self.onToggleVolumes)
    self.volumesCollapsibleButton.layout().addWidget(toggleVolumesButton)
    layersGroupBox = qt.QGroupBox('Layers blending')
    layersGroupBox.setLayout(qt.QHBoxLayout())
    self.volumesCollapsibleButton.layout().addWidget(layersGroupBox)
    self.greenAndMagentacheckBox = qt.QCheckBox('Green and magenta blending')
    self.greenAndMagentacheckBox.setChecked(False)
    self.greenAndMagentacheckBox.toggled.connect(self.onSwitchGrayAndGreenMagenta)
    layersGroupBox.layout().addWidget(self.greenAndMagentacheckBox)
    self.layerRevealCheckBox = qt.QCheckBox('Layer reveal')
    self.layerRevealCheckBox.toggled.connect(self.onLayerRevealCheckBox)
    layersGroupBox.layout().addWidget(self.layerRevealCheckBox)
  def setCustomSlicerSettings(self):
    """Apply scene-wide display tweaks (slice-intersection crosshair)."""
    ### CROSSHAIR ###
    nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLSliceCompositeNode')
    for idx in range(nodes.GetNumberOfItems()):
      node = nodes.GetItemAsObject(idx)
      node.SetSliceIntersectionVisibility(True)
    # view = slicer.util.getNode('View*')
    # if view:
    #   view.SetOrientationMarkerHumanModelNodeID(modelNode.GetID())
    return
  ### HISTORY ###
  def saveHistory(self, patientDir):
    """Persist the last-used patient directory to the history file."""
    if not os.path.exists(self.historyDir):
      os.mkdir(self.historyDir)
    with open(self.historyPath, 'w') as f:
      f.write(patientDir)
  def loadHistory(self):
    """Restore the last-used patient directory into the line edit, if still valid."""
    if os.path.exists(self.historyPath):
      with open(self.historyPath, 'r') as f:
        lastPatientDir = f.read()
      if os.path.exists(lastPatientDir):
        self.patiendFolderLineEdit.setText(lastPatientDir)
  ### SLOTS ###
  def onBrowsePatientDirectory(self):
    """Let the user pick a patient directory; remember the choice."""
    dirName = qt.QFileDialog.getExistingDirectory(None, 'Browse patient directory', self.patientsDir)
    if dirName:
      self.patiendFolderLineEdit.setText(dirName)
      self.saveHistory(dirName)
  def onLoadPatientData(self):
    """Load the patient data in native space after validating the inputs."""
    self.setPatientPaths()
    if not os.path.exists(self.patientDir):
      slicer.util.delayDisplay('Patient folder does not exist.', 1500)
      return
    xmlPath = self.getPatientXML()
    if not xmlPath:
      slicer.util.delayDisplay('No XML file was found.', 1500)
      return
    self.loadPatientData(xmlPath)
  def onLoadPatientMNIData(self):
    """Load the patient data in MNI space after validating the inputs."""
    self.setPatientPaths()
    if not os.path.exists(self.patientDir):
      # NOTE(review): 'foder' typo in this user-facing message — fix separately.
      slicer.util.delayDisplay('Patient foder does not exist.', 1500)
      return
    xmlPath = self.getPatientXML()
    if not xmlPath:
      slicer.util.delayDisplay('No XML file was found.', 1500)
      return
    csvPath = self.getPatientCSV()
    if not csvPath:
      slicer.util.delayDisplay('No CSV file was found.', 1500)
      return
    self.loadPatientMNIData(xmlPath, csvPath)
  def onResetViews(self):
    """Re-center the slice views on AC and show all electrodes again."""
    logic = EpilocVisualizationLogic()
    logic.centerSlices((0,0,0), fitSlices=True) # AC = 0,0,0
    logic.setLinkedControl(True)
    self.activeElectrode = None
    for electrode in self.electrodes:
      electrode.show()
      # electrode.plotsGroupBox.hide()
  def updateVolumesFromSelectors(self):
    """Push the selector/slider state into the slice composite nodes."""
    logic = EpilocVisualizationLogic()
    bgVolumeNode = self.bgSelector.currentNode()
    fgVolumeNode = self.fgSelector.currentNode()
    opacity = self.opacitySlider.value
    logic.setBackgroundAndForegroundVolumes(bgVolumeNode, fgVolumeNode, opacity)
  def onToggleVolumes(self):
    """Swap foreground/background volumes and mirror the opacity."""
    bgVolumeNode = self.bgSelector.currentNode()
    fgVolumeNode = self.fgSelector.currentNode()
    self.fgSelector.setCurrentNode(bgVolumeNode)
    self.bgSelector.setCurrentNode(fgVolumeNode)
    opacity = self.opacitySlider.value
    # Keep the visible blend identical after the swap.
    newOpacity = -opacity + 1
    self.opacitySlider.setValue(newOpacity)
  def onReload(self):
    """Developer-mode reload: clear the scene first, then reload the module."""
    logic = EpilocVisualizationLogic()
    logic.closeSlicerScene()
    # super(EpilocVisualizationWidget, self).onReload()
    ScriptedLoadableModuleWidget.onReload(self)
  def onToggleReformatModeCheckBox(self):
    """Re-apply the slice reformat for the active electrode, if any."""
    if self.activeElectrode is None:
      return
    else:
      self.activeElectrode.onPlotsSlicesSpinBox()
  def onSwitchGrayAndGreenMagenta(self):
    """Toggle the red-view layer LUTs between gray and green/magenta blending."""
    GRAY = 'vtkMRMLColorTableNodeGrey'
    GREEN = 'vtkMRMLColorTableNodeGreen'
    MAGENTA = 'vtkMRMLColorTableNodeMagenta'
    red_logic = slicer.app.layoutManager().sliceWidget("Red").sliceLogic()
    red_cn = red_logic.GetSliceCompositeNode()
    bgImageDisplayNode = slicer.util.getNode(red_cn.GetBackgroundVolumeID()).GetDisplayNode()
    fgImageDisplayNode = slicer.util.getNode(red_cn.GetForegroundVolumeID()).GetDisplayNode()
    if self.greenAndMagentacheckBox.isChecked():
      # if bgImageDisplayNode.GetColorNodeID() == GRAY:
      #   red_cn.SetForegroundOpacity(.5)
      bgImageDisplayNode.SetAndObserveColorNodeID(GREEN)
      fgImageDisplayNode.SetAndObserveColorNodeID(MAGENTA)
    else:
      bgImageDisplayNode.SetAndObserveColorNodeID(GRAY)
      fgImageDisplayNode.SetAndObserveColorNodeID(GRAY)
  def onLayerRevealCheckBox(self):
    """Create or tear down the CompareVolumes layer-reveal cursor."""
    if self.layerRevealCheckBox.checked:
      self.layerReveal = CompareVolumes.LayerReveal(width=300, height=300)
    else:
      self.layerReveal.tearDown()
      self.layerReveal = None
  ### LOAD DATA ###
  def getPatientXML(self):
    """Return the electrodes XML path (verified > raw > user-chosen)."""
    if os.path.exists(self.model.xmlVerifiedPath):
      xmlPath = self.model.xmlVerifiedPath
    elif os.path.exists(self.model.xmlPath):
      xmlPath = self.model.xmlPath
    else:
      xmlPath = qt.QFileDialog.getOpenFileName(None,
                                     'Choose XML electrodes file',
                                     self.patientDir,
                                     'XML files (*.xml)')
    return xmlPath
  def getPatientCSV(self):
    """Return the localizations CSV path (model path or user-chosen)."""
    # self.setPatientPaths() # already done in getPatientXML
    if os.path.exists(self.model.localizationsPath):
      csvPath = self.model.localizationsPath
    else:
      csvPath = qt.QFileDialog.getOpenFileName(None,
                                     'Choose CSV electrodes localizations file',
                                     self.patientDir,
                                     'CSV files (*.csv)')
    return csvPath
  def loadPatientData(self, xmlPath):
    """Populate the scene in the patient's native (ACPC-aligned) space."""
    self.mniScene = False
    logic = EpilocVisualizationLogic()
    ### LOAD PATIENT DATA ###
    self.loadDataCollapsibleButton.setChecked(False)
    logic.closeSlicerScene()
    ## CT-Post
    successCt, self.ctPostNativeNode = slicer.util.loadVolume(self.model.ctPostPath, returnNode=True)
    successCtToACPC, self.regMatCtToACPCNode = slicer.util.loadTransform(self.model.regMatSlicerCtPost2ACPCPath, returnNode=True)
    if successCt:
      self.ctPostNativeNode.SetName(CT_POST_NODE)
    ## Load electrodes ##
    # Hide any electrodes left over from a previous load.
    for electrode in self.electrodes:
      electrode.button.hide()
      electrode.plotsGroupBox.hide()
    self.electrodes = []
    slicer.util.delayDisplay('Loading electrodes from ' + xmlPath, 1500)
    self.electrodes = logic.loadElectrodes(xmlPath)
    if successCt and successCtToACPC:
      self.ctPostNativeNode.SetAndObserveTransformNodeID(self.regMatCtToACPCNode.GetID())
      # Electrode plot centers live in CT space; bring them into ACPC space.
      ctToACPCMatrix = logic.getMatrixFromTransformNodeID(self.regMatCtToACPCNode.GetID())
      for electrode in self.electrodes:
        for plot in electrode.plots:
          plot.transformCenter(ctToACPCMatrix)
    for electrode in self.electrodes:
      self.electrodesGroupBox.layout().addWidget(electrode.getElectrodeButton())
    for electrode in self.electrodes:
      self.electrodesAndPlotsLayout.addWidget(electrode.getPlotsGroupBox())
      electrode.makeAndLoadModels()
    ## T1-post
    successT1Post, self.t1PostNativeNode = slicer.util.loadVolume(self.model.t1mriPostPath, returnNode=True)
    if successT1Post:
      self.t1PostNativeNode.SetName(T1_POST_NODE)
    successT1PostToACPC, self.regMatT1PostToACPCNode = slicer.util.loadTransform(self.model.regMatSlicerT1MriPost2ACPCPath, returnNode=True)
    if successT1Post and successT1PostToACPC:
      self.t1PostNativeNode.SetAndObserveTransformNodeID(self.regMatT1PostToACPCNode.GetID())
    ## T1-pre
    if os.path.exists(self.model.t1mriPrePath):
      successT1Pre, self.t1PreNativeNode = slicer.util.loadVolume(self.model.t1mriPrePath, returnNode=True)
    else:
      # Fall back to the legacy file name without the '_pre' suffix.
      oldT1Path = self.model.t1mriPrePath.replace('_pre', '')
      successT1Pre, self.t1PreNativeNode = slicer.util.loadVolume(oldT1Path, returnNode=True)
    if successT1Pre:
      self.t1PreNativeNode.SetName(T1_PRE_NODE)
    successT1PreToACPC, self.regMatT1PreToACPCNode = slicer.util.loadTransform(self.model.regMatSlicerT1Mri2ACPCPath, returnNode=True)
    if successT1Pre and successT1PreToACPC:
      self.t1PreNativeNode.SetAndObserveTransformNodeID(self.regMatT1PreToACPCNode.GetID())
    headAttributes = {'SetSliceIntersectionVisibility': True,
                      "SetColor": (1, 0.75, 0.8),
                      'SetOpacity': .05,
                      'SetBackfaceCulling': False}
    successHead, self.headNativeModelNode = logic.loadModel(self.model.meshHeadPath, returnNode=True, attributes=headAttributes)
    if successHead and successT1PreToACPC:
      self.headNativeModelNode.SetAndObserveTransformNodeID(self.regMatT1PreToACPCNode.GetID())
    self.visualizationCollapsibleButton.show()
    logic = EpilocVisualizationLogic()
    logic.center3DView()
    logic.centerSlices((0,0,0))
    logic.setLinkedControl(True)
    ## Show volumes in slices
    if successCt:
      self.bgSelector.setCurrentNode(self.ctPostNativeNode)
    if successT1Post:
      self.fgSelector.setCurrentNode(self.t1PostNativeNode)
    elif successT1Pre:
      self.fgSelector.setCurrentNode(self.t1PreNativeNode)
  def loadPatientMNIData(self, xmlPath, csvPath):
    """Populate the scene in MNI space, using CSV-provided MNI plot centers."""
    self.mniScene = True
    logic = EpilocVisualizationLogic()
    ### LOAD PATIENT DATA ###
    self.loadDataCollapsibleButton.setChecked(False)
    logic.closeSlicerScene()
    ## CT-Post
    successCt, self.ctPostMNINode = slicer.util.loadVolume(self.model.regImaCtPost2MNIPath, returnNode=True)
    if successCt:
      self.ctPostMNINode.SetName(NORMALIZED + ' ' + CT_POST_NODE)
    ## Load electrodes ##
    for electrode in self.electrodes:
      electrode.button.hide()
      electrode.plotsGroupBox.hide()
    self.electrodes = []
    slicer.util.delayDisplay('Loading electrodes from ' + xmlPath + ' and ' + csvPath, 1500)
    self.electrodes = logic.loadElectrodes(xmlPath)
    mniElectrodes = logic.loadElectrodes(csvPath)
    # Replace each plot center with the MNI coordinates from the CSV,
    # matching electrodes by name (plot order assumed identical).
    for electrode in self.electrodes:
      for mniElectrode in mniElectrodes:
        if mniElectrode.name == electrode.name:
          for i in range(len(electrode.plots)):
            electrode.plots[i].center = mniElectrode.plots[i].mniCenter
    for electrode in self.electrodes:
      self.electrodesGroupBox.layout().addWidget(electrode.getElectrodeButton())
    for electrode in self.electrodes:
      self.electrodesAndPlotsLayout.addWidget(electrode.getPlotsGroupBox())
      electrode.makeAndLoadModels()
    ## T1-post
    successT1Post, self.t1PostMNINode = slicer.util.loadVolume(self.model.regImaT1MriPost2MNIPath, returnNode=True)
    if successT1Post:
      self.t1PostMNINode.SetName(NORMALIZED + ' ' + T1_POST_NODE)
    ## T1-pre
    # NOTE(review): if none of these three paths exist, successT1Pre is never
    # bound and the 'if successT1Pre:' below raises NameError — fix separately.
    if os.path.exists(self.model.regImaT1MriPre2MNIPath):
      successT1Pre, self.t1PreMNINode = slicer.util.loadVolume(self.model.regImaT1MriPre2MNIPath, returnNode=True)
    elif os.path.exists(self.model.regImaT1MriPre2MNIPath.replace('_pre', '')):
      oldT1Path = self.model.regImaT1MriPre2MNIPath.replace('_pre', '')
      successT1Pre, self.t1PreMNINode = slicer.util.loadVolume(oldT1Path, returnNode=True)
    elif os.path.exists(self.model.regImaT1MriPre2MNIPath.replace('_pre', '_head_unbiased')):
      unbiasedHeadPath = self.model.regImaT1MriPre2MNIPath.replace('_pre', '_head_unbiased')
      successT1Pre, self.t1PreMNINode = slicer.util.loadVolume(unbiasedHeadPath, returnNode=True)
    if successT1Pre:
      self.t1PreMNINode.SetName(NORMALIZED + ' ' + T1_PRE_NODE)
    """
    headAttributes = {'SetSliceIntersectionVisibility': True,
                      "SetColor": (1, 0.75, 0.8),
                      'SetOpacity': .05,
                      'SetBackfaceCulling': False}
    successHead, self.headNativeModelNode = logic.loadModel(self.model.meshHeadPath, returnNode=True, attributes=headAttributes)
    """
    ## MNI template
    mniPath = os.path.join(moduleDir, 'Resources/Volumes', 'MNI152_T1_1mm.nii.gz')
    successMNI, self.mniNode = slicer.util.loadVolume(mniPath, returnNode=True)
    self.visualizationCollapsibleButton.show()
    logic = EpilocVisualizationLogic()
    logic.center3DView()
    logic.centerSlices((0.5, 2.5, -4))
    logic.setLinkedControl(True)
    if successCt:
      self.bgSelector.setCurrentNode(self.ctPostMNINode)
    if successMNI:
      displayNode = self.mniNode.GetDisplayNode()
      # Fixed window/level suited to the bundled MNI152 template.
      displayNode.SetAutoWindowLevel(False)
      displayNode.SetWindowLevel(9000, 5000)
      self.fgSelector.setCurrentNode(self.mniNode)
    elif successT1Post:
      self.fgSelector.setCurrentNode(self.t1PostMNINode)
    elif successT1Pre:
      self.fgSelector.setCurrentNode(self.t1PreMNINode)
class EpilocVisualizationLogic(ScriptedLoadableModuleLogic):
  """Scene and GUI helpers for the Epiloc visualization module.

  Stateless utility methods: building small Qt layouts, loading models and
  electrode files, and manipulating Slicer slice/3D views.
  """
  def addCenteredPushButtonToLayout(self, parent, label, slot, styleSheet=None):
    """Add a horizontally centered push button to parent's layout.

    :param parent: widget whose layout() receives the button
    :param label: button text
    :param slot: callable connected to the clicked signal
    :param styleSheet: optional Qt style sheet for the button
    :return: the created QPushButton
    """
    layout = qt.QHBoxLayout()
    button = qt.QPushButton(label)
    # Fixed size policy keeps the button at its natural size (centered look).
    button.setSizePolicy(FIXED, FIXED)
    button.clicked.connect(slot)
    if styleSheet is not None:
      button.setStyleSheet(styleSheet)
    layout.addWidget(button)
    parent.layout().addLayout(layout)
    return button
  def getBrowseLayout(self, formLabel, defaultText=None):
    """Build a 'label + line edit + Browse...' row.

    :return: (layout, lineEdit, browseButton)
    """
    layout = qt.QHBoxLayout()
    lineEdit = qt.QLineEdit()
    if defaultText is not None:
      lineEdit.setText(defaultText)
    formLayout = qt.QFormLayout()
    formLayout.addRow(formLabel, lineEdit)
    layout.addLayout(formLayout)
    browseButton = qt.QPushButton('Browse...')
    layout.addWidget(browseButton)
    return layout, lineEdit, browseButton
  def loadModel(self, modelPath, returnNode=False, attributes=None):
    """Load a model file and apply display-node setter calls.

    :param modelPath: path to the model file
    :param returnNode: kept for interface compatibility (node is always returned)
    :param attributes: mapping of display-node method name -> argument,
        e.g. {'SetOpacity': .05}; each is invoked on the display node
    :return: (success, modelNode)
    """
    # Fix: 'attributes={}' was a mutable default argument; use a None
    # sentinel instead (behavior unchanged for all callers).
    if attributes is None:
      attributes = {}
    success, modelNode = slicer.util.loadModel(modelPath, returnNode=True)
    if success:
      displayNode = modelNode.GetDisplayNode()
      for key, value in attributes.items():
        getattr(displayNode, key)(value)
    return success, modelNode
  def loadElectrodes(self, path):
    """Read electrodes from an XML or localizations-CSV file.

    :raises ValueError: if the file extension is neither .xml nor .csv
    """
    electrodesReader = ElectrodesIO.ElectrodesReader()
    if path.lower().endswith('.xml'):
      electrodes = electrodesReader.getElectrodesFromXML(path)
    elif path.lower().endswith('.csv'):
      electrodes = electrodesReader.getElectrodesFromLocalizationsCSV(path)
    else:
      # Fix: previously an unsupported extension left 'electrodes' unbound
      # and raised UnboundLocalError; fail with an explicit error instead.
      raise ValueError('Unsupported electrodes file format: %s' % path)
    return electrodes
  def center3DView(self, point=None):
    """Center the first 3D view on point (RAS), or reset its focal point."""
    layoutManager = slicer.app.layoutManager()
    threeDWidget = layoutManager.threeDWidget(0)
    threeDView = threeDWidget.threeDView()
    if point is None:
      threeDView.resetFocalPoint()
    else:
      x,y,z = point
      threeDView.setFocalPoint(x,y,z)
  def centerSlices(self, point=None, fitSlices=False):
    """Reset slice orientations and move the slice offsets to point.

    :param point: (sagittal, coronal, axial) offsets matching the
        Yellow/Green/Red slice views; None leaves offsets unchanged
    :param fitSlices: when True, also fit each slice to its volume
    """
    self.setAllSlicesToDefault()
    for i, color in enumerate(['Yellow', 'Green', 'Red']):
      sliceLogic = slicer.app.layoutManager().sliceWidget(color).sliceLogic()
      if fitSlices:
        sliceLogic.FitSliceToAll()
      if point is not None:
        offset = point[i]
        sliceLogic.SetSliceOffset(offset)
  def getMarkupsFiducialNode(self, name=None, color=None, selectedColor=None, labelFormat=None, glyphScale=None, textScale=None, transformID=None):
    """Create and add a locked markups fiducial node with display options.

    :return: (fiducialNode, displayNode)
    """
    fidNode = slicer.vtkMRMLMarkupsFiducialNode()
    if name:
      fidNode.SetName(name)
    if transformID:
      fidNode.SetAndObserveTransformNodeID(transformID)
    if labelFormat:
      fidNode.SetMarkupLabelFormat(labelFormat)
    fidNode.SetLocked(True) # the "locked" property seems not to be displayed on the Markups module, even though the node does become locked
    slicer.mrmlScene.AddNode(fidNode)
    displayNode = fidNode.GetDisplayNode()
    if color:
      displayNode.SetColor(color)
    if selectedColor:
      displayNode.SetSelectedColor(selectedColor)
    if glyphScale is not None:
      displayNode.SetGlyphScale(glyphScale)
    if textScale is not None:
      displayNode.SetTextScale(textScale)
    return fidNode, displayNode
  def setLinkedControl(self, state):
    """Enable/disable linked control on all three slice composite nodes."""
    for color in ['Red', 'Yellow', 'Green']:
      sliceLogic = slicer.app.layoutManager().sliceWidget(color).sliceLogic()
      compositeNode = sliceLogic.GetSliceCompositeNode()
      compositeNode.SetLinkedControl(state)
  def setSliceToDefault(self, scene, sliceColor):
    """
    Reset slice default orientation, i.e, axial, coronal, sagittal
    :param scene: Slicer scene
    :param sliceColor: slice color like found int const.SLICE_COLOR_XXX
    """
    node = self.getSliceNodeByColor(scene, sliceColor)
    # Fix: the original called node.StartModify() BEFORE the None check,
    # so a missing slice node raised AttributeError; guard first.
    if node is None:
      return
    modifyId = node.StartModify()
    if sliceColor == const.SLICE_COLOR_AXIAL:
      affine = np.array([[-1, 0, 0, 0],[0, 1, 0, 0],[ 0, 0, 1, 0],[ 0, 0, 0, 1]])
      node.SetSliceToRAS(self.getVTK4x4Matrix(affine))
    elif sliceColor == const.SLICE_COLOR_CORONAL:
      affine = np.array([[-1, 0, 0, 0],[0, 0, 1, 0],[ 0, 1, 0, 0],[ 0, 0, 0, 1]])
      node.SetSliceToRAS(self.getVTK4x4Matrix(affine))
    elif sliceColor == const.SLICE_COLOR_SAGITTAL:
      affine = np.array([[0, 0, 1, 0],[-1, 0, 0, 0],[ 0, 1, 0, 0],[ 0, 0, 0, 1]])
      node.SetSliceToRAS(self.getVTK4x4Matrix(affine))
    node.UpdateMatrices()
    node.EndModify(modifyId)
  def setAllSlicesToDefault(self):
    """
    Reset all slice default orientation, i.e, axial, coronal, sagittal
    """
    scene = slicer.mrmlScene
    self.setSliceToDefault(scene,const.SLICE_COLOR_AXIAL)
    self.setSliceToDefault(scene,const.SLICE_COLOR_CORONAL)
    self.setSliceToDefault(scene,const.SLICE_COLOR_SAGITTAL)
  def getVTK4x4Matrix(self, matrix):
    """Convert a 4x4 array-like into a vtkMatrix4x4."""
    vtkMatrix = vtk.vtkMatrix4x4()
    # range() instead of Python-2-only xrange() (identical iteration).
    for row in range(4):
      for col in range(4):
        vtkMatrix.SetElement(row, col, matrix[row,col])
    return vtkMatrix
  def getMatrixFromTransformNodeID(self, tID):
    """Return a transform node's to-world matrix as a 4x4 numpy array."""
    vtkMatrix = vtk.vtkMatrix4x4()
    slicer.mrmlScene.GetNodeByID(tID).GetMatrixTransformToWorld(vtkMatrix)
    # Fix: np.float was removed in NumPy >= 1.24; plain float is equivalent.
    matrix = np.identity(4, float)
    for row in range(4):
      for col in range(4):
        matrix[row,col] = vtkMatrix.GetElement(row,col)
    return matrix
  def getSliceNodeByColor(self, scene, sliceColor):
    """
    Return the MRLML slice node for the given orientation string.
    :param scene: Slicer scene
    :param sliceColor: slice color like in const.SLICE_COLOR_XXX
    :return: the matching node, or None if not found
    """
    nodes = scene.GetNodesByClass('vtkMRMLSliceNode')
    for idx in range(nodes.GetNumberOfItems()):
      node = nodes.GetItemAsObject(idx)
      if node.GetName() == sliceColor:
        return node
    return None
  def setBackgroundAndForegroundVolumes(self, bgVolumeNode=None, fgVolumeNode=None, opacity=None):
    """Set bg/fg volume IDs and fg opacity on all three slice views.

    Arguments left as None are not touched.
    """
    for color in ['Red', 'Yellow', 'Green']:
      sliceLogic = slicer.app.layoutManager().sliceWidget(color).sliceLogic()
      compositeNode = sliceLogic.GetSliceCompositeNode()
      if bgVolumeNode is not None:
        compositeNode.SetBackgroundVolumeID(bgVolumeNode.GetID())
      if fgVolumeNode is not None:
        compositeNode.SetForegroundVolumeID(fgVolumeNode.GetID())
      if opacity is not None:
        compositeNode.SetForegroundOpacity(opacity)
  def closeSlicerScene(self):
    """Clear every node from the MRML scene."""
    # Close scene
    slicer.mrmlScene.Clear(0)
  def sliceIn3DViewVisibility(self, visibility, sliceColors=['Red', 'Yellow', 'Green']):
    """Show/hide the given slice planes in the 3D view."""
    for color in sliceColors:
      sliceLogic = slicer.app.layoutManager().sliceWidget(color).sliceLogic()
      sliceNode = sliceLogic.GetSliceNode()
      sliceNode.SetSliceVisible(visibility)
| [
"sys.path.insert",
"CompareVolumes.LayerReveal",
"__main__.slicer.mrmlScene.GetNodesByClass",
"atlaslabels.AtlasReader",
"__main__.qt.QFormLayout",
"numpy.array",
"SurfaceToolbox.numericInputFrame",
"__main__.qt.QHBoxLayout",
"__main__.qt.QFileDialog.getExistingDirectory",
"os.path.exists",
"__m... | [((172, 197), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (187, 197), False, 'import os\n'), ((260, 287), 'sys.path.insert', 'sys.path.insert', (['(0)', 'codeDir'], {}), '(0, codeDir)\n', (275, 287), False, 'import sys\n'), ((224, 258), 'os.path.join', 'os.path.join', (['moduleDir', 'os.pardir'], {}), '(moduleDir, os.pardir)\n', (236, 258), False, 'import os\n'), ((1575, 1600), 'atlaslabels.AtlasReader', 'atlaslabels.AtlasReader', ([], {}), '()\n', (1598, 1600), False, 'import atlaslabels\n'), ((1684, 1728), 'os.path.join', 'os.path.join', (['codeDir', 'os.pardir', '"""patients"""'], {}), "(codeDir, os.pardir, 'patients')\n", (1696, 1728), False, 'import os\n'), ((1755, 1789), 'os.path.join', 'os.path.join', (['moduleDir', '"""History"""'], {}), "(moduleDir, 'History')\n", (1767, 1789), False, 'import os\n'), ((1817, 1861), 'os.path.join', 'os.path.join', (['self.historyDir', '"""history.txt"""'], {}), "(self.historyDir, 'history.txt')\n", (1829, 1861), False, 'import os\n'), ((1980, 2012), 'os.path.dirname', 'os.path.dirname', (['self.patientDir'], {}), '(self.patientDir)\n', (1995, 2012), False, 'import os\n'), ((2088, 2166), 'PatientModelEpilepsy.PatientModelEpilepsy', 'PatientModelEpilepsy.PatientModelEpilepsy', (['patientId'], {'rootDir': 'self.patientsDir'}), '(patientId, rootDir=self.patientsDir)\n', (2129, 2166), False, 'import PatientModelEpilepsy\n'), ((2277, 2303), '__main__.ctk.ctkCollapsibleButton', 'ctk.ctkCollapsibleButton', ([], {}), '()\n', (2301, 2303), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((3250, 3276), '__main__.ctk.ctkCollapsibleButton', 'ctk.ctkCollapsibleButton', ([], {}), '()\n', (3274, 3276), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((3802, 3828), '__main__.ctk.ctkCollapsibleButton', 'ctk.ctkCollapsibleButton', ([], {}), '()\n', (3826, 3828), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((4093, 4128), '__main__.qt.QCheckBox', 'qt.QCheckBox', (['"""View 
electrode axis"""'], {}), "('View electrode axis')\n", (4105, 4128), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((4393, 4409), '__main__.qt.QHBoxLayout', 'qt.QHBoxLayout', ([], {}), '()\n', (4407, 4409), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((4536, 4571), '__main__.qt.QGroupBox', 'qt.QGroupBox', (['"""Select an electrode"""'], {}), "('Select an electrode')\n", (4548, 4571), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((4961, 4987), '__main__.ctk.ctkCollapsibleButton', 'ctk.ctkCollapsibleButton', ([], {}), '()\n', (4985, 4987), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((6506, 6630), 'SurfaceToolbox.numericInputFrame', 'numericInputFrame', (['self.parent', '"""Foreground opacity:"""', '"""Change the opacity of the foreground volume."""', '(0.0)', '(1.0)', '(0.01)', '(2)'], {}), "(self.parent, 'Foreground opacity:',\n 'Change the opacity of the foreground volume.', 0.0, 1.0, 0.01, 2)\n", (6523, 6630), False, 'from SurfaceToolbox import numericInputFrame\n'), ((6878, 6910), '__main__.qt.QPushButton', 'qt.QPushButton', (['"""Toggle volumes"""'], {}), "('Toggle volumes')\n", (6892, 6910), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((7081, 7112), '__main__.qt.QGroupBox', 'qt.QGroupBox', (['"""Layers blending"""'], {}), "('Layers blending')\n", (7093, 7112), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((7277, 7319), '__main__.qt.QCheckBox', 'qt.QCheckBox', (['"""Green and magenta blending"""'], {}), "('Green and magenta blending')\n", (7289, 7319), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((7570, 7598), '__main__.qt.QCheckBox', 'qt.QCheckBox', (['"""Layer reveal"""'], {}), "('Layer reveal')\n", (7582, 7598), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((7827, 7888), '__main__.slicer.mrmlScene.GetNodesByClass', 'slicer.mrmlScene.GetNodesByClass', (['"""vtkMRMLSliceCompositeNode"""'], {}), "('vtkMRMLSliceCompositeNode')\n", (7859, 7888), False, 'from __main__ import vtk, 
qt, ctk, slicer\n'), ((8462, 8494), 'os.path.exists', 'os.path.exists', (['self.historyPath'], {}), '(self.historyPath)\n', (8476, 8494), False, 'import os\n'), ((8789, 8881), '__main__.qt.QFileDialog.getExistingDirectory', 'qt.QFileDialog.getExistingDirectory', (['None', '"""Browse patient directory"""', 'self.patientsDir'], {}), "(None, 'Browse patient directory', self.\n patientsDir)\n", (8824, 8881), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((12621, 12663), 'os.path.exists', 'os.path.exists', (['self.model.xmlVerifiedPath'], {}), '(self.model.xmlVerifiedPath)\n', (12635, 12663), False, 'import os\n'), ((13236, 13280), 'os.path.exists', 'os.path.exists', (['self.model.localizationsPath'], {}), '(self.model.localizationsPath)\n', (13250, 13280), False, 'import os\n'), ((13974, 14036), '__main__.slicer.util.loadVolume', 'slicer.util.loadVolume', (['self.model.ctPostPath'], {'returnNode': '(True)'}), '(self.model.ctPostPath, returnNode=True)\n', (13996, 14036), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((14088, 14174), '__main__.slicer.util.loadTransform', 'slicer.util.loadTransform', (['self.model.regMatSlicerCtPost2ACPCPath'], {'returnNode': '(True)'}), '(self.model.regMatSlicerCtPost2ACPCPath,\n returnNode=True)\n', (14113, 14174), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((14439, 14507), '__main__.slicer.util.delayDisplay', 'slicer.util.delayDisplay', (["('Loading electrodes from ' + xmlPath)", '(1500)'], {}), "('Loading electrodes from ' + xmlPath, 1500)\n", (14463, 14507), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((15313, 15378), '__main__.slicer.util.loadVolume', 'slicer.util.loadVolume', (['self.model.t1mriPostPath'], {'returnNode': '(True)'}), '(self.model.t1mriPostPath, returnNode=True)\n', (15335, 15378), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((15520, 15609), '__main__.slicer.util.loadTransform', 'slicer.util.loadTransform', (['self.model.regMatSlicerT1MriPost2ACPCPath'], 
{'returnNode': '(True)'}), '(self.model.regMatSlicerT1MriPost2ACPCPath,\n returnNode=True)\n', (15545, 15609), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((15787, 15826), 'os.path.exists', 'os.path.exists', (['self.model.t1mriPrePath'], {}), '(self.model.t1mriPrePath)\n', (15801, 15826), False, 'import os\n'), ((16261, 16347), '__main__.slicer.util.loadTransform', 'slicer.util.loadTransform', (['self.model.regMatSlicerT1Mri2ACPCPath'], {'returnNode': '(True)'}), '(self.model.regMatSlicerT1Mri2ACPCPath, returnNode\n =True)\n', (16286, 16347), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((17811, 17883), '__main__.slicer.util.loadVolume', 'slicer.util.loadVolume', (['self.model.regImaCtPost2MNIPath'], {'returnNode': '(True)'}), '(self.model.regImaCtPost2MNIPath, returnNode=True)\n', (17833, 17883), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((18168, 18260), '__main__.slicer.util.delayDisplay', 'slicer.util.delayDisplay', (["('Loading electrodes from ' + xmlPath + ' and ' + csvPath)", '(1500)'], {}), "('Loading electrodes from ' + xmlPath + ' and ' +\n csvPath, 1500)\n", (18192, 18260), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((19015, 19090), '__main__.slicer.util.loadVolume', 'slicer.util.loadVolume', (['self.model.regImaT1MriPost2MNIPath'], {'returnNode': '(True)'}), '(self.model.regImaT1MriPost2MNIPath, returnNode=True)\n', (19037, 19090), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((19219, 19268), 'os.path.exists', 'os.path.exists', (['self.model.regImaT1MriPre2MNIPath'], {}), '(self.model.regImaT1MriPre2MNIPath)\n', (19233, 19268), False, 'import os\n'), ((20469, 20537), 'os.path.join', 'os.path.join', (['moduleDir', '"""Resources/Volumes"""', '"""MNI152_T1_1mm.nii.gz"""'], {}), "(moduleDir, 'Resources/Volumes', 'MNI152_T1_1mm.nii.gz')\n", (20481, 20537), False, 'import os\n'), ((20573, 20621), '__main__.slicer.util.loadVolume', 'slicer.util.loadVolume', (['mniPath'], {'returnNode': '(True)'}), 
'(mniPath, returnNode=True)\n', (20595, 20621), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((21494, 21510), '__main__.qt.QHBoxLayout', 'qt.QHBoxLayout', ([], {}), '()\n', (21508, 21510), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((21528, 21549), '__main__.qt.QPushButton', 'qt.QPushButton', (['label'], {}), '(label)\n', (21542, 21549), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((21886, 21902), '__main__.qt.QHBoxLayout', 'qt.QHBoxLayout', ([], {}), '()\n', (21900, 21902), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((21922, 21936), '__main__.qt.QLineEdit', 'qt.QLineEdit', ([], {}), '()\n', (21934, 21936), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((22037, 22053), '__main__.qt.QFormLayout', 'qt.QFormLayout', ([], {}), '()\n', (22051, 22053), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((22162, 22189), '__main__.qt.QPushButton', 'qt.QPushButton', (['"""Browse..."""'], {}), "('Browse...')\n", (22176, 22189), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((22376, 22425), '__main__.slicer.util.loadModel', 'slicer.util.loadModel', (['modelPath'], {'returnNode': '(True)'}), '(modelPath, returnNode=True)\n', (22397, 22425), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((22697, 22728), 'ElectrodesIO.ElectrodesReader', 'ElectrodesIO.ElectrodesReader', ([], {}), '()\n', (22726, 22728), False, 'import ElectrodesIO\n'), ((23058, 23084), '__main__.slicer.app.layoutManager', 'slicer.app.layoutManager', ([], {}), '()\n', (23082, 23084), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((23937, 23972), '__main__.slicer.vtkMRMLMarkupsFiducialNode', 'slicer.vtkMRMLMarkupsFiducialNode', ([], {}), '()\n', (23970, 23972), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((24341, 24374), '__main__.slicer.mrmlScene.AddNode', 'slicer.mrmlScene.AddNode', (['fidNode'], {}), '(fidNode)\n', (24365, 24374), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((26607, 26625), 
'__main__.vtk.vtkMatrix4x4', 'vtk.vtkMatrix4x4', ([], {}), '()\n', (26623, 26625), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((26850, 26868), '__main__.vtk.vtkMatrix4x4', 'vtk.vtkMatrix4x4', ([], {}), '()\n', (26866, 26868), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((26965, 26989), 'numpy.identity', 'np.identity', (['(4)', 'np.float'], {}), '(4, np.float)\n', (26976, 26989), True, 'import numpy as np\n'), ((28333, 28358), '__main__.slicer.mrmlScene.Clear', 'slicer.mrmlScene.Clear', (['(0)'], {}), '(0)\n', (28355, 28358), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((2033, 2063), 'os.path.split', 'os.path.split', (['self.patientDir'], {}), '(self.patientDir)\n', (2046, 2063), False, 'import os\n'), ((2467, 2483), '__main__.qt.QVBoxLayout', 'qt.QVBoxLayout', ([], {}), '()\n', (2481, 2483), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((3460, 3476), '__main__.qt.QVBoxLayout', 'qt.QVBoxLayout', ([], {}), '()\n', (3474, 3476), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((3941, 3957), '__main__.qt.QVBoxLayout', 'qt.QVBoxLayout', ([], {}), '()\n', (3955, 3957), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((4614, 4630), '__main__.qt.QVBoxLayout', 'qt.QVBoxLayout', ([], {}), '()\n', (4628, 4630), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((5091, 5107), '__main__.qt.QVBoxLayout', 'qt.QVBoxLayout', ([], {}), '()\n', (5105, 5107), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((5348, 5364), '__main__.qt.QHBoxLayout', 'qt.QHBoxLayout', ([], {}), '()\n', (5362, 5364), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((5439, 5455), '__main__.qt.QLabel', 'qt.QLabel', (['label'], {}), '(label)\n', (5448, 5455), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((5569, 5595), '__main__.slicer.qMRMLNodeComboBox', 'slicer.qMRMLNodeComboBox', ([], {}), '()\n', (5593, 5595), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((7146, 7162), '__main__.qt.QHBoxLayout', 
'qt.QHBoxLayout', ([], {}), '()\n', (7160, 7162), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((8272, 8303), 'os.path.exists', 'os.path.exists', (['self.historyDir'], {}), '(self.historyDir)\n', (8286, 8303), False, 'import os\n'), ((8317, 8342), 'os.mkdir', 'os.mkdir', (['self.historyDir'], {}), '(self.historyDir)\n', (8325, 8342), False, 'import os\n'), ((9072, 9103), 'os.path.exists', 'os.path.exists', (['self.patientDir'], {}), '(self.patientDir)\n', (9086, 9103), False, 'import os\n'), ((9117, 9181), '__main__.slicer.util.delayDisplay', 'slicer.util.delayDisplay', (['"""Patient folder does not exist."""', '(1500)'], {}), "('Patient folder does not exist.', 1500)\n", (9141, 9181), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((9277, 9333), '__main__.slicer.util.delayDisplay', 'slicer.util.delayDisplay', (['"""No XML file was found."""', '(1500)'], {}), "('No XML file was found.', 1500)\n", (9301, 9333), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((9476, 9507), 'os.path.exists', 'os.path.exists', (['self.patientDir'], {}), '(self.patientDir)\n', (9490, 9507), False, 'import os\n'), ((9521, 9584), '__main__.slicer.util.delayDisplay', 'slicer.util.delayDisplay', (['"""Patient foder does not exist."""', '(1500)'], {}), "('Patient foder does not exist.', 1500)\n", (9545, 9584), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((9680, 9736), '__main__.slicer.util.delayDisplay', 'slicer.util.delayDisplay', (['"""No XML file was found."""', '(1500)'], {}), "('No XML file was found.', 1500)\n", (9704, 9736), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((9832, 9888), '__main__.slicer.util.delayDisplay', 'slicer.util.delayDisplay', (['"""No CSV file was found."""', '(1500)'], {}), "('No CSV file was found.', 1500)\n", (9856, 9888), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((12417, 12466), 'CompareVolumes.LayerReveal', 'CompareVolumes.LayerReveal', ([], {'width': '(300)', 'height': '(300)'}), '(width=300, 
height=300)\n', (12443, 12466), False, 'import CompareVolumes\n'), ((12727, 12761), 'os.path.exists', 'os.path.exists', (['self.model.xmlPath'], {}), '(self.model.xmlPath)\n', (12741, 12761), False, 'import os\n'), ((13369, 13495), '__main__.qt.QFileDialog.getOpenFileName', 'qt.QFileDialog.getOpenFileName', (['None', '"""Choose CSV electrodes localizations file"""', 'self.patientDir', '"""CSV files (*.csv)"""'], {}), "(None,\n 'Choose CSV electrodes localizations file', self.patientDir,\n 'CSV files (*.csv)')\n", (13399, 13495), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((15877, 15941), '__main__.slicer.util.loadVolume', 'slicer.util.loadVolume', (['self.model.t1mriPrePath'], {'returnNode': '(True)'}), '(self.model.t1mriPrePath, returnNode=True)\n', (15899, 15941), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((16073, 16123), '__main__.slicer.util.loadVolume', 'slicer.util.loadVolume', (['oldT1Path'], {'returnNode': '(True)'}), '(oldT1Path, returnNode=True)\n', (16095, 16123), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((19316, 19390), '__main__.slicer.util.loadVolume', 'slicer.util.loadVolume', (['self.model.regImaT1MriPre2MNIPath'], {'returnNode': '(True)'}), '(self.model.regImaT1MriPre2MNIPath, returnNode=True)\n', (19338, 19390), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((8608, 8638), 'os.path.exists', 'os.path.exists', (['lastPatientDir'], {}), '(lastPatientDir)\n', (8622, 8638), False, 'import os\n'), ((12840, 12949), '__main__.qt.QFileDialog.getOpenFileName', 'qt.QFileDialog.getOpenFileName', (['None', '"""Choose XML electrodes file"""', 'self.patientDir', '"""XML files (*.xml)"""'], {}), "(None, 'Choose XML electrodes file', self.\n patientDir, 'XML files (*.xml)')\n", (12870, 12949), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((19599, 19649), '__main__.slicer.util.loadVolume', 'slicer.util.loadVolume', (['oldT1Path'], {'returnNode': '(True)'}), '(oldT1Path, returnNode=True)\n', (19621, 19649), 
False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((25519, 25586), 'numpy.array', 'np.array', (['[[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (25527, 25586), True, 'import numpy as np\n'), ((26877, 26910), '__main__.slicer.mrmlScene.GetNodeByID', 'slicer.mrmlScene.GetNodeByID', (['tID'], {}), '(tID)\n', (26905, 26910), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((19893, 19950), '__main__.slicer.util.loadVolume', 'slicer.util.loadVolume', (['unbiasedHeadPath'], {'returnNode': '(True)'}), '(unbiasedHeadPath, returnNode=True)\n', (19915, 19950), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((25735, 25802), 'numpy.array', 'np.array', (['[[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]'], {}), '([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])\n', (25743, 25802), True, 'import numpy as np\n'), ((11571, 11597), '__main__.slicer.app.layoutManager', 'slicer.app.layoutManager', ([], {}), '()\n', (11595, 11597), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((25952, 26019), 'numpy.array', 'np.array', (['[[0, 0, 1, 0], [-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]]'], {}), '([[0, 0, 1, 0], [-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]])\n', (25960, 26019), True, 'import numpy as np\n'), ((23521, 23547), '__main__.slicer.app.layoutManager', 'slicer.app.layoutManager', ([], {}), '()\n', (23545, 23547), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((24882, 24908), '__main__.slicer.app.layoutManager', 'slicer.app.layoutManager', ([], {}), '()\n', (24906, 24908), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((27821, 27847), '__main__.slicer.app.layoutManager', 'slicer.app.layoutManager', ([], {}), '()\n', (27845, 27847), False, 'from __main__ import vtk, qt, ctk, slicer\n'), ((28511, 28537), '__main__.slicer.app.layoutManager', 'slicer.app.layoutManager', ([], {}), '()\n', (28535, 28537), False, 'from __main__ import vtk, qt, ctk, 
slicer\n')] |
import numpy as np
import os, glob
from matplotlib import pyplot as plt
import stat_tools as st
from PIL import Image
deg2rad=np.pi/180
# Camera and capture day to process; keys into the calibration tables below.
camera='HD20'
day='20180310'
# Per-camera installation site as [latitude, longitude] in degrees.
coordinate = {'HD815_1': [40.87203321, -72.87348295],
              'HD815_2': [40.87189059, -72.873687],
              'HD490' : [40.865968816, -72.884647222],
              'HD17' : [40.8575056, -72.8547344],
              'HD19' : [40.8580088, -72.8575717],
              'HD20' : [40.85785, -72.8597],
              'HD01' : [40.947353, -72.899617],
              'HD02' : [40.948044, -72.898372],
              'HD03' : [40.897122, -72.879053],
              'HD04' : [40.8975, -72.877497],
              'HD05' : [40.915708, -72.892406],
              'HD06' : [40.917275, -72.891592]
              }
# Per-camera intrinsic/extrinsic calibration, used below as:
#   params[cam][0]   -> diameter of the fisheye disk in raw pixels
#   params[cam][1:3] -> disk center (used to derive the crop window)
#   params[cam][3]   -> azimuth offset; params[cam][4:6] -> mis-pointing (beta, azm)
#   params[cam][6:9] -> radial lens-polynomial coefficients c1, c2, c3
# NOTE(review): mapping of entries inferred from their use further down this
# script -- confirm against the calibration procedure that produced them.
params = {'HD815_1':[2821.0000, 1442.8231, 1421.0000, 0.1700, -0.0135, -2.4368, 0.3465, -0.0026, -0.0038],
          'HD815_2':[2821.0000, 1424.0000, 1449.0000, 0.0310, -0.0114, -0.9816, 0.3462, -0.0038, -0.0030],
          'HD490' :[2843.0000, 1472.9511, 1482.6685, 0.1616, 0.0210, -0.5859, 0.3465, -0.0043, -0.0030],
          'HD17' :[2830.0007, 1473.2675, 1459.7203, -0.0986, -0.0106, -1.2440, 0.3441, -0.0015, -0.0042],
          'HD19' :[2826.5389, 1461.0000, 1476.6598, -0.0097, 0.0030, 2.9563, 0.3415, 0.0004, -0.0044],
          'HD20' :[2812.7874, 1475.1453, 1415.0000, 0.1410, -0.0126, 0.4769, 0.3441, 0.0004, -0.0046],
          'HD05' :[2813.3741, 1435.1706, 1453.7087, -0.0119, -0.0857, -1.8675, 0.3499, -0.0033, -0.0027],
          'HD06' :[2809.2813, 1446.4900, 1438.0777, -0.0237, -0.0120, -1.3384, 0.3479, -0.0024, -0.0037],
          'HD01' :[2813.7462, 1472.2066, 1446.3682, 0.3196, -0.0200, -1.9636, 0.3444, -0.0008, -0.0042],
          'HD03' :[2807.8902, 1436.1619, 1439.3879, -0.3942, 0.0527, 2.4658, 0.3334, 0.0129, -0.0085]}
####set up paths, constants and initial parameters
# FIX: glob.glob()/os.makedirs() do NOT expand '~' themselves, so the raw
# '~/data/...' strings used previously matched no files at all -- expand the
# user home directory explicitly.
inpath = os.path.expanduser('~/data/images/')
inpath = inpath + camera + '/'
outpath = os.path.expanduser('~/data/undistort_output/')
lat = coordinate[camera][0]   # site latitude [deg]
lon = coordinate[camera][1]   # site longitude [deg]
min_scatter_angle = 8
dark_threshold = 25      #### threshold of dark DN value (i.e., shadow band)
var_threshold = 4        #### threshold for cloud spatial variation
rbr_clear = -0.15     ### upper bound of clear sky red/blue index
rbr_cloud = -0.05     ### lower bound of cloud red/blue index
ndilate=19
####dimension of the valid portion of the original image, i.e., the disk with elevation_angle>0
####they need to be tuned for each camera
nx,ny=2001,2001  #### size of the undistorted image
max_theta=70*deg2rad ##### maximum zenith angle used for processing
max_tan = np.tan(max_theta)   # radius of the valid disk in the tangent (undistorted) plane

# create the output directory tree outpath/camera/day/ if it does not exist
dest=outpath+camera
if not os.path.isdir(dest):
    os.makedirs(dest)
    os.chmod(dest,0o755)
dest=outpath+camera+'/'+day+'/'
if not os.path.isdir(dest):
    os.makedirs(dest)
    os.chmod(dest,0o755)
xbin,ybin=np.linspace(-max_tan,max_tan,nx), np.linspace(-max_tan,max_tan,ny)
xgrid,ygrid=np.meshgrid(xbin,ybin)####(xgrid, ygrid) are the grids of the undistorted space
valid = xgrid**2+ygrid**2 <= max_tan**2
invalid = xgrid**2+ygrid**2 > (max_tan-1e-2)**2
# crop window of the fisheye disk inside the raw image, from the calibration
# table: params[camera][0] is the disk diameter, params[camera][1:3] its center
nx0=ny0=params[camera][0]
nr0=(nx0+ny0)/4
xstart=int(params[camera][2]-nx0/2+0.5); ystart=int(params[camera][1]-ny0/2+0.5)
nx0=int(nx0+0.5); ny0=int(ny0+0.5)
#####compute the zenith and azimuth angles for each pixel
x0,y0=np.meshgrid(np.linspace(-nx0//2,nx0//2,nx0),np.linspace(-ny0//2,ny0//2,ny0));
r0=np.sqrt(x0**2+y0**2)/nr0;
# theta0=2*np.arcsin(r0/np.sqrt(2))
# Invert the radial lens polynomial r(theta) = c1*theta + c2*theta^3 + c3*theta^5
# numerically: tabulate theta at 51 sample radii (via np.roots) and then
# interpolate the per-pixel zenith angle from that table.
roots=np.zeros(51)
rr=np.arange(51)/100.0
c1,c2,c3=params[camera][6:9]
for i,ref in enumerate(rr):
    roots[i]=np.real(np.roots([c3,0,c2,0,c1,-ref])[-1])
theta0=np.interp(r0/2,rr,roots)
phi0 = np.arctan2(x0,y0) - params[camera][3] ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition
phi0=phi0%(2*np.pi)
beta,azm=params[camera][4:6]
theta=theta0; phi=phi0
#####correction for the mis-pointing error:
# rotate each viewing direction a by angle beta about the horizontal axis k
# (Rodrigues' rotation formula: b = cos(beta)*a + sin(beta)*(k x a) + k*(a.k)*(1-cos(beta)))
k=np.array((np.sin(azm),np.cos(azm),0))
a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]);
a = np.transpose(a,[1,2,0])
b=np.cos(beta)*a + np.sin(beta)*np.cross(k,a,axisb=2) \
  + np.reshape(np.outer(np.dot(a,k),k),(ny0,nx0,3))*(1-np.cos(beta))
theta=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])
phi=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)
# mask out pixels beyond the maximum zenith angle (or behind the camera)
theta_filter = (theta>max_theta) | (theta<=0); theta[theta_filter]=np.nan;
#####coordinate system for the undistorted space
r=np.tan(theta);
x,y=r*np.sin(phi), r*np.cos(phi)
flist = sorted(glob.glob(filepath + '*jpg'))
for f in sorted(flist): ###8200
print(f)
# ######read the image to array
im0=plt.imread(f).astype(np.float32);
im0=im0[ystart:ystart+ny0,xstart:xstart+nx0,:]
im0[theta_filter,:]=np.nan
im=np.zeros((ny,nx,3))
for i in range(3):
im[:,:,i]=st.bin_average2_reg(im0[:,:,i],x,y,xbin,ybin,mask=valid);
im[:,:,i]=st.fill_by_mean2(im[:,:,i],7, mask=(np.isnan(im[:,:,i])) & valid )
ims = Image.fromarray(im.astype(np.uint8))
ims.save(outpath+camera+'/'+day+'/'+os.path.basename(f)[:-3]+'jpg', "JPEG")
| [
"numpy.sqrt",
"numpy.roots",
"numpy.arctan2",
"numpy.sin",
"numpy.arange",
"numpy.cross",
"os.chmod",
"numpy.linspace",
"os.path.isdir",
"numpy.dot",
"numpy.meshgrid",
"glob.glob",
"numpy.isnan",
"numpy.cos",
"numpy.interp",
"numpy.transpose",
"numpy.tan",
"os.makedirs",
"matplot... | [((2742, 2759), 'numpy.tan', 'np.tan', (['max_theta'], {}), '(max_theta)\n', (2748, 2759), True, 'import numpy as np\n'), ((3055, 3078), 'numpy.meshgrid', 'np.meshgrid', (['xbin', 'ybin'], {}), '(xbin, ybin)\n', (3066, 3078), True, 'import numpy as np\n'), ((3605, 3617), 'numpy.zeros', 'np.zeros', (['(51)'], {}), '(51)\n', (3613, 3617), True, 'import numpy as np\n'), ((3761, 3789), 'numpy.interp', 'np.interp', (['(r0 / 2)', 'rr', 'roots'], {}), '(r0 / 2, rr, roots)\n', (3770, 3789), True, 'import numpy as np\n'), ((4227, 4253), 'numpy.transpose', 'np.transpose', (['a', '[1, 2, 0]'], {}), '(a, [1, 2, 0])\n', (4239, 4253), True, 'import numpy as np\n'), ((4607, 4620), 'numpy.tan', 'np.tan', (['theta'], {}), '(theta)\n', (4613, 4620), True, 'import numpy as np\n'), ((2788, 2807), 'os.path.isdir', 'os.path.isdir', (['dest'], {}), '(dest)\n', (2801, 2807), False, 'import os, glob\n'), ((2813, 2830), 'os.makedirs', 'os.makedirs', (['dest'], {}), '(dest)\n', (2824, 2830), False, 'import os, glob\n'), ((2835, 2854), 'os.chmod', 'os.chmod', (['dest', '(493)'], {}), '(dest, 493)\n', (2843, 2854), False, 'import os, glob\n'), ((2895, 2914), 'os.path.isdir', 'os.path.isdir', (['dest'], {}), '(dest)\n', (2908, 2914), False, 'import os, glob\n'), ((2920, 2937), 'os.makedirs', 'os.makedirs', (['dest'], {}), '(dest)\n', (2931, 2937), False, 'import os, glob\n'), ((2942, 2961), 'os.chmod', 'os.chmod', (['dest', '(493)'], {}), '(dest, 493)\n', (2950, 2961), False, 'import os, glob\n'), ((2974, 3008), 'numpy.linspace', 'np.linspace', (['(-max_tan)', 'max_tan', 'nx'], {}), '(-max_tan, max_tan, nx)\n', (2985, 3008), True, 'import numpy as np\n'), ((3008, 3042), 'numpy.linspace', 'np.linspace', (['(-max_tan)', 'max_tan', 'ny'], {}), '(-max_tan, max_tan, ny)\n', (3019, 3042), True, 'import numpy as np\n'), ((3462, 3499), 'numpy.linspace', 'np.linspace', (['(-nx0 // 2)', '(nx0 // 2)', 'nx0'], {}), '(-nx0 // 2, nx0 // 2, nx0)\n', (3473, 3499), True, 'import numpy as np\n'), 
((3494, 3531), 'numpy.linspace', 'np.linspace', (['(-ny0 // 2)', '(ny0 // 2)', 'ny0'], {}), '(-ny0 // 2, ny0 // 2, ny0)\n', (3505, 3531), True, 'import numpy as np\n'), ((3532, 3558), 'numpy.sqrt', 'np.sqrt', (['(x0 ** 2 + y0 ** 2)'], {}), '(x0 ** 2 + y0 ** 2)\n', (3539, 3558), True, 'import numpy as np\n'), ((3621, 3634), 'numpy.arange', 'np.arange', (['(51)'], {}), '(51)\n', (3630, 3634), True, 'import numpy as np\n'), ((3808, 3826), 'numpy.arctan2', 'np.arctan2', (['x0', 'y0'], {}), '(x0, y0)\n', (3818, 3826), True, 'import numpy as np\n'), ((4439, 4473), 'numpy.arctan2', 'np.arctan2', (['b[:, :, 1]', 'b[:, :, 0]'], {}), '(b[:, :, 1], b[:, :, 0])\n', (4449, 4473), True, 'import numpy as np\n'), ((4709, 4737), 'glob.glob', 'glob.glob', (["(filepath + '*jpg')"], {}), "(filepath + '*jpg')\n", (4718, 4737), False, 'import os, glob\n'), ((4983, 5004), 'numpy.zeros', 'np.zeros', (['(ny, nx, 3)'], {}), '((ny, nx, 3))\n', (4991, 5004), True, 'import numpy as np\n'), ((4108, 4119), 'numpy.sin', 'np.sin', (['azm'], {}), '(azm)\n', (4114, 4119), True, 'import numpy as np\n'), ((4120, 4131), 'numpy.cos', 'np.cos', (['azm'], {}), '(azm)\n', (4126, 4131), True, 'import numpy as np\n'), ((4204, 4218), 'numpy.cos', 'np.cos', (['theta0'], {}), '(theta0)\n', (4210, 4218), True, 'import numpy as np\n'), ((4392, 4434), 'numpy.sqrt', 'np.sqrt', (['(b[:, :, 0] ** 2 + b[:, :, 1] ** 2)'], {}), '(b[:, :, 0] ** 2 + b[:, :, 1] ** 2)\n', (4399, 4434), True, 'import numpy as np\n'), ((4629, 4640), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (4635, 4640), True, 'import numpy as np\n'), ((4644, 4655), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (4650, 4655), True, 'import numpy as np\n'), ((5044, 5107), 'stat_tools.bin_average2_reg', 'st.bin_average2_reg', (['im0[:, :, i]', 'x', 'y', 'xbin', 'ybin'], {'mask': 'valid'}), '(im0[:, :, i], x, y, xbin, ybin, mask=valid)\n', (5063, 5107), True, 'import stat_tools as st\n'), ((3719, 3753), 'numpy.roots', 'np.roots', (['[c3, 0, c2, 0, c1, 
-ref]'], {}), '([c3, 0, c2, 0, c1, -ref])\n', (3727, 3753), True, 'import numpy as np\n'), ((4148, 4162), 'numpy.sin', 'np.sin', (['theta0'], {}), '(theta0)\n', (4154, 4162), True, 'import numpy as np\n'), ((4163, 4175), 'numpy.cos', 'np.cos', (['phi0'], {}), '(phi0)\n', (4169, 4175), True, 'import numpy as np\n'), ((4176, 4190), 'numpy.sin', 'np.sin', (['theta0'], {}), '(theta0)\n', (4182, 4190), True, 'import numpy as np\n'), ((4191, 4203), 'numpy.sin', 'np.sin', (['phi0'], {}), '(phi0)\n', (4197, 4203), True, 'import numpy as np\n'), ((4253, 4265), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (4259, 4265), True, 'import numpy as np\n'), ((4270, 4282), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (4276, 4282), True, 'import numpy as np\n'), ((4283, 4306), 'numpy.cross', 'np.cross', (['k', 'a'], {'axisb': '(2)'}), '(k, a, axisb=2)\n', (4291, 4306), True, 'import numpy as np\n'), ((4362, 4374), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (4368, 4374), True, 'import numpy as np\n'), ((4837, 4850), 'matplotlib.pyplot.imread', 'plt.imread', (['f'], {}), '(f)\n', (4847, 4850), True, 'from matplotlib import pyplot as plt\n'), ((4331, 4343), 'numpy.dot', 'np.dot', (['a', 'k'], {}), '(a, k)\n', (4337, 4343), True, 'import numpy as np\n'), ((5160, 5181), 'numpy.isnan', 'np.isnan', (['im[:, :, i]'], {}), '(im[:, :, i])\n', (5168, 5181), True, 'import numpy as np\n'), ((5281, 5300), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (5297, 5300), False, 'import os, glob\n')] |
from numpy import log, sqrt, sin, arctan2, pi
# define a posterior with multiple separate peaks
def multimodal_posterior(theta):
    """Log-posterior with several well-separated peaks along a spiral ridge.

    :param theta: pair ``(x, y)`` of Cartesian coordinates.
    :return: un-normalised log-probability at ``theta``.
    """
    x, y = theta
    radius = sqrt(x*x + y*y)
    angle = arctan2(y, x)
    # distance (in units of 0.1) from the ridge located at r = 0.5 + pi - angle/2
    residual = (radius - (0.5 + pi - 0.5*angle)) / 0.1
    # Gaussian ridge term plus a sin^2 modulation that carves it into peaks
    return -0.5*residual**2 + 4*log(sin(2*angle)**2)
# required for multi-process code when running on windows
if __name__ == "__main__":
    from inference.mcmc import GibbsChain, ParallelTempering
    # define a set of temperature levels: a geometric ladder from 1 up to
    # 10**2.5 (~316), one level per chain
    N_levels = 6
    temps = [10**(2.5*k/(N_levels-1.)) for k in range(N_levels)]
    # create a set of chains - one with each temperature
    chains = [ GibbsChain( posterior=multimodal_posterior, start = [0.5,0.5], temperature=T) for T in temps ]
    # When an instance of ParallelTempering is created, a dedicated process for each chain is spawned.
    # These separate processes will automatically make use of the available cpu cores, such that the
    # computations to advance the separate chains are performed in parallel.
    PT = ParallelTempering(chains=chains)
    # These processes wait for instructions which can be sent using the methods of the
    # ParallelTempering object:
    PT.run_for(minutes=0.5)
    # To recover a copy of the chains held by the processes
    # we can use the return_chains method:
    chains = PT.return_chains()
    # by looking at the trace plot for the T = 1 chain, we see that it makes
    # large jumps across the parameter space due to the swaps.
    chains[0].trace_plot()
    # Even though the posterior has strongly separated peaks, the T = 1 chain
    # was able to explore all of them due to the swaps.
    chains[0].matrix_plot()
    # We can also visualise the acceptance rates of proposed position swaps between
    # each chain using the swap_diagnostics method:
    PT.swap_diagnostics()
    # Because each process waits for instructions from the ParallelTempering object,
    # they will not self-terminate. To terminate all the processes we have to trigger
    # a shutdown even using the shutdown method:
    PT.shutdown()
"numpy.sqrt",
"inference.mcmc.GibbsChain",
"numpy.arctan2",
"numpy.sin",
"inference.mcmc.ParallelTempering"
] | [((155, 176), 'numpy.sqrt', 'sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (159, 176), False, 'from numpy import log, sqrt, sin, arctan2, pi\n'), ((183, 196), 'numpy.arctan2', 'arctan2', (['y', 'x'], {}), '(y, x)\n', (190, 196), False, 'from numpy import log, sqrt, sin, arctan2, pi\n'), ((1015, 1047), 'inference.mcmc.ParallelTempering', 'ParallelTempering', ([], {'chains': 'chains'}), '(chains=chains)\n', (1032, 1047), False, 'from inference.mcmc import GibbsChain, ParallelTempering\n'), ((629, 704), 'inference.mcmc.GibbsChain', 'GibbsChain', ([], {'posterior': 'multimodal_posterior', 'start': '[0.5, 0.5]', 'temperature': 'T'}), '(posterior=multimodal_posterior, start=[0.5, 0.5], temperature=T)\n', (639, 704), False, 'from inference.mcmc import GibbsChain, ParallelTempering\n'), ((267, 281), 'numpy.sin', 'sin', (['(phi * 2.0)'], {}), '(phi * 2.0)\n', (270, 281), False, 'from numpy import log, sqrt, sin, arctan2, pi\n')] |
# import glob
import numpy as np
import os.path as osp
from PIL import Image
import random
import struct
from torch.utils import data
import scipy.ndimage as ndimage
import cv2
from skimage.measure import block_reduce
import h5py
import scipy.ndimage as ndimage
import torch
from tqdm import tqdm
import torchvision.transforms as T
# import PIL
from utils.utils_misc import *
from pathlib import Path
# import pickle
import pickle5 as pickle
from icecream import ic
from utils.utils_total3D.utils_OR_imageops import loadHdr_simple, to_nonhdr
import math
from utils.utils_total3D.data_config import RECON_3D_CLS_OR_dict
from scipy.spatial import cKDTree
import copy
# import math
# from detectron2.structures import BoxMode
# from detectron2.data.dataset_mapper import DatasetMapper
from utils.utils_total3D.utils_OR_vis_labels import RGB_to_01
from utils.utils_total3D.utils_others import Relation_Config, OR4XCLASSES_dict, OR4XCLASSES_not_detect_mapping_ids_dict, OR4X_mapping_catInt_to_RGB
# from detectron2.data import build_detection_test_loader,DatasetCatalog, MetadataCatalog
from utils.utils_scannet import read_ExtM_from_txt, read_img
import utils.utils_nvidia.mdataloader.m_preprocess as m_preprocess
import PIL
import torchvision.transforms as tfv_transform
import warnings
warnings.filterwarnings("ignore")
from utils import transform
from semanticInverse.train.utils_dataset_openrooms_OR_BRDFLight_RAW import *
import json
class iiw(data.Dataset):
    """IIW (Intrinsic Images in the Wild) dataset.

    Loads PNG images listed in IIWTrain.txt / IIWTest.txt, resizes and
    zero-pads each one to a fixed training resolution, and parses the
    per-image JSON human judgements into fixed-size (maxNum) arrays of
    'equal' and 'darker' relative-reflectance point comparisons.
    """
    def __init__(self, opt, data_list=None, logger=basic_logger(), transforms_fixed=None, transforms_semseg=None, transforms_matseg=None, transforms_resize=None,
            split='train', task=None, if_for_training=True, load_first = -1, rseed = 1,
            cascadeLevel = 0,
            maxNum = 800 ):
        # maxNum: fixed number of comparison rows per sample; shorter lists are
        # zero-padded and longer ones are randomly subsampled in __getitem__.
        if logger is None:
            logger = basic_logger()
        self.opt = opt
        self.cfg = self.opt.cfg
        self.logger = logger
        self.rseed = rseed
        self.dataset_name = 'IIW'
        self.split = split
        assert self.split in ['train', 'val']
        self.task = self.split if task is None else task
        self.if_for_training = if_for_training
        self.maxNum = maxNum
        self.data_root = self.opt.cfg.DATASET.iiw_path
        data_list_path = Path(self.cfg.PATH.root) / self.cfg.DATASET.iiw_list_path
        # self.data_list = make_dataset_real(opt, self.data_root, data_list_path, logger=self.logger)
        # Build the absolute image-path list from the split's text file.
        if split == 'train':
            with open(str(data_list_path / 'IIWTrain.txt'), 'r') as fIn:
                im_list = fIn.readlines()
            self.data_list = [osp.join(self.data_root, x.strip()) for x in im_list ]
        elif split == 'val':
            with open(str(data_list_path / 'IIWTest.txt'), 'r') as fIn:
                im_list = fIn.readlines()
            self.data_list = [osp.join(self.data_root, x.strip()) for x in im_list ]
        else:
            raise RuntimeError("Invalid split %s for iiw!"%split)
        # Each image has a sibling .json file holding the human judgements.
        self.json_list = [x.replace('.png', '.json') for x in self.data_list]
        logger.info(white_blue('%s: total frames: %d'%(self.dataset_name, len(self.data_list))))
        self.cascadeLevel = cascadeLevel
        assert transforms_fixed is not None, 'OpenRooms: Need a transforms_fixed!'
        self.transforms_fixed = transforms_fixed
        self.transforms_resize = transforms_resize
        self.transforms_matseg = transforms_matseg
        self.transforms_semseg = transforms_semseg
        self.logger = logger
        # self.target_hw = (cfg.DATA.im_height, cfg.DATA.im_width) # if split in ['train', 'val', 'test'] else (args.test_h, args.test_w)
        # Native network input size and the padded size every sample is
        # letterboxed to (both come from the config).
        self.im_width, self.im_height = self.cfg.DATA.iiw.im_width, self.cfg.DATA.iiw.im_height
        self.im_height_padded, self.im_width_padded = self.cfg.DATA.iiw.im_height_padded_to, self.cfg.DATA.iiw.im_width_padded_to
        self.if_extra_op = False

    def __len__(self):
        """Return the number of images in the split."""
        return len(self.data_list)

    def __getitem__(self, index):
        """Load one image plus its IIW relative-reflectance judgements.

        Returns a dict with the resized/padded image in HDR and SDR forms,
        padding masks, and fixed-size 'eq'/'darker' comparison arrays.
        """
        png_image_path = self.data_list[index]
        # frame_info = {'index': index, 'png_image_path': png_image_path}
        batch_dict = {'image_index': index}
        # pad_mask marks the valid (non-letterbox) region of the padded canvas
        pad_mask = np.zeros((self.im_height_padded, self.im_width_padded), dtype=np.uint8)
        hdr_scale = 1.
        # Read PNG image
        image = Image.open(str(png_image_path))
        # NOTE(review): the two statements above are repeated verbatim below --
        # the duplication is harmless but could be removed.
        hdr_scale = 1.
        # Read PNG image
        image = Image.open(str(png_image_path))
        # im_fixedscale_SDR_uint8 = np.array(image)
        # im_fixedscale_SDR_uint8 = cv2.resize(im_fixedscale_SDR_uint8, (self.im_width, self.im_height), interpolation = cv2.INTER_AREA )
        # image_transformed_fixed = self.transforms_fixed(im_fixedscale_SDR_uint8)
        # im_trainval_SDR = self.transforms_resize(im_fixedscale_SDR_uint8) # not necessarily \in [0., 1.] [!!!!]; already padded
        # # print(im_trainval_SDR.shape, type(im_trainval_SDR), torch.max(im_trainval_SDR), torch.min(im_trainval_SDR), torch.mean(im_trainval_SDR))
        # im_trainval = im_trainval_SDR # channel first for training
        # im_fixedscale_SDR = im_fixedscale_SDR_uint8.astype(np.float32) / 255.
        # if self.if_extra_op:
        #     im_fixedscale_SDR = self.extra_op(im_fixedscale_SDR, name='im_fixedscale_SDR')
        # batch_dict.update({'image_path': str(png_image_path)})
        im_fixedscale_SDR_uint8 = np.array(image)
        im_h, im_w = im_fixedscale_SDR_uint8.shape[0], im_fixedscale_SDR_uint8.shape[1]
        # Fit the image inside the padded canvas while keeping aspect ratio:
        # a flatter-than-canvas image fills the full width, a taller one the
        # full height; the rest of the canvas stays zero (masked out).
        if float(im_h) / float(im_w) < float(self.im_height_padded) / float(self.im_width_padded): # flatter
            im_w_resized_to = self.im_width_padded
            im_h_resized_to = int(float(im_h) / float(im_w) * im_w_resized_to)
            assert im_h_resized_to <= self.im_height_padded
            pad_mask[:im_h_resized_to, :] = 1
            # rs, re = 0, im_h_resized_to
            # cs, ce = 0, im_w_resized_to
            # gap = im_h_resized_to - self.im_height_padded
            # rs = np.random.randint(gap + 1)
            # re = rs + self.im_height_padded
        else: # taller
            im_h_resized_to = self.im_height_padded
            im_w_resized_to = int(float(im_w) / float(im_h) * im_h_resized_to)
            assert im_w_resized_to <= self.im_width_padded
            pad_mask[:, :im_w_resized_to] = 1
            # rs, re = 0, self.im_height_padded
            # gap = im_w_resized_to - self.im_width_padded
            # cs = np.random.randint(gap + 1)
            # ce = cs + self.im_width_padded
        im_fixedscale_SDR_uint8 = cv2.resize(im_fixedscale_SDR_uint8, (im_w_resized_to, im_h_resized_to), interpolation = cv2.INTER_AREA )
        # print(im_w_resized_to, im_h_resized_to, im_w, im_h)
        assert self.opt.cfg.DATA.pad_option == 'const'
        # Letterbox with constant zeros on the bottom/right edges.
        im_fixedscale_SDR_uint8 = cv2.copyMakeBorder(im_fixedscale_SDR_uint8, 0, self.im_height_padded-im_h_resized_to, 0, self.im_width_padded-im_w_resized_to, cv2.BORDER_CONSTANT, value=0)
        im_fixedscale_SDR = im_fixedscale_SDR_uint8.astype(np.float32) / 255.
        im_fixedscale_SDR = im_fixedscale_SDR.transpose(2, 0, 1)
        # if self.opt.cfg.DATA.if_load_png_not_hdr:
        #     # [PNG]
        #     im_fixedscale_HDR = (im_fixedscale_SDR - 0.5) / 0.5
        #     im_trainval = torch.from_numpy(im_fixedscale_HDR) # channel first for training
        #     im_trainval_SDR = torch.from_numpy(im_fixedscale_SDR)
        #     im_fixedscale_SDR = torch.from_numpy(im_fixedscale_SDR)
        # else:
        # [HDR]
        # De-gamma the SDR image (gamma 2.2) to approximate linear HDR values.
        im_fixedscale_HDR = im_fixedscale_SDR ** 2.2
        im_trainval = torch.from_numpy(im_fixedscale_HDR) # channel first for training
        im_trainval_SDR = torch.from_numpy(im_fixedscale_SDR)
        im_fixedscale_SDR = torch.from_numpy(im_fixedscale_SDR)
        batch_dict.update({'image_path': str(png_image_path), 'pad_mask': pad_mask, 'brdf_loss_mask': pad_mask})
        batch_dict.update({'im_w_resized_to': im_w_resized_to, 'im_h_resized_to': im_h_resized_to})
        batch_dict.update({'hdr_scale': hdr_scale, 'im_trainval': im_trainval, 'im_trainval_SDR': im_trainval_SDR, 'im_fixedscale_SDR': im_fixedscale_SDR.permute(1, 2, 0)}) # im_fixedscale_SDR for Tensorboard logging
        # load judgements labels
        rs, re = 0, im_h_resized_to
        cs, ce = 0, im_w_resized_to
        judgements = json.load(open(self.json_list[index]))
        points = judgements['intrinsic_points']
        comparisons = judgements['intrinsic_comparisons']
        id_to_points = {p['id']: p for p in points}
        # Accumulate (r1, c1, r2, c2) pixel-index quadruples plus weights; the
        # leading all-zero row keeps both lists non-empty.
        eqPoint, eqWeight = [0, 0, 0, 0], [0]
        darkerPoint, darkerWeight = [0, 0, 0, 0], [0]
        for c in comparisons:
            darker = c['darker']
            if darker not in ('1', '2', 'E'):
                continue
            # "darker_score" is "w_i" in our paper
            weight = c['darker_score']
            # NOTE(review): 'weight is None' is tested after 'weight <= 0.0',
            # so a None score would raise a TypeError on the comparison first.
            if weight <= 0.0 or weight is None:
                continue
            point1 = id_to_points[c['point1']]
            point2 = id_to_points[c['point2']]
            if not point1['opaque'] or not point2['opaque']:
                continue
            # Map the normalized [0,1] point coordinates into the resized
            # image, then re-normalize against the padded canvas extents.
            r1, c1 = int(point1['y'] * im_h_resized_to ), int(point1['x'] * im_w_resized_to )
            r2, c2 = int(point2['y'] * im_h_resized_to ), int(point2['x'] * im_w_resized_to )
            pr1 = float(r1 - rs) / float(self.im_height_padded -1 )
            pc1 = float(c1 - cs ) / float(self.im_width_padded - 1 )
            pr2 = float(r2 - rs ) / float(self.im_height_padded - 1 )
            pc2 = float(c2 - cs ) / float(self.im_width_padded - 1 )
            # Drop comparisons whose points fall outside the canvas.
            if not pr1 >= 0.0 or not pr1 <= 1.0:
                continue
            assert(pr1 >= 0.0 and pr1 <= 1.0)
            if pc1 < 0.0 or pc1 > 1.0:
                continue
            assert(pc1 >= 0.0 and pc1 <= 1.0)
            if not pr2 >= 0.0 or not pr2 <= 1.0:
                continue
            assert(pr2 >= 0.0 and pr2 <= 1.0)
            if pc2 < 0.0 or pc2 > 1.0:
                continue
            assert(pc2 >= 0.0 and pc2 <= 1.0)
            prId1 = int(pr1 * (self.im_height_padded - 1) )
            pcId1 = int(pc1 * (self.im_width_padded - 1) )
            prId2 = int(pr2 * (self.im_height_padded - 1) )
            pcId2 = int(pc2 * (self.im_width_padded - 1) )
            # the second point should be darker than the first point
            if darker == 'E':
                eqPoint = eqPoint + [prId1, pcId1, prId2, pcId2 ]
                eqWeight.append(weight )
            elif darker == '1':
                darkerPoint = darkerPoint + [prId2, pcId2, prId1, pcId1 ]
                darkerWeight.append(weight )
            elif darker == '2':
                darkerPoint = darkerPoint + [prId1, pcId1, prId2, pcId2 ]
                darkerWeight.append(weight )
        # NOTE(review): np.long is a removed alias in NumPy 1.24+; these calls
        # will fail on modern NumPy -- consider np.int64 (same semantics here).
        eqWeight = np.asarray(eqWeight, dtype=np.float32 )
        eqPoint = np.asarray(eqPoint, dtype=np.long )
        eqPoint = eqPoint.reshape([-1, 4] )
        darkerWeight = np.asarray(darkerWeight, dtype=np.float32 )
        darkerPoint = np.asarray(darkerPoint, dtype=np.float32 )
        darkerPoint = darkerPoint.reshape([-1, 4] )
        assert(eqPoint.shape[0] == eqWeight.shape[0] )
        assert(darkerPoint.shape[0] == darkerWeight.shape[0] )
        # Pad (with zero rows) or randomly subsample so every sample carries
        # exactly self.maxNum rows of each comparison type.
        eqNum = eqPoint.shape[0]
        if eqNum < self.maxNum:
            gap = self.maxNum - eqNum
            eqPoint = np.concatenate([eqPoint, np.zeros( (gap, 4), dtype=np.long) ], axis=0 )
            eqWeight = np.concatenate([eqWeight, np.zeros(gap, dtype=np.float32)], axis=0 )
        elif eqNum > self.maxNum:
            index = np.random.permutation(np.arange(eqNum ) )
            eqPoint = eqPoint[index, :]
            eqWeight = eqWeight[index ]
            eqPoint = eqPoint[0:self.maxNum, :]
            eqWeight = eqWeight[0:self.maxNum ]
            eqNum = self.maxNum
        darkerNum = darkerPoint.shape[0]
        if darkerNum < self.maxNum:
            gap = self.maxNum - darkerNum
            darkerPoint = np.concatenate([darkerPoint, np.zeros( (gap, 4), dtype=np.long) ], axis=0 )
            darkerWeight = np.concatenate([darkerWeight, np.zeros(gap, dtype=np.float32)], axis=0 )
        elif darkerNum > self.maxNum:
            index = np.random.permutation(np.arange(darkerNum ) )
            darkerPoint = darkerPoint[index, :]
            darkerWeight = darkerWeight[index ]
            darkerPoint = darkerPoint[0:self.maxNum, :]
            darkerWeight = darkerWeight[0:self.maxNum]
            darkerNum = self.maxNum
        batch_dict.update({
            'eq': {'point' : eqPoint, 'weight' : eqWeight, 'num': eqNum },
            'darker': {'point' : darkerPoint, 'weight' : darkerWeight, 'num' : darkerNum },
            'judgements': judgements
            })
        return batch_dict
default_collate = torch.utils.data.dataloader.default_collate

def collate_fn_iiw(batch):
    """Collate a list of per-sample dicts into one batch dict.

    Assumes each instance is a dict. 'eq' / 'darker' / 'judgements' fields
    are kept as plain per-sample lists (their shapes differ per image);
    every other field goes through torch's default collation.
    Args:
        batch: List of loaded elements via Dataset.__getitem__
    """
    collated = {}
    for field in batch[0]:
        if field == '':
            # legacy per-subkey concatenation branch (kept for parity)
            collated[field] = dict()
            for sub in batch[0][field]:
                if sub in []: # lists of original & more information (e.g. color)
                    continue
                if sub in []: # list of lists
                    stacked = [sample[field][sub] for sample in batch]
                else:
                    tensors = [recursive_convert_to_torch(sample[field][sub]) for sample in batch]
                    try:
                        stacked = torch.cat(tensors)
                    except RuntimeError:
                        print(sub, [t.shape for t in tensors])
                collated[field][sub] = stacked
        elif field in ('eq', 'darker', 'judgements'):
            collated[field] = [sample[field] for sample in batch]
        else:
            try:
                collated[field] = default_collate([sample[field] for sample in batch])
            except RuntimeError as e:
                print('[!!!!] Type error in collate_fn_OR: ', field, e)
    return collated
def recursive_convert_to_torch(elem):
    """Recursively convert an element to torch tensors.

    Tensors pass through unchanged; numpy arrays, ints and floats become
    tensors; mappings and sequences are converted element-wise; anything
    else is returned as-is.

    Args:
        elem: An arbitrary element (tensor, numpy array, int, float,
            mapping, sequence, or other).

    Returns:
        The torch-converted equivalent of `elem`.
    """
    # BUG FIX: collections.Mapping / collections.Sequence were removed from
    # the `collections` namespace in Python 3.10; the ABCs live in
    # collections.abc (since Python 3.3).
    from collections.abc import Mapping, Sequence
    if torch.is_tensor(elem):
        return elem
    elif type(elem).__module__ == 'numpy':
        if elem.size == 0:
            # Empty numpy arrays cannot go through from_numpy reliably; build
            # a float64 tensor of the same shape instead.
            return torch.zeros(elem.shape).type(torch.DoubleTensor)
        else:
            return torch.from_numpy(elem)
    elif isinstance(elem, int):
        return torch.LongTensor([elem])
    elif isinstance(elem, float):
        return torch.DoubleTensor([elem])
    elif isinstance(elem, Mapping):
        return {key: recursive_convert_to_torch(elem[key]) for key in elem}
    elif isinstance(elem, Sequence):
        # NOTE(review): a str is also a Sequence and would recurse forever,
        # as in the original — callers are assumed never to pass strings.
        return [recursive_convert_to_torch(samples) for samples in elem]
    else:
        return elem
| [
"torch.DoubleTensor",
"pathlib.Path",
"torch.LongTensor",
"cv2.copyMakeBorder",
"numpy.asarray",
"torch.from_numpy",
"numpy.array",
"torch.is_tensor",
"numpy.zeros",
"torch.cat",
"torch.zeros",
"cv2.resize",
"warnings.filterwarnings",
"numpy.arange"
] | [((1288, 1321), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1311, 1321), False, 'import warnings\n'), ((14466, 14487), 'torch.is_tensor', 'torch.is_tensor', (['elem'], {}), '(elem)\n', (14481, 14487), False, 'import torch\n'), ((4187, 4258), 'numpy.zeros', 'np.zeros', (['(self.im_height_padded, self.im_width_padded)'], {'dtype': 'np.uint8'}), '((self.im_height_padded, self.im_width_padded), dtype=np.uint8)\n', (4195, 4258), True, 'import numpy as np\n'), ((5380, 5395), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (5388, 5395), True, 'import numpy as np\n'), ((6561, 6666), 'cv2.resize', 'cv2.resize', (['im_fixedscale_SDR_uint8', '(im_w_resized_to, im_h_resized_to)'], {'interpolation': 'cv2.INTER_AREA'}), '(im_fixedscale_SDR_uint8, (im_w_resized_to, im_h_resized_to),\n interpolation=cv2.INTER_AREA)\n', (6571, 6666), False, 'import cv2\n'), ((6817, 6986), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['im_fixedscale_SDR_uint8', '(0)', '(self.im_height_padded - im_h_resized_to)', '(0)', '(self.im_width_padded - im_w_resized_to)', 'cv2.BORDER_CONSTANT'], {'value': '(0)'}), '(im_fixedscale_SDR_uint8, 0, self.im_height_padded -\n im_h_resized_to, 0, self.im_width_padded - im_w_resized_to, cv2.\n BORDER_CONSTANT, value=0)\n', (6835, 6986), False, 'import cv2\n'), ((7597, 7632), 'torch.from_numpy', 'torch.from_numpy', (['im_fixedscale_HDR'], {}), '(im_fixedscale_HDR)\n', (7613, 7632), False, 'import torch\n'), ((7688, 7723), 'torch.from_numpy', 'torch.from_numpy', (['im_fixedscale_SDR'], {}), '(im_fixedscale_SDR)\n', (7704, 7723), False, 'import torch\n'), ((7752, 7787), 'torch.from_numpy', 'torch.from_numpy', (['im_fixedscale_SDR'], {}), '(im_fixedscale_SDR)\n', (7768, 7787), False, 'import torch\n'), ((10820, 10858), 'numpy.asarray', 'np.asarray', (['eqWeight'], {'dtype': 'np.float32'}), '(eqWeight, dtype=np.float32)\n', (10830, 10858), True, 'import numpy as np\n'), ((10878, 10912), 'numpy.asarray', 
'np.asarray', (['eqPoint'], {'dtype': 'np.long'}), '(eqPoint, dtype=np.long)\n', (10888, 10912), True, 'import numpy as np\n'), ((10981, 11023), 'numpy.asarray', 'np.asarray', (['darkerWeight'], {'dtype': 'np.float32'}), '(darkerWeight, dtype=np.float32)\n', (10991, 11023), True, 'import numpy as np\n'), ((11047, 11088), 'numpy.asarray', 'np.asarray', (['darkerPoint'], {'dtype': 'np.float32'}), '(darkerPoint, dtype=np.float32)\n', (11057, 11088), True, 'import numpy as np\n'), ((2274, 2298), 'pathlib.Path', 'Path', (['self.cfg.PATH.root'], {}), '(self.cfg.PATH.root)\n', (2278, 2298), False, 'from pathlib import Path\n'), ((14680, 14702), 'torch.from_numpy', 'torch.from_numpy', (['elem'], {}), '(elem)\n', (14696, 14702), False, 'import torch\n'), ((14750, 14774), 'torch.LongTensor', 'torch.LongTensor', (['[elem]'], {}), '([elem])\n', (14766, 14774), False, 'import torch\n'), ((11412, 11445), 'numpy.zeros', 'np.zeros', (['(gap, 4)'], {'dtype': 'np.long'}), '((gap, 4), dtype=np.long)\n', (11420, 11445), True, 'import numpy as np\n'), ((11508, 11539), 'numpy.zeros', 'np.zeros', (['gap'], {'dtype': 'np.float32'}), '(gap, dtype=np.float32)\n', (11516, 11539), True, 'import numpy as np\n'), ((11627, 11643), 'numpy.arange', 'np.arange', (['eqNum'], {}), '(eqNum)\n', (11636, 11643), True, 'import numpy as np\n'), ((12031, 12064), 'numpy.zeros', 'np.zeros', (['(gap, 4)'], {'dtype': 'np.long'}), '((gap, 4), dtype=np.long)\n', (12039, 12064), True, 'import numpy as np\n'), ((12135, 12166), 'numpy.zeros', 'np.zeros', (['gap'], {'dtype': 'np.float32'}), '(gap, dtype=np.float32)\n', (12143, 12166), True, 'import numpy as np\n'), ((12258, 12278), 'numpy.arange', 'np.arange', (['darkerNum'], {}), '(darkerNum)\n', (12267, 12278), True, 'import numpy as np\n'), ((14824, 14850), 'torch.DoubleTensor', 'torch.DoubleTensor', (['[elem]'], {}), '([elem])\n', (14842, 14850), False, 'import torch\n'), ((13745, 13770), 'torch.cat', 'torch.cat', (['list_of_tensor'], {}), '(list_of_tensor)\n', 
(13754, 13770), False, 'import torch\n'), ((14598, 14621), 'torch.zeros', 'torch.zeros', (['elem.shape'], {}), '(elem.shape)\n', (14609, 14621), False, 'import torch\n')] |
# This file is part of OpenCV Zoo project.
# It is subject to the license terms in the LICENSE file found in the same directory.
#
# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
# Third party copyrights are property of their respective owners.
import argparse
import numpy as np
import cv2 as cv
from pphumanseg import PPHumanSeg
def str2bool(v):
    """Parse a textual boolean flag (case-insensitive).

    Args:
        v (str): One of on/yes/true/y/t or off/no/false/n/f.

    Returns:
        bool: The parsed value.

    Raises:
        NotImplementedError: If the token is not recognized.
    """
    token = v.lower()
    if token in ('on', 'yes', 'true', 'y', 't'):
        return True
    if token in ('off', 'no', 'false', 'n', 'f'):
        return False
    raise NotImplementedError
# Command-line interface for the PPHumanSeg demo.
parser = argparse.ArgumentParser(description='PPHumanSeg (https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.2/contrib/PP-HumanSeg)')
parser.add_argument('--input', '-i', type=str, help='Path to the input image. Omit for using default camera.')
parser.add_argument('--model', '-m', type=str, default='human_segmentation_pphumanseg_2021oct.onnx', help='Path to the model.')
# BUG FIX: was type=str, so any non-empty value (even 'false') was truthy at
# the `if args.save:` check; parse with str2bool like --vis does.
parser.add_argument('--save', '-s', type=str2bool, default=False, help='Set true to save results. This flag is invalid when using camera.')
parser.add_argument('--vis', '-v', type=str2bool, default=True, help='Set true to open a window for result visualization. This flag is invalid when using camera.')
args = parser.parse_args()
def get_color_map_list(num_classes):
    """
    Returns the color map for visualizing the segmentation mask,
    which can support arbitrary number of classes.

    Each class label is expanded bit-by-bit into the high bits of the
    R, G and B channels (the PASCAL VOC color-map scheme).

    Args:
        num_classes (int): Number of classes.

    Returns:
        (list). The flattened color map, 3 entries (R, G, B) per class.
    """
    total = num_classes + 1  # extra slot for the background entry
    color_map = [0] * (3 * total)
    for label in range(total):
        value = label
        shift = 7
        while value:
            color_map[label * 3] |= ((value >> 0) & 1) << shift
            color_map[label * 3 + 1] |= ((value >> 1) & 1) << shift
            color_map[label * 3 + 2] |= ((value >> 2) & 1) << shift
            shift -= 1
            value >>= 3
    # Drop the leading background triple, as the original API does.
    return color_map[3:]
def visualize(image, result, weight=0.6, fps=None):
    """
    Blend a pseudo-color rendering of the predicted labels over the image.

    Args:
        image (str): The input image.
        result (np.ndarray): The predict result of image.
        weight (float): The image weight of visual image, and the result weight is (1 - weight). Default: 0.6
        fps (str): The FPS to be drawn on the input image.

    Returns:
        vis_result (np.ndarray): The visualized result.
    """
    flat_map = get_color_map_list(256)
    lut = np.array(
        [flat_map[i:i + 3] for i in range(0, len(flat_map), 3)]).astype(np.uint8)
    # Use OpenCV LUT for color mapping, one channel at a time.
    channels = [cv.LUT(result, lut[:, c]) for c in range(3)]
    pseudo_img = np.dstack(channels)
    vis_result = cv.addWeighted(image, weight, pseudo_img, 1 - weight, 0)
    if fps is not None:
        cv.putText(vis_result, 'FPS: {:.2f}'.format(fps), (0, 15),
                   cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
    return vis_result
if __name__ == '__main__':
    # Instantiate PPHumanSeg
    model = PPHumanSeg(modelPath=args.model)
    if args.input is not None:
        # Read image and resize to 192x192
        image = cv.imread(args.input)
        h, w, _ = image.shape
        image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
        _image = cv.resize(image, dsize=(192, 192))
        # Inference
        result = model.infer(_image)
        # Upsample the label map back to the original resolution; nearest
        # keeps labels discrete.
        result = cv.resize(result[0, :, :], dsize=(w, h), interpolation=cv.INTER_NEAREST)
        # Draw results on the input image
        image = visualize(image, result)
        # Save results if save is true
        # NOTE(review): if --save is parsed as a plain string, any non-empty
        # value (including 'false') is truthy here — verify the parser.
        if args.save:
            print('Results saved to result.jpg\n')
            # NOTE(review): image was converted to RGB above but cv.imwrite
            # expects BGR — colors in result.jpg may be channel-swapped; confirm.
            cv.imwrite('result.jpg', image)
        # Visualize results in a new window
        if args.vis:
            cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE)
            cv.imshow(args.input, image)
            cv.waitKey(0)
    else: # Omit input to call default camera
        deviceId = 0
        cap = cv.VideoCapture(deviceId)
        w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
        h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
        tm = cv.TickMeter()
        # Loop until any key is pressed in the display window.
        while cv.waitKey(1) < 0:
            hasFrame, frame = cap.read()
            if not hasFrame:
                print('No frames grabbed!')
                break
            _frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
            _frame = cv.resize(_frame, dsize=(192, 192))
            # Inference
            tm.start()
            result = model.infer(_frame)
            tm.stop()
            result = cv.resize(result[0, :, :], dsize=(w, h), interpolation=cv.INTER_NEAREST)
            # Draw results on the input image
            frame = visualize(frame, result, fps=tm.getFPS())
            # Visualize results in a new window
            cv.imshow('PPHumanSeg Demo', frame)
tm.reset() | [
"numpy.dstack",
"pphumanseg.PPHumanSeg",
"cv2.imwrite",
"argparse.ArgumentParser",
"cv2.imshow",
"cv2.addWeighted",
"cv2.TickMeter",
"numpy.array",
"cv2.waitKey",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.resize",
"cv2.LUT",
"cv2.namedWindow",
"cv2.imread"
] | [((623, 763), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PPHumanSeg (https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.2/contrib/PP-HumanSeg)"""'}), "(description=\n 'PPHumanSeg (https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.2/contrib/PP-HumanSeg)'\n )\n", (646, 763), False, 'import argparse\n'), ((2719, 2750), 'cv2.LUT', 'cv.LUT', (['result', 'color_map[:, 0]'], {}), '(result, color_map[:, 0])\n', (2725, 2750), True, 'import cv2 as cv\n'), ((2760, 2791), 'cv2.LUT', 'cv.LUT', (['result', 'color_map[:, 1]'], {}), '(result, color_map[:, 1])\n', (2766, 2791), True, 'import cv2 as cv\n'), ((2801, 2832), 'cv2.LUT', 'cv.LUT', (['result', 'color_map[:, 2]'], {}), '(result, color_map[:, 2])\n', (2807, 2832), True, 'import cv2 as cv\n'), ((2850, 2873), 'numpy.dstack', 'np.dstack', (['(c1, c2, c3)'], {}), '((c1, c2, c3))\n', (2859, 2873), True, 'import numpy as np\n'), ((2892, 2948), 'cv2.addWeighted', 'cv.addWeighted', (['image', 'weight', 'pseudo_img', '(1 - weight)', '(0)'], {}), '(image, weight, pseudo_img, 1 - weight, 0)\n', (2906, 2948), True, 'import cv2 as cv\n'), ((3177, 3209), 'pphumanseg.PPHumanSeg', 'PPHumanSeg', ([], {'modelPath': 'args.model'}), '(modelPath=args.model)\n', (3187, 3209), False, 'from pphumanseg import PPHumanSeg\n'), ((3301, 3322), 'cv2.imread', 'cv.imread', (['args.input'], {}), '(args.input)\n', (3310, 3322), True, 'import cv2 as cv\n'), ((3369, 3405), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2RGB'], {}), '(image, cv.COLOR_BGR2RGB)\n', (3380, 3405), True, 'import cv2 as cv\n'), ((3423, 3457), 'cv2.resize', 'cv.resize', (['image'], {'dsize': '(192, 192)'}), '(image, dsize=(192, 192))\n', (3432, 3457), True, 'import cv2 as cv\n'), ((3533, 3605), 'cv2.resize', 'cv.resize', (['result[0, :, :]'], {'dsize': '(w, h)', 'interpolation': 'cv.INTER_NEAREST'}), '(result[0, :, :], dsize=(w, h), interpolation=cv.INTER_NEAREST)\n', (3542, 3605), True, 'import cv2 as cv\n'), ((4120, 
4145), 'cv2.VideoCapture', 'cv.VideoCapture', (['deviceId'], {}), '(deviceId)\n', (4135, 4145), True, 'import cv2 as cv\n'), ((4261, 4275), 'cv2.TickMeter', 'cv.TickMeter', ([], {}), '()\n', (4273, 4275), True, 'import cv2 as cv\n'), ((2634, 2653), 'numpy.array', 'np.array', (['color_map'], {}), '(color_map)\n', (2642, 2653), True, 'import numpy as np\n'), ((3815, 3846), 'cv2.imwrite', 'cv.imwrite', (['"""result.jpg"""', 'image'], {}), "('result.jpg', image)\n", (3825, 3846), True, 'import cv2 as cv\n'), ((3925, 3971), 'cv2.namedWindow', 'cv.namedWindow', (['args.input', 'cv.WINDOW_AUTOSIZE'], {}), '(args.input, cv.WINDOW_AUTOSIZE)\n', (3939, 3971), True, 'import cv2 as cv\n'), ((3984, 4012), 'cv2.imshow', 'cv.imshow', (['args.input', 'image'], {}), '(args.input, image)\n', (3993, 4012), True, 'import cv2 as cv\n'), ((4025, 4038), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (4035, 4038), True, 'import cv2 as cv\n'), ((4290, 4303), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (4300, 4303), True, 'import cv2 as cv\n'), ((4467, 4503), 'cv2.cvtColor', 'cv.cvtColor', (['frame', 'cv.COLOR_BGR2RGB'], {}), '(frame, cv.COLOR_BGR2RGB)\n', (4478, 4503), True, 'import cv2 as cv\n'), ((4525, 4560), 'cv2.resize', 'cv.resize', (['_frame'], {'dsize': '(192, 192)'}), '(_frame, dsize=(192, 192))\n', (4534, 4560), True, 'import cv2 as cv\n'), ((4693, 4765), 'cv2.resize', 'cv.resize', (['result[0, :, :]'], {'dsize': '(w, h)', 'interpolation': 'cv.INTER_NEAREST'}), '(result[0, :, :], dsize=(w, h), interpolation=cv.INTER_NEAREST)\n', (4702, 4765), True, 'import cv2 as cv\n'), ((4936, 4971), 'cv2.imshow', 'cv.imshow', (['"""PPHumanSeg Demo"""', 'frame'], {}), "('PPHumanSeg Demo', frame)\n", (4945, 4971), True, 'import cv2 as cv\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as utils
import torchvision.transforms as transforms
from scipy import io
import numpy as np
import mathematical_operations as mo
def clustering_kmeans(the_image, my_net, shape, img_dir, img_name, show_image=False):
    """Cluster autoencoded image points with k-means and save the label map.

    Each pixel/point of `the_image` is encoded with `my_net.getCode`, the
    codes are clustered into 16 groups with k-means, and the resulting label
    image is written to `img_dir + img_name`.

    Args:
        the_image: 2-D iterable of points to encode and cluster.
        my_net: Trained autoencoder exposing `getCode(torch.Tensor)`.
        shape: Shape of the output label image (rows, cols).
        img_dir: Directory (with trailing separator) for the saved figure.
        img_name: File name of the saved figure.
        show_image: If True, also display the figure interactively.
    """
    print()
    print("*** K - means clustering ***")
    print("---------------------------------")
    # https://www.datacamp.com/community/tutorials/k-means-clustering-python
    # https: // datatofish.com / k - means - clustering - python /
    from pandas import DataFrame
    import matplotlib.pyplot as plt
    from sklearn.cluster import KMeans
    print("Image shape: ", np.shape(the_image))
    the_image_list = []
    for row in the_image:
        for element in row:
            the_image_list.append(element)
    print("List of points shape: ", np.shape(the_image_list))
    print("Image code got from autoencoder")
    image_autoencoded = [my_net.getCode(torch.Tensor(point)).detach().numpy() for point in the_image_list]
    print("Creating dataframe from k-clustering")
    df = DataFrame(data=image_autoencoded)
    print("KMeans clustering")
    number_of_clusters = 16
    kmeans = KMeans(n_clusters=number_of_clusters).fit(df)
    print("Creating list for clustered data")
    # BUG FIX: was np.zeros(np.shape(the_image_labels)) — `the_image_labels`
    # is undefined in this scope (NameError); the intended size is `shape`.
    clastered_data = np.zeros(shape)
    print("Clustered data shape: ", shape)
    x = 0
    y = 0
    # NOTE(review): the stride 144 and wrap at 145 are hard-coded image
    # dimensions and look inconsistent with each other — confirm against the
    # actual input size; ideally derive both from `shape`.
    for i in range(np.shape(clastered_data)[0] * np.shape(clastered_data)[1]):
        clastered_data[x][y] = kmeans.predict([image_autoencoded[y * 144 + x]])
        x = x + 1
        if x == 145:
            x = 0
            y = y + 1
    print(clastered_data)
    plt.imshow(clastered_data)
    name = img_dir + img_name
    plt.savefig(name, bbox_inches='tight')
    if show_image:
        plt.show()
| [
"matplotlib.pyplot.imshow",
"sklearn.cluster.KMeans",
"matplotlib.pyplot.savefig",
"torch.Tensor",
"pandas.DataFrame",
"numpy.shape",
"matplotlib.pyplot.show"
] | [((1159, 1192), 'pandas.DataFrame', 'DataFrame', ([], {'data': 'image_autoencoded'}), '(data=image_autoencoded)\n', (1168, 1192), False, 'from pandas import DataFrame\n'), ((1787, 1813), 'matplotlib.pyplot.imshow', 'plt.imshow', (['clastered_data'], {}), '(clastered_data)\n', (1797, 1813), True, 'import matplotlib.pyplot as plt\n'), ((1848, 1886), 'matplotlib.pyplot.savefig', 'plt.savefig', (['name'], {'bbox_inches': '"""tight"""'}), "(name, bbox_inches='tight')\n", (1859, 1886), True, 'import matplotlib.pyplot as plt\n'), ((742, 761), 'numpy.shape', 'np.shape', (['the_image'], {}), '(the_image)\n', (750, 761), True, 'import numpy as np\n'), ((920, 944), 'numpy.shape', 'np.shape', (['the_image_list'], {}), '(the_image_list)\n', (928, 944), True, 'import numpy as np\n'), ((1389, 1415), 'numpy.shape', 'np.shape', (['the_image_labels'], {}), '(the_image_labels)\n', (1397, 1415), True, 'import numpy as np\n'), ((1914, 1924), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1922, 1924), True, 'import matplotlib.pyplot as plt\n'), ((1266, 1303), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'number_of_clusters'}), '(n_clusters=number_of_clusters)\n', (1272, 1303), False, 'from sklearn.cluster import KMeans\n'), ((1501, 1525), 'numpy.shape', 'np.shape', (['clastered_data'], {}), '(clastered_data)\n', (1509, 1525), True, 'import numpy as np\n'), ((1531, 1555), 'numpy.shape', 'np.shape', (['clastered_data'], {}), '(clastered_data)\n', (1539, 1555), True, 'import numpy as np\n'), ((1032, 1051), 'torch.Tensor', 'torch.Tensor', (['point'], {}), '(point)\n', (1044, 1051), False, 'import torch\n')] |
from functools import partial
import numpy as np
import matplotlib.pyplot as plt
from vznncv.signal.generator.onedim import create_process_realization
def f_psd_gaussian(f, f_0, alpha):
    r"""
    One side gaussian psd function

    .. math::

        s = \sqrt{\frac{1}{4 \pi \alpha}} \left(
        e^{-\frac{\left( f - f_0 \right) ^ 2}{4 \alpha}}
        \right)

    :param f: frequency (scalar or array)
    :param f_0: center frequency of the gaussian lobe
    :param alpha: width parameter of the lobe
    :return: psd value(s) at ``f``
    """
    normalization = np.sqrt(1 / (4 * np.pi * alpha))
    lobe = np.exp(-(f - f_0) ** 2 / (4 * alpha))
    return normalization * lobe
def f_psd(f, t):
    """Time-varying one-sided PSD: a gaussian lobe whose center frequency
    drifts linearly with time t (fixed width alpha = 0.001)."""
    center_freq = 0.02 + 0.002 * t
    return f_psd_gaussian(f, f_0=center_freq, alpha=0.001)
def f_std(t):
    """Standard deviation of the process as a function of time: grows
    linearly from 1 at t = 0."""
    return 1 + 0.02 * t
# Sampling frequency (Hz) and the corresponding time grid.
fs = 2.0
t = np.arange(300) / fs

# Generate one realization of a non-stationary process whose spectral
# content follows f_psd and whose standard deviation follows f_std.
x = create_process_realization(
    size=t.size,
    f_psd=f_psd,
    f_m=0.0,
    f_std=f_std,
    fs=2.0,
    window_size=64
)

plt.plot(t, x)
# BUG FIX: axis labels were swapped — the x-axis carries time t and the
# y-axis the signal x.
plt.xlabel('t')
plt.ylabel('x')
plt.show()
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"vznncv.signal.generator.onedim.create_process_realization",
"numpy.exp",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((732, 834), 'vznncv.signal.generator.onedim.create_process_realization', 'create_process_realization', ([], {'size': 't.size', 'f_psd': 'f_psd', 'f_m': '(0.0)', 'f_std': 'f_std', 'fs': '(2.0)', 'window_size': '(64)'}), '(size=t.size, f_psd=f_psd, f_m=0.0, f_std=f_std,\n fs=2.0, window_size=64)\n', (758, 834), False, 'from vznncv.signal.generator.onedim import create_process_realization\n'), ((858, 872), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'x'], {}), '(t, x)\n', (866, 872), True, 'import matplotlib.pyplot as plt\n'), ((873, 888), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (883, 888), True, 'import matplotlib.pyplot as plt\n'), ((889, 904), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""t"""'], {}), "('t')\n", (899, 904), True, 'import matplotlib.pyplot as plt\n'), ((905, 915), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (913, 915), True, 'import matplotlib.pyplot as plt\n'), ((707, 721), 'numpy.arange', 'np.arange', (['(300)'], {}), '(300)\n', (716, 721), True, 'import numpy as np\n'), ((448, 480), 'numpy.sqrt', 'np.sqrt', (['(1 / (4 * np.pi * alpha))'], {}), '(1 / (4 * np.pi * alpha))\n', (455, 480), True, 'import numpy as np\n'), ((493, 530), 'numpy.exp', 'np.exp', (['(-(f - f_0) ** 2 / (4 * alpha))'], {}), '(-(f - f_0) ** 2 / (4 * alpha))\n', (499, 530), True, 'import numpy as np\n')] |
import numpy as np
import pdb
from scipy.spatial import ConvexHull
class system(object):
    """Discrete-time linear system x+ = A x + B u + w with an infinity-norm
    bounded disturbance w (||w||_inf <= w_inf).

    On construction it also computes (an approximation of) the minimal
    robust positive invariant set as a list of vertices in O_v.
    """

    def __init__(self, A, B, w_inf, x0):
        self.A = A
        self.B = B
        self.w_inf = w_inf
        self.x = [x0]   # state trajectory, starting from x0
        self.u = []     # applied inputs
        self.w = []     # realized disturbances
        # Vertices of the disturbance box, in a fixed order.
        self.w_v = [w_inf * np.array(corner)
                    for corner in ([1, 1], [1, -1], [-1, 1], [-1, -1])]
        self.computeRobutInvariant()

    def applyInput(self, ut):
        """Advance the system one step with input ut (disturbance is zero here)."""
        self.u.append(ut)
        self.w.append(np.array([0, 0]))
        next_state = np.dot(self.A, self.x[-1]) + np.dot(self.B, self.u[-1]) + self.w[-1]
        self.x.append(next_state)

    def computeRobutInvariant(self):
        """Iterate O <- A*O (+) W a fixed number of times to approximate the
        minimal robust positive invariant set (vertex representation)."""
        self.O_v = [np.array([0, 0])]
        print("Compute robust invariant")
        # TO DO:
        # - add check for convergence
        # - add check for input and state constraint satifaction
        for _ in range(0, 20):
            self.O_v = self.MinkowskiSum(self.O_v, self.w_v)

    def MinkowskiSum(self, setA, setB):
        """Vertices of the convex hull of {A*a + b : a in setA, b in setB}."""
        candidates = [np.dot(self.A, va) + vb for va in setA for vb in setB]
        hull = ConvexHull(candidates)
        return [candidates[idx] for idx in hull.vertices]
| [
"scipy.spatial.ConvexHull",
"numpy.array",
"numpy.dot"
] | [((1078, 1098), 'scipy.spatial.ConvexHull', 'ConvexHull', (['vertices'], {}), '(vertices)\n', (1088, 1098), False, 'from scipy.spatial import ConvexHull\n'), ((534, 550), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (542, 550), True, 'import numpy as np\n'), ((700, 716), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (708, 716), True, 'import numpy as np\n'), ((289, 305), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (297, 305), True, 'import numpy as np\n'), ((332, 349), 'numpy.array', 'np.array', (['[1, -1]'], {}), '([1, -1])\n', (340, 349), True, 'import numpy as np\n'), ((375, 392), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (383, 392), True, 'import numpy as np\n'), ((418, 436), 'numpy.array', 'np.array', (['[-1, -1]'], {}), '([-1, -1])\n', (426, 436), True, 'import numpy as np\n'), ((561, 587), 'numpy.dot', 'np.dot', (['self.A', 'self.x[-1]'], {}), '(self.A, self.x[-1])\n', (567, 587), True, 'import numpy as np\n'), ((589, 615), 'numpy.dot', 'np.dot', (['self.B', 'self.u[-1]'], {}), '(self.B, self.u[-1])\n', (595, 615), True, 'import numpy as np\n'), ((1042, 1060), 'numpy.dot', 'np.dot', (['self.A', 'v1'], {}), '(self.A, v1)\n', (1048, 1060), True, 'import numpy as np\n')] |
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library methods for working with centralized data used in simulation."""
import abc
import collections
from typing import Any, Callable, Iterable, List, Optional, Sequence, Tuple, Union
from absl import logging
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.api import computations
class IncompatiblePreprocessFnError(TypeError):
  """Raised when a `tff.Computation` is supplied as a preprocess function."""

  def __init__(self):
    super().__init__(
        'The preprocess_fn must not be a tff.Computation. Please use a python'
        ' callable or tf.function instead. This restriction is because '
        '`tf.data.Dataset.map` wraps preprocessing functions with a '
        '`tf.function` decorator, which cannot call to a `tff.Computation`.')
def is_nonnegative_32_bit_int(x: Any) -> bool:
  """Returns `True` iff `x` is an `int` in the range [0, 2**32)."""
  return isinstance(x, int) and 0 <= x < 2**32
def check_numpy_random_seed(seed: Any) -> None:
  """Validates a random seed for `np.random.RandomState`.

  A valid seed is `None`, a nonnegative 32-bit integer, or a sequence of
  nonnegative 32-bit integers.

  Args:
    seed: The argument that we wish to determine is a valid random seed.

  Raises:
    InvalidRandomSeedError: If the input argument does not meet any of the
      types above.
  """
  if seed is None or is_nonnegative_32_bit_int(seed):
    return
  if isinstance(seed, Sequence) and all(
      is_nonnegative_32_bit_int(x) for x in seed):
    return
  raise InvalidRandomSeedError(type(seed))
class InvalidRandomSeedError(TypeError):
  """Raised for seed values that `np.random.RandomState` cannot accept."""

  def __init__(self, seed_type):
    super().__init__(
        'The seed must be a nonnegative 32-bit integer, a sequence of such '
        'integers, or None. Found {} instead.'.format(seed_type))
class ClientData(object, metaclass=abc.ABCMeta):
"""Object to hold a federated dataset.
The federated dataset is represented as a list of client ids, and
a function to look up the local dataset for each client id.
Note: Cross-device federated learning does not use client IDs or perform any
tracking of clients. However in simulation experiments using centralized test
data the experimenter may select specific clients to be processed per round.
The concept of a client ID is only available at the preprocessing stage when
preparing input data for the simulation and is not part of the TensorFlow
Federated core APIs.
Each client's local dataset is represented as a `tf.data.Dataset`, but
generally this class (and the corresponding datasets hosted by TFF) can
easily be consumed by any Python-based ML framework as `numpy` arrays:
```python
import tensorflow as tf
import tensorflow_federated as tff
import tensorflow_datasets as tfds
for client_id in sampled_client_ids[:5]:
client_local_dataset = tfds.as_numpy(
emnist_train.create_tf_dataset_for_client(client_id))
# client_local_dataset is an iterable of structures of numpy arrays
for example in client_local_dataset:
print(example)
```
If desiring a manner for constructing ClientData objects for testing purposes,
please see the `tff.simulation.datasets.TestClientData` class, as it provides
an easy way to construct toy federated datasets.
"""
@abc.abstractproperty
def client_ids(self) -> List[str]:
"""A list of string identifiers for clients in this dataset."""
pass
@abc.abstractproperty
def serializable_dataset_fn(self):
"""A callable accepting a client ID and returning a `tf.data.Dataset`.
Note that this callable must be traceable by TF, as it will be used in the
context of a `tf.function`.
"""
pass
def create_tf_dataset_for_client(self, client_id: str) -> tf.data.Dataset:
"""Creates a new `tf.data.Dataset` containing the client training examples.
This function will create a dataset for a given client, given that
`client_id` is contained in the `client_ids` property of the `ClientData`.
Unlike `create_dataset`, this method need not be serializable.
Args:
client_id: The string client_id for the desired client.
Returns:
A `tf.data.Dataset` object.
"""
if client_id not in self.client_ids:
raise ValueError(
'ID [{i}] is not a client in this ClientData. See '
'property `client_ids` for the list of valid ids.'.format(
i=client_id))
return self.serializable_dataset_fn(client_id)
  @property
  def dataset_computation(self):
    """A `tff.Computation` accepting a client ID, returning a dataset.

    The computation is constructed lazily on first access and cached on the
    instance, so subsequent accesses return the same `tff.Computation`.

    Note: the `dataset_computation` property is intended as a TFF-specific
    performance optimization for distributed execution.
    """
    # Build the computation only once per instance; `hasattr` guards the case
    # where a subclass never initialized the cache attribute.
    if (not hasattr(self, '_cached_dataset_computation')) or (
        self._cached_dataset_computation is None):
      @computations.tf_computation(tf.string)
      def dataset_computation(client_id):
        return self.serializable_dataset_fn(client_id)
      self._cached_dataset_computation = dataset_computation
    return self._cached_dataset_computation
@abc.abstractproperty
def element_type_structure(self):
"""The element type information of the client datasets.
Returns:
A nested structure of `tf.TensorSpec` objects defining the type of the
elements returned by datasets in this `ClientData` object.
"""
pass
def datasets(
self,
limit_count: Optional[int] = None,
seed: Optional[Union[int, Sequence[int]]] = None
) -> Iterable[tf.data.Dataset]:
"""Yields the `tf.data.Dataset` for each client in random order.
This function is intended for use building a static array of client data
to be provided to the top-level federated computation.
Args:
limit_count: Optional, a maximum number of datasets to return.
seed: Optional, a seed to determine the order in which clients are
processed in the joined dataset. The seed can be any nonnegative 32-bit
integer, an array of such integers, or `None`.
"""
check_numpy_random_seed(seed)
# Create a copy to prevent the original list being reordered
client_ids = self.client_ids.copy()
np.random.RandomState(seed=seed).shuffle(client_ids)
count = 0
for client_id in client_ids:
if limit_count is not None and count >= limit_count:
return
count += 1
dataset = self.create_tf_dataset_for_client(client_id)
py_typecheck.check_type(dataset, tf.data.Dataset)
yield dataset
def create_tf_dataset_from_all_clients(
self,
seed: Optional[Union[int, Sequence[int]]] = None) -> tf.data.Dataset:
"""Creates a new `tf.data.Dataset` containing _all_ client examples.
This function is intended for use training centralized, non-distributed
models (num_clients=1). This can be useful as a point of comparison
against federated models.
Currently, the implementation produces a dataset that contains
all examples from a single client in order, and so generally additional
shuffling should be performed.
Args:
seed: Optional, a seed to determine the order in which clients are
processed in the joined dataset. The seed can be any nonnegative 32-bit
integer, an array of such integers, or `None`.
Returns:
A `tf.data.Dataset` object.
"""
check_numpy_random_seed(seed)
client_ids = self.client_ids.copy()
np.random.RandomState(seed=seed).shuffle(client_ids)
nested_dataset = tf.data.Dataset.from_tensor_slices(client_ids)
# We apply serializable_dataset_fn here to avoid loading all client datasets
# in memory, which is slow. Note that tf.data.Dataset.map implicitly wraps
# the input mapping in a tf.function.
example_dataset = nested_dataset.flat_map(self.serializable_dataset_fn)
return example_dataset
def preprocess(
self, preprocess_fn: Callable[[tf.data.Dataset],
tf.data.Dataset]) -> 'ClientData':
"""Applies `preprocess_fn` to each client's data.
Args:
preprocess_fn: A callable accepting a `tf.data.Dataset` and returning a
preprocessed `tf.data.Dataset`. This function must be traceable by TF.
Returns:
A `tff.simulation.datasets.ClientData`.
Raises:
IncompatiblePreprocessFnError: If `preprocess_fn` is a `tff.Computation`.
"""
py_typecheck.check_callable(preprocess_fn)
if isinstance(preprocess_fn, computation_base.Computation):
raise IncompatiblePreprocessFnError()
return PreprocessClientData(self, preprocess_fn)
@classmethod
def from_clients_and_tf_fn(
cls,
client_ids: Iterable[str],
serializable_dataset_fn: Callable[[str], tf.data.Dataset],
) -> 'ClientData':
"""Constructs a `ClientData` based on the given function.
Args:
client_ids: A non-empty list of strings to use as input to
`create_dataset_fn`.
serializable_dataset_fn: A function that takes a client_id from the above
list, and returns a `tf.data.Dataset`. This function must be
serializable and usable within the context of a `tf.function` and
`tff.Computation`.
Returns:
A `ClientData` object.
"""
return ConcreteClientData(client_ids, serializable_dataset_fn)
@classmethod
def train_test_client_split(
    cls,
    client_data: 'ClientData',
    num_test_clients: int,
    seed: Optional[Union[int, Sequence[int]]] = None
) -> Tuple['ClientData', 'ClientData']:
    """Returns a pair of (train, test) `ClientData`.

    This method partitions the clients of `client_data` into two `ClientData`
    objects with disjoint sets of `ClientData.client_ids`. All clients in the
    test `ClientData` are guaranteed to have non-empty datasets, but the
    training `ClientData` may have clients with no data.

    Note: This method may be expensive, and so it may be useful to avoid calling
    multiple times and holding on to the results.

    Args:
      client_data: The base `ClientData` to split.
      num_test_clients: How many clients to hold out for testing. This can be at
        most len(client_data.client_ids) - 1, since we don't want to produce
        empty `ClientData`.
      seed: Optional seed to fix shuffling of clients before splitting. The seed
        can be any nonnegative 32-bit integer, an array of such integers, or
        `None`.

    Returns:
      A pair (train_client_data, test_client_data), where test_client_data
      has `num_test_clients` selected at random, subject to the constraint they
      each have at least 1 batch in their dataset.

    Raises:
      ValueError: If `num_test_clients` cannot be satisfied by `client_data`,
        or too many clients have empty datasets.
    """
    if num_test_clients <= 0:
        raise ValueError('Please specify num_test_clients > 0.')
    if len(client_data.client_ids) <= num_test_clients:
        raise ValueError('The client_data supplied has only {} clients, but '
                         '{} test clients were requested.'.format(
                             len(client_data.client_ids), num_test_clients))
    check_numpy_random_seed(seed)
    # Shuffle a copy of the ids so the held-out set is random but reproducible
    # for a fixed seed.
    train_client_ids = list(client_data.client_ids)
    np.random.RandomState(seed).shuffle(train_client_ids)
    # These clients will be added back into the training set at the end.
    clients_with_insufficient_batches = []
    test_client_ids = []
    while len(test_client_ids) < num_test_clients:
        if not train_client_ids or (
                # Arbitrarily threshold where "many" (relative to num_test_clients)
                # clients have no data. Note: If needed, we could make this limit
                # configurable.
                len(clients_with_insufficient_batches) > 5 * num_test_clients + 10):
            raise ValueError('Encountered too many clients with no data.')
        client_id = train_client_ids.pop()
        dataset = client_data.create_tf_dataset_for_client(client_id)
        try:
            # Pull a single element to prove this client's dataset is non-empty.
            _ = next(dataset.__iter__())
        except StopIteration:
            logging.warning('Client %s had no data, skipping.', client_id)
            clients_with_insufficient_batches.append(client_id)
            continue
        test_client_ids.append(client_id)
    # Invariant for successful exit of the above loop:
    assert len(test_client_ids) == num_test_clients

    def from_ids(client_ids: Iterable[str]) -> 'ClientData':
        # Rebuild a ClientData over a subset of ids, reusing the underlying
        # serializable dataset function.
        return cls.from_clients_and_tf_fn(client_ids,
                                          client_data.serializable_dataset_fn)

    return (from_ids(train_client_ids + clients_with_insufficient_batches),
            from_ids(test_client_ids))
class PreprocessClientData(ClientData):
    """Applies a preprocessing function to every dataset it returns.

    This `ClientData` subclass delegates all other aspects of implementation to
    its underlying `ClientData` object, simply wiring in its `preprocess_fn`
    where necessary.
    """

    def __init__(  # pylint: disable=super-init-not-called
            self, underlying_client_data: ClientData,
            preprocess_fn: Callable[[tf.data.Dataset], tf.data.Dataset]):
        py_typecheck.check_type(underlying_client_data, ClientData)
        py_typecheck.check_callable(preprocess_fn)
        self._underlying_client_data = underlying_client_data
        self._preprocess_fn = preprocess_fn
        # Eagerly preprocess one dataset (from the first client id) so the
        # element type structure of the preprocessed data is known up front.
        example_dataset = self._preprocess_fn(
            self._underlying_client_data.create_tf_dataset_for_client(
                next(iter(underlying_client_data.client_ids))))
        self._element_type_structure = example_dataset.element_spec
        self._dataset_computation = None

        # Local function stored on the instance; the identically named
        # property below returns it, so it remains a plain traceable function
        # rather than a bound method.
        def serializable_dataset_fn(client_id: str) -> tf.data.Dataset:
            return self._preprocess_fn(
                self._underlying_client_data.serializable_dataset_fn(client_id))  # pylint:disable=protected-access
        self._serializable_dataset_fn = serializable_dataset_fn

    @property
    def serializable_dataset_fn(self):
        # Function mapping client_id -> preprocessed tf.data.Dataset.
        return self._serializable_dataset_fn

    @property
    def client_ids(self):
        # Client ids are unchanged by preprocessing; delegate to the wrapped
        # ClientData.
        return self._underlying_client_data.client_ids

    def create_tf_dataset_for_client(self, client_id: str) -> tf.data.Dataset:
        """Returns the preprocessed dataset for `client_id`."""
        return self._preprocess_fn(
            self._underlying_client_data.create_tf_dataset_for_client(client_id))

    @property
    def element_type_structure(self):
        # Element spec of the *preprocessed* datasets (computed in __init__).
        return self._element_type_structure
class ConcreteClientData(ClientData):
    """A generic `ClientData` object.

    This is a simple implementation of client_data, where Datasets are specified
    as a function from client_id to Dataset.

    The `ConcreteClientData.preprocess` classmethod is provided as a utility
    used to wrap another `ClientData` with an additional preprocessing function.
    """

    def __init__(  # pylint: disable=super-init-not-called
        self,
        client_ids: Iterable[str],
        serializable_dataset_fn: Callable[[str], tf.data.Dataset],
    ):
        """Creates a `ClientData` from clients and a mapping function.

        Args:
          client_ids: A non-empty iterable of `string` objects, representing ids for
            each client.
          serializable_dataset_fn: A function that takes as input a `string`, and
            returns a `tf.data.Dataset`. This must be traceable by TF and TFF. That
            is, it must be compatible with both `tf.function` and `tff.Computation`
            wrappers.
        """
        py_typecheck.check_type(client_ids, collections.abc.Iterable)
        py_typecheck.check_callable(serializable_dataset_fn)
        if not client_ids:
            raise ValueError('At least one client_id is required.')
        self._client_ids = list(client_ids)
        self._serializable_dataset_fn = serializable_dataset_fn
        # Build one example dataset (first client) so element_type_structure is
        # available without touching every client.
        example_dataset = serializable_dataset_fn(next(iter(client_ids)))
        self._element_type_structure = example_dataset.element_spec

    @property
    def client_ids(self) -> List[str]:
        # Ids were materialized as a list at construction time.
        return self._client_ids

    @property
    def serializable_dataset_fn(self):
        # The user-supplied client_id -> tf.data.Dataset mapping.
        return self._serializable_dataset_fn

    @property
    def element_type_structure(self):
        # Element spec of the example dataset built in __init__.
        return self._element_type_structure
| [
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow_federated.python.common_libs.py_typecheck.check_type",
"tensorflow_federated.python.common_libs.py_typecheck.check_callable",
"absl.logging.warning",
"tensorflow_federated.python.core.api.computations.tf_computation",
"numpy.random.RandomState"
] | [((8253, 8299), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['client_ids'], {}), '(client_ids)\n', (8287, 8299), True, 'import tensorflow as tf\n'), ((9137, 9179), 'tensorflow_federated.python.common_libs.py_typecheck.check_callable', 'py_typecheck.check_callable', (['preprocess_fn'], {}), '(preprocess_fn)\n', (9164, 9179), False, 'from tensorflow_federated.python.common_libs import py_typecheck\n'), ((13857, 13916), 'tensorflow_federated.python.common_libs.py_typecheck.check_type', 'py_typecheck.check_type', (['underlying_client_data', 'ClientData'], {}), '(underlying_client_data, ClientData)\n', (13880, 13916), False, 'from tensorflow_federated.python.common_libs import py_typecheck\n'), ((13921, 13963), 'tensorflow_federated.python.common_libs.py_typecheck.check_callable', 'py_typecheck.check_callable', (['preprocess_fn'], {}), '(preprocess_fn)\n', (13948, 13963), False, 'from tensorflow_federated.python.common_libs import py_typecheck\n'), ((16044, 16105), 'tensorflow_federated.python.common_libs.py_typecheck.check_type', 'py_typecheck.check_type', (['client_ids', 'collections.abc.Iterable'], {}), '(client_ids, collections.abc.Iterable)\n', (16067, 16105), False, 'from tensorflow_federated.python.common_libs import py_typecheck\n'), ((16110, 16162), 'tensorflow_federated.python.common_libs.py_typecheck.check_callable', 'py_typecheck.check_callable', (['serializable_dataset_fn'], {}), '(serializable_dataset_fn)\n', (16137, 16162), False, 'from tensorflow_federated.python.common_libs import py_typecheck\n'), ((5598, 5636), 'tensorflow_federated.python.core.api.computations.tf_computation', 'computations.tf_computation', (['tf.string'], {}), '(tf.string)\n', (5625, 5636), False, 'from tensorflow_federated.python.core.api import computations\n'), ((7194, 7243), 'tensorflow_federated.python.common_libs.py_typecheck.check_type', 'py_typecheck.check_type', (['dataset', 'tf.data.Dataset'], {}), '(dataset, 
tf.data.Dataset)\n', (7217, 7243), False, 'from tensorflow_federated.python.common_libs import py_typecheck\n'), ((6936, 6968), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (6957, 6968), True, 'import numpy as np\n'), ((8179, 8211), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (8200, 8211), True, 'import numpy as np\n'), ((11989, 12016), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (12010, 12016), True, 'import numpy as np\n'), ((12795, 12857), 'absl.logging.warning', 'logging.warning', (['"""Client %s had no data, skipping."""', 'client_id'], {}), "('Client %s had no data, skipping.', client_id)\n", (12810, 12857), False, 'from absl import logging\n')] |
# the phenotyping problem is annoying since you end up with 25 binary tasks; assuming that all of your prediction csv's
# are properly prefixed with the name of the task and reside within a test results folder, then this script will
# will evaluate model performance on each task, and aggregate performance
# python2 -um IanFairnessHackery.evaluate_phenotype_preds ./IanFairnessHackery/john_results/Phenotyping/test/
from mimic3models import metrics
import sys
import os
import numpy as np
# Phenotype task-name prefixes mapped to a "seen" flag. Every value starts
# False and is flipped to True once a prediction csv for that task has been
# evaluated, so tasks with missing data can be reported at the end of the run.
# NOTE(review): a few entries ("nonhypertensive", "pneumothorax",
# "pulmonary collapse") look like fragments of longer HCUP category names --
# presumably they match the csv filename prefixes as-is; verify against the
# actual result filenames.
PRED_TASKS = {
    "Acute and unspecified renal failure" : False,
    "Acute cerebrovascular disease" : False,
    "Acute myocardial infarction" : False,
    "Cardiac dysrhythmias" : False,
    "Chronic kidney disease" : False,
    "Chronic obstructive pulmonary disease and bronchiectasis" : False,
    "Complications of surgical procedures or medical care" : False,
    "Conduction disorders" : False,
    "Congestive heart failure" : False,
    "nonhypertensive" : False,
    "Coronary atherosclerosis and other heart disease" : False,
    "Diabetes mellitus with complications" : False,
    "Diabetes mellitus without complication" : False,
    "Disorders of lipid metabolism" : False,
    "Essential hypertension" : False,
    "Fluid and electrolyte disorders" : False,
    "Gastrointestinal hemorrhage" : False,
    "Hypertension with complications and secondary hypertension" : False,
    "Other liver diseases" : False,
    "Other lower respiratory disease" : False,
    "Other upper respiratory disease" : False,
    "Pleurisy" : False,
    "pneumothorax" : False,
    "pulmonary collapse" : False,
    "Pneumonia (except that caused by tuberculosis or sexually transmitted disease)" : False
}
def read_file(path):
    """Parse one prediction csv into (predictions, labels) numpy arrays.

    The file is expected to start with a header row, followed by lines of the
    form ``id,episode,prediction,label`` (prediction is a float probability in
    column 2, label an int in column 3).
    """
    preds = []
    labs = []
    with open(path, 'r') as handle:
        handle.readline()  # discard the header row
        for raw in handle:
            fields = raw.strip().split(",")
            preds.append(float(fields[2]))
            labs.append(int(fields[3]))
    return np.array(preds), np.array(labs)
if __name__ == '__main__':
    # Usage: python -um ...evaluate_phenotype_preds <results_dir>
    # Evaluates each per-task prediction csv individually, then all tasks
    # together as a multilabel problem.
    if len(sys.argv) != 2:
        print("Must provide path to folder containing the prediction csv's in id/episode/pred/label format, and with" +
              " filenames that are prefixed by the condition")
        exit(-1)
    merged_pred = None
    merged_Y = None
    indir = sys.argv[1]
    for filename in os.listdir(indir):
        # Bug fix: on Python 3 filter() returns a lazy iterator, so the old
        # len(matches)/matches[0] raised TypeError. Materialize as a list.
        matches = [prefix for prefix in PRED_TASKS if filename.startswith(prefix)]
        # Skip files that do not correspond to any known task.
        if not matches:
            continue
        # Make sure only one file was seen for this task.
        assert not PRED_TASKS[matches[0]]
        PRED_TASKS[matches[0]] = True
        print("Evaluating {}".format(matches[0]))
        match_pred, match_Y = read_file(os.path.join(indir, filename))
        # Accumulate a (n_tasks, n_samples) matrix for the multilabel metrics.
        if merged_pred is None:
            merged_pred = np.expand_dims(match_pred.copy(), axis=0)
            merged_Y = np.expand_dims(match_Y.copy(), axis=0)
        else:
            merged_pred = np.concatenate((merged_pred, np.expand_dims(match_pred, axis=0)), axis=0)
            merged_Y = np.concatenate((merged_Y, np.expand_dims(match_Y, axis=0)), axis=0)
        # Per-task binary metrics.
        metrics.print_metrics_binary(match_Y, match_pred)
        print("----------------------------------------")
    print("\n==========================================")
    print("Evaluating all together:")
    # Transpose to (n_samples, n_tasks) as expected by the multilabel metrics.
    metrics.print_metrics_multilabel(merged_Y.T, merged_pred.T)
    for key in PRED_TASKS:
        if PRED_TASKS[key] != True:
            print("WARNING: Data for task {} missing?".format(key))
"mimic3models.metrics.print_metrics_multilabel",
"os.listdir",
"os.path.join",
"numpy.array",
"numpy.expand_dims",
"mimic3models.metrics.print_metrics_binary"
] | [((2384, 2401), 'os.listdir', 'os.listdir', (['indir'], {}), '(indir)\n', (2394, 2401), False, 'import os\n'), ((3495, 3554), 'mimic3models.metrics.print_metrics_multilabel', 'metrics.print_metrics_multilabel', (['merged_Y.T', 'merged_pred.T'], {}), '(merged_Y.T, merged_pred.T)\n', (3527, 3554), False, 'from mimic3models import metrics\n'), ((2000, 2021), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (2008, 2021), True, 'import numpy as np\n'), ((2023, 2039), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (2031, 2039), True, 'import numpy as np\n'), ((3285, 3334), 'mimic3models.metrics.print_metrics_binary', 'metrics.print_metrics_binary', (['match_Y', 'match_pred'], {}), '(match_Y, match_pred)\n', (3313, 3334), False, 'from mimic3models import metrics\n'), ((2816, 2845), 'os.path.join', 'os.path.join', (['indir', 'filename'], {}), '(indir, filename)\n', (2828, 2845), False, 'import os\n'), ((3078, 3112), 'numpy.expand_dims', 'np.expand_dims', (['match_pred'], {'axis': '(0)'}), '(match_pred, axis=0)\n', (3092, 3112), True, 'import numpy as np\n'), ((3171, 3202), 'numpy.expand_dims', 'np.expand_dims', (['match_Y'], {'axis': '(0)'}), '(match_Y, axis=0)\n', (3185, 3202), True, 'import numpy as np\n')] |
#----------
# build the dataset
#----------
import numpy as np, math
import matplotlib.pyplot as plt
from pybrain.datasets import SupervisedDataSet
from pybrain.structure import SigmoidLayer, LinearLayer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
def get_nn_xvalues(xvalues, magnification):
    """Return x positions covering the data range widened by `magnification`.

    The data range is extended by `magnification * range / 2` on each side,
    and points are placed at (approximately) the same density as `xvalues`.

    Args:
        xvalues: 1-d sequence of x coordinates of the training data.
        magnification: non-negative widening factor (0 keeps the data range).

    Returns:
        1-d numpy array of evenly spaced x positions.
    """
    x_range = np.amax(xvalues) - np.amin(xvalues)
    density = len(xvalues) / x_range  # samples per unit of x
    new_x_min = np.amin(xvalues) - magnification * x_range / 2
    new_x_max = np.amax(xvalues) + magnification * x_range / 2
    # Bug fix: np.linspace requires an integer sample count, but the product
    # below is a float (TypeError on Python 3 / modern numpy). Round to int.
    num_points = int(round(density * (new_x_max - new_x_min)))
    return np.linspace(new_x_min, new_x_max, num_points)
def set_range(xvalues, yvalues, magnification, ax):
    """Widen the axes limits around the data by `magnification`.

    The x limits are padded by `magnification * x_range / 2` on each side;
    the y limits are padded proportionally (same slope as the data).
    """
    x_lo, x_hi = np.amin(xvalues), np.amax(xvalues)
    y_lo, y_hi = np.amin(yvalues), np.amax(yvalues)
    span_x = x_hi - x_lo
    span_y = y_hi - y_lo
    pad_x = magnification * span_x / 2
    pad_y = pad_x * (span_y / span_x)
    ax.set_xlim(x_lo - pad_x, x_hi + pad_x)
    ax.set_ylim(y_lo - pad_y, y_hi + pad_y)
def train_nn(xvalues, yvalues, rate = 0.0001, batch = False, layers = [100, 100, 100], magnification = 0, iterations = 50):
    """Train a sigmoid-hidden / linear-output feed-forward net on (x, y) pairs,
    live-plotting the network's output against the target points.

    Args:
        xvalues, yvalues: training samples (1-d, same length).
        rate: backprop learning rate.
        batch: if True, use batch learning instead of online updates.
        layers: hidden layer sizes. NOTE(review): mutable default argument --
            harmless here because it is only unpacked via *layers, never mutated.
        magnification: how far beyond the data range to evaluate/plot the net
            (see get_nn_xvalues / set_range).
        iterations: number of training epochs; the plot is redrawn after each.

    Returns:
        The trained pybrain network.
    """
    ds = SupervisedDataSet(1, 1)
    for x, y in zip(xvalues, yvalues):
        ds.addSample((x,), (y,))
    #----------
    # build the network: 1 input -> hidden sigmoid layers -> 1 linear output
    #----------
    net = buildNetwork(1,
                       *layers,
                       1,
                       bias = True,
                       hiddenclass = SigmoidLayer,
                       outclass = LinearLayer
                       )
    #----------
    # train
    #----------
    fig, ax = plt.subplots()
    plt.title("Engine Temp Vs. Probability of Failure")
    plt.ylabel("Probability of Failure")
    # NOTE(review): "Celcius" is a typo in the axis label (runtime string,
    # deliberately left unchanged here).
    plt.xlabel("Engine Temp in Degrees Celcius")
    trainer = BackpropTrainer(net, ds, learningrate = rate, momentum=0, verbose = False, batchlearning=batch)
    #trainer.trainUntilConvergence(maxEpochs = 100)
    nn_xvalues = get_nn_xvalues(xvalues, magnification)
    # Initial curve of the (untrained) network output.
    ax.plot(nn_xvalues,
            [ net.activate([x]) for x in nn_xvalues ], linewidth = 2,
            color = 'blue', label = 'NN output')
    # target function
    ax.plot(xvalues,
            yvalues, "ro", linewidth = 2, color = 'red')
    set_range(xvalues, yvalues, magnification, ax)
    for i in range(iterations):
        trainer.train()
        # neural net approximation: refresh the blue curve after each epoch
        new_yvalues = [ net.activate([x]) for x in nn_xvalues ]
        ax.lines[0].set_ydata(new_yvalues)
        fig.canvas.draw()
    return net
"numpy.amax",
"numpy.amin",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.linspace",
"pybrain.tools.shortcuts.buildNetwork",
"pybrain.supervised.trainers.BackpropTrainer",
"matplotlib.pyplot.title",
"pybrain.datasets.SupervisedDataSet",
"matplotlib.pyplot.subplots"
] | [((578, 646), 'numpy.linspace', 'np.linspace', (['new_x_min', 'new_x_max', '(density * (new_x_max - new_x_min))'], {}), '(new_x_min, new_x_max, density * (new_x_max - new_x_min))\n', (589, 646), True, 'import numpy as np, math\n'), ((1207, 1230), 'pybrain.datasets.SupervisedDataSet', 'SupervisedDataSet', (['(1)', '(1)'], {}), '(1, 1)\n', (1224, 1230), False, 'from pybrain.datasets import SupervisedDataSet\n'), ((1379, 1470), 'pybrain.tools.shortcuts.buildNetwork', 'buildNetwork', (['(1)', '*layers', '(1)'], {'bias': '(True)', 'hiddenclass': 'SigmoidLayer', 'outclass': 'LinearLayer'}), '(1, *layers, 1, bias=True, hiddenclass=SigmoidLayer, outclass=\n LinearLayer)\n', (1391, 1470), False, 'from pybrain.tools.shortcuts import buildNetwork\n'), ((1674, 1688), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1686, 1688), True, 'import matplotlib.pyplot as plt\n'), ((1693, 1744), 'matplotlib.pyplot.title', 'plt.title', (['"""Engine Temp Vs. Probability of Failure"""'], {}), "('Engine Temp Vs. 
Probability of Failure')\n", (1702, 1744), True, 'import matplotlib.pyplot as plt\n'), ((1749, 1785), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability of Failure"""'], {}), "('Probability of Failure')\n", (1759, 1785), True, 'import matplotlib.pyplot as plt\n'), ((1790, 1834), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Engine Temp in Degrees Celcius"""'], {}), "('Engine Temp in Degrees Celcius')\n", (1800, 1834), True, 'import matplotlib.pyplot as plt\n'), ((1859, 1954), 'pybrain.supervised.trainers.BackpropTrainer', 'BackpropTrainer', (['net', 'ds'], {'learningrate': 'rate', 'momentum': '(0)', 'verbose': '(False)', 'batchlearning': 'batch'}), '(net, ds, learningrate=rate, momentum=0, verbose=False,\n batchlearning=batch)\n', (1874, 1954), False, 'from pybrain.supervised.trainers import BackpropTrainer\n'), ((370, 386), 'numpy.amax', 'np.amax', (['xvalues'], {}), '(xvalues)\n', (377, 386), True, 'import numpy as np, math\n'), ((389, 405), 'numpy.amin', 'np.amin', (['xvalues'], {}), '(xvalues)\n', (396, 405), True, 'import numpy as np, math\n'), ((459, 475), 'numpy.amin', 'np.amin', (['xvalues'], {}), '(xvalues)\n', (466, 475), True, 'import numpy as np, math\n'), ((518, 534), 'numpy.amax', 'np.amax', (['xvalues'], {}), '(xvalues)\n', (525, 534), True, 'import numpy as np, math\n'), ((736, 752), 'numpy.amax', 'np.amax', (['xvalues'], {}), '(xvalues)\n', (743, 752), True, 'import numpy as np, math\n'), ((755, 771), 'numpy.amin', 'np.amin', (['xvalues'], {}), '(xvalues)\n', (762, 771), True, 'import numpy as np, math\n'), ((786, 802), 'numpy.amax', 'np.amax', (['yvalues'], {}), '(yvalues)\n', (793, 802), True, 'import numpy as np, math\n'), ((805, 821), 'numpy.amin', 'np.amin', (['yvalues'], {}), '(yvalues)\n', (812, 821), True, 'import numpy as np, math\n'), ((945, 961), 'numpy.amin', 'np.amin', (['xvalues'], {}), '(xvalues)\n', (952, 961), True, 'import numpy as np, math\n'), ((970, 986), 'numpy.amax', 'np.amax', (['xvalues'], {}), '(xvalues)\n', 
(977, 986), True, 'import numpy as np, math\n'), ((1011, 1027), 'numpy.amin', 'np.amin', (['yvalues'], {}), '(yvalues)\n', (1018, 1027), True, 'import numpy as np, math\n'), ((1036, 1052), 'numpy.amax', 'np.amax', (['yvalues'], {}), '(yvalues)\n', (1043, 1052), True, 'import numpy as np, math\n')] |
import numpy as np
from plico.utils.decorator import override, returns
from plico_dm.client.abstract_deformable_mirror_client import \
AbstractDeformableMirrorClient
from plico_dm.utils.timeout import Timeout
import time
from plico_dm.types.deformable_mirror_status import DeformableMirrorStatus
class SimulatedDeformableMirrorClient(AbstractDeformableMirrorClient):
    """In-memory fake of a deformable-mirror client, useful for testing.

    Implements the AbstractDeformableMirrorClient interface without hardware:
    the mirror shape is a plain numpy vector of N_MODES elements, and command
    sequences are played back using an injectable time source.
    """

    # Fixed number of modes (and actuators) simulated by this fake mirror.
    N_MODES = 4

    def __init__(self, timeModule=time):
        # `timeModule` is injectable so tests can control time.time().
        self._timeMod = timeModule
        self._shape = np.zeros(self.N_MODES)
        self._commandCounter = 0
        self._isControlLoopEnabled = False
        self._isShapeSequenceEnabled = False
        # Shape sequence stored as (N_MODES, n_steps); starts as one zero step.
        self._shapeSeq = np.zeros(self.N_MODES).reshape((
            self.N_MODES, 1))
        self._nElementsShapeSeq = 1
        self._seqTimeStepInSeconds = 1
        self._shapeSeqIdx = 0
        self._timeStampSequence = 0
        # Reference (flat) command subtracted from / added to user commands.
        self._reference_command = np.zeros(self.N_MODES)
        self._reference_command_tag = 'zeros'
        self._ref_dict = {}
        self._ref_dict['zeros'] = np.zeros(self.N_MODES)

    @override
    def enable_control_loop(self,
                            boolEnableOrDisable,
                            timeoutInSec=Timeout.GENERIC_COMMAND):
        """Enables/disables the (simulated) control loop flag."""
        self._isControlLoopEnabled = boolEnableOrDisable

    @override
    def load_shape_sequence(self,
                            shapeSequence,
                            timeStepInSeconds,
                            timeoutInSec=Timeout.GENERIC_COMMAND):
        """Stores a (N_MODES, n_steps) shape sequence and its time step."""
        self._shapeSeq = shapeSequence
        self._seqTimeStepInSeconds = timeStepInSeconds
        self._shapeSeqIdx = 0
        self._nElementsShapeSeq = self._shapeSeq.shape[1]

    @override
    def start_shape_sequence(self,
                             timeoutInSec=Timeout.GENERIC_COMMAND):
        """Starts sequence playback; records the start timestamp."""
        self._isShapeSequenceEnabled = True
        self._timeStampSequence = self._timeMod.time()

    @override
    def stop_shape_sequence(self,
                            timeoutInSec=Timeout.GENERIC_COMMAND):
        """Stops sequence playback."""
        self._isShapeSequenceEnabled = False

    @override
    def set_shape(self,
                  command,
                  timeoutInSec=Timeout.MIRROR_SET_SHAPE):
        """Applies `command` on top of the current reference command."""
        self._shape = command + self._reference_command
        self._commandCounter += 1

    @override
    def get_shape(self, timeoutInSec=Timeout.MIRROR_GET_SHAPE):
        """Returns the current shape relative to the reference command.

        While a shape sequence is running, the sequence step corresponding to
        the elapsed time (wrapping around) is added to the returned shape.
        """
        shape = self._shape - self._reference_command
        if self._isShapeSequenceEnabled:
            now = self._timeMod.time()
            nSteps = int((now - self._timeStampSequence) /
                         self._seqTimeStepInSeconds)
            # Wrap around so the sequence repeats indefinitely.
            seqIdx = nSteps % self._nElementsShapeSeq
            shape += self._shapeSeq[:, seqIdx]
        return shape

    @override
    def get_number_of_modes(self, timeoutInSec=Timeout.GENERIC_COMMAND):
        """Number of controllable modes (fixed to N_MODES)."""
        return self.N_MODES

    @override
    def get_number_of_actuators(self, timeoutInSec=Timeout.GENERIC_COMMAND):
        """Number of actuators (same as modes for this simulator)."""
        return self.N_MODES

    @override
    @returns(DeformableMirrorStatus)
    def get_status(self, timeoutInSec=Timeout.MIRROR_GET_STATUS):
        """Returns a status snapshot: absolute shape and command counter."""
        return DeformableMirrorStatus(self._shape,
                                      self._commandCounter)

    @override
    def get_snapshot(self):
        # The simulator has no extra state worth exporting.
        return {}

    @override
    def save_current_shape_as_reference(self, tag):
        """Stores the current absolute shape under `tag` for later reuse."""
        self._ref_dict[tag] = self._shape

    @override
    def load_reference(self, tag):
        """Makes the previously saved shape `tag` the active reference."""
        self._reference_command = self._ref_dict[tag]
        self._reference_command_tag = tag

    @override
    def get_reference_shape(self):
        return self._reference_command

    @override
    def get_reference_shape_tag(self):
        return self._reference_command_tag
| [
"plico.utils.decorator.returns",
"numpy.zeros",
"plico_dm.types.deformable_mirror_status.DeformableMirrorStatus"
] | [((2955, 2986), 'plico.utils.decorator.returns', 'returns', (['DeformableMirrorStatus'], {}), '(DeformableMirrorStatus)\n', (2962, 2986), False, 'from plico.utils.decorator import override, returns\n'), ((489, 511), 'numpy.zeros', 'np.zeros', (['self.N_MODES'], {}), '(self.N_MODES)\n', (497, 511), True, 'import numpy as np\n'), ((896, 918), 'numpy.zeros', 'np.zeros', (['self.N_MODES'], {}), '(self.N_MODES)\n', (904, 918), True, 'import numpy as np\n'), ((1027, 1049), 'numpy.zeros', 'np.zeros', (['self.N_MODES'], {}), '(self.N_MODES)\n', (1035, 1049), True, 'import numpy as np\n'), ((3068, 3125), 'plico_dm.types.deformable_mirror_status.DeformableMirrorStatus', 'DeformableMirrorStatus', (['self._shape', 'self._commandCounter'], {}), '(self._shape, self._commandCounter)\n', (3090, 3125), False, 'from plico_dm.types.deformable_mirror_status import DeformableMirrorStatus\n'), ((658, 680), 'numpy.zeros', 'np.zeros', (['self.N_MODES'], {}), '(self.N_MODES)\n', (666, 680), True, 'import numpy as np\n')] |
from arg_parser import UserArgs
from collections import Counter
from dataset_handler.dataset import CUB_Xian, SUN_Xian, AWA1_Xian
from dataset_handler.transfer_task_split import ZSLsplit, GZSLsplit, ImbalancedDataSplit, DragonSplit, GFSLSplit
from attribute_expert.model import AttributeExpert
from keras.utils import to_categorical
import numpy as np
class DataLoader(object):
    """Loads a dataset, applies the transfer-task split selected by UserArgs,
    and precomputes train/val/test tensors plus evaluation parameter tuples.
    """

    def __init__(self, should_test_split):
        # init data factory and split factory
        self.data_loaders_factory = {
            'CUB': CUB_Xian,
            'SUN': SUN_Xian,
            'AWA1': AWA1_Xian
        }
        # Split strategies keyed by transfer task name; seeds are fixed for
        # reproducibility.
        self.task_factory = {
            'ZSL': ZSLsplit(val_fold_id=1),
            'GZSL': GZSLsplit(seen_val_seed=1002),
            'IMB': ImbalancedDataSplit(classes_shuffle_seed=0, seen_val_seed=0),
            'GFSL': GFSLSplit(val_seed=0, test_seed=0, fs_nsamples=UserArgs.train_max_fs_samples),
            'DRAGON': DragonSplit(val_seed=0, test_seed=0,
                                   train_dist_function=UserArgs.train_dist,
                                   fs_nsamples=UserArgs.train_max_fs_samples)
        }
        self.dataset = self.data_loaders_factory[UserArgs.dataset_name](UserArgs.data_dir)
        # split dataset to train, val and test
        self.data = self.task_factory[UserArgs.transfer_task]._split(self.dataset, should_test_split)
        # Unpack the model-ready tensors; see AttributeExpert.prepare_data_for_model
        # for the exact tuple layout.
        self.data, \
        self.X_train, self.Y_train, self.Attributes_train, self.train_classes, \
        self.X_val, self.Y_val, self.Attributes_val, self.val_classes, \
        self.X_test, self.Y_test, self.Attributes_test, self.test_classes, \
        self.input_dim, self.categories_dim, self.attributes_dim, \
        self.class_descriptions_crossval, \
        self.attributes_groups_ranges_ids = AttributeExpert.prepare_data_for_model(self.data)
        # one hot encoding for Y's
        self.Y_train_oh = to_categorical(self.Y_train, num_classes=self.categories_dim)
        self.Y_val_oh = to_categorical(self.Y_val, num_classes=self.categories_dim)
        self.Y_test_oh = to_categorical(self.Y_test, num_classes=self.categories_dim)
        # prepare evaluation parameters
        self.train_data = (self.X_train, self.Y_train, self.Attributes_train, self.train_classes)
        self.val_data = (self.X_val, self.Y_val, self.Attributes_val, self.val_classes)
        self.test_data = (self.X_test, self.Y_test, self.Attributes_test, self.test_classes)
        train_distribution = self.task_factory[UserArgs.transfer_task].train_distribution
        # save num_training_samples_per_class (ordered by ascending class id)
        class_samples_map = Counter(self.Y_train)
        self.num_training_samples_per_class = [class_samples_map[key] for key in
                                               sorted(class_samples_map.keys(), reverse=False)]
        # save many_shot and few_shot classes
        self.ms_classes = self.task_factory[UserArgs.transfer_task].ms_classes
        self.fs_classes = self.task_factory[UserArgs.transfer_task].fs_classes
        # seperate validation to many shot, few shot indexes
        val_ms_indexes, val_fs_indexes = self.get_ms_and_fs_indexes(self.Y_val)
        X_val_many = self.X_val[val_ms_indexes]
        Y_val_many = self.Y_val[val_ms_indexes]
        X_val_few = self.X_val[val_fs_indexes]
        Y_val_few = self.Y_val[val_fs_indexes]
        self.eval_params = (self.X_val, self.Y_val, self.val_classes,
                            train_distribution, self.ms_classes, self.fs_classes, X_val_many,
                            Y_val_many, X_val_few, Y_val_few)
        # Same split for the test set.
        test_ms_indexes, test_fs_indexes = self.get_ms_and_fs_indexes(self.Y_test)
        X_test_many = self.X_test[test_ms_indexes]
        Y_test_many = self.Y_test[test_ms_indexes]
        X_test_few = self.X_test[test_fs_indexes]
        Y_test_few = self.Y_test[test_fs_indexes]
        self.test_eval_params = (self.X_test, self.Y_test, self.test_classes,
                                 train_distribution, self.ms_classes, self.fs_classes, X_test_many,
                                 Y_test_many, X_test_few, Y_test_few)
        print(f"""Dataset: {UserArgs.dataset_name}
        Train Shape: {self.X_train.shape}
        Val Shape: {self.X_val.shape}
        Test Shape: {self.X_test.shape}""")

    # Evaluate many and few shot accuracies
    def get_ms_and_fs_indexes(self, Y):
        """Return (many_shot_indexes, few_shot_indexes) into label vector `Y`,
        concatenated in the order the classes appear in ms_classes/fs_classes.
        """
        # get all indexes of many_shot classes
        ms_indexes = np.array([], dtype=int)
        for ms_class in self.ms_classes:
            cur_class_indexes = np.where(Y == ms_class)[0]
            ms_indexes = np.append(ms_indexes, cur_class_indexes)
        # get all indexes of few_shot classes
        fs_indexes = np.array([], dtype=int)
        for fs_class in self.fs_classes:
            cur_class_indexes = np.where(Y == fs_class)[0]
            fs_indexes = np.append(fs_indexes, cur_class_indexes)
        return ms_indexes, fs_indexes
| [
"dataset_handler.transfer_task_split.GFSLSplit",
"numpy.where",
"dataset_handler.transfer_task_split.DragonSplit",
"dataset_handler.transfer_task_split.ZSLsplit",
"keras.utils.to_categorical",
"collections.Counter",
"numpy.array",
"numpy.append",
"attribute_expert.model.AttributeExpert.prepare_data_... | [((1779, 1828), 'attribute_expert.model.AttributeExpert.prepare_data_for_model', 'AttributeExpert.prepare_data_for_model', (['self.data'], {}), '(self.data)\n', (1817, 1828), False, 'from attribute_expert.model import AttributeExpert\n'), ((1890, 1951), 'keras.utils.to_categorical', 'to_categorical', (['self.Y_train'], {'num_classes': 'self.categories_dim'}), '(self.Y_train, num_classes=self.categories_dim)\n', (1904, 1951), False, 'from keras.utils import to_categorical\n'), ((1976, 2035), 'keras.utils.to_categorical', 'to_categorical', (['self.Y_val'], {'num_classes': 'self.categories_dim'}), '(self.Y_val, num_classes=self.categories_dim)\n', (1990, 2035), False, 'from keras.utils import to_categorical\n'), ((2061, 2121), 'keras.utils.to_categorical', 'to_categorical', (['self.Y_test'], {'num_classes': 'self.categories_dim'}), '(self.Y_test, num_classes=self.categories_dim)\n', (2075, 2121), False, 'from keras.utils import to_categorical\n'), ((2605, 2626), 'collections.Counter', 'Counter', (['self.Y_train'], {}), '(self.Y_train)\n', (2612, 2626), False, 'from collections import Counter\n'), ((4428, 4451), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (4436, 4451), True, 'import numpy as np\n'), ((4686, 4709), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (4694, 4709), True, 'import numpy as np\n'), ((654, 677), 'dataset_handler.transfer_task_split.ZSLsplit', 'ZSLsplit', ([], {'val_fold_id': '(1)'}), '(val_fold_id=1)\n', (662, 677), False, 'from dataset_handler.transfer_task_split import ZSLsplit, GZSLsplit, ImbalancedDataSplit, DragonSplit, GFSLSplit\n'), ((699, 728), 'dataset_handler.transfer_task_split.GZSLsplit', 'GZSLsplit', ([], {'seen_val_seed': '(1002)'}), '(seen_val_seed=1002)\n', (708, 728), False, 'from dataset_handler.transfer_task_split import ZSLsplit, GZSLsplit, ImbalancedDataSplit, DragonSplit, GFSLSplit\n'), ((749, 809), 
'dataset_handler.transfer_task_split.ImbalancedDataSplit', 'ImbalancedDataSplit', ([], {'classes_shuffle_seed': '(0)', 'seen_val_seed': '(0)'}), '(classes_shuffle_seed=0, seen_val_seed=0)\n', (768, 809), False, 'from dataset_handler.transfer_task_split import ZSLsplit, GZSLsplit, ImbalancedDataSplit, DragonSplit, GFSLSplit\n'), ((831, 908), 'dataset_handler.transfer_task_split.GFSLSplit', 'GFSLSplit', ([], {'val_seed': '(0)', 'test_seed': '(0)', 'fs_nsamples': 'UserArgs.train_max_fs_samples'}), '(val_seed=0, test_seed=0, fs_nsamples=UserArgs.train_max_fs_samples)\n', (840, 908), False, 'from dataset_handler.transfer_task_split import ZSLsplit, GZSLsplit, ImbalancedDataSplit, DragonSplit, GFSLSplit\n'), ((932, 1057), 'dataset_handler.transfer_task_split.DragonSplit', 'DragonSplit', ([], {'val_seed': '(0)', 'test_seed': '(0)', 'train_dist_function': 'UserArgs.train_dist', 'fs_nsamples': 'UserArgs.train_max_fs_samples'}), '(val_seed=0, test_seed=0, train_dist_function=UserArgs.\n train_dist, fs_nsamples=UserArgs.train_max_fs_samples)\n', (943, 1057), False, 'from dataset_handler.transfer_task_split import ZSLsplit, GZSLsplit, ImbalancedDataSplit, DragonSplit, GFSLSplit\n'), ((4577, 4617), 'numpy.append', 'np.append', (['ms_indexes', 'cur_class_indexes'], {}), '(ms_indexes, cur_class_indexes)\n', (4586, 4617), True, 'import numpy as np\n'), ((4835, 4875), 'numpy.append', 'np.append', (['fs_indexes', 'cur_class_indexes'], {}), '(fs_indexes, cur_class_indexes)\n', (4844, 4875), True, 'import numpy as np\n'), ((4525, 4548), 'numpy.where', 'np.where', (['(Y == ms_class)'], {}), '(Y == ms_class)\n', (4533, 4548), True, 'import numpy as np\n'), ((4783, 4806), 'numpy.where', 'np.where', (['(Y == fs_class)'], {}), '(Y == fs_class)\n', (4791, 4806), True, 'import numpy as np\n')] |
""" Domain of parameters to generate configs. """
from itertools import product, islice
from collections import OrderedDict
from copy import copy, deepcopy
from pprint import pformat
import numpy as np
from .utils import must_execute, to_list
from .. import Config, Sampler, make_rng
from ..named_expr import eval_expr
class Alias:
    """ Class to create alias for some Python object. This is useful for creating short names for complex objects
    such as nested dictionaries.

    Parameters
    ----------
    value : object
    alias : str, optional
        Alias for value, by default None. If None then alias will be equal to `value.__name__`
        (if exists) or to `str(value)`.
    """
    def __init__(self, value, alias=None):
        if isinstance(value, Alias):
            # Copy constructor: reuse the wrapped value together with its alias.
            self.value, self.alias = value.value, value.alias
            return
        self.value = value
        self.alias = self._get_name(value) if alias is None else alias

    def __repr__(self):
        return 'Alias(' + str(self.alias) + ': ' + str(self.value) + ')'

    def _get_name(self, obj):
        """ Derive a readable name: `__name__` when available, `str(obj)` otherwise. """
        return obj.__name__ if hasattr(obj, '__name__') else str(obj)
class ConfigAlias:
    """ Wrapper for Config to infer its aliased version. Each key and value from initial config will be
    wrapped with `Alias` class (if it is not).

    Parameters
    ----------
    config : dict, list of tuple, or ConfigAlias
        each tuple is a pair (key, value), key is `Alias` or str, value is `Alias` or object.

    Notes
    -----
    ConfigAlias has two main methods: `config` and `alias`. `config` returns initial config as `Config` instance.
    `alias` returns aliased versions of config or its string representation.
    """
    def __init__(self, config=None):
        if isinstance(config, ConfigAlias):
            # Copy the item list so that mutating the new instance (via
            # `__setitem__`, `pop_config` or `pop_alias`) does not leak into the
            # source instance. The previous implementation shared the list and
            # then fell through into the iteration loop below, which
            # `ConfigAlias` itself does not support.
            self._config = list(config._config)
            return
        _config = []
        if isinstance(config, (dict, Config)):
            config = config.items()
        if config is not None:
            for key, value in config:
                # Wrap raw keys/values so that aliases are always available.
                _key = key if isinstance(key, Alias) else Alias(key)
                _value = value if isinstance(value, Alias) else Alias(value)
                _config.append((_key, _value))
        self._config = _config

    def alias(self, as_string=False, delim='-'):
        """ Returns config alias.

        Parameters
        ----------
        as_string : bool, optional
            if True, return string representation of ConfigAlias. Different items will be
            separated by `delim`, key and value for each pair will be separated by '_'.
        delim : str, optional
            delimiter for different ConfigAlias items in string representation.

        Returns
        -------
        dict or str
        """
        config_alias = Config({item[0].alias: item[1].alias for item in self._config})
        if as_string:
            # Sort for a deterministic string representation.
            config_alias = OrderedDict(sorted(config_alias.items()))
            config_alias = delim.join([str(key)+'_'+str(value) for key, value in config_alias.items()])
        return config_alias

    def config(self):
        """ Returns initial config as `Config` instance.

        Returns
        -------
        Config
        """
        return Config({item[0].value: item[1].value for item in self._config})

    def pop_config(self, key):
        """ Pop item from ConfigAlias by config value (not by alias).

        Returns
        -------
        ConfigAlias or None
            ConfigAlias for popped keys. None if key doesn't exist.
        """
        key = to_list(key)
        res = [item for item in self._config if item[0].value in key]
        self._config = [item for item in self._config if item[0].value not in key]
        if len(res) >= 1:
            return ConfigAlias(res)
        return None

    def pop_alias(self, key):
        """ Pop item from ConfigAlias by alias (not by value).

        Returns
        -------
        ConfigAlias or None
            ConfigAlias for popped keys. None if key doesn't exist.
        """
        key = to_list(key)
        res = [item for item in self._config if item[0].alias in key]
        self._config = [item for item in self._config if item[0].alias not in key]
        if len(res) >= 1:
            return ConfigAlias(res)
        return None

    def set_prefix(self, keys, n_digits):
        """ Create prefix from keys and store it under the '_prefix' key.

        The prefix concatenates the '#'-indexed aliases for `keys` (or 'null'
        when absent) followed by the zero-padded repetition number.
        """
        prefix = ''
        for key in keys:
            prefix += self.alias().get('#' + key, 'null') + '_'
        fmt = ("{:0" + str(n_digits) + "d}").format(self.config()['repetition'])
        self['_prefix'] = prefix + fmt + '_'
        return self

    def __getitem__(self, key):
        """ Returns true value (not alias). """
        return self.config()[key]

    def __setitem__(self, key, value):
        _key = key if isinstance(key, Alias) else Alias(key)
        _value = value if isinstance(value, Alias) else Alias(value)
        self._config.append((_key, _value))

    def __repr__(self):
        return pformat(self.alias().config)

    def __add__(self, other):
        """ Concatenate two ConfigAlias instances into a new one (deep copies). """
        config = ConfigAlias()
        config._config = deepcopy(self._config) + deepcopy(other._config)
        return config

    def keys(self):
        return self.config().keys()
class Domain:
    """ Domain of parameters to generate configs for experiments.

    Parameters
    ----------
    domain : dict
        parameter values to try. Each key is a parameter, values is a list of parameter values
        or batchflow.Sampler.
    **kwargs :
        the same as a `domain` dict. `domain` using is preferable when parameter name includes symbols like `'/'`.

    Note
    ----
    `Domain` generates configs of parameters. The simplest example is `Domain(a=[1,2,3])`. That domain defines
    parameter `'a'` and its possible values `[1,2,3]`. You can iterate over all possible configs (3 configs in our
    example) and repeat generated configs in the same order several times (see `n_reps` in :meth:`~.set_iter_params`).
    Besides, parameter values can be a `batchflow.Sampler`, e.g. `Domain(a=NumpySampler('normal'))`. In that case
    values for parameter `'a'` will be sampled from normal distribution.
    Dict in domain definition can consist of several elements, then we will get all possible combinations of parameters,
    e.g. `Domain(a=[1,2], b=[3,4])` will produce four configs. If domain has parameters with array-like values and
    with sampler as values simultaneously, domain will produce all possible combinations of parameters with array-like
    values and for each combination values of other parameters will be sampled.
    To get configs from `Domain` use :meth:`~.iterator`. It produces configs wrapped by :class:`~.ConfigAlias`.
    Additional parameters like the number of repetitions or the number of samples for domains with samplers
    are defined in :meth:`~.set_iter_params`.

    **Operations with Domain**

    #. sum by `+`: Concatenate two domains. For example, the resulting domain
       `Domain(a=[1]) + Domain(b=[1])` will produce two configs: `{'a': 1}`, `{'b': 1}`
       (not one dict with `'a'` and `'b'`).
    #. multiplication by `*`: Cartesian multiplications of options in Domain.
       For example, if `domain1 = Domain({'a': [1, 2]})`, `domain2 = Domain({'b': [3, 4]})` and
       `domain3 = Domain({'c': bf.Sampler('n')})` then `domain1 * domain2 * domain3` will have
       all options and generate 4 configs: `{'a': 1, 'b': 3, 'c': xi_1}`, `{'a': 1, 'b': 4, 'c': xi_2}`,
       `{'a': 2, 'b': 3, 'c': xi_3}`, `{'a': 2, 'b': 4, 'c': xi_4}` where xi_i are independent samples from
       normal distribution. The same resulting domain can be defined as `Domain({'a': [1, 2], 'b': [3, 4],
       'c': bf.Sampler('n')})`.
    #. multiplication by @: element-wise multiplication of array-like options.
       For example, if `domain1 = Domain({'a': [1, 2]})` and `domain2 = Domain({'b': [3, 4]})` then
       `domain1 @ domain2` will have two configs:
       `{'a': 1, `b`: 3}`, `{'a': 2, `b`: 4}`.
    #. multiplication with weights: can be used to sample configs from sum of domains.
       For example, the first ten configs from `0.3 * Domain({'p1': NS('n', loc=-10)}) + 0.2 * Domain({'p2': NS('u')})
       + 0.5 * Domain({'p3': NS('n', loc=10)})` will be `{'p1': -10.3059}, {'p3': 8.9959},
       {'p3': 9.1302}, {'p3': 10.2611}, {'p1': -7.9388}, {'p2': 0.5455}, {'p1': -9.2497},
       {'p3': 9.9769}, {'p2': 0.3510}, {'p3': 8.8519}` (depends on seed).
       If you sum options with and without weights, they are grouped into consequent groups where all options has or
       not weights, for each group configs are generated consequently (for groups with weights) or sampled as described
       above. For example, for `domain = domain1 + 1.2 * domain2 + 2.3 * domain3 + domain4 + 1. * domain5` we will get:

       - all configs from domain1
       - configs will be sampled from 1.2 * domain2 + 2.3 * domain3
       - all configs from domain4
       - configs will be sampled from 1. * domain4

       If one of the domains here is a sampler-like domain, then samples from that domain will be generated endlessly.
    """
    def __init__(self, domain=None, **kwargs):
        # `cubes` is a list of option lists; `weights` holds one weight per
        # cube, with `np.nan` marking cubes without an explicit weight
        # (see `__mul__` / `_get_sampling_blocks`).
        if isinstance(domain, dict):
            self.cubes = [self.create_aliases(domain)]
            self.weights = np.array([np.nan])
        elif isinstance(domain, list) and all(isinstance(item, list) for item in domain):
            self.cubes = domain
            self.weights = np.array([np.nan] * len(domain))
        elif isinstance(domain, Domain):
            # Shallow copy: the cubes list and the weights array are copied,
            # option objects themselves are shared.
            self.cubes = copy(domain.cubes)
            self.weights = copy(domain.weights)
        elif len(kwargs) > 0:
            self.cubes = [self.create_aliases(kwargs)]
            self.weights = np.array([np.nan])
        elif domain is None:
            self.cubes = []
            self.weights = np.array([])
        else:
            raise ValueError(f'domain can be Domain, dict or nested list but {type(domain)} were given')

        self.updates = []
        self.n_produced = 0
        self._iterator = None
        self.n_items = None
        self.n_reps = 1
        self.repeat_each = None
        self.n_updates = 0
        self.additional = True
        self.create_id_prefix = False
        self.random_state = None
        self.values_indices = dict()

    def _get_all_options_names(self):
        """ Collect unique option aliases over all cubes ('repetition' excluded). """
        options = []
        for cube in self.cubes:
            for option in cube:
                alias = option[0].alias
                if alias not in options and alias != 'repetition':
                    options.append(alias)
        return options

    def create_aliases(self, options):
        """ Create aliases by wrapping into Alias class for each key and value of the dict. """
        aliases_options = []
        for parameter, values in options.items():
            parameter = Alias(parameter)
            if isinstance(values, (list, tuple, np.ndarray)):
                values = [Alias(value) for value in values]
            elif isinstance(values, Sampler):
                pass
            else:
                raise TypeError('`values` must be array-like object or Sampler but {} were given'.format(type(values)))
            aliases_options += [(parameter, values)]
        return aliases_options

    def set_iter_params(self, n_items=None, n_reps=1, repeat_each=None, produced=0, additional=True,
                        create_id_prefix=False, seed=None):
        """ Set parameters for iterator.

        Parameters
        ----------
        n_items : int or None
            the number of configs that will be generated from domain. If the size
            of domain is less then `n_items`, elements will be repeated. If `n_items`
            is `None` and there is not a cube that consists only of sampler-options
            then `n_items` will be set to the number of configs that can be produced
            from that domain. If `n_items` is None and there is a cube that consists
            only of sampler-option then domain will produce infinite number of configs.
        n_reps : int
            each element will be repeated `n_reps` times.
        repeat_each : int
            if there is not a cube that consists only of sampler-options then
            elements will be repeated after producing `repeat_each` configs. Else
            `repeat_each` will be set to the number of configs that can be produced
            from domain.
        produced : int
            how many configs was produced before (is needed to use after domain update).
        additional : bool
            append 'repetition' and 'updates' to config or not.
        seed : bool or int or object with a seed sequence attribute
            see :meth:`~batchflow.utils_random.make_seed_sequence`.
        """
        n_configs = self.len # None means that domain has samplers
        self.n_items = n_items or n_configs
        self.n_reps = n_reps
        if self.n_items is not None:
            self.repeat_each = repeat_each or self.n_items
        else:
            # infinite domain: fall back to a fixed chunk size for repetitions
            self.repeat_each = repeat_each or 100
        self.n_produced = produced
        self.additional = additional
        self.create_id_prefix = create_id_prefix
        self.random_state = make_rng(seed)
        self.reset_iter()

    def set_update(self, function, when, **kwargs):
        """ Set domain update parameters. """
        if isinstance(when, (int, str)):
            when = [when]
        iter_kwargs = dict()
        # Iterator-related kwargs are stored separately and re-applied to the
        # updated domain in `update`.
        for attr in ['n_items', 'n_reps', 'repeat_each']:
            iter_kwargs[attr] = kwargs.pop(attr) if attr in kwargs else getattr(self, attr)
        self.updates.append({
            'function': function,
            'when': when,
            'kwargs': kwargs,
            'iter_kwargs': iter_kwargs
        })

    def update(self, generated, research):
        """ Update domain by its update functions. If each returns None, domain will not be updated. """
        for update in self.updates:
            if must_execute(generated-1, update['when'], self.n_produced + self.size):
                kwargs = eval_expr(update['kwargs'], research=research)
                domain = update['function'](**kwargs)
                # Carry the update machinery and bookkeeping over to the new domain.
                domain.updates = self.updates
                domain.n_updates = self.n_updates + 1
                domain.values_indices = self.values_indices
                domain.set_iter_params(produced=generated, additional=self.additional, seed=self.random_state,
                                       create_id_prefix=self.create_id_prefix, **update['iter_kwargs'])
                return domain
        return None

    @property
    def size(self):
        """ Return the number of configs that will be produces from domain. """
        if self.n_items is not None:
            return self.n_reps * self.n_items
        return None

    @property
    def len(self):
        """ Return the number of configs that will be produced from domain without repetitions. None if infinite. """
        size = 0
        for cube in self.cubes:
            lengths = [len(values) for _, values in cube if isinstance(values, (list, tuple, np.ndarray))]
            if len(lengths) == 0:
                # a cube made only of sampler-options produces configs endlessly
                return None
            # np.prod instead of the deprecated np.product (removed in NumPy 2.0)
            size += np.prod(lengths)
        return size

    def __len__(self):
        """ __len__ can't return None so we have to separate functions. """
        cube_sizes = [
            np.prod([len(values) for _, values in cube if isinstance(values, (list, tuple, np.ndarray))], dtype='int')
            for cube in self.cubes
        ] # np.prod returns 1.0 for empty list
        return max(0, sum(cube_sizes))

    def __mul__(self, other):
        """ Cartesian product with another Domain, or weight assignment for a number. """
        if isinstance(other, float) and np.isnan(other):
            return self
        if self.cubes is None:
            result = other
        elif isinstance(other, (int, float)):
            # numeric factor sets (or scales) the cube weights
            result = self
            weights = self.weights
            weights[np.isnan(weights)] = 1
            result.weights = weights * other
        elif isinstance(other, Domain):
            if other.cubes is None:
                result = self
            else:
                res = list(product(self.cubes, other.cubes))
                res = [item[0] + item[1] for item in res]
                pairs = np.array(list(product(self.weights, other.weights)))
                # product of weights; pairs where both are NaN stay unweighted
                weights = np.array([np.nanprod(item) for item in pairs])
                nan_mask = np.array([np.isnan(item).all() for item in pairs])
                weights[nan_mask] = np.nan
                result = Domain()
                result.cubes = res
                result.weights = weights
        else:
            raise TypeError('Arguments must be numeric or Domains')
        return result

    def __matmul__(self, other):
        """ Element-wise (zip-like) product of two scalar-product domains. """
        if self._is_array_option():
            that = self._to_scalar_product()
        else:
            that = self
        if other._is_array_option():
            other = other._to_scalar_product()

        if that._is_scalar_product() and other._is_scalar_product():
            if len(that.cubes) == len(other.cubes):
                cubes = [cube_1 + cube_2 for cube_1, cube_2 in zip(that.cubes, other.cubes)]
                weights = np.nanprod(np.stack([that.weights, other.weights]), axis=0)
                nan_mask = np.logical_and(np.isnan(that.weights), np.isnan(other.weights))
                weights[nan_mask] = np.nan

                domain = Domain()
                domain.cubes = cubes
                domain.weights = weights
                return domain
        raise ValueError("The numbers of domain cubes must conincide.")

    def __rmul__(self, other):
        return self * other

    def __add__(self, other):
        """ Concatenate two domains (union of cubes, not a merge of configs). """
        if self.cubes is None:
            result = other
        elif other.cubes is None:
            result = self
        else: # Domain
            result = Domain()
            result.cubes = self.cubes + other.cubes
            result.weights = np.concatenate((self.weights, other.weights))
        return result

    def __getitem__(self, index):
        domain = Domain()
        domain.cubes = [self.cubes[index]]
        return domain

    def __eq__(self, other):
        return self.cubes == other.cubes

    def __next__(self):
        return next(self.iterator)

    def reset_iter(self):
        """ Reset iterator and set seeds for samplers. """
        for cube in self.cubes:
            for _, values in cube:
                if isinstance(values, Sampler):
                    values.state = make_rng(self.random_state)
        self._iterator = None

    def create_iter(self):
        """ Create iterator. """
        blocks = self._get_sampling_blocks()
        keys = self._get_all_options_names()
        def _iterator():
            while True:
                for block in blocks:
                    # fancy indexing copies, so mutating `weights` below
                    # does not touch `self.weights`
                    weights = self.weights[block]
                    weights[np.isnan(weights)] = 1
                    iterators = [self._cube_iterator(cube) for cube in np.array(self.cubes, dtype=object)[block]]
                    while len(iterators) > 0:
                        # draw the next cube proportionally to its weight;
                        # exhausted cubes are dropped from the pool
                        index = self.random_state.choice(len(block), p=weights/weights.sum())
                        try:
                            yield next(iterators[index])
                        except StopIteration:
                            del iterators[index]
                            weights = np.delete(weights, index)
                            block = np.delete(block, index)
        def _iterator_with_repetitions():
            iterator = _iterator()
            if self.n_reps == 1:
                i = 0
                if self.additional:
                    additional = ConfigAlias([('repetition', 0)]) + ConfigAlias([('updates', self.n_updates)])
                else:
                    additional = ConfigAlias()
                while self.n_items is None or i < self.n_items:
                    res = next(iterator) + additional # pylint: disable=stop-iteration-return
                    if self.create_id_prefix:
                        res.set_prefix(keys, n_digits=int(self.create_id_prefix))
                    yield res
                    i += 1
            else:
                i = 0
                while self.n_items is None or i < self.n_items:
                    # materialize a chunk of `repeat_each` configs, then replay
                    # it `n_reps` times with increasing 'repetition' index
                    samples = list(islice(iterator, int(self.repeat_each)))
                    for rep in range(self.n_reps):
                        if self.additional:
                            additional = ConfigAlias({'repetition': rep}) + ConfigAlias({'updates': self.n_updates})
                        else:
                            additional = ConfigAlias()
                        for sample in samples:
                            res = sample + additional
                            if self.create_id_prefix:
                                res.set_prefix(keys, n_digits=int(self.create_id_prefix))
                            yield res
                    i += self.repeat_each
        self._iterator = _iterator_with_repetitions()

    def _get_sampling_blocks(self):
        """ Return groups of cubes. Cubes are split into consequent groups where all cubes has or not weights. """
        # Cumulative-sum trick: each unweighted (NaN) cube gets a unique block
        # index while consecutive weighted cubes share one index.
        incl = np.cumsum(np.isnan(self.weights))
        excl = np.concatenate(([0], incl[:-1]))
        block_indices = incl + excl
        return [np.where(block_indices == i)[0] for i in set(block_indices)]

    @property
    def iterator(self):
        """ Get domain iterator. """
        if self._iterator is None:
            self.set_iter_params(self.n_items, self.n_reps, self.repeat_each, self.n_produced,
                                 self.additional, self.create_id_prefix, self.random_state)
            self.create_iter()
        return self._iterator

    def _is_array_option(self):
        """ Return True if domain consists of only one array-like option. """
        if len(self.cubes) == 1:
            if len(self.cubes[0]) == 1:
                if isinstance(self.cubes[0][0][1], (list, tuple, np.ndarray)):
                    return True
        return False

    def _is_scalar_product(self):
        """ Return True if domain is a result of matmul. It means that each cube has
        an only one array-like option of length 1.
        """
        for cube in self.cubes:
            samplers = [name for name, values in cube if isinstance(values, Sampler)]
            if len(samplers) > 0:
                return False
            if any(len(values) != 1 for _, values in cube):
                return False
        return True

    def _to_scalar_product(self):
        """ Transform domain to the matmul format (see :meth:`~.Domain._is_scalar_product`)"""
        if self._is_array_option():
            name, values = self.cubes[0][0]
            # split one array-like option into one single-value cube per item
            cubes = [[[name, [value]]] for value in values]
            weights = np.concatenate([[self.weights[0]] * len(cubes)])
            domain = Domain()
            domain.cubes = cubes
            domain.weights = weights
            return domain
        if self._is_scalar_product():
            return Domain(self)
        raise ValueError("Domain cannot be represented as scalar product.")

    def _cube_iterator(self, cube):
        """ Return iterator from the cube. All array-like options will be transformed
        to Cartesian product and all sampler-like options will produce independent samples
        for each config. """
        arrays = [item for item in cube if isinstance(item[1], (list, tuple, np.ndarray))]
        samplers = [item for item in cube if isinstance(item[1], Sampler)]
        if len(arrays) > 0:
            for combination in list(product(*[self.option_items(name, values) for name, values in arrays])):
                res = []
                for name, values in samplers:
                    res.append(self.option_sample(name, values))
                res.extend(combination)
                yield sum(res, ConfigAlias())
        else:
            # sampler-only cube: yields configs until some option iterator stops
            iterators = [self.option_iterator(name, values) for name, values in cube]
            while True:
                try:
                    yield sum([next(iterator) for iterator in iterators], ConfigAlias())
                except StopIteration:
                    break

    def option_items(self, name, values):
        """ Return all possible `ConfigAlias` instances which can be created from the option.

        Returns
        -------
        list of `ConfigAlias` objects.
        """
        if not isinstance(values, (list, tuple, np.ndarray)):
            raise TypeError('`values` must be array-like object but {} were given'.format(type(values)))
        res = []
        for value in values:
            if self.create_id_prefix:
                n_digits = self.create_id_prefix if self.create_id_prefix is not True else 1
                # assign each distinct value alias a stable index per option
                option_values = self.values_indices.get(name.alias, dict())
                current_index = option_values.get(value.alias, len(option_values))
                option_values[value.alias] = current_index
                self.values_indices[name.alias] = option_values
                fmt = ("{:0" + str(n_digits) + "d}").format(current_index)
                res.append(ConfigAlias([[name, value], ["#" + name.alias, fmt]]))
            else:
                res.append(ConfigAlias([[name, value]]))
        return res

    def option_sample(self, name, values, size=None):
        """ Return `ConfigAlias` objects created on the base of Sampler-option.

        Parameters
        ----------
        size : int or None
            the size of the sample

        Returns
        -------
        ConfigAlias (if size is None) or list of ConfigAlias objects (otherwise).
        """
        if not isinstance(values, Sampler):
            raise TypeError('`values` must be Sampler but {} was given'.format(type(values)))
        res = []
        for _ in range(size or 1):
            if self.create_id_prefix:
                n_digits = self.create_id_prefix if self.create_id_prefix is not True else 1
                # sampled values are always new, so just count samples per option
                current_index = self.values_indices.get(name.alias, -1) + 1
                self.values_indices[name.alias] = current_index
                fmt = ("{:0" + str(n_digits) + "d}").format(current_index)
                res.append(ConfigAlias([[name, values.sample(1)[0, 0]], ["#" + name.alias, fmt]]))
            else:
                res.append(ConfigAlias([[name, values.sample(1)[0, 0]]]))
        if size is None:
            res = res[0]
        return res

    def option_iterator(self, name, values):
        """ Produce `ConfigAlias` from the option.

        Returns
        -------
        generator.
        """
        if isinstance(values, Sampler):
            while True:
                yield ConfigAlias([[name, values.sample(1)[0, 0]]])
        else:
            for value in values:
                yield ConfigAlias([[name, value]])

    def __repr__(self):
        # `text` instead of a local named `repr` to avoid shadowing the builtin
        text = ''
        cubes_reprs = []
        spacing = 4 * ' '
        for cube in self.cubes:
            cubes_reprs += [' * '.join([self._option_repr(name, values) for name, values in cube])]
        text += ' + \n'.join(cubes_reprs)
        text += 2 * '\n' + 'params:\n'
        text += '\n'.join([spacing + f"{attr}={getattr(self, attr)}" for attr in ['n_items', 'n_reps', 'repeat_each']])
        if len(self.updates) > 0:
            text += 2 * '\n' + 'updates:\n'
            update_reprs = []
            for update in self.updates:
                update_reprs += [str('\n'.join(spacing + f"{key}: {value}" for key, value in update.items()))]
            text += '\n\n'.join(update_reprs)
        return text

    def _option_repr(self, name, values):
        alias = name.alias
        if isinstance(values, (list, tuple, np.ndarray)):
            values = [item.alias if not isinstance(item.value, str) else f"'{item.value}'" for item in values]
            values = f'[{", ".join(values)}]'
        return '{0}: {1}'.format(alias, values)
class Option(Domain):
    """ Single-parameter shorthand: equivalent to ``Domain({name: values})``. """
    def __init__(self, name, values):
        super().__init__(domain={name: values})
KV = Alias # backward-compatibility alias: old saved researches reference `KV`, keep it so they still load and transform
| [
"numpy.product",
"numpy.where",
"numpy.delete",
"itertools.product",
"numpy.nanprod",
"numpy.array",
"numpy.stack",
"numpy.isnan",
"numpy.concatenate",
"copy.deepcopy",
"copy.copy"
] | [((21409, 21441), 'numpy.concatenate', 'np.concatenate', (['([0], incl[:-1])'], {}), '(([0], incl[:-1]))\n', (21423, 21441), True, 'import numpy as np\n'), ((5288, 5310), 'copy.deepcopy', 'deepcopy', (['self._config'], {}), '(self._config)\n', (5296, 5310), False, 'from copy import copy, deepcopy\n'), ((5313, 5336), 'copy.deepcopy', 'deepcopy', (['other._config'], {}), '(other._config)\n', (5321, 5336), False, 'from copy import copy, deepcopy\n'), ((9494, 9512), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (9502, 9512), True, 'import numpy as np\n'), ((15413, 15433), 'numpy.product', 'np.product', (['lengthes'], {}), '(lengthes)\n', (15423, 15433), True, 'import numpy as np\n'), ((15888, 15903), 'numpy.isnan', 'np.isnan', (['other'], {}), '(other)\n', (15896, 15903), True, 'import numpy as np\n'), ((21370, 21392), 'numpy.isnan', 'np.isnan', (['self.weights'], {}), '(self.weights)\n', (21378, 21392), True, 'import numpy as np\n'), ((18128, 18173), 'numpy.concatenate', 'np.concatenate', (['(self.weights, other.weights)'], {}), '((self.weights, other.weights))\n', (18142, 18173), True, 'import numpy as np\n'), ((21494, 21522), 'numpy.where', 'np.where', (['(block_indices == i)'], {}), '(block_indices == i)\n', (21502, 21522), True, 'import numpy as np\n'), ((9761, 9779), 'copy.copy', 'copy', (['domain.cubes'], {}), '(domain.cubes)\n', (9765, 9779), False, 'from copy import copy, deepcopy\n'), ((9807, 9827), 'copy.copy', 'copy', (['domain.weights'], {}), '(domain.weights)\n', (9811, 9827), False, 'from copy import copy, deepcopy\n'), ((16114, 16131), 'numpy.isnan', 'np.isnan', (['weights'], {}), '(weights)\n', (16122, 16131), True, 'import numpy as np\n'), ((17388, 17427), 'numpy.stack', 'np.stack', (['[that.weights, other.weights]'], {}), '([that.weights, other.weights])\n', (17396, 17427), True, 'import numpy as np\n'), ((17479, 17501), 'numpy.isnan', 'np.isnan', (['that.weights'], {}), '(that.weights)\n', (17487, 17501), True, 'import numpy as 
np\n'), ((17503, 17526), 'numpy.isnan', 'np.isnan', (['other.weights'], {}), '(other.weights)\n', (17511, 17526), True, 'import numpy as np\n'), ((9940, 9958), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (9948, 9958), True, 'import numpy as np\n'), ((19063, 19080), 'numpy.isnan', 'np.isnan', (['weights'], {}), '(weights)\n', (19071, 19080), True, 'import numpy as np\n'), ((10043, 10055), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10051, 10055), True, 'import numpy as np\n'), ((16333, 16365), 'itertools.product', 'product', (['self.cubes', 'other.cubes'], {}), '(self.cubes, other.cubes)\n', (16340, 16365), False, 'from itertools import product, islice\n'), ((16463, 16499), 'itertools.product', 'product', (['self.weights', 'other.weights'], {}), '(self.weights, other.weights)\n', (16470, 16499), False, 'from itertools import product, islice\n'), ((16538, 16554), 'numpy.nanprod', 'np.nanprod', (['item'], {}), '(item)\n', (16548, 16554), True, 'import numpy as np\n'), ((19157, 19191), 'numpy.array', 'np.array', (['self.cubes'], {'dtype': 'object'}), '(self.cubes, dtype=object)\n', (19165, 19191), True, 'import numpy as np\n'), ((19559, 19584), 'numpy.delete', 'np.delete', (['weights', 'index'], {}), '(weights, index)\n', (19568, 19584), True, 'import numpy as np\n'), ((19621, 19644), 'numpy.delete', 'np.delete', (['block', 'index'], {}), '(block, index)\n', (19630, 19644), True, 'import numpy as np\n'), ((16612, 16626), 'numpy.isnan', 'np.isnan', (['item'], {}), '(item)\n', (16620, 16626), True, 'import numpy as np\n')] |
"""Transforms
* :func:`.quantile_transform`
"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
def quantile_transform(v, res=101):
    """Quantile-transform a vector to lie between 0 and 1"""
    grid = np.linspace(0, 100, res)          # percentile grid, 0..100
    edges = np.nanpercentile(v, grid)        # empirical quantile values of v
    return np.interp(v, edges, grid / 100.0) # map each value to its quantile
class Scaler(BaseEstimator, TransformerMixin):
    """Z-scores each column (and outputs a DataFrame)

    Parameters
    ----------
    cols : list of str
        Columns to scale. Default is to scale all columns
    """

    def __init__(self, cols=None):
        # Check types
        if cols is not None and not isinstance(cols, (str, list)):
            raise TypeError('cols must be a str or list of str')
        # Store parameters (a single column name is normalized to a list)
        if isinstance(cols, str):
            self.cols = [cols]
        else:
            self.cols = cols

    def fit(self, X, y=None):
        """Fit the scaler to X and y.

        Parameters
        ----------
        X : pandas DataFrame of shape (n_samples, n_columns)
            Independent variable matrix with columns to scale
        y : pandas Series of shape (n_samples,), optional
            Dependent variable values. Unused; accepted (with a default of
            None) for scikit-learn estimator API compatibility.

        Returns
        -------
        Scaler
            Returns self, the fit object.
        """
        # Scale all columns by default
        if self.cols is None:
            self.cols = X.columns.tolist()
        # Compute the mean and std of each column
        # NOTE(review): pandas .std() uses ddof=1 (sample std); a constant
        # column yields std 0 and division by zero in transform — confirm
        # inputs are non-degenerate.
        self.means = dict()
        self.stds = dict()
        for col in self.cols:
            self.means[col] = X[col].mean()
            self.stds[col] = X[col].std()
        # Return fit object
        return self

    def transform(self, X, y=None):
        """Perform the scaling.

        Parameters
        ----------
        X : pandas DataFrame of shape (n_samples, n_columns)
            Independent variable matrix with columns to scale

        Returns
        -------
        pandas DataFrame
            Input DataFrame with scaled columns
        """
        # Scale each column on a copy; the input is left untouched
        Xo = X.copy()
        for col in self.cols:
            Xo[col] = (X[col]-self.means[col])/self.stds[col]
        return Xo

    def fit_transform(self, X, y=None):
        """Fit and transform the data.

        Parameters
        ----------
        X : pandas DataFrame of shape (n_samples, n_columns)
            Independent variable matrix with columns to scale
        y : pandas Series of shape (n_samples,), optional
            Dependent variable values.

        Returns
        -------
        pandas DataFrame
            Input DataFrame with scaled columns
        """
        return self.fit(X, y).transform(X, y)
class Imputer(BaseEstimator, TransformerMixin):
    """Imputes missing values (and outputs a DataFrame)

    Parameters
    ----------
    cols : list of str
        Columns to impute. Default is to impute all columns
    method : str
        Method to use for imputation. 'mean' or 'median'.
        Default = 'median'
    """

    def __init__(self, cols=None, method='median'):
        # Check types
        if cols is not None and not isinstance(cols, (str, list)):
            raise TypeError('cols must be a str or list of str')
        if not isinstance(method, str):
            raise TypeError('method must be a str')
        if method not in ['mean', 'median']:
            raise ValueError('method must be \'median\' or \'mean\'')
        # Store parameters (a single column name is normalized to a list)
        if isinstance(cols, str):
            self.cols = [cols]
        else:
            self.cols = cols
        self.method = method

    def fit(self, X, y=None):
        """Fit the imputer to X and y.

        Parameters
        ----------
        X : pandas DataFrame of shape (n_samples, n_columns)
            Independent variable matrix with columns to impute
        y : pandas Series of shape (n_samples,), optional
            Dependent variable values. Unused; accepted (with a default of
            None) for scikit-learn estimator API compatibility.

        Returns
        -------
        Imputer
            Returns self, the fit object.
        """
        # Impute all columns by default
        if self.cols is None:
            self.cols = X.columns.tolist()
        # Compute the value to use for imputation
        self.val = dict()
        for col in self.cols:
            if self.method == 'mean':
                self.val[col] = X[col].mean()
            else:
                self.val[col] = X[col].median()
        # Return fit object
        return self

    def transform(self, X, y=None):
        """Perform the imputation.

        Parameters
        ----------
        X : pandas DataFrame of shape (n_samples, n_columns)
            Independent variable matrix with columns to impute

        Returns
        -------
        pandas DataFrame
            Input DataFrame with imputed values
        """
        # Fill missing cells on a copy; the input is left untouched
        Xo = X.copy()
        for col in self.cols:
            Xo.loc[Xo[col].isnull(), col] = self.val[col]
        return Xo

    def fit_transform(self, X, y=None):
        """Fit and transform the data.

        Parameters
        ----------
        X : pandas DataFrame of shape (n_samples, n_columns)
            Independent variable matrix with columns to impute
        y : pandas Series of shape (n_samples,), optional
            Dependent variable values.

        Returns
        -------
        pandas DataFrame
            Input DataFrame with imputed values
        """
        return self.fit(X, y).transform(X, y)
| [
"numpy.linspace",
"numpy.nanpercentile",
"numpy.interp"
] | [((281, 305), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', 'res'], {}), '(0, 100, res)\n', (292, 305), True, 'import numpy as np\n'), ((317, 339), 'numpy.nanpercentile', 'np.nanpercentile', (['v', 'x'], {}), '(v, x)\n', (333, 339), True, 'import numpy as np\n'), ((351, 380), 'numpy.interp', 'np.interp', (['v', 'prcs', '(x / 100.0)'], {}), '(v, prcs, x / 100.0)\n', (360, 380), True, 'import numpy as np\n')] |
__author__ = '<NAME>'
import numpy as np
import cv2
import logicFunctions as lf
def myMasking(myImage, myMask):
    """Sliding-window correlation of `myImage` with an odd-sized `myMask`.

    For a 2-D image returns a float64 array of shape
    (m - a + 1, n - b + 1) ('valid' mode) where each output pixel is the sum
    of the element-wise product of the mask with the window centred there.
    A 3-channel image is processed channel-wise via cv2.split/merge.
    Returns None for invalid input (non-arrays, mask larger than the image,
    even mask dimensions, or unsupported rank).
    """
    if not isinstance(myImage, np.ndarray) or not isinstance(myMask, np.ndarray):
        return None
    Dim = myImage.shape
    if len(Dim) == 2:
        a, b = myMask.shape
        m, n = Dim[0], Dim[1]
        # The mask must fit inside the image, and both its dimensions must be
        # odd so that it has a unique centre pixel.
        if m < a or n < b or a % 2 == 0 or b % 2 == 0:
            return None
        p = np.int64(m - (a - 1))           # output height
        q = np.int64(n - (b - 1))           # output width
        LRBorders = np.int64((b - 1) // 2)   # half-width of the mask
        UDBorders = np.int64((a - 1) // 2)   # half-height of the mask
        # Cast once outside the loop instead of once per window.
        imagef = np.float64(myImage)
        maskf = np.float64(myMask)
        Result = np.zeros((p, q))
        for i in range(UDBorders, m - UDBorders):
            for j in range(LRBorders, n - LRBorders):
                window = imagef[i - UDBorders:i + UDBorders + 1, j - LRBorders:j + LRBorders + 1]
                Result[i - UDBorders, j - LRBorders] = np.sum(window * maskf)
        return Result
    if len(Dim) == 3:
        # NOTE(review): assumes exactly 3 channels; cv2.split on another depth
        # would not unpack into three values — confirm expected inputs.
        b, g, r = cv2.split(myImage)
        Mb = myMasking(b, myMask)
        Mg = myMasking(g, myMask)
        Mr = myMasking(r, myMask)
        if Mb is None or Mg is None or Mr is None:
            return None
        return cv2.merge((Mb, Mg, Mr))
    return None
def myHistPlotUint8(myImage):
    """Return the 256-bin intensity histogram of a 2-D uint8 image.

    The result is an int64 array of length 256 where entry k counts the
    pixels with grey level k. Returns None for non-arrays, non-2-D arrays,
    or arrays whose dtype is not uint8.
    """
    if not isinstance(myImage, np.ndarray):
        return None
    # Check dtype on the array itself instead of the first element, which
    # raised IndexError on empty images.
    if myImage.ndim != 2 or myImage.dtype != np.uint8:
        return None
    # bincount counts every grey level in one C-level pass; minlength pads
    # the result to the full 256-bin range even when high levels are absent.
    return np.bincount(myImage.ravel(), minlength=256).astype(np.int64)
| [
"cv2.merge",
"numpy.int64",
"numpy.float64",
"numpy.max",
"numpy.zeros",
"cv2.split",
"numpy.min",
"numpy.mod"
] | [((1691, 1706), 'numpy.max', 'np.max', (['myImage'], {}), '(myImage)\n', (1697, 1706), True, 'import numpy as np\n'), ((1725, 1740), 'numpy.min', 'np.min', (['myImage'], {}), '(myImage)\n', (1731, 1740), True, 'import numpy as np\n'), ((1759, 1788), 'numpy.zeros', 'np.zeros', (['(256)'], {'dtype': 'np.int64'}), '(256, dtype=np.int64)\n', (1767, 1788), True, 'import numpy as np\n'), ((439, 460), 'numpy.int64', 'np.int64', (['(m - (a - 1))'], {}), '(m - (a - 1))\n', (447, 460), True, 'import numpy as np\n'), ((478, 499), 'numpy.int64', 'np.int64', (['(n - (b - 1))'], {}), '(n - (b - 1))\n', (486, 499), True, 'import numpy as np\n'), ((525, 546), 'numpy.int64', 'np.int64', (['((b - 1) / 2)'], {}), '((b - 1) / 2)\n', (533, 546), True, 'import numpy as np\n'), ((572, 593), 'numpy.int64', 'np.int64', (['((a - 1) / 2)'], {}), '((a - 1) / 2)\n', (580, 593), True, 'import numpy as np\n'), ((618, 634), 'numpy.zeros', 'np.zeros', (['(p, q)'], {}), '((p, q))\n', (626, 634), True, 'import numpy as np\n'), ((1046, 1064), 'cv2.split', 'cv2.split', (['myImage'], {}), '(myImage)\n', (1055, 1064), False, 'import cv2\n'), ((1351, 1374), 'cv2.merge', 'cv2.merge', (['(Mb, Mg, Mr)'], {}), '((Mb, Mg, Mr))\n', (1360, 1374), False, 'import cv2\n'), ((372, 384), 'numpy.mod', 'np.mod', (['b', '(2)'], {}), '(b, 2)\n', (378, 384), True, 'import numpy as np\n'), ((779, 868), 'numpy.float64', 'np.float64', (['myImage[i - UDBorders:i + UDBorders + 1, j - LRBorders:j + LRBorders + 1]'], {}), '(myImage[i - UDBorders:i + UDBorders + 1, j - LRBorders:j +\n LRBorders + 1])\n', (789, 868), True, 'import numpy as np\n'), ((941, 959), 'numpy.float64', 'np.float64', (['myMask'], {}), '(myMask)\n', (951, 959), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
sbpy Activity Core Module
Core module functions and classes, especially for handling coma
geometries.
created on June 23, 2017
"""
__all__ = [
'Aperture',
'CircularAperture',
'AnnularAperture',
'RectangularAperture',
'GaussianAperture',
]
from abc import ABC, abstractmethod
import numpy as np
import astropy.units as u
from .. import data as sbd
from .. import units as sbu
class Aperture(ABC):
    """
    Abstract base class for photometric apertures.
    Notes
    -----
    The shape of the aperture must be passed as the first argument of
    `__init__`, or else `as_length` and `as_angle` must be overridden.
    """
    def __init__(self, dim):
        # an aperture dimension must be an angular or a linear size
        if not dim.unit.is_equivalent((u.radian, u.meter)):
            raise u.UnitTypeError(
                'aperture must be defined with angles or lengths.')
        self.dim = dim
    def __str__(self):
        """Description of the aperture."""
        # assumes preferred format for __repr__
        return repr(self)[1:-1].replace('Aperture:', ' aperture,')
    @abstractmethod
    def __repr__(self):
        """Preferred format <ShapedAperture: size>"""
    @sbd.dataclass_input(eph=sbd.Ephem)
    @sbd.quantity_to_dataclass(eph=(sbd.Ephem, 'delta'))
    def as_angle(self, eph):
        """This aperture in units of angle.
        Parameters
        ----------
        eph : dictionary-like, `~sbpy.data.Ephem`, or `~astropy.units.Quantity`
            The observer-target distance (``delta``).
        Returns
        -------
        aper
        """
        # convert the stored size to arcsec via the observer-target
        # distance, then rebuild an aperture of the same concrete type
        dim = self.dim.to('arcsec', sbu.projected_size(eph))
        return type(self)(dim)
    @sbd.dataclass_input(eph=sbd.Ephem)
    @sbd.quantity_to_dataclass(eph=(sbd.Ephem, 'delta'))
    def as_length(self, eph):
        """This aperture in units of length.
        Parameters
        ----------
        eph : dictionary-like, `~sbpy.data.Ephem`, or `~astropy.units.Quantity`
            The observer-target distance (``delta``).
        Returns
        -------
        aper
        """
        # same conversion as `as_angle`, but to a projected length in km
        dim = self.dim.to('km', sbu.projected_size(eph))
        return type(self)(dim)
    @abstractmethod
    def coma_equivalent_radius(self):
        """Circular aperture radius that yields same flux for a 1/ρ coma.
        Returns
        -------
        rap : `~astropy.units.Quantity`
        """
class CircularAperture(Aperture):
    """Circular aperture projected at the distance of the target.
    Parameters
    ----------
    radius : `~astropy.units.Quantity`
        Angular or projected linear radius for the aperture.
    """
    def __init__(self, radius):
        super().__init__(radius)
    @property
    def radius(self):
        """Aperture radius."""
        return self.dim
    def __repr__(self):
        return '<CircularAperture: radius {}>'.format(self.dim)
    def coma_equivalent_radius(self):
        # a circular aperture is trivially its own equivalent circle
        return self.dim
    coma_equivalent_radius.__doc__ = Aperture.coma_equivalent_radius.__doc__
class AnnularAperture(Aperture):
    """Annular aperture projected at the distance of the target.
    Parameters
    ----------
    shape : `~astropy.units.Quantity`
        A two-element `~astropy.units.Quantity` of angular or
        projected linear size for the inner and outer radius of the
        aperture.
    """
    def __init__(self, shape):
        # an annulus is fully described by exactly two radii
        if len(shape) != 2:
            raise ValueError('shape must be 2-elements')
        super().__init__(shape)
    @property
    def shape(self):
        """Annulus inner and outer radii."""
        return self.dim
    def __repr__(self):
        return ('<AnnularAperture: radii {0[0].value:}–{0[1]:}>'
                .format(self.dim))
    def coma_equivalent_radius(self):
        # for a 1/ρ coma, an annulus collects the same flux as a
        # circular aperture whose radius equals the annulus width
        outer = max(self.dim)
        inner = min(self.dim)
        return outer - inner
    coma_equivalent_radius.__doc__ = Aperture.coma_equivalent_radius.__doc__
class RectangularAperture(Aperture):
    """Rectangular aperture projected at the distance of the target.
    Parameters
    ----------
    shape : `~astropy.units.Quantity`
        A two-element `~astropy.units.Quantity` of angular or
        projected linear size for the width and height of the
        aperture.  The order is not significant.
    """
    def __init__(self, shape):
        # width and height are required, nothing more
        if len(shape) != 2:
            raise ValueError('shape must be 2-elements')
        super().__init__(shape)
    @property
    def shape(self):
        """Rectangle dimensions."""
        return self.dim
    def __repr__(self):
        return ("<RectangularAperture: dimensions {0[0].value:}×{0[1]:}>"
                .format(self.dim))
    def coma_equivalent_radius(self):
        # Integrate a 1/rho surface brightness over the rectangle in
        # polar coordinates one "octant" at a time:
        #   Int_0^th Int_0^(x*sec(th)) dr dth = x * log(tan(th) + sec(th))
        # with th = arctan(y / x), sec(arctan(y / x)) = sqrt(1 + (y/x)**2),
        # and x, y the *full* rectangle dimensions (so each must be
        # halved).  Two such octants cover 1/4 of the rectangle, the
        # full integral is 4 * (I1 + I2), and the equivalent circular
        # aperture integral is 2 * pi * rho; the constant factors
        # cancel into the expression below.
        width, height = self.shape
        octant1 = width * np.log(height / width
                                + np.sqrt(1 + (height / width)**2))
        octant2 = height * np.log(width / height
                                 + np.sqrt(1 + (width / height)**2))
        return (octant1 + octant2) / np.pi
    coma_equivalent_radius.__doc__ = Aperture.coma_equivalent_radius.__doc__
class GaussianAperture(Aperture):
    """Gaussian-shaped aperture, e.g., for radio observations.
    The aperture is normalized to 1.0 at the center.
    Parameters
    ----------
    sigma : `~astropy.units.Quantity`, optional
        The width of the Gaussian beam (square-root of the variance)
        as an angular or projected size.
    fwhm : `~astropy.units.Quantity`, optional
        The full-width at half-maximum of the Gaussian beam as an
        angular or projected size.
    Notes
    -----
    One of `sigma` or `fwhm` is required.
    """
    def __init__(self, sigma=None, fwhm=None):
        if (sigma is None) and (fwhm is None):
            raise ValueError('One of `sigma` or `fwhm` must be defined')
        # store the 1-sigma width; FWHM = 2 * sqrt(2 * ln 2) * sigma
        width = sigma if sigma is not None else fwhm / 2.3548200450309493
        super().__init__(width)
    def __repr__(self):
        return "<GaussianAperture: 1-σ width {}>".format(self.dim)
    @property
    def sigma(self):
        """Beam Gaussian width."""
        return self.dim
    @property
    def fwhm(self):
        """Beam full-width at half-maximum."""
        return self.dim * 2.3548200450309493
    @sbd.dataclass_input
    @sbd.quantity_to_dataclass(eph=(sbd.Ephem, 'delta'))
    def __call__(self, rho, eph: sbd.Ephem=None):
        """Evaluate the aperture.
        Parameters
        ----------
        rho : `~astropy.units.Quantity`
            Position to evaluate, in units of length or angle.
        eph : dictionary-like, `~sbpy.data.Ephem`, or `~astropy.units.Quantity`, optional
            The observer-target distance (``delta``).  Use ``eph`` to
            convert between angles and lengths, as needed.
        """
        equiv = sbu.projected_size(eph) if eph is not None else []
        x = rho.to(self.dim.unit, equiv)
        # the beam is normalized to 1.0 at the center
        return np.exp(-x**2 / self.sigma**2 / 2)
    def coma_equivalent_radius(self):
        # the beam is normalized to 1.0 at the center
        return np.sqrt(np.pi / 2) * self.sigma
    coma_equivalent_radius.__doc__ = Aperture.coma_equivalent_radius.__doc__
| [
"numpy.exp",
"astropy.units.UnitTypeError",
"numpy.sqrt"
] | [((8026, 8063), 'numpy.exp', 'np.exp', (['(-x ** 2 / self.sigma ** 2 / 2)'], {}), '(-x ** 2 / self.sigma ** 2 / 2)\n', (8032, 8063), True, 'import numpy as np\n'), ((832, 899), 'astropy.units.UnitTypeError', 'u.UnitTypeError', (['"""aperture must be defined with angles or lengths."""'], {}), "('aperture must be defined with angles or lengths.')\n", (847, 899), True, 'import astropy.units as u\n'), ((8170, 8188), 'numpy.sqrt', 'np.sqrt', (['(np.pi / 2)'], {}), '(np.pi / 2)\n', (8177, 8188), True, 'import numpy as np\n'), ((5900, 5925), 'numpy.sqrt', 'np.sqrt', (['(1 + (y / x) ** 2)'], {}), '(1 + (y / x) ** 2)\n', (5907, 5925), True, 'import numpy as np\n'), ((5957, 5982), 'numpy.sqrt', 'np.sqrt', (['(1 + (x / y) ** 2)'], {}), '(1 + (x / y) ** 2)\n', (5964, 5982), True, 'import numpy as np\n')] |
import numpy
class Complex:
    """A minimal complex number holding a real and an imaginary part."""
    def __init__(self, a, b):
        self.real = a
        self.imag = b
    def multiply(this, that):
        """Return the product of two Complex values as a new Complex."""
        # (a + bi)(c + di) = (ac - bd) + (ad + bc)i
        real_part = this.real * that.real - this.imag * that.imag
        imag_part = this.real * that.imag + this.imag * that.real
        return Complex(real_part, imag_part)
def generate_fractal():
    """Print an evenly spaced sample grid; the plotting itself is stubbed out."""
    samples = numpy.linspace(0, 5, 10)
    print(samples)
    # plt.figure(figsize=(10,10))
    # plt.scatter(xx, yy, 1, c=color_array, cmap="binary")
    # plt.show()
generate_fractal()
"numpy.linspace"
] | [((328, 352), 'numpy.linspace', 'numpy.linspace', (['(0)', '(5)', '(10)'], {}), '(0, 5, 10)\n', (342, 352), False, 'import numpy\n')] |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import argparse
import os
import os.path as osp
from chainer import cuda
import chainer.serializers as S
from chainer import Variable
import numpy as np
from scipy.misc import imread
from scipy.misc import imsave
import fcn
from fcn.models import FCN16s
from fcn.models import FCN32s
from fcn.models import FCN8s
class Forwarding(object):
    """Load a trained FCN chainer model and run semantic segmentation on images."""
    def __init__(self, gpu, chainermodel=None):
        """Set up the model.

        gpu: GPU device id, or -1 for CPU-only execution.
        chainermodel: path to a .chainermodel file; the FCN variant
            (fcn8s / fcn16s / fcn32s) is inferred from the filename
            prefix.  Defaults to the bundled fcn8s_from_caffe model.
        """
        self.gpu = gpu
        self.target_names = fcn.pascal.SegmentationClassDataset.target_names
        self.n_class = len(self.target_names)
        if chainermodel is None:
            chainermodel = osp.join(fcn.data_dir,
                                    'fcn8s_from_caffe.chainermodel')
            self.model_name = 'fcn8s'
            self.model = FCN8s(n_class=self.n_class)
        elif osp.basename(chainermodel).startswith('fcn8s'):
            self.model_name = 'fcn8s'
            self.model = FCN8s(n_class=self.n_class)
        elif osp.basename(chainermodel).startswith('fcn16s'):
            self.model_name = 'fcn16s'
            self.model = FCN16s(n_class=self.n_class)
        elif osp.basename(chainermodel).startswith('fcn32s'):
            self.model_name = 'fcn32s'
            self.model = FCN32s(n_class=self.n_class)
        else:
            raise ValueError(
                'Chainer model filename must start with fcn8s, '
                'fcn16s or fcn32s: {0}'.format(osp.basename(chainermodel)))
        # load the trained weights and optionally move the model to the GPU
        S.load_hdf5(chainermodel, self.model)
        if self.gpu != -1:
            self.model.to_gpu(self.gpu)
    def forward_img_file(self, img_file):
        """Run the network on one image file.

        Returns (resized RGB image, per-pixel label array from argmax
        over the class scores).
        """
        print('{0}:'.format(osp.realpath(img_file)))
        # setup image
        img = imread(img_file, mode='RGB')
        img, resizing_scale = fcn.util.resize_img_with_max_size(img)
        print(' - resizing_scale: {0}'.format(resizing_scale))
        # setup input datum
        datum = fcn.pascal.SegmentationClassDataset.img_to_datum(img.copy())
        # batch of one; move to GPU only when a device was requested
        x_data = np.array([datum], dtype=np.float32)
        if self.gpu != -1:
            x_data = cuda.to_gpu(x_data, device=self.gpu)
        x = Variable(x_data, volatile=False)
        # forward
        self.model.train = False
        self.model(x)
        pred = self.model.score
        # generate computational_graph (only once; reused on later calls)
        psfile = osp.join(
            fcn.data_dir, '{0}_forward.ps'.format(self.model_name))
        if not osp.exists(psfile):
            fcn.util.draw_computational_graph([pred], output=psfile)
        print('- computational_graph: {0}'.format(psfile))
        # bring scores back to CPU and take argmax over the class axis
        pred_datum = cuda.to_cpu(pred.data)[0]
        label = np.argmax(pred_datum, axis=0)
        return img, label
    def visualize_label(self, img, label):
        """Render the label map beside the input image and return the montage."""
        # visualize result
        unique_labels, label_counts = np.unique(label, return_counts=True)
        print('- labels:')
        label_titles = {}
        for label_value, label_count in zip(unique_labels, label_counts):
            label_region = label_count / label.size
            # skip classes that cover less than 0.1% of the image
            if label_region < 0.001:
                continue
            title = '{0}:{1} = {2:.1%}'.format(
                label_value, self.target_names[label_value], label_region)
            label_titles[label_value] = title
            print(' - {0}'.format(title))
        result_img = fcn.util.draw_label(
            label, img, n_class=self.n_class, label_titles=label_titles)
        # save result
        height, width = img.shape[:2]
        # stack input and result along the shorter dimension,
        # separated by a 3-pixel white line
        if height > width:
            vline = np.ones((height, 3, 3), dtype=np.uint8) * 255
            out_img = np.hstack((img, vline, result_img))
        else:
            hline = np.ones((3, width, 3), dtype=np.uint8) * 255
            out_img = np.vstack((img, hline, result_img))
        return out_img
return out_img
def main():
    """Parse command-line options and forward every given image through the net."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', default=0, type=int,
                        help='if -1, use cpu only')
    parser.add_argument('-c', '--chainermodel')
    parser.add_argument('-i', '--img-files', nargs='+', required=True)
    args = parser.parse_args()
    # outputs are collected under the fcn data directory
    save_dir = osp.join(fcn.data_dir, 'forward_out')
    if not osp.exists(save_dir):
        os.makedirs(save_dir)
    forwarding = Forwarding(args.gpu, args.chainermodel)
    for img_file in args.img_files:
        img, label = forwarding.forward_img_file(img_file)
        out_img = forwarding.visualize_label(img, label)
        out_file = osp.join(save_dir, osp.basename(img_file))
        imsave(out_file, out_img)
        print('- out_file: {0}'.format(out_file))
if __name__ == '__main__':
    main()
| [
"fcn.models.FCN16s",
"numpy.hstack",
"fcn.models.FCN8s",
"numpy.array",
"os.path.exists",
"fcn.util.resize_img_with_max_size",
"argparse.ArgumentParser",
"scipy.misc.imsave",
"chainer.cuda.to_cpu",
"scipy.misc.imread",
"numpy.vstack",
"chainer.cuda.to_gpu",
"fcn.util.draw_label",
"numpy.on... | [((3895, 3920), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3918, 3920), False, 'import argparse\n'), ((4281, 4318), 'os.path.join', 'osp.join', (['fcn.data_dir', '"""forward_out"""'], {}), "(fcn.data_dir, 'forward_out')\n", (4289, 4318), True, 'import os.path as osp\n'), ((1572, 1609), 'chainer.serializers.load_hdf5', 'S.load_hdf5', (['chainermodel', 'self.model'], {}), '(chainermodel, self.model)\n', (1583, 1609), True, 'import chainer.serializers as S\n'), ((1809, 1837), 'scipy.misc.imread', 'imread', (['img_file'], {'mode': '"""RGB"""'}), "(img_file, mode='RGB')\n", (1815, 1837), False, 'from scipy.misc import imread\n'), ((1868, 1906), 'fcn.util.resize_img_with_max_size', 'fcn.util.resize_img_with_max_size', (['img'], {}), '(img)\n', (1901, 1906), False, 'import fcn\n'), ((2092, 2127), 'numpy.array', 'np.array', (['[datum]'], {'dtype': 'np.float32'}), '([datum], dtype=np.float32)\n', (2100, 2127), True, 'import numpy as np\n'), ((2225, 2257), 'chainer.Variable', 'Variable', (['x_data'], {'volatile': '(False)'}), '(x_data, volatile=False)\n', (2233, 2257), False, 'from chainer import Variable\n'), ((2727, 2756), 'numpy.argmax', 'np.argmax', (['pred_datum'], {'axis': '(0)'}), '(pred_datum, axis=0)\n', (2736, 2756), True, 'import numpy as np\n'), ((2892, 2928), 'numpy.unique', 'np.unique', (['label'], {'return_counts': '(True)'}), '(label, return_counts=True)\n', (2901, 2928), True, 'import numpy as np\n'), ((3403, 3488), 'fcn.util.draw_label', 'fcn.util.draw_label', (['label', 'img'], {'n_class': 'self.n_class', 'label_titles': 'label_titles'}), '(label, img, n_class=self.n_class, label_titles=label_titles\n )\n', (3422, 3488), False, 'import fcn\n'), ((4330, 4350), 'os.path.exists', 'osp.exists', (['save_dir'], {}), '(save_dir)\n', (4340, 4350), True, 'import os.path as osp\n'), ((4360, 4381), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (4371, 4381), False, 'import os\n'), ((4648, 4673), 
'scipy.misc.imsave', 'imsave', (['out_file', 'out_img'], {}), '(out_file, out_img)\n', (4654, 4673), False, 'from scipy.misc import imsave\n'), ((733, 788), 'os.path.join', 'osp.join', (['fcn.data_dir', '"""fcn8s_from_caffe.chainermodel"""'], {}), "(fcn.data_dir, 'fcn8s_from_caffe.chainermodel')\n", (741, 788), True, 'import os.path as osp\n'), ((888, 915), 'fcn.models.FCN8s', 'FCN8s', ([], {'n_class': 'self.n_class'}), '(n_class=self.n_class)\n', (893, 915), False, 'from fcn.models import FCN8s\n'), ((2176, 2212), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['x_data'], {'device': 'self.gpu'}), '(x_data, device=self.gpu)\n', (2187, 2212), False, 'from chainer import cuda\n'), ((2512, 2530), 'os.path.exists', 'osp.exists', (['psfile'], {}), '(psfile)\n', (2522, 2530), True, 'import os.path as osp\n'), ((2544, 2600), 'fcn.util.draw_computational_graph', 'fcn.util.draw_computational_graph', (['[pred]'], {'output': 'psfile'}), '([pred], output=psfile)\n', (2577, 2600), False, 'import fcn\n'), ((2685, 2707), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['pred.data'], {}), '(pred.data)\n', (2696, 2707), False, 'from chainer import cuda\n'), ((3672, 3707), 'numpy.hstack', 'np.hstack', (['(img, vline, result_img)'], {}), '((img, vline, result_img))\n', (3681, 3707), True, 'import numpy as np\n'), ((3809, 3844), 'numpy.vstack', 'np.vstack', (['(img, hline, result_img)'], {}), '((img, hline, result_img))\n', (3818, 3844), True, 'import numpy as np\n'), ((4616, 4638), 'os.path.basename', 'osp.basename', (['img_file'], {}), '(img_file)\n', (4628, 4638), True, 'import os.path as osp\n'), ((1040, 1067), 'fcn.models.FCN8s', 'FCN8s', ([], {'n_class': 'self.n_class'}), '(n_class=self.n_class)\n', (1045, 1067), False, 'from fcn.models import FCN8s\n'), ((1748, 1770), 'os.path.realpath', 'osp.realpath', (['img_file'], {}), '(img_file)\n', (1760, 1770), True, 'import os.path as osp\n'), ((3604, 3643), 'numpy.ones', 'np.ones', (['(height, 3, 3)'], {'dtype': 'np.uint8'}), '((height, 3, 3), 
dtype=np.uint8)\n', (3611, 3643), True, 'import numpy as np\n'), ((3742, 3780), 'numpy.ones', 'np.ones', (['(3, width, 3)'], {'dtype': 'np.uint8'}), '((3, width, 3), dtype=np.uint8)\n', (3749, 3780), True, 'import numpy as np\n'), ((929, 955), 'os.path.basename', 'osp.basename', (['chainermodel'], {}), '(chainermodel)\n', (941, 955), True, 'import os.path as osp\n'), ((1194, 1222), 'fcn.models.FCN16s', 'FCN16s', ([], {'n_class': 'self.n_class'}), '(n_class=self.n_class)\n', (1200, 1222), False, 'from fcn.models import FCN16s\n'), ((1081, 1107), 'os.path.basename', 'osp.basename', (['chainermodel'], {}), '(chainermodel)\n', (1093, 1107), True, 'import os.path as osp\n'), ((1349, 1377), 'fcn.models.FCN32s', 'FCN32s', ([], {'n_class': 'self.n_class'}), '(n_class=self.n_class)\n', (1355, 1377), False, 'from fcn.models import FCN32s\n'), ((1236, 1262), 'os.path.basename', 'osp.basename', (['chainermodel'], {}), '(chainermodel)\n', (1248, 1262), True, 'import os.path as osp\n'), ((1534, 1560), 'os.path.basename', 'osp.basename', (['chainermodel'], {}), '(chainermodel)\n', (1546, 1560), True, 'import os.path as osp\n')] |
import numpy as np
import cv2 as cv
import utils
from table import Table
from PIL import Image
import xlsxwriter
import sys
from pdf2image import convert_from_path
# =====================================================
# IMAGE LOADING
# =====================================================
if len(sys.argv) < 2:
    print("Usage: python main.py <img_path>")
    sys.exit(1)
path = sys.argv[1]
if not path.endswith(".pdf") and not path.endswith(".jpg"):
    print("Must use a pdf or a jpg image to run the program.")
    sys.exit(1)
# Normalize the input to a single image: first page of a PDF, or the
# JPEG itself, then persist it to disk as a PNG.
if path.endswith(".pdf"):
    ext_img = convert_from_path(path)[0]
else:
    ext_img = Image.open(path)
ext_img.save("data/target.png", "PNG")
# BUG FIX: read back the file that was just written.  The original read
# "data/target.jpg", which is never created for PDF input, so imread
# returned None and the script crashed later.
image = cv.imread("data/target.png")
# Convert resized RGB image to grayscale
NUM_CHANNELS = 3
if len(image.shape) == NUM_CHANNELS:
    grayscale = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
# =====================================================
# IMAGE FILTERING (using adaptive thresholding)
# =====================================================
"""
ADAPTIVE THRESHOLDING
Thresholding changes pixels' color values to a specified pixel value if the current pixel value
is less than a threshold value, which could be:
1. a specified global threshold value provided as an argument to the threshold function (simple thresholding),
2. the mean value of the pixels in the neighboring area (adaptive thresholding - mean method),
3. the weighted sum of neigborhood values where the weights are Gaussian windows (adaptive thresholding - Gaussian method).
The last two parameters to the adaptiveThreshold function are the size of the neighboring area and
the constant C which is subtracted from the mean or weighted mean calculated.
"""
MAX_THRESHOLD_VALUE = 255
BLOCK_SIZE = 15
THRESHOLD_CONSTANT = 0
# Filter image (invert first so table lines become foreground)
filtered = cv.adaptiveThreshold(~grayscale, MAX_THRESHOLD_VALUE, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, BLOCK_SIZE, THRESHOLD_CONSTANT)
# =====================================================
# LINE ISOLATION
# =====================================================
"""
HORIZONTAL AND VERTICAL LINE ISOLATION
To isolate the vertical and horizontal lines,
1. Set a scale.
2. Create a structuring element.
3. Isolate the lines by eroding and then dilating the image.
"""
SCALE = 15
# Isolate horizontal and vertical lines using morphological operations
horizontal = filtered.copy()
vertical = filtered.copy()
horizontal_size = int(horizontal.shape[1] / SCALE)
horizontal_structure = cv.getStructuringElement(cv.MORPH_RECT, (horizontal_size, 1))
utils.isolate_lines(horizontal, horizontal_structure)
vertical_size = int(vertical.shape[0] / SCALE)
vertical_structure = cv.getStructuringElement(cv.MORPH_RECT, (1, vertical_size))
utils.isolate_lines(vertical, vertical_structure)
# =====================================================
# TABLE EXTRACTION
# =====================================================
# Create an image mask with just the horizontal
# and vertical lines in the image. Then find
# all contours in the mask.
mask = horizontal + vertical
(contours, _) = cv.findContours(mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
# Find intersections between the lines
# to determine if the intersections are table joints.
intersections = cv.bitwise_and(horizontal, vertical)
# Get tables from the images
tables = []  # list of tables
for i in range(len(contours)):
    # Verify that region of interest is a table
    (rect, table_joints) = utils.verify_table(contours[i], intersections)
    # `is None` instead of `== None`: identity check is the Python idiom
    if rect is None or table_joints is None:
        continue
    # Create a new instance of a table
    table = Table(rect[0], rect[1], rect[2], rect[3])
    # Get an n-dimensional array of the coordinates of the table joints.
    # Iterate the joints directly instead of re-using the outer loop
    # variable `i` as an index (shadowing, harmless but confusing).
    joint_coords = np.asarray([joint[0][0] for joint in table_joints])
    # Returns indices of coordinates in sorted order
    # Sorts based on parameters (aka keys) starting from the last parameter, then second-to-last, etc
    sorted_indices = np.lexsort((joint_coords[:, 0], joint_coords[:, 1]))
    joint_coords = joint_coords[sorted_indices]
    # Store joint coordinates in the table instance
    table.set_joints(joint_coords)
    tables.append(table)
    #cv.rectangle(image, (table.x, table.y), (table.x + table.w, table.y + table.h), (0, 255, 0), 1, 8, 0)
    #cv.imshow("tables", image)
    #cv.waitKey(0)
# =====================================================
# OCR AND WRITING TEXT TO EXCEL
# =====================================================
out = "bin/"
table_name = "table.jpg"
psm = 6      # tesseract page segmentation mode: assume a uniform block of text
oem = 3      # tesseract OCR engine mode: default
mult = 3     # upscaling factor applied before OCR
utils.mkdir(out)
utils.mkdir("bin/table/")
utils.mkdir("excel/")
workbook = xlsxwriter.Workbook('excel/tables.xlsx')
for table in tables:
    worksheet = workbook.add_worksheet()
    table_entries = table.get_table_entries()
    # crop the table region and upscale it for better OCR accuracy
    table_roi = image[table.y:table.y + table.h, table.x:table.x + table.w]
    table_roi = cv.resize(table_roi, (table.w * mult, table.h * mult))
    cv.imwrite(out + table_name, table_roi)
    num_img = 0
    for i in range(len(table_entries)):
        row = table_entries[i]
        for j in range(len(row)):
            entry = row[j]
            # crop one cell (entry = (x, y, w, h) in unscaled coordinates)
            entry_roi = table_roi[entry[1] * mult: (entry[1] + entry[3]) * mult, entry[0] * mult:(entry[0] + entry[2]) * mult]
            fname = out + "table/cell" + str(num_img) + ".jpg"
            cv.imwrite(fname, entry_roi)
            # clean the cell image, OCR it, and write the text to the sheet
            fname = utils.run_textcleaner(fname, num_img)
            text = utils.run_tesseract(fname, num_img, psm, oem)
            num_img += 1
            worksheet.write(i, j, text)
workbook.close()
| [
"utils.run_textcleaner",
"sys.exit",
"numpy.asarray",
"utils.verify_table",
"xlsxwriter.Workbook",
"cv2.cvtColor",
"cv2.resize",
"cv2.imread",
"cv2.imwrite",
"table.Table",
"PIL.Image.open",
"utils.isolate_lines",
"cv2.bitwise_and",
"numpy.lexsort",
"cv2.adaptiveThreshold",
"utils.mkdi... | [((689, 717), 'cv2.imread', 'cv.imread', (['"""data/target.jpg"""'], {}), "('data/target.jpg')\n", (698, 717), True, 'import cv2 as cv\n'), ((1804, 1939), 'cv2.adaptiveThreshold', 'cv.adaptiveThreshold', (['(~grayscale)', 'MAX_THRESHOLD_VALUE', 'cv.ADAPTIVE_THRESH_MEAN_C', 'cv.THRESH_BINARY', 'BLOCK_SIZE', 'THRESHOLD_CONSTANT'], {}), '(~grayscale, MAX_THRESHOLD_VALUE, cv.\n ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, BLOCK_SIZE, THRESHOLD_CONSTANT)\n', (1824, 1939), True, 'import cv2 as cv\n'), ((2484, 2545), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', '(horizontal_size, 1)'], {}), '(cv.MORPH_RECT, (horizontal_size, 1))\n', (2508, 2545), True, 'import cv2 as cv\n'), ((2546, 2599), 'utils.isolate_lines', 'utils.isolate_lines', (['horizontal', 'horizontal_structure'], {}), '(horizontal, horizontal_structure)\n', (2565, 2599), False, 'import utils\n'), ((2669, 2728), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', '(1, vertical_size)'], {}), '(cv.MORPH_RECT, (1, vertical_size))\n', (2693, 2728), True, 'import cv2 as cv\n'), ((2729, 2778), 'utils.isolate_lines', 'utils.isolate_lines', (['vertical', 'vertical_structure'], {}), '(vertical, vertical_structure)\n', (2748, 2778), False, 'import utils\n'), ((3077, 3140), 'cv2.findContours', 'cv.findContours', (['mask', 'cv.RETR_EXTERNAL', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n', (3092, 3140), True, 'import cv2 as cv\n'), ((3251, 3287), 'cv2.bitwise_and', 'cv.bitwise_and', (['horizontal', 'vertical'], {}), '(horizontal, vertical)\n', (3265, 3287), True, 'import cv2 as cv\n'), ((4645, 4661), 'utils.mkdir', 'utils.mkdir', (['out'], {}), '(out)\n', (4656, 4661), False, 'import utils\n'), ((4662, 4687), 'utils.mkdir', 'utils.mkdir', (['"""bin/table/"""'], {}), "('bin/table/')\n", (4673, 4687), False, 'import utils\n'), ((4689, 4710), 'utils.mkdir', 'utils.mkdir', (['"""excel/"""'], {}), "('excel/')\n", (4700, 
4710), False, 'import utils\n'), ((4722, 4762), 'xlsxwriter.Workbook', 'xlsxwriter.Workbook', (['"""excel/tables.xlsx"""'], {}), "('excel/tables.xlsx')\n", (4741, 4762), False, 'import xlsxwriter\n'), ((365, 376), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (373, 376), False, 'import sys\n'), ((524, 535), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (532, 535), False, 'import sys\n'), ((624, 640), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (634, 640), False, 'from PIL import Image\n'), ((830, 867), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (841, 867), True, 'import cv2 as cv\n'), ((3453, 3499), 'utils.verify_table', 'utils.verify_table', (['contours[i]', 'intersections'], {}), '(contours[i], intersections)\n', (3471, 3499), False, 'import utils\n'), ((3614, 3655), 'table.Table', 'Table', (['rect[0]', 'rect[1]', 'rect[2]', 'rect[3]'], {}), '(rect[0], rect[1], rect[2], rect[3])\n', (3619, 3655), False, 'from table import Table\n'), ((3860, 3884), 'numpy.asarray', 'np.asarray', (['joint_coords'], {}), '(joint_coords)\n', (3870, 3884), True, 'import numpy as np\n'), ((4062, 4114), 'numpy.lexsort', 'np.lexsort', (['(joint_coords[:, 0], joint_coords[:, 1])'], {}), '((joint_coords[:, 0], joint_coords[:, 1]))\n', (4072, 4114), True, 'import numpy as np\n'), ((4966, 5020), 'cv2.resize', 'cv.resize', (['table_roi', '(table.w * mult, table.h * mult)'], {}), '(table_roi, (table.w * mult, table.h * mult))\n', (4975, 5020), True, 'import cv2 as cv\n'), ((5026, 5065), 'cv2.imwrite', 'cv.imwrite', (['(out + table_name)', 'table_roi'], {}), '(out + table_name, table_roi)\n', (5036, 5065), True, 'import cv2 as cv\n'), ((577, 600), 'pdf2image.convert_from_path', 'convert_from_path', (['path'], {}), '(path)\n', (594, 600), False, 'from pdf2image import convert_from_path\n'), ((5418, 5446), 'cv2.imwrite', 'cv.imwrite', (['fname', 'entry_roi'], {}), '(fname, entry_roi)\n', (5428, 5446), True, 'import cv2 
as cv\n'), ((5468, 5505), 'utils.run_textcleaner', 'utils.run_textcleaner', (['fname', 'num_img'], {}), '(fname, num_img)\n', (5489, 5505), False, 'import utils\n'), ((5525, 5570), 'utils.run_tesseract', 'utils.run_tesseract', (['fname', 'num_img', 'psm', 'oem'], {}), '(fname, num_img, psm, oem)\n', (5544, 5570), False, 'import utils\n')] |
import os
import sys
import csv
import json
import random
import requests
from typing import Any, List, Optional
from datetime import datetime, timedelta, date
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify
from jinja2 import TemplateNotFound
from sqlalchemy import *
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
import numpy as np
import joblib
import threading
import tempfile
from diskcache import Cache
from filelock import FileLock, Timeout
from pathlib import Path
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
from sentence_transformers import SentenceTransformer
import logging
# Log to stderr so messages show up in the WSGI server's error stream.
logging.basicConfig(stream=sys.stderr)
# Flask application setup; configuration is read from this module.
app = Flask(__name__)
app.config.from_object(__name__)
app.config.update(dict(
    DATABASE=os.path.join(app.root_path, 'annotation-curriculum.db'),
    USERNAME='name',
    PASSWORD='password',
    SECRET_KEY = '<KEY>'
))
# NOTE(review): database credentials are hard-coded here and above --
# consider loading them from the environment instead.
engine = create_engine('mysql+mysqlconnector://name:password@localhost/annotation-curriculum',
                       encoding='utf-8',
                       pool_recycle=3600,
                       connect_args={'auth_plugin': 'mysql_native_password'})
Base = declarative_base(engine)
# Filesystem locations used by the app, anchored at this file's directory.
PATH_ANNOTATION = Path(__file__).parent.absolute() / "annotation_task"
PATH_MODELS = Path(__file__).parent.absolute() / "models"
PATH_CACHE = Path(__file__).parent.absolute() / "cache"
# Lock files live in the system temp directory.
PATH_LOCK = Path(tempfile.gettempdir()) / ".locks"
##################################################################
# DATABASE TABLES
##################################################################
class Tweets(Base):
    # ORM mapping for the `tweets` table; columns are reflected from the
    # live database schema at import time (autoload).
    __tablename__ = 'tweets'
    __table_args__ = {'autoload':True}
class Users(Base):
    # ORM mapping for the `users` table (reflected schema).
    __tablename__ = 'users'
    __table_args__ = {'autoload':True}
class Misconceptions(Base):
    # ORM mapping for the `misconceptions` table (reflected schema).
    __tablename__ = 'misconceptions'
    __table_args__ = {'autoload':True}
class Annotations(Base):
    # ORM mapping for the `annotations` table (reflected schema).
    __tablename__ = 'annotations'
    __table_args__ = {'autoload':True}
class Strategies(Base):
    # ORM mapping for the `strategies` table (reflected schema).
    __tablename__ = 'strategies'
    __table_args__ = {'autoload':True}
class Questionnaire(Base):
    # ORM mapping for the `questionnaire` table (reflected schema).
    __tablename__ = 'questionnaire'
    __table_args__ = {'autoload':True}
def get_options(line, difficulty):
    """Return the answer options for a CSV row at the given difficulty.

    line: a CSV row (sequence of strings); columns 5-8 hold the
        newline-separated option lists for the difficulty levels
        vez/ez/med/dif, and column 9 is the fallback.
    difficulty: difficulty key ('vez', 'ez', 'med', 'dif', or other).

    Returns the options as a list of strings.
    """
    # Map each difficulty level to its CSV column; unknown levels fall
    # back to the last options column, matching the original if/elif chain.
    column_by_difficulty = {"vez": 5, "ez": 6, "med": 7, "dif": 8}
    results = line[column_by_difficulty.get(difficulty, 9)]
    return results.strip().split('\n')
def read_csv(infile):
    """Read an annotation CSV and return {row_index: tweet record}."""
    data = {}
    csv_path = os.path.join(PATH_ANNOTATION, infile)
    with open(csv_path, 'r', encoding='utf-8') as lines:
        reader = csv.reader(lines, delimiter=";", quotechar='"')
        # skip the header row
        next(reader, None)
        for index, row in enumerate(reader):
            data[index] = {
                "tweet_id": row[1],
                "tweet_text": row[3],
                "true_mcid": row[0],
                "difficulty": row[4],
                "answers": get_options(row, row[4]),
            }
    return data
##################################################################
# DATABASE FUNCTIONS
##################################################################
# Create session with all tables
def create_session():
    """Return a fresh SQLAlchemy session bound to the shared engine."""
    # (removed an unused `metadata = Base.metadata` local)
    Session = sessionmaker(bind=engine)
    return Session()
def get_strategy(strategy_name):
    """Look up the annotation path configured for a strategy name."""
    db = create_session()
    strategy = db.query(Strategies).filter_by(name=strategy_name).first()
    annotation_path = strategy.annotation_path
    db.close()
    return annotation_path
def check_user_credentials(username):
    """Return True if a user with the given name exists, else False."""
    session = create_session()
    # Query the row directly instead of the original pattern of
    # dereferencing `.first().id` and catching the AttributeError
    # raised when no row matches.
    user = session.query(Users).filter_by(name=username).first()
    session.commit()
    session.close()
    return user is not None
def add_user(username):
    """Create the user if missing; return True on creation, False if it already exists."""
    session = create_session()
    try:
        user_id = session.query(Users).filter_by(name=username).first().id
        session.commit()
        session.close()
        return False
    except AttributeError:
        # `.first()` returned None -> the user does not exist yet.
        # Fix to interactive only
        strategy_to_set = session.query(Strategies).filter_by(name='interactive').first().id
        # Set user credentials and sampling strategy
        session.add(Users(name=username.encode('utf-8'), strategy_id=strategy_to_set, finished=0, finished_intro=0, finished_questionnaire=0, consent=0))
        session.commit()
        # re-query to confirm the insert succeeded before returning
        user_id = session.query(Users).filter_by(name=username).first().id
        session.commit()
        session.close()
        return True
def remove_user(user_id):
    """Delete the user row with the given id."""
    db = create_session()
    target = db.query(Users).filter_by(id=user_id).first()
    db.delete(target)
    db.commit()
    db.close()
# NOTE: Only use, when logged in!
def get_user_id(username):
    """Return the id of an existing user (assumes the user exists)."""
    db = create_session()
    uid = db.query(Users).filter_by(name=username).first().id
    db.close()
    return uid
def check_user_consent(user_id):
    """Return True if the user has given consent."""
    db = create_session()
    consent_flag = db.query(Users).filter_by(id=user_id).first().consent
    db.close()
    # the column stores 0/1; anything non-zero counts as consent
    return consent_flag != 0
def set_user_consent(user_id):
    """Record that the user accepted the informed-consent form."""
    db = create_session()
    db.query(Users).filter_by(id=user_id).first().consent = 1
    db.commit()
    db.close()
def get_strategy_path(user_id):
    """Return the annotation-file path of the strategy assigned to a user."""
    db = create_session()
    sid = db.query(Users).filter_by(id=user_id).first().strategy_id
    path = db.query(Strategies).filter_by(id=sid).first().annotation_path
    db.close()
    return path
def get_strategy_name(user_id):
    """Return the name of the sampling strategy assigned to a user."""
    db = create_session()
    sid = db.query(Users).filter_by(id=user_id).first().strategy_id
    name = db.query(Strategies).filter_by(id=sid).first().name
    db.close()
    return name
def check_user_is_done(user_id):
    """Return True once the user has finished the full annotation study."""
    db = create_session()
    finished_flag = db.query(Users).filter_by(id=user_id).first().finished
    db.close()
    return finished_flag != 0
def set_user_is_done(user_id):
    """Mark the user as having finished all annotations.

    Fix: the session is now closed after committing; previously it was
    leaked (every sibling setter in this module closes its session).
    """
    session = create_session()
    user = session.query(Users).filter_by(id=user_id).first()
    user.finished = 1
    session.commit()
    session.close()
def check_user_is_done_questionnaire(user_id):
    """Return True once the user has submitted the closing questionnaire."""
    db = create_session()
    flag = db.query(Users).filter_by(id=user_id).first().finished_questionnaire
    db.close()
    return flag != 0
def set_user_is_done_questionnaire(user_id):
    """Mark the user's closing questionnaire as submitted.

    Fix: the session is now closed after committing; previously it was
    leaked (every sibling setter in this module closes its session).
    """
    session = create_session()
    user = session.query(Users).filter_by(id=user_id).first()
    user.finished_questionnaire = 1
    session.commit()
    session.close()
def check_user_is_done_intro(user_id):
    """Return True once the user has completed the introductory instances."""
    db = create_session()
    flag = db.query(Users).filter_by(id=user_id).first().finished_intro
    db.close()
    return flag != 0
def set_user_is_done_intro(user_id):
    """Mark the user's introductory phase as completed."""
    db = create_session()
    db.query(Users).filter_by(id=user_id).first().finished_intro = 1
    db.commit()
    db.close()
def store_results(user_id, tweet_id, difficulty, true_mc_id, annot_mc_id, false_mc_ids, annotation_time, annotation_order):
    """Persist a single annotation outcome for the given user."""
    record = Annotations(
        user_id=user_id,
        tweet_id=tweet_id,
        difficulty=difficulty,
        true_misconception=true_mc_id,
        annotated_misconception=annot_mc_id,
        false_misconceptions=false_mc_ids,
        annotation_time=annotation_time,
        annotation_order=annotation_order,
    )
    db = create_session()
    db.add(record)
    db.commit()
    db.close()
def get_mctext(mcid):
    """Return the misconception text for a misconception id."""
    db = create_session()
    text = db.query(Misconceptions).filter_by(id=mcid).first().misconception_text
    db.close()
    return text
def get_num_finished_annotations(user_id):
    """Count the study annotations stored for a user.

    Users who completed the intro carry its 10 warm-up annotations in the
    same table, so those are subtracted.

    Fixes: removed the dead ``result = 0`` initializer and used a SQL
    COUNT instead of materializing every row just to take its length.
    """
    session = create_session()
    result = session.query(Annotations).filter_by(user_id=user_id).count()
    # Ignore instances that have been done in the introductory experiments
    if session.query(Users).filter_by(id=user_id).first().finished_intro == 1:
        result = result - 10  # we have 10 intro instances
    session.close()
    return result
# Get all instances and annotation time for a specific user
def train_model(user_id):
    """Fit the user's annotation-time model on their full annotation history."""
    db = create_session()
    annotations = db.query(Annotations).filter_by(user_id=user_id).all()
    texts = [db.query(Tweets).filter_by(tweet_id=a.tweet_id).first().tweet_text
             for a in annotations]
    times = [float(a.annotation_time) for a in annotations]
    db.close()
    # iteration equals total training data minus intro
    iteration = len(times) - 10
    Model()._train({"texts": texts, "times": times}, user_id, iteration)
# Get predictions for all remaining tweet instances
def get_model_predictions(user_id, annotation_data, index):
    """Rank the user's not-yet-annotated tweets with their personal model.

    Returns a list of (tweet_id, tweet_text) tuples in model order, a
    single-element list for the final instance, or False once the user has
    annotated 60 or more items (the user is then marked as done).

    NOTE(review): the 59/60 cut-offs presumably reflect a fixed study size
    of 60 instances per user — confirm against the study design. The
    `index` parameter is unused here.
    """
    session = create_session()
    tmp = session.query(Annotations).filter_by(user_id=user_id).all()
    annotated = [x.tweet_id for x in tmp]
    session.close()
    # Collect every instance the user has not annotated yet.
    tweet_ids, tweet_texts = [], []
    for k, v in annotation_data.items():
        if v["tweet_id"] in annotated:
            continue
        tweet_ids.append(v["tweet_id"])
        tweet_texts.append(v["tweet_text"])
    # final prediction:
    if len(annotated) == 59:
        # Only one instance remains; no need to rerank.
        return [(tweet_ids[0], tweet_texts[0],)]
    elif len(annotated) >= 60:
        set_user_is_done(user_id)
        return False
    model = Model()
    results = []
    # _rerank yields indices into tweet_texts, best-first.
    for i in model._rerank(tweet_texts, user_id):
        results.append((tweet_ids[i], tweet_texts[i],))
    return results
def store_questionnaire_results(user_id, data):
    """Persist the closing questionnaire and flag it as finished for the user."""
    answers = {key: data[key] for key in (
        'difficulty', 'differences', 'ordering', 'ordering_other',
        'proficiency', 'years', 'native_tongue', 'annotator', 'conductor')}
    db = create_session()
    db.add(Questionnaire(user_id=user_id, **answers))
    db.query(Users).filter_by(id=user_id).first().finished_questionnaire = 1
    db.commit()
    db.close()
##################################################################
# ML Part
##################################################################
class Model:
    """Per-user Gaussian-process model predicting annotation time from text.

    Models are persisted as joblib files under PATH_MODELS; a per-user file
    lock ensures at most one training run per user at a time.
    """

    def __init__(self):
        self._featurizer = CachedSentenceTransformer("paraphrase-distilroberta-base-v1")

    def _test(self, data, userid):
        """Predict annotation times for ``data["texts"]`` with the user's model.

        Fixes: logging referenced ``self.userid``, which does not exist on
        Model (would raise AttributeError) — now logs the ``userid``
        argument; the predictions are returned instead of being discarded.
        """
        logging.info("Got sorting request for [%s]", userid)
        texts = data["texts"]
        model = self._load_model(userid)
        Xf = self._featurizer.featurize(texts)
        return model.predict(Xf)

    def _rerank(self, texts, userid):
        """Return indices of `texts` ordered by the user's model (or shuffled
        randomly when no model has been trained yet)."""
        logging.info("Got sorting request for [%s]", userid)
        model = self._load_model(userid)
        if model is None:
            logging.info("No model for user [%s] yet", userid)
            ranks = np.arange(len(texts))
            rng = np.random.default_rng()
            rng.shuffle(ranks)
        else:
            Xf = self._featurizer.featurize(texts)
            y = model.predict(Xf)
            ranks = np.argsort(y)
        return [int(r) for r in ranks]

    def _train(self, data, userid, iteration):
        """Fit a GP regressor on (text embedding, annotation time) pairs.

        Training runs in a daemon thread guarded by a per-user file lock;
        returns True when training was started, False when a run for this
        user is already in progress (lock Timeout).
        """
        logging.info("Got training request for [%s] in iteration [%s]", userid, iteration)
        texts = data["texts"]
        times = data["times"]
        try:
            # The lock needs to be acquired out here, not in the fn scope, else it would
            # just throw the Timeout inside fn.
            lock = self._get_lock(userid)
            lock.acquire()

            def _fn():
                try:
                    Xf = self._featurizer.featurize(texts)
                    model = GaussianProcessRegressor(kernel=(DotProduct() + WhiteKernel())).fit(Xf, times)
                    self._save_model(model, userid)
                    self._save_model(model, f"{userid}_{iteration}")  # save temporary models for experiments
                finally:
                    lock.release()

            # We spawn a thread and run the training in there so that this HTTP request can return directly
            threading.Thread(target=_fn, daemon=True).start()
            return True
        except Timeout:
            logging.info("Already training for user [%s], skipping iteration [%s]!", userid, iteration)
            return False

    def _load_model(self, userid):
        """Load the persisted model for a user, or None if none exists."""
        model_path = self._get_model_path(userid)
        if model_path.is_file():
            logging.debug("Model found for [%s]", model_path)
            return joblib.load(model_path)
        else:
            logging.debug("No model found for [%s]", model_path)
            return None

    def _save_model(self, model, userid):
        """Atomically persist a model: write to a temp file, then rename."""
        model_path = self._get_model_path(userid)
        model_path.parent.mkdir(parents=True, exist_ok=True)
        tmp_model_path = model_path.with_suffix(".joblib.tmp")
        joblib.dump(model, tmp_model_path)
        os.replace(tmp_model_path, model_path)

    def _get_model_path(self, userid):
        """Path of the joblib file holding this user's model."""
        return PATH_MODELS / f"model_{userid}.joblib"

    def _get_lock(self, userid):
        """Per-user file lock with a 1-second acquisition timeout."""
        PATH_LOCK.mkdir(parents=True, exist_ok=True)
        lock_path = PATH_LOCK / f"{userid}.lock"
        return FileLock(lock_path, timeout=1)
class CachedSentenceTransformer:
    """Sentence embedder that memoizes vectors in a disk-backed cache."""

    def __init__(self, model_name: str):
        super().__init__()
        self._model = SentenceTransformer(model_name)
        self._cache = Cache(PATH_CACHE / model_name)

    def featurize(self, sentences: List[str]) -> np.ndarray:
        """Return one embedding row per sentence, serving repeats from cache."""
        vectors = []
        for sentence in sentences:
            if sentence not in self._cache:
                # Cache miss: embed once and remember the vector on disk.
                self._cache[sentence] = self._model.encode(sentence).squeeze()
            vectors.append(self._cache[sentence])
        return np.array(vectors)
##################################################################
# WEBEND
##################################################################
def get_intro_strategy(user_id):
    """Serve the next introductory annotation instance for a user.

    Reads the fixed intro CSV and picks the instance at the user's current
    annotation index. Returns the populated `data` dict, or False once the
    intro instances are exhausted (the KeyError from indexing past the end
    is the intended termination signal, and marks the intro as done).
    """
    strategy_path = "intro_experiment.csv"
    data = {"index": get_num_finished_annotations(user_id),
            "date": datetime.today()}  # set annotation index and time
    annotation_data = read_csv(strategy_path)
    try:
        data["annotation"] = annotation_data[data["index"]]
        data["difficulty"] = annotation_data[data["index"]]["difficulty"]
        data["true_mcid"] = annotation_data[data["index"]]["true_mcid"]
        data["false_mcid"] = ','.join([str(mcid) for mcid in annotation_data[data["index"]]["answers"]])
        # Candidate answers: the distractors plus the true misconception, shuffled.
        data["answers"] = [[mcid, get_mctext(mcid)] for mcid in annotation_data[data["index"]]["answers"]]
        data["answers"].append([data["true_mcid"], get_mctext(data["true_mcid"])])
        random.shuffle(data["answers"])
        return data
    except KeyError:
        # Index past the last intro instance: the intro phase is complete.
        set_user_is_done_intro(user_id)
        return False
def get_static_strategy(user_id):
    """Serve the next instance from the user's fixed (pre-ordered) strategy file.

    Same flow as get_intro_strategy, but the CSV comes from the strategy
    assigned to the user. Returns the populated `data` dict, or False once
    all instances are exhausted (KeyError on out-of-range index is the
    intended termination signal, and marks the whole study as done).
    """
    strategy_path = get_strategy_path(user_id)
    data = {"index": get_num_finished_annotations(user_id),
            "date": datetime.today()}  # set annotation index and time
    annotation_data = read_csv(strategy_path)
    try:
        data["annotation"] = annotation_data[data["index"]]
        data["difficulty"] = annotation_data[data["index"]]["difficulty"]
        data["true_mcid"] = annotation_data[data["index"]]["true_mcid"]
        data["false_mcid"] = ','.join([str(mcid) for mcid in annotation_data[data["index"]]["answers"]])
        # Candidate answers: the distractors plus the true misconception, shuffled.
        data["answers"] = [[mcid, get_mctext(mcid)] for mcid in annotation_data[data["index"]]["answers"]]
        data["answers"].append([data["true_mcid"], get_mctext(data["true_mcid"])])
        random.shuffle(data["answers"])
        return data
    except KeyError:
        # Index past the last instance: the study is complete for this user.
        set_user_is_done(user_id)
        return False
def get_model_strategy(user_id):
    """Serve the next instance chosen by the user's trained ranking model.

    Asks get_model_predictions for the model's best remaining tweet, then
    locates that tweet's index in the strategy CSV and builds the same
    `data` dict as the static strategies. Returns False when the user is
    done (get_model_predictions returns False, making predictions[0]
    raise the TypeError that the except clause relies on).
    """
    strategy_path = get_strategy_path(user_id)
    index = get_num_finished_annotations(user_id)
    print("Number of already finished annotations: {}".format(index))
    data = {}  # set annotation index and time
    annotation_data = read_csv(strategy_path)
    try:
        # get rank indices for the remaining instances
        predictions = get_model_predictions(user_id, annotation_data, index)
        # fetch index:
        tweet_id, tweet_text = predictions[0]
        # Map the chosen tweet back to its position in the CSV.
        for i in range(len(annotation_data)):
            if annotation_data[i]["tweet_id"] == tweet_id and annotation_data[i]["tweet_text"] == tweet_text:
                index = i
                break
        data["index"] = index
        data["date"] = datetime.today()
        data["annotation"] = annotation_data[data["index"]]
        data["difficulty"] = annotation_data[data["index"]]["difficulty"]
        data["true_mcid"] = annotation_data[data["index"]]["true_mcid"]
        data["false_mcid"] = ','.join([str(mcid) for mcid in annotation_data[data["index"]]["answers"]])
        # Candidate answers: the distractors plus the true misconception, shuffled.
        data["answers"] = [[mcid, get_mctext(mcid)] for mcid in annotation_data[data["index"]]["answers"]]
        data["answers"].append([data["true_mcid"], get_mctext(data["true_mcid"])])
        random.shuffle(data["answers"])
        return data
    except TypeError as e:
        # predictions was False (user done) and is not subscriptable.
        return False
@app.route('/')
@app.route('/index', methods=['POST', 'GET'])
def index(name=None):
    """Render the study landing page."""
    return render_template('study_index.html', name=name, data={})
@app.route('/annotation_task_description')
def task_description(name=None):
    """Render the annotation-task description page."""
    return render_template('study_task_description.html', name=name, data={})
@app.route('/cefr_description')
def cefr_description(name=None):
    """Render the CEFR proficiency-levels description page."""
    return render_template('study_cefr_description.html', name=name, data={})
@app.route('/informed_consent')
def informed_consent(name=None):
    """Render the first informed-consent page."""
    return render_template('study_informed_consent_1.html', name=name, data={})
@app.route('/consent_2')
def consent_2(name=None):
    """Render the second informed-consent page."""
    return render_template('study_informed_consent_2.html', name=name, data={})
@app.route('/consent_3')
def consent_3(name=None):
    """Render the third informed-consent page."""
    return render_template('study_informed_consent_3.html', name=name, data={})
@app.route('/agree')
def give_consent(name=None):
    """Record the logged-in user's consent and continue to the task description."""
    set_user_consent(session['user_id'])
    return task_description()
@app.route('/disagree')
def reject_consent(name=None):
    """Delete the declining user's account and show the thank-you page."""
    flash('No worries; we have deleted your participation key. If you reconsider your participation please register anew.')
    remove_user(session['user_id'])
    return render_template('study_thank_you.html', name=name, data={})
@app.route('/questionnaire')
def questionnaire(name=None):
    """Render the closing questionnaire page."""
    return render_template('study_questionnaire.html', name=name, data={})
@app.route('/thank_you')
def thank_you(name=None):
    """Render the thank-you page for logged-in users; otherwise the landing page."""
    if session['logged_in']:
        return render_template('study_thank_you.html', name=name, data={})
    return index()
@app.route('/intro_done')
def intro_done(name=None):
    """Render the intro-finished page for logged-in users; otherwise the landing page."""
    if session['logged_in']:
        return render_template('study_intro_done.html', name=name, data={})
    return index()
@app.route('/register')
def registrate(name=None):
    """Render the user registration page."""
    return render_template('study_registrate.html', name=name, data={})
# Add a new user
@app.route('/create_user', methods=['POST'])
def create_user(name=None):
    """Register the submitted username; continue to the study on success,
    back to the signup form when the name is taken."""
    username = request.form['username']
    if add_user(username):
        flash('Registrated user {}. Thank you for registering!'.format(username))
        return study()
    flash('Username {} is already taken. Please select a different one!'.format(username))
    return registrate()
# Login
@app.route('/login', methods=['POST', 'GET'])
def login(name=None):
    """Log a registered user in and route them to the appropriate page.

    Fix: a valid user who had not yet finished the study fell through all
    branches and the view returned None (an invalid Flask response); such
    users are now routed to the study page.
    """
    if request.method == 'POST':
        username = request.form['username']
        if check_user_credentials(username):
            session['logged_in'] = True
            session['user_id'] = get_user_id(username)
            if check_user_is_done(session['user_id']):
                if check_user_is_done_questionnaire(session['user_id']):
                    return thank_you()
                else:
                    return questionnaire()
            # Active participant: continue with the annotation study.
            return study()
        else:
            flash('Non existing user!')
            return index()
    else:
        return render_template('study_login.html')
# Logout
@app.route('/logout', methods=['POST', 'GET'])
def logout(name=None):
    """Clear the logged-in flag and return to the landing page."""
    session['logged_in'] = False
    return index()
# Get the next ctest
@app.route('/study', methods=['GET', 'POST'])
def study(name=None):
    """Serve the next annotation instance according to the user's state.

    Flow: require login and consent; finished users go to the
    questionnaire or thank-you page; users still in the intro get intro
    instances (pretraining the model for non-static strategies once they
    have more than 5 annotations); everyone else gets the next instance
    from their static or model-based strategy.

    NOTE(review): in the intro branch with a static strategy, `data` is
    only assigned by get_intro_strategy; the later `if not data:` relies
    on that assignment always happening — confirm no path reaches it with
    `data` unbound.
    """
    if not session.get('logged_in'):
        return render_template('study_login.html')
    if not check_user_consent(session['user_id']):
        return informed_consent()
    if check_user_is_done(session['user_id']):
        if check_user_is_done_questionnaire(session['user_id']):
            return thank_you()
        else:
            return questionnaire()
    if not check_user_is_done_intro(session['user_id']):
        data = get_intro_strategy(session['user_id'])
        static_strategies = ['random', 'easy_first', 'bert_mlm']
        if not get_strategy_name(session['user_id']) in static_strategies:
            if get_num_finished_annotations(session['user_id']) > 5:
                train_model(session['user_id'])  # pretrain model for later on
    else:
        static_strategies = ['random', 'easy_first', 'bert_mlm']
        if get_strategy_name(session['user_id']) in static_strategies:
            data = get_static_strategy(session['user_id'])
        else:
            data = get_model_strategy(session['user_id'])
            train_model(session['user_id'])
    if not data:
        # A falsy data dict signals "no more instances" for the current phase.
        if not check_user_is_done(session['user_id']):
            return intro_done()
        else:
            if check_user_is_done_questionnaire(session['user_id']):
                return thank_you()
            else:
                return questionnaire()
    else:
        return render_template('study_task.html', name=name, data=data)
@app.route('/annotate_task', methods=['POST'])
def annotate_task(name=None):
    """Store the submitted annotation and serve the next instance.

    Reads the annotation form (tweet id, chosen/true misconception ids,
    difficulty, order, start timestamp), computes the time taken, persists
    the result, then fetches the next instance via the user's current
    phase/strategy — mirroring the dispatch in study().
    """
    if not session.get('logged_in'):
        return render_template('study_login.html')
    # Get the results
    tweet_id = request.form['tweet_id']
    mc_id = int(request.form['mcid'])
    true_mc_id = int(request.form['true_mcid'])
    order = int(request.form['order'])
    difficulty = request.form['difficulty']
    false_mc_id = request.form['false_mcid']
    started = request.form['startingdate']
    finish = datetime.today()
    # Annotation time = now minus the timestamp embedded in the form.
    time_taken = finish - datetime.strptime(started, '%Y-%m-%d %H:%M:%S.%f')
    store_results(session['user_id'],
                  tweet_id,
                  difficulty,
                  true_mc_id,
                  mc_id,
                  false_mc_id,
                  time_taken.total_seconds(),
                  order)
    # Fetch the next instance for the user's current phase and strategy.
    if not check_user_is_done_intro(session['user_id']):
        data = get_intro_strategy(session['user_id'])
    else:
        static_strategies = ['random', 'easy_first', 'bert_mlm']
        if get_strategy_name(session['user_id']) in static_strategies:
            data = get_static_strategy(session['user_id'])
        else:
            data = get_model_strategy(session['user_id'])
    if not data:
        # A falsy data dict signals "no more instances" for the current phase.
        if not check_user_is_done(session['user_id']):
            return intro_done()
        else:
            if check_user_is_done_questionnaire(session['user_id']):
                return thank_you()
            else:
                return questionnaire()
    else:
        return render_template('study_task.html', name=name, data=data)
@app.route('/finish_questionnaire', methods=['POST'])
def finish_questionnaire(name=None):
    """Persist the closing questionnaire answers and show the thank-you page."""
    # Map stored field names to the form field names they come from.
    form_fields = {
        'difficulty': 'difficulty',
        'differences': 'differences',
        'ordering': 'ordering',
        'ordering_other': 'ordering-comment',
        'proficiency': 'cefr',
        'years': 'years',
        'native_tongue': 'native-tongue',
        'annotator': 'annotator-experience',
        'conductor': 'conductor-experience',
    }
    data = {key: request.form[source] for key, source in form_fields.items()}
    store_questionnaire_results(session['user_id'], data)
    return thank_you()
if __name__ == '__main__':
    # Development server only (debug=True); use a proper WSGI server in production.
    app.run(host="0.0.0.0", debug=True)
| [
"flask.render_template",
"numpy.random.default_rng",
"logging.debug",
"flask.Flask",
"filelock.FileLock",
"flask.session.delete",
"numpy.array",
"numpy.argsort",
"datetime.datetime.today",
"sklearn.gaussian_process.kernels.WhiteKernel",
"logging.info",
"sqlalchemy.orm.sessionmaker",
"flask.f... | [((841, 879), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stderr'}), '(stream=sys.stderr)\n', (860, 879), False, 'import logging\n'), ((887, 902), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (892, 902), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((1326, 1350), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', (['engine'], {}), '(engine)\n', (1342, 1350), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((3460, 3485), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (3472, 3485), False, 'from sqlalchemy.orm import sessionmaker\n'), ((3695, 3710), 'flask.session.close', 'session.close', ([], {}), '()\n', (3708, 3710), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((4755, 4771), 'flask.session.commit', 'session.commit', ([], {}), '()\n', (4769, 4771), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((4776, 4791), 'flask.session.close', 'session.close', ([], {}), '()\n', (4789, 4791), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((4937, 4957), 'flask.session.delete', 'session.delete', (['user'], {}), '(user)\n', (4951, 4957), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((4962, 4978), 'flask.session.commit', 'session.commit', ([], {}), '()\n', (4976, 4978), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((4983, 4998), 'flask.session.close', 'session.close', ([], {}), '()\n', (4996, 4998), False, 'from flask import Flask, request, session, g, redirect, 
url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((5167, 5182), 'flask.session.close', 'session.close', ([], {}), '()\n', (5180, 5182), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((5347, 5362), 'flask.session.close', 'session.close', ([], {}), '()\n', (5360, 5362), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((5601, 5617), 'flask.session.commit', 'session.commit', ([], {}), '()\n', (5615, 5617), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((5622, 5637), 'flask.session.close', 'session.close', ([], {}), '()\n', (5635, 5637), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((5896, 5911), 'flask.session.close', 'session.close', ([], {}), '()\n', (5909, 5911), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((6175, 6190), 'flask.session.close', 'session.close', ([], {}), '()\n', (6188, 6190), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((6358, 6373), 'flask.session.close', 'session.close', ([], {}), '()\n', (6371, 6373), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((6613, 6629), 'flask.session.commit', 'session.commit', ([], {}), '()\n', (6627, 6629), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((6804, 6819), 'flask.session.close', 'session.close', ([], {}), '()\n', (6817, 6819), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, 
jsonify\n'), ((7083, 7099), 'flask.session.commit', 'session.commit', ([], {}), '()\n', (7097, 7099), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((7258, 7273), 'flask.session.close', 'session.close', ([], {}), '()\n', (7271, 7273), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((7525, 7541), 'flask.session.commit', 'session.commit', ([], {}), '()\n', (7539, 7541), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((7546, 7561), 'flask.session.close', 'session.close', ([], {}), '()\n', (7559, 7561), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((8234, 8250), 'flask.session.commit', 'session.commit', ([], {}), '()\n', (8248, 8250), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((8255, 8270), 'flask.session.close', 'session.close', ([], {}), '()\n', (8268, 8270), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((8422, 8437), 'flask.session.close', 'session.close', ([], {}), '()\n', (8435, 8437), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((8839, 8854), 'flask.session.close', 'session.close', ([], {}), '()\n', (8852, 8854), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((9286, 9301), 'flask.session.close', 'session.close', ([], {}), '()\n', (9299, 9301), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((9781, 9796), 'flask.session.close', 
'session.close', ([], {}), '()\n', (9794, 9796), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((11179, 11195), 'flask.session.commit', 'session.commit', ([], {}), '()\n', (11193, 11195), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((11200, 11215), 'flask.session.close', 'session.close', ([], {}), '()\n', (11213, 11215), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((18528, 18583), 'flask.render_template', 'render_template', (['"""study_index.html"""'], {'name': 'name', 'data': '{}'}), "('study_index.html', name=name, data={})\n", (18543, 18583), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((18672, 18738), 'flask.render_template', 'render_template', (['"""study_task_description.html"""'], {'name': 'name', 'data': '{}'}), "('study_task_description.html', name=name, data={})\n", (18687, 18738), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((18816, 18882), 'flask.render_template', 'render_template', (['"""study_cefr_description.html"""'], {'name': 'name', 'data': '{}'}), "('study_cefr_description.html', name=name, data={})\n", (18831, 18882), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((18960, 19028), 'flask.render_template', 'render_template', (['"""study_informed_consent_1.html"""'], {'name': 'name', 'data': '{}'}), "('study_informed_consent_1.html', name=name, data={})\n", (18975, 19028), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((19096, 19164), 'flask.render_template', 'render_template', 
(['"""study_informed_consent_2.html"""'], {'name': 'name', 'data': '{}'}), "('study_informed_consent_2.html', name=name, data={})\n", (19111, 19164), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((19232, 19300), 'flask.render_template', 'render_template', (['"""study_informed_consent_3.html"""'], {'name': 'name', 'data': '{}'}), "('study_informed_consent_3.html', name=name, data={})\n", (19247, 19300), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((19491, 19620), 'flask.flash', 'flash', (['"""No worries; we have deleted your participation key. If you reconsider your participation please register anew."""'], {}), "(\n 'No worries; we have deleted your participation key. If you reconsider your participation please register anew.'\n )\n", (19496, 19620), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((19658, 19717), 'flask.render_template', 'render_template', (['"""study_thank_you.html"""'], {'name': 'name', 'data': '{}'}), "('study_thank_you.html', name=name, data={})\n", (19673, 19717), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((19789, 19852), 'flask.render_template', 'render_template', (['"""study_questionnaire.html"""'], {'name': 'name', 'data': '{}'}), "('study_questionnaire.html', name=name, data={})\n", (19804, 19852), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((20291, 20351), 'flask.render_template', 'render_template', (['"""study_registrate.html"""'], {'name': 'name', 'data': '{}'}), "('study_registrate.html', name=name, data={})\n", (20306, 20351), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, 
flash, Blueprint, jsonify\n'), ((23663, 23679), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (23677, 23679), False, 'from datetime import datetime, timedelta, date\n'), ((1554, 1575), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1573, 1575), False, 'import tempfile\n'), ((2825, 2872), 'csv.reader', 'csv.reader', (['lines'], {'delimiter': '""";"""', 'quotechar': '"""\\""""'}), '(lines, delimiter=\';\', quotechar=\'"\')\n', (2835, 2872), False, 'import csv\n'), ((3895, 3911), 'flask.session.commit', 'session.commit', ([], {}), '()\n', (3909, 3911), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((3920, 3935), 'flask.session.close', 'session.close', ([], {}), '()\n', (3933, 3935), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((4210, 4226), 'flask.session.commit', 'session.commit', ([], {}), '()\n', (4224, 4226), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((4235, 4250), 'flask.session.close', 'session.close', ([], {}), '()\n', (4248, 4250), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((11533, 11590), 'logging.info', 'logging.info', (['"""Got sorting request for [%s]"""', 'self.userid'], {}), "('Got sorting request for [%s]', self.userid)\n", (11545, 11590), False, 'import logging\n'), ((11833, 11885), 'logging.info', 'logging.info', (['"""Got sorting request for [%s]"""', 'userid'], {}), "('Got sorting request for [%s]', userid)\n", (11845, 11885), False, 'import logging\n'), ((12370, 12456), 'logging.info', 'logging.info', (['"""Got training request for [%s] in iteration [%s]"""', 'userid', 'iteration'], {}), "('Got training request for [%s] in iteration [%s]', userid,\n iteration)\n", (12382, 12456), 
False, 'import logging\n'), ((14076, 14110), 'joblib.dump', 'joblib.dump', (['model', 'tmp_model_path'], {}), '(model, tmp_model_path)\n', (14087, 14110), False, 'import joblib\n'), ((14119, 14157), 'os.replace', 'os.replace', (['tmp_model_path', 'model_path'], {}), '(tmp_model_path, model_path)\n', (14129, 14157), False, 'import os\n'), ((14403, 14433), 'filelock.FileLock', 'FileLock', (['lock_path'], {'timeout': '(1)'}), '(lock_path, timeout=1)\n', (14411, 14433), False, 'from filelock import FileLock, Timeout\n'), ((14560, 14591), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_name'], {}), '(model_name)\n', (14579, 14591), False, 'from sentence_transformers import SentenceTransformer\n'), ((14614, 14644), 'diskcache.Cache', 'Cache', (['(PATH_CACHE / model_name)'], {}), '(PATH_CACHE / model_name)\n', (14619, 14644), False, 'from diskcache import Cache\n'), ((15017, 15033), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (15025, 15033), True, 'import numpy as np\n'), ((15338, 15354), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (15352, 15354), False, 'from datetime import datetime, timedelta, date\n'), ((15950, 15981), 'random.shuffle', 'random.shuffle', (["data['answers']"], {}), "(data['answers'])\n", (15964, 15981), False, 'import random\n'), ((16261, 16277), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (16275, 16277), False, 'from datetime import datetime, timedelta, date\n'), ((16873, 16904), 'random.shuffle', 'random.shuffle', (["data['answers']"], {}), "(data['answers'])\n", (16887, 16904), False, 'import random\n'), ((17798, 17814), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (17812, 17814), False, 'from datetime import datetime, timedelta, date\n'), ((18322, 18353), 'random.shuffle', 'random.shuffle', (["data['answers']"], {}), "(data['answers'])\n", (18336, 18353), False, 'import random\n'), ((19949, 20008), 'flask.render_template', 'render_template', 
(['"""study_thank_you.html"""'], {'name': 'name', 'data': '{}'}), "('study_thank_you.html', name=name, data={})\n", (19964, 20008), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((20139, 20199), 'flask.render_template', 'render_template', (['"""study_intro_done.html"""'], {'name': 'name', 'data': '{}'}), "('study_intro_done.html', name=name, data={})\n", (20154, 20199), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((21419, 21454), 'flask.render_template', 'render_template', (['"""study_login.html"""'], {}), "('study_login.html')\n", (21434, 21454), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((21686, 21710), 'flask.session.get', 'session.get', (['"""logged_in"""'], {}), "('logged_in')\n", (21697, 21710), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((21727, 21762), 'flask.render_template', 'render_template', (['"""study_login.html"""'], {}), "('study_login.html')\n", (21742, 21762), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((23108, 23164), 'flask.render_template', 'render_template', (['"""study_task.html"""'], {'name': 'name', 'data': 'data'}), "('study_task.html', name=name, data=data)\n", (23123, 23164), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((23254, 23278), 'flask.session.get', 'session.get', (['"""logged_in"""'], {}), "('logged_in')\n", (23265, 23278), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((23295, 23330), 'flask.render_template', 'render_template', 
(['"""study_login.html"""'], {}), "('study_login.html')\n", (23310, 23330), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((23706, 23756), 'datetime.datetime.strptime', 'datetime.strptime', (['started', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(started, '%Y-%m-%d %H:%M:%S.%f')\n", (23723, 23756), False, 'from datetime import datetime, timedelta, date\n'), ((24780, 24836), 'flask.render_template', 'render_template', (['"""study_task.html"""'], {'name': 'name', 'data': 'data'}), "('study_task.html', name=name, data=data)\n", (24795, 24836), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((973, 1028), 'os.path.join', 'os.path.join', (['app.root_path', '"""annotation-curriculum.db"""'], {}), "(app.root_path, 'annotation-curriculum.db')\n", (985, 1028), False, 'import os\n'), ((2739, 2776), 'os.path.join', 'os.path.join', (['PATH_ANNOTATION', 'infile'], {}), '(PATH_ANNOTATION, infile)\n', (2751, 2776), False, 'import os\n'), ((4000, 4016), 'flask.session.commit', 'session.commit', ([], {}), '()\n', (4014, 4016), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((4025, 4040), 'flask.session.close', 'session.close', ([], {}), '()\n', (4038, 4040), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((4650, 4666), 'flask.session.commit', 'session.commit', ([], {}), '()\n', (4664, 4666), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((11975, 12025), 'logging.info', 'logging.info', (['"""No model for user [%s] yet"""', 'userid'], {}), "('No model for user [%s] yet', userid)\n", (11987, 12025), False, 'import logging\n'), ((12086, 12109), 'numpy.random.default_rng', 
'np.random.default_rng', ([], {}), '()\n', (12107, 12109), True, 'import numpy as np\n'), ((12260, 12273), 'numpy.argsort', 'np.argsort', (['y'], {}), '(y)\n', (12270, 12273), True, 'import numpy as np\n'), ((13655, 13704), 'logging.debug', 'logging.debug', (['"""Model found for [%s]"""', 'model_path'], {}), "('Model found for [%s]', model_path)\n", (13668, 13704), False, 'import logging\n'), ((13724, 13747), 'joblib.load', 'joblib.load', (['model_path'], {}), '(model_path)\n', (13735, 13747), False, 'import joblib\n'), ((13774, 13826), 'logging.debug', 'logging.debug', (['"""No model found for [%s]"""', 'model_path'], {}), "('No model found for [%s]', model_path)\n", (13787, 13826), False, 'import logging\n'), ((21321, 21348), 'flask.flash', 'flash', (['"""Non existing user!"""'], {}), "('Non existing user!')\n", (21326, 21348), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((1370, 1384), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1374, 1384), False, 'from pathlib import Path\n'), ((1437, 1451), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1441, 1451), False, 'from pathlib import Path\n'), ((1494, 1508), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1498, 1508), False, 'from pathlib import Path\n'), ((13405, 13500), 'logging.info', 'logging.info', (['"""Already training for user [%s], skipping iteration [%s]!"""', 'userid', 'iteration'], {}), "('Already training for user [%s], skipping iteration [%s]!',\n userid, iteration)\n", (13417, 13500), False, 'import logging\n'), ((4882, 4902), 'flask.session.query', 'session.query', (['Users'], {}), '(Users)\n', (4895, 4902), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((5525, 5545), 'flask.session.query', 'session.query', (['Users'], {}), '(Users)\n', (5538, 5545), False, 'from flask import Flask, request, 
session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((6536, 6556), 'flask.session.query', 'session.query', (['Users'], {}), '(Users)\n', (6549, 6556), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((6992, 7012), 'flask.session.query', 'session.query', (['Users'], {}), '(Users)\n', (7005, 7012), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((7442, 7462), 'flask.session.query', 'session.query', (['Users'], {}), '(Users)\n', (7455, 7462), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((9009, 9035), 'flask.session.query', 'session.query', (['Annotations'], {}), '(Annotations)\n', (9022, 9035), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((9675, 9701), 'flask.session.query', 'session.query', (['Annotations'], {}), '(Annotations)\n', (9688, 9701), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((11088, 11108), 'flask.session.query', 'session.query', (['Users'], {}), '(Users)\n', (11101, 11108), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((13295, 13336), 'threading.Thread', 'threading.Thread', ([], {'target': '_fn', 'daemon': '(True)'}), '(target=_fn, daemon=True)\n', (13311, 13336), False, 'import threading\n'), ((3611, 3636), 'flask.session.query', 'session.query', (['Strategies'], {}), '(Strategies)\n', (3624, 3636), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((5106, 5126), 'flask.session.query', 'session.query', (['Users'], {}), '(Users)\n', (5119, 5126), False, 
'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((5284, 5304), 'flask.session.query', 'session.query', (['Users'], {}), '(Users)\n', (5297, 5304), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((5733, 5753), 'flask.session.query', 'session.query', (['Users'], {}), '(Users)\n', (5746, 5753), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((5816, 5841), 'flask.session.query', 'session.query', (['Strategies'], {}), '(Strategies)\n', (5829, 5841), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((6023, 6043), 'flask.session.query', 'session.query', (['Users'], {}), '(Users)\n', (6036, 6043), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((6106, 6131), 'flask.session.query', 'session.query', (['Strategies'], {}), '(Strategies)\n', (6119, 6131), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((6294, 6314), 'flask.session.query', 'session.query', (['Users'], {}), '(Users)\n', (6307, 6314), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((6726, 6746), 'flask.session.query', 'session.query', (['Users'], {}), '(Users)\n', (6739, 6746), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((7188, 7208), 'flask.session.query', 'session.query', (['Users'], {}), '(Users)\n', (7201, 7208), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((8342, 8371), 'flask.session.query', 
'session.query', (['Misconceptions'], {}), '(Misconceptions)\n', (8355, 8371), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((8564, 8590), 'flask.session.query', 'session.query', (['Annotations'], {}), '(Annotations)\n', (8577, 8590), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((3830, 3850), 'flask.session.query', 'session.query', (['Users'], {}), '(Users)\n', (3843, 3850), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((4145, 4165), 'flask.session.query', 'session.query', (['Users'], {}), '(Users)\n', (4158, 4165), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((8707, 8727), 'flask.session.query', 'session.query', (['Users'], {}), '(Users)\n', (8720, 8727), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((4368, 4393), 'flask.session.query', 'session.query', (['Strategies'], {}), '(Strategies)\n', (4381, 4393), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((4685, 4705), 'flask.session.query', 'session.query', (['Users'], {}), '(Users)\n', (4698, 4705), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((9145, 9166), 'flask.session.query', 'session.query', (['Tweets'], {}), '(Tweets)\n', (9158, 9166), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, Blueprint, jsonify\n'), ((12907, 12919), 'sklearn.gaussian_process.kernels.DotProduct', 'DotProduct', ([], {}), '()\n', (12917, 12919), False, 'from sklearn.gaussian_process.kernels import DotProduct, 
WhiteKernel\n'), ((12922, 12935), 'sklearn.gaussian_process.kernels.WhiteKernel', 'WhiteKernel', ([], {}), '()\n', (12933, 12935), False, 'from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel\n')] |
import math
import numpy as np
from scipy.sparse import csc_matrix
def lag(mat, lagged, N, lag_number, fill=np.nan):
    """Shift each of the N stacked panels of ``mat`` down by ``lag_number`` rows.

    ``mat`` is interpreted as N equal-height panels stacked vertically.
    For every panel, the first ``lag_number`` rows of the corresponding
    panel of ``lagged`` are set to ``fill`` and the remaining rows receive
    the panel's leading rows.  ``lagged`` is modified in place.

    Note: the default was changed from ``np.NaN`` to ``np.nan`` (the same
    value); the ``np.NaN`` alias was removed in NumPy 2.0.

    :param mat: source matrix, shape (N * height, n_cols)
    :param lagged: destination matrix of the same shape, written in place
    :param N: number of stacked panels
    :param lag_number: number of rows to shift by
    :param fill: value used for rows that have no predecessor
    """
    height = mat.shape[0] // N
    for i in range(N):
        start = i * height
        stop = start + height
        # NumPy slices are views, so writing into them updates `lagged`.
        panel = mat[start:stop, :]
        out = lagged[start:stop, :]
        out[:lag_number, :] = fill
        out[lag_number:, :] = panel[:height - lag_number, :]
def get_first_diff_table(ori_arr: np.ndarray, N: int) -> np.ndarray:
    """Return the panel-wise first difference of ``ori_arr``.

    ``ori_arr`` holds N equal-height panels stacked vertically.  Within
    each panel, row j of the result is ``ori[j] - ori[j - 1]``; the first
    row of every panel has no predecessor and is set to NaN.

    :param ori_arr: stacked data, shape (N * height, n_cols)
    :param N: number of stacked panels
    :return: float64 array of the same shape as ``ori_arr``
    """
    num_rows, num_cols = ori_arr.shape
    height = num_rows // N
    tbr_arr = np.empty((num_rows, num_cols), dtype='float64')
    for i in range(N):
        start = i * height
        stop = start + height
        block = ori_arr[start:stop, :]
        tbr_arr[start, :] = np.nan
        # Direct vectorised difference; avoids allocating and filling an
        # intermediate lagged copy of the whole matrix.
        tbr_arr[start + 1:stop, :] = block[1:, :] - block[:-1, :]
    return tbr_arr
def get_fod_table(ori_arr: np.ndarray, N: int):
    """Panel-wise forward-deviation transform of ``ori_arr``.

    ``ori_arr`` holds N equal-height panels stacked vertically.  Rows of
    each panel are processed bottom-up while keeping a running sum/count of
    the valid (non-NaN) rows already seen below; row j's deviation is
    ``(ori[j] - mean_of_valid_later_rows) * sqrt(c / (c + 1))`` where c is
    the number of valid later rows.  When row j+1 contains a NaN it is
    excluded from the running statistics and row j copies row j+1's value.
    The column is finally shifted down one row, so the first row of every
    panel is NaN.  This appears to implement the forward orthogonal
    deviations (Arellano-Bover) transform used in panel GMM — TODO confirm.

    :param ori_arr: stacked data, shape (N * height, n_cols)
    :param N: number of stacked panels
    :return: float64 array of the same shape as ``ori_arr``
    """
    num_rows = ori_arr.shape[0]
    height = int(num_rows / N)
    num_cols = ori_arr.shape[1]
    tbr = np.empty((num_rows, num_cols), dtype='float64')
    next_sum = np.empty((1, num_cols), dtype='float64')
    this_sum = np.empty((1, num_cols), dtype='float64')
    this_avg = np.empty((1, num_cols), dtype='float64')
    temp = np.empty((height, num_cols), dtype='float64')
    tbr[:] = np.NaN
    this_sum[:] = np.NaN
    for i in range(N):
        # Views over the i-th panel of the input and the output.
        ori_i = ori_arr[i * height:(i * height + height), :]
        tbr_i = tbr[i * height:(i * height + height), :]
        temp.fill(np.NaN)
        next_sum.fill(np.NaN)
        next_count = 0
        # Walk the panel bottom-up; `next_sum`/`next_count` summarise the
        # valid rows strictly below the current row j.
        for j in range(height - 2, -1, -1):
            if np.isnan(ori_i[range(j + 1, j + 2), :]).any(axis=1):
                # Row j+1 is invalid: keep the running stats and inherit
                # the previously computed value.
                this_count = next_count
                this_sum = next_sum
                temp[j, :] = temp[j + 1, :]
            else:
                this_count = next_count + 1
                # nansum treats the initial NaN accumulator as zero.
                this_sum = np.nansum(np.vstack([next_sum, ori_i[j + 1, :]]), axis=0)
                this_avg = this_sum * (1.0 / this_count)
                temp[j, :] = (ori_i[j, :] - this_avg) * math.sqrt(this_count / (this_count + 1))
            next_sum = this_sum
            next_count = this_count
        # Shift the result down one row within the panel.
        tbr_i[0, :] = np.NaN
        tbr_i[range(1, height), :] = temp[range(0, height - 1), :]
    return tbr
def sum_product(listOflist, n_rows):
    """Sum over rows of the matrix products of per-row factor chains.

    Each element of ``listOflist`` is either a list of length ``n_rows``
    (one matrix per row) or a single ndarray reused for every row.  For
    each row ``i`` the factors are multiplied with
    :func:`numpy.linalg.multi_dot` and the products are accumulated.

    :param listOflist: factors; lists are indexed per row, arrays reused
    :param n_rows: number of rows to accumulate over
    :return: accumulated matrix product sum (``None`` when ``n_rows == 0``)
    :raises TypeError: if a factor is neither a list nor an ndarray
        (the original silently skipped such factors despite a
        "throw error" comment marking the branch)
    """
    tbr = None
    for i in range(n_rows):
        factors = []
        for item in listOflist:
            if isinstance(item, list):
                factors.append(item[i])
            elif isinstance(item, np.ndarray):
                factors.append(item)
            else:
                raise TypeError(
                    'sum_product expects list or ndarray factors, got %s'
                    % type(item).__name__)
        temp = np.linalg.multi_dot(factors)
        tbr = temp if tbr is None else tbr + temp
    return tbr
def Windmeijer(M2, _M2_XZ_W2, W2_inv, zs2, vcov_step1, Cx_list, z_list, residual1, N):
    """Finite-sample correction of the two-step GMM covariance matrix.

    Builds the correction matrix D column by column (one column per
    regressor) from the per-unit moment contributions, then returns the
    corrected covariance ``N*M2 + N*D@M2 + N*(D@M2)' + D@vcov_step1@D'``.
    Presumably the Windmeijer (2005) correction for two-step panel GMM,
    as the function name suggests — TODO confirm against the reference.

    :param M2: two-step covariance building block
    :param _M2_XZ_W2: precomputed M2 @ X'Z @ W2 factor
    :param W2_inv: inverse of the second-step weighting matrix
    :param zs2: second-step moment vector
    :param vcov_step1: one-step covariance matrix
    :param Cx_list: stacked regressor matrices, N panels of equal height
    :param residual1: stacked one-step residuals (first column used)
    :param z_list: stacked instrument matrices, N panels of equal height
    :param N: number of cross-sectional units
    :return: corrected covariance matrix, same shape as M2
    """
    D = np.empty((M2.shape[0], M2.shape[1]), dtype='float64')
    x_height = int(Cx_list.shape[0] / N)
    z_height = int(z_list.shape[0] / N)
    for j in range(0, Cx_list.shape[1]):
        # Accumulate z'(x_j u' + u x_j')z over the N units.
        for i in range(0, N):
            x = Cx_list[(i * x_height):(i * x_height + x_height), :]
            u = residual1[(i * x_height):(i * x_height + x_height), 0:1]
            z = z_list[(i * z_height):(i * z_height + z_height), :]
            xu = np.matmul(x[:, j:(j + 1)], u.transpose())
            temp = z @ (xu + xu.transpose()) @ z.transpose()
            # temp_zxuzt=z@ xu @ z.transpose()
            # temp=temp_zxuzt + temp_zxuzt.transpose()
            if i == 0:
                zxz = temp
            else:
                zxz += temp
        # Partial derivative of the moment condition w.r.t. coefficient j.
        partial_dir = (-1.0 / N) * zxz
        Dj = np.linalg.multi_dot([_M2_XZ_W2, partial_dir, W2_inv, zs2])
        Dj = (-1) * Dj
        D[:, j:(j + 1)] = Dj
    # temp = np.multiply(N, M2) + np.multiply(N, np.matmul(D, M2)) + np.multiply(N, np.matmul(M2, D.transpose()))
    temp_D_M2 = D @ M2
    temp = np.multiply(N, M2) + np.multiply(N, temp_D_M2) + np.multiply(N, temp_D_M2.transpose())
    temp = temp + np.matmul(np.matmul(D, vcov_step1), D.transpose())
    #
    return (temp)
def make_sparse_list(arr_list):
    """Convert every dense array in ``arr_list`` to a CSC sparse matrix.

    :param arr_list: sequence of dense 2-D arrays
    :return: list of :class:`scipy.sparse.csc_matrix`, one per input array
    """
    return [csc_matrix(dense) for dense in arr_list]
| [
"numpy.multiply",
"numpy.linalg.multi_dot",
"math.sqrt",
"numpy.zeros",
"numpy.empty",
"numpy.matmul",
"numpy.vstack",
"scipy.sparse.csc_matrix"
] | [((643, 690), 'numpy.zeros', 'np.zeros', (['(num_rows, num_cols)'], {'dtype': '"""float64"""'}), "((num_rows, num_cols), dtype='float64')\n", (651, 690), True, 'import numpy as np\n'), ((706, 753), 'numpy.zeros', 'np.zeros', (['(num_rows, num_cols)'], {'dtype': '"""float64"""'}), "((num_rows, num_cols), dtype='float64')\n", (714, 753), True, 'import numpy as np\n'), ((1010, 1057), 'numpy.empty', 'np.empty', (['(num_rows, num_cols)'], {'dtype': '"""float64"""'}), "((num_rows, num_cols), dtype='float64')\n", (1018, 1057), True, 'import numpy as np\n'), ((1074, 1114), 'numpy.empty', 'np.empty', (['(1, num_cols)'], {'dtype': '"""float64"""'}), "((1, num_cols), dtype='float64')\n", (1082, 1114), True, 'import numpy as np\n'), ((1131, 1171), 'numpy.empty', 'np.empty', (['(1, num_cols)'], {'dtype': '"""float64"""'}), "((1, num_cols), dtype='float64')\n", (1139, 1171), True, 'import numpy as np\n'), ((1188, 1228), 'numpy.empty', 'np.empty', (['(1, num_cols)'], {'dtype': '"""float64"""'}), "((1, num_cols), dtype='float64')\n", (1196, 1228), True, 'import numpy as np\n'), ((1241, 1286), 'numpy.empty', 'np.empty', (['(height, num_cols)'], {'dtype': '"""float64"""'}), "((height, num_cols), dtype='float64')\n", (1249, 1286), True, 'import numpy as np\n'), ((3054, 3107), 'numpy.empty', 'np.empty', (['(M2.shape[0], M2.shape[1])'], {'dtype': '"""float64"""'}), "((M2.shape[0], M2.shape[1]), dtype='float64')\n", (3062, 3107), True, 'import numpy as np\n'), ((2818, 2848), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['list_temp'], {}), '(list_temp)\n', (2837, 2848), True, 'import numpy as np\n'), ((3871, 3929), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[_M2_XZ_W2, partial_dir, W2_inv, zs2]'], {}), '([_M2_XZ_W2, partial_dir, W2_inv, zs2])\n', (3890, 3929), True, 'import numpy as np\n'), ((4477, 4492), 'scipy.sparse.csc_matrix', 'csc_matrix', (['arr'], {}), '(arr)\n', (4487, 4492), False, 'from scipy.sparse import csc_matrix\n'), ((4139, 4157), 'numpy.multiply', 
'np.multiply', (['N', 'M2'], {}), '(N, M2)\n', (4150, 4157), True, 'import numpy as np\n'), ((4160, 4185), 'numpy.multiply', 'np.multiply', (['N', 'temp_D_M2'], {}), '(N, temp_D_M2)\n', (4171, 4185), True, 'import numpy as np\n'), ((4255, 4279), 'numpy.matmul', 'np.matmul', (['D', 'vcov_step1'], {}), '(D, vcov_step1)\n', (4264, 4279), True, 'import numpy as np\n'), ((1909, 1947), 'numpy.vstack', 'np.vstack', (['[next_sum, ori_i[j + 1, :]]'], {}), '([next_sum, ori_i[j + 1, :]])\n', (1918, 1947), True, 'import numpy as np\n'), ((2072, 2112), 'math.sqrt', 'math.sqrt', (['(this_count / (this_count + 1))'], {}), '(this_count / (this_count + 1))\n', (2081, 2112), False, 'import math\n')] |
from __future__ import division
import os
import numpy as np
from fdint import fdk, ifd1h
from ifg.units_converter import SiAtomicConverter
from ifg.utils import dump_to_csv
THRESHOLD = 1e10
def _1d_call(func, array, *args, **kwargs):
return func(array.reshape(-1), *args, **kwargs).reshape(array.shape)
def _fdk(array, k):
    """Evaluate the Fermi-Dirac integral of order ``k`` elementwise.

    Thin wrapper that swaps ``fdint.fdk``'s argument order so the array
    comes first, matching the calling convention of :func:`_1d_call`.
    """
    return fdk(k, array)
def get_chemical_potential(vv, tt, gbar=2.0, *args, **kwargs):
    # type: (np.ndarray, np.ndarray, float, list, dict) -> np.ndarray
    """Chemical potential mu of the ideal Fermi gas, in atomic units.

    :param vv: matrix of specific volumes (atomic units).
    :param tt: matrix of temperatures (atomic units).
    :param gbar: degeneracy factor, g = 2s + 1 for the IFG.
    :return: mu[i][j]; *i* indexes temperature, *j* indexes volume.
    """
    # ifd1h inverts the order-1/2 Fermi-Dirac integral: it maps the
    # reduced density sqrt(2)*pi^2 / (gbar * T^1.5 * V) to mu / T.
    reduced_density = np.sqrt(2) * np.pi ** 2 / (gbar * tt ** 1.5 * vv)
    return tt * _1d_call(ifd1h, reduced_density)
def get_F_potential(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs):
    # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray
    """Helmholtz free energy F of the ideal Fermi gas, in atomic units.

    :param vv: matrix of specific volumes (atomic units).
    :param tt: matrix of temperatures (atomic units).
    :param chemical_potential: chemical potential (atomic units).
    :param gbar: degeneracy factor, g = 2s + 1 for the IFG.
    :return: F[i][j]; *i* indexes temperature, *j* indexes volume.
    """
    y = chemical_potential / tt
    prefactor = gbar / np.sqrt(2.0) / np.pi ** 2 * tt ** 2.5 * vv
    bracket = y * _1d_call(_fdk, y, k=0.5) - 2.0 / 3.0 * _1d_call(_fdk, y, k=1.5)
    return prefactor * bracket
def get_pressure(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs):
    # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray
    """Pressure P of the ideal Fermi gas, in atomic units.

    :param vv: matrix of specific volumes (atomic units).
    :param tt: matrix of temperatures (atomic units).
    :param chemical_potential: chemical potential (atomic units).
    :param gbar: degeneracy factor, g = 2s + 1 for the IFG.
    :return: P[i][j]; *i* indexes temperature, *j* indexes volume.
    """
    y = chemical_potential / tt
    coefficient = gbar * np.sqrt(2) / (3 * np.pi ** 2)
    return coefficient * tt ** 2.5 * _1d_call(_fdk, y, k=1.5)
def get_energy(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs):
    # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray
    """Internal energy E of the ideal Fermi gas, in atomic units.

    :param vv: matrix of specific volumes (atomic units).
    :param tt: matrix of temperatures (atomic units).
    :param chemical_potential: chemical potential (atomic units).
    :param gbar: degeneracy factor, g = 2s + 1 for the IFG.
    :return: E[i][j]; *i* indexes temperature, *j* indexes volume.
    """
    y = chemical_potential / tt
    coefficient = gbar * vv / (np.sqrt(2) * np.pi ** 2)
    return coefficient * tt ** 2.5 * _1d_call(_fdk, y, k=1.5)
def get_entropy(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs):
    # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray
    """Get IFG entropy S in atomic units.

    :param vv: Matrix of specific volumes in atomic units.
    :param tt: Matrix of temperatures in atomic units.
    :param chemical_potential: Chemical potential in atomic units.
    :param gbar: degeneracy factor, for IFG g = 2s + 1
    :return: S[i][j] - Entropy in atomic units.
        *i*-th index is for temperature, *j*-th one is for volume
    """
    y = chemical_potential / tt
    # The exact expression loses precision (catastrophic cancellation in
    # the "-" term) at strong degeneracy, so elements with y >= THRESHOLD
    # use the low-temperature asymptotic instead.
    # Results are written back through the same boolean mask, which keeps
    # every element at its original grid position; the previous
    # concatenate-and-reshape approach scrambled positions whenever both
    # regimes occurred in the same grid.
    low = y < THRESHOLD
    high = ~low
    y_low = y[low]
    S = np.empty_like(y)
    # Moderate degeneracy: exact Fermi-Dirac expression.
    S[low] = (
        -gbar
        * np.sqrt(2)
        / (6 * np.pi ** 2)
        * tt[low] ** (3 / 2)
        * vv[low]
        * (
            3 * y_low * _1d_call(_fdk, y_low, k=1 / 2)
            - 5 * _1d_call(_fdk, y_low, k=3 / 2)
        )
    )
    # Strong degeneracy: Sommerfeld-type low-temperature asymptotic.
    S[high] = (gbar * np.pi / 6) ** (2 / 3) * tt[high] * vv[high] ** (2 / 3)
    return S
def get_heat_capacity_volume(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs):
    # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray
    """Get IFG heat capacity C_V in atomic units.

    :param vv: Matrix of specific volumes in atomic units.
    :param tt: Matrix of temperatures in atomic units.
    :param chemical_potential: Chemical potential in atomic units.
    :param gbar: degeneracy factor, for IFG g = 2s + 1
    :return: C_V[i][j] - C_V in atomic units.
        *i*-th index is for temperature, *j*-th one is for volume
    """
    y = chemical_potential / tt
    # The exact expression loses precision ("-" cancellation) at strong
    # degeneracy; elements with y >= THRESHOLD use the low-temperature
    # asymptotic.  Mask writeback keeps every element at its original grid
    # position (the previous concatenate-and-reshape scrambled positions
    # whenever both regimes occurred in one grid).
    low = y < THRESHOLD
    high = ~low
    y_low = y[low]
    # Hoist the Fermi-Dirac integrals used more than once.
    fd_m12 = _1d_call(_fdk, y_low, k=-1 / 2)
    fd_12 = _1d_call(_fdk, y_low, k=1 / 2)
    fd_32 = _1d_call(_fdk, y_low, k=3 / 2)
    C_V = np.empty_like(y)
    C_V_low = 5 * fd_m12 * fd_32
    C_V_low -= 9 * fd_12 ** 2
    C_V_low *= gbar * np.sqrt(2) / (4 * np.pi ** 2) * tt[low] ** (3 / 2) * vv[low]
    C_V_low /= fd_m12
    C_V[low] = C_V_low
    # Strong degeneracy: low-temperature asymptotic.
    C_V[high] = (gbar * np.pi / 6) ** (2 / 3) * tt[high] * vv[high] ** (2 / 3)
    return C_V
def get_heat_capacity_pressure(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs):
    # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray
    """Get IFG heat capacity C_P in atomic units.

    :param vv: Matrix of specific volumes in atomic units.
    :param tt: Matrix of temperatures in atomic units.
    :param chemical_potential: Chemical potential in atomic units.
    :param gbar: degeneracy factor, for IFG g = 2s + 1
    :return: C_P[i][j] - C_P in atomic units.
        *i*-th index is for temperature, *j*-th one is for volume
    """
    y = chemical_potential / tt
    # The exact expression loses precision ("-" cancellation) at strong
    # degeneracy; elements with y >= THRESHOLD use the low-temperature
    # asymptotic.  Mask writeback keeps every element at its original grid
    # position (the previous concatenate-and-reshape scrambled positions
    # whenever both regimes occurred in one grid).
    low = y < THRESHOLD
    high = ~low
    y_low = y[low]
    # Hoist the Fermi-Dirac integrals used more than once.
    fd_m12 = _1d_call(_fdk, y_low, k=-1 / 2)
    fd_12 = _1d_call(_fdk, y_low, k=1 / 2)
    fd_32 = _1d_call(_fdk, y_low, k=3 / 2)
    C_P = np.empty_like(y)
    C_P_low = 5 * gbar * np.sqrt(2) / (36 * np.pi ** 2) * tt[low] ** (3 / 2) * vv[low]
    C_P_low *= 5 * fd_m12 * fd_32 - 9 * fd_12 ** 2
    C_P_low *= fd_32 / fd_12 ** 2
    C_P[low] = C_P_low
    # Strong degeneracy: low-temperature asymptotic.
    C_P[high] = (gbar * np.pi / 6) ** (2 / 3) * tt[high] * vv[high] ** (2 / 3)
    return C_P
def get_sound_speed_temperature(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs):
    # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray
    """Isothermal sound speed C_T of the ideal Fermi gas, in atomic units.

    :param vv: matrix of specific volumes (atomic units).
    :param tt: matrix of temperatures (atomic units).
    :param chemical_potential: chemical potential (atomic units).
    :param gbar: degeneracy factor, g = 2s + 1 for the IFG.
    :return: C_T[i][j]; *i* indexes temperature, *j* indexes volume.
    """
    y = chemical_potential / tt
    numerator = (
        2 ** (1 / 4)
        * np.sqrt(gbar)
        / np.pi
        * np.sqrt(vv)
        * tt ** (5 / 4)
        * _1d_call(_fdk, y, k=1 / 2)
    )
    return numerator / np.sqrt(_1d_call(_fdk, y, k=-1 / 2))
def get_sound_speed_entropy(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs):
    # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray
    """Adiabatic sound speed C_S of the ideal Fermi gas, in atomic units.

    :param vv: matrix of specific volumes (atomic units).
    :param tt: matrix of temperatures (atomic units).
    :param chemical_potential: chemical potential (atomic units).
    :param gbar: degeneracy factor, g = 2s + 1 for the IFG.
    :return: C_S[i][j]; *i* indexes temperature, *j* indexes volume.
    """
    y = chemical_potential / tt
    prefactor = np.sqrt(5) * np.sqrt(gbar) * 2 ** (1 / 4) / (3 * np.pi)
    return prefactor * tt ** (5 / 4) * np.sqrt(vv * _1d_call(_fdk, y, k=3 / 2))
def get_all_properties(vv, tt, gbar=2.0, csv_dir=None):
    # type: (np.ndarray, np.ndarray, float, str) -> dict
    """Calculate all properties and save them to csv file.

    :param vv: Matrix of specific volumes in atomic units.
    :param tt: Matrix of temperatures in atomic units.
    :param gbar: degeneracy factor, for IFG g = 2s + 1
    :param csv_dir: Directory to save csv files to
    :return: dict {'property_name': ndarray}
    """
    # Compute the chemical potential first and pass it explicitly: the
    # original stored functions and results in the same dict and relied on
    # "mu" being the first key iterated, which was fragile.
    mu = get_chemical_potential(vv=vv, tt=tt, gbar=gbar)
    calculators = dict(
        F=get_F_potential,
        p=get_pressure,
        S=get_entropy,
        C_P=get_heat_capacity_pressure,
        C_V=get_heat_capacity_volume,
        C_T=get_sound_speed_temperature,
        C_S=get_sound_speed_entropy,
    )
    properties = {"mu": mu}
    for key, calc in calculators.items():
        properties[key] = calc(vv=vv, tt=tt, gbar=gbar, chemical_potential=mu)
    if csv_dir:
        # One csv per (property, volume) pair: columns are T and value(T).
        for key, value in properties.items():
            for i, volume in enumerate(vv[0, :]):
                dump_to_csv(
                    os.path.join(
                        os.getcwd(),
                        csv_dir,
                        "{}_v={}_atomic_units.csv".format(key, volume),
                    ),
                    np.array([tt[0, :], value[:, i]]).T,
                )
    return properties
class IfgCalculator:
def __init__(
self,
temperatures=None,
volumes=None,
thetas=None,
densities=None,
rs=None,
input_in_si=None,
output_in_si=None,
g=None,
mr=None,
):
# def __init__(self, specific_volumes, temperatures,
# input_in_si, output_in_si, g=2., mr=1.):
# type: (np.ndarray, np.ndarray, bool, bool, float, float) -> None
"""Main class for IFG calculations.
:param volumes, rs, densities: Array of volumes, rs or densities, respectively
(only one parameter is possible)
:param temperatures, thetas: Array of temperatures or thetas, respectively
(only one parameter is possible; in case of thetas the length of
thetas array should be not more than 1)
:param input_in_is: Whether input values are in SI units (False - atomic units, default)
:param output_in_si: Whether output values are in SI units (False - atomic units, default)
:param g: degeneracy of spin states, g = 2s + 1, s - spin, g = 2 by default
:param mr: mass of particles with respect to electron mass, mr = 1 by default
"""
# Default values
input_in_si_default = False
output_in_si_default = False
g_default = 2.0
mr_default = 1.0
# Checking if temperatures or thetas argument is given
if temperatures is None and thetas is None:
raise ValueError("temperatures or thetas parameter is obligatory")
# Checking if both temperatures and thetas arguments are given
if temperatures is not None and thetas is not None:
raise ValueError(
"Only one named parameter must be used for temperature: temperatures or thetas"
)
# Checking if any of volumes or densities of rs argument is given
if volumes is None and densities is None and rs is None:
raise ValueError(
"One of volumes or densities or rs parameter is obligatory"
)
# Cannot have more than one argument
if sum([x is not None for x in (volumes, densities, rs)]) > 1:
raise ValueError(
"Only one named parameter must be used for volume: volumes or densities or rs"
)
# If volumes argument is given, simply convert to np.ndarray
if volumes is not None:
volumes = np.array(volumes)
# If densities argument is given, calculate volumes
if densities is not None:
volumes = 1.0 / np.array(densities)
# If rs argument is given, calculate volumes
if rs is not None:
volumes = 4.0 * np.pi * np.array(rs) ** 3 / 3.0
# If temperatures argument is given, simply convert to np.ndarray
if temperatures is not None:
temperatures = np.array(temperatures)
# thetas argument is a special case: theta depends both on temperature and volume
# Calculate vv and tt matrices, for thetas using cycle, otherwise using np.meshgrid
if thetas is not None:
thetas = np.array(thetas)
tt = np.zeros((len(thetas), len(volumes)))
vv = np.zeros((len(thetas), len(volumes)))
i = 0
for th in thetas:
j = 0
for v in volumes:
tt[i, j] = 0.5 * th * (3.0 * np.pi * np.pi / v) ** (2.0 / 3.0)
vv[i, j] = v
j = j + 1
i = i + 1
else:
vv, tt = np.meshgrid(volumes, temperatures)
if input_in_si is not None:
self.input_in_si = input_in_si
else:
self.input_in_si = input_in_si_default
if output_in_si is not None:
self.output_in_si = output_in_si
else:
self.output_in_si = output_in_si_default
self.g = g if g is not None else g_default
self.mr = mr if mr is not None else mr_default
self.gbar = self.g * self.mr ** 1.5
self.converter = SiAtomicConverter(from_si=True)
self.reverse_converter = SiAtomicConverter(from_si=False)
vv, tt = map(np.array, [vv, tt])
self.vv = self.converter.convert_volume(vv) if self.input_in_si else vv
self.tt = self.converter.convert_temperature(tt) if self.input_in_si else tt
def generic_getter(self, calc_function, attribute_name, convert_function):
cache = "__{}_cached__".format(attribute_name)
if hasattr(self, cache):
# return cached value if possible
return getattr(self, cache)
elif attribute_name == "mu":
# `mu` is a special case since it is used in `calc_function` below
return get_chemical_potential(vv=self.vv, tt=self.tt, gbar=self.gbar)
# Cache is not available
value = calc_function(
vv=self.vv, tt=self.tt, chemical_potential=self.mu, gbar=self.gbar
)
if self.output_in_si:
# Call `convert_function` on `value` if output is in SI
value = getattr(self.reverse_converter, convert_function)(value)
# Store cache
setattr(self, cache, value)
return value
@property
def mu(self):
"""Get IFG chemical potential mu in atomic units.
:return: `mu[i][j]` - chemical potential in atomic units.\
*i*-th index is for temperature, *j*-th one is for volume
"""
return self.generic_getter(get_chemical_potential, "mu", "convert_energy")
@property
def F(self):
"""Get IFG Helmholtz potential F in atomic units.
:return: F[i][j] - Helmholtz free energy in atomic units.\
*i*-th index is for temperature, *j*-th one is for volume
"""
return self.generic_getter(get_F_potential, "F", "convert_energy")
@property
def P(self):
"""Get IFG pressure P in atomic units.
:return: P[i][j] - Pressure in atomic units.\
*i*-th index is for temperature, *j*-th one is for volume
"""
return self.generic_getter(get_pressure, "p", "convert_pressure")
@property
def E(self):
"""Get IFG energy E in atomic units.
:return: E[i][j] - Energy in atomic units.\
*i*-th index is for temperature, *j*-th one is for volume
"""
return self.generic_getter(get_energy, "E", "convert_energy")
@property
def S(self):
"""Get IFG entropy S in atomic units.
:return: S[i][j] - Entropy in atomic units.\
*i*-th index is for temperature, *j*-th one is for volume
"""
return self.generic_getter(get_entropy, "S", "convert_entropy")
@property
def C_V(self):
"""Get IFG heat capacity C_V in atomic units.
:return: C_V[i][j] - C_V in atomic units.\
*i*-th index is for temperature, *j*-th one is for volume
"""
return self.generic_getter(
get_heat_capacity_volume, "C_V", "convert_heat_capacity"
)
@property
def C_P(self):
"""Get IFG heat capacity C_P in atomic units.
:return: C_P[i][j] - C_P in atomic units.\
*i*-th index is for temperature, *j*-th one is for volume
"""
return self.generic_getter(
get_heat_capacity_pressure, "C_P", "convert_heat_capacity"
)
@property
def C_T(self):
"""Get IFG sound speed C_T in atomic units.
:return: C_T[i][j] - C_T in atomic units.\
*i*-th index is for temperature, *j*-th one is for volume
"""
return self.generic_getter(
get_sound_speed_temperature, "C_T", "convert_sound_speed"
)
    @property
    def C_S(self):
        """Get IFG sound speed C_S (constant entropy) in atomic units.

        Delegates to ``self.generic_getter`` with the
        ``get_sound_speed_entropy`` backend and the
        "convert_sound_speed" unit conversion.

        :return: C_S[i][j] - C_S in atomic units;
            *i*-th index is for temperature, *j*-th one is for volume.
        """
        return self.generic_getter(
            get_sound_speed_entropy, "C_S", "convert_sound_speed"
        )
    def get_all_properties(self, csv_dir=None):
        # type: (str) -> dict
        """Calculate all properties and save them to csv file.

        Accesses every public property in turn (triggering their
        computation) and optionally dumps each one to disk, one csv file
        per volume column, named ``"<prop>_v=<volume>_atomic_units.csv"``.

        :param csv_dir: Directory to save csv files to, resolved relative
            to the current working directory. When None, nothing is written.
        :return: dict {'property_name': ndarray}
        """
        properties = {
            prop: getattr(self, prop)
            for prop in ["mu", "F", "P", "E", "S", "C_P", "C_V", "C_T", "C_S"]
        }
        if csv_dir is not None:
            for key, value in properties.items():
                # One file per volume; each row pairs a temperature from
                # self.tt with the property value at that volume column.
                for i, volume in enumerate(self.vv[0, :]):
                    dump_to_csv(
                        os.path.join(
                            os.getcwd(),
                            csv_dir,
                            "{}_v={}_atomic_units.csv".format(key, volume),
                        ),
                        np.array([self.tt[0, :], value[:, i]]).T,
                    )
        return properties
| [
"numpy.sqrt",
"fdint.fdk",
"os.getcwd",
"ifg.units_converter.SiAtomicConverter",
"numpy.array",
"numpy.concatenate",
"numpy.meshgrid"
] | [((347, 360), 'fdint.fdk', 'fdk', (['k', 'array'], {}), '(k, array)\n', (350, 360), False, 'from fdint import fdk, ifd1h\n'), ((14594, 14625), 'ifg.units_converter.SiAtomicConverter', 'SiAtomicConverter', ([], {'from_si': '(True)'}), '(from_si=True)\n', (14611, 14625), False, 'from ifg.units_converter import SiAtomicConverter\n'), ((14659, 14691), 'ifg.units_converter.SiAtomicConverter', 'SiAtomicConverter', ([], {'from_si': '(False)'}), '(from_si=False)\n', (14676, 14691), False, 'from ifg.units_converter import SiAtomicConverter\n'), ((869, 879), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (876, 879), True, 'import numpy as np\n'), ((4588, 4619), 'numpy.concatenate', 'np.concatenate', (['(S_low, S_high)'], {}), '((S_low, S_high))\n', (4602, 4619), True, 'import numpy as np\n'), ((5959, 5994), 'numpy.concatenate', 'np.concatenate', (['(C_V_low, C_V_high)'], {}), '((C_V_low, C_V_high))\n', (5973, 5994), True, 'import numpy as np\n'), ((7389, 7424), 'numpy.concatenate', 'np.concatenate', (['(C_P_low, C_P_high)'], {}), '((C_P_low, C_P_high))\n', (7403, 7424), True, 'import numpy as np\n'), ((12950, 12967), 'numpy.array', 'np.array', (['volumes'], {}), '(volumes)\n', (12958, 12967), True, 'import numpy as np\n'), ((13391, 13413), 'numpy.array', 'np.array', (['temperatures'], {}), '(temperatures)\n', (13399, 13413), True, 'import numpy as np\n'), ((13649, 13665), 'numpy.array', 'np.array', (['thetas'], {}), '(thetas)\n', (13657, 13665), True, 'import numpy as np\n'), ((14087, 14121), 'numpy.meshgrid', 'np.meshgrid', (['volumes', 'temperatures'], {}), '(volumes, temperatures)\n', (14098, 14121), True, 'import numpy as np\n'), ((13091, 13110), 'numpy.array', 'np.array', (['densities'], {}), '(densities)\n', (13099, 13110), True, 'import numpy as np\n'), ((1755, 1767), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (1762, 1767), True, 'import numpy as np\n'), ((2498, 2508), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2505, 2508), True, 'import numpy 
as np\n'), ((3213, 3223), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3220, 3223), True, 'import numpy as np\n'), ((5728, 5738), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5735, 5738), True, 'import numpy as np\n'), ((6970, 6980), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6977, 6980), True, 'import numpy as np\n'), ((8130, 8141), 'numpy.sqrt', 'np.sqrt', (['vv'], {}), '(vv)\n', (8137, 8141), True, 'import numpy as np\n'), ((4250, 4260), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4257, 4260), True, 'import numpy as np\n'), ((8892, 8902), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (8899, 8902), True, 'import numpy as np\n'), ((8913, 8926), 'numpy.sqrt', 'np.sqrt', (['gbar'], {}), '(gbar)\n', (8920, 8926), True, 'import numpy as np\n'), ((10217, 10228), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10226, 10228), False, 'import os\n'), ((10378, 10421), 'numpy.array', 'np.array', (['[tt[0, :], properties[key][:, i]]'], {}), '([tt[0, :], properties[key][:, i]])\n', (10386, 10421), True, 'import numpy as np\n'), ((13228, 13240), 'numpy.array', 'np.array', (['rs'], {}), '(rs)\n', (13236, 13240), True, 'import numpy as np\n'), ((8090, 8103), 'numpy.sqrt', 'np.sqrt', (['gbar'], {}), '(gbar)\n', (8097, 8103), True, 'import numpy as np\n'), ((19211, 19222), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (19220, 19222), False, 'import os\n'), ((19388, 19426), 'numpy.array', 'np.array', (['[self.tt[0, :], value[:, i]]'], {}), '([self.tt[0, :], value[:, i]])\n', (19396, 19426), True, 'import numpy as np\n')] |
"""
Tests of neo.io.hdf5io_new
"""
import unittest
import sys
import numpy as np
from numpy.testing import assert_array_equal
from quantities import kHz, mV, ms, second, nA
try:
import h5py
HAVE_H5PY = True
except ImportError:
HAVE_H5PY = False
from neo.io.hdf5io import NeoHdf5IO
from neo.test.iotest.common_io_test import BaseTestIO
from neo.test.iotest.tools import get_test_file_full_path
@unittest.skipUnless(HAVE_H5PY, "requires h5py")
class ReadOldNeoHdf5IOTest(BaseTestIO, unittest.TestCase):
    """
    Test that data generated by NeoHdf5IO in Neo versions 0.3, 0.4 are
    read correctly.
    """
    # BaseTestIO hooks: which IO class to exercise and which fixture
    # files it needs (downloaded if absent — presumably handled by
    # BaseTestIO; confirm against that mixin).
    ioclass = NeoHdf5IO
    files_to_test = ["neo_hdf5_example.h5"]
    files_to_download = files_to_test
    def test_read_with_merge(self):
        # Read the whole legacy file with merge_singles=True and assert
        # the exact structure/contents of both blocks in the fixture.
        test_file = get_test_file_full_path(self.ioclass, filename=self.files_to_test[0],
                                           directory=self.local_test_dir, clean=False)
        io = NeoHdf5IO(test_file)
        blocks = io.read_all_blocks(merge_singles=True)
        # general tests, true for both blocks
        for block in blocks:
            for segment in block.segments:
                self.assertEqual(segment.block, block)
        # tests of Block #1, which is constructed from "array" (multi-channel)
        # objects, so should be straightforward to convert to the version 0.5 API
        block0 = blocks[0]
        self.assertEqual(block0.name, "block1")
        self.assertEqual(block0.index, 1234)
        self.assertEqual(block0.annotations["foo"], "bar")
        self.assertEqual(len(block0.segments), 3)
        for segment in block0.segments:
            self.assertEqual(len(segment.analogsignals), 2)
            as0 = segment.analogsignals[0]
            self.assertEqual(as0.shape, (1000, 4))
            self.assertEqual(as0.sampling_rate, 1 * kHz)
            self.assertEqual(as0.units, mV)
            self.assertEqual(as0.segment, segment)
            self.assertEqual(len(segment.spiketrains), 4)
            st = segment.spiketrains[-1]
            self.assertEqual(st.units, ms)
            self.assertEqual(st.t_stop, 1000 * ms)
            self.assertEqual(st.t_start, 0 * ms)
            self.assertEqual(st.segment, segment)
            self.assertEqual(len(segment.events), 1)
            ev = segment.events[0]
            # Label dtype is byte-order sensitive, hence the sys.byteorder
            # dance to build '<U5' or '>U5' to match the stored data.
            assert_array_equal(ev.labels,
                               np.array(['trig0', 'trig1', 'trig2'],
                                        dtype=(sys.byteorder == 'little' and '<' or '>') + 'U5'))
            self.assertEqual(ev.units, second)
            assert_array_equal(ev.magnitude, np.arange(0, 30, 10))
            self.assertEqual(ev.segment, segment)
            self.assertEqual(len(segment.epochs), 1)
            ep = segment.epochs[0]
            assert_array_equal(ep.labels,
                               np.array(['btn0', 'btn1', 'btn2'],
                                        dtype=(sys.byteorder == 'little' and '<' or '>') + 'U4'))
            assert_array_equal(ep.durations.magnitude,
                               np.array([10, 5, 7]))
            self.assertEqual(ep.units, second)
            assert_array_equal(ep.magnitude, np.arange(0, 30, 10))
            self.assertEqual(ep.segment, segment)
            self.assertEqual(len(segment.irregularlysampledsignals), 2)
            iss0 = segment.irregularlysampledsignals[0]
            self.assertEqual(iss0.shape, (3, 2))
            assert_array_equal(iss0.times,
                               [0.01, 0.03, 0.12] * second)
            assert_array_equal(iss0.magnitude,
                               np.array([[4, 3],
                                         [5, 4],
                                         [6, 3]]))
            self.assertEqual(iss0.units, nA)
            self.assertEqual(iss0.segment, segment)
            iss1 = segment.irregularlysampledsignals[1]
            self.assertEqual(iss1.shape, (3, 1))
            assert_array_equal(iss1.times,
                               [0.02, 0.05, 0.15] * second)
            self.assertEqual(iss1.units, nA)
            assert_array_equal(iss1.magnitude,
                               np.array([[3], [4], [3]]))
        # tests of Block #2, which is constructed from "singleton"
        # (single-channel) objects, so is potentially tricky to convert to the
        # version 0.5 API
        block1 = blocks[1]
        self.assertEqual(block1.name, "block2")
        for segment in block1.segments:
            self.assertEqual(len(segment.analogsignals), 2)
            as0 = segment.analogsignals[0]
            self.assertEqual(as0.shape, (1000, 4))
            self.assertEqual(as0.sampling_rate, 1 * kHz)
            self.assertEqual(as0.units, mV)
            self.assertEqual(as0.segment, segment)
            self.assertEqual(len(segment.spiketrains), 7)
            st = segment.spiketrains[-1]
            self.assertEqual(st.units, ms)
            self.assertEqual(st.t_stop, 1000 * ms)
            self.assertEqual(st.t_start, 0 * ms)
            self.assertEqual(st.segment, segment)
            self.assertEqual(len(segment.events), 0)
            self.assertEqual(len(segment.epochs), 0)
        # NOTE(review): below, `segment` is the loop variable left over from
        # the Block #2 loop (i.e. the last segment) — confirm intentional.
        self.assertEqual(len(block1.channel_indexes), 3)
        ci0 = block1.channel_indexes[0]
        self.assertEqual(ci0.name, "electrode1")
        self.assertEqual(len(ci0.analogsignals), 1)
        as00 = ci0.analogsignals[0]
        self.assertEqual(as00.segment, segment)
        self.assertEqual(as00.shape, (1000, 4))
        # Identity (not just equality) checks: merge_singles must link the
        # *same* objects between segments and channel indexes.
        self.assertEqual(id(as00), id(segment.analogsignals[0]))
        self.assertEqual(as00.mean(), segment.analogsignals[0].mean())
        self.assertEqual(as00.channel_index, ci0)
        assert_array_equal(ci0.index, np.array([0, 1, 2, 3]))
        assert_array_equal(ci0.channel_ids, np.array([0, 1, 2, 3]))
        self.assertEqual(len(ci0.units), 2)
        self.assertEqual(len(ci0.units[0].spiketrains), 2)
        self.assertEqual(id(ci0.units[0].spiketrains[0]),
                         id(block1.segments[0].spiketrains[0]))
        self.assertEqual(id(ci0.units[0].spiketrains[1]),
                         id(block1.segments[1].spiketrains[0]))
        self.assertEqual(id(ci0.units[1].spiketrains[0]),
                         id(block1.segments[0].spiketrains[1]))
        ci1 = block1.channel_indexes[1]
        self.assertEqual(ci1.name, "electrode2")
        self.assertEqual(len(ci1.analogsignals), 1)
        as10 = ci1.analogsignals[0]
        self.assertEqual(as10.segment, segment)
        self.assertEqual(as10.shape, (1000, 4))
        self.assertEqual(id(as10), id(segment.analogsignals[1]))
        self.assertEqual(as10.mean(), segment.analogsignals[1].mean())
        self.assertEqual(as10.channel_index, ci1)
        assert_array_equal(ci1.index, np.array([0, 1, 2, 3]))
        assert_array_equal(ci1.channel_ids, np.array([4, 5, 6, 7]))
        self.assertEqual(len(ci1.units), 5)
        self.assertEqual(id(ci1.units[0].spiketrains[0]),
                         id(block1.segments[0].spiketrains[2]))
        self.assertEqual(id(ci1.units[3].spiketrains[1]),
                         id(block1.segments[1].spiketrains[5]))
        ci2 = block1.channel_indexes[2]
        self.assertEqual(ci2.name, "my_favourite_channels")
        self.assertEqual(len(ci2.analogsignals), 1)
        self.assertEqual(id(ci2.analogsignals[0]), id(as00))
        assert_array_equal(ci2.index, np.array([1, 3]))
        assert_array_equal(ci2.channel_ids, np.array([1, 3]))
| [
"neo.io.hdf5io.NeoHdf5IO",
"numpy.arange",
"numpy.testing.assert_array_equal",
"unittest.skipUnless",
"numpy.array",
"neo.test.iotest.tools.get_test_file_full_path"
] | [((412, 459), 'unittest.skipUnless', 'unittest.skipUnless', (['HAVE_H5PY', '"""requires h5py"""'], {}), "(HAVE_H5PY, 'requires h5py')\n", (431, 459), False, 'import unittest\n'), ((789, 906), 'neo.test.iotest.tools.get_test_file_full_path', 'get_test_file_full_path', (['self.ioclass'], {'filename': 'self.files_to_test[0]', 'directory': 'self.local_test_dir', 'clean': '(False)'}), '(self.ioclass, filename=self.files_to_test[0],\n directory=self.local_test_dir, clean=False)\n', (812, 906), False, 'from neo.test.iotest.tools import get_test_file_full_path\n'), ((960, 980), 'neo.io.hdf5io.NeoHdf5IO', 'NeoHdf5IO', (['test_file'], {}), '(test_file)\n', (969, 980), False, 'from neo.io.hdf5io import NeoHdf5IO\n'), ((3460, 3519), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['iss0.times', '([0.01, 0.03, 0.12] * second)'], {}), '(iss0.times, [0.01, 0.03, 0.12] * second)\n', (3478, 3519), False, 'from numpy.testing import assert_array_equal\n'), ((3962, 4021), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['iss1.times', '([0.02, 0.05, 0.15] * second)'], {}), '(iss1.times, [0.02, 0.05, 0.15] * second)\n', (3980, 4021), False, 'from numpy.testing import assert_array_equal\n'), ((5754, 5776), 'numpy.array', 'np.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (5762, 5776), True, 'import numpy as np\n'), ((5822, 5844), 'numpy.array', 'np.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (5830, 5844), True, 'import numpy as np\n'), ((6813, 6835), 'numpy.array', 'np.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (6821, 6835), True, 'import numpy as np\n'), ((6881, 6903), 'numpy.array', 'np.array', (['[4, 5, 6, 7]'], {}), '([4, 5, 6, 7])\n', (6889, 6903), True, 'import numpy as np\n'), ((7445, 7461), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (7453, 7461), True, 'import numpy as np\n'), ((7507, 7523), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (7515, 7523), True, 'import numpy as np\n'), ((2403, 2501), 
'numpy.array', 'np.array', (["['trig0', 'trig1', 'trig2']"], {'dtype': "((sys.byteorder == 'little' and '<' or '>') + 'U5')"}), "(['trig0', 'trig1', 'trig2'], dtype=(sys.byteorder == 'little' and\n '<' or '>') + 'U5')\n", (2411, 2501), True, 'import numpy as np\n'), ((2631, 2651), 'numpy.arange', 'np.arange', (['(0)', '(30)', '(10)'], {}), '(0, 30, 10)\n', (2640, 2651), True, 'import numpy as np\n'), ((2865, 2960), 'numpy.array', 'np.array', (["['btn0', 'btn1', 'btn2']"], {'dtype': "((sys.byteorder == 'little' and '<' or '>') + 'U4')"}), "(['btn0', 'btn1', 'btn2'], dtype=(sys.byteorder == 'little' and '<' or\n '>') + 'U4')\n", (2873, 2960), True, 'import numpy as np\n'), ((3084, 3104), 'numpy.array', 'np.array', (['[10, 5, 7]'], {}), '([10, 5, 7])\n', (3092, 3104), True, 'import numpy as np\n'), ((3198, 3218), 'numpy.arange', 'np.arange', (['(0)', '(30)', '(10)'], {}), '(0, 30, 10)\n', (3207, 3218), True, 'import numpy as np\n'), ((3629, 3663), 'numpy.array', 'np.array', (['[[4, 3], [5, 4], [6, 3]]'], {}), '([[4, 3], [5, 4], [6, 3]])\n', (3637, 3663), True, 'import numpy as np\n'), ((4176, 4201), 'numpy.array', 'np.array', (['[[3], [4], [3]]'], {}), '([[3], [4], [3]])\n', (4184, 4201), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import vtk
def mandelbrot_set(X, Y, maxiter, horizon=2.0):
    """Iterate z -> z**2 + c over the complex grid spanned by X and Y.

    Every grid point still inside the escape radius ``horizon`` keeps
    being updated; the escape-iteration counter of points that never
    escaped (counter == maxiter-1) is reset to 0.

    Returns (Z.T, N.T): the final complex values and the escape counts,
    both transposed so axis 0 runs over X.
    """
    grid = X + Y[:, None] * 1j
    counts = np.zeros(grid.shape, dtype=int)
    values = np.zeros(grid.shape, np.complex64)
    for step in range(maxiter):
        if step % (maxiter / 10) == 0:
            print('progress: %d/%d' % (step, maxiter))
        alive = np.less(abs(values), horizon)
        counts[alive] = step
        values[alive] = values[alive] ** 2 + grid[alive]
    counts[counts == maxiter - 1] = 0
    return values.transpose(), counts.transpose()
# Compute an 800x600 Mandelbrot escape-count grid, then write it out as a
# triangulated VTK polydata file ('mandel_polydata.vtp').
nx = 800
ny = 600
x = np.linspace(-2.25, 0.75, nx, dtype=np.float32)
y = np.linspace(-1.25, 1.25, ny, dtype=np.float32)
# 2.0**40 horizon: a very large escape radius (values, not counts, blow up).
Z, N = mandelbrot_set(x, y, 2000, 2.0 ** 40)
filename = 'mandel_polydata'
# Build one VTK vertex per grid point, carrying the escape count N as a
# scalar point-data array named 'N'.
points = vtk.vtkPoints()
vertices = vtk.vtkCellArray()
d0 = vtk.vtkFloatArray()
d0.SetName('N')
d0.SetNumberOfTuples(nx * ny)
d0.SetNumberOfComponents(1)
n = 0
for ix, vx in enumerate(x):
    if ix % (nx / 10) == 0:
        print('saving: %d/%d' % (ix, nx))
    for iy, vy in enumerate(y):
        # z=0 plane; point id returned by VTK links vertex cell to point.
        id = points.InsertNextPoint(vx, vy, 0)
        d0.SetComponent(n, 0, N[(ix, iy)])
        vertices.InsertNextCell(1)
        vertices.InsertCellPoint(id)
        n += 1
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.GetPointData().AddArray(d0)
polydata.GetPointData().SetScalars(d0)
polydata.SetVerts(vertices)
# Triangulate the scattered vertices so the result renders as a surface.
delaunay = vtk.vtkDelaunay2D()
delaunay.SetInputData(polydata)
delaunay.Update()
writer = vtk.vtkXMLPolyDataWriter()
writer.SetFileName('%s.vtp' % (filename))
writer.SetInputData(delaunay.GetOutput())
writer.Write()
print('%s.vtp generated' % (filename))
| [
"vtk.vtkXMLPolyDataWriter",
"vtk.vtkCellArray",
"vtk.vtkPolyData",
"vtk.vtkPoints",
"numpy.linspace",
"numpy.zeros",
"vtk.vtkFloatArray",
"vtk.vtkDelaunay2D"
] | [((461, 507), 'numpy.linspace', 'np.linspace', (['(-2.25)', '(0.75)', 'nx'], {'dtype': 'np.float32'}), '(-2.25, 0.75, nx, dtype=np.float32)\n', (472, 507), True, 'import numpy as np\n'), ((512, 558), 'numpy.linspace', 'np.linspace', (['(-1.25)', '(1.25)', 'ny'], {'dtype': 'np.float32'}), '(-1.25, 1.25, ny, dtype=np.float32)\n', (523, 558), True, 'import numpy as np\n'), ((645, 660), 'vtk.vtkPoints', 'vtk.vtkPoints', ([], {}), '()\n', (658, 660), False, 'import vtk\n'), ((672, 690), 'vtk.vtkCellArray', 'vtk.vtkCellArray', ([], {}), '()\n', (688, 690), False, 'import vtk\n'), ((697, 716), 'vtk.vtkFloatArray', 'vtk.vtkFloatArray', ([], {}), '()\n', (714, 716), False, 'import vtk\n'), ((1090, 1107), 'vtk.vtkPolyData', 'vtk.vtkPolyData', ([], {}), '()\n', (1105, 1107), False, 'import vtk\n'), ((1251, 1270), 'vtk.vtkDelaunay2D', 'vtk.vtkDelaunay2D', ([], {}), '()\n', (1268, 1270), False, 'import vtk\n'), ((1331, 1357), 'vtk.vtkXMLPolyDataWriter', 'vtk.vtkXMLPolyDataWriter', ([], {}), '()\n', (1355, 1357), False, 'import vtk\n'), ((132, 160), 'numpy.zeros', 'np.zeros', (['C.shape'], {'dtype': 'int'}), '(C.shape, dtype=int)\n', (140, 160), True, 'import numpy as np\n'), ((167, 198), 'numpy.zeros', 'np.zeros', (['C.shape', 'np.complex64'], {}), '(C.shape, np.complex64)\n', (175, 198), True, 'import numpy as np\n')] |
# House-prices regression script: load the Kaggle train/test csvs, clean and
# encode features, fit a Lasso model, and report training RMSE (of log prices).
import numpy as np
import pandas as pd
# NOTE(review): `boxcox` is imported but never used (boxcox1p is re-imported
# below); `test` is loaded but never used either.
from scipy.special import boxcox1p, boxcox
"""
load data
"""
train = pd.read_csv('./data/train.csv')
test = pd.read_csv('./data/test.csv')
"""
fix salePrice skewness
"""
# Model the log of the sale price (log1p) to reduce target skew.
train["SalePrice"] = np.log1p(train["SalePrice"])
y_train_values = train.SalePrice.values
# NOTE(review): this is an alias, not a copy — the inplace drop below also
# removes SalePrice from `train`, so the final train.head() reflects every
# transformation done here. Confirm that is intended.
all_features_data = train
all_features_data.drop(['SalePrice'], axis=1, inplace=True)
"""
fix NaN
"""
# Categorical-ish columns: fill missing values with the column mode.
for col in ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond',
            'PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu',
            'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'MasVnrType',
            'MSZoning', 'Functional', 'Electrical','KitchenQual', 'Exterior1st',
            'Exterior2nd', 'SaleType', 'MSSubClass'
            ]:
    all_features_data[col] = all_features_data[col].fillna(all_features_data[col].mode()[0])
# Numeric columns: treat missing as zero (e.g. no garage -> area 0).
for col in ['GarageYrBlt', 'GarageArea', 'GarageCars', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF',
            'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath', 'MasVnrArea',
            'Electrical','KitchenQual', 'Exterior1st', 'Exterior2nd', 'SaleType',
            'LotFrontage'
            ]:
    all_features_data[col] = all_features_data[col].fillna(0)
"""
encode categorical features
"""
from sklearn.preprocessing import LabelEncoder
# Ordinal-looking columns get integer label encoding; the rest are
# one-hot encoded by get_dummies further down.
cols_encoding_needed = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',
        'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1',
        'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope',
        'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond',
        'YrSold', 'MoSold')
for col in cols_encoding_needed:
    lbl = LabelEncoder()
    lbl.fit(list(all_features_data[col].values))
    all_features_data[col] = lbl.transform(list(all_features_data[col].values))
"""
fix numeric features skewness by applying boxcox
"""
numeric_columns = all_features_data.dtypes[all_features_data.dtypes != "object"].index
from scipy.stats import skew
skewed_columns = all_features_data[numeric_columns].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
# Only transform clearly skewed columns (|skew| > 0.75).
skewed_columns = skewed_columns[abs(skewed_columns) > 0.75]
from scipy.special import boxcox1p
skewed_features = skewed_columns.index
for feat in skewed_features:
    all_features_data[feat] = boxcox1p(all_features_data[feat], 0.15)
all_features_data = pd.get_dummies(all_features_data)
"""
lasso fit
"""
from sklearn.linear_model import Lasso
lasso = Lasso()
# NOTE(review): `normalize=` was deprecated and removed in scikit-learn 1.2;
# pin the sklearn version or move to a StandardScaler + Lasso pipeline.
lasso.set_params(alpha=0.0005, normalize=True)
model = lasso.fit(all_features_data.values, y_train_values)
from sklearn.metrics import mean_squared_error
from math import sqrt
# NOTE(review): RMSE is computed on the training data itself (no hold-out),
# and in log space since SalePrice was log1p-transformed above.
y_pred = model.predict(all_features_data)
print("Root Mean Squared Error")
print(sqrt(mean_squared_error(y_train_values, y_pred)))
print(np.expm1(y_pred))
print(train.head())
| [
"sklearn.preprocessing.LabelEncoder",
"scipy.special.boxcox1p",
"pandas.read_csv",
"sklearn.linear_model.Lasso",
"numpy.expm1",
"sklearn.metrics.mean_squared_error",
"pandas.get_dummies",
"numpy.log1p"
] | [((110, 141), 'pandas.read_csv', 'pd.read_csv', (['"""./data/train.csv"""'], {}), "('./data/train.csv')\n", (121, 141), True, 'import pandas as pd\n'), ((150, 180), 'pandas.read_csv', 'pd.read_csv', (['"""./data/test.csv"""'], {}), "('./data/test.csv')\n", (161, 180), True, 'import pandas as pd\n'), ((233, 261), 'numpy.log1p', 'np.log1p', (["train['SalePrice']"], {}), "(train['SalePrice'])\n", (241, 261), True, 'import numpy as np\n'), ((2461, 2494), 'pandas.get_dummies', 'pd.get_dummies', (['all_features_data'], {}), '(all_features_data)\n', (2475, 2494), True, 'import pandas as pd\n'), ((2563, 2570), 'sklearn.linear_model.Lasso', 'Lasso', ([], {}), '()\n', (2568, 2570), False, 'from sklearn.linear_model import Lasso\n'), ((1768, 1782), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1780, 1782), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2399, 2438), 'scipy.special.boxcox1p', 'boxcox1p', (['all_features_data[feat]', '(0.15)'], {}), '(all_features_data[feat], 0.15)\n', (2407, 2438), False, 'from scipy.special import boxcox1p\n'), ((2890, 2906), 'numpy.expm1', 'np.expm1', (['y_pred'], {}), '(y_pred)\n', (2898, 2906), True, 'import numpy as np\n'), ((2838, 2880), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_train_values', 'y_pred'], {}), '(y_train_values, y_pred)\n', (2856, 2880), False, 'from sklearn.metrics import mean_squared_error\n')] |
# Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import argparse
import sys
import numpy as np
from mmcv.utils import get_logger
from ote_sdk.configuration.helper import create
from ote_sdk.entities.datasets import DatasetEntity
from ote_sdk.entities.inference_parameters import InferenceParameters
from ote_sdk.entities.label import Domain
from ote_sdk.entities.label_schema import LabelSchemaEntity
from ote_sdk.entities.model import ModelEntity, ModelStatus
from ote_sdk.entities.model_template import parse_model_template
from ote_sdk.entities.optimization_parameters import OptimizationParameters
from ote_sdk.entities.resultset import ResultSetEntity
from ote_sdk.entities.subset import Subset
from ote_sdk.entities.task_environment import TaskEnvironment
from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType
from ote_sdk.usecases.tasks.interfaces.optimization_interface import OptimizationType
from mmdet.apis.ote.apis.detection.ote_utils import get_task_class
logger = get_logger(name='sample')
def parse_args():
    """Parse the sample's command line.

    :return: namespace with ``template_file_path`` (required positional)
        and the boolean ``export`` flag (default False).
    """
    cli = argparse.ArgumentParser(description='Sample showcasing the new API')
    cli.add_argument('template_file_path', help='path to template file')
    cli.add_argument('--export', action='store_true')
    return cli.parse_args()
def load_test_dataset():
    """Build a tiny synthetic detection dataset of four rectangle images.

    Each image is a 640x480 white canvas with one colored rectangle in a
    different quadrant; every dataset item is annotated with that
    rectangle and the single 'rect' label. The training subset repeats
    the four images to reach 10 items; validation and testing use each
    image once.

    :return: tuple (DatasetEntity, list with the single LabelEntity)
    """
    from ote_sdk.entities.annotation import Annotation, AnnotationSceneEntity, AnnotationSceneKind
    from ote_sdk.entities.dataset_item import DatasetItemEntity
    from ote_sdk.entities.image import Image
    from ote_sdk.entities.label import LabelEntity
    from ote_sdk.entities.scored_label import ScoredLabel
    from ote_sdk.entities.shapes.rectangle import Rectangle
    from ote_sdk.entities.subset import Subset
    def gen_image(resolution, x1, y1, x2, y2):
        # White canvas with a colored rectangle over the normalized
        # region [x1, x2) x [y1, y2).
        w, h = resolution
        canvas = np.full([h, w, 3], fill_value=255, dtype=np.uint8)
        canvas[int(y1 * h):int(y2 * h), int(x1 * w):int(x2 * w), :] = np.array([0, 128, 128], dtype=np.uint8)[None, None, :]
        return (canvas, Rectangle(x1=x1, y1=y1, x2=x2, y2=y2))
    quadrants = [
        (0.0, 0.0, 0.5, 0.5),
        (0.5, 0.0, 1.0, 0.5),
        (0.0, 0.5, 0.5, 1.0),
        (0.5, 0.5, 1.0, 1.0),
    ]
    images = [gen_image((640, 480), *corner) for corner in quadrants]
    labels = [LabelEntity(name='rect', domain=Domain.DETECTION, id=0)]
    def get_image(i, subset):
        image, bbox = images[i]
        return DatasetItemEntity(
            media=Image(data=image),
            annotation_scene=AnnotationSceneEntity(
                annotations=[Annotation(bbox, labels=[ScoredLabel(label=labels[0])])],
                kind=AnnotationSceneKind.ANNOTATION
            ),
            subset=subset,
        )
    split_plan = (
        [(i % 4, Subset.TRAINING) for i in range(10)]
        + [(i, Subset.VALIDATION) for i in range(4)]
        + [(i, Subset.TESTING) for i in range(4)]
    )
    items = [get_image(i, subset) for i, subset in split_plan]
    return DatasetEntity(items), labels
def main(args):
    """Run the end-to-end sample: build a toy dataset, train a detector,
    evaluate it, and — when ``--export`` was given — export to OpenVINO,
    evaluate the exported model, and run POT optimization on it.

    :param args: parsed CLI namespace with ``template_file_path`` and
        the ``export`` flag.
    """
    logger.info('Initialize dataset')
    dataset, labels_list = load_test_dataset()
    labels_schema = LabelSchemaEntity.from_labels(labels_list)
    logger.info(f'Train dataset: {len(dataset.get_subset(Subset.TRAINING))} items')
    logger.info(f'Validation dataset: {len(dataset.get_subset(Subset.VALIDATION))} items')
    logger.info('Load model template')
    model_template = parse_model_template(args.template_file_path)
    logger.info('Set hyperparameters')
    params = create(model_template.hyper_parameters.data)
    # Deliberately tiny training budget — this is a smoke-test sample,
    # not a real training run.
    params.learning_parameters.num_iters = 5
    params.learning_parameters.learning_rate_warmup_iters = 1
    params.learning_parameters.batch_size = 2
    logger.info('Setup environment')
    environment = TaskEnvironment(model=None, hyper_parameters=params, label_schema=labels_schema, model_template=model_template)
    logger.info('Create base Task')
    task_impl_path = model_template.entrypoints.base
    task_cls = get_task_class(task_impl_path)
    task = task_cls(task_environment=environment)
    logger.info('Train model')
    output_model = ModelEntity(
        dataset,
        environment.get_model_configuration(),
        model_status=ModelStatus.NOT_READY)
    task.train(dataset, output_model)
    logger.info('Get predictions on the validation set')
    validation_dataset = dataset.get_subset(Subset.VALIDATION)
    # Infer on annotation-free copies so evaluation compares predictions
    # against the original ground truth.
    predicted_validation_dataset = task.infer(
        validation_dataset.with_empty_annotations(),
        InferenceParameters(is_evaluation=True))
    resultset = ResultSetEntity(
        model=output_model,
        ground_truth_dataset=validation_dataset,
        prediction_dataset=predicted_validation_dataset,
    )
    logger.info('Estimate quality on validation set')
    task.evaluate(resultset)
    logger.info(str(resultset.performance))
    if args.export:
        logger.info('Export model')
        exported_model = ModelEntity(
            dataset,
            environment.get_model_configuration(),
            model_status=ModelStatus.NOT_READY)
        task.export(ExportType.OPENVINO, exported_model)
        logger.info('Create OpenVINO Task')
        environment.model = exported_model
        openvino_task_impl_path = model_template.entrypoints.openvino
        openvino_task_cls = get_task_class(openvino_task_impl_path)
        openvino_task = openvino_task_cls(environment)
        logger.info('Get predictions on the validation set')
        predicted_validation_dataset = openvino_task.infer(
            validation_dataset.with_empty_annotations(),
            InferenceParameters(is_evaluation=True))
        resultset = ResultSetEntity(
            model=output_model,
            ground_truth_dataset=validation_dataset,
            prediction_dataset=predicted_validation_dataset,
        )
        logger.info('Estimate quality on validation set')
        openvino_task.evaluate(resultset)
        logger.info(str(resultset.performance))
        logger.info('Run POT optimization')
        optimized_model = ModelEntity(
            dataset,
            environment.get_model_configuration(),
            model_status=ModelStatus.NOT_READY)
        # Post-training optimization (quantization) on the training subset.
        openvino_task.optimize(
            OptimizationType.POT,
            dataset.get_subset(Subset.TRAINING),
            optimized_model,
            OptimizationParameters())
        logger.info('Get predictions on the validation set')
        predicted_validation_dataset = openvino_task.infer(
            validation_dataset.with_empty_annotations(),
            InferenceParameters(is_evaluation=True))
        resultset = ResultSetEntity(
            model=optimized_model,
            ground_truth_dataset=validation_dataset,
            prediction_dataset=predicted_validation_dataset,
        )
        logger.info('Performance of optimized model:')
        openvino_task.evaluate(resultset)
        logger.info(str(resultset.performance))
if __name__ == '__main__':
    # Propagate main()'s return code to the shell (None -> 0).
    sys.exit(main(parse_args()) or 0)
| [
"mmdet.apis.ote.apis.detection.ote_utils.get_task_class",
"mmcv.utils.get_logger",
"ote_sdk.entities.datasets.DatasetEntity",
"ote_sdk.entities.task_environment.TaskEnvironment",
"argparse.ArgumentParser",
"ote_sdk.entities.label_schema.LabelSchemaEntity.from_labels",
"ote_sdk.entities.image.Image",
"... | [((1535, 1560), 'mmcv.utils.get_logger', 'get_logger', ([], {'name': '"""sample"""'}), "(name='sample')\n", (1545, 1560), False, 'from mmcv.utils import get_logger\n'), ((1594, 1662), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sample showcasing the new API"""'}), "(description='Sample showcasing the new API')\n", (1617, 1662), False, 'import argparse\n'), ((4183, 4225), 'ote_sdk.entities.label_schema.LabelSchemaEntity.from_labels', 'LabelSchemaEntity.from_labels', (['labels_list'], {}), '(labels_list)\n', (4212, 4225), False, 'from ote_sdk.entities.label_schema import LabelSchemaEntity\n'), ((4463, 4508), 'ote_sdk.entities.model_template.parse_model_template', 'parse_model_template', (['args.template_file_path'], {}), '(args.template_file_path)\n', (4483, 4508), False, 'from ote_sdk.entities.model_template import parse_model_template\n'), ((4562, 4606), 'ote_sdk.configuration.helper.create', 'create', (['model_template.hyper_parameters.data'], {}), '(model_template.hyper_parameters.data)\n', (4568, 4606), False, 'from ote_sdk.configuration.helper import create\n'), ((4816, 4932), 'ote_sdk.entities.task_environment.TaskEnvironment', 'TaskEnvironment', ([], {'model': 'None', 'hyper_parameters': 'params', 'label_schema': 'labels_schema', 'model_template': 'model_template'}), '(model=None, hyper_parameters=params, label_schema=\n labels_schema, model_template=model_template)\n', (4831, 4932), False, 'from ote_sdk.entities.task_environment import TaskEnvironment\n'), ((5033, 5063), 'mmdet.apis.ote.apis.detection.ote_utils.get_task_class', 'get_task_class', (['task_impl_path'], {}), '(task_impl_path)\n', (5047, 5063), False, 'from mmdet.apis.ote.apis.detection.ote_utils import get_task_class\n'), ((5610, 5739), 'ote_sdk.entities.resultset.ResultSetEntity', 'ResultSetEntity', ([], {'model': 'output_model', 'ground_truth_dataset': 'validation_dataset', 'prediction_dataset': 'predicted_validation_dataset'}), '(model=output_model, 
ground_truth_dataset=validation_dataset,\n prediction_dataset=predicted_validation_dataset)\n', (5625, 5739), False, 'from ote_sdk.entities.resultset import ResultSetEntity\n'), ((2368, 2418), 'numpy.full', 'np.full', (['[h, w, 3]'], {'fill_value': '(255)', 'dtype': 'np.uint8'}), '([h, w, 3], fill_value=255, dtype=np.uint8)\n', (2375, 2418), True, 'import numpy as np\n'), ((2854, 2909), 'ote_sdk.entities.label.LabelEntity', 'LabelEntity', ([], {'name': '"""rect"""', 'domain': 'Domain.DETECTION', 'id': '(0)'}), "(name='rect', domain=Domain.DETECTION, id=0)\n", (2865, 2909), False, 'from ote_sdk.entities.label import LabelEntity\n'), ((4031, 4051), 'ote_sdk.entities.datasets.DatasetEntity', 'DatasetEntity', (['items'], {}), '(items)\n', (4044, 4051), False, 'from ote_sdk.entities.datasets import DatasetEntity\n'), ((5553, 5592), 'ote_sdk.entities.inference_parameters.InferenceParameters', 'InferenceParameters', ([], {'is_evaluation': '(True)'}), '(is_evaluation=True)\n', (5572, 5592), False, 'from ote_sdk.entities.inference_parameters import InferenceParameters\n'), ((6352, 6391), 'mmdet.apis.ote.apis.detection.ote_utils.get_task_class', 'get_task_class', (['openvino_task_impl_path'], {}), '(openvino_task_impl_path)\n', (6366, 6391), False, 'from mmdet.apis.ote.apis.detection.ote_utils import get_task_class\n'), ((6699, 6828), 'ote_sdk.entities.resultset.ResultSetEntity', 'ResultSetEntity', ([], {'model': 'output_model', 'ground_truth_dataset': 'validation_dataset', 'prediction_dataset': 'predicted_validation_dataset'}), '(model=output_model, ground_truth_dataset=validation_dataset,\n prediction_dataset=predicted_validation_dataset)\n', (6714, 6828), False, 'from ote_sdk.entities.resultset import ResultSetEntity\n'), ((7658, 7791), 'ote_sdk.entities.resultset.ResultSetEntity', 'ResultSetEntity', ([], {'model': 'optimized_model', 'ground_truth_dataset': 'validation_dataset', 'prediction_dataset': 'predicted_validation_dataset'}), '(model=optimized_model, 
ground_truth_dataset=\n validation_dataset, prediction_dataset=predicted_validation_dataset)\n', (7673, 7791), False, 'from ote_sdk.entities.resultset import ResultSetEntity\n'), ((2488, 2527), 'numpy.array', 'np.array', (['[0, 128, 128]'], {'dtype': 'np.uint8'}), '([0, 128, 128], dtype=np.uint8)\n', (2496, 2527), True, 'import numpy as np\n'), ((2566, 2603), 'ote_sdk.entities.shapes.rectangle.Rectangle', 'Rectangle', ([], {'x1': 'x1', 'y1': 'y1', 'x2': 'x2', 'y2': 'y2'}), '(x1=x1, y1=y1, x2=x2, y2=y2)\n', (2575, 2603), False, 'from ote_sdk.entities.shapes.rectangle import Rectangle\n'), ((6638, 6677), 'ote_sdk.entities.inference_parameters.InferenceParameters', 'InferenceParameters', ([], {'is_evaluation': '(True)'}), '(is_evaluation=True)\n', (6657, 6677), False, 'from ote_sdk.entities.inference_parameters import InferenceParameters\n'), ((7380, 7404), 'ote_sdk.entities.optimization_parameters.OptimizationParameters', 'OptimizationParameters', ([], {}), '()\n', (7402, 7404), False, 'from ote_sdk.entities.optimization_parameters import OptimizationParameters\n'), ((7597, 7636), 'ote_sdk.entities.inference_parameters.InferenceParameters', 'InferenceParameters', ([], {'is_evaluation': '(True)'}), '(is_evaluation=True)\n', (7616, 7636), False, 'from ote_sdk.entities.inference_parameters import InferenceParameters\n'), ((3031, 3048), 'ote_sdk.entities.image.Image', 'Image', ([], {'data': 'image'}), '(data=image)\n', (3036, 3048), False, 'from ote_sdk.entities.image import Image\n'), ((3156, 3184), 'ote_sdk.entities.scored_label.ScoredLabel', 'ScoredLabel', ([], {'label': 'labels[0]'}), '(label=labels[0])\n', (3167, 3184), False, 'from ote_sdk.entities.scored_label import ScoredLabel\n')] |
import os
import numpy as np
import cv2 as cv
import random
import math
def read_image(img_path="", img_h=128, img_w=128):
    """Read an image from disk, resized to (img_h, img_w, 3) if needed.

    Parameters
    ----------
    img_path : str
        Path of the image file to load.
    img_h, img_w : int
        Target height and width in pixels.

    Returns
    -------
    numpy.ndarray of shape (img_h, img_w, 3), or None if the file could
    not be read as an image.
    """
    # BUG FIX: the original read the file twice and tested
    # `file_data.any() != None`, which compares a bool against None and is
    # always True; a missing file crashed in np.size(None, 0) instead of
    # returning None. cv.imread returns None on failure, so test that.
    image = cv.imread(img_path)
    if image is None:
        return None
    i_height = image.shape[0]
    i_width = image.shape[1]
    if i_height != img_h or i_width != img_w:
        # BUG FIX: cv.resize's dsize is (width, height); the original passed
        # (img_h, img_w), transposing the target size for non-square shapes.
        # (The original's follow-up reshape to the image's own shape was a
        # no-op and is dropped.)
        image = cv.resize(image, dsize=(img_w, img_h),
                          interpolation=cv.INTER_LANCZOS4)
    return image
def img_list(folder_path="", img_h=128, img_w=128, batch_size=1000):
    """Yield shuffled (images, labels) batches for all images under a folder.

    Each subdirectory name is interpreted as a 1-based integer class label
    (converted to 0-based here); images are loaded and resized with
    read_image(). The full file list is shuffled once before batching.

    Yields
    ------
    (numpy.ndarray, numpy.ndarray)
        Batch of image data (dtype float) and a column of 0-based labels.
    """
    paths_labels = []
    for dir_path, _, dir_files in os.walk(folder_path):
        for file_name in dir_files:
            single_image_path = os.path.join(dir_path, file_name)
            # Label is the name of the directory containing the file.
            single_image_label = os.path.basename(dir_path)
            paths_labels.append([single_image_path, single_image_label])
    random.shuffle(paths_labels)
    tdata_len = len(paths_labels)
    iterations = math.ceil(tdata_len / batch_size)
    for count in range(iterations):
        # Python slicing clamps past the end of the list, so a single slice
        # handles both full batches and the shorter final batch; the original
        # duplicated the whole loop body in an if/else for this.
        batch = paths_labels[count * batch_size:(count + 1) * batch_size]
        m_data = [read_image(path, img_h, img_w) for path, _ in batch]
        m_label = [[int(label) - 1] for _, label in batch]
        yield np.array(m_data, dtype=float), np.array(m_label)
def total_files(folder_path=""):
    """Recursively count the files under *folder_path*, print and return it."""
    files_count = 0
    # os.walk yields (dirpath, dirnames, filenames); only filenames matter.
    for _, _, filenames in os.walk(folder_path):
        files_count += len(filenames)
    print(f"Files Count:= {files_count}")
    return int(files_count)
| [
"math.ceil",
"random.shuffle",
"numpy.size",
"os.path.join",
"numpy.array",
"os.path.basename",
"cv2.resize",
"cv2.imread",
"os.walk"
] | [((145, 164), 'cv2.imread', 'cv.imread', (['img_path'], {}), '(img_path)\n', (154, 164), True, 'import cv2 as cv\n'), ((181, 198), 'numpy.size', 'np.size', (['image', '(0)'], {}), '(image, 0)\n', (188, 198), True, 'import numpy as np\n'), ((214, 231), 'numpy.size', 'np.size', (['image', '(1)'], {}), '(image, 1)\n', (221, 231), True, 'import numpy as np\n'), ((704, 724), 'os.walk', 'os.walk', (['folder_path'], {}), '(folder_path)\n', (711, 724), False, 'import os\n'), ((1071, 1099), 'random.shuffle', 'random.shuffle', (['paths_labels'], {}), '(paths_labels)\n', (1085, 1099), False, 'import random\n'), ((1153, 1186), 'math.ceil', 'math.ceil', (['(tdata_len / batch_size)'], {}), '(tdata_len / batch_size)\n', (1162, 1186), False, 'import math\n'), ((1960, 1980), 'os.walk', 'os.walk', (['folder_path'], {}), '(folder_path)\n', (1967, 1980), False, 'import os\n'), ((260, 279), 'cv2.imread', 'cv.imread', (['img_path'], {}), '(img_path)\n', (269, 279), True, 'import cv2 as cv\n'), ((396, 471), 'cv2.resize', 'cv.resize', (['file_data'], {'dsize': '(img_h, img_w)', 'interpolation': 'cv.INTER_LANCZOS4'}), '(file_data, dsize=(img_h, img_w), interpolation=cv.INTER_LANCZOS4)\n', (405, 471), True, 'import cv2 as cv\n'), ((900, 928), 'os.path.join', 'os.path.join', (['dir_path', 'file'], {}), '(dir_path, file)\n', (912, 928), False, 'import os\n'), ((963, 989), 'os.path.basename', 'os.path.basename', (['dir_path'], {}), '(dir_path)\n', (979, 989), False, 'import os\n'), ((1570, 1599), 'numpy.array', 'np.array', (['m_data'], {'dtype': 'float'}), '(m_data, dtype=float)\n', (1578, 1599), True, 'import numpy as np\n'), ((1601, 1618), 'numpy.array', 'np.array', (['m_label'], {}), '(m_label)\n', (1609, 1618), True, 'import numpy as np\n'), ((1861, 1890), 'numpy.array', 'np.array', (['m_data'], {'dtype': 'float'}), '(m_data, dtype=float)\n', (1869, 1890), True, 'import numpy as np\n'), ((1892, 1909), 'numpy.array', 'np.array', (['m_label'], {}), '(m_label)\n', (1900, 1909), True, 
'import numpy as np\n')] |
import pandas as pd
from flask import Flask, jsonify, request, Response
import pickle
import base64
import jsonpickle
import numpy as np
import cv2
import json
from PIL import Image
# app
app = Flask(__name__)

# Paths to the pretrained GoogLeNet Caffe model and its class-label file.
prototxt = 'model/bvlc_googlenet.prototxt'
model = 'model/bvlc_googlenet.caffemodel'
labels = 'model/synset_words.txt'

# Load the class labels from disk: each line is "<synset_id> label1,label2,...";
# keep only the first label after the synset id.
# BUG FIX: the original `open(labels)` never closed the file handle.
with open(labels) as labels_file:
    rows = labels_file.read().strip().split("\n")
classes = [r[r.find(" ") + 1:].split(",")[0] for r in rows]

# Load the serialized Caffe model once at startup.
net = cv2.dnn.readNetFromCaffe(prototxt, model)
# routes
@app.route('/', methods=['POST', 'GET'])
def predict():
    """Homepage endpoint; confirms the backend is reachable."""
    message = 'Homepage Backend'
    return message
@app.route('/api/test', methods=['POST', 'GET'])
def test():
    """Classify an uploaded image with GoogLeNet and return the top-50 labels.

    Expects a multipart POST with the image under the 'file_field' key.
    Returns a JSON response {'results': [...]} on success, an error string
    for non-POST requests, and a JSON {'Error': ...} payload on failure.
    """
    try:
        if request.method == 'POST':
            # Decode the upload and convert from PIL (RGB) to OpenCV (BGR).
            img = Image.open(request.files['file_field'])
            image = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
            cv2.imwrite('image.jpg', image)
            # The CNN requires fixed 224x224 inputs with mean subtraction
            # (104, 117, 123); the resulting blob has shape (1, 3, 224, 224).
            blob = cv2.dnn.blobFromImage(image, 1, (224, 224), (104, 117, 123))
            # Forward pass to obtain the class probabilities.
            net.setInput(blob)
            preds = net.forward()
            # Sort probabilities in descending order and keep the top 50.
            idxs = np.argsort(preds[0])[::-1][:50]
            listResults = []
            for (i, idx) in enumerate(idxs):
                # Draw only the single best prediction onto the image.
                if i == 0:
                    text = "Label: {}, {:.2f}%".format(classes[idx],
                                                       preds[0][idx] * 100)
                    cv2.putText(image, text, (5, 25), cv2.FONT_HERSHEY_SIMPLEX,
                                0.7, (0, 0, 255), 2)
                output = ("{}, {}, {:.5}".format(i + 1,
                                                 classes[idx], preds[0][idx]))
                listResults.append(output)
            response = {'results': listResults}
            response_pickled = jsonpickle.encode(response)
            return Response(response=response_pickled, status=200,
                            mimetype="application/json")
        else:
            return ('[ERROR] La richiesta non è in POST')
    except Exception as e:
        # BUG FIX: the original built this dict but never returned it, so the
        # view returned None and Flask raised "did not return a valid
        # response" instead of reporting the actual error.
        response = {'Error': str(e)}
        return Response(response=jsonpickle.encode(response), status=500,
                        mimetype="application/json")
if __name__ == '__main__':
    # Flask development server only; debug=True must not be used in production.
    app.run(port = 5000, debug=True)
| [
"cv2.dnn.blobFromImage",
"cv2.imwrite",
"PIL.Image.open",
"flask.Flask",
"cv2.dnn.readNetFromCaffe",
"cv2.putText",
"numpy.argsort",
"numpy.array",
"flask.Response",
"jsonpickle.encode"
] | [((195, 210), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (200, 210), False, 'from flask import Flask, jsonify, request, Response\n'), ((518, 559), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (['prototxt', 'model'], {}), '(prototxt, model)\n', (542, 559), False, 'import cv2\n'), ((811, 844), 'PIL.Image.open', 'Image.open', (["r.files['file_field']"], {}), "(r.files['file_field'])\n", (821, 844), False, 'from PIL import Image\n'), ((924, 955), 'cv2.imwrite', 'cv2.imwrite', (['"""image.jpg"""', 'image'], {}), "('image.jpg', image)\n", (935, 955), False, 'import cv2\n'), ((1314, 1374), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['image', '(1)', '(224, 224)', '(104, 117, 123)'], {}), '(image, 1, (224, 224), (104, 117, 123))\n', (1335, 1374), False, 'import cv2\n'), ((2561, 2588), 'jsonpickle.encode', 'jsonpickle.encode', (['response'], {}), '(response)\n', (2578, 2588), False, 'import jsonpickle\n'), ((2608, 2684), 'flask.Response', 'Response', ([], {'response': 'response_pickled', 'status': '(200)', 'mimetype': '"""application/json"""'}), "(response=response_pickled, status=200, mimetype='application/json')\n", (2616, 2684), False, 'from flask import Flask, jsonify, request, Response\n'), ((878, 891), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (886, 891), True, 'import numpy as np\n'), ((1746, 1766), 'numpy.argsort', 'np.argsort', (['preds[0]'], {}), '(preds[0])\n', (1756, 1766), True, 'import numpy as np\n'), ((2137, 2222), 'cv2.putText', 'cv2.putText', (['image', 'text', '(5, 25)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(0, 0, 255)', '(2)'], {}), '(image, text, (5, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2\n )\n', (2148, 2222), False, 'import cv2\n')] |
# coding=utf-8
"""
This python script trains a ConvNet on CIFAR-10 with BinaryNet.
It should run for about 15 hours on a GeForce GTX 980 Ti GPU.
The final test error should be around 11.40%.
Source:
https://github.com/MatthieuCourbariaux/BinaryNet
"""
from __future__ import print_function
import lasagne
# specifying the gpu to use
import theano.sandbox.cuda
from scripts.ann_architectures.BinaryConnect import binary_net
theano.sandbox.cuda.use('gpu0')
def build_network():
    """Build the BinaryNet CIFAR-10 ConvNet and compile train/val functions.

    Architecture (VGG-like): 128C3-128C3-P2 - 256C3-256C3-P2 -
    512C3-512C3-P2 - 1024FP-1024FP-10FP, with binary weights and a binary
    tanh activation after every layer.

    Returns
    -------
    (lasagne layer, theano function, theano function)
        The output layer, a training-step function
        f(inputs, targets, lr) -> loss, and a validation function
        f(inputs, targets) -> [loss, error rate].
    """
    import theano
    import theano.tensor as t
    from collections import OrderedDict

    # BinaryOut
    activation = binary_net.binary_tanh_unit
    print("activation = binary_net.binary_tanh_unit")

    # BinaryConnect
    binary = True
    print("binary = " + str(binary))
    stochastic = False
    print("stochastic = " + str(stochastic))

    # (-h,+h) are the two binary values
    h = 1.
    print("h = " + str(h))

    # "Glorot" means we are using the coefficients from Glorot's paper
    w_lr_scale = "Glorot"
    print("w_lr_scale = " + str(w_lr_scale))

    # Theano variables for inputs, targets and the (decayed) learning rate.
    input_var = t.tensor4('inputs')
    target = t.matrix('targets')
    lr = t.scalar('lr', dtype=theano.config.floatX)

    # The original repeated these layer constructions eight times; factoring
    # them out preserves the exact layer creation order (and hence the
    # parameter ordering) while removing the duplication.
    def binary_conv(net_in, num_filters):
        # One binary 3x3 'same' convolution, no nonlinearity yet.
        return binary_net.Conv2DLayer(
            net_in,
            binary=binary,
            stochastic=stochastic,
            H=h,
            W_LR_scale=w_lr_scale,
            num_filters=num_filters,
            filter_size=(3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.identity)

    def binary_dense(net_in, num_units):
        # One binary fully-connected layer, no nonlinearity yet.
        return binary_net.DenseLayer(
            net_in,
            binary=binary,
            stochastic=stochastic,
            H=h,
            W_LR_scale=w_lr_scale,
            nonlinearity=lasagne.nonlinearities.identity,
            num_units=num_units)

    def nonlin(net_in):
        # Binary tanh activation layer.
        return lasagne.layers.NonlinearityLayer(net_in,
                                                nonlinearity=activation)

    cnn = lasagne.layers.InputLayer(shape=(None, 3, 32, 32),
                                    input_var=input_var)

    # 128C3-128C3-P2
    cnn = nonlin(binary_conv(cnn, 128))
    cnn = binary_conv(cnn, 128)
    cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
    cnn = nonlin(cnn)

    # 256C3-256C3-P2
    cnn = nonlin(binary_conv(cnn, 256))
    cnn = binary_conv(cnn, 256)
    cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
    cnn = nonlin(cnn)

    # 512C3-512C3-P2
    cnn = nonlin(binary_conv(cnn, 512))
    cnn = binary_conv(cnn, 512)
    cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
    cnn = nonlin(cnn)

    # 1024FP-1024FP-10FP
    cnn = nonlin(binary_dense(cnn, 1024))
    cnn = nonlin(binary_dense(cnn, 1024))
    cnn = binary_dense(cnn, 10)

    train_output = lasagne.layers.get_output(cnn, deterministic=False)

    # Squared hinge loss (targets are one-hot in {-1, +1}).
    loss = t.mean(t.sqr(t.maximum(0., 1. - target * train_output)))

    if binary:
        from itertools import chain
        # Binary weights get their own gradients, clipping and per-layer
        # learning-rate scaling ...
        w = lasagne.layers.get_all_params(cnn, binary=True)
        w_grads = binary_net.compute_grads(loss, cnn)
        updates = lasagne.updates.adam(loss_or_grads=w_grads, params=w,
                                       learning_rate=lr)
        updates = binary_net.clipping_scaling(updates, cnn)
        # ... while the remaining real-valued parameters use plain Adam;
        # both update dicts are merged in order.
        params = lasagne.layers.get_all_params(cnn, trainable=True,
                                               binary=False)
        updates = OrderedDict(chain(updates.items(), lasagne.updates.adam(
            loss_or_grads=loss, params=params, learning_rate=lr).items()))
    else:
        params = lasagne.layers.get_all_params(cnn, trainable=True)
        updates = lasagne.updates.adam(loss_or_grads=loss, params=params,
                                       learning_rate=lr)

    # Deterministic forward pass for evaluation.
    test_output = lasagne.layers.get_output(cnn, deterministic=True)
    test_loss = t.mean(t.sqr(t.maximum(0., 1. - target * test_output)))
    test_err = t.mean(t.neq(t.argmax(test_output, axis=1),
                             t.argmax(target, axis=1)),
                       dtype=theano.config.floatX)

    # Compile a function performing a training step on a mini-batch (by
    # giving the updates dictionary) and returning the training loss:
    train_fn = theano.function([input_var, target, lr], loss, updates=updates)
    # Compile a second function computing the validation loss and error:
    val_fn = theano.function([input_var, target], [test_loss, test_err])

    return cnn, train_fn, val_fn
if __name__ == "__main__":
    from pylearn2.datasets.cifar10 import CIFAR10
    import numpy as np
    from snntoolbox.datasets.utils import save_parameters
    np.random.seed(1234)  # for reproducibility?
    # Training parameters
    batch_size = 50
    print("batch_size = " + str(batch_size))
    num_epochs = 500
    print("num_epochs = " + str(num_epochs))
    # Decaying LR: geometric schedule, the rate is multiplied by LR_decay
    # every epoch so it reaches LR_fin after num_epochs epochs.
    LR_start = 0.001
    print("LR_start = " + str(LR_start))
    LR_fin = 0.0000003
    print("LR_fin = " + str(LR_fin))
    LR_decay = (LR_fin / LR_start) ** (1. / num_epochs)
    print("LR_decay = " + str(LR_decay))
    # BTW, LR decay might good for the BN moving average...
    # First 45000 training images for training, the remaining 5000 for
    # validation (see the CIFAR10 start/stop arguments below).
    train_set_size = 45000
    print("train_set_size = " + str(train_set_size))
    shuffle_parts = 1
    print("shuffle_parts = " + str(shuffle_parts))
    print('Loading CIFAR-10 dataset...')
    train_set = CIFAR10(which_set="train", start=0, stop=train_set_size)
    valid_set = CIFAR10(which_set="train", start=train_set_size, stop=50000)
    test_set = CIFAR10(which_set="test")
    # bc01 format
    # Inputs in the range [-1,+1]: rescale pixel values from [0, 255] and
    # reshape the flat rows to (N, 3, 32, 32).
    # print("Inputs in the range [-1,+1]")
    train_set.X = np.reshape(
        np.subtract(np.multiply(2. / 255., train_set.X), 1.), (-1, 3, 32, 32))
    valid_set.X = np.reshape(
        np.subtract(np.multiply(2. / 255., valid_set.X), 1.), (-1, 3, 32, 32))
    test_set.X = np.reshape(
        np.subtract(np.multiply(2. / 255., test_set.X), 1.), (-1, 3, 32, 32))
    # flatten targets
    train_set.y = np.hstack(train_set.y)
    valid_set.y = np.hstack(valid_set.y)
    test_set.y = np.hstack(test_set.y)
    # Onehot the targets
    train_set.y = np.float32(np.eye(10)[train_set.y])
    valid_set.y = np.float32(np.eye(10)[valid_set.y])
    test_set.y = np.float32(np.eye(10)[test_set.y])
    # for hinge loss: map the one-hot {0, 1} targets to {-1, +1}
    train_set.y = 2 * train_set.y - 1.
    valid_set.y = 2 * valid_set.y - 1.
    test_set.y = 2 * test_set.y - 1.
    print('Building the CNN...')
    model, train_func, val_func = build_network()
    print('Training...')
    binary_net.train(train_func, val_func, model, batch_size, LR_start,
                     LR_decay,
                     num_epochs, train_set.X, train_set.y, valid_set.X,
                     valid_set.y, test_set.X, test_set.y,
                     shuffle_parts=shuffle_parts)
    # Histogram of the first hidden (binary) convolution layer's weights.
    # NOTE(review): the figure is neither shown nor saved here — add
    # plt.show() or plt.savefig(...) if the plot is actually wanted.
    W = lasagne.layers.get_all_layers(model)[1].W.get_value()
    import matplotlib.pyplot as plt
    plt.hist(W.flatten())
    plt.title("Weight distribution of first hidden convolution layer")
    # Dump the network weights to a file
    # (the filename presumably records the achieved accuracy — confirm).
    filepath = '70.14.h5'
    parameters = lasagne.layers.get_all_param_values(model)
    save_parameters(parameters, filepath)
| [
"snntoolbox.datasets.utils.save_parameters",
"numpy.hstack",
"scripts.ann_architectures.BinaryConnect.binary_net.train",
"theano.tensor.argmax",
"lasagne.layers.MaxPool2DLayer",
"lasagne.layers.get_all_params",
"lasagne.updates.adam",
"numpy.multiply",
"theano.function",
"pylearn2.datasets.cifar10... | [((434, 465), 'theano.sandbox.cuda.use', 'theano.sandbox.cuda.use', (['"""gpu0"""'], {}), "('gpu0')\n", (457, 465), False, 'import theano\n'), ((1335, 1354), 'theano.tensor.tensor4', 't.tensor4', (['"""inputs"""'], {}), "('inputs')\n", (1344, 1354), True, 'import theano.tensor as t\n'), ((1368, 1387), 'theano.tensor.matrix', 't.matrix', (['"""targets"""'], {}), "('targets')\n", (1376, 1387), True, 'import theano.tensor as t\n'), ((1397, 1439), 'theano.tensor.scalar', 't.scalar', (['"""lr"""'], {'dtype': 'theano.config.floatX'}), "('lr', dtype=theano.config.floatX)\n", (1405, 1439), True, 'import theano.tensor as t\n'), ((1451, 1522), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', ([], {'shape': '(None, 3, 32, 32)', 'input_var': 'input_var'}), '(shape=(None, 3, 32, 32), input_var=input_var)\n', (1476, 1522), False, 'import lasagne\n'), ((1591, 1782), 'scripts.ann_architectures.BinaryConnect.binary_net.Conv2DLayer', 'binary_net.Conv2DLayer', (['cnn'], {'binary': 'binary', 'stochastic': 'stochastic', 'H': 'h', 'W_LR_scale': 'w_lr_scale', 'num_filters': '(128)', 'filter_size': '(3, 3)', 'pad': '(1)', 'nonlinearity': 'lasagne.nonlinearities.identity'}), '(cnn, binary=binary, stochastic=stochastic, H=h,\n W_LR_scale=w_lr_scale, num_filters=128, filter_size=(3, 3), pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n', (1613, 1782), False, 'from scripts.ann_architectures.BinaryConnect import binary_net\n'), ((1888, 1950), 'lasagne.layers.NonlinearityLayer', 'lasagne.layers.NonlinearityLayer', (['cnn'], {'nonlinearity': 'activation'}), '(cnn, nonlinearity=activation)\n', (1920, 1950), False, 'import lasagne\n'), ((1979, 2170), 'scripts.ann_architectures.BinaryConnect.binary_net.Conv2DLayer', 'binary_net.Conv2DLayer', (['cnn'], {'binary': 'binary', 'stochastic': 'stochastic', 'H': 'h', 'W_LR_scale': 'w_lr_scale', 'num_filters': '(128)', 'filter_size': '(3, 3)', 'pad': '(1)', 'nonlinearity': 'lasagne.nonlinearities.identity'}), 
'(cnn, binary=binary, stochastic=stochastic, H=h,\n W_LR_scale=w_lr_scale, num_filters=128, filter_size=(3, 3), pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n', (2001, 2170), False, 'from scripts.ann_architectures.BinaryConnect import binary_net\n'), ((2276, 2328), 'lasagne.layers.MaxPool2DLayer', 'lasagne.layers.MaxPool2DLayer', (['cnn'], {'pool_size': '(2, 2)'}), '(cnn, pool_size=(2, 2))\n', (2305, 2328), False, 'import lasagne\n'), ((2340, 2402), 'lasagne.layers.NonlinearityLayer', 'lasagne.layers.NonlinearityLayer', (['cnn'], {'nonlinearity': 'activation'}), '(cnn, nonlinearity=activation)\n', (2372, 2402), False, 'import lasagne\n'), ((2452, 2643), 'scripts.ann_architectures.BinaryConnect.binary_net.Conv2DLayer', 'binary_net.Conv2DLayer', (['cnn'], {'binary': 'binary', 'stochastic': 'stochastic', 'H': 'h', 'W_LR_scale': 'w_lr_scale', 'num_filters': '(256)', 'filter_size': '(3, 3)', 'pad': '(1)', 'nonlinearity': 'lasagne.nonlinearities.identity'}), '(cnn, binary=binary, stochastic=stochastic, H=h,\n W_LR_scale=w_lr_scale, num_filters=256, filter_size=(3, 3), pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n', (2474, 2643), False, 'from scripts.ann_architectures.BinaryConnect import binary_net\n'), ((2749, 2811), 'lasagne.layers.NonlinearityLayer', 'lasagne.layers.NonlinearityLayer', (['cnn'], {'nonlinearity': 'activation'}), '(cnn, nonlinearity=activation)\n', (2781, 2811), False, 'import lasagne\n'), ((2840, 3031), 'scripts.ann_architectures.BinaryConnect.binary_net.Conv2DLayer', 'binary_net.Conv2DLayer', (['cnn'], {'binary': 'binary', 'stochastic': 'stochastic', 'H': 'h', 'W_LR_scale': 'w_lr_scale', 'num_filters': '(256)', 'filter_size': '(3, 3)', 'pad': '(1)', 'nonlinearity': 'lasagne.nonlinearities.identity'}), '(cnn, binary=binary, stochastic=stochastic, H=h,\n W_LR_scale=w_lr_scale, num_filters=256, filter_size=(3, 3), pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n', (2862, 3031), False, 'from 
scripts.ann_architectures.BinaryConnect import binary_net\n'), ((3137, 3189), 'lasagne.layers.MaxPool2DLayer', 'lasagne.layers.MaxPool2DLayer', (['cnn'], {'pool_size': '(2, 2)'}), '(cnn, pool_size=(2, 2))\n', (3166, 3189), False, 'import lasagne\n'), ((3201, 3263), 'lasagne.layers.NonlinearityLayer', 'lasagne.layers.NonlinearityLayer', (['cnn'], {'nonlinearity': 'activation'}), '(cnn, nonlinearity=activation)\n', (3233, 3263), False, 'import lasagne\n'), ((3313, 3504), 'scripts.ann_architectures.BinaryConnect.binary_net.Conv2DLayer', 'binary_net.Conv2DLayer', (['cnn'], {'binary': 'binary', 'stochastic': 'stochastic', 'H': 'h', 'W_LR_scale': 'w_lr_scale', 'num_filters': '(512)', 'filter_size': '(3, 3)', 'pad': '(1)', 'nonlinearity': 'lasagne.nonlinearities.identity'}), '(cnn, binary=binary, stochastic=stochastic, H=h,\n W_LR_scale=w_lr_scale, num_filters=512, filter_size=(3, 3), pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n', (3335, 3504), False, 'from scripts.ann_architectures.BinaryConnect import binary_net\n'), ((3610, 3672), 'lasagne.layers.NonlinearityLayer', 'lasagne.layers.NonlinearityLayer', (['cnn'], {'nonlinearity': 'activation'}), '(cnn, nonlinearity=activation)\n', (3642, 3672), False, 'import lasagne\n'), ((3701, 3892), 'scripts.ann_architectures.BinaryConnect.binary_net.Conv2DLayer', 'binary_net.Conv2DLayer', (['cnn'], {'binary': 'binary', 'stochastic': 'stochastic', 'H': 'h', 'W_LR_scale': 'w_lr_scale', 'num_filters': '(512)', 'filter_size': '(3, 3)', 'pad': '(1)', 'nonlinearity': 'lasagne.nonlinearities.identity'}), '(cnn, binary=binary, stochastic=stochastic, H=h,\n W_LR_scale=w_lr_scale, num_filters=512, filter_size=(3, 3), pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n', (3723, 3892), False, 'from scripts.ann_architectures.BinaryConnect import binary_net\n'), ((3998, 4050), 'lasagne.layers.MaxPool2DLayer', 'lasagne.layers.MaxPool2DLayer', (['cnn'], {'pool_size': '(2, 2)'}), '(cnn, pool_size=(2, 2))\n', (4027, 4050), False, 
'import lasagne\n'), ((4062, 4124), 'lasagne.layers.NonlinearityLayer', 'lasagne.layers.NonlinearityLayer', (['cnn'], {'nonlinearity': 'activation'}), '(cnn, nonlinearity=activation)\n', (4094, 4124), False, 'import lasagne\n'), ((4211, 4373), 'scripts.ann_architectures.BinaryConnect.binary_net.DenseLayer', 'binary_net.DenseLayer', (['cnn'], {'binary': 'binary', 'stochastic': 'stochastic', 'H': 'h', 'W_LR_scale': 'w_lr_scale', 'nonlinearity': 'lasagne.nonlinearities.identity', 'num_units': '(1024)'}), '(cnn, binary=binary, stochastic=stochastic, H=h,\n W_LR_scale=w_lr_scale, nonlinearity=lasagne.nonlinearities.identity,\n num_units=1024)\n', (4232, 4373), False, 'from scripts.ann_architectures.BinaryConnect import binary_net\n'), ((4467, 4529), 'lasagne.layers.NonlinearityLayer', 'lasagne.layers.NonlinearityLayer', (['cnn'], {'nonlinearity': 'activation'}), '(cnn, nonlinearity=activation)\n', (4499, 4529), False, 'import lasagne\n'), ((4558, 4720), 'scripts.ann_architectures.BinaryConnect.binary_net.DenseLayer', 'binary_net.DenseLayer', (['cnn'], {'binary': 'binary', 'stochastic': 'stochastic', 'H': 'h', 'W_LR_scale': 'w_lr_scale', 'nonlinearity': 'lasagne.nonlinearities.identity', 'num_units': '(1024)'}), '(cnn, binary=binary, stochastic=stochastic, H=h,\n W_LR_scale=w_lr_scale, nonlinearity=lasagne.nonlinearities.identity,\n num_units=1024)\n', (4579, 4720), False, 'from scripts.ann_architectures.BinaryConnect import binary_net\n'), ((4814, 4876), 'lasagne.layers.NonlinearityLayer', 'lasagne.layers.NonlinearityLayer', (['cnn'], {'nonlinearity': 'activation'}), '(cnn, nonlinearity=activation)\n', (4846, 4876), False, 'import lasagne\n'), ((4905, 5065), 'scripts.ann_architectures.BinaryConnect.binary_net.DenseLayer', 'binary_net.DenseLayer', (['cnn'], {'binary': 'binary', 'stochastic': 'stochastic', 'H': 'h', 'W_LR_scale': 'w_lr_scale', 'nonlinearity': 'lasagne.nonlinearities.identity', 'num_units': '(10)'}), '(cnn, binary=binary, stochastic=stochastic, H=h,\n 
W_LR_scale=w_lr_scale, nonlinearity=lasagne.nonlinearities.identity,\n num_units=10)\n', (4926, 5065), False, 'from scripts.ann_architectures.BinaryConnect import binary_net\n'), ((5168, 5219), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['cnn'], {'deterministic': '(False)'}), '(cnn, deterministic=False)\n', (5193, 5219), False, 'import lasagne\n'), ((6233, 6283), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['cnn'], {'deterministic': '(True)'}), '(cnn, deterministic=True)\n', (6258, 6283), False, 'import lasagne\n'), ((6692, 6755), 'theano.function', 'theano.function', (['[input_var, target, lr]', 'loss'], {'updates': 'updates'}), '([input_var, target, lr], loss, updates=updates)\n', (6707, 6755), False, 'import theano\n'), ((6846, 6905), 'theano.function', 'theano.function', (['[input_var, target]', '[test_loss, test_err]'], {}), '([input_var, target], [test_loss, test_err])\n', (6861, 6905), False, 'import theano\n'), ((7105, 7125), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (7119, 7125), True, 'import numpy as np\n'), ((7819, 7875), 'pylearn2.datasets.cifar10.CIFAR10', 'CIFAR10', ([], {'which_set': '"""train"""', 'start': '(0)', 'stop': 'train_set_size'}), "(which_set='train', start=0, stop=train_set_size)\n", (7826, 7875), False, 'from pylearn2.datasets.cifar10 import CIFAR10\n'), ((7892, 7952), 'pylearn2.datasets.cifar10.CIFAR10', 'CIFAR10', ([], {'which_set': '"""train"""', 'start': 'train_set_size', 'stop': '(50000)'}), "(which_set='train', start=train_set_size, stop=50000)\n", (7899, 7952), False, 'from pylearn2.datasets.cifar10 import CIFAR10\n'), ((7968, 7993), 'pylearn2.datasets.cifar10.CIFAR10', 'CIFAR10', ([], {'which_set': '"""test"""'}), "(which_set='test')\n", (7975, 7993), False, 'from pylearn2.datasets.cifar10 import CIFAR10\n'), ((8456, 8478), 'numpy.hstack', 'np.hstack', (['train_set.y'], {}), '(train_set.y)\n', (8465, 8478), True, 'import numpy as np\n'), ((8497, 8519), 'numpy.hstack', 
'np.hstack', (['valid_set.y'], {}), '(valid_set.y)\n', (8506, 8519), True, 'import numpy as np\n'), ((8537, 8558), 'numpy.hstack', 'np.hstack', (['test_set.y'], {}), '(test_set.y)\n', (8546, 8558), True, 'import numpy as np\n'), ((8998, 9201), 'scripts.ann_architectures.BinaryConnect.binary_net.train', 'binary_net.train', (['train_func', 'val_func', 'model', 'batch_size', 'LR_start', 'LR_decay', 'num_epochs', 'train_set.X', 'train_set.y', 'valid_set.X', 'valid_set.y', 'test_set.X', 'test_set.y'], {'shuffle_parts': 'shuffle_parts'}), '(train_func, val_func, model, batch_size, LR_start,\n LR_decay, num_epochs, train_set.X, train_set.y, valid_set.X, valid_set.\n y, test_set.X, test_set.y, shuffle_parts=shuffle_parts)\n', (9014, 9201), False, 'from scripts.ann_architectures.BinaryConnect import binary_net\n'), ((9408, 9474), 'matplotlib.pyplot.title', 'plt.title', (['"""Weight distribution of first hidden convolution layer"""'], {}), "('Weight distribution of first hidden convolution layer')\n", (9417, 9474), True, 'import matplotlib.pyplot as plt\n'), ((9560, 9602), 'lasagne.layers.get_all_param_values', 'lasagne.layers.get_all_param_values', (['model'], {}), '(model)\n', (9595, 9602), False, 'import lasagne\n'), ((9607, 9644), 'snntoolbox.datasets.utils.save_parameters', 'save_parameters', (['parameters', 'filepath'], {}), '(parameters, filepath)\n', (9622, 9644), False, 'from snntoolbox.datasets.utils import save_parameters\n'), ((5398, 5445), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['cnn'], {'binary': '(True)'}), '(cnn, binary=True)\n', (5427, 5445), False, 'import lasagne\n'), ((5464, 5499), 'scripts.ann_architectures.BinaryConnect.binary_net.compute_grads', 'binary_net.compute_grads', (['loss', 'cnn'], {}), '(loss, cnn)\n', (5488, 5499), False, 'from scripts.ann_architectures.BinaryConnect import binary_net\n'), ((5518, 5589), 'lasagne.updates.adam', 'lasagne.updates.adam', ([], {'loss_or_grads': 'w_grads', 'params': 'w', 
'learning_rate': 'lr'}), '(loss_or_grads=w_grads, params=w, learning_rate=lr)\n', (5538, 5589), False, 'import lasagne\n'), ((5647, 5688), 'scripts.ann_architectures.BinaryConnect.binary_net.clipping_scaling', 'binary_net.clipping_scaling', (['updates', 'cnn'], {}), '(updates, cnn)\n', (5674, 5688), False, 'from scripts.ann_architectures.BinaryConnect import binary_net\n'), ((5742, 5806), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['cnn'], {'trainable': '(True)', 'binary': '(False)'}), '(cnn, trainable=True, binary=False)\n', (5771, 5806), False, 'import lasagne\n'), ((6032, 6082), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['cnn'], {'trainable': '(True)'}), '(cnn, trainable=True)\n', (6061, 6082), False, 'import lasagne\n'), ((6101, 6174), 'lasagne.updates.adam', 'lasagne.updates.adam', ([], {'loss_or_grads': 'loss', 'params': 'params', 'learning_rate': 'lr'}), '(loss_or_grads=loss, params=params, learning_rate=lr)\n', (6121, 6174), False, 'import lasagne\n'), ((5270, 5313), 'theano.tensor.maximum', 't.maximum', (['(0.0)', '(1.0 - target * train_output)'], {}), '(0.0, 1.0 - target * train_output)\n', (5279, 5313), True, 'import theano.tensor as t\n'), ((6313, 6355), 'theano.tensor.maximum', 't.maximum', (['(0.0)', '(1.0 - target * test_output)'], {}), '(0.0, 1.0 - target * test_output)\n', (6322, 6355), True, 'import theano.tensor as t\n'), ((6384, 6413), 'theano.tensor.argmax', 't.argmax', (['test_output'], {'axis': '(1)'}), '(test_output, axis=1)\n', (6392, 6413), True, 'import theano.tensor as t\n'), ((6443, 6467), 'theano.tensor.argmax', 't.argmax', (['target'], {'axis': '(1)'}), '(target, axis=1)\n', (6451, 6467), True, 'import theano.tensor as t\n'), ((8140, 8177), 'numpy.multiply', 'np.multiply', (['(2.0 / 255.0)', 'train_set.X'], {}), '(2.0 / 255.0, train_set.X)\n', (8151, 8177), True, 'import numpy as np\n'), ((8249, 8286), 'numpy.multiply', 'np.multiply', (['(2.0 / 255.0)', 'valid_set.X'], {}), '(2.0 / 
255.0, valid_set.X)\n', (8260, 8286), True, 'import numpy as np\n'), ((8357, 8393), 'numpy.multiply', 'np.multiply', (['(2.0 / 255.0)', 'test_set.X'], {}), '(2.0 / 255.0, test_set.X)\n', (8368, 8393), True, 'import numpy as np\n'), ((8614, 8624), 'numpy.eye', 'np.eye', (['(10)'], {}), '(10)\n', (8620, 8624), True, 'import numpy as np\n'), ((8668, 8678), 'numpy.eye', 'np.eye', (['(10)'], {}), '(10)\n', (8674, 8678), True, 'import numpy as np\n'), ((8721, 8731), 'numpy.eye', 'np.eye', (['(10)'], {}), '(10)\n', (8727, 8731), True, 'import numpy as np\n'), ((9286, 9322), 'lasagne.layers.get_all_layers', 'lasagne.layers.get_all_layers', (['model'], {}), '(model)\n', (9315, 9322), False, 'import lasagne\n'), ((5907, 5980), 'lasagne.updates.adam', 'lasagne.updates.adam', ([], {'loss_or_grads': 'loss', 'params': 'params', 'learning_rate': 'lr'}), '(loss_or_grads=loss, params=params, learning_rate=lr)\n', (5927, 5980), False, 'import lasagne\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from ..signal import signal_merge
from ..signal import signal_distort
def eda_simulate(duration=10, length=None, sampling_rate=1000, noise=0.01,
                 scr_number=1, drift=-0.01, random_state=None):
    """Simulate Electrodermal Activity (EDA) signal.
    Generate an artificial (synthetic) EDA signal of a given duration and sampling rate.
    Parameters
    ----------
    duration : int
        Desired recording length in seconds.
    sampling_rate, length : int
        The desired sampling rate (in Hz, i.e., samples/second) or the desired
        length of the signal (in samples).
    noise : float
        Noise level (amplitude of the laplace noise).
    scr_number : int
        Desired number of skin conductance responses (SCRs), i.e., peaks.
    drift : float or list
        The slope of a linear drift of the signal.
    random_state : int
        Seed for the random number generator.
    Returns
    ----------
    array
        Vector containing the EDA signal.
    Examples
    ----------
    >>> import neurokit as nk
    >>> import pandas as pd
    >>>
    >>> eda = nk.eda_simulate(duration=10, scr_number=3)
    >>> nk.signal_plot(eda)
    See Also
    --------
    ecg_simulate, rsp_simulate, emg_simulate, ppg_simulate
    References
    -----------
    - <NAME>., <NAME>., <NAME>., & <NAME>. (2010). Modelling event-related skin conductance responses. International Journal of Psychophysiology, 75(3), 349-356.
    """
    # Seed the random generator for reproducible results
    np.random.seed(random_state)
    # Generate number of samples automatically if length is unspecified
    if length is None:
        length = duration * sampling_rate
    # Tonic component: flat baseline of 1.0 plus a linear drift over time.
    eda = np.full(length, 1.0)
    eda += (drift * np.linspace(0, duration, length))
    time = [0, duration]
    # SCR onsets spread evenly over the recording (endpoint excluded so the
    # last SCR still has room to unfold).
    start_peaks = np.linspace(0, duration, scr_number, endpoint=False)
    for start_peak in start_peaks:
        # Jitter the time-to-peak of each SCR around the canonical 3.0745 s.
        relative_time_peak = np.abs(np.random.normal(0, 5, size=1)) + 3.0745
        scr = _eda_simulate_scr(sampling_rate=sampling_rate,
                                time_peak=relative_time_peak)
        # Each canonical SCR spans 9 seconds starting at its onset.
        time_scr = [start_peak, start_peak+9]
        # Clip the SCR on the left if it would start before the recording.
        if time_scr[0] < 0:
            scr = scr[int(np.round(np.abs(time_scr[0])*sampling_rate))::]
            time_scr[0] = 0
        # Clip the SCR on the right if it would run past the recording end.
        if time_scr[1] > duration:
            scr = scr[0:int(np.round((duration - time_scr[0])*sampling_rate))]
            time_scr[1] = duration
        # Overlay the (phasic) SCR onto the tonic signal.
        eda = signal_merge(signal1=eda, signal2=scr, time1=time, time2=time_scr)
    # Add random noise
    if noise > 0:
        eda = signal_distort(eda,
                             sampling_rate=sampling_rate,
                             noise_amplitude=noise,
                             noise_frequency=[5, 10, 100],
                             noise_shape="laplace",
                             silent=True)
    # Reset random seed (so it doesn't affect global)
    np.random.seed(None)
    return eda
def _eda_simulate_scr(sampling_rate=1000,
length=None,
time_peak=3.0745,
rise=0.7013,
decay=[3.1487, 14.1257]):
"""Simulate a canonical skin conductance response (SCR)
Based on `Bach (2010) <https://sourceforge.net/p/scralyze/code/HEAD/tree/branches/version_b2.1.8/scr_bf_crf.m#l24>`_
Parameters
-------------
time_peak : float
Time to peak.
rise : float
Variance of rise defining gaussian.
decay : list
Decay constants.
Examples
--------
>>> scr1 = _eda_simulate_canonical(time_peak=3.0745)
>>> scr2 = _eda_simulate_canonical(time_peak=10)
>>> pd.DataFrame({"SCR1": scr1, "SCR2": scr2}).plot()
"""
if length is None:
length = 9*sampling_rate
t = np.linspace(sampling_rate/10000, 90, length)
gt = np.exp(-((t - time_peak)**2)/(2*rise**2))
ht = np.exp(-t/decay[0]) + np.exp(-t/decay[1])
ft = np.convolve(gt, ht)
ft = ft[0:len(t)]
ft = ft/np.max(ft)
return ft
def _eda_simulate_bateman(sampling_rate=1000, t1=.75, t2=2):
"""
Generates the bateman function:
:math:`b = e^{-t/T1} - e^{-t/T2}`
Parameters
----------
fsamp : float
Sampling frequency
par_bat: list (T1, T2)
Parameters of the bateman function
Returns
-------
bateman : array
The bateman function
Examples
----------
>>> bateman = _eda_simulate_bateman()
>>> nk.signal_plot(bateman)
"""
idx_T1 = t1 * sampling_rate
idx_T2 = t2 * sampling_rate
len_bat = idx_T2 * 10
idx_bat = np.arange(len_bat)
bateman = np.exp(-idx_bat / idx_T2) - np.exp(-idx_bat / idx_T1)
# normalize
bateman = sampling_rate * bateman / np.sum(bateman)
return bateman
| [
"numpy.random.normal",
"numpy.abs",
"numpy.convolve",
"numpy.round",
"numpy.max",
"numpy.exp",
"numpy.sum",
"numpy.linspace",
"numpy.random.seed",
"numpy.full",
"numpy.arange"
] | [((1570, 1598), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (1584, 1598), True, 'import numpy as np\n'), ((1748, 1768), 'numpy.full', 'np.full', (['length', '(1.0)'], {}), '(length, 1.0)\n', (1755, 1768), True, 'import numpy as np\n'), ((1867, 1919), 'numpy.linspace', 'np.linspace', (['(0)', 'duration', 'scr_number'], {'endpoint': '(False)'}), '(0, duration, scr_number, endpoint=False)\n', (1878, 1919), True, 'import numpy as np\n'), ((2960, 2980), 'numpy.random.seed', 'np.random.seed', (['None'], {}), '(None)\n', (2974, 2980), True, 'import numpy as np\n'), ((3832, 3878), 'numpy.linspace', 'np.linspace', (['(sampling_rate / 10000)', '(90)', 'length'], {}), '(sampling_rate / 10000, 90, length)\n', (3843, 3878), True, 'import numpy as np\n'), ((3887, 3934), 'numpy.exp', 'np.exp', (['(-(t - time_peak) ** 2 / (2 * rise ** 2))'], {}), '(-(t - time_peak) ** 2 / (2 * rise ** 2))\n', (3893, 3934), True, 'import numpy as np\n'), ((3990, 4009), 'numpy.convolve', 'np.convolve', (['gt', 'ht'], {}), '(gt, ht)\n', (4001, 4009), True, 'import numpy as np\n'), ((4653, 4671), 'numpy.arange', 'np.arange', (['len_bat'], {}), '(len_bat)\n', (4662, 4671), True, 'import numpy as np\n'), ((1789, 1821), 'numpy.linspace', 'np.linspace', (['(0)', 'duration', 'length'], {}), '(0, duration, length)\n', (1800, 1821), True, 'import numpy as np\n'), ((3938, 3959), 'numpy.exp', 'np.exp', (['(-t / decay[0])'], {}), '(-t / decay[0])\n', (3944, 3959), True, 'import numpy as np\n'), ((3960, 3981), 'numpy.exp', 'np.exp', (['(-t / decay[1])'], {}), '(-t / decay[1])\n', (3966, 3981), True, 'import numpy as np\n'), ((4044, 4054), 'numpy.max', 'np.max', (['ft'], {}), '(ft)\n', (4050, 4054), True, 'import numpy as np\n'), ((4686, 4711), 'numpy.exp', 'np.exp', (['(-idx_bat / idx_T2)'], {}), '(-idx_bat / idx_T2)\n', (4692, 4711), True, 'import numpy as np\n'), ((4714, 4739), 'numpy.exp', 'np.exp', (['(-idx_bat / idx_T1)'], {}), '(-idx_bat / idx_T1)\n', (4720, 
4739), True, 'import numpy as np\n'), ((4797, 4812), 'numpy.sum', 'np.sum', (['bateman'], {}), '(bateman)\n', (4803, 4812), True, 'import numpy as np\n'), ((1992, 2022), 'numpy.random.normal', 'np.random.normal', (['(0)', '(5)'], {'size': '(1)'}), '(0, 5, size=1)\n', (2008, 2022), True, 'import numpy as np\n'), ((2395, 2445), 'numpy.round', 'np.round', (['((duration - time_scr[0]) * sampling_rate)'], {}), '((duration - time_scr[0]) * sampling_rate)\n', (2403, 2445), True, 'import numpy as np\n'), ((2265, 2284), 'numpy.abs', 'np.abs', (['time_scr[0]'], {}), '(time_scr[0])\n', (2271, 2284), True, 'import numpy as np\n')] |
"""
The module provides a convenient function to train CFPD model given right parameters
"""
from collections import defaultdict
import copy
import os
import sys
import time
from matplotlib import pyplot as plt
import numpy as np
import torch
from utils import makedir
def train_model(model, data_loaders, dataset_sizes, loss_fn, optimizer, scheduler,
                device, num_epochs, model_save_path, start_epoch=0):
    """Train ``model`` with a train/validation loop, checkpointing the best model.

    Saves a checkpoint to ``model_save_path`` whenever the validation loss
    improves, and records per-epoch losses via ``track_losses``.
    """
    losses = {"train": [], "validation": []}
    makedir(model_save_path)
    best_model = copy.deepcopy(model.state_dict())
    best_loss = sys.maxsize
    print_str = "Epoch {}/{} Phase: {} Batch: {}/{} Batch Loss: {} Time elapsed: {:.4f}m {:.4f}s"
    start = time.time()
    for epoch in range(start_epoch, num_epochs):
        print(f"Epoch {epoch+1}/{num_epochs}")
        print(60*"-")
        for phase in ["train", "validation"]:
            if phase == "train":
                # Fast-forward the scheduler to account for resumed training.
                # NOTE(review): this fast-forward loop runs on EVERY epoch, so
                # the scheduler is advanced start_epoch extra steps per epoch
                # when resuming — looks unintended; confirm with the authors.
                for _ in range(start_epoch):
                    scheduler.step()
                scheduler.step()
                model.train()
            else:
                model.eval()
            running_loss = 0.0
            batch = 0
            for inputs, gt_coords in data_loaders[phase]:
                batch_start = time.time()
                inputs = inputs.to(device)
                gt_coords = gt_coords.to(device)
                optimizer.zero_grad()
                # Gradients only in the training phase.
                with torch.set_grad_enabled(phase == "train"):
                    output_coords = model(inputs)
                    loss = loss_fn(output_coords, gt_coords)
                    if phase == "train":
                        loss.backward()
                        optimizer.step()
                # Accumulate the loss weighted by batch size.
                running_loss += loss.item() * inputs.shape[0]
                n_batches = dataset_sizes[phase]//inputs.shape[0]
                batch_end = time.time()
                batch_time_elapsed = batch_end - batch_start
                print(print_str.format(epoch, num_epochs, phase, batch, n_batches, loss.item(),
                                       batch_time_elapsed//60, batch_time_elapsed % 60))
                batch += 1
            epoch_loss = running_loss / dataset_sizes[phase]
            losses[phase].append(epoch_loss)
            print(f"Phase: {phase} Epoch: {epoch+1}/{num_epochs} Loss: {epoch_loss:.4f}")
            # Checkpoint on validation improvement.
            if phase == "validation" and epoch_loss < best_loss:
                best_loss = epoch_loss
                best_model = copy.deepcopy(model.state_dict())
                torch.save(best_model, os.path.join(model_save_path, f"cfpd_model_{epoch}_{epoch_loss}.pth"))
        track_losses(losses, model_save_path)
    end = time.time()
    time_elapsed = end - start
    print(f"Training has been completed in {time_elapsed//60:.4f}m {time_elapsed%60:.4f}s")
    print(f"Minimum Loss: {best_loss:4f}")
    with open(os.path.join(model_save_path, "best_validation_loss.txt"), "w") as f:
        print(best_loss, file=f)
def track_losses(losses, save_path):
    """Persist per-epoch train/validation losses to ``losses.txt`` and plot them."""
    val_hist = losses["validation"]
    train_hist = losses["train"]
    # Columns: epoch index, train loss, validation loss.
    table = np.column_stack((range(len(val_hist)), train_hist, val_hist))
    np.savetxt(os.path.join(save_path, "losses.txt"), table)
    draw_loss_graph(train_hist, val_hist, save_path)
def draw_loss_graph(train_losses, validation_losses, save_path):
    """Plot train/validation loss curves and save them as ``loss_graph.jpg``."""
    plt.plot(validation_losses, label="Validation Losses")
    plt.plot(train_losses, label="Train Losses")
    plt.legend(loc='upper right')
    # Cap the y-axis at the first-epoch losses so later epochs stay readable.
    ceiling = np.max([validation_losses[0], train_losses[0]])
    plt.ylim(top=ceiling)
    plt.savefig(os.path.join(save_path, "loss_graph.jpg"))
    plt.clf()
def test_model(model, test_loaders, loss_fn, results_save_path, device, best_loss=None):
    """Evaluate ``model`` on each test loader and write a text report.

    Returns a mapping from dataset name to its list of per-batch losses,
    with a combined "full_set" entry (common + challenging sets).
    """
    per_set_losses = defaultdict(list)
    model.eval()
    makedir(os.path.dirname(results_save_path))
    report = ""
    if best_loss:
        report += f"Validation lost: {best_loss}\n"
    for testset_name in test_loaders:
        loader = test_loaders[testset_name]
        total = 0.0
        with torch.no_grad():
            for inputs, gt_coords in loader:
                inputs = inputs.to(device)
                gt_coords = gt_coords.to(device)
                batch_loss = loss_fn(model(inputs), gt_coords)
                per_set_losses[testset_name].append(batch_loss.item())
                # Weight by batch size so the mean is per-sample.
                total += batch_loss.item() * inputs.shape[0]
        mean_loss = total / len(loader.dataset)
        line = f"{testset_name} Loss: {mean_loss:.4f}\n"
        report += line
        print(line)
    # Combine the two standard splits into one overall score.
    per_set_losses["full_set"] = per_set_losses["common_set"] + per_set_losses["challenging_set"]
    full_set_loss = np.mean(per_set_losses["full_set"])
    report += f"full_set loss: {full_set_loss}\n"
    with open(results_save_path, "w") as file_:
        print(report, file=file_)
    return per_set_losses
| [
"numpy.mean",
"matplotlib.pyplot.savefig",
"torch.set_grad_enabled",
"matplotlib.pyplot.plot",
"os.path.join",
"matplotlib.pyplot.clf",
"utils.makedir",
"numpy.max",
"os.path.dirname",
"collections.defaultdict",
"numpy.savetxt",
"torch.no_grad",
"time.time",
"matplotlib.pyplot.legend"
] | [((474, 498), 'utils.makedir', 'makedir', (['model_save_path'], {}), '(model_save_path)\n', (481, 498), False, 'from utils import makedir\n'), ((690, 701), 'time.time', 'time.time', ([], {}), '()\n', (699, 701), False, 'import time\n'), ((2669, 2680), 'time.time', 'time.time', ([], {}), '()\n', (2678, 2680), False, 'import time\n'), ((3201, 3238), 'os.path.join', 'os.path.join', (['save_path', '"""losses.txt"""'], {}), "(save_path, 'losses.txt')\n", (3213, 3238), False, 'import os\n'), ((3243, 3276), 'numpy.savetxt', 'np.savetxt', (['save_losses', 'save_txt'], {}), '(save_losses, save_txt)\n', (3253, 3276), True, 'import numpy as np\n'), ((3413, 3467), 'matplotlib.pyplot.plot', 'plt.plot', (['validation_losses'], {'label': '"""Validation Losses"""'}), "(validation_losses, label='Validation Losses')\n", (3421, 3467), True, 'from matplotlib import pyplot as plt\n'), ((3472, 3516), 'matplotlib.pyplot.plot', 'plt.plot', (['train_losses'], {'label': '"""Train Losses"""'}), "(train_losses, label='Train Losses')\n", (3480, 3516), True, 'from matplotlib import pyplot as plt\n'), ((3521, 3550), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (3531, 3550), True, 'from matplotlib import pyplot as plt\n'), ((3633, 3674), 'os.path.join', 'os.path.join', (['save_path', '"""loss_graph.jpg"""'], {}), "(save_path, 'loss_graph.jpg')\n", (3645, 3674), False, 'import os\n'), ((3679, 3701), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (3690, 3701), True, 'from matplotlib import pyplot as plt\n'), ((3706, 3715), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3713, 3715), True, 'from matplotlib import pyplot as plt\n'), ((3859, 3883), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (3870, 3883), False, 'from collections import defaultdict\n'), ((4833, 4868), 'numpy.mean', 'np.mean', (["dataset_losses['full_set']"], {}), 
"(dataset_losses['full_set'])\n", (4840, 4868), True, 'import numpy as np\n'), ((3912, 3946), 'os.path.dirname', 'os.path.dirname', (['results_save_path'], {}), '(results_save_path)\n', (3927, 3946), False, 'import os\n'), ((2863, 2920), 'os.path.join', 'os.path.join', (['model_save_path', '"""best_validation_loss.txt"""'], {}), "(model_save_path, 'best_validation_loss.txt')\n", (2875, 2920), False, 'import os\n'), ((3568, 3615), 'numpy.max', 'np.max', (['[validation_losses[0], train_losses[0]]'], {}), '([validation_losses[0], train_losses[0]])\n', (3574, 3615), True, 'import numpy as np\n'), ((4114, 4129), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4127, 4129), False, 'import torch\n'), ((1235, 1246), 'time.time', 'time.time', ([], {}), '()\n', (1244, 1246), False, 'import time\n'), ((1833, 1844), 'time.time', 'time.time', ([], {}), '()\n', (1842, 1844), False, 'import time\n'), ((1399, 1439), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (["(phase == 'train')"], {}), "(phase == 'train')\n", (1421, 1439), False, 'import torch\n'), ((2541, 2610), 'os.path.join', 'os.path.join', (['model_save_path', 'f"""cfpd_model_{epoch}_{epoch_loss}.pth"""'], {}), "(model_save_path, f'cfpd_model_{epoch}_{epoch_loss}.pth')\n", (2553, 2610), False, 'import os\n')] |
# File to plot the reconstruction error of the vaes in comparison to
# the mean of the slice values/ the intensity of the slices
import os
import numpy as np
import torch
import utils
from utils import tonp
import torch.distributions as dist
import matplotlib.pyplot as plt
import seaborn as sns
import yaml
import torch.nn as nn
# Output directory for the scatter plots (one per VAE run).
root_dir = os.path.join('..','..','small-results','7.10.2021','recon vs mean of vae')
# One iteration per experiment run logged by exman.
for run in os.listdir(os.path.join('logs','exman-train-vae.py','runs')):
    run_path = os.path.join('logs','exman-train-vae.py','runs',run)
    params_path = os.path.join(run_path,'params.yaml')
    with open(params_path) as file:
        params_dic = yaml.full_load(file)
    # The layer name is the second-to-last component of the run's data_dir.
    layer = params_dic['data_dir'].rsplit('/')[-2]
    vae = utils.load_vae(os.path.join(run_path),device=torch.device('cpu'))
    data_dir = os.path.join('data','resnet20','3x3','layer_{}'.format(layer[-1]),'conv')
    test_bs = 512
    z_dim = 2
    testloader, D = utils.get_dataloader(os.path.join(data_dir, 'test.npy'), test_bs, shuffle=False)
    prior = dist.Normal(torch.FloatTensor([0.]).to(vae.device), torch.FloatTensor([1.]).to(vae.device))
    tuples = []
    # Collect (||x||, ||x - x_rec||) pairs for the first 25 slices per batch.
    for i,data in enumerate(testloader):
        data = data[:25].to(vae.device)
        [z_mu, z_var], [x_mu, x_var] = vae(data)
        for x, x_rec in zip(data.reshape((-1, D, D)), x_mu.reshape((-1, D, D))):
            tuples.append([torch.linalg.norm(torch.flatten(x)),torch.linalg.norm(torch.flatten(x-x_rec))])
    tuples = np.array(tuples)
    plt.figure()
    plt.scatter(tuples[:,0],tuples[:,1])
    plt.xlim([0,3.5])
    plt.ylim([0,3.5])
    # NOTE(review): the labels below say "mean of slice", but the quantity
    # plotted on the x-axis is the L2 norm of the slice — confirm intent.
    plt.title('mean of slice vs recon error in {}'.format(layer))
    plt.xlabel('mean of slice')
    plt.ylabel('reconstruction')
    plt.savefig(os.path.join(root_dir, layer), dpi=200)
| [
"yaml.full_load",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"torch.FloatTensor",
"torch.flatten",
"torch.device"
] | [((359, 437), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""small-results"""', '"""7.10.2021"""', '"""recon vs mean of vae"""'], {}), "('..', '..', 'small-results', '7.10.2021', 'recon vs mean of vae')\n", (371, 437), False, 'import os\n'), ((457, 507), 'os.path.join', 'os.path.join', (['"""logs"""', '"""exman-train-vae.py"""', '"""runs"""'], {}), "('logs', 'exman-train-vae.py', 'runs')\n", (469, 507), False, 'import os\n'), ((524, 579), 'os.path.join', 'os.path.join', (['"""logs"""', '"""exman-train-vae.py"""', '"""runs"""', 'run'], {}), "('logs', 'exman-train-vae.py', 'runs', run)\n", (536, 579), False, 'import os\n'), ((596, 633), 'os.path.join', 'os.path.join', (['run_path', '"""params.yaml"""'], {}), "(run_path, 'params.yaml')\n", (608, 633), False, 'import os\n'), ((1529, 1545), 'numpy.array', 'np.array', (['tuples'], {}), '(tuples)\n', (1537, 1545), True, 'import numpy as np\n'), ((1551, 1563), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1561, 1563), True, 'import matplotlib.pyplot as plt\n'), ((1569, 1608), 'matplotlib.pyplot.scatter', 'plt.scatter', (['tuples[:, 0]', 'tuples[:, 1]'], {}), '(tuples[:, 0], tuples[:, 1])\n', (1580, 1608), True, 'import matplotlib.pyplot as plt\n'), ((1611, 1629), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 3.5]'], {}), '([0, 3.5])\n', (1619, 1629), True, 'import matplotlib.pyplot as plt\n'), ((1634, 1652), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 3.5]'], {}), '([0, 3.5])\n', (1642, 1652), True, 'import matplotlib.pyplot as plt\n'), ((1724, 1751), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""mean of slice"""'], {}), "('mean of slice')\n", (1734, 1751), True, 'import matplotlib.pyplot as plt\n'), ((1757, 1785), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""reconstruction"""'], {}), "('reconstruction')\n", (1767, 1785), True, 'import matplotlib.pyplot as plt\n'), ((692, 712), 'yaml.full_load', 'yaml.full_load', (['file'], {}), '(file)\n', (706, 712), False, 'import yaml\n'), ((791, 
813), 'os.path.join', 'os.path.join', (['run_path'], {}), '(run_path)\n', (803, 813), False, 'import os\n'), ((1010, 1044), 'os.path.join', 'os.path.join', (['data_dir', '"""test.npy"""'], {}), "(data_dir, 'test.npy')\n", (1022, 1044), False, 'import os\n'), ((1803, 1832), 'os.path.join', 'os.path.join', (['root_dir', 'layer'], {}), '(root_dir, layer)\n', (1815, 1832), False, 'import os\n'), ((821, 840), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (833, 840), False, 'import torch\n'), ((1095, 1119), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.0]'], {}), '([0.0])\n', (1112, 1119), False, 'import torch\n'), ((1135, 1159), 'torch.FloatTensor', 'torch.FloatTensor', (['[1.0]'], {}), '([1.0])\n', (1152, 1159), False, 'import torch\n'), ((1453, 1469), 'torch.flatten', 'torch.flatten', (['x'], {}), '(x)\n', (1466, 1469), False, 'import torch\n'), ((1489, 1513), 'torch.flatten', 'torch.flatten', (['(x - x_rec)'], {}), '(x - x_rec)\n', (1502, 1513), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Implementation for Single Image Haze Removal Using Dark Channel Prior.
Reference:
http://research.microsoft.com/en-us/um/people/kahe/cvpr09/
http://research.microsoft.com/en-us/um/people/kahe/eccv10/
"""
import numpy as np
from PIL import Image
from guidedfilter import guided_filter
R, G, B = 0, 1, 2 # index for convenience
L = 256 # color depth
def get_dark_channel(I, w):
    """Get the dark channel prior in the (RGB) image data.
    Parameters
    -----------
    I: an M * N * 3 numpy array containing data ([0, L-1]) in the image where
        M is the height, N is the width, 3 represents R/G/B channels.
    w: window size
    Return
    -----------
    An M * N array for the dark channel prior ([0, L-1]).
    """
    M, N, _ = I.shape
    # Integer division: np.pad requires integer pad widths (the original
    # `w / 2` produces a float under Python 3 and raises a TypeError).
    pad = w // 2
    padded = np.pad(I, ((pad, pad), (pad, pad), (0, 0)), 'edge')
    darkch = np.zeros((M, N))
    # Minimum over the w*w neighborhood and all three channels.
    for i, j in np.ndindex(darkch.shape):
        darkch[i, j] = np.min(padded[i:i + w, j:j + w, :])  # CVPR09, eq.5
    return darkch
def get_atmosphere(I, darkch, p):
    """Get the atmosphere light in the (RGB) image data.
    Parameters
    -----------
    I: the M * N * 3 RGB image data ([0, L-1]) as numpy array
    darkch: the dark channel prior of the image as an M * N numpy array
    p: percentage of pixels for estimating the atmosphere light
    Return
    -----------
    A 3-element array containing atmosphere light ([0, L-1]) for each channel
    """
    # reference CVPR09, 4.4
    M, N = darkch.shape
    flatI = I.reshape(M * N, 3)
    flatdark = darkch.ravel()
    # Slice bounds must be integers under Python 3 (M * N * p is a float).
    n_pixels = int(M * N * p)
    searchidx = (-flatdark).argsort()[:n_pixels]  # find top M * N * p indexes
    # `//` keeps the (row, col) coordinates integral under Python 3.
    print('atmosphere light region:', [(i // N, i % N) for i in searchidx])
    # return the highest intensity for each channel
    return np.max(flatI.take(searchidx, axis=0), axis=0)
def get_transmission(I, A, darkch, omega, w):
    """Estimate the transmission map of the hazy (RGB) image data.

    Parameters
    -----------
    I: the M * N * 3 RGB image data ([0, L-1]) as numpy array
    A: a 3-element array containing atmosphere light
       ([0, L-1]) for each channel
    darkch: the dark channel prior of the image as an M * N numpy array
    omega: bias for the estimate
    w: window size for the estimate

    Return
    -----------
    An M * N array containing the transmission rate ([0.0, 1.0])
    """
    # CVPR09, eq.12: the dark channel of the atmosphere-normalized image
    # approximates the haze density, so transmission is its complement
    # (biased by omega to keep a trace of haze for depth perception).
    normalized_dark = get_dark_channel(I / A, w)
    return 1 - omega * normalized_dark
def dehaze_raw(I, tmin=0.2, Amax=220, w=15, p=0.0001,
               omega=0.95, guided=True, r=40, eps=1e-3):
    """Get the dark channel prior, atmosphere light, transmission rate
    and refined transmission rate for raw RGB image data.
    Parameters
    -----------
    I: M * N * 3 data as numpy array for the hazy image
    tmin: threshold of transmission rate
    Amax: threshold of atmosphere light
    w: window size of the dark channel prior
    p: percentage of pixels for estimating the atmosphere light
    omega: bias for the transmission estimate
    guided: whether to use the guided filter to fine the image
    r: the radius of the guidance
    eps: epsilon for the guided filter
    Return
    -----------
    (Idark, A, rawt, refinedt) if guided=False, then rawt == refinedt
    """
    m, n, _ = I.shape
    Idark = get_dark_channel(I, w)
    A = get_atmosphere(I, Idark, p)
    # Cap the atmosphere light so bright objects don't dominate (ECCV10).
    A = np.minimum(A, Amax)  # threshold A
    print('atmosphere', A)
    rawt = get_transmission(I, A, Idark, omega, w)
    print('raw transmission rate',)
    print('between [%.4f, %.4f]' % (rawt.min(), rawt.max()))
    # Floor the transmission so the recovery in eq.16 never divides by ~0.
    rawt = refinedt = np.maximum(rawt, tmin)  # threshold t
    if guided:
        normI = (I - I.min()) / (I.max() - I.min())  # normalize I
        # Edge-preserving refinement guided by the normalized input image.
        refinedt = guided_filter(normI, refinedt, r, eps)
    print('refined transmission rate',)
    print('between [%.4f, %.4f]' % (refinedt.min(), refinedt.max()))
    return Idark, A, rawt, refinedt
def get_radiance(I, A, t):
    """Recover the radiance from raw image data with atmosphere light
    and transmission rate estimate.

    Parameters
    ----------
    I: M * N * 3 data as numpy array for the hazy image
    A: a 3-element array containing atmosphere light
       ([0, L-1]) for each channel
    t: estimate of the transmission rate

    Return
    ----------
    M * N * 3 numpy array for the recovered radiance
    """
    # Broadcast the per-pixel transmission map across the three channels,
    # preserving I's dtype.
    tiledt = np.zeros_like(I)  # tiled to M * N * 3
    for channel in range(3):
        tiledt[:, :, channel] = t
    return (I - A) / tiledt + A  # CVPR09, eq.16
def dehaze(im, tmin=0.2, Amax=220, w=15, p=0.0001,
           omega=0.95, guided=True, r=40, eps=1e-3):
    """Dehaze the given RGB image.
    Parameters
    ----------
    im: the Image object of the RGB image
    guided: refine the dehazing with guided filter or not
    other parameters are the same as `dehaze_raw`
    Return
    ----------
    (dark, rawt, refinedt, rawrad, rerad)
    Images for dark channel prior, raw transmission estimate,
    refined transmission estimate, recovered radiance with raw t,
    recovered radiance with refined t.
    """
    I = np.asarray(im, dtype=np.float64)
    Idark, A, rawt, refinedt = dehaze_raw(I, tmin, Amax, w, p,
                                          omega, guided, r, eps)
    # White canvas used to visualize the [0, 1] transmission maps in [0, L-1].
    white = np.full_like(Idark, L - 1)
    def to_img(raw):
        # threshold to [0, L-1]
        cut = np.maximum(np.minimum(raw, L - 1), 0).astype(np.uint8)
        if len(raw.shape) == 3:
            # 3-channel result: log the per-channel value range for debugging.
            print('Range for each channel:')
            for ch in range(3):
                print('[%.2f, %.2f]' % (raw[:, :, ch].max(), raw[:, :, ch].min()))
            return Image.fromarray(cut)
        else:
            return Image.fromarray(cut)
    return [to_img(raw) for raw in (Idark, white * rawt, white * refinedt,
                                    get_radiance(I, A, rawt),
                                    get_radiance(I, A, refinedt))]
| [
"guidedfilter.guided_filter",
"PIL.Image.fromarray",
"numpy.minimum",
"numpy.full_like",
"numpy.asarray",
"numpy.ndindex",
"numpy.pad",
"numpy.zeros",
"numpy.min",
"numpy.maximum",
"numpy.zeros_like"
] | [((823, 882), 'numpy.pad', 'np.pad', (['I', '((w / 2, w / 2), (w / 2, w / 2), (0, 0))', '"""edge"""'], {}), "(I, ((w / 2, w / 2), (w / 2, w / 2), (0, 0)), 'edge')\n", (829, 882), True, 'import numpy as np\n'), ((896, 912), 'numpy.zeros', 'np.zeros', (['(M, N)'], {}), '((M, N))\n', (904, 912), True, 'import numpy as np\n'), ((929, 953), 'numpy.ndindex', 'np.ndindex', (['darkch.shape'], {}), '(darkch.shape)\n', (939, 953), True, 'import numpy as np\n'), ((3444, 3463), 'numpy.minimum', 'np.minimum', (['A', 'Amax'], {}), '(A, Amax)\n', (3454, 3463), True, 'import numpy as np\n'), ((3678, 3700), 'numpy.maximum', 'np.maximum', (['rawt', 'tmin'], {}), '(rawt, tmin)\n', (3688, 3700), True, 'import numpy as np\n'), ((4477, 4493), 'numpy.zeros_like', 'np.zeros_like', (['I'], {}), '(I)\n', (4490, 4493), True, 'import numpy as np\n'), ((5203, 5235), 'numpy.asarray', 'np.asarray', (['im'], {'dtype': 'np.float64'}), '(im, dtype=np.float64)\n', (5213, 5235), True, 'import numpy as np\n'), ((5376, 5402), 'numpy.full_like', 'np.full_like', (['Idark', '(L - 1)'], {}), '(Idark, L - 1)\n', (5388, 5402), True, 'import numpy as np\n'), ((978, 1013), 'numpy.min', 'np.min', (['padded[i:i + w, j:j + w, :]'], {}), '(padded[i:i + w, j:j + w, :])\n', (984, 1013), True, 'import numpy as np\n'), ((3817, 3855), 'guidedfilter.guided_filter', 'guided_filter', (['normI', 'refinedt', 'r', 'eps'], {}), '(normI, refinedt, r, eps)\n', (3830, 3855), False, 'from guidedfilter import guided_filter\n'), ((5738, 5758), 'PIL.Image.fromarray', 'Image.fromarray', (['cut'], {}), '(cut)\n', (5753, 5758), False, 'from PIL import Image\n'), ((5792, 5812), 'PIL.Image.fromarray', 'Image.fromarray', (['cut'], {}), '(cut)\n', (5807, 5812), False, 'from PIL import Image\n'), ((5482, 5504), 'numpy.minimum', 'np.minimum', (['raw', '(L - 1)'], {}), '(raw, L - 1)\n', (5492, 5504), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import scipy
from scipy.stats import laplace
def estimate_precsion(max, min ):
    """Return ``(1/max - min) / (max - min)``, used as the half-width of the
    CDF neighbourhood in the normalized-time domain.

    NOTE(review): the parameters shadow the ``max``/``min`` builtins; they are
    kept as-is so keyword callers are not broken.
    """
    reciprocal = 1 / max
    return (reciprocal - min) / (max - min)
def match_vals(row, cumsum, precision):
    """Look up the empirical CDF at ``row['relative_time']`` and at one
    ``precision`` step above/below it, saturating at 1 and 0 respectively.

    ``cumsum`` is a Series mapping normalized time values to cumulative
    probability. Returns ``[cdf, cdf_plus, cdf_minus]``.
    """
    value = row['relative_time']
    cdf = float(cumsum[cumsum.index == value])
    # CDF one precision step above, capped at 1.
    upper = value + precision
    cdf_plus = 1.0 if upper >= 1 else float(cumsum[cumsum.index <= upper].max())
    # CDF one precision step below, floored at 0.
    lower = value - precision
    cdf_minus = 0.0 if lower < 0 else float(cumsum[cumsum.index <= lower].max())
    return [cdf, cdf_plus, cdf_minus]
def epsilon_vectorized_internal(data, delta):
    """Derive the per-record epsilon from its probability mass ``data.p_k``.

    Returns a fixed fallback epsilon of 0.1 when ``p_k + delta >= 1``, where
    the closed-form expression below would take the log of a non-positive
    argument. (The original inline comment claimed 0.5; the code has always
    returned 0.1.)
    """
    p_k = data.p_k
    if p_k + delta >= 1:
        return 0.1
    # Sensitivity r = 1 because the relative times are normalized to [0, 1].
    odds = p_k / (1.0 - p_k)
    return -np.log(odds * (1.0 / (delta + p_k) - 1.0))
def add_noise(data, max, min):
    """Draw Laplace noise scaled by ``1/eps``, clamp it so the noised
    (normalized) relative time stays non-negative, then map the noise back
    into the original ``[min, max]`` value range.
    """
    sens_time = 1  # sensitivity of the normalized time attribute
    draw = laplace.rvs(loc=0, scale=sens_time / data['eps'], size=1)[0]
    # Never push the normalized relative time below zero.
    if draw + data['relative_time_original'] < 0:
        draw = -data['relative_time_original']
    # De-normalize the noise into the original scale.
    return draw * (max - min) + min
def estimate_epsilon_risk_for_start_timestamp(data,delta):
    """Estimate per-case epsilon for the trace start timestamps and merge the
    result columns back into ``data`` in place (via ``DataFrame.update``).
    """
    # Rows that mark the start of a case (no previous state).
    start_time=data[data.prev_state==0]
    min_time = start_time['time:timestamp'].min()
    start_time['time_diff'] = start_time['time:timestamp'] - min_time
    """Days resolution"""
    # Relative time in whole days since the earliest start.
    start_time['relative_time'] = start_time['time_diff'].astype('timedelta64[D]')
    result = estimate_epsilon(start_time.relative_time, delta)
    # data['eps_days'] = result['eps']
    # data['time_diff_days'] = data['time_diff'] + pd.to_timedelta(result['noise'], unit='D')
    # df[['eps', 'p_k', 'relative_time_original', 'relative_time_max', 'relative_time_min']]
    # data['eps']=result['eps']
    # data['p_k']=result['p_k']
    # data['relative_time_original'] = result['relative_time_original']
    # data['relative_time_max'] = result['relative_time_max']
    # data['relative_time_min'] = result['relative_time_min']
    # Align on the index and overwrite the epsilon-related columns in place.
    data.update(result[['eps', 'p_k', 'relative_time_original', 'relative_time_max', 'relative_time_min']])
    # data.iloc[result.index,['eps', 'p_k', 'relative_time_original', 'relative_time_max', 'relative_time_min']]=result[['eps', 'p_k', 'relative_time_original', 'relative_time_max', 'relative_time_min']]
    return data
def estimate_epsilon(vals, delta):
    """Normalize ``vals`` to [0, 1], build their empirical CDF, and derive a
    per-value epsilon plus Laplace noise in the original scale.
    """
    #normalization
    min=vals.min()
    max=vals.max()
    # Half-width of the CDF neighbourhood in normalized units.
    precision = estimate_precsion(max, min)
    norm_vals=(vals-min)/(max-min)
    norm_vals=norm_vals.round(5)
    norm_vals= norm_vals.sort_values()
    # Empirical CDF of the normalized values.
    x, counts = np.unique(norm_vals, return_counts=True)
    counts = pd.Series(data=counts, index=x)
    cumsum= counts.cumsum()
    cumsum = cumsum / cumsum.iloc[-1]
    df=norm_vals.to_frame()
    df.columns = ['relative_time']
    # Per-row CDF at the value and one precision step above/below it.
    temp=df.apply( match_vals,cumsum=cumsum, precision=precision ,axis=1)
    temp = temp.to_frame()
    t2 = pd.DataFrame.from_records(temp[0])
    t2.index = temp.index
    df['cdf']=t2[0]
    df['cdf_plus'] = t2[1]
    df['cdf_minus'] = t2[2]
    # p_k: probability mass in the precision neighbourhood around the value.
    df['p_k']=df['cdf_plus']- df['cdf_minus']
    df['eps']= df.apply(epsilon_vectorized_internal, delta=delta, axis=1)
    # Map back to the original (un-normalized) scale and add Laplace noise.
    df['relative_time_original']=df['relative_time'] *(max-min)+min
    df['noise']=df.apply(add_noise, max=max, min= min, axis=1)
    df['time_diff']=df['noise'] +df['relative_time_original']
    df['relative_time_max']=max
    df['relative_time_min']=min
    return df[['eps','p_k','relative_time_original','relative_time_max','relative_time_min']] | [
"pandas.Series",
"scipy.stats.laplace.rvs",
"pandas.DataFrame.from_records",
"numpy.unique",
"numpy.log"
] | [((2741, 2781), 'numpy.unique', 'np.unique', (['norm_vals'], {'return_counts': '(True)'}), '(norm_vals, return_counts=True)\n', (2750, 2781), True, 'import numpy as np\n'), ((2795, 2826), 'pandas.Series', 'pd.Series', ([], {'data': 'counts', 'index': 'x'}), '(data=counts, index=x)\n', (2804, 2826), True, 'import pandas as pd\n'), ((3070, 3104), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['temp[0]'], {}), '(temp[0])\n', (3095, 3104), True, 'import pandas as pd\n'), ((894, 964), 'numpy.log', 'np.log', (['(data.p_k / (1.0 - data.p_k) * (1.0 / (delta + data.p_k) - 1.0))'], {}), '(data.p_k / (1.0 - data.p_k) * (1.0 / (delta + data.p_k) - 1.0))\n', (900, 964), True, 'import numpy as np\n'), ((1039, 1096), 'scipy.stats.laplace.rvs', 'laplace.rvs', ([], {'loc': '(0)', 'scale': "(sens_time / data['eps'])", 'size': '(1)'}), "(loc=0, scale=sens_time / data['eps'], size=1)\n", (1050, 1096), False, 'from scipy.stats import laplace\n')] |
from astropy.io import fits
from imageCoCenter import imageCoCenter
from PoissonSolverFFT import PoissonSolverFFT
from PoissonSolverExp import PoissonSolverExp
from compensate import compensate
from ZernikeEval import ZernikeEval
from ZernikeAnnularEval import ZernikeAnnularEval
import copy
import numpy as np
from wcsSetup import wcsSetup
def wcs(I1File, I1fldx, I1fldy, I2File, I2fldx, I2fldy, instruFile, algoFile, model):
if (isinstance(I1File, str) and isinstance(I2File,str)):
if (I1File.endswith(".txt") or I2File.endswith(".TXT")):
I1 = np.loadtxt(I1File)
I2 = np.loadtxt(I2File)
# flip along horizontal axis #np.filpud() also works
# because of how ZEMAX writes out#images and how MATLAB reads in images
I1 = I1[ ::-1,:]
I2 = I2[ ::-1,:]
else:
I1HDU = fits.open(I1File)
I1 = I1HDU[0].data
I2HDU = fits.open(I2File)
I2 = I2HDU[0].data
I1HDU.close()
I2HDU.close()
else:
I1 = I1File
I2 = I2File
# MATLAB Image convolution (for investigation of pixel aliasing effect)
# convkernel=load('test/gau_6x6_fwhm1.txt');
# convkernel=load('test/gau_10x10_fwhm3.txt');
# convkernel=load('test/gau_40x40_fwhm10.txt');
# convkernel=load('test/gau_100x100_fwhm30.txt');
# I1= conv2(I1, convkernel, 'same');
# I2= conv2(I2, convkernel, 'same');
# I1=downResolution(I1,10,120,120);
# I2=downResolution(I2,10,120,120);
m = wcsSetup(I1, I1fldx, I1fldy, I2, I2fldx, I2fldy, instruFile, algoFile)
m.converge = np.zeros((m.numTerms, m.outerItr + 1))
#for estimating m.Wconverge
ySensor, xSensor = np.mgrid[-(m.sensorSamples/2-0.5):(m.sensorSamples/2+0.5), \
-(m.sensorSamples/2-0.5):(m.sensorSamples/2+0.5)]
xSensor=xSensor/(m.sensorSamples/2/m.sensorFactor)
ySensor=ySensor/(m.sensorSamples/2/m.sensorFactor)
r2Sensor=xSensor**2+ySensor**2
idx=(r2Sensor>1) | (r2Sensor<m.obscuration**2)
xSensor[idx]=np.nan
ySensor[idx]=np.nan
m = imageCoCenter(m, I1fldx, I1fldy, I2fldx, I2fldy)
#print m.__dict__.keys()
m0 = copy.deepcopy(m)
if m.compMode == 'zer':
ztot = np.zeros(m.numTerms)
m.caustic = 0
if 'Axis' in model: #onAxis or offAxis
m.I1, tmp = compensate(m0, ztot, 'intra', 1, I1fldx, I1fldy, model)
m.I2, tmp = compensate(m0, ztot, 'extra', 1, I2fldx, I2fldy, model)
# model='paraxial';
# matrix or element multiplication
if (I1fldx != I2fldx or I1fldy != I2fldy):
m.I1 = m.I1 * m.pMask
m.I2 = m.I2 * np.rot90(m.pMask, 2)
m.I1 = m.I1 / np.sum(m.I1)
m.I2 = m.I2 / np.sum(m.I2) #no need vignetting correction, this is after masking already
if m.PoissonSolver == 'fft':
m = PoissonSolverFFT(m, m.sumclipSequence[0])
# print m.converge.shape, m.zc.shape, m.innerItr
m.converge[:, 0] = ztot + m.zc[:, m.innerItr-1]
else:
m = PoissonSolverExp(m)
m.converge[:, 0] = ztot + m.zc
# m.West includes Zernikes presented by m.zc
m.Wconverge=m.West;
for j in range(int(m.outerItr)):
if not m.caustic:
if (m.PoissonSolver == 'fft'):
ztmp = m.zc[:, -1]
else:
ztmp = m.zc
# print m.compSequence.shape
if (m.compSequence.ndim == 1):
ztmp[m.compSequence[j]:] = 0
else:
ztmp = ztmp * m.compSequence[:, j]
ztot = ztot + ztmp * m.feedbackGain
m.I1, caustic = compensate(m0, ztot,'intra', 1, I1fldx, I1fldy, model)
if caustic > 0:
m.caustic = caustic
m.I2, caustic = compensate(m0, ztot, 'extra', 1, I2fldx, I2fldy, model)
if caustic > 0:
m.caustic = caustic
if (I1fldx != I2fldx or I1fldy != I2fldy):
m.I1 = m.I1 * m.pMask
m.I2 = m.I2 * np.rot90(m.pMask, k=2);
m.I1 = m.I1 / np.sum(m.I1)
m.I2 = m.I2 / np.sum(m.I2) #no need vignetting correction, this is after masking already
if (m.PoissonSolver == 'fft'):
m = PoissonSolverFFT(m, m.sumclipSequence[j])
m.converge[:, j+1] = ztot + m.zc[:, -1]
else:
m = PoissonSolverExp(m)
m.converge[:, j+1] = ztot + m.zc
#m.West is the estimated wavefront from the last run of Poisson
#solver. ztot is what had be compensated before that run.
#m.West includes two parts: latest m.zc, and m.Wres
#m.Wres is the residual wavefront on top of m.converge(:,end),
#m.Wres is only available for the iterative FFT algorithm.
if (m.zobsR==0):
m.Wconverge=ZernikeEval(np.concatenate(([0,0,0],ztot[3:]),axis=0.8),\
xSensor,ySensor)+m.West
else:
m.Wconverge=ZernikeAnnularEval(np.concatenate(([0,0,0],ztot[3:]),axis=0.8),\
xSensor,ySensor,m.zobsR)+m.West;
else: # once we run into caustic, stop here, results may be
# close to real aberration. Continuation may lead to disatrous results
m.converge[:, j+1] = m.converge[:, j]
elif (m.compMode == 'opd'):
wtot = np.zeros(m.sensorSamples, m.sensorSamples)
m.caustic = 0
if ('Axis' in model): #onAxis or offAxis
compensate(m0, wtot, 'intra', 1, I1fldx, I1fldy, model)
compensate(m0, wtot, 'extra', 1, I2fldx, I2fldy, model)
if (I1fldx != I2fldx or I1fldy != I2fldy):
m.I1 = m.I1 * m.pMask
m.I2 = m.I2 * np.rot90(m.pMask, k=2)
m.I1 = m.I1 / np.sum(m.I1)
m.I2 = m.I2 / np.sum(m.I2) #no need vignetting correction, this is after masking already
if (m.PoissonSolver == 'fft'):
m = PoissonSolverFFT(m, m.sumclipSequence(1))
else:
m = PoissonSolverExp(m)
Wconverge = wtot + m.West
m.converge[:, 0] = ZernikeMaskedFit(Wconverge, xSensor, ySensor, m.numTerms, m.pMask, m.zobsR)
for j in range(int(m.outerItr)):
if not m.caustic:
wtmp = m.West
wtot = wtot + wtmp * m.feedbackGain
m.I1, caustic = compensate(m0, wtot, 'intra', 1, I1fldx, I1fldy, model)
if caustic > 0:
m.caustic = caustic
m.I2, caustic = compensate(m0, wtot, 'extra', 1, I2fldx, I2fldy, model)
if caustic > 0:
m.caustic = caustic
if (I1fldx != I2fldx or I1fldy != I2fldy):
m.I1 = m.I1 * m.pMask
m.I2 = m.I2 * np.rot90(m.pMask, k=2)
m.I1 = m.I1 / np.sum(m.I1)
m.I2 = m.I2 / np.sum(m.I2) #no need vignetting correction this is after masking already
if (m.PoissonSolver == 'fft'):
m = PoissonSolverFFT(m, m.sumclipSequence[j])
else:
m = PoissonSolverExp(m)
Wconverge = wtot + m.West
m.converge[:, j] = ZernikeMaskedFit(Wconverge, xSensor, ySensor, m.numTerms,
m.pMask, m.zobsR)
else: #once we run into caustic, stop here, results may be close to real aberration. Continuation may lead to disatrous results
m.converge[:, j] = m.converge[:, j]
m.I1 = I1
m.I2 = I2
return m
| [
"PoissonSolverFFT.PoissonSolverFFT",
"wcsSetup.wcsSetup",
"imageCoCenter.imageCoCenter",
"numpy.sum",
"numpy.zeros",
"PoissonSolverExp.PoissonSolverExp",
"numpy.rot90",
"copy.deepcopy",
"astropy.io.fits.open",
"numpy.concatenate",
"numpy.loadtxt",
"compensate.compensate"
] | [((1522, 1592), 'wcsSetup.wcsSetup', 'wcsSetup', (['I1', 'I1fldx', 'I1fldy', 'I2', 'I2fldx', 'I2fldy', 'instruFile', 'algoFile'], {}), '(I1, I1fldx, I1fldy, I2, I2fldx, I2fldy, instruFile, algoFile)\n', (1530, 1592), False, 'from wcsSetup import wcsSetup\n'), ((1611, 1649), 'numpy.zeros', 'np.zeros', (['(m.numTerms, m.outerItr + 1)'], {}), '((m.numTerms, m.outerItr + 1))\n', (1619, 1649), True, 'import numpy as np\n'), ((2102, 2150), 'imageCoCenter.imageCoCenter', 'imageCoCenter', (['m', 'I1fldx', 'I1fldy', 'I2fldx', 'I2fldy'], {}), '(m, I1fldx, I1fldy, I2fldx, I2fldy)\n', (2115, 2150), False, 'from imageCoCenter import imageCoCenter\n'), ((2198, 2214), 'copy.deepcopy', 'copy.deepcopy', (['m'], {}), '(m)\n', (2211, 2214), False, 'import copy\n'), ((2263, 2283), 'numpy.zeros', 'np.zeros', (['m.numTerms'], {}), '(m.numTerms)\n', (2271, 2283), True, 'import numpy as np\n'), ((573, 591), 'numpy.loadtxt', 'np.loadtxt', (['I1File'], {}), '(I1File)\n', (583, 591), True, 'import numpy as np\n'), ((615, 633), 'numpy.loadtxt', 'np.loadtxt', (['I2File'], {}), '(I2File)\n', (625, 633), True, 'import numpy as np\n'), ((877, 894), 'astropy.io.fits.open', 'fits.open', (['I1File'], {}), '(I1File)\n', (886, 894), False, 'from astropy.io import fits\n'), ((946, 963), 'astropy.io.fits.open', 'fits.open', (['I2File'], {}), '(I2File)\n', (955, 963), False, 'from astropy.io import fits\n'), ((2389, 2444), 'compensate.compensate', 'compensate', (['m0', 'ztot', '"""intra"""', '(1)', 'I1fldx', 'I1fldy', 'model'], {}), "(m0, ztot, 'intra', 1, I1fldx, I1fldy, model)\n", (2399, 2444), False, 'from compensate import compensate\n'), ((2477, 2532), 'compensate.compensate', 'compensate', (['m0', 'ztot', '"""extra"""', '(1)', 'I2fldx', 'I2fldy', 'model'], {}), "(m0, ztot, 'extra', 1, I2fldx, I2fldy, model)\n", (2487, 2532), False, 'from compensate import compensate\n'), ((2982, 3023), 'PoissonSolverFFT.PoissonSolverFFT', 'PoissonSolverFFT', (['m', 'm.sumclipSequence[0]'], {}), '(m, 
m.sumclipSequence[0])\n', (2998, 3023), False, 'from PoissonSolverFFT import PoissonSolverFFT\n'), ((3182, 3201), 'PoissonSolverExp.PoissonSolverExp', 'PoissonSolverExp', (['m'], {}), '(m)\n', (3198, 3201), False, 'from PoissonSolverExp import PoissonSolverExp\n'), ((6177, 6219), 'numpy.zeros', 'np.zeros', (['m.sensorSamples', 'm.sensorSamples'], {}), '(m.sensorSamples, m.sensorSamples)\n', (6185, 6219), True, 'import numpy as np\n'), ((2748, 2768), 'numpy.rot90', 'np.rot90', (['m.pMask', '(2)'], {}), '(m.pMask, 2)\n', (2756, 2768), True, 'import numpy as np\n'), ((2795, 2807), 'numpy.sum', 'np.sum', (['m.I1'], {}), '(m.I1)\n', (2801, 2807), True, 'import numpy as np\n'), ((2846, 2858), 'numpy.sum', 'np.sum', (['m.I2'], {}), '(m.I2)\n', (2852, 2858), True, 'import numpy as np\n'), ((4001, 4056), 'compensate.compensate', 'compensate', (['m0', 'ztot', '"""intra"""', '(1)', 'I1fldx', 'I1fldy', 'model'], {}), "(m0, ztot, 'intra', 1, I1fldx, I1fldy, model)\n", (4011, 4056), False, 'from compensate import compensate\n'), ((4193, 4248), 'compensate.compensate', 'compensate', (['m0', 'ztot', '"""extra"""', '(1)', 'I2fldx', 'I2fldy', 'model'], {}), "(m0, ztot, 'extra', 1, I2fldx, I2fldy, model)\n", (4203, 4248), False, 'from compensate import compensate\n'), ((6327, 6382), 'compensate.compensate', 'compensate', (['m0', 'wtot', '"""intra"""', '(1)', 'I1fldx', 'I1fldy', 'model'], {}), "(m0, wtot, 'intra', 1, I1fldx, I1fldy, model)\n", (6337, 6382), False, 'from compensate import compensate\n'), ((6403, 6458), 'compensate.compensate', 'compensate', (['m0', 'wtot', '"""extra"""', '(1)', 'I2fldx', 'I2fldy', 'model'], {}), "(m0, wtot, 'extra', 1, I2fldx, I2fldy, model)\n", (6413, 6458), False, 'from compensate import compensate\n'), ((6929, 6948), 'PoissonSolverExp.PoissonSolverExp', 'PoissonSolverExp', (['m'], {}), '(m)\n', (6945, 6948), False, 'from PoissonSolverExp import PoissonSolverExp\n'), ((4809, 4850), 'PoissonSolverFFT.PoissonSolverFFT', 'PoissonSolverFFT', (['m', 
'm.sumclipSequence[j]'], {}), '(m, m.sumclipSequence[j])\n', (4825, 4850), False, 'from PoissonSolverFFT import PoissonSolverFFT\n'), ((4990, 5009), 'PoissonSolverExp.PoissonSolverExp', 'PoissonSolverExp', (['m'], {}), '(m)\n', (5006, 5009), False, 'from PoissonSolverExp import PoissonSolverExp\n'), ((6599, 6621), 'numpy.rot90', 'np.rot90', (['m.pMask'], {'k': '(2)'}), '(m.pMask, k=2)\n', (6607, 6621), True, 'import numpy as np\n'), ((6656, 6668), 'numpy.sum', 'np.sum', (['m.I1'], {}), '(m.I1)\n', (6662, 6668), True, 'import numpy as np\n'), ((6707, 6719), 'numpy.sum', 'np.sum', (['m.I2'], {}), '(m.I2)\n', (6713, 6719), True, 'import numpy as np\n'), ((7330, 7385), 'compensate.compensate', 'compensate', (['m0', 'wtot', '"""intra"""', '(1)', 'I1fldx', 'I1fldy', 'model'], {}), "(m0, wtot, 'intra', 1, I1fldx, I1fldy, model)\n", (7340, 7385), False, 'from compensate import compensate\n'), ((7523, 7578), 'compensate.compensate', 'compensate', (['m0', 'wtot', '"""extra"""', '(1)', 'I2fldx', 'I2fldy', 'model'], {}), "(m0, wtot, 'extra', 1, I2fldx, I2fldy, model)\n", (7533, 7578), False, 'from compensate import compensate\n'), ((4504, 4526), 'numpy.rot90', 'np.rot90', (['m.pMask'], {'k': '(2)'}), '(m.pMask, k=2)\n', (4512, 4526), True, 'import numpy as np\n'), ((4578, 4590), 'numpy.sum', 'np.sum', (['m.I1'], {}), '(m.I1)\n', (4584, 4590), True, 'import numpy as np\n'), ((4645, 4657), 'numpy.sum', 'np.sum', (['m.I2'], {}), '(m.I2)\n', (4651, 4657), True, 'import numpy as np\n'), ((8128, 8169), 'PoissonSolverFFT.PoissonSolverFFT', 'PoissonSolverFFT', (['m', 'm.sumclipSequence[j]'], {}), '(m, m.sumclipSequence[j])\n', (8144, 8169), False, 'from PoissonSolverFFT import PoissonSolverFFT\n'), ((8248, 8267), 'PoissonSolverExp.PoissonSolverExp', 'PoissonSolverExp', (['m'], {}), '(m)\n', (8264, 8267), False, 'from PoissonSolverExp import PoissonSolverExp\n'), ((5553, 5600), 'numpy.concatenate', 'np.concatenate', (['([0, 0, 0], ztot[3:])'], {'axis': '(0.8)'}), '(([0, 0, 0], 
ztot[3:]), axis=0.8)\n', (5567, 5600), True, 'import numpy as np\n'), ((5740, 5787), 'numpy.concatenate', 'np.concatenate', (['([0, 0, 0], ztot[3:])'], {'axis': '(0.8)'}), '(([0, 0, 0], ztot[3:]), axis=0.8)\n', (5754, 5787), True, 'import numpy as np\n'), ((7855, 7877), 'numpy.rot90', 'np.rot90', (['m.pMask'], {'k': '(2)'}), '(m.pMask, k=2)\n', (7863, 7877), True, 'import numpy as np\n'), ((7913, 7925), 'numpy.sum', 'np.sum', (['m.I1'], {}), '(m.I1)\n', (7919, 7925), True, 'import numpy as np\n'), ((7980, 7992), 'numpy.sum', 'np.sum', (['m.I2'], {}), '(m.I2)\n', (7986, 7992), True, 'import numpy as np\n')] |
""" File with callback functions for NN training and testing """
import os
import json
import cv2
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import Callback
from tools.images import postprocess_img, write_to_img
from tools.metrics import sigmoid_np
class LoggingCallback(Callback):
""" Custom callback for visualising the network's predictions during training """
def __init__(self, exp_dir, data, mode=True, period=1, show=False, display_dim=(256, 256)):
"""Initialisation method of the class.
Parameters
----------
"""
self.exp_dir = exp_dir
self.period = period
self.show = show
self.mode = mode
self.display_dim = display_dim
self.model = None
self.critic = None
self._format_data(data)
self._create_vis()
if self.mode != "val":
self._create_log()
def _format_data(self, data):
if isinstance(data, list) or isinstance(data, tuple):
self.data = {'x': data[0], 'y': data[1]}
elif isinstance(data, dict):
self.data = data
else:
raise TypeError("'data' passed to LoggingCallback is of unsupported type '{}'".format(type(data)))
def _create_log(self):
self.log_path = self.exp_dir + "logs/{}/loss_log.txt".format(self.mode)
f = open(self.log_path, "w")
f.close()
def _create_vis(self):
self.vis_dir = os.path.join(self.exp_dir, "vis/{}/".format(self.mode))
def set_model(self, model):
self.model = model
def set_critic(self, critic):
self.critic = critic
def predict(self):
""" Obtain (and optionally visualise) the network's predictions on the callback data """
assert self.model is not None
preds = self.model.predict(self.data['x'])
if self.show:
plt.imshow(preds)
return preds
def _score_preds(self, preds):
""" Score the given predictions using a pre-selected critic model """
assert self.critic is not None
pred_scores = self.critic.predict(preds)
# TODO: generalise this code neatly - should be an option to specify logits vs probits
#pred_scores = sigmoid_np(pred_scores)
return pred_scores
def _add_UV_channels_to_image(self, image_index):
# Expand the Y input channels to include the U and V components (both at 0.0)
input_y = self.data['x'][image_index]
input_yuv = np.zeros(shape=(*input_y.shape[:2], 3), dtype=float)
input_yuv[:, :, 0] = np.squeeze(input_y)
return input_y, input_yuv
def _compute_RMS_error_image(self, preds, image_index):
# Calculate the RMSE difference image over the U and V channels
pred = preds[image_index]
assert(np.all(np.abs(pred[:, :, 1:]) <= 0.5))
gt = self.data['y'][image_index]
rmse_channel = 2*np.sqrt(np.mean(np.square(gt[:, :, 1:] - pred[:, :, 1:]), axis=-1))
rmse_img = np.zeros(shape=(*gt.shape[:2], 3), dtype=float)
rmse_img[:, :, 0] = np.clip(rmse_channel, 0.0, 1.0)
return rmse_img, pred, gt
def _store_preds(self, preds, epoch, scores={}):
""" Save the input, prediction, and GT images """
images_to_write = []
for image_index in range(len(preds)):
# Add UV channels back to Y-channel input image
_, input_yuv = self._add_UV_channels_to_image(image_index)
# Compute difference image for visual error comparison
rmse_img, pred, gt = self._compute_RMS_error_image(preds, image_index)
# Postprocess the YUV input, output, GT, and RMSE difference images
input_rgb = postprocess_img(input_yuv, img_dim=self.display_dim, convert_to_rgb=True)
pred_rgb = postprocess_img(pred, img_dim=self.display_dim, convert_to_rgb=True)
gt_reshaped = postprocess_img(gt, img_dim=self.display_dim, convert_to_rgb=True)
rmse_img_reshaped = postprocess_img(rmse_img, img_dim=self.display_dim, convert_to_rgb=True)
# Label the output and GT images with their score, if provided
if scores:
pred_score = scores["pred_scores"][image_index]
pred_score_text = "Score: {:.03f}".format(*pred_score)
pred_rgb = write_to_img(pred_rgb, pred_score_text)
gt_score = scores["gt_scores"][image_index]
gt_score_text = "Score: {:.03f}".format(*gt_score)
gt_reshaped = write_to_img(gt_reshaped, gt_score_text)
# Concatenate into a 'comparison' image
images_to_store = [input_rgb, pred_rgb, gt_reshaped, rmse_img_reshaped]
comparison_img = np.concatenate(images_to_store, axis=1)
images_to_write.append(comparison_img)
# Write the RGB 'comparison' images to file as a single, composite image
composite_img = np.concatenate(images_to_write, axis=0)
composite_id = "epoch_{:05d}.comparison.png".format(epoch + 1, image_index + 1)
cv2.imwrite(os.path.join(self.vis_dir, composite_id), cv2.cvtColor(composite_img, cv2.COLOR_RGB2BGR))
def _predict_and_store(self, epoch):
# Predict on callback data
preds = self.predict()
# Critique (score) predictions and GT data if critic model was set
scores = {}
if self.critic is not None:
scores["pred_scores"] = self._score_preds(preds)
scores["gt_scores"] = self._score_preds(self.data['y'])
# Store the predictions
self._store_preds(preds, epoch, scores)
def _write_logs_to_file(self, epoch, logs):
# Store losses in log file
with open(self.log_path, "a") as f:
# TODO: test and debug -- see if losses are output correctly
f.write(json.dumps({'epoch': epoch})[:-1] + ", " + json.dumps(logs) + '\n')
def on_epoch_begin(self, epoch, logs=None):
epoch = int(epoch)
if epoch == 1:
self._predict_and_store(epoch=0)
def on_epoch_end(self, epoch, logs=None):
""" Store the model loss and accuracy at the end of every epoch, and store a model prediction on the callback data """
epoch = int(epoch)
if logs is not None and self.mode == "train":
self._write_logs_to_file(epoch, logs)
if epoch % self.period == 0:
self._predict_and_store(epoch)
| [
"numpy.clip",
"matplotlib.pyplot.imshow",
"numpy.abs",
"tools.images.write_to_img",
"json.dumps",
"os.path.join",
"tools.images.postprocess_img",
"numpy.squeeze",
"numpy.square",
"numpy.zeros",
"numpy.concatenate",
"cv2.cvtColor"
] | [((2537, 2589), 'numpy.zeros', 'np.zeros', ([], {'shape': '(*input_y.shape[:2], 3)', 'dtype': 'float'}), '(shape=(*input_y.shape[:2], 3), dtype=float)\n', (2545, 2589), True, 'import numpy as np\n'), ((2619, 2638), 'numpy.squeeze', 'np.squeeze', (['input_y'], {}), '(input_y)\n', (2629, 2638), True, 'import numpy as np\n'), ((3061, 3108), 'numpy.zeros', 'np.zeros', ([], {'shape': '(*gt.shape[:2], 3)', 'dtype': 'float'}), '(shape=(*gt.shape[:2], 3), dtype=float)\n', (3069, 3108), True, 'import numpy as np\n'), ((3137, 3168), 'numpy.clip', 'np.clip', (['rmse_channel', '(0.0)', '(1.0)'], {}), '(rmse_channel, 0.0, 1.0)\n', (3144, 3168), True, 'import numpy as np\n'), ((5048, 5087), 'numpy.concatenate', 'np.concatenate', (['images_to_write'], {'axis': '(0)'}), '(images_to_write, axis=0)\n', (5062, 5087), True, 'import numpy as np\n'), ((1916, 1933), 'matplotlib.pyplot.imshow', 'plt.imshow', (['preds'], {}), '(preds)\n', (1926, 1933), True, 'import matplotlib.pyplot as plt\n'), ((3790, 3863), 'tools.images.postprocess_img', 'postprocess_img', (['input_yuv'], {'img_dim': 'self.display_dim', 'convert_to_rgb': '(True)'}), '(input_yuv, img_dim=self.display_dim, convert_to_rgb=True)\n', (3805, 3863), False, 'from tools.images import postprocess_img, write_to_img\n'), ((3887, 3955), 'tools.images.postprocess_img', 'postprocess_img', (['pred'], {'img_dim': 'self.display_dim', 'convert_to_rgb': '(True)'}), '(pred, img_dim=self.display_dim, convert_to_rgb=True)\n', (3902, 3955), False, 'from tools.images import postprocess_img, write_to_img\n'), ((3983, 4049), 'tools.images.postprocess_img', 'postprocess_img', (['gt'], {'img_dim': 'self.display_dim', 'convert_to_rgb': '(True)'}), '(gt, img_dim=self.display_dim, convert_to_rgb=True)\n', (3998, 4049), False, 'from tools.images import postprocess_img, write_to_img\n'), ((4083, 4155), 'tools.images.postprocess_img', 'postprocess_img', (['rmse_img'], {'img_dim': 'self.display_dim', 'convert_to_rgb': '(True)'}), '(rmse_img, 
img_dim=self.display_dim, convert_to_rgb=True)\n', (4098, 4155), False, 'from tools.images import postprocess_img, write_to_img\n'), ((4851, 4890), 'numpy.concatenate', 'np.concatenate', (['images_to_store'], {'axis': '(1)'}), '(images_to_store, axis=1)\n', (4865, 4890), True, 'import numpy as np\n'), ((5196, 5236), 'os.path.join', 'os.path.join', (['self.vis_dir', 'composite_id'], {}), '(self.vis_dir, composite_id)\n', (5208, 5236), False, 'import os\n'), ((5238, 5284), 'cv2.cvtColor', 'cv2.cvtColor', (['composite_img', 'cv2.COLOR_RGB2BGR'], {}), '(composite_img, cv2.COLOR_RGB2BGR)\n', (5250, 5284), False, 'import cv2\n'), ((2863, 2885), 'numpy.abs', 'np.abs', (['pred[:, :, 1:]'], {}), '(pred[:, :, 1:])\n', (2869, 2885), True, 'import numpy as np\n'), ((4430, 4469), 'tools.images.write_to_img', 'write_to_img', (['pred_rgb', 'pred_score_text'], {}), '(pred_rgb, pred_score_text)\n', (4442, 4469), False, 'from tools.images import postprocess_img, write_to_img\n'), ((4644, 4684), 'tools.images.write_to_img', 'write_to_img', (['gt_reshaped', 'gt_score_text'], {}), '(gt_reshaped, gt_score_text)\n', (4656, 4684), False, 'from tools.images import postprocess_img, write_to_img\n'), ((2990, 3030), 'numpy.square', 'np.square', (['(gt[:, :, 1:] - pred[:, :, 1:])'], {}), '(gt[:, :, 1:] - pred[:, :, 1:])\n', (2999, 3030), True, 'import numpy as np\n'), ((6012, 6028), 'json.dumps', 'json.dumps', (['logs'], {}), '(logs)\n', (6022, 6028), False, 'import json\n'), ((5969, 5997), 'json.dumps', 'json.dumps', (["{'epoch': epoch}"], {}), "({'epoch': epoch})\n", (5979, 5997), False, 'import json\n')] |
""" Implementation of Cosmic RIM estimator"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
print(physical_devices)
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
world_size = len(physical_devices)
import numpy as np
import os, sys, argparse, time
from scipy.interpolate import InterpolatedUnivariateSpline as iuspline
from scipy.interpolate import interp1d
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import json
from rim_utils import build_rim_parallel_single, myAdam
from recon_models import Recon_Bias
from modelhalo import HaloData, check_2pt, check_im, get_data, get_diff_spectra
import flowpm
from flowpm import linear_field, lpt_init, nbody, cic_paint, cic_readout
from flowpm.utils import r2c3d, c2r3d
sys.path.append('../../utils/')
import tools
from getbiasparams import getbias
import diagnostics as dg
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--nc', type=int, default=32, help='Grid size')
parser.add_argument('--ncf', type=int, default=4, help='Grid size')
parser.add_argument('--bs', type=float, default=200, help='Box Size')
parser.add_argument('--numd', type=float, default=0.001, help='number density')
parser.add_argument('--nsteps', type=int, default=3, help='')
parser.add_argument('--niter', type=int, default=200, help='Number of iterations/Max iterations')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
parser.add_argument('--decay', type=float, default=0.9, help='Decay rate')
parser.add_argument('--decayiter', type=int, default=100, help='Decay rate')
parser.add_argument('--optimizer', type=str, default='adam', help='Which optimizer to use')
parser.add_argument('--batch_size', type=int, default=8, help='Batch size')
parser.add_argument('--nsims', type=int, default=100, help='Number of simulations')
parser.add_argument('--nbody', type=str2bool, default=False, help='Number of simulationss')
parser.add_argument('--lpt_order', type=int, default=2, help='Order of LPT Initial conditions')
parser.add_argument('--input_size', type=int, default=8, help='Input layer channel size')
parser.add_argument('--cell_size', type=int, default=8, help='Cell channel size')
parser.add_argument('--rim_iter', type=int, default=10, help='Optimization iteration')
parser.add_argument('--epochs', type=int, default=20, help='Number of epochs')
parser.add_argument('--suffix', type=str, default='', help='Suffix for folder pathname')
parser.add_argument('--batch_in_epoch', type=int, default=20, help='Number of batches in epochs')
parser.add_argument('--posdata', type=str2bool, default=True, help='Position data')
parser.add_argument('--parallel', type=str2bool, default=True, help='Parallel')
parser.add_argument('--stdinit', type=str2bool, default=False, help='Parallel')
parser.add_argument('--Rstd', type=int, default=128, help='Parallel')
parser.add_argument('--priorinit', type=str2bool, default=False, help='Start with priorinit')
parser.add_argument('--nsimsbias', type=int, default=10, help='Number of simulations to get bias')
parser.add_argument('--diffps', type=str2bool, default=False, help='Parallel')
parser.add_argument('--prior', type=str2bool, default=False, help='Use prior for RIM')
args = parser.parse_args()
nc, bs = args.nc, args.bs
numd = args.numd
ncf = args.ncf*args.nc
niter = args.niter
lr = args.lr
a0, af, nsteps = 0.1, 1.0, args.nsteps
stages = np.linspace(a0, af, nsteps, endpoint=True)
args.stages = stages
args.a0, args.af = a0, af
args.world_size = world_size
RRs = [2.0, 1.0, 0.5, 0.0]
#
klin = np.loadtxt('../../data/Planck15_a1p00.txt').T[0]
plin = np.loadtxt('../../data//Planck15_a1p00.txt').T[1]
ipklin = iuspline(klin, plin)
# Compute necessary Fourier kernels
kvec = tools.fftk((nc, nc, nc), boxsize=bs, symmetric=False)
kmesh = (sum(k**2 for k in kvec)**0.5).astype(np.float32)
priorwt = ipklin(kmesh)
args.kmesh = kmesh
args.ipklin = ipklin
args.priorwt = priorwt
datamodel = HaloData(args)
########################################
#RIM params
params = {}
params['input_size'] = args.input_size
params['cell_size'] = args.cell_size
params['strides'] = 2
params['middle_size'] = args.input_size // params['strides'] #lets divide by strides
params['cell_kernel_size'] = 5
params['input_kernel_size'] = 5
params['middle_kernel_size'] = 5
params['output_kernel_size'] = 5
params['rim_iter'] = args.rim_iter
params['input_activation'] = 'tanh'
params['output_activation'] = 'linear'
params['nc'] = nc
rim = build_rim_parallel_single(params)
adam = myAdam(params['rim_iter'])
adam10 = myAdam(10*params['rim_iter'])
learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
args.lr,
decay_steps=args.decayiter,
decay_rate=args.decay,
staircase=False)
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
#optimizer = tf.keras.optimizers.Adam(learning_rate=args.lr)
#################################
traindata, testdata = get_data(args)
print(traindata.shape, testdata.shape)
if args.stdinit:
ipkdiff, b1eul = get_diff_spectra(args, ipklin, nsims=args.nsimsbias, nsteps=3)
print("B1 eulerian : ", b1eul)
BUFFER_SIZE = len(traindata)
GLOBAL_BATCH_SIZE = args.batch_size
train_dataset = tf.data.Dataset.from_tensor_slices((traindata[:, 0], traindata[:, 1:])).shuffle(BUFFER_SIZE).batch(GLOBAL_BATCH_SIZE)
test_dataset = tf.data.Dataset.from_tensor_slices((testdata[:, 0], testdata[:, 1:])).shuffle(len(testdata)).batch(1)
grad_fn = datamodel.recon_grad
bias, errormesh = datamodel.setupbias(traindata, nsims=args.nsimsbias)
errormesh = tf.constant(np.expand_dims(errormesh, 0), dtype=tf.float32)
print(bias)
print(errormesh.shape)
grad_params = [bias, errormesh]
#
if args.parallel: suffpath = '_halo' + args.suffix
else: suffpath = '_halo_split' + args.suffix
if args.nbody: ofolder = './models/L%04d_N%03d/T%02d%s/'%(bs, nc, nsteps, suffpath)
else: ofolder = './models/L%04d_N%03d/LPT%d%s/'%(bs, nc, args.lpt_order, suffpath)
try: os.makedirs(ofolder)
except Exception as e: print(e)
with open(ofolder + 'params.json', 'w') as fp:
json.dump(params, fp)
#######################################
x_test, y_test = testdata[0:1, 0], testdata[0:1, 1:]
x_test = tf.constant(x_test, dtype=tf.float32)
fpos = datamodel.pmpos(x_test)[1].numpy()[0]*bs/nc
bparams, bmodel = getbias(bs, nc, y_test[0, 0], x_test.numpy()[0], fpos)
bias_test = tf.constant([bparams[0], bparams[1]], dtype=tf.float32)
print('Bias test : ', bias_test)
bmodeltf = datamodel.biasfield(x_test, bias_test).numpy()
errormesh = y_test[:, 0] - bmodeltf
kerror, perror = tools.power(errormesh[0], boxsize=bs)
kerror, perror = kerror[1:], perror[1:]
ipkerror = interp1d(kerror, perror, bounds_error=False, fill_value=(perror[0], perror.max()))
errormesh_test = tf.expand_dims(tf.constant(ipkerror(kmesh), dtype=tf.float32), 0)
#
if args.stdinit:
x_init = tf.constant(y_test[:, 1] / b1eul , dtype=tf.float32)
if args.diffps : x_init = x_init + linear_field(nc, bs, ipkdiff, batch_size=y_test.shape[0])
elif args.priorinit:
x_init = linear_field(nc, bs, ipklin, batch_size=y_test.shape[0])
else:
x_init = tf.random.normal(x_test.shape)
y_test = tf.constant(y_test[:, 0])
pred_adam = adam(x_init, y_test, grad_fn, grad_params)
pred_adam10 = adam10(x_init, y_test, grad_fn, grad_params)
#fid_recon = Recon_Bias(nc, bs, bias, errormesh, a0=0.1, af=1.0, nsteps=args.nsteps, nbody=args.nbody, lpt_order=2, anneal=True, prior=True)
#minic, minfin = fid_recon.reconstruct(tf.constant(y_test), RRs=RRs, niter=args.rim_iter*10, lr=0.1)
minic, minfin = datamodel.reconstruct(tf.constant(y_test), bias_test, errormesh_test,
RRs=RRs, niter=args.rim_iter*20, lr=0.5, x_init=x_init, useprior=True)
check_2pt(datamodel,
#[[x_test, y_test], [x_init, minic]],
#[[x_test, y_test], [pred_adam, pred_adam10, minic]], grad_params, ofolder + 'fid_recon')
[[x_test+1., y_test], [x_init+1., minic+1.]],
[[x_test+1., y_test], [pred_adam+1., pred_adam10+1., minic+1.]], grad_params, ofolder + 'fid_recon')
#######################################
def train_step(inputs):
x_true, y = inputs
if args.stdinit:
x_init = y[:, 1] / b1eul
if args.diffps : x_init = x_init + linear_field(nc, bs, ipkdiff, batch_size=y.shape[0])
elif args.priorinit:
x_init = linear_field(nc, bs, ipklin, batch_size=y.shape[0])
else:
x_init = tf.random.normal(x_true.shape)
y = y[:, 0]
if len(rim.trainable_variables) == 0:
#Hack since sometimes this si the first time RIM is called and so hasn't been inisitalized
i = 0
a, b, c = x_init[i:i+1], y[i:i+1], x_true[i:i+1]
_ = rim(tf.constant(a), tf.constant(b), grad_fn, tf.constant(c), grad_params)[1] / args.batch_size
#
gradients = [0.]*len(rim.trainable_variables)
#n = args.sims_in_loop
for i in range(args.batch_size // world_size):
with tf.GradientTape() as tape:
a, b, c = x_init[i:i+1], y[i:i+1], x_true[i:i+1]
loss = rim(tf.constant(a), tf.constant(b), grad_fn, tf.constant(c), grad_params)[1] / args.batch_size
grads = tape.gradient(loss, rim.trainable_variables)
for j in range(len(grads)):
gradients[j] = gradients[j] + grads[j]
optimizer.apply_gradients(zip(gradients, rim.trainable_variables))
return loss
def test_step(inputs):
x_true, y = inputs
#x_init = tf.random.normal(x_true.shape)
if args.stdinit:
x_init = y[:, 1] / b1eul
if args.diffps: x_init = x_init + linear_field(nc, bs, ipkdiff, batch_size=y.shape[0])
elif args.priorinit:
x_init = linear_field(nc, bs, ipklin, batch_size=y.shape[0])
else: x_init = tf.random.normal(x_true.shape)
y = y[:, 0]
x_pred = rim(x_init, y, grad_fn, x_true, grad_params)[0]
return x_pred, x_init, x_true, y
###########################################
####Train
###
#Training
    # History of per-batch training losses across all epochs (for the loss plot).
    losses = []
    for epoch in range(args.epochs):
        print("\nFor epoch %d\n"%epoch)
        #TRAIN LOOP
        total_loss = 0.0
        num_batches = 0
        starte = time.time()
        for x in train_dataset:
            #print(len(x), x[0].values[0].shape)
            startb = time.time()
            # One optimization step; train_step already scales the loss by batch size.
            loss = train_step(x)
            losses.append(loss.numpy())
            total_loss += loss
            print("epoch %d, num batch %d, loss : "%(epoch, num_batches), loss)
            print("Time taken : ", time.time() - startb)
            num_batches += 1
        train_loss = total_loss / num_batches
        #print("Train loss for epoch %d "%epoch, train_loss)
        #print("Time taken for epoch %d: "%epoch, time.time() - starte)
        # Refresh the running loss curve after every epoch.
        plt.plot(losses)
        plt.savefig(ofolder + 'losses.png')
        ##Test Epoch Training
        for x in test_dataset:
            print('Testing')
            a, b, c, d = test_step(x)
            #print(a.values[0].shape, b.values[0].shape, c.values[0].shape, d.values[0].shape)
            # Distributed values expose per-replica tensors via .values; fall back
            # to the raw tensors when running without a distribution strategy.
            try: pred, x_init, xx, yy = a.values[0], b.values[0], c.values[0], d.values[0]
            except: pred, x_init, xx, yy = a, b, c, d
            #pred_adam = adam(x_init, yy, grad_fn, grad_params)
            #pred_adam10 = adam10(x_init, yy, grad_fn, grad_params)
            # Save diagnostic images and 2-pt statistics for the first test batch only.
            check_im(xx[0].numpy(), x_init[0].numpy(), pred[0].numpy(), ofolder + 'rim-im-%d.png'%epoch)
            check_2pt(datamodel,
                      #[[xx, yy], [x_init, pred]],
                      #[[x_test, y_test], [pred_adam, pred_adam10, minic]], grad_params, ofolder + 'rim-2pt-%d.png'%epoch)
                      [[xx+1., yy], [x_init+1., pred+1.]],
                      [[x_test+1., y_test], [pred_adam+1., pred_adam10+1., minic+1.]], grad_params, ofolder + 'rim-2pt-%d.png'%epoch)
            break
    # NOTE(review): weights are saved once, after the final epoch (this line sits
    # outside the epoch loop) -- confirm per-epoch checkpointing is not intended.
    rim.save_weights(ofolder + '/%d'%epoch)
| [
"tools.power",
"tensorflow.GradientTape",
"sys.path.append",
"tensorflow.random.normal",
"argparse.ArgumentParser",
"tensorflow.data.Dataset.from_tensor_slices",
"matplotlib.pyplot.plot",
"rim_utils.build_rim_parallel_single",
"numpy.linspace",
"tools.fftk",
"flowpm.linear_field",
"matplotlib.... | [((199, 250), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (243, 250), True, 'import tensorflow as tf\n'), ((362, 429), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), '(physical_devices[0], True)\n', (402, 429), True, 'import tensorflow as tf\n'), ((644, 665), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (658, 665), False, 'import matplotlib\n'), ((1015, 1046), 'sys.path.append', 'sys.path.append', (['"""../../utils/"""'], {}), "('../../utils/')\n", (1030, 1046), False, 'import os, sys, argparse, time\n'), ((1419, 1480), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some integers."""'}), "(description='Process some integers.')\n", (1442, 1480), False, 'import os, sys, argparse, time\n'), ((3973, 4015), 'numpy.linspace', 'np.linspace', (['a0', 'af', 'nsteps'], {'endpoint': '(True)'}), '(a0, af, nsteps, endpoint=True)\n', (3984, 4015), True, 'import numpy as np\n'), ((4244, 4264), 'scipy.interpolate.InterpolatedUnivariateSpline', 'iuspline', (['klin', 'plin'], {}), '(klin, plin)\n', (4252, 4264), True, 'from scipy.interpolate import InterpolatedUnivariateSpline as iuspline\n'), ((4398, 4451), 'tools.fftk', 'tools.fftk', (['(nc, nc, nc)'], {'boxsize': 'bs', 'symmetric': '(False)'}), '((nc, nc, nc), boxsize=bs, symmetric=False)\n', (4408, 4451), False, 'import tools\n'), ((4611, 4625), 'modelhalo.HaloData', 'HaloData', (['args'], {}), '(args)\n', (4619, 4625), False, 'from modelhalo import HaloData, check_2pt, check_im, get_data, get_diff_spectra\n'), ((5143, 5176), 'rim_utils.build_rim_parallel_single', 'build_rim_parallel_single', (['params'], {}), '(params)\n', (5168, 5176), False, 'from rim_utils import build_rim_parallel_single, myAdam\n'), ((5184, 5210), 'rim_utils.myAdam', 'myAdam', 
(["params['rim_iter']"], {}), "(params['rim_iter'])\n", (5190, 5210), False, 'from rim_utils import build_rim_parallel_single, myAdam\n'), ((5220, 5251), 'rim_utils.myAdam', 'myAdam', (["(10 * params['rim_iter'])"], {}), "(10 * params['rim_iter'])\n", (5226, 5251), False, 'from rim_utils import build_rim_parallel_single, myAdam\n'), ((5266, 5394), 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', (['args.lr'], {'decay_steps': 'args.decayiter', 'decay_rate': 'args.decay', 'staircase': '(False)'}), '(args.lr, decay_steps=args.\n decayiter, decay_rate=args.decay, staircase=False)\n', (5312, 5394), True, 'import tensorflow as tf\n'), ((5419, 5472), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (5443, 5472), True, 'import tensorflow as tf\n'), ((5596, 5610), 'modelhalo.get_data', 'get_data', (['args'], {}), '(args)\n', (5604, 5610), False, 'from modelhalo import HaloData, check_2pt, check_im, get_data, get_diff_spectra\n'), ((6851, 6888), 'tensorflow.constant', 'tf.constant', (['x_test'], {'dtype': 'tf.float32'}), '(x_test, dtype=tf.float32)\n', (6862, 6888), True, 'import tensorflow as tf\n'), ((7025, 7080), 'tensorflow.constant', 'tf.constant', (['[bparams[0], bparams[1]]'], {'dtype': 'tf.float32'}), '([bparams[0], bparams[1]], dtype=tf.float32)\n', (7036, 7080), True, 'import tensorflow as tf\n'), ((7225, 7262), 'tools.power', 'tools.power', (['errormesh[0]'], {'boxsize': 'bs'}), '(errormesh[0], boxsize=bs)\n', (7236, 7262), False, 'import tools\n'), ((7814, 7839), 'tensorflow.constant', 'tf.constant', (['y_test[:, 0]'], {}), '(y_test[:, 0])\n', (7825, 7839), True, 'import tensorflow as tf\n'), ((8396, 8593), 'modelhalo.check_2pt', 'check_2pt', (['datamodel', '[[x_test + 1.0, y_test], [x_init + 1.0, minic + 1.0]]', '[[x_test + 1.0, y_test], [pred_adam + 1.0, pred_adam10 + 1.0, minic + 1.0]]', 'grad_params', "(ofolder 
+ 'fid_recon')"], {}), "(datamodel, [[x_test + 1.0, y_test], [x_init + 1.0, minic + 1.0]],\n [[x_test + 1.0, y_test], [pred_adam + 1.0, pred_adam10 + 1.0, minic + \n 1.0]], grad_params, ofolder + 'fid_recon')\n", (8405, 8593), False, 'from modelhalo import HaloData, check_2pt, check_im, get_data, get_diff_spectra\n'), ((5688, 5750), 'modelhalo.get_diff_spectra', 'get_diff_spectra', (['args', 'ipklin'], {'nsims': 'args.nsimsbias', 'nsteps': '(3)'}), '(args, ipklin, nsims=args.nsimsbias, nsteps=3)\n', (5704, 5750), False, 'from modelhalo import HaloData, check_2pt, check_im, get_data, get_diff_spectra\n'), ((6232, 6260), 'numpy.expand_dims', 'np.expand_dims', (['errormesh', '(0)'], {}), '(errormesh, 0)\n', (6246, 6260), True, 'import numpy as np\n'), ((6620, 6640), 'os.makedirs', 'os.makedirs', (['ofolder'], {}), '(ofolder)\n', (6631, 6640), False, 'import os, sys, argparse, time\n'), ((6725, 6746), 'json.dump', 'json.dump', (['params', 'fp'], {}), '(params, fp)\n', (6734, 6746), False, 'import json\n'), ((7512, 7563), 'tensorflow.constant', 'tf.constant', (['(y_test[:, 1] / b1eul)'], {'dtype': 'tf.float32'}), '(y_test[:, 1] / b1eul, dtype=tf.float32)\n', (7523, 7563), True, 'import tensorflow as tf\n'), ((8238, 8257), 'tensorflow.constant', 'tf.constant', (['y_test'], {}), '(y_test)\n', (8249, 8257), True, 'import tensorflow as tf\n'), ((10799, 10810), 'time.time', 'time.time', ([], {}), '()\n', (10808, 10810), False, 'import os, sys, argparse, time\n'), ((11330, 11346), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {}), '(losses)\n', (11338, 11346), True, 'from matplotlib import pyplot as plt\n'), ((11351, 11386), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(ofolder + 'losses.png')"], {}), "(ofolder + 'losses.png')\n", (11362, 11386), True, 'from matplotlib import pyplot as plt\n'), ((4129, 4172), 'numpy.loadtxt', 'np.loadtxt', (['"""../../data/Planck15_a1p00.txt"""'], {}), "('../../data/Planck15_a1p00.txt')\n", (4139, 4172), True, 'import numpy as np\n'), 
((4185, 4229), 'numpy.loadtxt', 'np.loadtxt', (['"""../../data//Planck15_a1p00.txt"""'], {}), "('../../data//Planck15_a1p00.txt')\n", (4195, 4229), True, 'import numpy as np\n'), ((7697, 7753), 'flowpm.linear_field', 'linear_field', (['nc', 'bs', 'ipklin'], {'batch_size': 'y_test.shape[0]'}), '(nc, bs, ipklin, batch_size=y_test.shape[0])\n', (7709, 7753), False, 'from flowpm import linear_field, lpt_init, nbody, cic_paint, cic_readout\n'), ((7774, 7804), 'tensorflow.random.normal', 'tf.random.normal', (['x_test.shape'], {}), '(x_test.shape)\n', (7790, 7804), True, 'import tensorflow as tf\n'), ((10901, 10912), 'time.time', 'time.time', ([], {}), '()\n', (10910, 10912), False, 'import os, sys, argparse, time\n'), ((11973, 12173), 'modelhalo.check_2pt', 'check_2pt', (['datamodel', '[[xx + 1.0, yy], [x_init + 1.0, pred + 1.0]]', '[[x_test + 1.0, y_test], [pred_adam + 1.0, pred_adam10 + 1.0, minic + 1.0]]', 'grad_params', "(ofolder + 'rim-2pt-%d.png' % epoch)"], {}), "(datamodel, [[xx + 1.0, yy], [x_init + 1.0, pred + 1.0]], [[x_test +\n 1.0, y_test], [pred_adam + 1.0, pred_adam10 + 1.0, minic + 1.0]],\n grad_params, ofolder + 'rim-2pt-%d.png' % epoch)\n", (11982, 12173), False, 'from modelhalo import HaloData, check_2pt, check_im, get_data, get_diff_spectra\n'), ((1354, 1407), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (1380, 1407), False, 'import os, sys, argparse, time\n'), ((7604, 7661), 'flowpm.linear_field', 'linear_field', (['nc', 'bs', 'ipkdiff'], {'batch_size': 'y_test.shape[0]'}), '(nc, bs, ipkdiff, batch_size=y_test.shape[0])\n', (7616, 7661), False, 'from flowpm import linear_field, lpt_init, nbody, cic_paint, cic_readout\n'), ((9019, 9070), 'flowpm.linear_field', 'linear_field', (['nc', 'bs', 'ipklin'], {'batch_size': 'y.shape[0]'}), '(nc, bs, ipklin, batch_size=y.shape[0])\n', (9031, 9070), False, 'from flowpm import linear_field, lpt_init, nbody, cic_paint, 
cic_readout\n'), ((9099, 9129), 'tensorflow.random.normal', 'tf.random.normal', (['x_true.shape'], {}), '(x_true.shape)\n', (9115, 9129), True, 'import tensorflow as tf\n'), ((9627, 9644), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (9642, 9644), True, 'import tensorflow as tf\n'), ((10353, 10404), 'flowpm.linear_field', 'linear_field', (['nc', 'bs', 'ipklin'], {'batch_size': 'y.shape[0]'}), '(nc, bs, ipklin, batch_size=y.shape[0])\n', (10365, 10404), False, 'from flowpm import linear_field, lpt_init, nbody, cic_paint, cic_readout\n'), ((10424, 10454), 'tensorflow.random.normal', 'tf.random.normal', (['x_true.shape'], {}), '(x_true.shape)\n', (10440, 10454), True, 'import tensorflow as tf\n'), ((5868, 5939), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(traindata[:, 0], traindata[:, 1:])'], {}), '((traindata[:, 0], traindata[:, 1:]))\n', (5902, 5939), True, 'import tensorflow as tf\n'), ((6002, 6071), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(testdata[:, 0], testdata[:, 1:])'], {}), '((testdata[:, 0], testdata[:, 1:]))\n', (6036, 6071), True, 'import tensorflow as tf\n'), ((8923, 8975), 'flowpm.linear_field', 'linear_field', (['nc', 'bs', 'ipkdiff'], {'batch_size': 'y.shape[0]'}), '(nc, bs, ipkdiff, batch_size=y.shape[0])\n', (8935, 8975), False, 'from flowpm import linear_field, lpt_init, nbody, cic_paint, cic_readout\n'), ((10257, 10309), 'flowpm.linear_field', 'linear_field', (['nc', 'bs', 'ipkdiff'], {'batch_size': 'y.shape[0]'}), '(nc, bs, ipkdiff, batch_size=y.shape[0])\n', (10269, 10309), False, 'from flowpm import linear_field, lpt_init, nbody, cic_paint, cic_readout\n'), ((11112, 11123), 'time.time', 'time.time', ([], {}), '()\n', (11121, 11123), False, 'import os, sys, argparse, time\n'), ((9376, 9390), 'tensorflow.constant', 'tf.constant', (['a'], {}), '(a)\n', (9387, 9390), True, 'import tensorflow as tf\n'), ((9392, 9406), 'tensorflow.constant', 
'tf.constant', (['b'], {}), '(b)\n', (9403, 9406), True, 'import tensorflow as tf\n'), ((9417, 9431), 'tensorflow.constant', 'tf.constant', (['c'], {}), '(c)\n', (9428, 9431), True, 'import tensorflow as tf\n'), ((9740, 9754), 'tensorflow.constant', 'tf.constant', (['a'], {}), '(a)\n', (9751, 9754), True, 'import tensorflow as tf\n'), ((9756, 9770), 'tensorflow.constant', 'tf.constant', (['b'], {}), '(b)\n', (9767, 9770), True, 'import tensorflow as tf\n'), ((9781, 9795), 'tensorflow.constant', 'tf.constant', (['c'], {}), '(c)\n', (9792, 9795), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
import numpy as np
import os, os.path
import subprocess
def string_label_to_label_vector(label_string, outcome_maps):
    """Convert a '#'-joined 'task=value' label string to integer indices.

    Each 'task=value' pair is looked up as outcome_maps[task][value]; the
    indices are returned in the order the pairs appear in the string.
    """
    pairs = (pair.split('=') for pair in label_string.split('#'))
    return [outcome_maps[task][value] for task, value in pairs]
def get_data_dimensions(data_file):
    """Return (num_instances, num_features) for a liblinear-format data file.

    num_instances is the number of lines in the file; num_features is the
    largest feature index seen (liblinear feature indices are 1-based, so
    this equals the required feature-vector length).
    """
    file_len = 0
    num_feats = 0
    # Single pass: count lines and track the max feature index together.
    # This replaces the previous `wc` subprocess call, which was POSIX-only
    # and forced a second full read of the file.
    with open(data_file) as data_handle:
        for line in data_handle:
            file_len += 1
            max_dim = int( line.rstrip().split(' ')[-1].split(':')[0] )
            if max_dim > num_feats:
                num_feats = max_dim
    return (file_len, num_feats)
def flatten_outputs(Y):
    """Expand a matrix of categorical labels into a flat output-layer matrix.

    A binary column (max value 1) maps to a single output column holding the
    value itself; an n-ary column (max value v > 1) maps to v + 1 columns with
    a one-hot encoding.  Returns the expanded matrix and the list of start
    offsets that map original columns to output-layer positions.
    """
    maxes = Y.max(0)
    offsets = [0]
    total_dims = 0
    for col_max in maxes:
        if col_max == 1:
            total_dims += 1
        elif col_max > 1:
            total_dims += (int(col_max) + 1)
        else:
            raise Exception("There is a column with all zeros!")
        offsets.append(total_dims)
    Y_adj = np.zeros((Y.shape[0], total_dims))
    for row in range(0, Y.shape[0]):
        for col in range(0, Y.shape[1]):
            start = int(offsets[col])
            if maxes[col] == 1:
                # Binary variable: copy the 0/1 value into its single slot.
                Y_adj[row][start] = Y[row][col]
            else:
                # N-ary variable: the value selects which slot is set to 1.
                Y_adj[row][start + int(Y[row][col])] = 1
    return Y_adj, offsets
def read_outcome_maps(dirname):
    """Read outcome-lookup.txt from `dirname` and build the label maps.

    Returns a tuple (raw_outcomes, derived_maps, lookup_map) where
    raw_outcomes[i] is the composite label string for outcome index i
    (slot 0 is None because liblinear labels are 1-indexed),
    derived_maps[task] maps each value string to its integer index, and
    lookup_map[task] is the inverse list (index -> value string).
    """
    raw_outcomes = [None]  # labels are 1-indexed; slot 0 is a placeholder
    derived_maps = {}
    lookup_map = {}
    # Context manager closes the file (the original handle was never closed).
    with open(os.path.join(dirname, 'outcome-lookup.txt')) as lookup_file:
        for line in lookup_file:
            (index, label) = line.rstrip().split(' ')
            raw_outcomes.append(label)
            for task_label in label.split('#'):
                (task, val) = task_label.rstrip().split("=")
                cur_map = derived_maps.setdefault(task, {})
                lookup = lookup_map.setdefault(task, [])
                # First sighting of a value gets the next free index.
                if not val in cur_map:
                    cur_map[val] = len(cur_map)
                    lookup.append(val)
    return raw_outcomes, derived_maps, lookup_map
def outcome_list(raw_outcomes):
    """Return the ordered task names parsed from the first real outcome
    string (raw_outcomes[0] is the None placeholder)."""
    return [pair.split("=")[0] for pair in raw_outcomes[1].split("#")]
def read_multitask_liblinear(dirname):
    """Load a multitask liblinear corpus from `dirname`.

    Expects 'outcome-lookup.txt' and 'training-data.liblinear' in the
    directory.  Returns (label_matrix, feat_matrix): one row per instance,
    with label_matrix holding one integer index per task and feat_matrix the
    dense feature values.
    """
    raw_outcomes, derived_maps, outcome_lookups = read_outcome_maps(dirname)
    data_file = os.path.join(dirname, 'training-data.liblinear')
    # First pass over the file just to size the matrices.
    (data_points, feat_dims) = get_data_dimensions(data_file)
    ## Remove bias feature -- will be part of any neural network
    label_dims = len(derived_maps)
    label_matrix = np.zeros( (data_points, label_dims) )
    feat_matrix = np.zeros( (data_points, feat_dims) )
    line_ind = 0
    for line in open( data_file ):
        label_and_feats = line.rstrip().split(' ')
        label = label_and_feats[0]
        # Map the single liblinear label index back to its composite
        # 'task=value#...' string, then to one index per task.
        string_label = raw_outcomes[int(label)]
        label_vec = string_label_to_label_vector(string_label, derived_maps)
        for ind, val in enumerate(label_vec):
            label_matrix[line_ind, ind] = val
        ## Go from 2 on -- skip both the label and the first feature since it will be
        ## the bias term from the liblinear data writer.
        # feat_list = feature_array_to_list( label_and_feats[1:], feat_dims )
        # feat_matrix[line_ind,:] = feat_list[1:]
        # numpy converts the string values from feature_array_to_list to floats
        # on assignment.
        feat_matrix[line_ind, :] = feature_array_to_list( label_and_feats[1:], feat_dims )
        # for feat in label_and_feats[1:]:
        #     (ind, val) = feat.split(':')
        #     feat_ind = int(ind) - 1    ## since feats are indexed at 1
        #     feat_matrix[line_ind, feat_ind] = float(val)
        line_ind += 1
    return label_matrix, feat_matrix
def convert_multi_output_to_string(outcomes, outcome_list, lookup_map, raw_outcomes):
    """Return the composite 'task=value#task=value' label string implied by
    the per-task outcome indices in `outcomes`.

    :param outcomes: integer index chosen for each task, aligned with outcome_list
    :param outcome_list: ordered task names
    :param lookup_map: task name -> list mapping an index back to its value string
    :param raw_outcomes: unused; kept for backward compatibility
    """
    # Build the pairs and join with '#' directly -- avoids shadowing the
    # builtin `str` and trimming a trailing separator, as the original did.
    return '#'.join('%s=%s' % (task, lookup_map[task][outcomes[ind]])
                    for ind, task in enumerate(outcome_list))
def feature_string_to_list(feat_string, length=-1):
    """Parse a space-separated 'index:value' feature string into a dense list.

    Thin wrapper around feature_array_to_list; see it for the semantics of
    `length` and the returned values.
    """
    return feature_array_to_list(feat_string.split(' '), length)
def feature_array_to_list(feats, length=-1):
    """Expand 'index:value' tokens into a dense list of the given length.

    Indices in the tokens are 1-based.  Unset slots stay 0; set slots hold
    the value exactly as it appears in the token (a string).  When length is
    -1 the list is sized to the number of tokens.
    """
    if length == -1:
        length = len(feats)
    dense = [0] * length
    for token in feats:
        ind, val = token.split(':')
        slot = int(ind) - 1  # convert 1-based feature index to list position
        if slot >= len(dense):
            raise Exception("Feature index %d is larger than feature vector length %d -- you may need to specify the expected length of the vector." % (slot, len(dense)))
        dense[slot] = val
    return dense
if __name__ == "__main__":
    ## Smoke test: load the bundled multitask assertion corpus and print one feature value.
    (labels, feats) = read_multitask_liblinear('data_testing/multitask_assertion/train_and_test/')
    print("train[0][100] = %f" % feats[0][100])
| [
"subprocess.check_output",
"numpy.zeros",
"os.path.join"
] | [((453, 495), 'subprocess.check_output', 'subprocess.check_output', (["['wc', data_file]"], {}), "(['wc', data_file])\n", (476, 495), False, 'import subprocess\n'), ((1400, 1433), 'numpy.zeros', 'np.zeros', (['(Y.shape[0], reqd_dims)'], {}), '((Y.shape[0], reqd_dims))\n', (1408, 1433), True, 'import numpy as np\n'), ((3140, 3188), 'os.path.join', 'os.path.join', (['dirname', '"""training-data.liblinear"""'], {}), "(dirname, 'training-data.liblinear')\n", (3152, 3188), False, 'import os, os.path\n'), ((3385, 3420), 'numpy.zeros', 'np.zeros', (['(data_points, label_dims)'], {}), '((data_points, label_dims))\n', (3393, 3420), True, 'import numpy as np\n'), ((3441, 3475), 'numpy.zeros', 'np.zeros', (['(data_points, feat_dims)'], {}), '((data_points, feat_dims))\n', (3449, 3475), True, 'import numpy as np\n'), ((2154, 2197), 'os.path.join', 'os.path.join', (['dirname', '"""outcome-lookup.txt"""'], {}), "(dirname, 'outcome-lookup.txt')\n", (2166, 2197), False, 'import os, os.path\n')] |
import cv2
import os
import sys
import pickle
import numpy as np
from PIL import Image
sys.path.insert(0, '/Workspace-Github/face_recognition/code')
import opencv_tools
import keras
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
subjects = ["", "YANG MI", "BABY"]
def prepare_training_data(data_folder_path):
    """Load pre-serialized face crops and labels, resized to 128x128.

    NOTE(review): `data_folder_path` is currently ignored -- the data is
    always read from the hard-coded pickle below (the opencv_tools call that
    would use it is commented out).  Confirm whether that is intentional.
    Returns (x_train, y_train) as numpy arrays.
    """
    #faces,labels = opencv_tools.prepare_training_data(data_folder_path)
    f = open('D:/Workspace-Github/face_recognition/serialized/data_train.file', 'rb')
    data = pickle.load(f)
    faces, labels = data[0], data[1]
    x_train = []
    for face in faces:
        im = Image.fromarray(face)
        # Resize every face crop to the fixed 128x128 input the CNN expects.
        imResize = im.resize((128,128), Image.ANTIALIAS)
        x_train.append(np.array(imResize))
    y_train = labels
    return np.array(x_train), np.array(y_train)
def train_CNN(x_train, y_train):
    """Train a small 2-class CNN on 128x128 grayscale face images.

    :param x_train: array of 128x128 uint8 face images
    :param y_train: 1-based integer labels (shifted to 0-based below)
    :return: the trained keras model; best weights are also checkpointed to disk
    """
    batch_size = 50
    num_classes = 2
    epochs = 20
    print(np.shape(x_train))
    # Flatten, scale pixel values to [0, 1] ...
    x_train = x_train.reshape(-1, 16384,1)
    x_train = x_train.astype('float32')
    x_train /= 255
    print(x_train.shape[0], 'train samples')
    # Labels are 1-based in the data; shift to 0-based before one-hot encoding.
    y_train = keras.utils.to_categorical(y_train-1, num_classes)
    img_rows, img_cols = 128, 128
    # ... then reshape back to image tensors for the convolutional layers.
    x_train = x_train.reshape(x_train.shape[0], img_cols, img_rows, 1) #1 means: grey 1 layer
    model = Sequential()
    model.add(Conv2D(64, (5, 5), activation='relu', input_shape=(img_cols, img_rows, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten(input_shape=model.output_shape[1:])) # input: 64 layers of 4*4, output: =64*4*4=1024
    model.add(Dense(64, activation='relu')) #=128
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation='softmax'))
    model.summary()
    model.compile(loss=keras.losses.categorical_crossentropy,
        optimizer=keras.optimizers.SGD(),
        metrics=['accuracy'])
    # check-points: keep only the weights with the lowest training loss so far
    filepath="/Workspace-Github/face_recognition/serialized/weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
    callbacks_list = [checkpoint]
    run = model.fit(x_train, y_train,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        validation_data=None,
        callbacks=callbacks_list)
    return model
def predict(test_img, model):
    """Detect the face in test_img, classify it with the CNN, and return an
    annotated copy of the image (bounding rectangle + predicted subject name)."""
    # Work on a copy so the caller's original image is left untouched.
    annotated = test_img.copy()
    # Detect the face region in the copy.
    face, rect = opencv_tools.detect_face_CV2(annotated)
    # Match the training preprocessing: 128x128, scaled to [0, 1], single channel.
    resized = Image.fromarray(face).resize((128, 128), Image.ANTIALIAS)
    batch = (np.array(resized) / 255).reshape(1, 128, 128, 1).astype('float32')
    # Predicted class is 0-based; subjects[] is 1-based (slot 0 is a placeholder).
    label = np.argmax(model.predict(batch, verbose=0)) + 1
    print(label)
    label_text = subjects[label]
    # Draw the detection rectangle and the predicted name above it.
    opencv_tools.draw_rectangle(annotated, rect)
    opencv_tools.draw_text(annotated, label_text, rect[0], rect[1] - 5)
    return annotated
"opencv_tools.detect_face_CV2",
"PIL.Image.fromarray",
"sys.path.insert",
"keras.layers.Conv2D",
"keras.layers.Flatten",
"keras.callbacks.ModelCheckpoint",
"keras.layers.MaxPooling2D",
"pickle.load",
"opencv_tools.draw_rectangle",
"keras.models.Sequential",
"keras.utils.to_categorical",
"openc... | [((87, 148), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/Workspace-Github/face_recognition/code"""'], {}), "(0, '/Workspace-Github/face_recognition/code')\n", (102, 148), False, 'import sys\n'), ((585, 599), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (596, 599), False, 'import pickle\n'), ((1190, 1242), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['(y_train - 1)', 'num_classes'], {}), '(y_train - 1, num_classes)\n', (1216, 1242), False, 'import keras\n'), ((1391, 1403), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1401, 1403), False, 'from keras.models import Sequential\n'), ((2223, 2312), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(filepath, monitor='loss', verbose=1, save_best_only=True,\n mode='min')\n", (2238, 2312), False, 'from keras.callbacks import ModelCheckpoint\n'), ((2788, 2821), 'opencv_tools.detect_face_CV2', 'opencv_tools.detect_face_CV2', (['img'], {}), '(img)\n', (2816, 2821), False, 'import opencv_tools\n'), ((2831, 2852), 'PIL.Image.fromarray', 'Image.fromarray', (['face'], {}), '(face)\n', (2846, 2852), False, 'from PIL import Image\n'), ((3262, 3300), 'opencv_tools.draw_rectangle', 'opencv_tools.draw_rectangle', (['img', 'rect'], {}), '(img, rect)\n', (3289, 3300), False, 'import opencv_tools\n'), ((3340, 3401), 'opencv_tools.draw_text', 'opencv_tools.draw_text', (['img', 'label_text', 'rect[0]', '(rect[1] - 5)'], {}), '(img, label_text, rect[0], rect[1] - 5)\n', (3362, 3401), False, 'import opencv_tools\n'), ((690, 711), 'PIL.Image.fromarray', 'Image.fromarray', (['face'], {}), '(face)\n', (705, 711), False, 'from PIL import Image\n'), ((844, 861), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (852, 861), True, 'import numpy as np\n'), ((863, 880), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (871, 880), True, 'import numpy as 
np\n'), ((1005, 1022), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (1013, 1022), True, 'import numpy as np\n'), ((1418, 1492), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(5, 5)'], {'activation': '"""relu"""', 'input_shape': '(img_cols, img_rows, 1)'}), "(64, (5, 5), activation='relu', input_shape=(img_cols, img_rows, 1))\n", (1424, 1492), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten\n'), ((1508, 1538), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1520, 1538), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten\n'), ((1559, 1596), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(5, 5)'], {'activation': '"""relu"""'}), "(64, (5, 5), activation='relu')\n", (1565, 1596), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten\n'), ((1612, 1642), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1624, 1642), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten\n'), ((1663, 1706), 'keras.layers.Flatten', 'Flatten', ([], {'input_shape': 'model.output_shape[1:]'}), '(input_shape=model.output_shape[1:])\n', (1670, 1706), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten\n'), ((1770, 1798), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (1775, 1798), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten\n'), ((1820, 1832), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1827, 1832), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten\n'), ((1848, 1888), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (1853, 1888), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten\n'), ((792, 810), 'numpy.array', 
'np.array', (['imResize'], {}), '(imResize)\n', (800, 810), True, 'import numpy as np\n'), ((2007, 2029), 'keras.optimizers.SGD', 'keras.optimizers.SGD', ([], {}), '()\n', (2027, 2029), False, 'import keras\n'), ((2921, 2939), 'numpy.array', 'np.array', (['imResize'], {}), '(imResize)\n', (2929, 2939), True, 'import numpy as np\n')] |
# coding=utf-8
"""
PYOPENGL-TOOLBOX FIGURES
Utility functions for drawing figures in PyOpenGL.
MIT License
Copyright (c) 2015-2019 <NAME>.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Library imports
from math import sqrt as _sqrt
from math import pi as _pi
from numpy import array as _array
from OpenGL.arrays import vbo as _vbo
from PyOpenGLtoolbox.utils import print_gl_error as _print_gl_error
from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, \
draw_vertex_list_create_normal_textured
from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2
# noinspection PyPep8Naming
import OpenGL.GL as _gl
# noinspection PyPep8Naming
import OpenGL.GLUT as _glut
# Constants
_FIGURES_FIGURE_LIST = 0xfa01
_FIGURES_FIGURE_VBO = 0xfa02
_FIGURES_ERRS = []
for i in range(10):
_FIGURES_ERRS.append(False)
class VBObject(object):
    """
    VBO object that can load and draw elements using shaders.
    """

    def __init__(self, vertex, fragment, total_vertex, texture=None):
        """
        Constructor.

        :param vertex: Vertex VBO (OpenGL.arrays.vbo.VBO)
        :param fragment: Fragment/normal VBO (OpenGL.arrays.vbo.VBO)
        :param total_vertex: Total number of vertices to draw (int)
        :param texture: Optional list of texture handles
        :raises Exception: If an argument has the wrong type
        """
        # Guard clauses replace the original nested type checks; messages unchanged.
        if not (isinstance(vertex, _vbo.VBO) and isinstance(fragment, _vbo.VBO)):
            raise Exception('vertex and fragment must be VBO type (OpenGL.arrays.vbo)')
        if not isinstance(total_vertex, int):
            raise Exception('total_vertex must be int type')
        self.vertex = vertex
        self.fragment = fragment
        self.totalVertex = total_vertex
        self.texture = texture
        # Number of textures to bind on each draw (0 when no texture list given)
        self.texlen = 0 if self.texture is None else len(self.texture)

    def draw(self, pos=None, rgb=None):
        """
        Draw the object.

        :param pos: Position (x, y, z); defaults to the origin
        :param rgb: Optional RGBA color
        :type pos: list
        :type rgb: list
        """
        if pos is None:
            pos = [0.0, 0.0, 0.0]
        try:
            # Create new matrix
            _gl.glPushMatrix()

            # Make bind between vbos and shader program
            self.vertex.bind()
            _gl.glVertexPointerf(self.vertex)
            self.fragment.bind()
            _gl.glNormalPointerf(self.fragment)

            # Enable vbos
            _gl.glEnableClientState(_gl.GL_VERTEX_ARRAY)
            _gl.glEnableClientState(_gl.GL_NORMAL_ARRAY)

            # Enable transform
            if rgb is not None:
                _gl.glColor4fv(rgb)
            _gl.glTranslate(pos[0], pos[1], pos[2])

            # Enable textures
            for _i in range(self.texlen):
                _gl.glActiveTexture(_gl.GL_TEXTURE0 + _i)
                _gl.glEnable(_gl.GL_TEXTURE_2D)
                _gl.glBindTexture(_gl.GL_TEXTURE_2D, self.texture[_i])

            # Draw triangles each 3 elements of vbo
            _gl.glDrawArrays(_gl.GL_TRIANGLES, 0, self.totalVertex)

            # Disable textures
            for _i in range(self.texlen):
                _gl.glActiveTexture(_gl.GL_TEXTURE0 + _i)
                _gl.glDisable(_gl.GL_TEXTURE_2D)

            # Disable vbos
            _gl.glDisableClientState(_gl.GL_VERTEX_ARRAY)
            _gl.glDisableClientState(_gl.GL_NORMAL_ARRAY)

            # Pop matrix
            _gl.glPopMatrix()
        except Exception as err:
            # Chain the underlying GL error instead of a bare `except:` that
            # discarded it (and swallowed KeyboardInterrupt/SystemExit too).
            raise Exception('VBO draw error') from err
def load_obj_model(file_name):
    """
    Load a Wavefront .OBJ file with triangular faces.

    :param file_name: File name
    :type file_name: basestring
    :return: Tuple (vertex, normals, uv, faces_vertex, faces_normal, faces_uv);
        face indices are kept 1-based, exactly as stored in the file
    :rtype: tuple
    """
    vertex = []
    normals = []
    uv = []
    faces_vertex = []
    faces_normal = []
    faces_uv = []
    # Context manager closes the file even on parse errors
    # (the original handle was never closed).
    with open(file_name) as obj_file:
        for line in obj_file:
            info = line.split(' ')
            if info[0] == 'v':
                # NOTE(review): the -0.1 offset on y reproduces the original
                # behavior -- confirm it is an intentional model adjustment.
                vertex.append(
                    (float(info[1]), float(info[2]) - 0.1, float(info[3])))
            elif info[0] == 'vn':
                normals.append((float(info[1]), float(info[2]), float(info[3])))
            elif info[0] == 'vt':
                uv.append((float(info[1]), float(info[2])))
            elif info[0] == 'f':
                # Each face vertex is 'v/vt/vn'; only triangles are handled.
                p1 = info[1].split('/')
                p2 = info[2].split('/')
                p3 = info[3].split('/')
                faces_vertex.append((int(p1[0]), int(p2[0]), int(p3[0])))
                faces_uv.append((int(p1[1]), int(p2[1]), int(p3[1])))
                faces_normal.append((int(p1[2]), int(p2[2]), int(p3[2])))
    return vertex, normals, uv, faces_vertex, faces_normal, faces_uv
def load_gmsh_model(modelfile, scale, dx=0.0, dy=0.0, dz=0.0, avg=True,
                    neg_normal=False, texture=None):
    """
    Loads an .MSH or .GMSH file and returns an vboObject scaled as 'scale', by default
    normal are average, to disable use avg=False. The model also can be displaced by
    (dx,dy,dz) and reverse the normals if neg_normal is True.
    :param modelfile: File name
    :param scale: Scale parameter
    :param dx: X-displacement
    :param dy: Y-displacement
    :param dz: Z-displacement
    :param avg: Normal-avg
    :param neg_normal: Reverse normal
    :param texture: Texture file
    :type modelfile: basestring
    :type scale: float
    :type dx: float, int
    :type dy: float, int
    :type avg: bool
    :type neg_normal: bool
    :type texture: list
    :return: VBO Object that contains GMSH model
    :rtype: VBObject
    """
    def load(gmshfile, _scale, _dx, _dy, _dz):
        """
        Load an GMSH file and returns 3 lists, one for vertex, one for normals and another for
        normal averages. Takes file, scale and displacement.
        :param gmshfile: GMSH file
        :param _scale: Scale parameter
        :param _dx: X-displacement
        :param _dy: Y-displacement
        :param _dz: Z-displacement
        :return: Tuple (triangle vertices, per-face normals, averaged normals)
        """
        def get_ave_normals(_nodes, _elems):
            """
            Calculate normal average for each vertex.
            :param _nodes: Node coordinate list
            :param _elems: Triangle node-index triples
            :return: List with one averaged normal per node
            """
            # Build the node -> element incidence with a single pass over the
            # elements; the previous per-node membership scan was
            # O(len(_nodes) * len(_elems)).
            nodetrilist = [[] for _ in _nodes]
            for elemnum, _el in enumerate(_elems):
                # set() keeps degenerate elements (repeated node indices) from
                # being counted twice, matching the old membership test
                for _nodenum in set(_el):
                    nodetrilist[_nodenum].append(elemnum)
            _avenorms = []
            for tri in nodetrilist:
                ave_ni = 0.0
                ave_nj = 0.0
                ave_nk = 0.0
                # max(..., 1) avoids division by zero for unreferenced nodes
                denom = max(float(len(tri)), 1)
                for _elem in tri:
                    _vert1 = [_nodes[_elems[_elem][0]][0], _nodes[_elems[_elem][0]][1],
                              _nodes[_elems[_elem][0]][2]]
                    _vert2 = [_nodes[_elems[_elem][1]][0], _nodes[_elems[_elem][1]][1],
                              _nodes[_elems[_elem][1]][2]]
                    _vert3 = [_nodes[_elems[_elem][2]][0], _nodes[_elems[_elem][2]][1],
                              _nodes[_elems[_elem][2]][2]]
                    _normals = get_normals(_vert1, _vert2, _vert3)
                    ave_ni += _normals[0]
                    ave_nj += _normals[1]
                    ave_nk += _normals[2]
                _avenorms.append([ave_ni / denom, ave_nj / denom, ave_nk / denom])
            return _avenorms
        def get_normals(vert_a, vert_b, vert_c):
            """
            Calculate the unit normal of the triangle (vert_a, vert_b, vert_c).
            :param vert_a: First vertex [x, y, z]
            :param vert_b: Second vertex [x, y, z]
            :param vert_c: Third vertex [x, y, z]
            :return: Unit normal [ni, nj, nk]
            """
            x_a = vert_a[0]
            x_b = vert_b[0]
            x_c = vert_c[0]
            y_a = vert_a[1]
            y_b = vert_b[1]
            y_c = vert_c[1]
            z_a = vert_a[2]
            z_b = vert_b[2]
            z_c = vert_c[2]
            # Edge vectors AB and BC
            a_bx = x_b - x_a
            a_by = y_b - y_a
            a_bz = z_b - z_a
            b_cx = x_c - x_b
            b_cy = y_c - y_b
            b_cz = z_c - z_b
            # Cross product AB x BC, then normalize
            nx = a_by * b_cz - a_bz * b_cy
            ny = a_bz * b_cx - a_bx * b_cz
            nz = a_bx * b_cy - a_by * b_cx
            vec_mag = _sqrt(nx ** 2 + ny ** 2 + nz ** 2)
            ni = nx / vec_mag
            nj = ny / vec_mag
            nk = nz / vec_mag
            return [ni, nj, nk]
        # Read file; keep the historical message for missing files
        try:
            infile = open(gmshfile)
        except OSError:
            raise Exception('Model file does not exist')
        # Create model
        nodes = []
        try:
            # Close the handle even if parsing fails (it previously leaked)
            with infile:
                gmshlines = infile.readlines()
            readnodes = False
            readelems = False
            skipline = 0
            elems = []
            lnum = 0
            for line in gmshlines:
                if '$Nodes' in line:
                    readnodes = True
                    skipline = 2
                    # Pre-size the node list; the sentinel marks unread slots
                    nnodes = int(gmshlines[lnum + 1].strip())
                    nodes = []
                    for _i in range(nnodes):
                        nodes.append(99999.9)
                elif '$EndNodes' in line:
                    readnodes = False
                    skipline = 1
                elif '$Elements' in line:
                    readelems = True
                    skipline = 2
                elif '$EndElements' in line:
                    readelems = False
                    skipline = 1
                if skipline < 1:
                    if readnodes:
                        n_xyz = line.strip().split()
                        nodenum = int(n_xyz[0]) - 1
                        n_x = float(n_xyz[1]) * _scale + _dx
                        n_y = float(n_xyz[2]) * _scale + _dy
                        n_z = float(n_xyz[3]) * _scale + _dz
                        # NOTE(review): neg_normal mirrors only the Z coordinate;
                        # the winding swap below flips the normals -- confirm
                        if neg_normal:
                            n_z *= -1
                        nodes[nodenum] = [n_x, n_y, n_z]
                    elif readelems:
                        n123 = line.split()
                        # Element type '2' is a 3-node triangle in MSH format
                        if n123[1] == '2':
                            # Last two indices are intentionally swapped to
                            # keep a consistent winding order
                            n1 = int(n123[-3]) - 1
                            n2 = int(n123[-1]) - 1
                            n3 = int(n123[-2]) - 1
                            elems.append([n1, n2, n3])
                else:
                    skipline -= 1
                lnum += 1
            triarray = []
            normarray = []
            avenorms = []
            nodeavenorms = get_ave_normals(nodes, elems)
            # Expand each triangle into flat vertex/normal/avg-normal arrays
            for elem in elems:
                vert1 = [nodes[elem[0]][0], nodes[elem[0]][1],
                         nodes[elem[0]][2]]
                vert2 = [nodes[elem[1]][0], nodes[elem[1]][1],
                         nodes[elem[1]][2]]
                vert3 = [nodes[elem[2]][0], nodes[elem[2]][1],
                         nodes[elem[2]][2]]
                avenorm0 = nodeavenorms[elem[0]]
                avenorm1 = nodeavenorms[elem[1]]
                avenorm2 = nodeavenorms[elem[2]]
                normals = get_normals(vert1, vert2, vert3)
                triarray.append(vert1)
                triarray.append(vert2)
                triarray.append(vert3)
                normarray.append(normals)
                normarray.append(normals)
                normarray.append(normals)
                avenorms.append(avenorm0)
                avenorms.append(avenorm1)
                avenorms.append(avenorm2)
            return triarray, normarray, avenorms
        except Exception:
            # Narrowed from a bare except; message kept for compatibility
            raise Exception('Error load model')
    vertex, norm, avgnorm = load(modelfile, scale, float(dx), float(dy), float(dz))
    if avg:
        return VBObject(_vbo.VBO(_array(vertex, 'f')),
                        _vbo.VBO(_array(avgnorm, 'f')), len(vertex), texture)
    else:
        return VBObject(_vbo.VBO(_array(vertex, 'f')), _vbo.VBO(_array(norm, 'f')),
                        len(vertex), texture)
def create_sphere(lats=10, longs=10, color=None):
    """
    Creates an sphere.
    :param lats: Latitude
    :param longs: Longitude
    :param color: Color
    :type lats: int
    :type longs: int
    :type color: list
    :return: OpenGL list
    """
    if lats >= 3 and longs >= 10:
        obj = _gl.glGenLists(1)
        _gl.glNewList(obj, _gl.GL_COMPILE)
        _gl.glPushMatrix()
        if color is not None:
            _gl.glColor4fv(color)
        try:
            _glut.glutSolidSphere(1.0, lats, longs)
        except Exception:
            # Warn only once per session that GLUT is unavailable
            if not _FIGURES_ERRS[0]:
                _print_gl_error('OpenGL actual version does not support glutSolidSphere function')
            _FIGURES_ERRS[0] = True
        # NOTE(review): this quad-strip sphere is drawn even when
        # glutSolidSphere succeeded above -- confirm whether it is
        # meant as a fallback only
        for _i in range(0, lats + 1):
            lat0 = _pi * (-0.5 + float(float(_i - 1) / float(lats)))
            z0 = _sin(lat0)
            zr0 = _cos(lat0)
            lat1 = _pi * (-0.5 + float(float(_i) / float(lats)))
            z1 = _sin(lat1)
            zr1 = _cos(lat1)
            # Use Quad strips to draw the sphere
            _gl.glBegin(_gl.GL_QUAD_STRIP)
            for _j in range(0, longs + 1):
                _long = 2 * _pi * float(float(_j - 1) / float(longs))
                x = _cos(_long)
                y = _sin(_long)
                _gl.glNormal3f(x * zr0, y * zr0, z0)
                _gl.glVertex3f(x * zr0, y * zr0, z0)
                _gl.glNormal3f(x * zr1, y * zr1, z1)
                _gl.glVertex3f(x * zr1, y * zr1, z1)
            _gl.glEnd()
        _gl.glPopMatrix()
        _gl.glEndList()
        return obj
    else:
        # Message typo fixed ('logitude' -> 'longitude')
        raise Exception('Latitude and longitude must be greater than 3')
def create_circle(rad=1.0, diff=0.1, normal=None, color=None):
    """
    Creates a circle.
    :param rad: Radius
    :param diff: Difference
    :param normal: Normal
    :param color: Color
    :type rad: float, int
    :type diff: float, int
    :type normal: list
    :type color: list
    :return: OpenGL list
    """
    if normal is None:
        normal = [0.0, 0.0, 1.0]
    if diff > 0:
        obj = _gl.glGenLists(1)
        _gl.glNewList(obj, _gl.GL_COMPILE)
        _gl.glPushMatrix()
        if color is not None:
            _gl.glColor4fv(color)
        # Filled interior
        ang = 0.0
        _gl.glBegin(_gl.GL_POLYGON)
        while ang <= 360.0:
            _gl.glNormal3fv(normal)
            _gl.glVertex2f(_sin(ang) * rad, _cos(ang) * rad)
            ang += diff
        _gl.glEnd()
        # Outline: restart the sweep -- without this reset ang is already
        # past 360 and the line loop body never executed (bug fix)
        ang = 0.0
        _gl.glBegin(_gl.GL_LINE_LOOP)
        while ang <= 360.0:
            _gl.glVertex2f(_sin(ang) * rad, _cos(ang) * rad)
            ang += diff
        _gl.glEnd()
        _gl.glPopMatrix()
        _gl.glEndList()
        return obj
    else:
        raise Exception('Difference must be greater than zero')
def create_cone(base=1.0, height=1.0, lat=20, lng=20, color=None):
    """
    Creates a cone with the given base radius and height, closed underneath
    by a circle.
    :param base: Cone base
    :param height: Cone height
    :param lat: Cone latitude
    :param lng: Cone longitude
    :param color: Cone color
    :type base: float, int
    :type height: float, int
    :type lat: int
    :type lng: int
    :type color: list
    :return: OpenGL list
    """
    # Guard clause instead of the nested if/else
    if lat < 3 or lng < 10:
        raise Exception('Latitude and longitude of the figure must be greater than 3')
    # Base cap facing -Z so the cone is closed underneath
    # noinspection PyArgumentEqualDefault
    circlebase = create_circle(base - 0.05, 0.1, [0.0, 0.0, -1.0], color)
    display_list = _gl.glGenLists(1)
    _gl.glNewList(display_list, _gl.GL_COMPILE)
    _gl.glPushMatrix()
    if color is not None:
        _gl.glColor4fv(color)
    # noinspection PyBroadException
    try:
        _glut.glutSolidCone(base, height, lat, lng)
    except:
        # NOTE(review): error-flag slot 3 is also used by glutSolidCube -- confirm
        if not _FIGURES_ERRS[3]:
            _print_gl_error('OpenGL actual version does not support glutSolidCone function')
        _FIGURES_ERRS[3] = True
    _gl.glCallList(circlebase)
    _gl.glPopMatrix()
    _gl.glEndList()
    return display_list
def create_cube(color=None):
    """
    Cretes a cube.
    :param color: Cube color
    :type color: list
    :return: OpenGL list
    """
    # Cube corners (edge length 2, centred at the origin)
    a = Point3(-1.0, -1.0, -1.0)
    b = Point3(1.0, -1.0, -1.0)
    c = Point3(1.0, -1.0, 1.0)
    d = Point3(-1.0, -1.0, 1.0)
    e = Point3(-1.0, 1.0, -1.0)
    f = Point3(1.0, 1.0, -1.0)
    g = Point3(1.0, 1.0, 1.0)
    h = Point3(-1.0, 1.0, 1.0)
    # Six quad faces, listed in the original drawing order
    quad_faces = ([a, b, c, d], [b, f, g, c], [f, e, h, g],
                  [e, a, d, h], [d, c, g, h], [a, e, f, b])
    display_list = _gl.glGenLists(1)
    _gl.glNewList(display_list, _gl.GL_COMPILE)
    _gl.glPushMatrix()
    _gl.glBegin(_gl.GL_QUADS)
    if color is not None:
        _gl.glColor4fv(color)
    for face in quad_faces:
        draw_vertex_list_create_normal(face)
    _gl.glEnd()
    _gl.glPopMatrix()
    _gl.glEndList()
    return display_list
def create_cube_textured(texture_list):
    """
    Create a textured cube.
    :param texture_list: Texture OpenGL list
    :return: OpenGL list
    """
    # Cube corners (edge length 2, centred at the origin)
    a = Point3(-1.0, -1.0, -1.0)
    b = Point3(1.0, -1.0, -1.0)
    c = Point3(1.0, -1.0, 1.0)
    d = Point3(-1.0, -1.0, 1.0)
    e = Point3(-1.0, 1.0, -1.0)
    f = Point3(1.0, 1.0, -1.0)
    g = Point3(1.0, 1.0, 1.0)
    h = Point3(-1.0, 1.0, 1.0)
    # One UV quad shared by every face
    t_list = [Point2(0, 0), Point2(1, 0), Point2(1, 1), Point2(0, 1)]
    quad_faces = ([a, b, c, d], [b, f, g, c], [f, e, h, g],
                  [e, a, d, h], [d, c, g, h], [a, e, f, b])
    display_list = _gl.glGenLists(1)
    _gl.glNewList(display_list, _gl.GL_COMPILE)
    _gl.glPushMatrix()
    # Bind each texture to its own texture unit
    for unit, tex in enumerate(texture_list):
        _gl.glActiveTexture(_gl.GL_TEXTURE0 + unit)
        _gl.glEnable(_gl.GL_TEXTURE_2D)
        _gl.glBindTexture(_gl.GL_TEXTURE_2D, tex)
    _gl.glBegin(_gl.GL_QUADS)
    for face in quad_faces:
        draw_vertex_list_create_normal_textured(face, t_list)
    _gl.glEnd()
    # Disable the units again so the list leaves no texture state behind
    for unit in range(len(texture_list)):
        _gl.glActiveTexture(_gl.GL_TEXTURE0 + unit)
        _gl.glDisable(_gl.GL_TEXTURE_2D)
    _gl.glPopMatrix()
    _gl.glEndList()
    return display_list
def create_torus(minr=0.5, maxr=1.0, lat=30, lng=30, color=None):
    """
    Creates a torus.
    :param minr: Minimum radius
    :param maxr: Maximum radius
    :param lat: Latitude
    :param lng: Longitude
    :param color: Color
    :type minr: float, int
    :type maxr: float, int
    :type lat: int
    :type lng: int
    :type color: list
    :return: OpenGl list
    """
    # Guard clause instead of the nested if/else
    if lat < 3 or lng < 3:
        raise Exception('Latitude and longitude of the figure must be greater than 3')
    display_list = _gl.glGenLists(1)
    _gl.glNewList(display_list, _gl.GL_COMPILE)
    _gl.glPushMatrix()
    if color is not None:
        _gl.glColor4fv(color)
    # noinspection PyBroadException
    try:
        _glut.glutSolidTorus(minr, maxr, lat, lng)
    except:
        # Warn only once per session that GLUT is unavailable
        if not _FIGURES_ERRS[2]:
            _print_gl_error('OpenGL actual version does not support glutSolidTorus function')
        _FIGURES_ERRS[2] = True
    _gl.glPopMatrix()
    _gl.glEndList()
    return display_list
def create_cube_solid(color=None):
    """
    Create a solid cube.
    :param color: Cube color
    :type color: list
    :return: OpenGL list
    """
    display_list = _gl.glGenLists(1)
    _gl.glNewList(display_list, _gl.GL_COMPILE)
    _gl.glPushMatrix()
    if color is not None:
        _gl.glColor4fv(color)
    # noinspection PyBroadException
    try:
        _glut.glutSolidCube(1.0)
    except:
        # Warn only once per session that GLUT is unavailable
        if not _FIGURES_ERRS[3]:
            _print_gl_error('OpenGL actual version does not support glutSolidCube function')
        _FIGURES_ERRS[3] = True
    _gl.glPopMatrix()
    _gl.glEndList()
    return display_list
def create_pyramid(color=None):
    """
    Creates a pyramid.
    :param color: Pyramid color
    :type color: list
    :return: OpenGL list
    """
    scale = 2.0
    # Square base (a, b, c, d) and apex e
    a = Point3(-0.5, -0.5, -0.333) * scale
    b = Point3(0.5, -0.5, -0.333) * scale
    c = Point3(0.5, 0.5, -0.333) * scale
    d = Point3(-0.5, 0.5, -0.333) * scale
    e = Point3(0.0, 0.0, 0.666) * scale
    display_list = _gl.glGenLists(1)
    _gl.glNewList(display_list, _gl.GL_COMPILE)
    _gl.glPushMatrix()
    if color is not None:
        _gl.glColor4fv(color)
    # Base quad, wound to face downwards
    _gl.glBegin(_gl.GL_QUADS)
    draw_vertex_list_create_normal([d, c, b, a])
    _gl.glEnd()
    # Four lateral triangles sharing the apex
    _gl.glBegin(_gl.GL_TRIANGLES)
    for face in ([a, b, e], [b, c, e], [c, d, e], [d, a, e]):
        draw_vertex_list_create_normal(face)
    _gl.glEnd()
    _gl.glPopMatrix()
    _gl.glEndList()
    return display_list
def create_pyramid_textured(texture_list):
    """
    Create a textured pyramid.
    :param texture_list: Texture OpenGL list
    :return: OpenGL list
    """
    scale = 2.0
    # Square base (a, b, c, d) and apex e
    a = Point3(-0.5, -0.5, -0.333) * scale
    b = Point3(0.5, -0.5, -0.333) * scale
    c = Point3(0.5, 0.5, -0.333) * scale
    d = Point3(-0.5, 0.5, -0.333) * scale
    e = Point3(0.0, 0.0, 0.666) * scale
    # UV sets: a quad for the base, a triangle for the lateral faces
    t_list = [Point2(0, 0), Point2(1, 0), Point2(1, 1), Point2(0, 1)]
    t_list_face = [Point2(0, 0), Point2(0.5, 1.0), Point2(1, 0)]
    display_list = _gl.glGenLists(1)
    _gl.glNewList(display_list, _gl.GL_COMPILE)
    _gl.glPushMatrix()
    # Bind each texture to its own texture unit
    for unit, tex in enumerate(texture_list):
        _gl.glActiveTexture(_gl.GL_TEXTURE0 + unit)
        _gl.glEnable(_gl.GL_TEXTURE_2D)
        _gl.glBindTexture(_gl.GL_TEXTURE_2D, tex)
    # Base quad, wound to face downwards
    _gl.glBegin(_gl.GL_QUADS)
    draw_vertex_list_create_normal_textured([d, c, b, a], t_list)
    _gl.glEnd()
    # Four lateral triangles sharing the apex
    _gl.glBegin(_gl.GL_TRIANGLES)
    for face in ([a, b, e], [b, c, e], [c, d, e], [d, a, e]):
        draw_vertex_list_create_normal_textured(face, t_list_face)
    _gl.glEnd()
    # Disable the units again so the list leaves no texture state behind
    for unit in range(len(texture_list)):
        _gl.glActiveTexture(_gl.GL_TEXTURE0 + unit)
        _gl.glDisable(_gl.GL_TEXTURE_2D)
    _gl.glPopMatrix()
    _gl.glEndList()
    return display_list
def create_diamond(color=None):
    """
    Creates a diamond.
    :param color: Diamond color
    :type color: list
    :return: OpenGL list
    """
    # Equatorial square (a, b, c, d) and the two apexes along +/-Z
    a = Point3(-1.0, -1.0, 0.0)
    b = Point3(1.0, -1.0, 0.0)
    c = Point3(1.0, 1.0, 0.0)
    d = Point3(-1.0, 1.0, 0.0)
    e = Point3(0.0, 0.0, 1.0)
    f = Point3(0.0, 0.0, -1.0)
    # Upper half toward e, lower half toward f (reverse winding)
    tri_faces = ([a, b, e], [b, c, e], [c, d, e], [d, a, e],
                 [b, a, f], [c, b, f], [d, c, f], [a, d, f])
    display_list = _gl.glGenLists(1)
    _gl.glNewList(display_list, _gl.GL_COMPILE)
    _gl.glPushMatrix()
    if color is not None:
        _gl.glColor4fv(color)
    _gl.glBegin(_gl.GL_TRIANGLES)
    for face in tri_faces:
        draw_vertex_list_create_normal(face)
    _gl.glEnd()
    _gl.glPopMatrix()
    _gl.glEndList()
    return display_list
def create_teapot(color=None):
    """
    Create a OpenGL teapot.
    :param color: Object color
    :type color: list
    :return: OpenGL list
    """
    obj = _gl.glGenLists(1)
    _gl.glNewList(obj, _gl.GL_COMPILE)
    _gl.glPushMatrix()
    if color is not None:
        _gl.glColor4fv(color)
    # Rotate so the teapot stands upright in this library's orientation
    _gl.glRotate(90, 1, 0, 0)
    try:
        _glut.glutSolidTeapot(1.0)
    except Exception:
        # Warn only once; message typo fixed ('doest not' -> 'does not')
        if not _FIGURES_ERRS[4]:
            _print_gl_error('OpenGL actual version does not support glutSolidTeapot function')
        _FIGURES_ERRS[4] = True
    _gl.glPopMatrix()
    _gl.glEndList()
    return obj
def create_teapot_textured(texture_list):
    """
    Creates a teapot textured.
    :param texture_list: Texture OpenGL list
    :return: Object list
    """
    display_list = _gl.glGenLists(1)
    _gl.glNewList(display_list, _gl.GL_COMPILE)
    _gl.glPushMatrix()
    # Bind each texture to its own texture unit
    for unit, tex in enumerate(texture_list):
        _gl.glActiveTexture(_gl.GL_TEXTURE0 + unit)
        _gl.glEnable(_gl.GL_TEXTURE_2D)
        _gl.glBindTexture(_gl.GL_TEXTURE_2D, tex)
    _gl.glRotate(90, 1, 0, 0)
    # noinspection PyBroadException
    try:
        _glut.glutSolidTeapot(1.0)
    except:
        # Warn only once per session that GLUT is unavailable
        if not _FIGURES_ERRS[4]:
            _print_gl_error('OpenGL actual version does not support glutSolidTeapot function')
        _FIGURES_ERRS[4] = True
    # Disable the units again so the list leaves no texture state behind
    for unit in range(len(texture_list)):
        _gl.glActiveTexture(_gl.GL_TEXTURE0 + unit)
        _gl.glDisable(_gl.GL_TEXTURE_2D)
    _gl.glPopMatrix()
    _gl.glEndList()
    return display_list
def create_pyramid_vbo(edge=1.0):
    """
    Creates a VBO pyramid for shaders.
    :param edge: Edge length
    :type edge: float, int
    :return: VBO Object
    :rtype: VBObject
    """
    def _as_list(point):
        """
        Export a point to a plain list.
        :param point: Point element
        :return: Coordinate list
        :rtype: list
        """
        return point.export_to_list()
    # Square base (a, b, c, d) and apex e
    a = Point3(-0.5, -0.5, -0.333) * edge
    b = Point3(0.5, -0.5, -0.333) * edge
    c = Point3(0.5, 0.5, -0.333) * edge
    d = Point3(-0.5, 0.5, -0.333) * edge
    e = Point3(0.0, 0.0, 0.666) * edge
    # One normal per lateral face (n1..n4) plus one for the base (n5)
    n1 = _as_list(_normal_3_points(a, b, e))
    n2 = _as_list(_normal_3_points(b, c, e))
    n3 = _as_list(_normal_3_points(c, d, e))
    n4 = _as_list(_normal_3_points(d, a, e))
    n5 = _as_list(_normal_3_points(c, b, a))
    # Four sides then the base split into two triangles
    triangles = ((b, e, a), (b, c, e), (c, d, e), (d, a, e),
                 (a, b, c), (c, d, a))
    vertex_array = [_as_list(p) for tri in triangles for p in tri]
    normal_array = [n1] * 3 + [n2] * 3 + [n3] * 3 + [n4] * 3 + [n5] * 6
    # Return VBO Object
    return VBObject(_vbo.VBO(_array(vertex_array, 'f')),
                    _vbo.VBO(_array(normal_array, 'f')), len(vertex_array))
def create_tetrahedron_vbo(edge=1.0):
    """
    Creates a VBO tetrahedron for shaders.
    :param edge: Edge length
    :type edge: float, int
    :return: VBO object
    :rtype: VBObject
    """
    def _as_list(point):
        """
        Export a point to a plain list.
        :param point: Point element
        :return: Coordinate list
        :rtype: list
        """
        return point.export_to_list()
    # Base triangle (a, b, c) and apex d
    a = Point3(-0.5, -0.288675, -0.288675) * edge
    b = Point3(0.5, -0.288675, -0.288675) * edge
    c = Point3(0.0, 0.577350, -0.288675) * edge
    d = Point3(0.0, 0.0, 0.57735) * edge
    # One normal per face: three lateral (n1..n3) plus the base (n4)
    n1 = _as_list(_normal_3_points(a, b, d))
    n2 = _as_list(_normal_3_points(b, c, d))
    n3 = _as_list(_normal_3_points(c, a, d))
    n4 = _as_list(_normal_3_points(c, b, a))
    # Three lateral faces then the base
    triangles = ((a, b, d), (b, c, d), (c, a, d), (a, b, c))
    vertex_array = [_as_list(p) for tri in triangles for p in tri]
    normal_array = [n1] * 3 + [n2] * 3 + [n3] * 3 + [n4] * 3
    # Return VBO
    return VBObject(_vbo.VBO(_array(vertex_array, 'f')),
                    _vbo.VBO(_array(normal_array, 'f')),
                    len(vertex_array))
def create_tetrahedron(color=None):
    """
    Creates a tetrahedron.
    :param color: Tetrahedron color
    :type color: list
    :return: OpenGL list
    """
    display_list = _gl.glGenLists(1)
    _gl.glNewList(display_list, _gl.GL_COMPILE)
    _gl.glPushMatrix()
    if color is not None:
        _gl.glColor4fv(color)
    # noinspection PyBroadException
    try:
        _glut.glutSolidTetrahedron()
    except:
        # Warn only once per session that GLUT is unavailable
        if not _FIGURES_ERRS[5]:
            _print_gl_error('OpenGL actual version does not support glutSolidTetrahedron function')
        _FIGURES_ERRS[5] = True
    _gl.glPopMatrix()
    _gl.glEndList()
    return display_list
def create_dodecahedron(color=None):
    """
    Creates a dodecahedron.
    :param color: Dodecahedron color
    :type color: list
    :return: OpenGL list
    """
    obj = _gl.glGenLists(1)
    _gl.glNewList(obj, _gl.GL_COMPILE)
    _gl.glPushMatrix()
    if color is not None:
        _gl.glColor4fv(color)
    try:
        _glut.glutSolidDodecahedron()
    except Exception:
        # Warn only once; message typo fixed ('dost not' -> 'does not')
        if not _FIGURES_ERRS[6]:
            _print_gl_error('OpenGL actual version does not support glutSolidDodecahedron function')
        _FIGURES_ERRS[6] = True
    _gl.glPopMatrix()
    _gl.glEndList()
    return obj
def create_octahedron(color=None):
    """
    Crates an octahedron.
    :param color: Octahedron color
    :type color: list
    :return: OpenGL list
    """
    display_list = _gl.glGenLists(1)
    _gl.glNewList(display_list, _gl.GL_COMPILE)
    _gl.glPushMatrix()
    if color is not None:
        _gl.glColor4fv(color)
    # noinspection PyBroadException
    try:
        _glut.glutSolidOctahedron()
    except:
        # Warn only once per session that GLUT is unavailable
        if not _FIGURES_ERRS[7]:
            _print_gl_error('OpenGL actual version does not support glutSolidOctahedron function')
        _FIGURES_ERRS[7] = True
    _gl.glPopMatrix()
    _gl.glEndList()
    return display_list
def create_icosahedron(color=None):
    """
    Creates an icosahedron.
    :param color: Icosahedron color
    :type color: list
    :return: OpenGL list
    """
    display_list = _gl.glGenLists(1)
    _gl.glNewList(display_list, _gl.GL_COMPILE)
    _gl.glPushMatrix()
    if color is not None:
        _gl.glColor4fv(color)
    # noinspection PyBroadException
    try:
        _glut.glutSolidIcosahedron()
    except:
        # Warn only once per session that GLUT is unavailable
        if not _FIGURES_ERRS[8]:
            _print_gl_error('OpenGL actual version does not support glutSolidIcosahedron function')
        _FIGURES_ERRS[8] = True
    _gl.glPopMatrix()
    _gl.glEndList()
    return display_list
| [
"OpenGL.GL.glDisable",
"OpenGL.GLUT.glutSolidOctahedron",
"OpenGL.GLUT.glutSolidCube",
"OpenGL.GL.glTranslate",
"math.sqrt",
"numpy.array",
"OpenGL.GL.glColor4fv",
"PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal",
"OpenGL.GL.glEnableClientState",
"OpenGL.GL.glPushMatrix",
"OpenGL.GL.glV... | [((17113, 17137), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(-1.0)', '(-1.0)', '(-1.0)'], {}), '(-1.0, -1.0, -1.0)\n', (17119, 17137), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((17146, 17169), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(1.0)', '(-1.0)', '(-1.0)'], {}), '(1.0, -1.0, -1.0)\n', (17152, 17169), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((17178, 17200), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(1.0)', '(-1.0)', '(1.0)'], {}), '(1.0, -1.0, 1.0)\n', (17184, 17200), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((17209, 17232), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(-1.0)', '(-1.0)', '(1.0)'], {}), '(-1.0, -1.0, 1.0)\n', (17215, 17232), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((17241, 17264), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(-1.0)', '(1.0)', '(-1.0)'], {}), '(-1.0, 1.0, -1.0)\n', (17247, 17264), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((17273, 17295), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(1.0)', '(1.0)', '(-1.0)'], {}), '(1.0, 1.0, -1.0)\n', (17279, 17295), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((17304, 17325), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(1.0)', '(1.0)', '(1.0)'], {}), '(1.0, 1.0, 1.0)\n', (17310, 17325), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((17334, 17356), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(-1.0)', '(1.0)', '(1.0)'], {}), '(-1.0, 1.0, 1.0)\n', (17340, 17356), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((17368, 17385), 'OpenGL.GL.glGenLists', '_gl.glGenLists', (['(1)'], {}), '(1)\n', (17382, 17385), True, 'import OpenGL.GL as _gl\n'), ((17390, 17424), 'OpenGL.GL.glNewList', '_gl.glNewList', (['obj', '_gl.GL_COMPILE'], {}), '(obj, _gl.GL_COMPILE)\n', (17403, 17424), True, 'import 
OpenGL.GL as _gl\n'), ((17429, 17447), 'OpenGL.GL.glPushMatrix', '_gl.glPushMatrix', ([], {}), '()\n', (17445, 17447), True, 'import OpenGL.GL as _gl\n'), ((17452, 17477), 'OpenGL.GL.glBegin', '_gl.glBegin', (['_gl.GL_QUADS'], {}), '(_gl.GL_QUADS)\n', (17463, 17477), True, 'import OpenGL.GL as _gl\n'), ((17538, 17582), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[a, b, c, d]'], {}), '([a, b, c, d])\n', (17568, 17582), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((17587, 17631), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[b, f, g, c]'], {}), '([b, f, g, c])\n', (17617, 17631), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((17636, 17680), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[f, e, h, g]'], {}), '([f, e, h, g])\n', (17666, 17680), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((17685, 17729), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[e, a, d, h]'], {}), '([e, a, d, h])\n', (17715, 17729), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((17734, 17778), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[d, c, g, h]'], {}), '([d, c, g, h])\n', (17764, 17778), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((17783, 17827), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[a, e, f, b]'], {}), '([a, e, f, 
b])\n', (17813, 17827), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((17832, 17843), 'OpenGL.GL.glEnd', '_gl.glEnd', ([], {}), '()\n', (17841, 17843), True, 'import OpenGL.GL as _gl\n'), ((17848, 17865), 'OpenGL.GL.glPopMatrix', '_gl.glPopMatrix', ([], {}), '()\n', (17863, 17865), True, 'import OpenGL.GL as _gl\n'), ((17870, 17885), 'OpenGL.GL.glEndList', '_gl.glEndList', ([], {}), '()\n', (17883, 17885), True, 'import OpenGL.GL as _gl\n'), ((18067, 18091), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(-1.0)', '(-1.0)', '(-1.0)'], {}), '(-1.0, -1.0, -1.0)\n', (18073, 18091), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((18100, 18123), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(1.0)', '(-1.0)', '(-1.0)'], {}), '(1.0, -1.0, -1.0)\n', (18106, 18123), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((18132, 18154), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(1.0)', '(-1.0)', '(1.0)'], {}), '(1.0, -1.0, 1.0)\n', (18138, 18154), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((18163, 18186), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(-1.0)', '(-1.0)', '(1.0)'], {}), '(-1.0, -1.0, 1.0)\n', (18169, 18186), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((18195, 18218), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(-1.0)', '(1.0)', '(-1.0)'], {}), '(-1.0, 1.0, -1.0)\n', (18201, 18218), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((18227, 18249), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(1.0)', '(1.0)', '(-1.0)'], {}), '(1.0, 1.0, -1.0)\n', (18233, 18249), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((18258, 18279), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(1.0)', '(1.0)', '(1.0)'], {}), '(1.0, 1.0, 1.0)\n', (18264, 18279), False, 'from PyOpenGLtoolbox.mathlib import 
Point3, _cos, _sin, Point2\n'), ((18288, 18310), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(-1.0)', '(1.0)', '(1.0)'], {}), '(-1.0, 1.0, 1.0)\n', (18294, 18310), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((18392, 18409), 'OpenGL.GL.glGenLists', '_gl.glGenLists', (['(1)'], {}), '(1)\n', (18406, 18409), True, 'import OpenGL.GL as _gl\n'), ((18414, 18448), 'OpenGL.GL.glNewList', '_gl.glNewList', (['obj', '_gl.GL_COMPILE'], {}), '(obj, _gl.GL_COMPILE)\n', (18427, 18448), True, 'import OpenGL.GL as _gl\n'), ((18453, 18471), 'OpenGL.GL.glPushMatrix', '_gl.glPushMatrix', ([], {}), '()\n', (18469, 18471), True, 'import OpenGL.GL as _gl\n'), ((18670, 18695), 'OpenGL.GL.glBegin', '_gl.glBegin', (['_gl.GL_QUADS'], {}), '(_gl.GL_QUADS)\n', (18681, 18695), True, 'import OpenGL.GL as _gl\n'), ((18700, 18761), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal_textured', 'draw_vertex_list_create_normal_textured', (['[a, b, c, d]', 't_list'], {}), '([a, b, c, d], t_list)\n', (18739, 18761), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((18766, 18827), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal_textured', 'draw_vertex_list_create_normal_textured', (['[b, f, g, c]', 't_list'], {}), '([b, f, g, c], t_list)\n', (18805, 18827), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((18832, 18893), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal_textured', 'draw_vertex_list_create_normal_textured', (['[f, e, h, g]', 't_list'], {}), '([f, e, h, g], t_list)\n', (18871, 18893), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((18898, 18959), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal_textured', 'draw_vertex_list_create_normal_textured', 
(['[e, a, d, h]', 't_list'], {}), '([e, a, d, h], t_list)\n', (18937, 18959), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((18964, 19025), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal_textured', 'draw_vertex_list_create_normal_textured', (['[d, c, g, h]', 't_list'], {}), '([d, c, g, h], t_list)\n', (19003, 19025), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((19030, 19091), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal_textured', 'draw_vertex_list_create_normal_textured', (['[a, e, f, b]', 't_list'], {}), '([a, e, f, b], t_list)\n', (19069, 19091), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((19096, 19107), 'OpenGL.GL.glEnd', '_gl.glEnd', ([], {}), '()\n', (19105, 19107), True, 'import OpenGL.GL as _gl\n'), ((19244, 19261), 'OpenGL.GL.glPopMatrix', '_gl.glPopMatrix', ([], {}), '()\n', (19259, 19261), True, 'import OpenGL.GL as _gl\n'), ((19266, 19281), 'OpenGL.GL.glEndList', '_gl.glEndList', ([], {}), '()\n', (19279, 19281), True, 'import OpenGL.GL as _gl\n'), ((20504, 20521), 'OpenGL.GL.glGenLists', '_gl.glGenLists', (['(1)'], {}), '(1)\n', (20518, 20521), True, 'import OpenGL.GL as _gl\n'), ((20526, 20560), 'OpenGL.GL.glNewList', '_gl.glNewList', (['obj', '_gl.GL_COMPILE'], {}), '(obj, _gl.GL_COMPILE)\n', (20539, 20560), True, 'import OpenGL.GL as _gl\n'), ((20565, 20583), 'OpenGL.GL.glPushMatrix', '_gl.glPushMatrix', ([], {}), '()\n', (20581, 20583), True, 'import OpenGL.GL as _gl\n'), ((20892, 20909), 'OpenGL.GL.glPopMatrix', '_gl.glPopMatrix', ([], {}), '()\n', (20907, 20909), True, 'import OpenGL.GL as _gl\n'), ((20914, 20929), 'OpenGL.GL.glEndList', '_gl.glEndList', ([], {}), '()\n', (20927, 20929), True, 'import OpenGL.GL as _gl\n'), ((21381, 21398), 
'OpenGL.GL.glGenLists', '_gl.glGenLists', (['(1)'], {}), '(1)\n', (21395, 21398), True, 'import OpenGL.GL as _gl\n'), ((21403, 21437), 'OpenGL.GL.glNewList', '_gl.glNewList', (['obj', '_gl.GL_COMPILE'], {}), '(obj, _gl.GL_COMPILE)\n', (21416, 21437), True, 'import OpenGL.GL as _gl\n'), ((21442, 21460), 'OpenGL.GL.glPushMatrix', '_gl.glPushMatrix', ([], {}), '()\n', (21458, 21460), True, 'import OpenGL.GL as _gl\n'), ((21521, 21546), 'OpenGL.GL.glBegin', '_gl.glBegin', (['_gl.GL_QUADS'], {}), '(_gl.GL_QUADS)\n', (21532, 21546), True, 'import OpenGL.GL as _gl\n'), ((21551, 21595), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[d, c, b, a]'], {}), '([d, c, b, a])\n', (21581, 21595), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((21600, 21611), 'OpenGL.GL.glEnd', '_gl.glEnd', ([], {}), '()\n', (21609, 21611), True, 'import OpenGL.GL as _gl\n'), ((21616, 21645), 'OpenGL.GL.glBegin', '_gl.glBegin', (['_gl.GL_TRIANGLES'], {}), '(_gl.GL_TRIANGLES)\n', (21627, 21645), True, 'import OpenGL.GL as _gl\n'), ((21650, 21691), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[a, b, e]'], {}), '([a, b, e])\n', (21680, 21691), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((21696, 21737), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[b, c, e]'], {}), '([b, c, e])\n', (21726, 21737), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((21742, 21783), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[c, d, e]'], {}), '([c, d, e])\n', (21772, 21783), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, 
draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((21788, 21829), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[d, a, e]'], {}), '([d, a, e])\n', (21818, 21829), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((21834, 21845), 'OpenGL.GL.glEnd', '_gl.glEnd', ([], {}), '()\n', (21843, 21845), True, 'import OpenGL.GL as _gl\n'), ((21850, 21867), 'OpenGL.GL.glPopMatrix', '_gl.glPopMatrix', ([], {}), '()\n', (21865, 21867), True, 'import OpenGL.GL as _gl\n'), ((21872, 21887), 'OpenGL.GL.glEndList', '_gl.glEndList', ([], {}), '()\n', (21885, 21887), True, 'import OpenGL.GL as _gl\n'), ((22472, 22489), 'OpenGL.GL.glGenLists', '_gl.glGenLists', (['(1)'], {}), '(1)\n', (22486, 22489), True, 'import OpenGL.GL as _gl\n'), ((22494, 22528), 'OpenGL.GL.glNewList', '_gl.glNewList', (['obj', '_gl.GL_COMPILE'], {}), '(obj, _gl.GL_COMPILE)\n', (22507, 22528), True, 'import OpenGL.GL as _gl\n'), ((22533, 22551), 'OpenGL.GL.glPushMatrix', '_gl.glPushMatrix', ([], {}), '()\n', (22549, 22551), True, 'import OpenGL.GL as _gl\n'), ((22749, 22774), 'OpenGL.GL.glBegin', '_gl.glBegin', (['_gl.GL_QUADS'], {}), '(_gl.GL_QUADS)\n', (22760, 22774), True, 'import OpenGL.GL as _gl\n'), ((22779, 22840), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal_textured', 'draw_vertex_list_create_normal_textured', (['[d, c, b, a]', 't_list'], {}), '([d, c, b, a], t_list)\n', (22818, 22840), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((22845, 22856), 'OpenGL.GL.glEnd', '_gl.glEnd', ([], {}), '()\n', (22854, 22856), True, 'import OpenGL.GL as _gl\n'), ((22861, 22890), 'OpenGL.GL.glBegin', '_gl.glBegin', (['_gl.GL_TRIANGLES'], {}), '(_gl.GL_TRIANGLES)\n', (22872, 22890), True, 'import OpenGL.GL as _gl\n'), ((22895, 22958), 
'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal_textured', 'draw_vertex_list_create_normal_textured', (['[a, b, e]', 't_list_face'], {}), '([a, b, e], t_list_face)\n', (22934, 22958), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((22963, 23026), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal_textured', 'draw_vertex_list_create_normal_textured', (['[b, c, e]', 't_list_face'], {}), '([b, c, e], t_list_face)\n', (23002, 23026), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((23031, 23094), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal_textured', 'draw_vertex_list_create_normal_textured', (['[c, d, e]', 't_list_face'], {}), '([c, d, e], t_list_face)\n', (23070, 23094), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((23099, 23162), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal_textured', 'draw_vertex_list_create_normal_textured', (['[d, a, e]', 't_list_face'], {}), '([d, a, e], t_list_face)\n', (23138, 23162), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((23167, 23178), 'OpenGL.GL.glEnd', '_gl.glEnd', ([], {}), '()\n', (23176, 23178), True, 'import OpenGL.GL as _gl\n'), ((23314, 23331), 'OpenGL.GL.glPopMatrix', '_gl.glPopMatrix', ([], {}), '()\n', (23329, 23331), True, 'import OpenGL.GL as _gl\n'), ((23336, 23351), 'OpenGL.GL.glEndList', '_gl.glEndList', ([], {}), '()\n', (23349, 23351), True, 'import OpenGL.GL as _gl\n'), ((23570, 23593), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(-1.0)', '(-1.0)', '(0.0)'], {}), '(-1.0, -1.0, 0.0)\n', (23576, 23593), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((23644, 23666), 
'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(1.0)', '(-1.0)', '(0.0)'], {}), '(1.0, -1.0, 0.0)\n', (23650, 23666), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((23717, 23738), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(1.0)', '(1.0)', '(0.0)'], {}), '(1.0, 1.0, 0.0)\n', (23723, 23738), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((23789, 23811), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(-1.0)', '(1.0)', '(0.0)'], {}), '(-1.0, 1.0, 0.0)\n', (23795, 23811), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((23862, 23883), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 1.0)\n', (23868, 23883), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((23934, 23956), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(0.0)', '(0.0)', '(-1.0)'], {}), '(0.0, 0.0, -1.0)\n', (23940, 23956), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((23968, 23985), 'OpenGL.GL.glGenLists', '_gl.glGenLists', (['(1)'], {}), '(1)\n', (23982, 23985), True, 'import OpenGL.GL as _gl\n'), ((23990, 24024), 'OpenGL.GL.glNewList', '_gl.glNewList', (['obj', '_gl.GL_COMPILE'], {}), '(obj, _gl.GL_COMPILE)\n', (24003, 24024), True, 'import OpenGL.GL as _gl\n'), ((24029, 24047), 'OpenGL.GL.glPushMatrix', '_gl.glPushMatrix', ([], {}), '()\n', (24045, 24047), True, 'import OpenGL.GL as _gl\n'), ((24108, 24137), 'OpenGL.GL.glBegin', '_gl.glBegin', (['_gl.GL_TRIANGLES'], {}), '(_gl.GL_TRIANGLES)\n', (24119, 24137), True, 'import OpenGL.GL as _gl\n'), ((24142, 24183), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[a, b, e]'], {}), '([a, b, e])\n', (24172, 24183), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((24188, 24229), 
'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[b, c, e]'], {}), '([b, c, e])\n', (24218, 24229), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((24234, 24275), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[c, d, e]'], {}), '([c, d, e])\n', (24264, 24275), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((24280, 24321), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[d, a, e]'], {}), '([d, a, e])\n', (24310, 24321), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((24326, 24367), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[b, a, f]'], {}), '([b, a, f])\n', (24356, 24367), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((24372, 24413), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[c, b, f]'], {}), '([c, b, f])\n', (24402, 24413), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((24418, 24459), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[d, c, f]'], {}), '([d, c, f])\n', (24448, 24459), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((24464, 24505), 'PyOpenGLtoolbox.geometry.draw_vertex_list_create_normal', 'draw_vertex_list_create_normal', (['[a, d, f]'], {}), '([a, d, f])\n', (24494, 24505), False, 'from 
PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((24510, 24521), 'OpenGL.GL.glEnd', '_gl.glEnd', ([], {}), '()\n', (24519, 24521), True, 'import OpenGL.GL as _gl\n'), ((24526, 24543), 'OpenGL.GL.glPopMatrix', '_gl.glPopMatrix', ([], {}), '()\n', (24541, 24543), True, 'import OpenGL.GL as _gl\n'), ((24548, 24563), 'OpenGL.GL.glEndList', '_gl.glEndList', ([], {}), '()\n', (24561, 24563), True, 'import OpenGL.GL as _gl\n'), ((24745, 24762), 'OpenGL.GL.glGenLists', '_gl.glGenLists', (['(1)'], {}), '(1)\n', (24759, 24762), True, 'import OpenGL.GL as _gl\n'), ((24767, 24801), 'OpenGL.GL.glNewList', '_gl.glNewList', (['obj', '_gl.GL_COMPILE'], {}), '(obj, _gl.GL_COMPILE)\n', (24780, 24801), True, 'import OpenGL.GL as _gl\n'), ((24806, 24824), 'OpenGL.GL.glPushMatrix', '_gl.glPushMatrix', ([], {}), '()\n', (24822, 24824), True, 'import OpenGL.GL as _gl\n'), ((24885, 24910), 'OpenGL.GL.glRotate', '_gl.glRotate', (['(90)', '(1)', '(0)', '(0)'], {}), '(90, 1, 0, 0)\n', (24897, 24910), True, 'import OpenGL.GL as _gl\n'), ((25168, 25185), 'OpenGL.GL.glPopMatrix', '_gl.glPopMatrix', ([], {}), '()\n', (25183, 25185), True, 'import OpenGL.GL as _gl\n'), ((25190, 25205), 'OpenGL.GL.glEndList', '_gl.glEndList', ([], {}), '()\n', (25203, 25205), True, 'import OpenGL.GL as _gl\n'), ((25393, 25410), 'OpenGL.GL.glGenLists', '_gl.glGenLists', (['(1)'], {}), '(1)\n', (25407, 25410), True, 'import OpenGL.GL as _gl\n'), ((25415, 25449), 'OpenGL.GL.glNewList', '_gl.glNewList', (['obj', '_gl.GL_COMPILE'], {}), '(obj, _gl.GL_COMPILE)\n', (25428, 25449), True, 'import OpenGL.GL as _gl\n'), ((25454, 25472), 'OpenGL.GL.glPushMatrix', '_gl.glPushMatrix', ([], {}), '()\n', (25470, 25472), True, 'import OpenGL.GL as _gl\n'), ((25670, 25695), 'OpenGL.GL.glRotate', '_gl.glRotate', (['(90)', '(1)', '(0)', '(0)'], {}), '(90, 1, 0, 0)\n', (25682, 25695), True, 'import OpenGL.GL as _gl\n'), ((26083, 26100), 
'OpenGL.GL.glPopMatrix', '_gl.glPopMatrix', ([], {}), '()\n', (26098, 26100), True, 'import OpenGL.GL as _gl\n'), ((26105, 26120), 'OpenGL.GL.glEndList', '_gl.glEndList', ([], {}), '()\n', (26118, 26120), True, 'import OpenGL.GL as _gl\n'), ((28875, 28892), 'OpenGL.GL.glGenLists', '_gl.glGenLists', (['(1)'], {}), '(1)\n', (28889, 28892), True, 'import OpenGL.GL as _gl\n'), ((28897, 28931), 'OpenGL.GL.glNewList', '_gl.glNewList', (['obj', '_gl.GL_COMPILE'], {}), '(obj, _gl.GL_COMPILE)\n', (28910, 28931), True, 'import OpenGL.GL as _gl\n'), ((28936, 28954), 'OpenGL.GL.glPushMatrix', '_gl.glPushMatrix', ([], {}), '()\n', (28952, 28954), True, 'import OpenGL.GL as _gl\n'), ((29274, 29291), 'OpenGL.GL.glPopMatrix', '_gl.glPopMatrix', ([], {}), '()\n', (29289, 29291), True, 'import OpenGL.GL as _gl\n'), ((29296, 29311), 'OpenGL.GL.glEndList', '_gl.glEndList', ([], {}), '()\n', (29309, 29311), True, 'import OpenGL.GL as _gl\n'), ((29505, 29522), 'OpenGL.GL.glGenLists', '_gl.glGenLists', (['(1)'], {}), '(1)\n', (29519, 29522), True, 'import OpenGL.GL as _gl\n'), ((29527, 29561), 'OpenGL.GL.glNewList', '_gl.glNewList', (['obj', '_gl.GL_COMPILE'], {}), '(obj, _gl.GL_COMPILE)\n', (29540, 29561), True, 'import OpenGL.GL as _gl\n'), ((29566, 29584), 'OpenGL.GL.glPushMatrix', '_gl.glPushMatrix', ([], {}), '()\n', (29582, 29584), True, 'import OpenGL.GL as _gl\n'), ((29906, 29923), 'OpenGL.GL.glPopMatrix', '_gl.glPopMatrix', ([], {}), '()\n', (29921, 29923), True, 'import OpenGL.GL as _gl\n'), ((29928, 29943), 'OpenGL.GL.glEndList', '_gl.glEndList', ([], {}), '()\n', (29941, 29943), True, 'import OpenGL.GL as _gl\n'), ((30131, 30148), 'OpenGL.GL.glGenLists', '_gl.glGenLists', (['(1)'], {}), '(1)\n', (30145, 30148), True, 'import OpenGL.GL as _gl\n'), ((30153, 30187), 'OpenGL.GL.glNewList', '_gl.glNewList', (['obj', '_gl.GL_COMPILE'], {}), '(obj, _gl.GL_COMPILE)\n', (30166, 30187), True, 'import OpenGL.GL as _gl\n'), ((30192, 30210), 'OpenGL.GL.glPushMatrix', '_gl.glPushMatrix', 
([], {}), '()\n', (30208, 30210), True, 'import OpenGL.GL as _gl\n'), ((30528, 30545), 'OpenGL.GL.glPopMatrix', '_gl.glPopMatrix', ([], {}), '()\n', (30543, 30545), True, 'import OpenGL.GL as _gl\n'), ((30550, 30565), 'OpenGL.GL.glEndList', '_gl.glEndList', ([], {}), '()\n', (30563, 30565), True, 'import OpenGL.GL as _gl\n'), ((30757, 30774), 'OpenGL.GL.glGenLists', '_gl.glGenLists', (['(1)'], {}), '(1)\n', (30771, 30774), True, 'import OpenGL.GL as _gl\n'), ((30779, 30813), 'OpenGL.GL.glNewList', '_gl.glNewList', (['obj', '_gl.GL_COMPILE'], {}), '(obj, _gl.GL_COMPILE)\n', (30792, 30813), True, 'import OpenGL.GL as _gl\n'), ((30818, 30836), 'OpenGL.GL.glPushMatrix', '_gl.glPushMatrix', ([], {}), '()\n', (30834, 30836), True, 'import OpenGL.GL as _gl\n'), ((31156, 31173), 'OpenGL.GL.glPopMatrix', '_gl.glPopMatrix', ([], {}), '()\n', (31171, 31173), True, 'import OpenGL.GL as _gl\n'), ((31178, 31193), 'OpenGL.GL.glEndList', '_gl.glEndList', ([], {}), '()\n', (31191, 31193), True, 'import OpenGL.GL as _gl\n'), ((13149, 13166), 'OpenGL.GL.glGenLists', '_gl.glGenLists', (['(1)'], {}), '(1)\n', (13163, 13166), True, 'import OpenGL.GL as _gl\n'), ((13175, 13209), 'OpenGL.GL.glNewList', '_gl.glNewList', (['obj', '_gl.GL_COMPILE'], {}), '(obj, _gl.GL_COMPILE)\n', (13188, 13209), True, 'import OpenGL.GL as _gl\n'), ((13218, 13236), 'OpenGL.GL.glPushMatrix', '_gl.glPushMatrix', ([], {}), '()\n', (13234, 13236), True, 'import OpenGL.GL as _gl\n'), ((14471, 14488), 'OpenGL.GL.glPopMatrix', '_gl.glPopMatrix', ([], {}), '()\n', (14486, 14488), True, 'import OpenGL.GL as _gl\n'), ((14497, 14512), 'OpenGL.GL.glEndList', '_gl.glEndList', ([], {}), '()\n', (14510, 14512), True, 'import OpenGL.GL as _gl\n'), ((15029, 15046), 'OpenGL.GL.glGenLists', '_gl.glGenLists', (['(1)'], {}), '(1)\n', (15043, 15046), True, 'import OpenGL.GL as _gl\n'), ((15055, 15089), 'OpenGL.GL.glNewList', '_gl.glNewList', (['obj', '_gl.GL_COMPILE'], {}), '(obj, _gl.GL_COMPILE)\n', (15068, 15089), True, 'import 
OpenGL.GL as _gl\n'), ((15098, 15116), 'OpenGL.GL.glPushMatrix', '_gl.glPushMatrix', ([], {}), '()\n', (15114, 15116), True, 'import OpenGL.GL as _gl\n'), ((15207, 15234), 'OpenGL.GL.glBegin', '_gl.glBegin', (['_gl.GL_POLYGON'], {}), '(_gl.GL_POLYGON)\n', (15218, 15234), True, 'import OpenGL.GL as _gl\n'), ((15392, 15403), 'OpenGL.GL.glEnd', '_gl.glEnd', ([], {}), '()\n', (15401, 15403), True, 'import OpenGL.GL as _gl\n'), ((15412, 15441), 'OpenGL.GL.glBegin', '_gl.glBegin', (['_gl.GL_LINE_LOOP'], {}), '(_gl.GL_LINE_LOOP)\n', (15423, 15441), True, 'import OpenGL.GL as _gl\n'), ((15563, 15574), 'OpenGL.GL.glEnd', '_gl.glEnd', ([], {}), '()\n', (15572, 15574), True, 'import OpenGL.GL as _gl\n'), ((15583, 15600), 'OpenGL.GL.glPopMatrix', '_gl.glPopMatrix', ([], {}), '()\n', (15598, 15600), True, 'import OpenGL.GL as _gl\n'), ((15609, 15624), 'OpenGL.GL.glEndList', '_gl.glEndList', ([], {}), '()\n', (15622, 15624), True, 'import OpenGL.GL as _gl\n'), ((16314, 16331), 'OpenGL.GL.glGenLists', '_gl.glGenLists', (['(1)'], {}), '(1)\n', (16328, 16331), True, 'import OpenGL.GL as _gl\n'), ((16340, 16374), 'OpenGL.GL.glNewList', '_gl.glNewList', (['obj', '_gl.GL_COMPILE'], {}), '(obj, _gl.GL_COMPILE)\n', (16353, 16374), True, 'import OpenGL.GL as _gl\n'), ((16383, 16401), 'OpenGL.GL.glPushMatrix', '_gl.glPushMatrix', ([], {}), '()\n', (16399, 16401), True, 'import OpenGL.GL as _gl\n'), ((16769, 16795), 'OpenGL.GL.glCallList', '_gl.glCallList', (['circlebase'], {}), '(circlebase)\n', (16783, 16795), True, 'import OpenGL.GL as _gl\n'), ((16804, 16821), 'OpenGL.GL.glPopMatrix', '_gl.glPopMatrix', ([], {}), '()\n', (16819, 16821), True, 'import OpenGL.GL as _gl\n'), ((16830, 16845), 'OpenGL.GL.glEndList', '_gl.glEndList', ([], {}), '()\n', (16843, 16845), True, 'import OpenGL.GL as _gl\n'), ((17512, 17533), 'OpenGL.GL.glColor4fv', '_gl.glColor4fv', (['color'], {}), '(color)\n', (17526, 17533), True, 'import OpenGL.GL as _gl\n'), ((18325, 18337), 'PyOpenGLtoolbox.mathlib.Point2', 
'Point2', (['(0)', '(0)'], {}), '(0, 0)\n', (18331, 18337), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((18339, 18351), 'PyOpenGLtoolbox.mathlib.Point2', 'Point2', (['(1)', '(0)'], {}), '(1, 0)\n', (18345, 18351), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((18353, 18365), 'PyOpenGLtoolbox.mathlib.Point2', 'Point2', (['(1)', '(1)'], {}), '(1, 1)\n', (18359, 18365), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((18367, 18379), 'PyOpenGLtoolbox.mathlib.Point2', 'Point2', (['(0)', '(1)'], {}), '(0, 1)\n', (18373, 18379), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((18521, 18562), 'OpenGL.GL.glActiveTexture', '_gl.glActiveTexture', (['(_gl.GL_TEXTURE0 + _i)'], {}), '(_gl.GL_TEXTURE0 + _i)\n', (18540, 18562), True, 'import OpenGL.GL as _gl\n'), ((18571, 18602), 'OpenGL.GL.glEnable', '_gl.glEnable', (['_gl.GL_TEXTURE_2D'], {}), '(_gl.GL_TEXTURE_2D)\n', (18583, 18602), True, 'import OpenGL.GL as _gl\n'), ((18611, 18665), 'OpenGL.GL.glBindTexture', '_gl.glBindTexture', (['_gl.GL_TEXTURE_2D', 'texture_list[_i]'], {}), '(_gl.GL_TEXTURE_2D, texture_list[_i])\n', (18628, 18665), True, 'import OpenGL.GL as _gl\n'), ((19157, 19198), 'OpenGL.GL.glActiveTexture', '_gl.glActiveTexture', (['(_gl.GL_TEXTURE0 + _i)'], {}), '(_gl.GL_TEXTURE0 + _i)\n', (19176, 19198), True, 'import OpenGL.GL as _gl\n'), ((19207, 19239), 'OpenGL.GL.glDisable', '_gl.glDisable', (['_gl.GL_TEXTURE_2D'], {}), '(_gl.GL_TEXTURE_2D)\n', (19220, 19239), True, 'import OpenGL.GL as _gl\n'), ((19726, 19743), 'OpenGL.GL.glGenLists', '_gl.glGenLists', (['(1)'], {}), '(1)\n', (19740, 19743), True, 'import OpenGL.GL as _gl\n'), ((19752, 19786), 'OpenGL.GL.glNewList', '_gl.glNewList', (['obj', '_gl.GL_COMPILE'], {}), '(obj, _gl.GL_COMPILE)\n', (19765, 19786), True, 'import OpenGL.GL as _gl\n'), ((19795, 19813), 'OpenGL.GL.glPushMatrix', '_gl.glPushMatrix', ([], {}), '()\n', (19811, 19813), 
True, 'import OpenGL.GL as _gl\n'), ((20181, 20198), 'OpenGL.GL.glPopMatrix', '_gl.glPopMatrix', ([], {}), '()\n', (20196, 20198), True, 'import OpenGL.GL as _gl\n'), ((20207, 20222), 'OpenGL.GL.glEndList', '_gl.glEndList', ([], {}), '()\n', (20220, 20222), True, 'import OpenGL.GL as _gl\n'), ((20618, 20639), 'OpenGL.GL.glColor4fv', '_gl.glColor4fv', (['color'], {}), '(color)\n', (20632, 20639), True, 'import OpenGL.GL as _gl\n'), ((20693, 20717), 'OpenGL.GLUT.glutSolidCube', '_glut.glutSolidCube', (['(1.0)'], {}), '(1.0)\n', (20712, 20717), True, 'import OpenGL.GLUT as _glut\n'), ((21123, 21149), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(-0.5)', '(-0.5)', '(-0.333)'], {}), '(-0.5, -0.5, -0.333)\n', (21129, 21149), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((21167, 21192), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(0.5)', '(-0.5)', '(-0.333)'], {}), '(0.5, -0.5, -0.333)\n', (21173, 21192), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((21210, 21234), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(0.5)', '(0.5)', '(-0.333)'], {}), '(0.5, 0.5, -0.333)\n', (21216, 21234), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((21252, 21277), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(-0.5)', '(0.5)', '(-0.333)'], {}), '(-0.5, 0.5, -0.333)\n', (21258, 21277), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((21337, 21360), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(0.0)', '(0.0)', '(0.666)'], {}), '(0.0, 0.0, 0.666)\n', (21343, 21360), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((21495, 21516), 'OpenGL.GL.glColor4fv', '_gl.glColor4fv', (['color'], {}), '(color)\n', (21509, 21516), True, 'import OpenGL.GL as _gl\n'), ((22089, 22115), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(-0.5)', '(-0.5)', '(-0.333)'], {}), '(-0.5, -0.5, -0.333)\n', (22095, 22115), False, 'from PyOpenGLtoolbox.mathlib import 
Point3, _cos, _sin, Point2\n'), ((22131, 22156), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(0.5)', '(-0.5)', '(-0.333)'], {}), '(0.5, -0.5, -0.333)\n', (22137, 22156), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((22172, 22196), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(0.5)', '(0.5)', '(-0.333)'], {}), '(0.5, 0.5, -0.333)\n', (22178, 22196), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((22212, 22237), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(-0.5)', '(0.5)', '(-0.333)'], {}), '(-0.5, 0.5, -0.333)\n', (22218, 22237), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((22295, 22318), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(0.0)', '(0.0)', '(0.666)'], {}), '(0.0, 0.0, 0.666)\n', (22301, 22318), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((22340, 22352), 'PyOpenGLtoolbox.mathlib.Point2', 'Point2', (['(0)', '(0)'], {}), '(0, 0)\n', (22346, 22352), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((22354, 22366), 'PyOpenGLtoolbox.mathlib.Point2', 'Point2', (['(1)', '(0)'], {}), '(1, 0)\n', (22360, 22366), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((22368, 22380), 'PyOpenGLtoolbox.mathlib.Point2', 'Point2', (['(1)', '(1)'], {}), '(1, 1)\n', (22374, 22380), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((22382, 22394), 'PyOpenGLtoolbox.mathlib.Point2', 'Point2', (['(0)', '(1)'], {}), '(0, 1)\n', (22388, 22394), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((22415, 22427), 'PyOpenGLtoolbox.mathlib.Point2', 'Point2', (['(0)', '(0)'], {}), '(0, 0)\n', (22421, 22427), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((22429, 22445), 'PyOpenGLtoolbox.mathlib.Point2', 'Point2', (['(0.5)', '(1.0)'], {}), '(0.5, 1.0)\n', (22435, 22445), False, 'from PyOpenGLtoolbox.mathlib import Point3, 
_cos, _sin, Point2\n'), ((22447, 22459), 'PyOpenGLtoolbox.mathlib.Point2', 'Point2', (['(1)', '(0)'], {}), '(1, 0)\n', (22453, 22459), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((22600, 22641), 'OpenGL.GL.glActiveTexture', '_gl.glActiveTexture', (['(_gl.GL_TEXTURE0 + _i)'], {}), '(_gl.GL_TEXTURE0 + _i)\n', (22619, 22641), True, 'import OpenGL.GL as _gl\n'), ((22650, 22681), 'OpenGL.GL.glEnable', '_gl.glEnable', (['_gl.GL_TEXTURE_2D'], {}), '(_gl.GL_TEXTURE_2D)\n', (22662, 22681), True, 'import OpenGL.GL as _gl\n'), ((22690, 22744), 'OpenGL.GL.glBindTexture', '_gl.glBindTexture', (['_gl.GL_TEXTURE_2D', 'texture_list[_i]'], {}), '(_gl.GL_TEXTURE_2D, texture_list[_i])\n', (22707, 22744), True, 'import OpenGL.GL as _gl\n'), ((23227, 23268), 'OpenGL.GL.glActiveTexture', '_gl.glActiveTexture', (['(_gl.GL_TEXTURE0 + _i)'], {}), '(_gl.GL_TEXTURE0 + _i)\n', (23246, 23268), True, 'import OpenGL.GL as _gl\n'), ((23277, 23309), 'OpenGL.GL.glDisable', '_gl.glDisable', (['_gl.GL_TEXTURE_2D'], {}), '(_gl.GL_TEXTURE_2D)\n', (23290, 23309), True, 'import OpenGL.GL as _gl\n'), ((24082, 24103), 'OpenGL.GL.glColor4fv', '_gl.glColor4fv', (['color'], {}), '(color)\n', (24096, 24103), True, 'import OpenGL.GL as _gl\n'), ((24859, 24880), 'OpenGL.GL.glColor4fv', '_gl.glColor4fv', (['color'], {}), '(color)\n', (24873, 24880), True, 'import OpenGL.GL as _gl\n'), ((24964, 24990), 'OpenGL.GLUT.glutSolidTeapot', '_glut.glutSolidTeapot', (['(1.0)'], {}), '(1.0)\n', (24985, 24990), True, 'import OpenGL.GLUT as _glut\n'), ((25521, 25562), 'OpenGL.GL.glActiveTexture', '_gl.glActiveTexture', (['(_gl.GL_TEXTURE0 + _i)'], {}), '(_gl.GL_TEXTURE0 + _i)\n', (25540, 25562), True, 'import OpenGL.GL as _gl\n'), ((25571, 25602), 'OpenGL.GL.glEnable', '_gl.glEnable', (['_gl.GL_TEXTURE_2D'], {}), '(_gl.GL_TEXTURE_2D)\n', (25583, 25602), True, 'import OpenGL.GL as _gl\n'), ((25611, 25665), 'OpenGL.GL.glBindTexture', '_gl.glBindTexture', (['_gl.GL_TEXTURE_2D', 
'texture_list[_i]'], {}), '(_gl.GL_TEXTURE_2D, texture_list[_i])\n', (25628, 25665), True, 'import OpenGL.GL as _gl\n'), ((25749, 25775), 'OpenGL.GLUT.glutSolidTeapot', '_glut.glutSolidTeapot', (['(1.0)'], {}), '(1.0)\n', (25770, 25775), True, 'import OpenGL.GLUT as _glut\n'), ((25996, 26037), 'OpenGL.GL.glActiveTexture', '_gl.glActiveTexture', (['(_gl.GL_TEXTURE0 + _i)'], {}), '(_gl.GL_TEXTURE0 + _i)\n', (26015, 26037), True, 'import OpenGL.GL as _gl\n'), ((26046, 26078), 'OpenGL.GL.glDisable', '_gl.glDisable', (['_gl.GL_TEXTURE_2D'], {}), '(_gl.GL_TEXTURE_2D)\n', (26059, 26078), True, 'import OpenGL.GL as _gl\n'), ((26552, 26578), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(-0.5)', '(-0.5)', '(-0.333)'], {}), '(-0.5, -0.5, -0.333)\n', (26558, 26578), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((26594, 26619), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(0.5)', '(-0.5)', '(-0.333)'], {}), '(0.5, -0.5, -0.333)\n', (26600, 26619), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((26635, 26659), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(0.5)', '(0.5)', '(-0.333)'], {}), '(0.5, 0.5, -0.333)\n', (26641, 26659), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((26675, 26700), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(-0.5)', '(0.5)', '(-0.333)'], {}), '(-0.5, 0.5, -0.333)\n', (26681, 26700), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((26758, 26781), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(0.0)', '(0.0)', '(0.666)'], {}), '(0.0, 0.0, 0.666)\n', (26764, 26781), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((26823, 26848), 'PyOpenGLtoolbox.geometry._normal_3_points', '_normal_3_points', (['a', 'b', 'e'], {}), '(a, b, e)\n', (26839, 26848), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((26862, 26887), 
'PyOpenGLtoolbox.geometry._normal_3_points', '_normal_3_points', (['b', 'c', 'e'], {}), '(b, c, e)\n', (26878, 26887), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((26901, 26926), 'PyOpenGLtoolbox.geometry._normal_3_points', '_normal_3_points', (['c', 'd', 'e'], {}), '(c, d, e)\n', (26917, 26926), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((26940, 26965), 'PyOpenGLtoolbox.geometry._normal_3_points', '_normal_3_points', (['d', 'a', 'e'], {}), '(d, a, e)\n', (26956, 26965), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((26979, 27004), 'PyOpenGLtoolbox.geometry._normal_3_points', '_normal_3_points', (['c', 'b', 'a'], {}), '(c, b, a)\n', (26995, 27004), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((27891, 27925), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(-0.5)', '(-0.288675)', '(-0.288675)'], {}), '(-0.5, -0.288675, -0.288675)\n', (27897, 27925), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((27941, 27974), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(0.5)', '(-0.288675)', '(-0.288675)'], {}), '(0.5, -0.288675, -0.288675)\n', (27947, 27974), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((28032, 28063), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(0.0)', '(0.57735)', '(-0.288675)'], {}), '(0.0, 0.57735, -0.288675)\n', (28038, 28063), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((28122, 28147), 'PyOpenGLtoolbox.mathlib.Point3', 'Point3', (['(0.0)', '(0.0)', '(0.57735)'], {}), '(0.0, 0.0, 0.57735)\n', (28128, 28147), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, 
Point2\n'), ((28189, 28214), 'PyOpenGLtoolbox.geometry._normal_3_points', '_normal_3_points', (['a', 'b', 'd'], {}), '(a, b, d)\n', (28205, 28214), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((28228, 28253), 'PyOpenGLtoolbox.geometry._normal_3_points', '_normal_3_points', (['b', 'c', 'd'], {}), '(b, c, d)\n', (28244, 28253), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((28267, 28292), 'PyOpenGLtoolbox.geometry._normal_3_points', '_normal_3_points', (['c', 'a', 'd'], {}), '(c, a, d)\n', (28283, 28292), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((28306, 28331), 'PyOpenGLtoolbox.geometry._normal_3_points', '_normal_3_points', (['c', 'b', 'a'], {}), '(c, b, a)\n', (28322, 28331), False, 'from PyOpenGLtoolbox.geometry import _normal_3_points, draw_vertex_list_create_normal, draw_vertex_list_create_normal_textured\n'), ((28989, 29010), 'OpenGL.GL.glColor4fv', '_gl.glColor4fv', (['color'], {}), '(color)\n', (29003, 29010), True, 'import OpenGL.GL as _gl\n'), ((29064, 29092), 'OpenGL.GLUT.glutSolidTetrahedron', '_glut.glutSolidTetrahedron', ([], {}), '()\n', (29090, 29092), True, 'import OpenGL.GLUT as _glut\n'), ((29619, 29640), 'OpenGL.GL.glColor4fv', '_gl.glColor4fv', (['color'], {}), '(color)\n', (29633, 29640), True, 'import OpenGL.GL as _gl\n'), ((29694, 29723), 'OpenGL.GLUT.glutSolidDodecahedron', '_glut.glutSolidDodecahedron', ([], {}), '()\n', (29721, 29723), True, 'import OpenGL.GLUT as _glut\n'), ((30245, 30266), 'OpenGL.GL.glColor4fv', '_gl.glColor4fv', (['color'], {}), '(color)\n', (30259, 30266), True, 'import OpenGL.GL as _gl\n'), ((30320, 30347), 'OpenGL.GLUT.glutSolidOctahedron', '_glut.glutSolidOctahedron', ([], {}), '()\n', (30345, 30347), True, 'import OpenGL.GLUT as 
_glut\n'), ((30871, 30892), 'OpenGL.GL.glColor4fv', '_gl.glColor4fv', (['color'], {}), '(color)\n', (30885, 30892), True, 'import OpenGL.GL as _gl\n'), ((30946, 30974), 'OpenGL.GLUT.glutSolidIcosahedron', '_glut.glutSolidIcosahedron', ([], {}), '()\n', (30972, 30974), True, 'import OpenGL.GLUT as _glut\n'), ((3145, 3163), 'OpenGL.GL.glPushMatrix', '_gl.glPushMatrix', ([], {}), '()\n', (3161, 3163), True, 'import OpenGL.GL as _gl\n'), ((3264, 3297), 'OpenGL.GL.glVertexPointerf', '_gl.glVertexPointerf', (['self.vertex'], {}), '(self.vertex)\n', (3284, 3297), True, 'import OpenGL.GL as _gl\n'), ((3343, 3378), 'OpenGL.GL.glNormalPointerf', '_gl.glNormalPointerf', (['self.fragment'], {}), '(self.fragment)\n', (3363, 3378), True, 'import OpenGL.GL as _gl\n'), ((3418, 3462), 'OpenGL.GL.glEnableClientState', '_gl.glEnableClientState', (['_gl.GL_VERTEX_ARRAY'], {}), '(_gl.GL_VERTEX_ARRAY)\n', (3441, 3462), True, 'import OpenGL.GL as _gl\n'), ((3475, 3519), 'OpenGL.GL.glEnableClientState', '_gl.glEnableClientState', (['_gl.GL_NORMAL_ARRAY'], {}), '(_gl.GL_NORMAL_ARRAY)\n', (3498, 3519), True, 'import OpenGL.GL as _gl\n'), ((3632, 3671), 'OpenGL.GL.glTranslate', '_gl.glTranslate', (['pos[0]', 'pos[1]', 'pos[2]'], {}), '(pos[0], pos[1], pos[2])\n', (3647, 3671), True, 'import OpenGL.GL as _gl\n'), ((3987, 4042), 'OpenGL.GL.glDrawArrays', '_gl.glDrawArrays', (['_gl.GL_TRIANGLES', '(0)', 'self.totalVertex'], {}), '(_gl.GL_TRIANGLES, 0, self.totalVertex)\n', (4003, 4042), True, 'import OpenGL.GL as _gl\n'), ((4264, 4309), 'OpenGL.GL.glDisableClientState', '_gl.glDisableClientState', (['_gl.GL_VERTEX_ARRAY'], {}), '(_gl.GL_VERTEX_ARRAY)\n', (4288, 4309), True, 'import OpenGL.GL as _gl\n'), ((4322, 4367), 'OpenGL.GL.glDisableClientState', '_gl.glDisableClientState', (['_gl.GL_NORMAL_ARRAY'], {}), '(_gl.GL_NORMAL_ARRAY)\n', (4346, 4367), True, 'import OpenGL.GL as _gl\n'), ((4406, 4423), 'OpenGL.GL.glPopMatrix', '_gl.glPopMatrix', ([], {}), '()\n', (4421, 4423), True, 'import 
OpenGL.GL as _gl\n'), ((9199, 9233), 'math.sqrt', '_sqrt', (['(nx ** 2 + ny ** 2 + nz ** 2)'], {}), '(nx ** 2 + ny ** 2 + nz ** 2)\n', (9204, 9233), True, 'from math import sqrt as _sqrt\n'), ((13279, 13300), 'OpenGL.GL.glColor4fv', '_gl.glColor4fv', (['color'], {}), '(color)\n', (13293, 13300), True, 'import OpenGL.GL as _gl\n'), ((13366, 13405), 'OpenGL.GLUT.glutSolidSphere', '_glut.glutSolidSphere', (['(1.0)', 'lats', 'longs'], {}), '(1.0, lats, longs)\n', (13387, 13405), True, 'import OpenGL.GLUT as _glut\n'), ((15159, 15180), 'OpenGL.GL.glColor4fv', '_gl.glColor4fv', (['color'], {}), '(color)\n', (15173, 15180), True, 'import OpenGL.GL as _gl\n'), ((15275, 15298), 'OpenGL.GL.glNormal3fv', '_gl.glNormal3fv', (['normal'], {}), '(normal)\n', (15290, 15298), True, 'import OpenGL.GL as _gl\n'), ((16444, 16465), 'OpenGL.GL.glColor4fv', '_gl.glColor4fv', (['color'], {}), '(color)\n', (16458, 16465), True, 'import OpenGL.GL as _gl\n'), ((16531, 16574), 'OpenGL.GLUT.glutSolidCone', '_glut.glutSolidCone', (['base', 'height', 'lat', 'lng'], {}), '(base, height, lat, lng)\n', (16550, 16574), True, 'import OpenGL.GLUT as _glut\n'), ((19856, 19877), 'OpenGL.GL.glColor4fv', '_gl.glColor4fv', (['color'], {}), '(color)\n', (19870, 19877), True, 'import OpenGL.GL as _gl\n'), ((19943, 19985), 'OpenGL.GLUT.glutSolidTorus', '_glut.glutSolidTorus', (['minr', 'maxr', 'lat', 'lng'], {}), '(minr, maxr, lat, lng)\n', (19963, 19985), True, 'import OpenGL.GLUT as _glut\n'), ((27383, 27408), 'numpy.array', '_array', (['vertex_array', '"""f"""'], {}), "(vertex_array, 'f')\n", (27389, 27408), True, 'from numpy import array as _array\n'), ((27420, 27445), 'numpy.array', '_array', (['normal_array', '"""f"""'], {}), "(normal_array, 'f')\n", (27426, 27445), True, 'from numpy import array as _array\n'), ((28596, 28621), 'numpy.array', '_array', (['vertex_array', '"""f"""'], {}), "(vertex_array, 'f')\n", (28602, 28621), True, 'from numpy import array as _array\n'), ((28633, 28658), 'numpy.array', 
'_array', (['normal_array', '"""f"""'], {}), "(normal_array, 'f')\n", (28639, 28658), True, 'from numpy import array as _array\n'), ((3600, 3619), 'OpenGL.GL.glColor4fv', '_gl.glColor4fv', (['rgb'], {}), '(rgb)\n', (3614, 3619), True, 'import OpenGL.GL as _gl\n'), ((3761, 3802), 'OpenGL.GL.glActiveTexture', '_gl.glActiveTexture', (['(_gl.GL_TEXTURE0 + _i)'], {}), '(_gl.GL_TEXTURE0 + _i)\n', (3780, 3802), True, 'import OpenGL.GL as _gl\n'), ((3819, 3850), 'OpenGL.GL.glEnable', '_gl.glEnable', (['_gl.GL_TEXTURE_2D'], {}), '(_gl.GL_TEXTURE_2D)\n', (3831, 3850), True, 'import OpenGL.GL as _gl\n'), ((3867, 3921), 'OpenGL.GL.glBindTexture', '_gl.glBindTexture', (['_gl.GL_TEXTURE_2D', 'self.texture[_i]'], {}), '(_gl.GL_TEXTURE_2D, self.texture[_i])\n', (3884, 3921), True, 'import OpenGL.GL as _gl\n'), ((4133, 4174), 'OpenGL.GL.glActiveTexture', '_gl.glActiveTexture', (['(_gl.GL_TEXTURE0 + _i)'], {}), '(_gl.GL_TEXTURE0 + _i)\n', (4152, 4174), True, 'import OpenGL.GL as _gl\n'), ((4191, 4223), 'OpenGL.GL.glDisable', '_gl.glDisable', (['_gl.GL_TEXTURE_2D'], {}), '(_gl.GL_TEXTURE_2D)\n', (4204, 4223), True, 'import OpenGL.GL as _gl\n'), ((12603, 12622), 'numpy.array', '_array', (['vertex', '"""f"""'], {}), "(vertex, 'f')\n", (12609, 12622), True, 'from numpy import array as _array\n'), ((12658, 12678), 'numpy.array', '_array', (['avgnorm', '"""f"""'], {}), "(avgnorm, 'f')\n", (12664, 12678), True, 'from numpy import array as _array\n'), ((12746, 12765), 'numpy.array', '_array', (['vertex', '"""f"""'], {}), "(vertex, 'f')\n", (12752, 12765), True, 'from numpy import array as _array\n'), ((12777, 12794), 'numpy.array', '_array', (['norm', '"""f"""'], {}), "(norm, 'f')\n", (12783, 12794), True, 'from numpy import array as _array\n'), ((20775, 20860), 'PyOpenGLtoolbox.utils.print_gl_error', '_print_gl_error', (['"""OpenGL actual version does not support glutSolidCube function"""'], {}), "('OpenGL actual version does not support glutSolidCube function'\n )\n", (20790, 20860), 
True, 'from PyOpenGLtoolbox.utils import print_gl_error as _print_gl_error\n'), ((25048, 25136), 'PyOpenGLtoolbox.utils.print_gl_error', '_print_gl_error', (['"""OpenGL actual version doest not support glutSolidTeapot function"""'], {}), "(\n 'OpenGL actual version doest not support glutSolidTeapot function')\n", (25063, 25136), True, 'from PyOpenGLtoolbox.utils import print_gl_error as _print_gl_error\n'), ((25833, 25920), 'PyOpenGLtoolbox.utils.print_gl_error', '_print_gl_error', (['"""OpenGL actual version does not support glutSolidTeapot function"""'], {}), "(\n 'OpenGL actual version does not support glutSolidTeapot function')\n", (25848, 25920), True, 'from PyOpenGLtoolbox.utils import print_gl_error as _print_gl_error\n'), ((29150, 29242), 'PyOpenGLtoolbox.utils.print_gl_error', '_print_gl_error', (['"""OpenGL actual version does not support glutSolidTetrahedron function"""'], {}), "(\n 'OpenGL actual version does not support glutSolidTetrahedron function')\n", (29165, 29242), True, 'from PyOpenGLtoolbox.utils import print_gl_error as _print_gl_error\n'), ((29781, 29874), 'PyOpenGLtoolbox.utils.print_gl_error', '_print_gl_error', (['"""OpenGL actual version dost not support glutSolidDodecahedron function"""'], {}), "(\n 'OpenGL actual version dost not support glutSolidDodecahedron function')\n", (29796, 29874), True, 'from PyOpenGLtoolbox.utils import print_gl_error as _print_gl_error\n'), ((30405, 30496), 'PyOpenGLtoolbox.utils.print_gl_error', '_print_gl_error', (['"""OpenGL actual version does not support glutSolidOctahedron function"""'], {}), "(\n 'OpenGL actual version does not support glutSolidOctahedron function')\n", (30420, 30496), True, 'from PyOpenGLtoolbox.utils import print_gl_error as _print_gl_error\n'), ((31032, 31124), 'PyOpenGLtoolbox.utils.print_gl_error', '_print_gl_error', (['"""OpenGL actual version does not support glutSolidIcosahedron function"""'], {}), "(\n 'OpenGL actual version does not support glutSolidIcosahedron function')\n", 
(31047, 31124), True, 'from PyOpenGLtoolbox.utils import print_gl_error as _print_gl_error\n'), ((13475, 13562), 'PyOpenGLtoolbox.utils.print_gl_error', '_print_gl_error', (['"""OpenGL actual version does not support glutSolidSphere function"""'], {}), "(\n 'OpenGL actual version does not support glutSolidSphere function')\n", (13490, 13562), True, 'from PyOpenGLtoolbox.utils import print_gl_error as _print_gl_error\n'), ((13731, 13741), 'PyOpenGLtoolbox.mathlib._sin', '_sin', (['lat0'], {}), '(lat0)\n', (13735, 13741), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((13764, 13774), 'PyOpenGLtoolbox.mathlib._cos', '_cos', (['lat0'], {}), '(lat0)\n', (13768, 13774), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((13866, 13876), 'PyOpenGLtoolbox.mathlib._sin', '_sin', (['lat1'], {}), '(lat1)\n', (13870, 13876), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((13899, 13909), 'PyOpenGLtoolbox.mathlib._cos', '_cos', (['lat1'], {}), '(lat1)\n', (13903, 13909), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((13980, 14010), 'OpenGL.GL.glBegin', '_gl.glBegin', (['_gl.GL_QUAD_STRIP'], {}), '(_gl.GL_QUAD_STRIP)\n', (13991, 14010), True, 'import OpenGL.GL as _gl\n'), ((14450, 14461), 'OpenGL.GL.glEnd', '_gl.glEnd', ([], {}), '()\n', (14459, 14461), True, 'import OpenGL.GL as _gl\n'), ((15326, 15335), 'PyOpenGLtoolbox.mathlib._sin', '_sin', (['ang'], {}), '(ang)\n', (15330, 15335), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((15343, 15352), 'PyOpenGLtoolbox.mathlib._cos', '_cos', (['ang'], {}), '(ang)\n', (15347, 15352), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((15497, 15506), 'PyOpenGLtoolbox.mathlib._sin', '_sin', (['ang'], {}), '(ang)\n', (15501, 15506), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((15514, 15523), 'PyOpenGLtoolbox.mathlib._cos', '_cos', 
(['ang'], {}), '(ang)\n', (15518, 15523), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((16644, 16729), 'PyOpenGLtoolbox.utils.print_gl_error', '_print_gl_error', (['"""OpenGL actual version does not support glutSolidCone function"""'], {}), "('OpenGL actual version does not support glutSolidCone function'\n )\n", (16659, 16729), True, 'from PyOpenGLtoolbox.utils import print_gl_error as _print_gl_error\n'), ((20055, 20141), 'PyOpenGLtoolbox.utils.print_gl_error', '_print_gl_error', (['"""OpenGL actual version does not support glutSolidTorus function"""'], {}), "(\n 'OpenGL actual version does not support glutSolidTorus function')\n", (20070, 20141), True, 'from PyOpenGLtoolbox.utils import print_gl_error as _print_gl_error\n'), ((14157, 14168), 'PyOpenGLtoolbox.mathlib._cos', '_cos', (['_long'], {}), '(_long)\n', (14161, 14168), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((14193, 14204), 'PyOpenGLtoolbox.mathlib._sin', '_sin', (['_long'], {}), '(_long)\n', (14197, 14204), False, 'from PyOpenGLtoolbox.mathlib import Point3, _cos, _sin, Point2\n'), ((14225, 14261), 'OpenGL.GL.glNormal3f', '_gl.glNormal3f', (['(x * zr0)', '(y * zr0)', 'z0'], {}), '(x * zr0, y * zr0, z0)\n', (14239, 14261), True, 'import OpenGL.GL as _gl\n'), ((14282, 14318), 'OpenGL.GL.glVertex3f', '_gl.glVertex3f', (['(x * zr0)', '(y * zr0)', 'z0'], {}), '(x * zr0, y * zr0, z0)\n', (14296, 14318), True, 'import OpenGL.GL as _gl\n'), ((14339, 14375), 'OpenGL.GL.glNormal3f', '_gl.glNormal3f', (['(x * zr1)', '(y * zr1)', 'z1'], {}), '(x * zr1, y * zr1, z1)\n', (14353, 14375), True, 'import OpenGL.GL as _gl\n'), ((14396, 14432), 'OpenGL.GL.glVertex3f', '_gl.glVertex3f', (['(x * zr1)', '(y * zr1)', 'z1'], {}), '(x * zr1, y * zr1, z1)\n', (14410, 14432), True, 'import OpenGL.GL as _gl\n')] |
#!/usr/bin/env python
"""
This module provides classes to create phase diagrams.
"""
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
__date__ = "Nov 25, 2012"
import collections
import numpy as np
from pyhull.convex_hull import ConvexHull
from pymatgen.core.composition import Composition
from pymatgen.phasediagram.entries import GrandPotPDEntry, TransformedPDEntry
from pymatgen.core.periodic_table import DummySpecie
from pymatgen.analysis.reaction_calculator import Reaction, ReactionError
class PhaseDiagram (object):
    """
    Simple phase diagram class taking in elements and entries as inputs.
    The algorithm is based on the work in the following papers:
    1. <NAME>, <NAME>, <NAME>, and <NAME>, Li-Fe-P-O2 Phase Diagram from
    First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
    doi:10.1021/cm702327g
    2. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Thermal stabilities
    of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
    principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
    doi:10.1016/j.elecom.2010.01.010
    .. attribute: elements:
        Elements in the phase diagram.
    ..attribute: all_entries
        All entries provided for Phase Diagram construction. Note that this
        does not mean that all these entries are actually used in the phase
        diagram. For example, this includes the positive formation energy
        entries that are filtered out before Phase Diagram construction.
    .. attribute: qhull_data
        Data used in the convex hull operation. This is essentially a matrix of
        composition data and energy per atom values created from qhull_entries.
    .. attribute: dim
        The dimensionality of the phase diagram.
    .. attribute: facets
        Facets of the phase diagram in the form of [[1,2,3],[4,5,6]...]
    .. attribute: el_refs:
        List of elemental references for the phase diagrams. These are
        entries corresponding to the lowest energy element entries for simple
        compositional phase diagrams.
    .. attribute: qhull_entries:
        Actual entries used in convex hull. Excludes all positive formation
        energy entries.
    """
    # Tolerance for determining if formation energy is positive.
    formation_energy_tol = 1e-11
    def __init__(self, entries, elements=None):
        """
        Standard constructor for phase diagram.
        Args:
            entries:
                A list of PDEntry-like objects having an energy,
                energy_per_atom and composition.
            elements:
                Optional list of elements in the phase diagram. If set to None,
                the elements are determined from the the entries themselves.
        """
        if elements is None:
            # NOTE(review): this relies on Python 2 semantics where map() is
            # eager -- it is called purely for the elements.update() side
            # effect. On Python 3 map() is lazy and this would be a no-op;
            # the __future__ import above suggests the module targets Py2.
            elements = set()
            map(elements.update, [entry.composition.elements
                                  for entry in entries])
            elements = list(elements)
        # Qhull seems to be sensitive to choice of independent composition
        # components due to numerical issues in higher dimensions. The
        # code permutes the element sequence until one that works is found.
        dim = len(elements)
        el_refs = {}
        # Pick, for every element, the lowest-energy purely elemental entry;
        # these serve as the zero references for formation energies.
        # NOTE(review): filter()/len() also assume Python 2 (eager filter).
        for el in elements:
            el_entries = filter(lambda e: e.composition.is_element and
                                e.composition.elements[0] == el, entries)
            if len(el_entries) == 0:
                raise PhaseDiagramError(
                    "There are no entries associated with terminal {}."
                    .format(el))
            el_refs[el] = min(el_entries, key=lambda e: e.energy_per_atom)
        # Build the raw data matrix: one row per entry containing the atomic
        # fractions of all elements followed by the energy per atom.
        # NOTE(review): row.append() assumes Python 2's list-returning map().
        data = []
        for entry in entries:
            comp = entry.composition
            row = map(comp.get_atomic_fraction, elements)
            row.append(entry.energy_per_atom)
            data.append(row)
        data = np.array(data)
        # Drop the first (dependent) composition column for the hull data.
        self.all_entries_hulldata = data[:, 1:]
        # Calculate formation energies and remove positive formation energy
        # entries
        # vec dotted with a data row gives (sum of fractions * reference
        # energies) - energy_per_atom, so -dot is the formation energy/atom.
        vec = [el_refs[el].energy_per_atom for el in elements] + [-1]
        form_e = -np.dot(data, vec)
        ind = np.where(form_e <= -self.formation_energy_tol)[0].tolist()
        # The elemental references are always kept, even though their
        # formation energy is zero by construction.
        ind.extend(map(entries.index, el_refs.values()))
        qhull_entries = [entries[i] for i in ind]
        qhull_data = data[ind][:, 1:]
        if len(qhull_data) == dim:
            # Exactly dim points: the hull is the single simplex they span.
            self.facets = [range(dim)]
        else:
            # joggle=True perturbs points slightly to avoid qhull precision
            # failures on degenerate inputs.
            facets = ConvexHull(qhull_data, joggle=True).vertices
            finalfacets = []
            for facet in facets:
                # Keep only facets that contain at least one compound
                # (non-elemental) vertex...
                is_non_element_facet = any(
                    (len(qhull_entries[i].composition) > 1 for i in facet))
                if is_non_element_facet:
                    # ...and whose vertices are not affinely dependent:
                    # with the energy column replaced by ones, a (near-)zero
                    # determinant marks a degenerate facet.
                    m = qhull_data[facet]
                    m[:, -1] = 1
                    if abs(np.linalg.det(m)) > 1e-8:
                        finalfacets.append(facet)
            self.facets = finalfacets
        self.all_entries = entries
        self.qhull_data = qhull_data
        self.dim = dim
        self.el_refs = el_refs
        self.elements = elements
        self.qhull_entries = qhull_entries
    @property
    def unstable_entries(self):
        """
        Entries that are unstable in the phase diagram. Includes positive
        formation energy entries.
        """
        return [e for e in self.all_entries if e not in self.stable_entries]
    @property
    def stable_entries(self):
        """
        Returns the stable entries in the phase diagram.
        """
        # Stable entries are exactly the vertices of the hull facets.
        stable_entries = set()
        for facet in self.facets:
            for vertex in facet:
                stable_entries.add(self.qhull_entries[vertex])
        return stable_entries
    def get_form_energy(self, entry):
        """
        Returns the formation energy for an entry (NOT normalized) from the
        elemental references.
        Args:
            entry:
                A PDEntry-like object.
        Returns:
            Formation energy from the elemental references.
        """
        comp = entry.composition
        # Total energy minus the energy of the same amounts of the
        # elemental reference phases.
        energy = entry.energy - sum([comp[el] *
                                     self.el_refs[el].energy_per_atom
                                     for el in comp.elements])
        return energy
    def get_form_energy_per_atom(self, entry):
        """
        Returns the formation energy per atom for an entry from the
        elemental references.
        Args:
            entry:
                An PDEntry-like object
        Returns:
            Formation energy **per atom** from the elemental references.
        """
        comp = entry.composition
        return self.get_form_energy(entry) / comp.num_atoms
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        # Summary: chemical system followed by the list of stable phases.
        symbols = [el.symbol for el in self.elements]
        output = ["{} phase diagram".format("-".join(symbols)),
                  "{} stable phases: ".format(len(self.stable_entries)),
                  ", ".join([entry.name
                             for entry in self.stable_entries])]
        return "\n".join(output)
class GrandPotentialPhaseDiagram(PhaseDiagram):
    """
    A class representing a Grand potential phase diagram. Grand potential phase
    diagrams are essentially phase diagrams that are open to one or more
    components. To construct such phase diagrams, the relevant free energy is
    the grand potential, which can be written as the Legendre transform of the
    Gibbs free energy as follows
    Grand potential = G - u\ :sub:`X` N\ :sub:`X`\
    The algorithm is based on the work in the following papers:
    1. <NAME>, <NAME>, <NAME>, and <NAME>, Li-Fe-P-O2 Phase Diagram from
    First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
    doi:10.1021/cm702327g
    2. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>eder, Thermal stabilities
    of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
    principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
    doi:10.1016/j.elecom.2010.01.010
    """
    def __init__(self, entries, chempots, elements=None):
        """
        Standard constructor for grand potential phase diagram.
        Args:
            entries:
                A list of PDEntry-like objects having an energy,
                energy_per_atom and composition.
            chempots:
                A dict of {element: float} to specify the chemical potentials
                of the open elements.
            elements:
                Optional list of elements in the phase diagram. If set to None,
                the elements are determined from the entries themselves.
        """
        if elements is None:
            # Collect the element set with an explicit loop. The previous
            # side-effect-only map() call was eager only on Python 2; on
            # Python 3 map() is lazy and the set would stay empty.
            elements = set()
            for entry in entries:
                elements.update(entry.composition.elements)
        # The open (chemical-potential-controlled) elements are not axes of
        # the grand potential phase diagram.
        elements = set(elements).difference(chempots.keys())
        # Wrap each entry so that its energy is the grand potential; purely
        # elemental entries of the open elements are excluded.
        all_entries = [GrandPotPDEntry(e, chempots)
                       for e in entries
                       if (not e.is_element) or
                       e.composition.elements[0] in elements]
        self.chempots = chempots
        super(GrandPotentialPhaseDiagram, self).__init__(all_entries, elements)
    def __str__(self):
        """Summary: chemical system, open-element chemical potentials and
        the stable phases."""
        output = []
        chemsys = "-".join([el.symbol for el in self.elements])
        output.append("{} grand potential phase diagram with ".format(chemsys))
        output[-1] += ", ".join(["u{}={}".format(el, v)
                                 for el, v in self.chempots.items()])
        output.append("{} stable phases: ".format(len(self.stable_entries)))
        output.append(", ".join([entry.name
                                 for entry in self.stable_entries]))
        return "\n".join(output)
class CompoundPhaseDiagram(PhaseDiagram):
    """
    Generates phase diagrams from compounds as terminations instead of
    elements.
    """
    # Tolerance for determining if amount of a composition is positive.
    amount_tol = 1e-5
    def __init__(self, entries, terminal_compositions,
                 normalize_terminal_compositions=True):
        """
        Args:
            entries:
                Sequence of input entries. For example, if you want a Li2O-P2O5
                phase diagram, you might have all Li-P-O entries as an input.
            terminal_compositions:
                Terminal compositions of phase space. In the Li2O-P2O5 example,
                these will be the Li2O and P2O5 compositions.
            normalize_terminal_compositions:
                Whether to normalize the terminal compositions to a per atom
                basis. If normalized, the energy above hulls will be consistent
                for comparison across systems. Non-normalized terminals are
                more intuitive in terms of compositional breakdowns.
        """
        self.original_entries = entries
        self.terminal_compositions = terminal_compositions
        self.normalize_terminals = normalize_terminal_compositions
        # Map all entries into the dummy-species coordinate system spanned
        # by the terminal compositions; out-of-space entries are dropped.
        (pentries, species_mapping) = \
            self.transform_entries(entries, terminal_compositions)
        self.species_mapping = species_mapping
        # The dummy species play the role of "elements" for the base class.
        PhaseDiagram.__init__(self, pentries,
                              elements=species_mapping.values())
    def transform_entries(self, entries, terminal_compositions):
        """
        Method to transform all entries to the composition coordinate in the
        terminal compositions. If the entry does not fall within the space
        defined by the terminal compositions, they are excluded. For example,
        Li3PO4 is mapped into a Li2O:1.5, P2O5:0.5 composition. The terminal
        compositions are represented by DummySpecies.
        Args:
            entries:
                Sequence of all input entries
            terminal_compositions:
                Terminal compositions of phase space.
        Returns:
            Sequence of TransformedPDEntries falling within the phase space.
        """
        new_entries = []
        if self.normalize_terminals:
            fractional_comp = [c.get_fractional_composition()
                               for c in terminal_compositions]
        else:
            fractional_comp = terminal_compositions
        #Map terminal compositions to unique dummy species.
        # chr(102 + i) yields 'f', 'g', 'h', ... so the dummies are named
        # "Xf", "Xg", etc.
        sp_mapping = collections.OrderedDict()
        for i, comp in enumerate(fractional_comp):
            sp_mapping[comp] = DummySpecie("X" + chr(102 + i))
        for entry in entries:
            try:
                # Balance: terminals -> entry composition. After
                # normalize_to, the terminal coefficients are negative for
                # consumed reactants, hence the sign conventions below.
                rxn = Reaction(fractional_comp, [entry.composition])
                rxn.normalize_to(entry.composition)
                #We only allow reactions that have positive amounts of
                #reactants.
                if all([rxn.get_coeff(comp) <= CompoundPhaseDiagram.amount_tol
                        for comp in fractional_comp]):
                    newcomp = {sp_mapping[comp]: -rxn.get_coeff(comp)
                               for comp in fractional_comp}
                    # Drop terminals present only in negligible amounts.
                    newcomp = {k: v for k, v in newcomp.items()
                               if v > CompoundPhaseDiagram.amount_tol}
                    transformed_entry = \
                        TransformedPDEntry(Composition(newcomp), entry)
                    new_entries.append(transformed_entry)
            except ReactionError:
                #If the reaction can't be balanced, the entry does not fall
                #into the phase space. We ignore them.
                pass
        return new_entries, sp_mapping
class PhaseDiagramError(Exception):
    """Raised when a phase diagram cannot be constructed, e.g. when a
    terminal element has no associated entries."""
| [
"pymatgen.core.composition.Composition",
"pyhull.convex_hull.ConvexHull",
"collections.OrderedDict",
"numpy.where",
"pymatgen.phasediagram.entries.GrandPotPDEntry",
"numpy.linalg.det",
"numpy.array",
"numpy.dot",
"pymatgen.analysis.reaction_calculator.Reaction"
] | [((4092, 4106), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (4100, 4106), True, 'import numpy as np\n'), ((12583, 12608), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (12606, 12608), False, 'import collections\n'), ((4338, 4355), 'numpy.dot', 'np.dot', (['data', 'vec'], {}), '(data, vec)\n', (4344, 4355), True, 'import numpy as np\n'), ((9177, 9205), 'pymatgen.phasediagram.entries.GrandPotPDEntry', 'GrandPotPDEntry', (['e', 'chempots'], {}), '(e, chempots)\n', (9192, 9205), False, 'from pymatgen.phasediagram.entries import GrandPotPDEntry, TransformedPDEntry\n'), ((4684, 4719), 'pyhull.convex_hull.ConvexHull', 'ConvexHull', (['qhull_data'], {'joggle': '(True)'}), '(qhull_data, joggle=True)\n', (4694, 4719), False, 'from pyhull.convex_hull import ConvexHull\n'), ((12793, 12839), 'pymatgen.analysis.reaction_calculator.Reaction', 'Reaction', (['fractional_comp', '[entry.composition]'], {}), '(fractional_comp, [entry.composition])\n', (12801, 12839), False, 'from pymatgen.analysis.reaction_calculator import Reaction, ReactionError\n'), ((4370, 4416), 'numpy.where', 'np.where', (['(form_e <= -self.formation_energy_tol)'], {}), '(form_e <= -self.formation_energy_tol)\n', (4378, 4416), True, 'import numpy as np\n'), ((13475, 13495), 'pymatgen.core.composition.Composition', 'Composition', (['newcomp'], {}), '(newcomp)\n', (13486, 13495), False, 'from pymatgen.core.composition import Composition\n'), ((5054, 5070), 'numpy.linalg.det', 'np.linalg.det', (['m'], {}), '(m)\n', (5067, 5070), True, 'import numpy as np\n')] |
import datetime
import pandas as pd
import numpy as np
import re
import os
def remove_blanks(df, col_name):
    """
    Return a copy of *df* with blank/control rows removed.

    A row is dropped when its *col_name* value, lower-cased, starts with
    "blank" (optionally followed by digits and anything else) or is
    exactly "0".

    Args:
        df: input DataFrame (not modified in place).
        col_name: name of the sample-identifier column.

    Returns:
        A new DataFrame without blank rows, index reset to 0..n-1.
    """
    working_df = pd.DataFrame(df)
    if col_name in working_df.columns:
        # Vectorized filter. The original walked positional labels one by
        # one and stopped at the first missing label, silently keeping any
        # blanks past an index gap; it also crashed (.lower()) on
        # non-string cells. A boolean mask over str() values fixes both.
        blank_re = re.compile(r"^(blank\d*.*|0)$")
        is_blank = working_df[col_name].apply(
            lambda v: bool(blank_re.search(str(v).lower())))
        working_df = working_df[~is_blank]
    working_df = working_df.reset_index(drop=True)
    print(" Done!\n")
    return working_df
def remove_pools(df, col_name):
    """
    Return a copy of *df* without pool/panel rows.

    A row is dropped when the part of its *col_name* value before the
    first "/" contains "pool" or "panel" (case insensitive).

    Args:
        df: input DataFrame (not modified in place).
        col_name: name of the sample-identifier column.

    Returns:
        A new DataFrame without pool/panel rows, index reset to 0..n-1.
    """
    working_df = pd.DataFrame(df)

    def _is_pool(value):
        # Only the text before the first "/" is inspected.
        head = str(value).split("/")[0].lower().strip()
        return "pool" in head or "panel" in head

    # Boolean-mask filter instead of rebuilding the frame row by row;
    # this also preserves the original column dtypes. reset_index mirrors
    # the fresh 0..n-1 index the old rebuild produced.
    keep = ~working_df[col_name].apply(_is_pool)
    return working_df[keep].reset_index(drop=True)
def merge_dataframes(df1=None, df2=None, df1_drop=None, df_final_drop=None, join_lst=None, join_type=None):
    """
    Merge *df2* with *df1* on the columns listed in *join_lst*.

    The join columns of both frames are first cast to str so keys of mixed
    dtypes still match. Columns in *df1_drop* are removed from df1 before
    the merge; columns in *df_final_drop* are removed from the result.

    Args:
        df1: right-hand frame (e.g. from the qc table, may hold duplicates).
        df2: left-hand frame (e.g. from the results table).
        df1_drop: column labels to drop from df1 before merging.
        df_final_drop: column labels to drop from the merged frame.
        join_lst: column names to join on.
        join_type: pandas merge 'how' value (inner/left/...).

    Returns:
        The merged DataFrame.
    """
    as_str = {name: 'str' for name in join_lst}
    df1 = df1.astype(as_str)
    df2 = df2.astype(as_str)
    df1.drop(labels=df1_drop, axis=1, inplace=True)
    merged = df2.merge(df1, how=join_type, on=join_lst)
    merged.drop(labels=df_final_drop, axis=1, inplace=True)
    return merged
def format_hsn_col(df=None, hsn_colname=None, hsn_only=False):
    """
    Clean an HSN identifier column: drop pool/panel and blank rows, trim
    each id down to its HSN (see extract_hsn), rename the column to 'hsn'
    and de-duplicate.

    Args:
        df: input DataFrame.
        hsn_colname: name of the identifier column.
        hsn_only: when True, the frame is treated as a single-column frame
            and relabeled to *hsn_colname* first.

    Returns:
        The cleaned DataFrame with a unique string 'hsn' column.
    """
    df = remove_pools(df, hsn_colname)
    df = remove_blanks(df, hsn_colname)
    if hsn_only:
        df.columns = [hsn_colname]
    # extract_hsn reads the "Sample ID" cell; store its result as a string.
    df[hsn_colname] = df.apply(lambda row: str(extract_hsn(row)), axis=1)
    df = df.rename(columns={hsn_colname: 'hsn'})
    df.drop_duplicates(subset='hsn', inplace=True, ignore_index=True)
    return df
def add_cols(obj=None, df=None, col_lst=None, col_func_map=None):
    """
    Ensure *df* has every column named in *col_lst*, filling values via
    *col_func_map*.

    Each col_func_map value is a tuple: its first element names a
    module-level function (resolved through globals()) applied row-wise;
    an optional second element is either an attribute name on *obj* or a
    constant passed as the function's second argument. A one-element tuple
    may instead name an attribute of *obj* whose value is used as a
    constant column. Unmapped columns are inserted empty.

    Returns:
        The mutated DataFrame.
    """
    # iterate through all columns that should be in final df
    for k in col_lst:
        # if the column appears in the function mapping,
        if k in col_func_map.keys():
            # get the pointer to the func/value associated with the column
            v = col_func_map[k]
            try:
                # try to get additional value to run apply function with
                val = v[1]
                try:
                    val = getattr(obj, v[1])
                    # try to catch v[1] as an object variable
                    df[k] = df.apply(lambda row: globals()[v[0]](row, val), axis=1)
                except Exception:
                    # use v[1] as a constant argument to the function
                    df[k] = df.apply(lambda row: globals()[v[0]](row, v[1]), axis=1)
            # no additional variables to supply to apply function
            except IndexError:
                # try using the value as a function
                try:
                    df[k] = df.apply(lambda row: globals()[v[0]](row), axis=1)
                # try using the value as a variable
                except Exception:
                    val = getattr(obj, v[0])
                    df[k] = val
        # if column not in mapping, insert empty column with appropriate
        # name into the dataframe
        else:
            # try to insert the column
            try:
                df.insert(0, k, None)
            # ValueError raised if column already exists in dataframe
            except ValueError:
                pass
    return df
def format_date(row, colName):
    """
    Render row[colName] as an MM/DD/YYYY string.

    Returns np.nan when the cell is not a (non-null) pandas Timestamp or
    datetime.datetime.
    """
    value = row[colName]
    is_datetime = isinstance(value, (pd.Timestamp, datetime.datetime))
    if is_datetime and not pd.isna(value):
        return value.strftime("%m/%d/%Y")
    return np.nan
def get_today(row):
    """Return today's date as a 'YYYY-MM-DD' string (*row* is ignored;
    the signature matches the row-wise apply convention)."""
    now = datetime.datetime.today()
    return now.strftime("%Y-%m-%d")
def get_pos(id):
    """
    Map a well id such as 'A01' to a linear plate position.

    The trailing digit selects an 8-well block and the leading letter
    (A-H) the offset within that block. Raises KeyError for letters
    outside A-H.
    """
    letters = {"A": 1, "B": 2, "C": 3, "D": 4, "E": 5, "F": 6, "G": 7, "H": 8}
    block = int(id[-1]) - 1
    return block * 8 + letters[id[0]]
def parse_seq_id(row, arg):
    """
    Extract one field from a sequence identifier cell.

    The id comes from the "Sequence name" cell (falling back to "seqName")
    and is either "/"-delimited (workflow 4/5 files) or "."-delimited
    (workflow 3 folders); the two layouts use different field positions.

    Args:
        row: row-like mapping holding the sequence identifier.
        arg: one of "hsn", "m_num", "pos", "run_num", "date".

    Returns:
        The requested field (int, except "date" which is a string).

    Raises:
        ValueError: when *arg* is not one of the recognized field names.
    """
    try:
        seq_id = str(row["Sequence name"]).split("/")
    except:
        seq_id = str(row['seqName']).split("/")
    # if split didn't find matches, it is dealing with folder, should
    # be split by ".", also has different indexes for values
    if len(seq_id) == 1:
        # WF 3
        seq_id = str(row["seqName"]).split(".")
        if arg == "hsn":
            # first 7 characters of the leading token
            return int(seq_id[0][0:7])
        elif arg == "m_num":
            return int(seq_id[1][-2:])
        elif arg == "pos":
            return int(seq_id[4][-2:])
        elif arg == "run_num":
            return int(seq_id[3])
        elif arg == "date":
            return seq_id[2]
        else:
            raise ValueError("Bad argument to parse_seq_id --> folder")
    else:
        # WF_4, WF_5
        if arg == "hsn":
            # long ids are already a bare HSN; short ones carry a suffix
            if len(seq_id[0]) > 9:
                return int(seq_id[0])
            else:
                return int(seq_id[0][0:7])
        elif arg == "m_num":
            return int(seq_id[1][4:6])
        elif arg == "pos":
            return int(seq_id[2][-2:])
        elif arg == "run_num":
            return int(seq_id[1][-2:])
        elif arg == "date":
            return seq_id[1][7:17]
        else:
            raise ValueError("Bad argument to parse_seq_id --> file")
def extract_hsn(row):
    """Return the 7-character HSN from the 'Sample ID' cell; longer ids
    have their trailing 2 characters trimmed."""
    sample_id = str(row["Sample ID"])
    return sample_id if len(sample_id) == 7 else sample_id[:-2]
def format_str_cols(df):
    """Coerce every column label of *df* to str (in place) and return it."""
    df.columns = [str(label) for label in df.columns]
    return df
def format_sex(row, ber=False):
    """
    Normalize a sex/gender cell to 'Male', 'Female', 'Unknown', or a
    capitalized pass-through of the original value.

    With ber=True the 'Patient_Gender' column is read instead of 'sex'.
    """
    col = 'Patient_Gender' if ber else 'sex'
    raw = row[col]
    if pd.isna(raw):
        return "Unknown"
    upper = str(raw).upper()
    if upper in ("", "UNKNOWN", "U"):
        return "Unknown"
    if upper == "M":
        return "Male"
    if upper == "F":
        return "Female"
    return str(raw).capitalize()
def format_facility(row, facility_replace_dict):
    """
    Lower-case the facility name and apply the old->new substring
    replacements from *facility_replace_dict*.

    Returns None for missing or empty cells.
    """
    raw = row['facility']
    if pd.isna(raw) or raw == "":
        return None
    name = str(raw).lower()
    for old, new in facility_replace_dict.items():
        name = name.replace(old, new)
    # replacements may introduce upper-case text, so lower-case again
    return name.lower()
def parse_category(row, parse_category_dict):
    """
    Return the category whose regex key first matches the lower-cased
    facility name, or None when nothing matches.
    """
    facility = str(row['facility']).lower()
    for pattern, category in parse_category_dict.items():
        if re.search(pattern, facility):
            return category
    return None
def format_race(row):
    """Normalize the race cell: missing/''/'U' -> 'Unknown', 'W'/'w' ->
    'White', anything else passes through as str."""
    raw = row['race']
    if pd.isna(raw) or raw in ("", "U"):
        return "Unknown"
    if str(raw).upper() == "W":
        return "White"
    return str(raw)
def format_source(row):
    """
    Map long specimen-source names to 2-letter codes (NP, SV, OT);
    values of 2 characters or fewer are assumed to already be codes and
    pass through unchanged.
    """
    source = str(row['source']).lower()
    if len(source) <= 2:
        return row['source']
    if source == "nasopharyngeal":
        return "NP"
    if source == "sputum/saliva":
        return "SV"
    return "OT"
def add_cols_by_name(df, lst):
    """Insert a NaN-filled column (at position 0) for every name in *lst*
    that *df* does not already have; returns the mutated frame."""
    existing = set(df.columns)
    for name in lst:
        if name not in existing:
            df.insert(0, name, np.nan)
    return df
def format_f_name(row):
    """
    First name from a full-name cell: capitalized, with single quotes
    doubled for SQL escaping. Returns None for missing/empty names.
    """
    raw = row['name']
    if pd.isna(raw):
        return None
    if raw.strip() == "":
        return None
    first = str(raw).split()[0].capitalize()
    return first.replace("'", "''")
def format_l_name(row, lst):
    """
    Last name from a full-name cell, honoring generational suffixes.

    When the final token (lower-cased) appears in *lst* (e.g. 'jr'), the
    result is 'Surname, SUFFIX'; otherwise the capitalized last token with
    single quotes doubled for SQL escaping. Returns None for missing/empty
    names.
    """
    raw = row['name']
    if pd.isna(raw):
        return None
    if raw.strip() == "":
        return None
    tokens = str(raw).split()
    last = tokens[-1]
    if last.lower() in lst:
        return tokens[-2].capitalize() + ", " + last.upper()
    return last.capitalize().replace("'", "''")
def drop_cols(df, lst):
    """Drop every column of *df* whose name is not in *lst*; returns the
    resulting frame."""
    for name in list(df.columns):
        if name not in lst:
            df = df.drop(columns=name)
    return df
def get_age(row):
    """
    Age in whole years between the 'dob' and 'doc' cells.

    Each cell may be either an 'MM/DD/YYYY' string or a pandas
    Timestamp; both forms are tried. Returns -1 when either date is
    missing/null.
    """
    try:
        # dob as an 'MM/DD/YYYY' string...
        born = datetime.datetime.strptime(row["dob"], "%m/%d/%Y").date()
    except Exception:
        # ...otherwise as a pandas Timestamp
        born = row["dob"].to_pydatetime().date()
    try:
        # doc as a pandas Timestamp...
        tested = row['doc'].to_pydatetime().date()
    except Exception:
        # ...otherwise as an 'MM/DD/YYYY' string
        tested = datetime.datetime.strptime(row['doc'], "%m/%d/%Y").date()
    if pd.isnull(born) or pd.isnull(tested):
        return -1
    days_in_year = 365.2425
    age = int((tested - born).days / days_in_year)
    return age
def format_state(row, state_abbrev):
    """Look up the state's abbreviation in *state_abbrev*; returns
    'unknown' for missing cells. Raises KeyError for unmapped states."""
    if pd.isna(row['state']):
        return "unknown"
    return state_abbrev[str(row["state"])]
def parse_path(row, path):
    """Join *path* and the row's seqName with '/', verifying the resulting
    fasta path exists; raises ValueError otherwise."""
    fasta_path = "{}/{}".format(path, row["seqName"])
    if not os.path.exists(fasta_path):
        raise ValueError("The parser generated a path to a fasta file that is not valid!!")
    return fasta_path
def get_gisaid(row):
    """GISAID label 'KS-KHEL-<n>' for the row's gisaid_num, or '' when no
    number is assigned (NaN)."""
    num = row["gisaid_num"]
    if np.isnan(num) or pd.isna(num):
        return ""
    return "KS-KHEL-" + str(int(num))
def get_name(row):
    """Concatenate the first- and last-name cells with a single space."""
    return "{} {}".format(row['f_name'], row['l_name'])
def unkwn(row, col):
    """Blank out cells whose text form is 'nan'/'Nan'; otherwise return
    the cell unchanged."""
    text = str(row[col])
    if text in ("nan", "Nan"):
        return ""
    return row[col]
def cap_all(row, col):
    """Capitalize every whitespace-separated word in the cell and rejoin
    with single spaces."""
    return " ".join(word.capitalize() for word in str(row[col]).split())
def get_priority(row, lst):
    """Return 1 when the row's hsn is in the priority list *lst*, else 0."""
    return 1 if row['hsn'] in lst else 0
def check_reportable(row, cutoff):
    """Return 1 when both controls passed and coverage meets *cutoff*,
    else 0."""
    controls_ok = row['neg_pass'] and row['pos_pass']
    return 1 if controls_ok and row['percent_cvg'] >= cutoff else 0
def replace_shortcut(path):
    """
    Rewrite a drive-letter path into the '//kdhe/dfs/LabShared' UNC form.

    Backslashes are normalized to forward slashes first. Paths already
    under the share are returned unchanged; anything outside the
    'Molecular Genomics Unit' tree raises ValueError.
    """
    base_path = "//kdhe/dfs/LabShared"
    path = path.replace("\\", "/")
    if path[0:20] == base_path:
        # already a UNC path under the share
        return path
    if path[3:26] != "Molecular Genomics Unit":
        raise ValueError("The supplied path shouldn't be passed to 'replace_shortcut()'!")
    # strip the 'X:/' drive prefix and graft the rest onto the share root
    return base_path + "/" + path[3:]
"pandas.isnull",
"os.path.exists",
"datetime.datetime.strptime",
"datetime.datetime.today",
"numpy.isnan",
"pandas.DataFrame",
"pandas.isna",
"re.search"
] | [((138, 154), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {}), '(df)\n', (150, 154), True, 'import pandas as pd\n'), ((650, 666), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {}), '(df)\n', (662, 666), True, 'import pandas as pd\n'), ((1128, 1166), 'pandas.DataFrame', 'pd.DataFrame', (['new_row'], {'columns': 'col_lst'}), '(new_row, columns=col_lst)\n', (1140, 1166), True, 'import pandas as pd\n'), ((6489, 6513), 'pandas.isna', 'pd.isna', (["row['facility']"], {}), "(row['facility'])\n", (6496, 6513), True, 'import pandas as pd\n'), ((7791, 7811), 'pandas.isna', 'pd.isna', (["row['name']"], {}), "(row['name'])\n", (7798, 7811), True, 'import pandas as pd\n'), ((8111, 8131), 'pandas.isna', 'pd.isna', (["row['name']"], {}), "(row['name'])\n", (8118, 8131), True, 'import pandas as pd\n'), ((6110, 6127), 'pandas.isna', 'pd.isna', (['row[col]'], {}), '(row[col])\n', (6117, 6127), True, 'import pandas as pd\n'), ((6945, 6969), 're.search', 're.search', (['key', 'facility'], {}), '(key, facility)\n', (6954, 6969), False, 'import re\n'), ((7062, 7082), 'pandas.isna', 'pd.isna', (["row['race']"], {}), "(row['race'])\n", (7069, 7082), True, 'import pandas as pd\n'), ((9046, 9061), 'pandas.isnull', 'pd.isnull', (['born'], {}), '(born)\n', (9055, 9061), True, 'import pandas as pd\n'), ((9065, 9082), 'pandas.isnull', 'pd.isnull', (['tested'], {}), '(tested)\n', (9074, 9082), True, 'import pandas as pd\n'), ((9250, 9271), 'pandas.isna', 'pd.isna', (["row['state']"], {}), "(row['state'])\n", (9257, 9271), True, 'import pandas as pd\n'), ((9439, 9463), 'os.path.exists', 'os.path.exists', (['new_path'], {}), '(new_path)\n', (9453, 9463), False, 'import os\n'), ((9606, 9633), 'numpy.isnan', 'np.isnan', (["row['gisaid_num']"], {}), "(row['gisaid_num'])\n", (9614, 9633), True, 'import numpy as np\n'), ((9637, 9663), 'pandas.isna', 'pd.isna', (["row['gisaid_num']"], {}), "(row['gisaid_num'])\n", (9644, 9663), True, 'import pandas as pd\n'), ((4081, 4102), 'pandas.isna', 'pd.isna', 
(['row[colName]'], {}), '(row[colName])\n', (4088, 4102), True, 'import pandas as pd\n'), ((4271, 4296), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (4294, 4296), False, 'import datetime\n'), ((294, 327), 're.search', 're.search', (['"""^blank\\\\d*.*$"""', 'value'], {}), "('^blank\\\\d*.*$', value)\n", (303, 327), False, 'import re\n'), ((330, 353), 're.search', 're.search', (['"""^0$"""', 'value'], {}), "('^0$', value)\n", (339, 353), False, 'import re\n'), ((8752, 8802), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["row['dob']", '"""%m/%d/%Y"""'], {}), "(row['dob'], '%m/%d/%Y')\n", (8778, 8802), False, 'import datetime\n'), ((8981, 9031), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["row['doc']", '"""%m/%d/%Y"""'], {}), "(row['doc'], '%m/%d/%Y')\n", (9007, 9031), False, 'import datetime\n')] |
#!/usr/bin/env python
"""
Dispersion analysis of a heterogeneous finite scale periodic cell.
The periodic cell mesh has to contain two subdomains Y1 (with the cell ids 1),
Y2 (with the cell ids 2), so that different material properties can be defined
in each of the subdomains (see ``--pars`` option). The command line parameters
can be given in any consistent unit set, for example the basic SI units. The
``--unit-multipliers`` option can be used to rescale the input units to ones
more suitable to the simulation, for example to prevent having different
matrix blocks with large differences of matrix entries magnitudes. The results
are then in the rescaled units.
Usage Examples
--------------
Default material parameters, a square periodic cell with a spherical inclusion,
logs also standard pressure dilatation and shear waves, no eigenvectors::
python examples/linear_elasticity/dispersion_analysis.py meshes/2d/special/circle_in_square.mesh --log-std-waves --eigs-only
As above, with custom eigenvalue solver parameters, and different number of
eigenvalues, mesh size and units used in the calculation::
python examples/linear_elasticity/dispersion_analysis.py meshes/2d/special/circle_in_square.mesh --solver-conf="kind='eig.scipy', method='eigsh', tol=1e-10, maxiter=1000, which='LM', sigma=0" --log-std-waves -n 5 --range=0,640,101 --mode=omega --unit-multipliers=1e-6,1e-2,1e-3 --mesh-size=1e-2 --eigs-only
Default material parameters, a square periodic cell with a square inclusion,
and a very small mesh to allow comparing the omega and kappa modes (full matrix
solver required!)::
python examples/linear_elasticity/dispersion_analysis.py meshes/2d/square_2m.mesh --solver-conf="kind='eig.scipy', method='eigh'" --log-std-waves -n 10 --range=0,640,101 --mesh-size=1e-2 --mode=omega --eigs-only --no-legends --unit-multipliers=1e-6,1e-2,1e-3 -o output/omega
python examples/linear_elasticity/dispersion_analysis.py meshes/2d/square_2m.mesh --solver-conf="kind='eig.qevp', method='companion', mode='inverted', solver={kind='eig.scipy', method='eig'}" --log-std-waves -n 500 --range=0,4000000,1001 --mesh-size=1e-2 --mode=kappa --eigs-only --no-legends --unit-multipliers=1e-6,1e-2,1e-3 -o output/kappa
View/compare the resulting logs::
python script/plot_logs.py output/omega/frequencies.txt --no-legends -g 1 -o mode-omega.png
python script/plot_logs.py output/kappa/wave-numbers.txt --no-legends -o mode-kappa.png
python script/plot_logs.py output/kappa/wave-numbers.txt --no-legends --swap-axes -o mode-kappa-t.png
In contrast to the heterogeneous square periodic cell, a homogeneous
square periodic cell (the region Y2 is empty)::
python examples/linear_elasticity/dispersion_analysis.py meshes/2d/square_1m.mesh --solver-conf="kind='eig.scipy', method='eigh'" --log-std-waves -n 10 --range=0,640,101 --mesh-size=1e-2 --mode=omega --eigs-only --no-legends --unit-multipliers=1e-6,1e-2,1e-3 -o output/omega-h
python script/plot_logs.py output/omega-h/frequencies.txt --no-legends -g 1 -o mode-omega-h.png
Use the Brillouin stepper::
python examples/linear_elasticity/dispersion_analysis.py meshes/2d/special/circle_in_square.mesh --log-std-waves -n=60 --eigs-only --no-legends --stepper=brillouin
python script/plot_logs.py output/frequencies.txt -g 0 --rc="'font.size':14, 'lines.linewidth' : 3, 'lines.markersize' : 4" -o brillouin-stepper-kappas.png
python script/plot_logs.py output/frequencies.txt -g 1 --no-legends --rc="'font.size':14, 'lines.linewidth' : 3, 'lines.markersize' : 4" -o brillouin-stepper-omegas.png
Additional arguments can be passed to the problem configuration's
:func:`define()` function using the ``--define-kwargs`` option. In this file,
only the mesh vertex separation parameter `mesh_eps` can be used::
python examples/linear_elasticity/dispersion_analysis.py meshes/2d/special/circle_in_square.mesh --log-std-waves --eigs-only --define-kwargs="mesh_eps=1e-10" --save-regions
"""
from __future__ import absolute_import
import os
import sys
sys.path.append('.')
import gc
from copy import copy
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import import_file, output, Struct
from sfepy.base.conf import dict_from_string, ProblemConf
from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options
from sfepy.base.log import Log
from sfepy.discrete.fem import MeshIO
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson as stiffness
import sfepy.mechanics.matcoefs as mc
from sfepy.mechanics.units import apply_unit_multipliers
import sfepy.discrete.fem.periodic as per
from sfepy.discrete.fem.meshio import convert_complex_output
from sfepy.homogenization.utils import define_box_regions
from sfepy.discrete import Problem
from sfepy.mechanics.tensors import get_von_mises_stress
from sfepy.solvers import Solver
from sfepy.solvers.ts import get_print_info, TimeStepper
from sfepy.linalg.utils import output_array_stats, max_diff_csr
def apply_units(pars, unit_multipliers):
    """
    Convert the material parameters to basic units.

    `pars` holds (young1, poisson1, density1, young2, poisson2, density2),
    i.e. the unit kinds (stress, one, density) repeated for both subdomains.
    """
    unit_kinds = ['stress', 'one', 'density'] * 2
    return apply_unit_multipliers(pars, unit_kinds, unit_multipliers)
def compute_von_mises(out, pb, state, extend=False, wmag=None, wdir=None):
    """
    Calculate the von Mises stress and store it in `out`.

    Post-processing hook; `extend`, `wmag` and `wdir` are accepted for
    interface compatibility and are not used here.
    """
    cauchy = pb.evaluate('ev_cauchy_stress.i.Omega(m.D, u)', mode='el_avg')
    # Cell-mode output data need a (n_cell, 1, 1, 1) shape.
    vms = get_von_mises_stress(cauchy.squeeze()).reshape((-1, 1, 1, 1))
    out['von_mises_stress'] = Struct(name='output_data', mode='cell',
                                     data=vms)
    return out
def define(filename_mesh, pars, approx_order, refinement_level, solver_conf,
           plane='strain', post_process=False, mesh_eps=1e-8):
    """
    Build the problem description dictionaries for the dispersion analysis.

    Parameters
    ----------
    filename_mesh : str
        The periodic cell mesh file name.
    pars : sequence
        Material parameters (young1, poisson1, density1, young2, poisson2,
        density2) for the Y1, Y2 subdomains.
    approx_order : int
        Displacement field approximation order.
    refinement_level : int
        Number of uniform mesh refinements.
    solver_conf : dict
        Eigenvalue solver configuration; copied into the 'eig' solver.
    plane : 'strain' or 'stress'
        2D elasticity hypothesis.
    post_process : bool
        If True, register compute_von_mises() as the post-processing hook.
    mesh_eps : float
        Tolerance for detecting box boundary/periodic vertex pairs.

    Returns
    -------
    dict
        All local definitions, as expected by ProblemConf.from_dict().
    """
    io = MeshIO.any_from_filename(filename_mesh)
    bbox = io.read_bounding_box()
    dim = bbox.shape[1]
    options = {
        'absolute_mesh_path' : True,
        'refinement_level' : refinement_level,
        # Y1 or Y2 may be empty (homogeneous cell).
        'allow_empty_regions' : True,
        'post_process_hook' : 'compute_von_mises' if post_process else None,
    }
    # Complex field: the Bloch-wave formulation leads to complex matrices.
    fields = {
        'displacement': ('complex', dim, 'Omega', approx_order),
    }
    young1, poisson1, density1, young2, poisson2, density2 = pars
    materials = {
        'm' : ({
            'D' : {'Y1' : stiffness(dim, young=young1, poisson=poisson1,
                                     plane=plane),
                   'Y2' : stiffness(dim, young=young2, poisson=poisson2,
                                     plane=plane)},
            'density' : {'Y1' : density1, 'Y2' : density2},
        },),
        # Wave direction supplied by the get_wdir() material function.
        'wave' : 'get_wdir',
    }
    variables = {
        'u' : ('unknown field', 'displacement', 0),
        'v' : ('test field', 'displacement', 'u'),
    }
    regions = {
        'Omega' : 'all',
        'Y1': 'cells of group 1',
        'Y2': 'cells of group 2',
    }
    # Add the bounding-box face regions (Left, Right, ...) for the EPBCs.
    regions.update(define_box_regions(dim,
                                      bbox[0], bbox[1], mesh_eps))
    ebcs = {
    }
    # Periodic boundary conditions on opposite cell faces/edges.
    if dim == 3:
        epbcs = {
            'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'},
                            'match_x_plane'),
            'periodic_y' : (['Near', 'Far'], {'u.all' : 'u.all'},
                            'match_y_plane'),
            'periodic_z' : (['Top', 'Bottom'], {'u.all' : 'u.all'},
                            'match_z_plane'),
        }
    else:
        epbcs = {
            'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'},
                            'match_y_line'),
            'periodic_y' : (['Bottom', 'Top'], {'u.all' : 'u.all'},
                            'match_x_line'),
        }
    per.set_accuracy(mesh_eps)
    functions = {
        'match_x_plane' : (per.match_x_plane,),
        'match_y_plane' : (per.match_y_plane,),
        'match_z_plane' : (per.match_z_plane,),
        'match_x_line' : (per.match_x_line,),
        'match_y_line' : (per.match_y_line,),
        'get_wdir' : (get_wdir,),
    }
    integrals = {
        'i' : 2 * approx_order,
    }
    # The four blocks of the dispersion eigenvalue problem: stiffness K,
    # wave-vector term S, mixed (Cauchy) term R and mass M.
    equations = {
        'K' : 'dw_lin_elastic.i.Omega(m.D, v, u)',
        'S' : 'dw_elastic_wave.i.Omega(m.D, wave.vec, v, u)',
        'R' : """1j * dw_elastic_wave_cauchy.i.Omega(m.D, wave.vec, u, v)
                 - 1j * dw_elastic_wave_cauchy.i.Omega(m.D, wave.vec, v, u)""",
        'M' : 'dw_volume_dot.i.Omega(m.density, v, u)',
    }
    solver_0 = solver_conf.copy()
    solver_0['name'] = 'eig'
    return locals()
def get_wdir(ts, coors, mode=None,
             equations=None, term=None, problem=None, wdir=None, **kwargs):
    """
    Material function returning the wave direction vector `wdir`.

    Only the 'special' mode produces a value; other modes return None.
    """
    if mode != 'special':
        return None
    return {'vec' : wdir}
def set_wave_dir(pb, wdir):
    """Pass the wave direction `wdir` to the 'wave' material of problem `pb`."""
    pb.get_materials()['wave'].set_extra_args(wdir=wdir)
def save_materials(output_dir, pb, options):
    """
    Save cell-wise Young's modulus, Poisson's ratio and density fields of
    problem `pb` into <output_dir>/materials.vtk.
    """
    # Integrate the elasticity tensor per cell, then recover the engineering
    # parameters from it.
    stiffness = pb.evaluate('ev_volume_integrate_mat.2.Omega(m.D, u)',
                            mode='el_avg', copy_materials=False, verbose=False)
    young, poisson = mc.youngpoisson_from_stiffness(stiffness,
                                                    plane=options.plane)
    density = pb.evaluate('ev_volume_integrate_mat.2.Omega(m.density, u)',
                          mode='el_avg', copy_materials=False, verbose=False)
    out = {}
    # Cell-mode output requires trailing unit axes on the scalar data.
    out['young'] = Struct(name='young', mode='cell',
                          data=young[..., None, None])
    out['poisson'] = Struct(name='poisson', mode='cell',
                            data=poisson[..., None, None])
    out['density'] = Struct(name='density', mode='cell', data=density)
    materials_filename = os.path.join(output_dir, 'materials.vtk')
    pb.save_state(materials_filename, out=out)
def get_std_wave_fun(pb, options):
    """
    Return a function evaluating the standard pressure (p) and shear (s)
    waves of a homogeneous medium with averaged material properties of `pb`,
    together with the corresponding log labels and plot style kwargs.
    """
    stiffness = pb.evaluate('ev_volume_integrate_mat.2.Omega(m.D, u)',
                            mode='el_avg', copy_materials=False, verbose=False)
    young, poisson = mc.youngpoisson_from_stiffness(stiffness,
                                                    plane=options.plane)
    density = pb.evaluate('ev_volume_integrate_mat.2.Omega(m.density, u)',
                          mode='el_avg', copy_materials=False, verbose=False)
    lam, mu = mc.lame_from_youngpoisson(young, poisson,
                                        plane=options.plane)
    # Average the Lame parameters and density over all cells.
    alam = nm.average(lam)
    amu = nm.average(mu)
    adensity = nm.average(density)
    # Standard p- and s-wave speeds of the averaged homogeneous medium.
    cp = nm.sqrt((alam + 2.0 * amu) / adensity)
    cs = nm.sqrt(amu / adensity)
    output('average p-wave speed:', cp)
    output('average shear wave speed:', cs)
    log_names = [r'$\omega_p$', r'$\omega_s$']
    log_plot_kwargs = [{'ls' : '--', 'color' : 'k'},
                       {'ls' : '--', 'color' : 'gray'}]
    if options.mode == 'omega':
        # omega = c * kappa for a non-dispersive homogeneous medium.
        fun = lambda wmag, wdir: (cp * wmag, cs * wmag)
    else:
        # kappa = omega / c.
        fun = lambda wmag, wdir: (wmag / cp, wmag / cs)
    return fun, log_names, log_plot_kwargs
def get_stepper(rng, pb, options):
    """
    Return the wave-vector/frequency range stepper.

    For the 'linear' stepper, a plain TimeStepper over `rng` is used.
    Otherwise a BrillouinStepper traversing the boundary of the 1. Brillouin
    zone in the xy plane is created; only rng[2] (the number of items) is
    used then.
    """
    if options.stepper == 'linear':
        stepper = TimeStepper(rng[0], rng[1], dt=None, n_step=rng[2])
        return stepper
    bbox = pb.domain.mesh.get_bounding_box()
    # Per-axis 1. Brillouin zone extents of the periodic cell.
    bzone = 2.0 * nm.pi / (bbox[1] - bbox[0])
    # The three path legs share the step budget equally.
    num = rng[2] // 3
    class BrillouinStepper(Struct):
        """
        Step over 1. Brillouin zone in xy plane.
        """
        def __init__(self, t0, t1, dt=None, n_step=None, step=None, **kwargs):
            Struct.__init__(self, t0=t0, t1=t1, dt=dt, n_step=n_step, step=step)
            self.n_digit, self.format, self.suffix = get_print_info(self.n_step)
        def __iter__(self):
            # Leg 1: (0, 0) -> (bzone[0], 0), direction along x.
            ts = TimeStepper(0, bzone[0], dt=None, n_step=num)
            for ii, val in ts:
                yield ii, val, nm.array([1.0, 0.0])
                # Skip the leg's last point; it is the next leg's first.
                if ii == (num-2): break
            # Leg 2: (bzone[0], 0) -> (bzone[0], bzone[1]).
            ts = TimeStepper(0, bzone[1], dt=None, n_step=num)
            for ii, k1 in ts:
                wdir = nm.array([bzone[0], k1])
                val = nm.linalg.norm(wdir)
                wdir = wdir / val
                yield num + ii, val, wdir
                if ii == (num-2): break
            # Leg 3: (bzone[0], bzone[1]) -> (0, 0), magnitude shrinks to 0.
            wdir = nm.array([bzone[0], bzone[1]])
            val = nm.linalg.norm(wdir)
            wdir = wdir / val
            ts = TimeStepper(0, 1, dt=None, n_step=num)
            for ii, _ in ts:
                yield 2 * num + ii, val * (1.0 - float(ii)/(num-1)), wdir
    stepper = BrillouinStepper(0, 1, n_step=rng[2])
    return stepper
def save_eigenvectors(filename, svecs, wmag, wdir, pb):
    """
    Save the eigenvectors `svecs` of problem `pb` into `filename`.

    Does nothing when `svecs` is None (--eigs-only mode). `wmag`, `wdir` are
    passed on to the optional post-processing hook.
    """
    if svecs is None: return
    variables = pb.get_variables()
    # Make full eigenvectors (add DOFs fixed by boundary conditions).
    vecs = nm.empty((variables.di.ptr[-1], svecs.shape[1]),
                    dtype=svecs.dtype)
    for ii in range(svecs.shape[1]):
        vecs[:, ii] = variables.make_full_vec(svecs[:, ii])
    # Save the eigenvectors.
    out = {}
    state = pb.create_state()
    pp_name = pb.conf.options.get('post_process_hook')
    # Configured post-processing hook, or an identity fallback.
    pp = getattr(pb.conf.funmod, pp_name if pp_name is not None else '',
                 lambda out, *args, **kwargs: out)
    for ii in range(svecs.shape[1]):
        state.set_full(vecs[:, ii])
        aux = state.create_output_dict()
        aux2 = {}
        pp(aux2, pb, state, wmag=wmag, wdir=wdir)
        aux.update(convert_complex_output(aux2))
        # Suffix each output key with the eigenvector index.
        out.update({key + '%03d' % ii : aux[key] for key in aux})
    pb.save_state(filename, out=out)
def assemble_matrices(define, mod, pars, set_wave_dir, options, wdir=None):
    """
    Assemble the blocks of dispersion eigenvalue problem matrices.

    Returns (pb, wdir, bzone, mtxs): the problem, the normalized wave
    direction, the 1. Brillouin zone size (with unit multipliers applied)
    and the dict of sparse matrices keyed by equation name (K, S, R, M).
    """
    define_dict = define(filename_mesh=options.mesh_filename,
                         pars=pars,
                         approx_order=options.order,
                         refinement_level=options.refine,
                         solver_conf=options.solver_conf,
                         plane=options.plane,
                         post_process=options.post_process,
                         **options.define_kwargs)
    conf = ProblemConf.from_dict(define_dict, mod)
    pb = Problem.from_conf(conf)
    pb.dispersion_options = options
    pb.set_output_dir(options.output_dir)
    dim = pb.domain.shape.dim
    # Set the normalized wave vector direction to the material(s).
    if wdir is None:
        wdir = nm.asarray(options.wave_dir[:dim], dtype=nm.float64)
        wdir = wdir / nm.linalg.norm(wdir)
    set_wave_dir(pb, wdir)
    bbox = pb.domain.mesh.get_bounding_box()
    size = (bbox[1] - bbox[0]).max()
    # Unit multiplier for lengths alone.
    scaling0 = apply_unit_multipliers([1.0], ['length'],
                                     options.unit_multipliers)[0]
    scaling = scaling0
    if options.mesh_size is not None:
        scaling *= options.mesh_size / size
    output('scaling factor of periodic cell mesh coordinates:', scaling)
    output('new mesh size with applied unit multipliers:', scaling * size)
    # Rescale the mesh coordinates in place.
    pb.domain.mesh.coors[:] *= scaling
    pb.set_mesh_coors(pb.domain.mesh.coors, update_fields=True)
    bzone = 2.0 * nm.pi / (scaling * size)
    output('1. Brillouin zone size:', bzone * scaling0)
    output('1. Brillouin zone size with applied unit multipliers:', bzone)
    pb.time_update()
    pb.update_materials()
    # Assemble the matrices.
    # NOTE(review): pb.equations is a sfepy Container - iteritems() is its
    # own method, valid on Python 3 as well.
    mtxs = {}
    for key, eq in pb.equations.iteritems():
        mtxs[key] = mtx = pb.mtx_a.copy()
        mtx = eq.evaluate(mode='weak', dw_mode='matrix', asm_obj=mtx)
        mtx.eliminate_zeros()
        output_array_stats(mtx.data, 'nonzeros in %s' % key)
        output('symmetry checks:')
        output('%s - %s^T:' % (key, key), max_diff_csr(mtx, mtx.T))
        output('%s - %s^H:' % (key, key), max_diff_csr(mtx, mtx.H))
    return pb, wdir, bzone, mtxs
def setup_n_eigs(options, pb, mtxs):
    """
    Setup the numbers of eigenvalues based on options and numbers of DOFs.

    Returns (solver_n_eigs, n_eigs): the count passed to the eigenvalue
    solver (None means "compute all") and the effective number of
    eigenvalues. In 'omega' mode at most n_dof eigenvalues exist; in the
    quadratic 'kappa' mode at most 2 * n_dof.
    """
    n_dof = mtxs['K'].shape[0]
    limit = n_dof if options.mode == 'omega' else 2 * n_dof
    if options.n_eigs > limit:
        return None, limit
    return options.n_eigs, options.n_eigs
def build_evp_matrices(mtxs, val, mode, pb):
    """
    Build the matrices of the dispersion eigenvalue problem.

    In 'omega' mode, `val` is the wave number and a generalized EVP
    (A, M) is built; otherwise `val` is the frequency and the three
    matrices of the quadratic EVP in kappa are returned.
    """
    if mode != 'omega':
        # Quadratic EVP: S * kappa^2 + R * kappa + (K - omega^2 * M).
        return (mtxs['S'], mtxs['R'], mtxs['K'] - val**2 * mtxs['M'])
    # Generalized EVP: A(kappa) x = omega^2 M x.
    mtx_a = mtxs['K'] + val**2 * mtxs['S'] + val * mtxs['R']
    output('A - A^H:', max_diff_csr(mtx_a, mtx_a.H))
    return (mtx_a, mtxs['M'])
def process_evp_results(eigs, svecs, val, wdir, bzone, pb, mtxs, options,
                        std_wave_fun=None):
    """
    Transform eigenvalues to either omegas or kappas, depending on `mode`.
    Transform eigenvectors, if available, depending on `mode`.
    Return also the values to log.
    """
    if options.mode == 'omega':
        # Generalized EVP eigenvalues are omega^2.
        omegas = nm.sqrt(eigs)
        output('eigs, omegas:')
        for ii, om in enumerate(omegas):
            output('{:>3}. {: .10e}, {:.10e}'.format(ii, eigs[ii], om))
        if options.stepper == 'linear':
            out = tuple(eigs) + tuple(omegas)
        else:
            # Brillouin stepper: log the wave vector components instead.
            out = tuple(val * wdir) + tuple(omegas)
        if std_wave_fun is not None:
            out = out + std_wave_fun(val, wdir)
        return omegas, svecs, out
    else:
        kappas = eigs.copy()
        # Keep the raw values for reporting below.
        rks = kappas.copy()
        # Mask modes far from 1. Brillouin zone.
        max_kappa = 1.2 * bzone
        kappas[kappas.real > max_kappa] = nm.nan
        # Mask non-physical modes.
        kappas[kappas.real < 0] = nm.nan
        kappas[nm.abs(kappas.imag) > 1e-10] = nm.nan
        out = tuple(kappas.real)
        output('raw kappas, masked real part:',)
        for ii, kr in enumerate(kappas.real):
            output('{:>3}. {: 23.5e}, {:.10e}'.format(ii, rks[ii], kr))
        if svecs is not None:
            n_dof = mtxs['K'].shape[0]
            # Select only vectors corresponding to physical modes.
            ii = nm.isfinite(kappas.real)
            svecs = svecs[:n_dof, ii]
        if std_wave_fun is not None:
            # Mask the standard-wave values the same way as the kappas.
            out = out + tuple(ii if ii <= max_kappa else nm.nan
                              for ii in std_wave_fun(val, wdir))
        return kappas, svecs, out
# Help strings for the command-line options defined in main().
helps = {
    'pars' :
    'material parameters in Y1, Y2 subdomains in basic units'
    ' [default: %(default)s]',
    'conf' :
    'if given, an alternative problem description file with apply_units() and'
    ' define() functions [default: %(default)s]',
    'define_kwargs' : 'additional keyword arguments passed to define()',
    'mesh_size' :
    'desired mesh size (max. of bounding box dimensions) in basic units'
    ' - the input periodic cell mesh is rescaled to this size'
    ' [default: %(default)s]',
    'unit_multipliers' :
    'basic unit multipliers (time, length, mass) [default: %(default)s]',
    'plane' :
    'for 2D problems, plane strain or stress hypothesis selection'
    ' [default: %(default)s]',
    'wave_dir' : 'the wave vector direction (will be normalized)'
    ' [default: %(default)s]',
    'mode' : 'solution mode: omega = solve a generalized EVP for omega,'
    ' kappa = solve a quadratic generalized EVP for kappa'
    ' [default: %(default)s]',
    'stepper' : 'the range stepper. For "brillouin", only the number'
    ' of items from --range is used'
    ' [default: %(default)s]',
    'range' : 'the wave vector magnitude / frequency range'
    ' (like numpy.linspace) depending on the mode option'
    ' [default: %(default)s]',
    'order' : 'displacement field approximation order [default: %(default)s]',
    'refine' : 'number of uniform mesh refinements [default: %(default)s]',
    'n_eigs' : 'the number of eigenvalues to compute [default: %(default)s]',
    'eigs_only' : 'compute only eigenvalues, not eigenvectors',
    'post_process' : 'post-process eigenvectors',
    'solver_conf' : 'eigenvalue problem solver configuration options'
    ' [default: %(default)s]',
    'save_regions' : 'save defined regions into'
    ' <output_directory>/regions.vtk',
    'save_materials' : 'save material parameters into'
    ' <output_directory>/materials.vtk',
    'log_std_waves' : 'log also standard pressure dilatation and shear waves',
    'no_legends' :
    'do not show legends in the log plots',
    'no_show' :
    'do not show the log figure',
    'silent' : 'do not print messages to screen',
    'clear' :
    'clear old solution files from output directory',
    'output_dir' :
    'output directory [default: %(default)s]',
    'mesh_filename' :
    'input periodic cell mesh file name [default: %(default)s]',
}
def main():
    """
    Parse the command line, assemble the dispersion problem matrices and
    solve the eigenvalue problems over the requested range, logging and
    plotting omegas or kappas.
    """
    # Aluminium and epoxy.
    default_pars = '70e9,0.35,2.799e3, 3.8e9,0.27,1.142e3'
    default_solver_conf = ("kind='eig.scipy',method='eigsh',tol=1.0e-5,"
                           "maxiter=1000,which='LM',sigma=0.0")
    # --- Command line definition. ---
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--pars', metavar='young1,poisson1,density1'
                        ',young2,poisson2,density2',
                        action='store', dest='pars',
                        default=default_pars, help=helps['pars'])
    parser.add_argument('--conf', metavar='filename',
                        action='store', dest='conf',
                        default=None, help=helps['conf'])
    parser.add_argument('--define-kwargs', metavar='dict-like',
                        action='store', dest='define_kwargs',
                        default=None, help=helps['define_kwargs'])
    parser.add_argument('--mesh-size', type=float, metavar='float',
                        action='store', dest='mesh_size',
                        default=None, help=helps['mesh_size'])
    parser.add_argument('--unit-multipliers',
                        metavar='c_time,c_length,c_mass',
                        action='store', dest='unit_multipliers',
                        default='1.0,1.0,1.0', help=helps['unit_multipliers'])
    parser.add_argument('--plane', action='store', dest='plane',
                        choices=['strain', 'stress'],
                        default='strain', help=helps['plane'])
    parser.add_argument('--wave-dir', metavar='float,float[,float]',
                        action='store', dest='wave_dir',
                        default='1.0,0.0,0.0', help=helps['wave_dir'])
    parser.add_argument('--mode', action='store', dest='mode',
                        choices=['omega', 'kappa'],
                        default='omega', help=helps['mode'])
    parser.add_argument('--stepper', action='store', dest='stepper',
                        choices=['linear', 'brillouin'],
                        default='linear', help=helps['stepper'])
    parser.add_argument('--range', metavar='start,stop,count',
                        action='store', dest='range',
                        default='0,6.4,33', help=helps['range'])
    parser.add_argument('--order', metavar='int', type=int,
                        action='store', dest='order',
                        default=1, help=helps['order'])
    parser.add_argument('--refine', metavar='int', type=int,
                        action='store', dest='refine',
                        default=0, help=helps['refine'])
    parser.add_argument('-n', '--n-eigs', metavar='int', type=int,
                        action='store', dest='n_eigs',
                        default=6, help=helps['n_eigs'])
    # Eigenvectors are either skipped or post-processed, not both.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--eigs-only',
                       action='store_true', dest='eigs_only',
                       default=False, help=helps['eigs_only'])
    group.add_argument('--post-process',
                       action='store_true', dest='post_process',
                       default=False, help=helps['post_process'])
    parser.add_argument('--solver-conf', metavar='dict-like',
                        action='store', dest='solver_conf',
                        default=default_solver_conf, help=helps['solver_conf'])
    parser.add_argument('--save-regions',
                        action='store_true', dest='save_regions',
                        default=False, help=helps['save_regions'])
    parser.add_argument('--save-materials',
                        action='store_true', dest='save_materials',
                        default=False, help=helps['save_materials'])
    parser.add_argument('--log-std-waves',
                        action='store_true', dest='log_std_waves',
                        default=False, help=helps['log_std_waves'])
    parser.add_argument('--no-legends',
                        action='store_false', dest='show_legends',
                        default=True, help=helps['no_legends'])
    parser.add_argument('--no-show',
                        action='store_false', dest='show',
                        default=True, help=helps['no_show'])
    parser.add_argument('--silent',
                        action='store_true', dest='silent',
                        default=False, help=helps['silent'])
    parser.add_argument('-c', '--clear',
                        action='store_true', dest='clear',
                        default=False, help=helps['clear'])
    parser.add_argument('-o', '--output-dir', metavar='path',
                        action='store', dest='output_dir',
                        default='output', help=helps['output_dir'])
    parser.add_argument('mesh_filename', default='',
                        help=helps['mesh_filename'])
    options = parser.parse_args()
    output_dir = options.output_dir
    # Log all output also into a file in the output directory.
    output.set_output(filename=os.path.join(output_dir,'output_log.txt'),
                      combined=options.silent == False)
    # Use an alternative problem description module, if given.
    if options.conf is not None:
        mod = import_file(options.conf)
    else:
        mod = sys.modules[__name__]
    apply_units = mod.apply_units
    define = mod.define
    set_wave_dir = mod.set_wave_dir
    setup_n_eigs = mod.setup_n_eigs
    build_evp_matrices = mod.build_evp_matrices
    save_materials = mod.save_materials
    get_std_wave_fun = mod.get_std_wave_fun
    get_stepper = mod.get_stepper
    process_evp_results = mod.process_evp_results
    # --- Parse the comma-separated option values. ---
    options.pars = [float(ii) for ii in options.pars.split(',')]
    options.unit_multipliers = [float(ii)
                                for ii in options.unit_multipliers.split(',')]
    options.wave_dir = [float(ii)
                        for ii in options.wave_dir.split(',')]
    aux = options.range.split(',')
    options.range = [float(aux[0]), float(aux[1]), int(aux[2])]
    options.solver_conf = dict_from_string(options.solver_conf)
    options.define_kwargs = dict_from_string(options.define_kwargs)
    if options.clear:
        remove_files_patterns(output_dir,
                              ['*.h5', '*.vtk', '*.txt'],
                              ignores=['output_log.txt'],
                              verbose=True)
    # Record the used options for reproducibility.
    filename = os.path.join(output_dir, 'options.txt')
    ensure_path(filename)
    save_options(filename, [('options', vars(options))],
                 quote_command_line=True)
    pars = apply_units(options.pars, options.unit_multipliers)
    output('material parameters with applied unit multipliers:')
    output(pars)
    # The range is in wave numbers (omega mode) or frequencies (kappa mode).
    if options.mode == 'omega':
        rng = copy(options.range)
        rng[:2] = apply_unit_multipliers(options.range[:2],
                                         ['wave_number', 'wave_number'],
                                         options.unit_multipliers)
        output('wave number range with applied unit multipliers:', rng)
    else:
        if options.stepper == 'brillouin':
            raise ValueError('Cannot use "brillouin" stepper in kappa mode!')
        rng = copy(options.range)
        rng[:2] = apply_unit_multipliers(options.range[:2],
                                         ['frequency', 'frequency'],
                                         options.unit_multipliers)
        output('frequency range with applied unit multipliers:', rng)
    pb, wdir, bzone, mtxs = assemble_matrices(define, mod, pars, set_wave_dir,
                                              options)
    dim = pb.domain.shape.dim
    # The plane hypothesis applies to 2D only.
    if dim != 2:
        options.plane = 'strain'
    if options.save_regions:
        pb.save_regions_as_groups(os.path.join(output_dir, 'regions'))
    if options.save_materials:
        save_materials(output_dir, pb, options)
    conf = pb.solver_confs['eig']
    eig_solver = Solver.any_from_conf(conf)
    n_eigs, options.n_eigs = setup_n_eigs(options, pb, mtxs)
    # --- Plotting styles: one color per eigenvalue / dimension. ---
    get_color = lambda ii: plt.cm.viridis((float(ii) / (options.n_eigs - 1)))
    plot_kwargs = [{'color' : get_color(ii), 'ls' : '', 'marker' : 'o'}
                   for ii in range(options.n_eigs)]
    get_color_dim = lambda ii: plt.cm.viridis((float(ii) / (dim-1)))
    plot_kwargs_dim = [{'color' : get_color_dim(ii), 'ls' : '', 'marker' : 'o'}
                       for ii in range(dim)]
    log_names = []
    log_plot_kwargs = []
    if options.log_std_waves:
        std_wave_fun, log_names, log_plot_kwargs = get_std_wave_fun(
            pb, options)
    else:
        std_wave_fun = None
    stepper = get_stepper(rng, pb, options)
    if options.mode == 'omega':
        # --- omega mode: solve for frequencies over wave numbers. ---
        eigenshapes_filename = os.path.join(output_dir,
                                            'frequency-eigenshapes-%s.vtk'
                                            % stepper.suffix)
        if options.stepper == 'linear':
            log = Log([[r'$\lambda_{%d}$' % ii for ii in range(options.n_eigs)],
                       [r'$\omega_{%d}$'
                        % ii for ii in range(options.n_eigs)] + log_names],
                      plot_kwargs=[plot_kwargs, plot_kwargs + log_plot_kwargs],
                      formats=[['{:.5e}'] * options.n_eigs,
                               ['{:.5e}'] * (options.n_eigs + len(log_names))],
                      yscales=['linear', 'linear'],
                      xlabels=[r'$\kappa$', r'$\kappa$'],
                      ylabels=[r'eigenvalues $\lambda_i$',
                               r'frequencies $\omega_i$'],
                      show_legends=options.show_legends,
                      is_plot=options.show,
                      log_filename=os.path.join(output_dir, 'frequencies.txt'),
                      aggregate=1000, sleep=0.1)
        else:
            log = Log([[r'$\kappa_{%d}$'% ii for ii in range(dim)],
                       [r'$\omega_{%d}$'
                        % ii for ii in range(options.n_eigs)] + log_names],
                      plot_kwargs=[plot_kwargs_dim,
                                   plot_kwargs + log_plot_kwargs],
                      formats=[['{:.5e}'] * dim,
                               ['{:.5e}'] * (options.n_eigs + len(log_names))],
                      yscales=['linear', 'linear'],
                      xlabels=[r'', r''],
                      ylabels=[r'wave vector $\kappa$',
                               r'frequencies $\omega_i$'],
                      show_legends=options.show_legends,
                      is_plot=options.show,
                      log_filename=os.path.join(output_dir, 'frequencies.txt'),
                      aggregate=1000, sleep=0.1)
        for aux in stepper:
            if options.stepper == 'linear':
                iv, wmag = aux
            else:
                iv, wmag, wdir = aux
            output('step %d: wave vector %s' % (iv, wmag * wdir))
            # The Brillouin stepper changes the direction, so reassemble.
            if options.stepper == 'brillouin':
                pb, _, bzone, mtxs = assemble_matrices(
                    define, mod, pars, set_wave_dir, options, wdir=wdir)
            evp_mtxs = build_evp_matrices(mtxs, wmag, options.mode, pb)
            if options.eigs_only:
                eigs = eig_solver(*evp_mtxs, n_eigs=n_eigs,
                                  eigenvectors=False)
                svecs = None
            else:
                eigs, svecs = eig_solver(*evp_mtxs, n_eigs=n_eigs,
                                         eigenvectors=True)
            omegas, svecs, out = process_evp_results(
                eigs, svecs, wmag, wdir, bzone, pb, mtxs, options,
                std_wave_fun=std_wave_fun
            )
            if options.stepper == 'linear':
                log(*out, x=[wmag, wmag])
            else:
                log(*out, x=[iv, iv])
            save_eigenvectors(eigenshapes_filename % iv, svecs, wmag, wdir, pb)
            gc.collect()
        log(save_figure=os.path.join(output_dir, 'frequencies.png'))
        log(finished=True)
    else:
        # --- kappa mode: solve for wave numbers over frequencies. ---
        eigenshapes_filename = os.path.join(output_dir,
                                            'wave-number-eigenshapes-%s.vtk'
                                            % stepper.suffix)
        log = Log([[r'$\kappa_{%d}$' % ii for ii in range(options.n_eigs)]
                   + log_names],
                  plot_kwargs=[plot_kwargs + log_plot_kwargs],
                  formats=[['{:.5e}'] * (options.n_eigs + len(log_names))],
                  yscales=['linear'],
                  xlabels=[r'$\omega$'],
                  ylabels=[r'wave numbers $\kappa_i$'],
                  show_legends=options.show_legends,
                  is_plot=options.show,
                  log_filename=os.path.join(output_dir, 'wave-numbers.txt'),
                  aggregate=1000, sleep=0.1)
        for io, omega in stepper:
            output('step %d: frequency %s' % (io, omega))
            evp_mtxs = build_evp_matrices(mtxs, omega, options.mode, pb)
            if options.eigs_only:
                eigs = eig_solver(*evp_mtxs, n_eigs=n_eigs,
                                  eigenvectors=False)
                svecs = None
            else:
                eigs, svecs = eig_solver(*evp_mtxs, n_eigs=n_eigs,
                                         eigenvectors=True)
            kappas, svecs, out = process_evp_results(
                eigs, svecs, omega, wdir, bzone, pb, mtxs, options,
                std_wave_fun=std_wave_fun
            )
            log(*out, x=[omega])
            save_eigenvectors(eigenshapes_filename % io, svecs, kappas, wdir,
                              pb)
            gc.collect()
        log(save_figure=os.path.join(output_dir, 'wave-numbers.png'))
        log(finished=True)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"numpy.sqrt",
"sfepy.base.conf.dict_from_string",
"sfepy.solvers.ts.TimeStepper",
"sfepy.linalg.utils.output_array_stats",
"numpy.array",
"numpy.isfinite",
"sfepy.base.ioutils.remove_files_patterns",
"sfepy.base.base.Struct.__init__",
"sfepy.base.ioutils.ensure_path",
"numpy.linalg.norm",
"copy.... | [((4031, 4051), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (4046, 4051), False, 'import sys\n'), ((5102, 5210), 'sfepy.mechanics.units.apply_unit_multipliers', 'apply_unit_multipliers', (['pars', "['stress', 'one', 'density', 'stress', 'one', 'density']", 'unit_multipliers'], {}), "(pars, ['stress', 'one', 'density', 'stress', 'one',\n 'density'], unit_multipliers)\n", (5124, 5210), False, 'from sfepy.mechanics.units import apply_unit_multipliers\n'), ((5666, 5715), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'vms'}), "(name='output_data', mode='cell', data=vms)\n", (5672, 5715), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((5919, 5958), 'sfepy.discrete.fem.MeshIO.any_from_filename', 'MeshIO.any_from_filename', (['filename_mesh'], {}), '(filename_mesh)\n', (5943, 5958), False, 'from sfepy.discrete.fem import MeshIO\n'), ((7812, 7838), 'sfepy.discrete.fem.periodic.set_accuracy', 'per.set_accuracy', (['mesh_eps'], {}), '(mesh_eps)\n', (7828, 7838), True, 'import sfepy.discrete.fem.periodic as per\n'), ((9143, 9205), 'sfepy.mechanics.matcoefs.youngpoisson_from_stiffness', 'mc.youngpoisson_from_stiffness', (['stiffness'], {'plane': 'options.plane'}), '(stiffness, plane=options.plane)\n', (9173, 9205), True, 'import sfepy.mechanics.matcoefs as mc\n'), ((9444, 9506), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""young"""', 'mode': '"""cell"""', 'data': 'young[..., None, None]'}), "(name='young', mode='cell', data=young[..., None, None])\n", (9450, 9506), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((9554, 9620), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""poisson"""', 'mode': '"""cell"""', 'data': 'poisson[..., None, None]'}), "(name='poisson', mode='cell', data=poisson[..., None, None])\n", (9560, 9620), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((9670, 9719), 
'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""density"""', 'mode': '"""cell"""', 'data': 'density'}), "(name='density', mode='cell', data=density)\n", (9676, 9719), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((9745, 9786), 'os.path.join', 'os.path.join', (['output_dir', '"""materials.vtk"""'], {}), "(output_dir, 'materials.vtk')\n", (9757, 9786), False, 'import os\n'), ((10042, 10104), 'sfepy.mechanics.matcoefs.youngpoisson_from_stiffness', 'mc.youngpoisson_from_stiffness', (['stiffness'], {'plane': 'options.plane'}), '(stiffness, plane=options.plane)\n', (10072, 10104), True, 'import sfepy.mechanics.matcoefs as mc\n'), ((10325, 10387), 'sfepy.mechanics.matcoefs.lame_from_youngpoisson', 'mc.lame_from_youngpoisson', (['young', 'poisson'], {'plane': 'options.plane'}), '(young, poisson, plane=options.plane)\n', (10350, 10387), True, 'import sfepy.mechanics.matcoefs as mc\n'), ((10439, 10454), 'numpy.average', 'nm.average', (['lam'], {}), '(lam)\n', (10449, 10454), True, 'import numpy as nm\n'), ((10465, 10479), 'numpy.average', 'nm.average', (['mu'], {}), '(mu)\n', (10475, 10479), True, 'import numpy as nm\n'), ((10495, 10514), 'numpy.average', 'nm.average', (['density'], {}), '(density)\n', (10505, 10514), True, 'import numpy as nm\n'), ((10525, 10563), 'numpy.sqrt', 'nm.sqrt', (['((alam + 2.0 * amu) / adensity)'], {}), '((alam + 2.0 * amu) / adensity)\n', (10532, 10563), True, 'import numpy as nm\n'), ((10573, 10596), 'numpy.sqrt', 'nm.sqrt', (['(amu / adensity)'], {}), '(amu / adensity)\n', (10580, 10596), True, 'import numpy as nm\n'), ((10601, 10636), 'sfepy.base.base.output', 'output', (['"""average p-wave speed:"""', 'cp'], {}), "('average p-wave speed:', cp)\n", (10607, 10636), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((10641, 10680), 'sfepy.base.base.output', 'output', (['"""average shear wave speed:"""', 'cs'], {}), "('average shear wave speed:', cs)\n", (10647, 10680), False, 'from 
sfepy.base.base import import_file, output, Struct\n'), ((12742, 12809), 'numpy.empty', 'nm.empty', (['(variables.di.ptr[-1], svecs.shape[1])'], {'dtype': 'svecs.dtype'}), '((variables.di.ptr[-1], svecs.shape[1]), dtype=svecs.dtype)\n', (12750, 12809), True, 'import numpy as nm\n'), ((14111, 14150), 'sfepy.base.conf.ProblemConf.from_dict', 'ProblemConf.from_dict', (['define_dict', 'mod'], {}), '(define_dict, mod)\n', (14132, 14150), False, 'from sfepy.base.conf import dict_from_string, ProblemConf\n'), ((14161, 14184), 'sfepy.discrete.Problem.from_conf', 'Problem.from_conf', (['conf'], {}), '(conf)\n', (14178, 14184), False, 'from sfepy.discrete import Problem\n'), ((14836, 14904), 'sfepy.base.base.output', 'output', (['"""scaling factor of periodic cell mesh coordinates:"""', 'scaling'], {}), "('scaling factor of periodic cell mesh coordinates:', scaling)\n", (14842, 14904), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((14909, 14979), 'sfepy.base.base.output', 'output', (['"""new mesh size with applied unit multipliers:"""', '(scaling * size)'], {}), "('new mesh size with applied unit multipliers:', scaling * size)\n", (14915, 14979), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((15131, 15182), 'sfepy.base.base.output', 'output', (['"""1. Brillouin zone size:"""', '(bzone * scaling0)'], {}), "('1. Brillouin zone size:', bzone * scaling0)\n", (15137, 15182), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((15187, 15257), 'sfepy.base.base.output', 'output', (['"""1. Brillouin zone size with applied unit multipliers:"""', 'bzone'], {}), "('1. 
Brillouin zone size with applied unit multipliers:', bzone)\n", (15193, 15257), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((21060, 21145), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=RawDescriptionHelpFormatter\n )\n', (21074, 21145), False, 'from argparse import ArgumentParser, RawDescriptionHelpFormatter\n'), ((26795, 26832), 'sfepy.base.conf.dict_from_string', 'dict_from_string', (['options.solver_conf'], {}), '(options.solver_conf)\n', (26811, 26832), False, 'from sfepy.base.conf import dict_from_string, ProblemConf\n'), ((26861, 26900), 'sfepy.base.conf.dict_from_string', 'dict_from_string', (['options.define_kwargs'], {}), '(options.define_kwargs)\n', (26877, 26900), False, 'from sfepy.base.conf import dict_from_string, ProblemConf\n'), ((27142, 27181), 'os.path.join', 'os.path.join', (['output_dir', '"""options.txt"""'], {}), "(output_dir, 'options.txt')\n", (27154, 27181), False, 'import os\n'), ((27186, 27207), 'sfepy.base.ioutils.ensure_path', 'ensure_path', (['filename'], {}), '(filename)\n', (27197, 27207), False, 'from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options\n'), ((27375, 27435), 'sfepy.base.base.output', 'output', (['"""material parameters with applied unit multipliers:"""'], {}), "('material parameters with applied unit multipliers:')\n", (27381, 27435), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((27440, 27452), 'sfepy.base.base.output', 'output', (['pars'], {}), '(pars)\n', (27446, 27452), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((28674, 28700), 'sfepy.solvers.Solver.any_from_conf', 'Solver.any_from_conf', (['conf'], {}), '(conf)\n', (28694, 28700), False, 'from sfepy.solvers import Solver\n'), ((7046, 7097), 'sfepy.homogenization.utils.define_box_regions', 'define_box_regions', (['dim', 'bbox[0]', 'bbox[1]', 
'mesh_eps'], {}), '(dim, bbox[0], bbox[1], mesh_eps)\n', (7064, 7097), False, 'from sfepy.homogenization.utils import define_box_regions\n'), ((11128, 11179), 'sfepy.solvers.ts.TimeStepper', 'TimeStepper', (['rng[0]', 'rng[1]'], {'dt': 'None', 'n_step': 'rng[2]'}), '(rng[0], rng[1], dt=None, n_step=rng[2])\n', (11139, 11179), False, 'from sfepy.solvers.ts import get_print_info, TimeStepper\n'), ((14397, 14449), 'numpy.asarray', 'nm.asarray', (['options.wave_dir[:dim]'], {'dtype': 'nm.float64'}), '(options.wave_dir[:dim], dtype=nm.float64)\n', (14407, 14449), True, 'import numpy as nm\n'), ((14618, 14685), 'sfepy.mechanics.units.apply_unit_multipliers', 'apply_unit_multipliers', (['[1.0]', "['length']", 'options.unit_multipliers'], {}), "([1.0], ['length'], options.unit_multipliers)\n", (14640, 14685), False, 'from sfepy.mechanics.units import apply_unit_multipliers\n'), ((15545, 15597), 'sfepy.linalg.utils.output_array_stats', 'output_array_stats', (['mtx.data', "('nonzeros in %s' % key)"], {}), "(mtx.data, 'nonzeros in %s' % key)\n", (15563, 15597), False, 'from sfepy.linalg.utils import output_array_stats, max_diff_csr\n'), ((15607, 15633), 'sfepy.base.base.output', 'output', (['"""symmetry checks:"""'], {}), "('symmetry checks:')\n", (15613, 15633), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((17054, 17067), 'numpy.sqrt', 'nm.sqrt', (['eigs'], {}), '(eigs)\n', (17061, 17067), True, 'import numpy as nm\n'), ((17077, 17100), 'sfepy.base.base.output', 'output', (['"""eigs, omegas:"""'], {}), "('eigs, omegas:')\n", (17083, 17100), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((17860, 17899), 'sfepy.base.base.output', 'output', (['"""raw kappas, masked real part:"""'], {}), "('raw kappas, masked real part:')\n", (17866, 17899), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((25966, 25991), 'sfepy.base.base.import_file', 'import_file', (['options.conf'], {}), '(options.conf)\n', (25977, 25991), 
False, 'from sfepy.base.base import import_file, output, Struct\n'), ((26932, 27040), 'sfepy.base.ioutils.remove_files_patterns', 'remove_files_patterns', (['output_dir', "['*.h5', '*.vtk', '*.txt']"], {'ignores': "['output_log.txt']", 'verbose': '(True)'}), "(output_dir, ['*.h5', '*.vtk', '*.txt'], ignores=[\n 'output_log.txt'], verbose=True)\n", (26953, 27040), False, 'from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options\n'), ((27500, 27519), 'copy.copy', 'copy', (['options.range'], {}), '(options.range)\n', (27504, 27519), False, 'from copy import copy\n'), ((27538, 27641), 'sfepy.mechanics.units.apply_unit_multipliers', 'apply_unit_multipliers', (['options.range[:2]', "['wave_number', 'wave_number']", 'options.unit_multipliers'], {}), "(options.range[:2], ['wave_number', 'wave_number'],\n options.unit_multipliers)\n", (27560, 27641), False, 'from sfepy.mechanics.units import apply_unit_multipliers\n'), ((27728, 27791), 'sfepy.base.base.output', 'output', (['"""wave number range with applied unit multipliers:"""', 'rng'], {}), "('wave number range with applied unit multipliers:', rng)\n", (27734, 27791), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((27939, 27958), 'copy.copy', 'copy', (['options.range'], {}), '(options.range)\n', (27943, 27958), False, 'from copy import copy\n'), ((27977, 28076), 'sfepy.mechanics.units.apply_unit_multipliers', 'apply_unit_multipliers', (['options.range[:2]', "['frequency', 'frequency']", 'options.unit_multipliers'], {}), "(options.range[:2], ['frequency', 'frequency'],\n options.unit_multipliers)\n", (27999, 28076), False, 'from sfepy.mechanics.units import apply_unit_multipliers\n'), ((28163, 28224), 'sfepy.base.base.output', 'output', (['"""frequency range with applied unit multipliers:"""', 'rng'], {}), "('frequency range with applied unit multipliers:', rng)\n", (28169, 28224), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((29476, 29549), 
'os.path.join', 'os.path.join', (['output_dir', "('frequency-eigenshapes-%s.vtk' % stepper.suffix)"], {}), "(output_dir, 'frequency-eigenshapes-%s.vtk' % stepper.suffix)\n", (29488, 29549), False, 'import os\n'), ((32758, 32833), 'os.path.join', 'os.path.join', (['output_dir', "('wave-number-eigenshapes-%s.vtk' % stepper.suffix)"], {}), "(output_dir, 'wave-number-eigenshapes-%s.vtk' % stepper.suffix)\n", (32770, 32833), False, 'import os\n'), ((11520, 11588), 'sfepy.base.base.Struct.__init__', 'Struct.__init__', (['self'], {'t0': 't0', 't1': 't1', 'dt': 'dt', 'n_step': 'n_step', 'step': 'step'}), '(self, t0=t0, t1=t1, dt=dt, n_step=n_step, step=step)\n', (11535, 11588), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((11643, 11670), 'sfepy.solvers.ts.get_print_info', 'get_print_info', (['self.n_step'], {}), '(self.n_step)\n', (11657, 11670), False, 'from sfepy.solvers.ts import get_print_info, TimeStepper\n'), ((11717, 11762), 'sfepy.solvers.ts.TimeStepper', 'TimeStepper', (['(0)', 'bzone[0]'], {'dt': 'None', 'n_step': 'num'}), '(0, bzone[0], dt=None, n_step=num)\n', (11728, 11762), False, 'from sfepy.solvers.ts import get_print_info, TimeStepper\n'), ((11904, 11949), 'sfepy.solvers.ts.TimeStepper', 'TimeStepper', (['(0)', 'bzone[1]'], {'dt': 'None', 'n_step': 'num'}), '(0, bzone[1], dt=None, n_step=num)\n', (11915, 11949), False, 'from sfepy.solvers.ts import get_print_info, TimeStepper\n'), ((12207, 12237), 'numpy.array', 'nm.array', (['[bzone[0], bzone[1]]'], {}), '([bzone[0], bzone[1]])\n', (12215, 12237), True, 'import numpy as nm\n'), ((12256, 12276), 'numpy.linalg.norm', 'nm.linalg.norm', (['wdir'], {}), '(wdir)\n', (12270, 12276), True, 'import numpy as nm\n'), ((12324, 12362), 'sfepy.solvers.ts.TimeStepper', 'TimeStepper', (['(0)', '(1)'], {'dt': 'None', 'n_step': 'num'}), '(0, 1, dt=None, n_step=num)\n', (12335, 12362), False, 'from sfepy.solvers.ts import get_print_info, TimeStepper\n'), ((13382, 13410), 
'sfepy.discrete.fem.meshio.convert_complex_output', 'convert_complex_output', (['aux2'], {}), '(aux2)\n', (13404, 13410), False, 'from sfepy.discrete.fem.meshio import convert_complex_output\n'), ((14472, 14492), 'numpy.linalg.norm', 'nm.linalg.norm', (['wdir'], {}), '(wdir)\n', (14486, 14492), True, 'import numpy as nm\n'), ((15676, 15700), 'sfepy.linalg.utils.max_diff_csr', 'max_diff_csr', (['mtx', 'mtx.T'], {}), '(mtx, mtx.T)\n', (15688, 15700), False, 'from sfepy.linalg.utils import output_array_stats, max_diff_csr\n'), ((15744, 15768), 'sfepy.linalg.utils.max_diff_csr', 'max_diff_csr', (['mtx', 'mtx.H'], {}), '(mtx, mtx.H)\n', (15756, 15768), False, 'from sfepy.linalg.utils import output_array_stats, max_diff_csr\n'), ((16522, 16550), 'sfepy.linalg.utils.max_diff_csr', 'max_diff_csr', (['mtx_a', 'mtx_a.H'], {}), '(mtx_a, mtx_a.H)\n', (16534, 16550), False, 'from sfepy.linalg.utils import output_array_stats, max_diff_csr\n'), ((18173, 18197), 'numpy.isfinite', 'nm.isfinite', (['kappas.real'], {}), '(kappas.real)\n', (18184, 18197), True, 'import numpy as nm\n'), ((25819, 25861), 'os.path.join', 'os.path.join', (['output_dir', '"""output_log.txt"""'], {}), "(output_dir, 'output_log.txt')\n", (25831, 25861), False, 'import os\n'), ((28505, 28540), 'os.path.join', 'os.path.join', (['output_dir', '"""regions"""'], {}), "(output_dir, 'regions')\n", (28517, 28540), False, 'import os\n'), ((31563, 31616), 'sfepy.base.base.output', 'output', (["('step %d: wave vector %s' % (iv, wmag * wdir))"], {}), "('step %d: wave vector %s' % (iv, wmag * wdir))\n", (31569, 31616), False, 'from sfepy.base.base import import_file, output, Struct\n'), ((32606, 32618), 'gc.collect', 'gc.collect', ([], {}), '()\n', (32616, 32618), False, 'import gc\n'), ((33566, 33611), 'sfepy.base.base.output', 'output', (["('step %d: frequency %s' % (io, omega))"], {}), "('step %d: frequency %s' % (io, omega))\n", (33572, 33611), False, 'from sfepy.base.base import import_file, output, Struct\n'), 
((34348, 34360), 'gc.collect', 'gc.collect', ([], {}), '()\n', (34358, 34360), False, 'import gc\n'), ((12003, 12027), 'numpy.array', 'nm.array', (['[bzone[0], k1]'], {}), '([bzone[0], k1])\n', (12011, 12027), True, 'import numpy as nm\n'), ((12050, 12070), 'numpy.linalg.norm', 'nm.linalg.norm', (['wdir'], {}), '(wdir)\n', (12064, 12070), True, 'import numpy as nm\n'), ((17780, 17799), 'numpy.abs', 'nm.abs', (['kappas.imag'], {}), '(kappas.imag)\n', (17786, 17799), True, 'import numpy as nm\n'), ((32644, 32687), 'os.path.join', 'os.path.join', (['output_dir', '"""frequencies.png"""'], {}), "(output_dir, 'frequencies.png')\n", (32656, 32687), False, 'import os\n'), ((33429, 33473), 'os.path.join', 'os.path.join', (['output_dir', '"""wave-numbers.txt"""'], {}), "(output_dir, 'wave-numbers.txt')\n", (33441, 33473), False, 'import os\n'), ((34386, 34430), 'os.path.join', 'os.path.join', (['output_dir', '"""wave-numbers.png"""'], {}), "(output_dir, 'wave-numbers.png')\n", (34398, 34430), False, 'import os\n'), ((6454, 6513), 'sfepy.mechanics.matcoefs.stiffness_from_youngpoisson', 'stiffness', (['dim'], {'young': 'young1', 'poisson': 'poisson1', 'plane': 'plane'}), '(dim, young=young1, poisson=poisson1, plane=plane)\n', (6463, 6513), True, 'from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson as stiffness\n'), ((6577, 6636), 'sfepy.mechanics.matcoefs.stiffness_from_youngpoisson', 'stiffness', (['dim'], {'young': 'young2', 'poisson': 'poisson2', 'plane': 'plane'}), '(dim, young=young2, poisson=poisson2, plane=plane)\n', (6586, 6636), True, 'from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson as stiffness\n'), ((30413, 30456), 'os.path.join', 'os.path.join', (['output_dir', '"""frequencies.txt"""'], {}), "(output_dir, 'frequencies.txt')\n", (30425, 30456), False, 'import os\n'), ((31296, 31339), 'os.path.join', 'os.path.join', (['output_dir', '"""frequencies.txt"""'], {}), "(output_dir, 'frequencies.txt')\n", (31308, 31339), False, 'import os\n'), 
((11825, 11845), 'numpy.array', 'nm.array', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (11833, 11845), True, 'import numpy as nm\n')] |
# export OPENBLAS_CORETYPE=ARMV8
import enum
import os
from re import X
from sre_constants import SUCCESS
import sys
import cv2
import math
import networktables
import torch
import torch.backends.cudnn as cudnn
import numpy as np
import time
from PIL import Image
from threading import Thread
from networktables import NetworkTables
import socket
from flask import Flask, render_template, Response
#os.chdir("/home/radicubs/RapidReact2022/src/main/python")
sys.path.insert(0, './yolov5')
from models.common import DetectMultiBackend
from utils.general import (check_img_size, check_imshow, non_max_suppression)
from utils.torch_utils import select_device
# Flask application that serves the MJPEG camera stream (see video_feed below).
app = Flask("frc vision")
class LoadWebcams:
    """Threaded multi-webcam frame loader.

    Opens every source in ``sources`` with OpenCV and spawns one capture
    thread per camera. For each camera, three views of the latest frame
    are maintained:

    * ``imgs``           -- model-ready batch: CHW, RGB (numpy)
    * ``scaled_ims``     -- display copies: HWC, BGR, cropped + resized
    * ``encoded_frames`` -- JPEG-encoded frames for the HTTP stream

    NOTE(review): reads the module-level global ``headless`` -- confirm it
    is defined before instantiation.
    """

    def __init__(self, sources, img_size=(640, 360), stride=32):
        """Open all cameras, grab one initial frame each, start capture threads.

        :param sources: camera indices/URLs for cv2.VideoCapture.
        :param img_size: (width, height) of the processed frames.
        :param stride: model stride (stored, not used here).
        """
        self.img_size = img_size
        self.stride = stride
        self.cams = sources
        n = len(self.cams)
        # ALL CAMERAS MUST BE SAME W and H
        w, h = img_size
        # batch size, channels, height, width
        self.imgs = np.zeros((n, 3, h, w))
        # BUGFIX: use comprehensions instead of [x] * n so each camera gets
        # its own placeholder array / socket rather than n aliases of one.
        self.scaled_ims = [np.zeros((3, h, w)) for _ in range(n)]
        self.encoded_frames = [np.zeros((3, h, w)) for _ in range(n)]
        self.streams = [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                        for _ in range(n)]
        self.fps, self.frames, self.threads = [0] * n, [0] * n, [None] * n
        for i, s in enumerate(self.cams):  # index, source
            cap = cv2.VideoCapture(s)
            # BUGFIX: the message referenced an undefined name ``st``, which
            # raised NameError instead of the intended assertion message.
            assert cap.isOpened(), f'Failed to open {s}'
            fps = cap.get(cv2.CAP_PROP_FPS)
            self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf')
            self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30  # 30 FPS fallback
            print(f'Camera {i} running at {self.fps[i]} fps')
            im = cap.read()[1]
            scaled_im = self._crop_and_scale(im)
            print("Cropped and scaled to " + str(scaled_im.shape))
            if not headless:
                self.scaled_ims[i] = scaled_im.copy()
            # BGR HWC -> RGB CHW for the model.
            self.imgs[i] = scaled_im[..., ::-1].transpose(2, 0, 1)
            self.threads[i] = Thread(target=self.capture, args=(i, cap, s), daemon=True)
            print(f"Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
            self.threads[i].start()

    def _crop_and_scale(self, im):
        """Center-crop ``im`` to the target aspect ratio, then resize to img_size."""
        w, h = self.img_size
        # Source-pixel height that matches the target aspect ratio.
        h_scaled = int((im.shape[1] / w) * h)
        top_margin = int((im.shape[0] - h_scaled) / 2)
        return cv2.resize(im[top_margin:(top_margin + h_scaled), :], (w, h))

    def capture(self, i, cap, stream):
        """Background loop: read frames from ``cap`` and fan out processing.

        :param i: camera index.
        :param cap: the opened cv2.VideoCapture.
        :param stream: the camera source (unused, kept for interface).
        """
        captured_count = 0
        start_time = time.time()
        while cap.isOpened():
            success, im = cap.read()  # reads height, width, channels in BGR
            # Offload conversion and JPEG encoding so the read loop keeps up.
            Thread(target=self.process_frame, args=(success, im, i), daemon=True).start()
            Thread(target=self.imencode_stream, args=(success, im, i), daemon=True).start()
            captured_count += 1
            if captured_count % 60 == 0:
                # BUGFIX: previously printed 1/elapsed for 60 frames, which is
                # neither seconds-per-frame nor FPS; report elapsed/60.
                print("Camera seconds per frame: " + str((time.time() - start_time) / 60.0))
                start_time = time.time()

    def process_frame(self, success, im, i):
        """Crop/scale one raw frame and publish it into imgs/scaled_ims."""
        if success:
            scaled_im = self._crop_and_scale(im)
            if not headless:
                self.scaled_ims[i] = scaled_im.copy()
            # BGR HWC -> RGB CHW for the model.
            self.imgs[i] = scaled_im[..., ::-1].transpose(2, 0, 1)
        else:
            print("WARNING: video stream yikes yikes unresponsive")
            self.imgs[i] = np.zeros_like(self.imgs[i])

    def imencode_stream(self, success, im, i):
        """JPEG-encode one raw frame for the HTTP stream."""
        if success:
            _, frame = cv2.imencode('.JPEG', im)
            self.encoded_frames[i] = frame

    def get_encoded_frame(self, idx):
        """Generator yielding multipart JPEG chunks for camera ``idx`` (MJPEG)."""
        while True:
            # BUGFIX: ndarray.tostring() was removed in NumPy >= 1.23.
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + self.encoded_frames[idx].tobytes() + b'\r\n')
            # BUGFIX: threading.Thread has no sleep(); use time.sleep().
            time.sleep(0.025)

    def __iter__(self):
        return self

    def __next__(self):
        return self.imgs, self.scaled_ims.copy()
@app.route('/')
def video_feed():
    """HTTP endpoint: stream camera 0 as a multipart MJPEG response."""
    frame_generator = dataset.get_encoded_frame(0)
    mjpeg_mimetype = 'multipart/x-mixed-replace; boundary=frame'
    return Response(frame_generator, mimetype=mjpeg_mimetype)
# Camera device indices to open (passed to cv2.VideoCapture).
cams = [2]
# device = "cpu"
device = "cpu" # gpu
# When True, skip all on-screen display (cv2.imshow).
headless = False
#weights = "./models/pytorch_3_b1_fp16.onnx"
weights="./models/pytorch_3.pt"
dnn=False # use OpenCV DNN for ONNX inference
data = "models/data.yaml"
imgsz=(640, 320) # inference size (width, height)
# NOTE(review): this unpacks h=640, w=320 although imgsz is documented as
# (width, height) -- the names look swapped; verify against the box
# rescaling in the inference loop before relying on them.
h,w = imgsz
# Non-max-suppression thresholds and limits.
conf_thres = 0.25
iou_thres = 0.45
classes = None
agnostic_nms = False
max_det = 10
bs = len(cams) # batch size
# Publish detections to the roboRIO over NetworkTables.
usenetworktables = True
if usenetworktables:
    ip = "roborio-7503-FRC.local"
    NetworkTables.initialize(server=ip)
    datatable = NetworkTables.getTable("data")
# CUDA visibility must be set before any torch CUDA call for it to stick.
if device == "cpu":
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False
else: # non-cpu device requested
    os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()
# Inference runs without autograd: weights are frozen, saving memory/time.
with torch.no_grad():
    device = select_device(device)
    half = True
    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
    model.eval()  # set model to eval mode
    stride, names, pt = model.stride, model.names, model.pt
    imgsz = check_img_size(imgsz, s=stride) # check image size
    if not headless:
        view_img = check_imshow()
    else:
        view_img = False
    cudnn.benchmark = True
    model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup
    dataset = LoadWebcams(cams, imgsz)
    # Serve the MJPEG stream in the background.
    Thread(target=app.run, args=('0.0.0.0', 8083), daemon=True).start()
    # Box-normalization factors taken from the frame shape: shape is
    # (height, width, channels), so indices [1, 0, 1, 0] give (w, h, w, h).
    gn = np.array(dataset.scaled_ims[0].shape)[[1, 0, 1, 0]]
    for batch, original_imgs in dataset:
        start_time = time.time()
        im = torch.from_numpy(batch)
        im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
        im /= 255 # 0 - 255 to 0.0 - 1.0
        if len(im.shape) == 3:
            im = im[None]  # add batch dimension
        im = im.to(device)
        preds = model(im)
        preds = non_max_suppression(preds, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
        detected_str = "nothing"
        for i, img_pred in enumerate(preds):
            for *xyxy, conf, cls in img_pred:
                # Rescale box corners from frame space into (h, w) space.
                det = (np.array(xyxy) / gn) * np.array([h,w,h,w])
                x1 = round(det[0].item())
                y1 = round(det[1].item())
                x2 = round(det[2].item())
                y2 = round(det[3].item())
                if (detected_str == "nothing"):
                    detected_str = ""
                # Emit "center_x center_y radius" for this detection.
                # BUGFIX: ``^`` is bitwise XOR in Python; squaring requires ``**``.
                detected_str += " ".join([str(x) for x in [((x1 + x2) / 2), ((y1 + y2) / 2), (0.5 * math.sqrt((x2 - x1)**2 + (y2 - y1)**2))]])
                detected_str += " " # separates out ball elements
                if view_img:
                    original_imgs[i] = cv2.rectangle(original_imgs[i], (x1, y1), (x2, y2), (0, 255, 0), 2)
        if view_img:
            cv2.imshow(str(i), original_imgs[i])
            cv2.waitKey(1)
        if usenetworktables:
            datatable.putString(str(i), detected_str)
        print(detected_str)
        print("FPS inference: " + str(1.0 / (time.time() - start_time)))
| [
"models.common.DetectMultiBackend",
"cv2.rectangle",
"sys.path.insert",
"flask.Flask",
"utils.general.check_img_size",
"math.sqrt",
"torch.from_numpy",
"numpy.array",
"networktables.NetworkTables.initialize",
"cv2.waitKey",
"math.isfinite",
"threading.Thread.sleep",
"networktables.NetworkTab... | [((459, 489), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./yolov5"""'], {}), "(0, './yolov5')\n", (474, 489), False, 'import sys\n'), ((665, 684), 'flask.Flask', 'Flask', (['"""frc vision"""'], {}), "('frc vision')\n", (670, 684), False, 'from flask import Flask, render_template, Response\n'), ((5538, 5573), 'networktables.NetworkTables.initialize', 'NetworkTables.initialize', ([], {'server': 'ip'}), '(server=ip)\n', (5562, 5573), False, 'from networktables import NetworkTables\n'), ((5590, 5620), 'networktables.NetworkTables.getTable', 'NetworkTables.getTable', (['"""data"""'], {}), "('data')\n", (5612, 5620), False, 'from networktables import NetworkTables\n'), ((5886, 5901), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5899, 5901), False, 'import torch\n'), ((5916, 5937), 'utils.torch_utils.select_device', 'select_device', (['device'], {}), '(device)\n', (5929, 5937), False, 'from utils.torch_utils import select_device\n'), ((5966, 6039), 'models.common.DetectMultiBackend', 'DetectMultiBackend', (['weights'], {'device': 'device', 'dnn': 'dnn', 'data': 'data', 'fp16': 'half'}), '(weights, device=device, dnn=dnn, data=data, fp16=half)\n', (5984, 6039), False, 'from models.common import DetectMultiBackend\n'), ((6158, 6189), 'utils.general.check_img_size', 'check_img_size', (['imgsz'], {'s': 'stride'}), '(imgsz, s=stride)\n', (6172, 6189), False, 'from utils.general import check_img_size, check_imshow, non_max_suppression\n'), ((1083, 1105), 'numpy.zeros', 'np.zeros', (['(n, 3, h, w)'], {}), '((n, 3, h, w))\n', (1091, 1105), True, 'import numpy as np\n'), ((2776, 2787), 'time.time', 'time.time', ([], {}), '()\n', (2785, 2787), False, 'import time\n'), ((6251, 6265), 'utils.general.check_imshow', 'check_imshow', ([], {}), '()\n', (6263, 6265), False, 'from utils.general import check_img_size, check_imshow, non_max_suppression\n'), ((6524, 6561), 'numpy.array', 'np.array', (['dataset.scaled_ims[0].shape'], {}), 
'(dataset.scaled_ims[0].shape)\n', (6532, 6561), True, 'import numpy as np\n'), ((6639, 6650), 'time.time', 'time.time', ([], {}), '()\n', (6648, 6650), False, 'import time\n'), ((6664, 6687), 'torch.from_numpy', 'torch.from_numpy', (['batch'], {}), '(batch)\n', (6680, 6687), False, 'import torch\n'), ((6929, 7022), 'utils.general.non_max_suppression', 'non_max_suppression', (['preds', 'conf_thres', 'iou_thres', 'classes', 'agnostic_nms'], {'max_det': 'max_det'}), '(preds, conf_thres, iou_thres, classes, agnostic_nms,\n max_det=max_det)\n', (6948, 7022), False, 'from utils.general import check_img_size, check_imshow, non_max_suppression\n'), ((1482, 1501), 'cv2.VideoCapture', 'cv2.VideoCapture', (['s'], {}), '(s)\n', (1498, 1501), False, 'import cv2\n'), ((2157, 2216), 'cv2.resize', 'cv2.resize', (['im[top_margin:top_margin + h_scaled, :]', '(w, h)'], {}), '(im[top_margin:top_margin + h_scaled, :], (w, h))\n', (2167, 2216), False, 'import cv2\n'), ((2502, 2560), 'threading.Thread', 'Thread', ([], {'target': 'self.capture', 'args': '[i, cap, s]', 'daemon': '(True)'}), '(target=self.capture, args=[i, cap, s], daemon=True)\n', (2508, 2560), False, 'from threading import Thread\n'), ((3896, 3955), 'cv2.resize', 'cv2.resize', (['im[top_margin:top_margin + h_scaled, :]', '(w, h)'], {}), '(im[top_margin:top_margin + h_scaled, :], (w, h))\n', (3906, 3955), False, 'import cv2\n'), ((4252, 4279), 'numpy.zeros_like', 'np.zeros_like', (['self.imgs[i]'], {}), '(self.imgs[i])\n', (4265, 4279), True, 'import numpy as np\n'), ((4427, 4452), 'cv2.imencode', 'cv2.imencode', (['""".JPEG"""', 'im'], {}), "('.JPEG', im)\n", (4439, 4452), False, 'import cv2\n'), ((4701, 4720), 'threading.Thread.sleep', 'Thread.sleep', (['(0.025)'], {}), '(0.025)\n', (4713, 4720), False, 'from threading import Thread\n'), ((6440, 6499), 'threading.Thread', 'Thread', ([], {'target': 'app.run', 'args': "['0.0.0.0', 8083]", 'daemon': '(True)'}), "(target=app.run, args=['0.0.0.0', 8083], daemon=True)\n", 
(6446, 6499), False, 'from threading import Thread\n'), ((1171, 1190), 'numpy.zeros', 'np.zeros', (['(3, h, w)'], {}), '((3, h, w))\n', (1179, 1190), True, 'import numpy as np\n'), ((1227, 1246), 'numpy.zeros', 'np.zeros', (['(3, h, w)'], {}), '((3, h, w))\n', (1235, 1246), True, 'import numpy as np\n'), ((1276, 1324), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (1289, 1324), False, 'import socket\n'), ((3522, 3533), 'time.time', 'time.time', ([], {}), '()\n', (3531, 3533), False, 'import time\n'), ((7905, 7919), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7916, 7919), False, 'import cv2\n'), ((3154, 3223), 'threading.Thread', 'Thread', ([], {'target': 'self.process_frame', 'args': '[success, im, i]', 'daemon': '(True)'}), '(target=self.process_frame, args=[success, im, i], daemon=True)\n', (3160, 3223), False, 'from threading import Thread\n'), ((3246, 3317), 'threading.Thread', 'Thread', ([], {'target': 'self.imencode_stream', 'args': '[success, im, i]', 'daemon': '(True)'}), '(target=self.imencode_stream, args=[success, im, i], daemon=True)\n', (3252, 3317), False, 'from threading import Thread\n'), ((7191, 7213), 'numpy.array', 'np.array', (['[h, w, h, w]'], {}), '([h, w, h, w])\n', (7199, 7213), True, 'import numpy as np\n'), ((7743, 7810), 'cv2.rectangle', 'cv2.rectangle', (['original_imgs[i]', '(x1, y1)', '(x2, y2)', '(0, 255, 0)', '(2)'], {}), '(original_imgs[i], (x1, y1), (x2, y2), (0, 255, 0), 2)\n', (7756, 7810), False, 'import cv2\n'), ((7168, 7182), 'numpy.array', 'np.array', (['xyxy'], {}), '(xyxy)\n', (7176, 7182), True, 'import numpy as np\n'), ((1738, 1756), 'math.isfinite', 'math.isfinite', (['fps'], {}), '(fps)\n', (1751, 1756), False, 'import math\n'), ((8083, 8094), 'time.time', 'time.time', ([], {}), '()\n', (8092, 8094), False, 'import time\n'), ((3465, 3476), 'time.time', 'time.time', ([], {}), '()\n', (3474, 3476), False, 'import time\n'), ((7566, 
7604), 'math.sqrt', 'math.sqrt', (['(x2 - x1 ^ 2 + (y2 - y1) ^ 2)'], {}), '(x2 - x1 ^ 2 + (y2 - y1) ^ 2)\n', (7575, 7604), False, 'import math\n')] |
""" Code generation for PyTorch C++ dispatched operators. """
import copy
import dataclasses
import itertools
import logging
import operator
import os
from typing import List, Tuple, Callable, Optional, Dict, Union
import dace.library
import numpy as np
import torch
from dace import dtypes as dt, data
from dace.codegen import targets, compiler
from dace.codegen.codeobject import CodeObject
from dace.codegen.compiled_sdfg import CompiledSDFG
from dace.codegen.prettycode import CodeIOStream
from dace.codegen.targets.common import sym2cpp
from daceml.autodiff import BackwardResult
from daceml.pytorch.environments import PyTorch
from daceml.util import is_cuda, platform_library_name
from daceml.pytorch.dispatchers.common import DaCeMLTorchFunction, compile_and_init_sdfgs, get_arglist
# Module-level logger for this code-generation module.
log = logging.getLogger(__name__)
# dace dtypes whose default ctype must be spelled differently in the
# generated torch extension (e.g. half precision maps to at::Half).
_REPLACED_CTYPES = {
    dace.int64: "int64_t",
    dace.uint64: "uint64_t",
    dace.float16: "at::Half"
}
def torch_ctype(dtype: dace.typeclass) -> str:
    """Return the C type name used for ``dtype`` in the generated torch code."""
    # Pointers travel through torch as 64-bit integers.
    if isinstance(dtype, dace.pointer):
        return "int64_t"
    # Some dtypes need a torch-specific spelling (see _REPLACED_CTYPES).
    if dtype in _REPLACED_CTYPES:
        return _REPLACED_CTYPES[dtype]
    return dtype.ctype
# Map from dace typeclass to the torch ScalarType enumerator name used in
# generated C++ (e.g. torch::kFloat32).
_TYPECLASS_TO_TORCH_DTYPE_STR = {
    dt.bool: "kBool",
    dt.int8: "kInt8",
    dt.uint8: "kUInt8",
    dt.int16: "kInt16",
    dt.int32: "kInt32",
    dt.int64: "kInt64",
    dt.float16: "kFloat16",
    dt.float32: "kFloat32",
    dt.float64: "kFloat64",
}
def typeclass_to_torch_cpp_type(type: dace.typeclass) -> str:
    """Return the torch ScalarType name (e.g. ``kFloat32``) for ``type``."""
    # Pointers are passed around as 64-bit integers.
    is_pointer = isinstance(type, dace.pointer)
    return "kInt64" if is_pointer else _TYPECLASS_TO_TORCH_DTYPE_STR[type]
def tensor_init_for_desc(name: str, desc: data.Data, zeros=False) -> str:
    """ Emit the C++ statement allocating a torch tensor matching ``desc``.

        :param name: the C++ variable name of the tensor.
        :param desc: the data descriptor to mirror (shape, dtype, storage).
        :param zeros: if True, zero-initialize instead of leaving uninitialized.
        :return: the C++ allocation statement as a string.
    """
    factory = 'zeros' if zeros else 'empty'
    shape = ', '.join(str(s) for s in desc.shape)
    scalar_type = typeclass_to_torch_cpp_type(desc.dtype)
    device = 'kCUDA' if is_cuda(desc.storage) else 'kCPU'
    return (f"Tensor {name} = torch::{factory}(\n"
            f"    {{{shape}}},\n"
            f"    torch::TensorOptions()\n"
            f"        .dtype(torch::{scalar_type})\n"
            f"        .device(torch::{device})\n"
            f"        .layout(torch::kStrided));\n")
def initialize_outputs_code(module: 'daceml.pytorch.DaceModule',
                            output_names: List[str]) -> str:
    """ Generate the code that allocates the output tensors.

        :param module: the module whose SDFG declares the outputs.
        :param output_names: the output names of the SDFG.
        :return: the concatenated C++ allocation statements.
    """
    descriptors = module.sdfg.arglist()
    # One allocation statement per output, in deterministic (sorted) order.
    return "".join(
        tensor_init_for_desc(name, descriptors[name])
        for name in sorted(output_names))
def argument_codegen(
        sdfg: dace.SDFG,
        clean_weights: Dict[str, torch.Tensor],
        input_names: List[str],
        output_names: List[str],
        guard_contiguous: Optional[List[str]] = None) -> Tuple[str, str, str]:
    """ Generate the code that grabs the pointers of inputs and outputs.
        The names of the tensors will match the SDFG tensor names. Tensors that are not created by us (i.e. inputs)
        should be named {sdfg_name}_ first, and then .contiguous() will be called on them to yield the tensor that we
        require. This is the case for all tensors in ``guard_contiguous``.
        :param sdfg: the SDFG to generate argument-handling code for.
        :param clean_weights: the constant weights of the SDFG.
        :param input_names: names of inputs to the torch function.
        :param output_names: names of outputs to the torch function.
        :param guard_contiguous: a subset of input_names to call .contiguous on. If None, all input names will be
                                 guarded.
        :return: the code for initializing the argument, the sdfg arguments in order, and the init call arguments
    """
    arglist = sdfg.arglist()
    # if no subset was given, guard every input
    guard_contiguous = set(guard_contiguous or input_names)
    # initialize the inputs and outputs
    ptr_init_code = "\n// setup input and output pointers\n"
    for name in sorted(input_names):
        tctype = torch_ctype(arglist[name].dtype)
        dctype = arglist[name].dtype
        if isinstance(arglist[name], data.Array) or dt.can_access(
                dt.ScheduleType.GPU_Device, arglist[name].storage):
            if name in guard_contiguous:
                # in debug builds, warn at runtime when an input was not
                # already contiguous (the .contiguous() call below copies)
                if logging.root.level <= logging.DEBUG:
                    ptr_init_code += f"""
                    if (!{name}_.is_contiguous()) {{
                        fprintf(stderr, "{name} was not contiguous!");
                    }}
                    """
                ptr_init_code += '\n' + f"Tensor {name} = {name}_.contiguous();"
            ptr_init_code += '\n' + f"{dctype} *{name}_ptr = reinterpret_cast<{dctype}*>({name}.data_ptr<{tctype}>());"
        elif isinstance(arglist[name], data.Scalar):
            # scalars are passed by value: read them out of the tensor
            if name in guard_contiguous:
                ptr_init_code += '\n' + f"{dctype} {name}_ptr = static_cast<{dctype}>({name}_.item().to<{tctype}>());"
            else:
                ptr_init_code += '\n' + f"{dctype} {name}_ptr = static_cast<{dctype}>({name}.item().to<{tctype}>());"
        else:
            raise ValueError(
                f"Unsupported data type {type(arglist[name])} for descriptor {name}"
            )
    ptr_init_code += '\n'
    # outputs and bwd arrays
    ptr_init_code += '\n'.join(
        f"{arglist[name].dtype.ctype} *{name}_ptr = reinterpret_cast<{arglist[name].dtype.ctype}*>"
        f"({name}.data_ptr<{torch_ctype(arglist[name].dtype)}>());"
        for name in sorted(output_names))
    ptr_init_code += "\n// setup constant arguments\n"
    # collect the data names that actually appear in some state of the SDFG
    all_access_nodes = set()
    for state in sdfg.nodes():
        all_access_nodes |= set(n.data for n in state.data_nodes())
    # initialize all remaining parameters
    remaining = set(arglist).difference(
        itertools.chain(input_names, output_names))
    for name in sorted(remaining):
        # remaining args must be constants
        if name not in clean_weights:
            raise ValueError(
                f"Cannot generate PyTorch module C++ code: SDFG argument {name} is not an input or output"
                f" of the PyTorch Module, and not a constant.")
        if arglist[name].total_size > 1000:
            raise ValueError(
                f"Cannot generate PyTorch module C++ code: SDFG argument {name} is not an input or output"
                f" of the PyTorch Module, and is too large.")
        # Skip parameter if it is not used
        if name not in all_access_nodes:
            desc = sdfg.arrays[name]
            ptr_init_code += f"{desc.dtype.ctype} *{name}_ptr = nullptr;\n"
            continue
        # embed the constant's value directly into the generated source
        value = clean_weights[name]
        ptr_init_code += f"{constant_initializer_code(name, arglist[name], value)}\n"
    arguments = ", ".join(f"{n}_ptr" for n in arglist)
    # only scalars are passed to the SDFG's init function
    init_arguments = ", ".join(f"{n}_ptr" for n, desc in arglist.items()
                               if isinstance(desc, data.Scalar))
    return ptr_init_code, arguments, init_arguments
def item_to_cpp_literal(item) -> str:
    """Render a numpy scalar as a C++ literal of the matching type.

    :param item: a numpy scalar (element of an ``np.nditer``).
    :return: the C++ literal as a string.
    :raises ValueError: if the scalar's dtype is unsupported.
    """
    kind = str(item.dtype)
    # Types whose Python repr is already a valid C++ literal.
    if kind in ("float64", "int32", "int16", "int8"):
        return str(item)
    if kind == "float32":
        return f"{item}f"  # float literal suffix
    if kind == "int64":
        return f"{item}l"  # long literal suffix
    if kind == "bool":
        return str(item).lower()  # True/False -> true/false
    if kind == "float16":
        # half has no literal syntax; emit an explicit cast instead.
        ctype = dace.dtypes._CTYPES[item.dtype.type]
        return f"(({ctype}){item})"
    raise ValueError(f"Unsupported tensor type {item.dtype}")
def constant_initializer_code(name: str, desc: data.Data, value) -> str:
    """ Emit C++ code that initializes the constant ``name`` with ``value``.
        Arrays are embedded as C array literals (and copied to the GPU when
        the descriptor lives in GPU storage); scalars become plain variables.
        :param name: the SDFG argument name.
        :param desc: the data descriptor of the constant.
        :param value: the constant's value (a torch tensor).
        :return: the C++ initialization code as a string.
    """
    gpu_storage = dt.can_access(dt.ScheduleType.GPU_Device, desc.storage)
    if desc.total_size == 0:
        # empty array: no storage needed, just a null pointer
        return f"{desc.dtype.ctype} *{name}_ptr = nullptr;"
    elif isinstance(desc, data.Array) or gpu_storage:
        numpyval = value.cpu().numpy()
        if len(numpyval.shape) == 0:
            # 0-d arrays cannot be iterated; view them as 1-element arrays
            numpyval = numpyval.reshape((1, ))
        iterator = np.nditer(numpyval, order="C")
        gpu_copy_code = f"""
        Tensor {name} = torch::from_blob({name}_ptr_cpu, {{{', '.join(sym2cpp(s) for s in desc.shape)}}},
        {{{', '.join(sym2cpp(s) for s in desc.strides)}}}, torch::{typeclass_to_torch_cpp_type(desc.dtype)})
        .to(torch::kCUDA);
        {desc.dtype.ctype} *{name}_ptr = reinterpret_cast<{desc.dtype.ctype}*>({name}.data_ptr<{torch_ctype(desc.dtype)}>());
        """
        return f"""
        {desc.dtype.ctype} {name}_ptr{'_cpu' if gpu_storage else ''}[{sym2cpp(desc.total_size)}] =
        {{{', '.join(item_to_cpp_literal(e) for e in iterator)}}};
        {gpu_copy_code if gpu_storage else ""}
        """
    elif isinstance(desc, data.Scalar):
        return f"{desc.dtype.ctype} {name}_ptr = {str(value.item())};"
    else:
        raise ValueError("Unsupported data descriptor")
def return_type_str(outputs: List[str]) -> str:
    """Return the C++ return type for a function with the given outputs."""
    if len(outputs) == 1:
        return "Tensor"
    # Multiple (or zero) outputs are packed into a std::tuple.
    inner = ", ".join(["Tensor"] * len(outputs))
    return f"std::tuple<{inner}>"
def save_non_inputs_outputs(names: List[str]):
    """Emit statements storing each named tensor in the autograd context."""
    statements = [f'ctx->saved_data["{n}"] = {n};' for n in names]
    return "\n".join(statements)
def recover_saved_inputs_outputs(saved_inputs_outputs: List[str],
                                 other_saved: List[str]):
    """Emit statements reloading saved tensors inside the backward pass.

    Inputs/outputs come out of ``get_saved_variables()`` (in order); other
    values are fetched from ``saved_data`` by name.
    """
    parts = []
    if saved_inputs_outputs:
        parts.append("auto saved = ctx->get_saved_variables();\n")
    parts.extend(f"\nauto {n} = saved[{i}];"
                 for i, n in enumerate(saved_inputs_outputs))
    parts.extend(f'\nauto {n} = ctx->saved_data["{n}"].toTensor();'
                 for n in other_saved)
    return "".join(parts)
def setup_grad_values(backward_result: BackwardResult, sdfg: dace.SDFG,
                      outputs: List[str]) -> str:
    """Emit C++ code allocating input-gradient tensors and binding output grads."""
    pieces = ["// input grads"]
    # Allocate one tensor per required gradient; zero-initialized unless the
    # backward result explicitly disables it for that parameter.
    for param_name, grad_name in sorted(
            backward_result.required_grad_names.items()):
        zeros = backward_result.zero_init.get(param_name, True)
        pieces.append("\n" + tensor_init_for_desc(
            grad_name, sdfg.arrays[grad_name], zeros=zeros))
    pieces.append("// output grads")
    # Bind each incoming gradient from torch's grad_outputs list, in order.
    pieces.extend(
        f"\nauto {backward_result.given_grad_names[o]}_ = grad_outputs[{idx}];"
        for idx, o in enumerate(outputs))
    return "".join(pieces)
def code_for_backward_function(module: 'daceml.pytorch.DaceModule',
                               forward_sdfg: dace.SDFG,
                               backward_sdfg: dace.SDFG,
                               backward_result: BackwardResult,
                               forwarded_arrays: Dict[str, data.Data]) -> str:
    """ Generate the C++ source of a differentiable torch operator that
        dispatches to the given forward and backward SDFGs.
        :param module: the DaceModule the SDFGs were generated from.
        :param forward_sdfg: the SDFG of the forward pass.
        :param backward_sdfg: the SDFG of the backward pass.
        :param backward_result: the gradient name mappings for the backward pass.
        :param forwarded_arrays: descriptors of values the forward pass must
                                 save for the backward pass.
        :return: the generated C++ code as a string.
    """
    inputs, outputs = get_arglist(module)
    sdfg_name = forward_sdfg.name
    ret_str = return_type_str(outputs)
    # the forward SDFG also writes out the arrays forwarded to backward
    outputs_with_forwarded_outputs = copy.deepcopy(outputs)
    outputs_with_forwarded_outputs.extend(
        n for n in forwarded_arrays if n not in inputs and n not in outputs)
    fwd_ptr_init_code, fwd_sdfg_call_arguments, _ = argument_codegen(
        forward_sdfg, module.dace_model.clean_weights, inputs,
        outputs_with_forwarded_outputs)
    # inputs are given_grads + forwarded_outputs
    bwd_inputs = list(
        backward_result.given_grad_names.values()) + list(forwarded_arrays)
    # outputs are required grads
    bwd_outputs = list(backward_result.required_grad_names.values())
    bwd_ptr_init_code, bwd_sdfg_call_arguments, _ = argument_codegen(
        backward_sdfg,
        module.dace_model.clean_weights,
        bwd_inputs,
        bwd_outputs,
        guard_contiguous=list(backward_result.given_grad_names.values()))
    # saved inputs/outputs
    saved_io_for_backward = [
        n for n in forwarded_arrays if n in inputs or n in outputs
    ]
    other_saved_for_backward = [
        n for n in forwarded_arrays if n not in inputs and n not in outputs
    ]
    return f"""
{get_header(forward_sdfg, backward_sdfg, inputs, outputs, module.use_cuda)}
class {sdfg_name}Function : public torch::autograd::Function<{sdfg_name}Function> {{
    public:
        static
        {ret_str}
        forward(
            AutogradContext *ctx,
            int64_t fwd_handle_ptr, int64_t bwd_handle_ptr, {", ".join(f"const Tensor& {name}_" for name in inputs)}) {{
            at::AutoNonVariableTypeMode g;
            // initialize outputs
            {initialize_outputs_code(module, outputs_with_forwarded_outputs)}
            {fwd_ptr_init_code}
            // get SDFG state handle
            {forward_sdfg.name}Handle_t handle = reinterpret_cast<{forward_sdfg.name}Handle_t>(fwd_handle_ptr);
            // call SDFG
            __program_{forward_sdfg.name}(handle, {fwd_sdfg_call_arguments});
            // save inputs/outputs for backward
            {
                f"ctx->save_for_backward({{{', '.join(f'{n}' for n in saved_io_for_backward)}}});"
                if saved_io_for_backward else ""
            }
            // save non-inputs/outputs
            {save_non_inputs_outputs(other_saved_for_backward)}
            // save bwd handle
            ctx->saved_data["bwd_handle"] = bwd_handle_ptr;
            // return to torch
            return {f"{outputs[0]}" if len(outputs) == 1
            else f"{{{', '.join(o for o in outputs)}}}"};
        }}
        static tensor_list backward(AutogradContext *ctx, tensor_list grad_outputs) {{
            // recover bwd_handle_ptr
            int64_t bwd_handle_ptr = ctx->saved_data.find("bwd_handle")->second.toInt();
            // recover saved values
            {recover_saved_inputs_outputs(saved_io_for_backward, other_saved_for_backward)}
            // create grad values
            // NOTE, it might make sense take these from .grad()
            {setup_grad_values(backward_result, backward_sdfg, outputs)}
            {bwd_ptr_init_code}
            // get SDFG state handle
            {backward_sdfg.name}Handle_t handle = reinterpret_cast<{backward_sdfg.name}Handle_t>(bwd_handle_ptr);
            // call bwd SDFG
            __program_{backward_sdfg.name}(handle, {bwd_sdfg_call_arguments});
            // return calculated grads in correct order
            // first two grads are None (these are the grads for the handle ptrs)
            return {{
                Tensor(), Tensor(), {', '.join(backward_result.required_grad_names[i] if i in backward_result.required_grad_names else 'Tensor()' for i in inputs )}
            }};
        }}
}};
{ret_str}
{sdfg_name}_autograd(int64_t handle_ptr, int64_t bwd_handle_ptr, {",".join(f"const Tensor& {name}_" for name in inputs)}) {{
    return {sdfg_name}Function::apply(
        handle_ptr, bwd_handle_ptr, {", ".join(f"{name}_" for name in inputs)}
    );
}}
TORCH_LIBRARY_IMPL(daceml_{sdfg_name}, Autograd{'CUDA' if module.use_cuda else 'CPU'}, m) {{
    m.impl("{sdfg_name}", {sdfg_name}_autograd);
}}
"""
def code_for_module(module: 'daceml.pytorch.DaceModule',
                    compiled_sdfg: CompiledSDFG) -> str:
    """ Generate the code for an operator that calls the sdfgs in the module.

        Forward-only variant of :func:`code_for_backward_function`: it emits a
        plain C++ function plus its ``TORCH_LIBRARY_IMPL`` registration, with
        no autograd machinery.

        :param module: the module.
        :param compiled_sdfg: the compiled SDFG.
        :return: the generated C++ source code as a string.
    """
    inputs, outputs = get_arglist(module)
    sdfg_name = compiled_sdfg.sdfg.name
    ret_str = return_type_str(outputs)
    # pointer-extraction code and argument lists for the SDFG entry point
    ptr_init_code, sdfg_call_arguments, init_arguments = argument_codegen(
        compiled_sdfg.sdfg, module.dace_model.clean_weights, inputs, outputs)
    # NOTE: re-indented later by indent_code(); '//' comments are emitted C++
    return f"""
{get_header(compiled_sdfg.sdfg, None, inputs, outputs, module.use_cuda)}
// function definition
{ret_str}
{sdfg_name}(int64_t handle_ptr, {",".join(f"const Tensor& {name}_" for name in inputs)}) {{
// initialize outputs
{initialize_outputs_code(module, outputs)}
{ptr_init_code}
// get SDFG state handle
{sdfg_name}Handle_t handle = reinterpret_cast<{sdfg_name}Handle_t>(handle_ptr);
// call SDFG
__program_{sdfg_name}(handle, {sdfg_call_arguments});
// return to torch
return {f"{outputs[0]}" if len(outputs) == 1
else f"{{{', '.join(o for o in outputs)}}}"};
}}
TORCH_LIBRARY_IMPL(daceml_{sdfg_name}, {'CUDA' if module.use_cuda else 'CPU'}, m) {{
m.impl("{sdfg_name}", {sdfg_name});
}}
"""
def get_header(fwd_sdfg: dace.SDFG, bwd_sdfg: Optional[dace.SDFG], inputs,
               outputs, use_cuda: bool) -> str:
    """ Generate the common C++ prelude for a generated torch operator:
        includes, using-declarations and the ``TORCH_LIBRARY`` schema
        declaration.

        :param fwd_sdfg: the forward SDFG; its header is always included.
        :param bwd_sdfg: the backward SDFG, or None when there is no backward
                         pass. When present, its header and an extra
                         ``bwd_handle_ptr`` schema argument are emitted.
        :param inputs: names of the operator's tensor inputs.
        :param outputs: names of the operator's tensor outputs (a single
                        output declares ``-> Tensor``, several declare a
                        tuple return).
        :param use_cuda: whether the module targets CUDA; not referenced in
                         the emitted header (kept for caller symmetry).
        :return: the C++ header code as a string.
    """
    return f"""
#include <torch/torch.h>
#include <torch/script.h>
#include "{fwd_sdfg.name}.h"
{"" if bwd_sdfg is None else f'#include "{bwd_sdfg.name}.h"'}
using torch::Tensor;
using torch::DeviceType;
using torch::autograd::tensor_list;
using torch::autograd::AutogradContext;
TORCH_LIBRARY(daceml_{fwd_sdfg.name}, m) {{
m.def("{fwd_sdfg.name}(int handle_ptr,{"int bwd_handle_ptr," if bwd_sdfg else ""} {", ".join('Tensor ' + arg for arg in inputs)}) -> {'Tensor' if len(outputs) == 1
else "(" + ", ".join(['Tensor'] * len(outputs)) + ")"}");
}}
"""
def register_and_compile_torch_extension(module: 'daceml.pytorch.DaceModule',
                                         dummy_inputs) -> DaCeMLTorchFunction:
    """ Get a torch callable for the module. This will compile the sdfg, compile a PyTorch C++ operator, register it
        with PyTorch and return the function that calls it.
        This function handles code generation for both the forward and backward pass.
        :param module: the module.
        :param dummy_inputs: dummy inputs to initialize the model with.
        :return: the callable function for the SDFG.
    """
    # build the SDFG
    # set all states to not-sync
    for state in module.sdfg.nodes():
        state.nosync = True
    # CMake environments the generated operator must link against;
    # always includes the PyTorch environment
    environments = {
        PyTorch.full_class_path(),
    }
    if module.backward:
        # compile forward + backward SDFGs and obtain their state handles
        compiled, handle_ptr, compiled_bwd, bwd_handle_ptr = compile_and_init_sdfgs(
            module, dummy_inputs)
        compiled_sdfgs = [compiled, compiled_bwd]
        ptrs = [handle_ptr, bwd_handle_ptr]
        if compiled_bwd is not None:
            environments.add(get_env_for_sdfg(compiled_bwd).full_class_path())
            bwd_sdfg = compiled_bwd.sdfg
            # emit the autograd::Function wrapper for forward + backward
            code = code_for_backward_function(module, compiled.sdfg, bwd_sdfg,
                                              module._ad_result,
                                              module._ad_inp_arrs)
        else:
            # no compiled backward SDFG: fall back to a forward-only operator
            bwd_sdfg = module.backward_sdfg
            compiled_sdfgs = [compiled]
            ptrs = [handle_ptr]
            code = code_for_module(module, compiled)
    else:
        compiled, handle_ptr = compile_and_init_sdfgs(module, dummy_inputs)
        ptrs = [handle_ptr]
        code = code_for_module(module, compiled)
        compiled_sdfgs = [compiled]
    environments.add(get_env_for_sdfg(compiled).full_class_path())
    # pretty-print the generated template (templates are emitted unindented)
    code = indent_code(code)
    # build the PyTorch module
    libname = f"torch_{compiled.sdfg.name}"
    program = CodeObject(libname,
                         code,
                         "cpp",
                         targets.cpu.CPUCodeGen,
                         f"Torch{module.sdfg_name}",
                         environments=environments)
    torch_module_build_path = os.path.join('.dacecache',
                                           f"torch_{compiled.sdfg.name}")
    compiler.generate_program_folder(None, [program], torch_module_build_path)
    compiler.configure_and_compile(torch_module_build_path)
    # load the compiled shared library so the TORCH_LIBRARY registration runs
    torch.ops.load_library(
        os.path.join(torch_module_build_path, "build",
                     platform_library_name(libname)))
    # resolve the registered op (daceml_<name>.<name>) from torch.ops
    torch_function = operator.attrgetter(
        f"daceml_{compiled.sdfg.name}.{compiled.sdfg.name}")(torch.ops)
    result = DaCeMLTorchFunction(function=torch_function,
                                 compiled_sdfgs=compiled_sdfgs,
                                 ptr=ptrs)
    return result
def get_env_for_sdfg(compiled: CompiledSDFG):
    """ Build and register a DaCe library environment that makes the compiled
        SDFG's headers and shared library available to dependent code objects
        (the generated torch extension links against it).

        :param compiled: the compiled SDFG to wrap.
        :return: the registered environment class.
    """
    sdfg_build_path = os.path.abspath(compiled.sdfg.build_folder)
    class SDFGEnvironment:
        """ Environment for the SDFG
        """
        cmake_minimum_version = None
        cmake_packages = []
        cmake_variables = {}
        # expose the SDFG's generated headers
        cmake_includes = [os.path.join(sdfg_build_path, "include")]
        cmake_compile_flags = []
        cmake_link_flags = []
        cmake_files = []
        # link against the SDFG's compiled shared library
        cmake_libraries = [
            os.path.join(sdfg_build_path, "build",
                         platform_library_name(compiled.sdfg.name))
        ]
        state_fields = []
        dependencies = []
        headers = []
        init_code = ""
        finalize_code = ""
    # name the environment after the SDFG so full_class_path() is unique
    SDFGEnvironment.__name__ = compiled.sdfg.name
    dace.library.environment(SDFGEnvironment)
    return SDFGEnvironment
def indent_code(code: str) -> str:
    """Re-indent generated source by round-tripping it through DaCe's
    pretty-printing code stream."""
    pretty = CodeIOStream()
    pretty.write(code)
    return pretty.getvalue()
| [
"logging.getLogger",
"dace.codegen.prettycode.CodeIOStream",
"daceml.pytorch.dispatchers.common.get_arglist",
"itertools.chain",
"daceml.pytorch.environments.PyTorch.full_class_path",
"daceml.util.is_cuda",
"dace.dtypes.can_access",
"daceml.util.platform_library_name",
"copy.deepcopy",
"dace.codeg... | [((801, 828), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (818, 828), False, 'import logging\n'), ((7814, 7869), 'dace.dtypes.can_access', 'dt.can_access', (['dt.ScheduleType.GPU_Device', 'desc.storage'], {}), '(dt.ScheduleType.GPU_Device, desc.storage)\n', (7827, 7869), True, 'from dace import dtypes as dt, data\n'), ((10727, 10746), 'daceml.pytorch.dispatchers.common.get_arglist', 'get_arglist', (['module'], {}), '(module)\n', (10738, 10746), False, 'from daceml.pytorch.dispatchers.common import DaCeMLTorchFunction, compile_and_init_sdfgs, get_arglist\n'), ((10859, 10881), 'copy.deepcopy', 'copy.deepcopy', (['outputs'], {}), '(outputs)\n', (10872, 10881), False, 'import copy\n'), ((15239, 15258), 'daceml.pytorch.dispatchers.common.get_arglist', 'get_arglist', (['module'], {}), '(module)\n', (15250, 15258), False, 'from daceml.pytorch.dispatchers.common import DaCeMLTorchFunction, compile_and_init_sdfgs, get_arglist\n'), ((18904, 19019), 'dace.codegen.codeobject.CodeObject', 'CodeObject', (['libname', 'code', '"""cpp"""', 'targets.cpu.CPUCodeGen', 'f"""Torch{module.sdfg_name}"""'], {'environments': 'environments'}), "(libname, code, 'cpp', targets.cpu.CPUCodeGen,\n f'Torch{module.sdfg_name}', environments=environments)\n", (18914, 19019), False, 'from dace.codegen.codeobject import CodeObject\n'), ((19171, 19228), 'os.path.join', 'os.path.join', (['""".dacecache"""', 'f"""torch_{compiled.sdfg.name}"""'], {}), "('.dacecache', f'torch_{compiled.sdfg.name}')\n", (19183, 19228), False, 'import os\n'), ((19277, 19351), 'dace.codegen.compiler.generate_program_folder', 'compiler.generate_program_folder', (['None', '[program]', 'torch_module_build_path'], {}), '(None, [program], torch_module_build_path)\n', (19309, 19351), False, 'from dace.codegen import targets, compiler\n'), ((19356, 19411), 'dace.codegen.compiler.configure_and_compile', 'compiler.configure_and_compile', (['torch_module_build_path'], {}), 
'(torch_module_build_path)\n', (19386, 19411), False, 'from dace.codegen import targets, compiler\n'), ((19679, 19768), 'daceml.pytorch.dispatchers.common.DaCeMLTorchFunction', 'DaCeMLTorchFunction', ([], {'function': 'torch_function', 'compiled_sdfgs': 'compiled_sdfgs', 'ptr': 'ptrs'}), '(function=torch_function, compiled_sdfgs=compiled_sdfgs,\n ptr=ptrs)\n', (19698, 19768), False, 'from daceml.pytorch.dispatchers.common import DaCeMLTorchFunction, compile_and_init_sdfgs, get_arglist\n'), ((19920, 19963), 'os.path.abspath', 'os.path.abspath', (['compiled.sdfg.build_folder'], {}), '(compiled.sdfg.build_folder)\n', (19935, 19963), False, 'import os\n'), ((20746, 20760), 'dace.codegen.prettycode.CodeIOStream', 'CodeIOStream', ([], {}), '()\n', (20758, 20760), False, 'from dace.codegen.prettycode import CodeIOStream\n'), ((6014, 6056), 'itertools.chain', 'itertools.chain', (['input_names', 'output_names'], {}), '(input_names, output_names)\n', (6029, 6056), False, 'import itertools\n'), ((17697, 17722), 'daceml.pytorch.environments.PyTorch.full_class_path', 'PyTorch.full_class_path', ([], {}), '()\n', (17720, 17722), False, 'from daceml.pytorch.environments import PyTorch\n'), ((17815, 17859), 'daceml.pytorch.dispatchers.common.compile_and_init_sdfgs', 'compile_and_init_sdfgs', (['module', 'dummy_inputs'], {}), '(module, dummy_inputs)\n', (17837, 17859), False, 'from daceml.pytorch.dispatchers.common import DaCeMLTorchFunction, compile_and_init_sdfgs, get_arglist\n'), ((18560, 18604), 'daceml.pytorch.dispatchers.common.compile_and_init_sdfgs', 'compile_and_init_sdfgs', (['module', 'dummy_inputs'], {}), '(module, dummy_inputs)\n', (18582, 18604), False, 'from daceml.pytorch.dispatchers.common import DaCeMLTorchFunction, compile_and_init_sdfgs, get_arglist\n'), ((19572, 19644), 'operator.attrgetter', 'operator.attrgetter', (['f"""daceml_{compiled.sdfg.name}.{compiled.sdfg.name}"""'], {}), "(f'daceml_{compiled.sdfg.name}.{compiled.sdfg.name}')\n", (19591, 19644), False, 
'import operator\n'), ((4351, 4415), 'dace.dtypes.can_access', 'dt.can_access', (['dt.ScheduleType.GPU_Device', 'arglist[name].storage'], {}), '(dt.ScheduleType.GPU_Device, arglist[name].storage)\n', (4364, 4415), True, 'from dace import dtypes as dt, data\n'), ((8156, 8186), 'numpy.nditer', 'np.nditer', (['numpyval'], {'order': '"""C"""'}), "(numpyval, order='C')\n", (8165, 8186), True, 'import numpy as np\n'), ((19517, 19547), 'daceml.util.platform_library_name', 'platform_library_name', (['libname'], {}), '(libname)\n', (19538, 19547), False, 'from daceml.util import is_cuda, platform_library_name\n'), ((20162, 20202), 'os.path.join', 'os.path.join', (['sdfg_build_path', '"""include"""'], {}), "(sdfg_build_path, 'include')\n", (20174, 20202), False, 'import os\n'), ((2112, 2133), 'daceml.util.is_cuda', 'is_cuda', (['desc.storage'], {}), '(desc.storage)\n', (2119, 2133), False, 'from daceml.util import is_cuda, platform_library_name\n'), ((20396, 20437), 'daceml.util.platform_library_name', 'platform_library_name', (['compiled.sdfg.name'], {}), '(compiled.sdfg.name)\n', (20417, 20437), False, 'from daceml.util import is_cuda, platform_library_name\n'), ((8710, 8734), 'dace.codegen.targets.common.sym2cpp', 'sym2cpp', (['desc.total_size'], {}), '(desc.total_size)\n', (8717, 8734), False, 'from dace.codegen.targets.common import sym2cpp\n'), ((8311, 8321), 'dace.codegen.targets.common.sym2cpp', 'sym2cpp', (['s'], {}), '(s)\n', (8318, 8321), False, 'from dace.codegen.targets.common import sym2cpp\n'), ((8372, 8382), 'dace.codegen.targets.common.sym2cpp', 'sym2cpp', (['s'], {}), '(s)\n', (8379, 8382), False, 'from dace.codegen.targets.common import sym2cpp\n')] |
from __future__ import division
import os,time,cv2
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
def lrelu(x):
    """Leaky ReLU activation with a fixed negative slope of 0.2."""
    return tf.maximum(0.2 * x, x)
def identity_initializer():
    """Return a conv-weight initializer producing identity-like kernels.

    The kernel is zero everywhere except at the spatial center, where input
    channel i maps straight through to output channel i, so a freshly
    initialized convolution starts out as (approximately) the identity map.
    """
    def _initializer(shape, dtype=tf.float32, partition_info=None):
        kernel = np.zeros(shape, dtype=float)
        center_h = shape[0] // 2
        center_w = shape[1] // 2
        for channel in range(shape[2]):
            kernel[center_h, center_w, channel, channel] = 1
        return tf.constant(kernel, dtype=dtype)
    return _initializer
def nm(x):
    """Adaptive normalization: a learned scalar blend of the identity branch
    (weight w0, init 1.0) and a batch-normalized branch (weight w1, init 0.0).
    """
    w0 = tf.Variable(1.0, name='w0')
    w1 = tf.Variable(0.0, name='w1')
    identity_branch = w0 * x
    normalized_branch = w1 * slim.batch_norm(x)
    return identity_branch + normalized_branch
def build(input):
    """Context aggregation network: nine 3x3 conv layers whose dilation
    rates grow 1,2,4,...,128 and then return to 1 (scopes g_conv1..g_conv9),
    followed by a 1x1 linear projection to 3 output channels (g_conv_last).
    All hidden layers use 32 channels, leaky ReLU, adaptive normalization
    and identity initialization.
    """
    dilation_rates = [1, 2, 4, 8, 16, 32, 64, 128, 1]
    net = input
    for layer_id, rate in enumerate(dilation_rates, start=1):
        net = slim.conv2d(net, 32, [3, 3], rate=rate,
                          activation_fn=lrelu, normalizer_fn=nm,
                          weights_initializer=identity_initializer(),
                          scope='g_conv%d' % layer_id)
    net = slim.conv2d(net, 3, [1, 1], rate=1, activation_fn=None,
                      scope='g_conv_last')
    return net
def prepare_data():
    """Assemble the file lists for training, fine-tuning and validation.

    Each split pairs an input image, a one-parameter .txt file (the smoothing
    hyper-parameter) and — for the two training splits — a target image
    produced by the original L0-smoothing operator. Image indices run 1..2500.

    Returns eight lists, in this order:
        input_names, hyper_names, output_names,
        val_names, val_hyper_names,
        finetune_input_names, finetune_output_names, finetune_hyper_names
    """
    result_root = "../original_results/L0_smoothing_parameterized"
    train_dir = 'MIT-Adobe_train_480p'      # training images at 480p
    finetune_dir = 'MIT-Adobe_train_random' # fine-tune images at random resolutions
    val_dir = 'MIT-Adobe_test_1080p'        # test images at 1080p
    ids = range(1, 2501)
    input_names = ["../data/%s/%06d.png" % (train_dir, i) for i in ids]
    hyper_names = ["%s/%s/%06d.txt" % (result_root, train_dir, i) for i in ids]
    output_names = ["%s/%s/%06d.png" % (result_root, train_dir, i) for i in ids]
    finetune_input_names = ["../data/%s/%06d.png" % (finetune_dir, i) for i in ids]
    finetune_hyper_names = ["%s/%s/%06d.txt" % (result_root, finetune_dir, i) for i in ids]
    finetune_output_names = ["%s/%s/%06d.png" % (result_root, finetune_dir, i) for i in ids]
    val_names = ["../data/%s/%06d.png" % (val_dir, i) for i in ids]
    val_hyper_names = ["%s/%s/%06d.txt" % (result_root, val_dir, i) for i in ids]
    return input_names, hyper_names, output_names, val_names, val_hyper_names, finetune_input_names, finetune_output_names, finetune_hyper_names
# Pick the GPU with the most free memory: parse `nvidia-smi` free-memory
# lines from a temp file and select the argmax.
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
os.environ['CUDA_VISIBLE_DEVICES']=str(np.argmax([int(x.split()[2]) for x in open('tmp','r').readlines()]))
os.system('rm tmp')
sess=tf.Session()
is_training=False  # False -> run inference on the 1080p test set only
input_names,hyper_names,output_names,val_names,val_hyper_names,finetune_input_names,finetune_output_names,finetune_hyper_names=prepare_data()
# Network input: RGB image + 1 channel holding the (tiled) smoothing parameter
input=tf.placeholder(tf.float32,shape=[None,None,None,4])
# Target: RGB output of the original operator
output=tf.placeholder(tf.float32,shape=[None,None,None,3])
network=build(input)
# MSE loss; only generator variables (scope prefix 'g_') are trained
loss=tf.reduce_mean(tf.square(network-output))
opt=tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss,var_list=[var for var in tf.trainable_variables() if var.name.startswith('g_')])
saver=tf.train.Saver(max_to_keep=1000)
sess.run(tf.global_variables_initializer())
# Resume from the latest checkpoint if one exists
ckpt=tf.train.get_checkpoint_state("result_parameterized")
if ckpt:
    print('loaded '+ckpt.model_checkpoint_path)
    saver.restore(sess,ckpt.model_checkpoint_path)
if is_training:
    # Per-image squared-error history (scaled by 255^2); sized 3000 although
    # only 2500 images are used, extra slots stay zero and are masked out
    # below via np.where(all).
    all=np.zeros(3000, dtype=float)
    for epoch in range(1,181):
        # Epochs 1-150 train on the 480p set, 151-180 fine-tune on the
        # random-resolution set; reset the image caches at each switch.
        if epoch==1 or epoch==151:
            input_images=[None]*len(input_names)
            output_images=[None]*len(input_names)
            hyper_parameters=[None]*len(input_names)
        # Skip epochs that already have a results directory (resume support).
        if os.path.isdir("result_parameterized/%04d"%epoch):
            continue
        cnt=0
        for id in np.random.permutation(len(input_names)):
            st=time.time()
            # Lazily load and cache image pairs; the scalar hyper-parameter is
            # tiled into a full-resolution extra channel.
            if input_images[id] is None:
                input_images[id]=np.expand_dims(np.float32(cv2.imread(input_names[id] if epoch<=150 else finetune_input_names[id],-1)),axis=0)/255.0
                output_images[id]=np.expand_dims(np.float32(cv2.imread(output_names[id] if epoch<=150 else finetune_output_names[id],-1)),axis=0)/255.0
                hyper_parameters[id]=np.tile(float(open(hyper_names[id] if epoch<=150 else finetune_hyper_names[id],'r').readline()),(1,input_images[id].shape[1],input_images[id].shape[2],1))
            # One optimization step on this image; the parameter channel is
            # concatenated onto the RGB input.
            _,current=sess.run([opt,loss],feed_dict={input:np.concatenate((input_images[id],hyper_parameters[id]),axis=3),output:output_images[id]})
            all[id]=current*255.0*255.0
            cnt+=1
            print("%d %d %.2f %.2f %.2f %s"%(epoch,cnt,current*255.0*255.0,np.mean(all[np.where(all)]),time.time()-st,os.getcwd().split('/')[-2]))
        # Record the epoch's mean score and checkpoint the model.
        os.makedirs("result_parameterized/%04d"%epoch)
        target=open("result_parameterized/%04d/score.txt"%epoch,'w')
        target.write("%f"%np.mean(all[np.where(all)]))
        target.close()
        saver.save(sess,"result_parameterized/model.ckpt")
        saver.save(sess,"result_parameterized/%04d/model.ckpt"%epoch)
        # Write preview outputs for the first 10 validation images.
        for ind in range(10):
            input_image=np.expand_dims(np.float32(cv2.imread(val_names[ind],-1)),axis=0)/255.0
            hyper_parameter=np.tile(float(open(val_hyper_names[ind],'r').readline()),(1,input_image.shape[1],input_image.shape[2],1))
            st=time.time()
            output_image=sess.run(network,feed_dict={input:np.concatenate((input_image,hyper_parameter),axis=3)})
            print("%.3f"%(time.time()-st))
            output_image=np.minimum(np.maximum(output_image,0.0),1.0)*255.0
            cv2.imwrite("result_parameterized/%04d/%06d.png"%(epoch,ind+1),np.uint8(output_image[0,:,:,:]))
    # After training, sweep the parameter from 2/200 to 200/200 on a single
    # validation image and save each frame for a video, then stop.
    if not os.path.isdir("result_parameterized/video"):
        os.makedirs("result_parameterized/video")
    input_image=np.expand_dims(np.float32(cv2.imread(val_names[884],-1)),axis=0)/255.0
    cnt=0
    for k in range(2,201):
        hyper_parameter=np.tile(k/200.0,(1,input_image.shape[1],input_image.shape[2],1))
        output_image=sess.run(network,feed_dict={input:np.concatenate((input_image,hyper_parameter),axis=3)})
        output_image=np.minimum(np.maximum(output_image,0.0),1.0)*255.0
        cnt+=1
        cv2.imwrite("result_parameterized/video/%06d.png"%cnt,np.uint8(output_image[0,:,:,:]))
    exit()
# Inference mode: run the network over the full 1080p test set, clamping the
# prediction to [0,1] before writing 8-bit PNGs.
if not os.path.isdir("result_parameterized/MIT-Adobe_test_1080p"):
    os.makedirs("result_parameterized/MIT-Adobe_test_1080p")
for ind in range(len(val_names)):
    input_image=np.expand_dims(np.float32(cv2.imread(val_names[ind],-1)),axis=0)/255.0
    # Tile the per-image scalar parameter into a full-resolution channel.
    hyper_parameter=np.tile(float(open(val_hyper_names[ind], 'r').readline()),(1,input_image.shape[1],input_image.shape[2],1))
    st=time.time()
    output_image=sess.run(network,feed_dict={input:np.concatenate((input_image,hyper_parameter),axis=3)})
    print("%.3f"%(time.time()-st))  # per-image inference time
    output_image=np.minimum(np.maximum(output_image,0.0),1.0)*255.0
    cv2.imwrite("result_parameterized/MIT-Adobe_test_1080p/%06d.png"%(ind+1),np.uint8(output_image[0,:,:,:]))
| [
"numpy.uint8",
"numpy.where",
"tensorflow.placeholder",
"tensorflow.Session",
"os.path.isdir",
"numpy.concatenate",
"tensorflow.maximum",
"tensorflow.square",
"tensorflow.train.AdamOptimizer",
"numpy.maximum",
"tensorflow.trainable_variables",
"numpy.tile",
"tensorflow.contrib.slim.batch_nor... | [((3477, 3542), 'os.system', 'os.system', (['"""nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp"""'], {}), "('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n", (3486, 3542), False, 'import os, time, cv2\n'), ((3651, 3670), 'os.system', 'os.system', (['"""rm tmp"""'], {}), "('rm tmp')\n", (3660, 3670), False, 'import os, time, cv2\n'), ((3677, 3689), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3687, 3689), True, 'import tensorflow as tf\n'), ((3857, 3912), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, None, 4]'}), '(tf.float32, shape=[None, None, None, 4])\n', (3871, 3912), True, 'import tensorflow as tf\n'), ((3916, 3971), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, None, 3]'}), '(tf.float32, shape=[None, None, None, 3])\n', (3930, 3971), True, 'import tensorflow as tf\n'), ((4188, 4220), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(1000)'}), '(max_to_keep=1000)\n', (4202, 4220), True, 'import tensorflow as tf\n'), ((4271, 4324), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['"""result_parameterized"""'], {}), "('result_parameterized')\n", (4300, 4324), True, 'import tensorflow as tf\n'), ((159, 181), 'tensorflow.maximum', 'tf.maximum', (['(x * 0.2)', 'x'], {}), '(x * 0.2, x)\n', (169, 181), True, 'import tensorflow as tf\n'), ((523, 550), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {'name': '"""w0"""'}), "(1.0, name='w0')\n", (534, 550), True, 'import tensorflow as tf\n'), ((557, 584), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'name': '"""w1"""'}), "(0.0, name='w1')\n", (568, 584), True, 'import tensorflow as tf\n'), ((1889, 1965), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['net', '(3)', '[1, 1]'], {'rate': '(1)', 'activation_fn': 'None', 'scope': '"""g_conv_last"""'}), "(net, 3, [1, 1], rate=1, activation_fn=None, scope='g_conv_last')\n", (1900, 
1965), True, 'import tensorflow.contrib.slim as slim\n'), ((4009, 4036), 'tensorflow.square', 'tf.square', (['(network - output)'], {}), '(network - output)\n', (4018, 4036), True, 'import tensorflow as tf\n'), ((4230, 4263), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4261, 4263), True, 'import tensorflow as tf\n'), ((4458, 4485), 'numpy.zeros', 'np.zeros', (['(3000)'], {'dtype': 'float'}), '(3000, dtype=float)\n', (4466, 4485), True, 'import numpy as np\n'), ((6743, 6786), 'os.path.isdir', 'os.path.isdir', (['"""result_parameterized/video"""'], {}), "('result_parameterized/video')\n", (6756, 6786), False, 'import os, time, cv2\n'), ((6792, 6833), 'os.makedirs', 'os.makedirs', (['"""result_parameterized/video"""'], {}), "('result_parameterized/video')\n", (6803, 6833), False, 'import os, time, cv2\n'), ((6966, 7036), 'numpy.tile', 'np.tile', (['(k / 200.0)', '(1, input_image.shape[1], input_image.shape[2], 1)'], {}), '(k / 200.0, (1, input_image.shape[1], input_image.shape[2], 1))\n', (6973, 7036), True, 'import numpy as np\n'), ((7322, 7380), 'os.path.isdir', 'os.path.isdir', (['"""result_parameterized/MIT-Adobe_test_1080p"""'], {}), "('result_parameterized/MIT-Adobe_test_1080p')\n", (7335, 7380), False, 'import os, time, cv2\n'), ((7386, 7442), 'os.makedirs', 'os.makedirs', (['"""result_parameterized/MIT-Adobe_test_1080p"""'], {}), "('result_parameterized/MIT-Adobe_test_1080p')\n", (7397, 7442), False, 'import os, time, cv2\n'), ((7698, 7709), 'time.time', 'time.time', ([], {}), '()\n', (7707, 7709), False, 'import os, time, cv2\n'), ((292, 320), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'float'}), '(shape, dtype=float)\n', (300, 320), True, 'import numpy as np\n'), ((448, 479), 'tensorflow.constant', 'tf.constant', (['array'], {'dtype': 'dtype'}), '(array, dtype=dtype)\n', (459, 479), True, 'import tensorflow as tf\n'), ((4041, 4085), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], 
{'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (4063, 4085), True, 'import tensorflow as tf\n'), ((4715, 4765), 'os.path.isdir', 'os.path.isdir', (["('result_parameterized/%04d' % epoch)"], {}), "('result_parameterized/%04d' % epoch)\n", (4728, 4765), False, 'import os, time, cv2\n'), ((5784, 5832), 'os.makedirs', 'os.makedirs', (["('result_parameterized/%04d' % epoch)"], {}), "('result_parameterized/%04d' % epoch)\n", (5795, 5832), False, 'import os, time, cv2\n'), ((7274, 7308), 'numpy.uint8', 'np.uint8', (['output_image[0, :, :, :]'], {}), '(output_image[0, :, :, :])\n', (7282, 7308), True, 'import numpy as np\n'), ((7996, 8030), 'numpy.uint8', 'np.uint8', (['output_image[0, :, :, :]'], {}), '(output_image[0, :, :, :])\n', (8004, 8030), True, 'import numpy as np\n'), ((603, 621), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['x'], {}), '(x)\n', (618, 621), True, 'import tensorflow.contrib.slim as slim\n'), ((4874, 4885), 'time.time', 'time.time', ([], {}), '()\n', (4883, 4885), False, 'import os, time, cv2\n'), ((6382, 6393), 'time.time', 'time.time', ([], {}), '()\n', (6391, 6393), False, 'import os, time, cv2\n'), ((6872, 6902), 'cv2.imread', 'cv2.imread', (['val_names[884]', '(-1)'], {}), '(val_names[884], -1)\n', (6882, 6902), False, 'import os, time, cv2\n'), ((7165, 7194), 'numpy.maximum', 'np.maximum', (['output_image', '(0.0)'], {}), '(output_image, 0.0)\n', (7175, 7194), True, 'import numpy as np\n'), ((7879, 7908), 'numpy.maximum', 'np.maximum', (['output_image', '(0.0)'], {}), '(output_image, 0.0)\n', (7889, 7908), True, 'import numpy as np\n'), ((4125, 4149), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (4147, 4149), True, 'import tensorflow as tf\n'), ((6702, 6736), 'numpy.uint8', 'np.uint8', (['output_image[0, :, :, :]'], {}), '(output_image[0, :, :, :])\n', (6710, 6736), True, 'import numpy as np\n'), ((7082, 7136), 'numpy.concatenate', 'np.concatenate', (['(input_image, 
hyper_parameter)'], {'axis': '(3)'}), '((input_image, hyper_parameter), axis=3)\n', (7096, 7136), True, 'import numpy as np\n'), ((7519, 7549), 'cv2.imread', 'cv2.imread', (['val_names[ind]', '(-1)'], {}), '(val_names[ind], -1)\n', (7529, 7549), False, 'import os, time, cv2\n'), ((7761, 7815), 'numpy.concatenate', 'np.concatenate', (['(input_image, hyper_parameter)'], {'axis': '(3)'}), '((input_image, hyper_parameter), axis=3)\n', (7775, 7815), True, 'import numpy as np\n'), ((7834, 7845), 'time.time', 'time.time', ([], {}), '()\n', (7843, 7845), False, 'import os, time, cv2\n'), ((6587, 6616), 'numpy.maximum', 'np.maximum', (['output_image', '(0.0)'], {}), '(output_image, 0.0)\n', (6597, 6616), True, 'import numpy as np\n'), ((5479, 5543), 'numpy.concatenate', 'np.concatenate', (['(input_images[id], hyper_parameters[id])'], {'axis': '(3)'}), '((input_images[id], hyper_parameters[id]), axis=3)\n', (5493, 5543), True, 'import numpy as np\n'), ((5938, 5951), 'numpy.where', 'np.where', (['all'], {}), '(all)\n', (5946, 5951), True, 'import numpy as np\n'), ((6188, 6218), 'cv2.imread', 'cv2.imread', (['val_names[ind]', '(-1)'], {}), '(val_names[ind], -1)\n', (6198, 6218), False, 'import os, time, cv2\n'), ((6453, 6507), 'numpy.concatenate', 'np.concatenate', (['(input_image, hyper_parameter)'], {'axis': '(3)'}), '((input_image, hyper_parameter), axis=3)\n', (6467, 6507), True, 'import numpy as np\n'), ((6534, 6545), 'time.time', 'time.time', ([], {}), '()\n', (6543, 6545), False, 'import os, time, cv2\n'), ((4986, 5063), 'cv2.imread', 'cv2.imread', (['(input_names[id] if epoch <= 150 else finetune_input_names[id])', '(-1)'], {}), '(input_names[id] if epoch <= 150 else finetune_input_names[id], -1)\n', (4996, 5063), False, 'import os, time, cv2\n'), ((5136, 5215), 'cv2.imread', 'cv2.imread', (['(output_names[id] if epoch <= 150 else finetune_output_names[id])', '(-1)'], {}), '(output_names[id] if epoch <= 150 else finetune_output_names[id], -1)\n', (5146, 5215), False, 
'import os, time, cv2\n'), ((5731, 5742), 'time.time', 'time.time', ([], {}), '()\n', (5740, 5742), False, 'import os, time, cv2\n'), ((5715, 5728), 'numpy.where', 'np.where', (['all'], {}), '(all)\n', (5723, 5728), True, 'import numpy as np\n'), ((5746, 5757), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5755, 5757), False, 'import os, time, cv2\n')] |
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import os
import glob
from scipy.stats import zscore
import importlib
import zipfile
import math
import utils  # project-local helpers (compile_resp etc.)
import scipy as sp
from scipy import io
import scipy.signal
from scipy.sparse.linalg import eigsh
import csv
import power_law  # project-local power-law fitting module
import matplotlib as mpl
from cycler import cycler
# Fixed color cycle so curves are comparable across figures.
mpl.rcParams['axes.prop_cycle'] = cycler(color='bgrcmyk')
#import decoder
# Global figure typography and line settings.
myfont = 6
myaxis_font = 8
plt.rcParams.update({'font.size': myfont})
line_width = 1
dataroot = 'grating_data'
# database.npy lists one record per recording (mouse name, date, block, expt).
db = np.load(os.path.join(dataroot, 'database.npy'), allow_pickle=True)
fs = []
fig_dir = 'figures/'
all_mouse_names = []
mouse_dict = {}
# Build the list of per-recording response files from the database records;
# mouse_dict just tracks the distinct mouse names seen.
for di in db:
    mname = di['mouse_name']
    if mname not in mouse_dict:
        mouse_dict[mname] = []
    datexp = di['date']
    blk = di['block']
    stype = di['expt']
    fname = '%s_%s_%s_%s.npy'%(stype, mname, datexp, blk)
    fs.append(os.path.join(dataroot, fname))
count = 0
maxcount = 5
npc = 20  # number of behavior PCs removed by utils.compile_resp
fs_all = fs
#fs = [fs[0]]
#fs = fs[0:1]
all_spectra = []
all_alphas = []
count = 0
all_matern_errs = []
num_stim = 102  # number of orientation bins over [0, pi)
#num_stim = 50
np.random.seed(2021)  # reproducible random rotations/permutations below
for t,f in enumerate(fs):
if t > 0:
break
F1_indices = []
F2_indices = []
if count > 5:
break
count += 1
dat = np.load(f, allow_pickle=True).item()
sresp, istim, itrain, itest = utils.compile_resp(dat, npc=npc)
print(sresp.shape)
# do some trial averaging to smooth out the responses...
stim_vals = np.linspace(0, math.pi, num_stim)
resp_avg = np.zeros( (sresp.shape[0], num_stim) )
density = np.zeros( len(stim_vals))
istim = istim % math.pi
for i in range(num_stim-1):
stim_inds = [j for j in range(len(istim)) if istim[j] <= stim_vals[i+1] and istim[j] > stim_vals[i]]
resp_avg[:,i] = np.mean( sresp[:,stim_inds] , axis = 1)
density[i] = len(stim_inds)
resp_avg = resp_avg[:,0:resp_avg.shape[1]-1]
stim_vals = stim_vals[0:stim_vals.shape[0]-1]
Q,R = np.linalg.qr(np.random.standard_normal((1000,1000)))
rotate = Q @ resp_avg[0:1000,:]
sigma = 0.1
filter = np.exp(- 0.5 * (stim_vals-math.pi)**2 /sigma**2 )
all_filtered = np.zeros((3,len(filter)))
plt.figure(figsize=(1.8,1.5))
for i in range(3):
all_filtered[i,:] = np.convolve(filter, resp_avg[i,:], 'same')
#plt.plot(stim_vals,all_filtered[i,:], linewidth = line_width)
plt.plot(stim_vals, resp_avg[i,:], linewidth = line_width)
plt.xlabel(r'$\theta$', fontsize=myaxis_font)
plt.ylabel(r'$r(\theta)$', fontsize=myaxis_font)
#plt.xticks([])
#plt.yticks([])
plt.xticks([0,math.pi],[r'$0$',r'$\pi$'])
plt.title('Original', fontsize=myaxis_font)
plt.tight_layout()
plt.savefig(fig_dir + 'tuning_curves_mouse_r.pdf')
plt.show()
plt.close()
# --- 3-D "neural space" figure: the smoothed 3-neuron trajectory and a
# randomly rotated copy of it, to illustrate rotation of the code. ---
#fig = plt.figure(figsize=(1.8,1.5))
fig = plt.figure()
plt.rcParams.update({'font.size': 20})
ax = fig.add_subplot(111, projection = '3d')
#ax.plot(resp_avg[0,:], resp_avg[1,:], resp_avg[2,:], label = 'Original Code')
ax.plot(all_filtered[0,:], all_filtered[1,:], all_filtered[2,:], linewidth = 3)
# Fixed seed so the random 3x3 rotation is reproducible.
np.random.seed(1)
Q, _ = np.linalg.qr(np.random.standard_normal((3,3)))
all_rot = Q @ all_filtered
#r_rot = Q @ resp_avg[0:3,:]
#ax.plot(r_rot[0,:], r_rot[1,:], r_rot[2,:], label = 'Rotated Code')
ax.plot(all_rot[0,:], all_rot[1,:], all_rot[2,:], linewidth = 3, color = 'C2')
# Empty scatters exist only to create legend entries with the right colors.
ax.scatter([],[], label ='Ori.', color = 'C0')
ax.scatter([],[], label = 'Rot.', color = 'C2')
#plt.legend()
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
ax.set_xlabel([])
#ax.legend(loc=(0.5,0.5,0.5), frameon=0)
#ax.legend(loc = 'best', bbox_to_anchor=(0.8, 0.8, 0.3, 0.3))
plt.title('Neural Space')
plt.legend()
#ax.set_xlabel(r'$r_1$', fontsize=myaxis_font)
#ax.set_ylabel(r'$r_2$', fontsize=myaxis_font)
#ax.set_zlabel(r'$r_3$', fontsize = myaxis_font)
ax.set_xlabel(r'$r_1$', fontsize=30)
ax.set_ylabel(r'$r_2$',fontsize=30)
ax.set_zlabel(r'$r_3$',fontsize=30)
plt.tight_layout()
plt.savefig(fig_dir + 'tuning_curves_mouse_r_3d.pdf')
plt.show()
plt.close()
# Restore the global font size used elsewhere in the script.
plt.rcParams.update({'font.size': myfont})
# --- Raw tuning curves of the first 6 neurons. ---
plt.figure(figsize=(1.8,1.5))
for i in range(6):
    # Smoothed version is computed but only the raw curve is plotted.
    filtered = np.convolve(filter, resp_avg[i,:], 'same')
    #plt.plot(stim_vals,filtered)
    #plt.plot(stim_vals, filtered, linewidth = line_width)
    plt.plot(stim_vals, resp_avg[i,:], linewidth = line_width)
plt.xlabel(r'$\theta$', fontsize=myaxis_font)
plt.ylabel(r'$r(\theta)$', fontsize=myaxis_font)
#plt.xticks([])
#plt.yticks([])
#plt.title('Original Code', fontsize=myaxis_font)
plt.tight_layout()
plt.savefig(fig_dir + 'tuning_curves_mouse_r_many.pdf')
plt.show()
# --- "Tuning curves" of the rotated code (first 3 rows of `rotate`). ---
plt.figure(figsize=(1.8,1.5))
for i in range(3):
    filtered = np.convolve(filter, rotate[i,:], 'same')
    #plt.plot(stim_vals, filtered, linewidth=line_width)
    plt.plot(stim_vals, rotate[i,:], linewidth = line_width)
plt.xlabel(r'$\theta$', fontsize=myaxis_font)
plt.ylabel(r'$\tilde{r}(\theta)$', fontsize=myaxis_font)
plt.title('Rotated',fontsize=myaxis_font)
plt.xticks([0,math.pi],[r'$0$',r'$\pi$'])
#plt.xticks([])
#plt.yticks([])
plt.tight_layout()
plt.savefig(fig_dir+'tuning_curves_mouse_rotated_r.pdf')
plt.show()
#K_sub = 1/1000 * resp_avg[0:1000,:].T @ resp_avg[0:1000,:]
#K_sub_rotate = 1/1000 * rotate.T @ rotate
K_sub = 1/resp_avg.shape[0] * resp_avg.T @ resp_avg
plt.figure(figsize=(1.8,1.5))
vmax = np.abs(K_sub).max()
vmin = - vmax
plt.imshow(K_sub, cmap = 'seismic', vmin = vmin, vmax = vmax)
plt.xticks([0,resp_avg.shape[1]],[r'$0$',r'$\pi$'])
plt.yticks([0,resp_avg.shape[1]],[r'$\pi$',r'$0$'])
plt.xlabel(r'$\theta_1$', fontsize=myaxis_font)
plt.ylabel(r'$\theta_2$', fontsize=myaxis_font)
plt.title(r'Kernel',fontsize=myaxis_font)
plt.colorbar()
plt.tight_layout()
plt.savefig(fig_dir + 'kernel_matrix_sub_no_rotate.pdf')
plt.show()
#Nvals = [10,20, 50, 100, 200, 500, 1000, 2000,5000, resp_avg.shape[0]]
Nvals = np.logspace(1.5,np.log10(0.8*resp_avg.shape[0]-1), 50).astype('int')
power = np.zeros(len(Nvals))
num_subsample = 250
num_eig = 10
eigs = np.zeros((num_eig, len(Nvals)))
"""
for i, Ni in enumerate(Nvals):
print("N = %d" % Ni)
for j in range(num_subsample):
neuron_idsi = np.random.choice(resp_avg.shape[0], Ni, replace = False)
r = resp_avg[neuron_idsi,:]
power[i] += 1/num_subsample * np.mean( r**2 )
u,s,v = np.linalg.svd( 1/np.sqrt(Ni) * r, full_matrices = False)
s = np.sort(s)[::-1]
eigs[:,i] += 1/num_subsample * s[0:10]**2
plt.semilogx(Nvals, power)
plt.ylim([0,2*np.amax(power)])
plt.xlabel(r'$N$', fontsize = 20)
plt.ylabel(r'$\frac{1}{N} \sum_{i=1}^N \left< r_i(x)^2 \right>_{x}$', fontsize = 20)
plt.tight_layout()
plt.savefig('power_vs_N.pdf')
plt.show()
for i in range(num_eig):
plt.loglog(Nvals, eigs[i,:])
plt.xlabel(r'$N$', fontsize = 20)
plt.ylabel(r'$\lambda_k$', fontsize = 20)
plt.tight_layout()
plt.savefig('kernel_eigenvalues_vs_N.pdf')
plt.show()
"""
#resp_avg = sp.signal.fftconvolve( filter.reshape((1, filter.shape[0])), resp_avg, 'same')
# compute kernel
# Full stimulus kernel over all neurons (same formula as K_sub above).
K = 1/resp_avg.shape[0] * resp_avg.T @ resp_avg
plt.figure(figsize=(1.8,1.5))
plt.imshow(K)
plt.xticks([])
plt.yticks([])
plt.xlabel(r'$\theta_1$', fontsize=myaxis_font)
plt.ylabel(r'$\theta_2$', fontsize=myaxis_font)
plt.colorbar()
plt.tight_layout()
plt.savefig(fig_dir + 'kernel_matrix.pdf')
plt.show()
# Average the kernel over diagonals (circular shift) to get a function of the
# stimulus difference only, assuming approximate translation invariance.
kavg = np.zeros(K.shape[0])
for k in range(K.shape[0]):
    inds = np.roll(np.arange(K.shape[0]),-k)
    kavg += 1/K.shape[0] * K[k,inds]
plt.figure(figsize=(1.8,1.5))
plt.plot(stim_vals, kavg)
plt.xlabel(r'$\theta$', fontsize=myaxis_font)
plt.ylabel(r'$K(\theta)$', fontsize=myaxis_font)
plt.tight_layout()
plt.savefig(fig_dir + 'kernel_real_space.pdf')
plt.show()
# Recompute the same diagonal average (identical to the loop above).
kavg = np.zeros(K.shape[0])
for k in range(K.shape[0]):
    inds = np.roll(np.arange(K.shape[0]),-k)
    kavg += 1/K.shape[0] * K[k,inds]
stim2 = stim_vals[0:len(stim_vals)-1]
kavg2 = kavg[0:len(stim_vals)-1]
# Re-center the stimulus axis on zero: values below pi keep their coordinate,
# values above pi are wrapped to negative angles via the Heaviside masks.
x_shift = np.heaviside(-1e-1+math.pi*np.ones(len(stim2)) - stim2, np.zeros(len(stim2)))*stim2
x_shift += np.heaviside(1e-1-math.pi*np.ones(len(stim2)) + stim2, np.zeros(len(stim2))) * (stim2 - 2*math.pi*np.ones(len(stim2)))
# Symmetrize the averaged kernel.
k2 = kavg2 + kavg2[::-1]
print(k2)
plt.figure(figsize=(1.8,1.5))
plt.plot(x_shift, k2)
plt.xlabel(r'$\theta$', fontsize=myaxis_font)
plt.ylabel(r'$K(\theta)$', fontsize=myaxis_font)
plt.tight_layout()
plt.savefig(fig_dir + 'kernel_real_space_shift.pdf')
plt.show()
# Eigendecomposition of the (symmetric) kernel; sort eigenvalues descending.
s,u = np.linalg.eigh(K)
sort_inds = np.argsort(s)[::-1]
u = u[:,sort_inds]
s = s[sort_inds]
plt.figure(figsize=(1.8,1.5))
# Spectrum normalized by the top eigenvalue.
plt.loglog(s/s[0], linewidth = line_width)
plt.xlabel(r'$k$', fontsize=myaxis_font)
plt.ylabel(r'$\lambda_k$', fontsize=myaxis_font)
plt.ylim([1e-3,1])
plt.tight_layout()
plt.savefig(fig_dir + 'spectrum_population_grating_big.pdf')
plt.show()
# smooth out eigenfunctions for visualization
plt.figure(figsize=(2.25,1.5))
# Narrower, normalized Gaussian filter (rebinds `sigma` and `filter`).
sigma = 0.075
filter = np.exp(- 0.5 * (stim_vals-math.pi)**2 /sigma**2 ) / np.sqrt(2*math.pi*sigma**2)
for i in range(5):
    # Smoothed eigenfunction is computed but the raw one is plotted,
    # vertically offset by 0.5*i so the curves stack.
    filtered = np.convolve(u[:,i],filter, 'same')
    plt.plot(stim_vals, u[:,i] + 0.5*i*np.ones(len(stim_vals)), label = 'k = %d' % i, linewidth=line_width)
plt.legend(bbox_to_anchor = (1,1) )
plt.xlabel(r'$\theta$', fontsize = myaxis_font)
plt.ylabel(r'$\phi_k(\theta)$', fontsize=myaxis_font)
plt.xticks([0,math.pi], [r'$0$',r'$\pi$'])
#plt.xticks([])
#plt.yticks([])
plt.tight_layout()
plt.savefig(fig_dir + 'eigenfunctions_big.pdf')
plt.show()
# Theoretical mode errors for a flat teacher (unit coefficients) from the
# external `power_law` module.
spectrum = s / s[0]
pvals = np.logspace(0,4,300)
me = power_law.mode_errs(pvals, spectrum, np.ones(len(s)), 10)
inds = [1,10,20,50]
plt.figure(figsize=(1.8,1.5))
for i,ind in enumerate(inds):
    plt.loglog(pvals, me[ind-1,:] / me[ind-1,0], label = r'$k = %d$' % ind, linewidth = line_width)
plt.legend()
plt.xlabel(r'$p$', fontsize = myaxis_font)
plt.ylabel(r'$E_k$', fontsize=myaxis_font)
plt.title('Mode Errors',fontsize=myaxis_font)
plt.tight_layout()
plt.savefig(fig_dir + 'mode_err_curves_population_grating.pdf')
plt.show()
# u is the eigenvectors, K is
# Kernel feature-space coordinates: rows index eigenmodes, columns stimuli.
feature_space = u.T @ K
# Partition stimuli by the sign of cos(2*theta): the low-frequency task labels.
inds_0_90 = [i for i in range(len(stim_vals)) if np.cos(2*stim_vals[i]) > 0]
inds_90_180 = [i for i in range(len(stim_vals)) if np.cos(2*stim_vals[i]) <= 0]
plt.figure(figsize=(1.8,1.5))
plt.scatter(feature_space[0,inds_0_90], feature_space[1, inds_0_90], s=1, color = 'C4', label = r'$+1$')
plt.scatter(feature_space[0,inds_90_180], feature_space[1, inds_90_180], s=1, color = 'C5', label = r'$-1$')
plt.xticks([])
plt.yticks([])
plt.xlabel(r'$\sqrt{\lambda_1} \psi_1(\theta)$', fontsize=myaxis_font)
plt.ylabel(r'$\sqrt{\lambda_2} \psi_2(\theta)$', fontsize = myaxis_font)
plt.title('Low Freq. Task',fontsize=myaxis_font)
plt.legend()
plt.tight_layout()
plt.savefig(fig_dir + 'feature_k_space_mouse_low_freq.pdf')
plt.show()
# Same scatter for the high-frequency task (sign of cos(6*theta)).
inds_0 = [i for i in range(len(stim_vals)) if np.cos(6*stim_vals[i]) > 0]
inds_1= [i for i in range(len(stim_vals)) if np.cos(6*stim_vals[i]) <= 0]
plt.figure(figsize=(1.8,1.5))
plt.scatter(feature_space[0,inds_0], feature_space[1, inds_0], s = 1, color = 'C4',label = r'$+1$')
plt.scatter(feature_space[0,inds_1], feature_space[1, inds_1], s=1, color = 'C5', label = r'$-1$')
plt.xticks([])
plt.yticks([])
plt.xlabel(r'$\sqrt{\lambda_1} \psi_1(\theta)$', fontsize=myaxis_font)
plt.ylabel(r'$\sqrt{\lambda_2} \psi_2(\theta)$', fontsize = myaxis_font)
plt.title('High Freq. Task',fontsize=myaxis_font)
plt.legend()
plt.tight_layout()
plt.savefig(fig_dir + 'feature_k_space_mouse_high_freq.pdf')
plt.show()
# Binary target functions for the two tasks (rebinds y1/y2).
y1 = np.sign(np.cos(2*stim_vals))
y2 = np.sign(np.cos(6*stim_vals))
plt.figure(figsize=(1.8,1.5))
plt.subplot(2,1,1)
plt.plot(stim_vals, y1, linewidth=line_width, color = 'C0')
plt.ylabel(r'$y(\theta)$', fontsize=myaxis_font)
plt.title('Low Freq. Task', fontsize=myaxis_font)
plt.subplot(2,1,2)
plt.plot(stim_vals, y2, linewidth=line_width, color = 'C2')
#plt.legend()
plt.xlabel(r'$\theta$', fontsize=myaxis_font)
plt.ylabel(r'$y(\theta)$', fontsize=myaxis_font)
plt.title('High Freq. Task',fontsize=myaxis_font)
plt.xticks([0,math.pi], [r'$0$',r'$\pi$'])
plt.tight_layout()
plt.savefig(fig_dir + 'low_high_target_visual.pdf')
plt.show()
# Squared projections of each target onto the kernel eigenbasis.
coeffs1 = (u.T @ y1)**2
coeffs2 = (u.T @ y2)**2
print(coeffs1[0:10])
print(coeffs2[0:10])
# Mode indices ordered by how much target power they carry.
sort1 = np.argsort(coeffs1)[::-1]
sort2 = np.argsort(coeffs2)[::-1]
plt.figure(figsize=(1.8,1.5))
plt.scatter(feature_space[sort1[0],inds_0_90], feature_space[sort1[1], inds_0_90], color = 'C4', s= 1)
plt.scatter(feature_space[sort1[0],inds_90_180], feature_space[sort1[1], inds_90_180], color = 'C5', s= 1)
plt.xticks([])
plt.yticks([])
plt.xlabel(r'$\sqrt{\lambda_1} \psi_1(\theta)$', fontsize=myaxis_font)
plt.ylabel(r'$\sqrt{\lambda_2} \psi_2(\theta)$', fontsize = myaxis_font)
plt.tight_layout()
plt.savefig(fig_dir + 'feature_k_space_mouse_low_freq_max_var_kpc.pdf')
plt.show()
# NOTE(review): these index sets use cos(5*theta) while the "high freq." task
# above used cos(6*theta) — confirm whether 5 is intentional.
inds_0 = [i for i in range(len(stim_vals)) if np.cos(5*stim_vals[i]) > 0]
inds_1= [i for i in range(len(stim_vals)) if np.cos(5*stim_vals[i]) <= 0]
plt.figure(figsize=(1.8,1.5))
plt.scatter(feature_space[sort2[0],inds_0], feature_space[sort2[1], inds_0], color = 'C4', s = 1)
plt.scatter(feature_space[sort2[0],inds_1], feature_space[sort2[1], inds_1], color = 'C5', s = 1)
plt.xticks([])
plt.yticks([])
plt.xlabel(r'$\sqrt{\lambda_3} \psi_{3}(\theta)$')
# NOTE(review): the y-label repeats subscript 3; presumably one axis should
# be labeled with a different mode index — verify against the figure.
plt.ylabel(r'$\sqrt{\lambda_{3}} \psi_{3}(\theta)$')
plt.savefig(fig_dir + 'feature_k_space_mouse_high_freq_max_var_kpc.pdf')
plt.show()
plt.figure(figsize=(1.8,1.5))
plt.scatter(feature_space[2,inds_0], feature_space[3, inds_0], color = 'C4', label = r'$+1$', s =1)
plt.scatter(feature_space[2,inds_1], feature_space[3, inds_1], color = 'C5', label = r'$-1$', s = 1)
plt.xticks([])
plt.yticks([])
plt.xlabel(r'$\sqrt{\lambda_3} \psi_3(\theta)$', fontsize=myaxis_font)
plt.ylabel(r'$\sqrt{\lambda_4} \psi_4(\theta)$', fontsize = myaxis_font)
plt.title('High Freq. Task',fontsize=myaxis_font)
plt.legend()
plt.tight_layout()
plt.savefig(fig_dir + 'feature_k_space_mouse_high_freq_68_kpc.pdf')
plt.show()
#R = np.random.standard_normal((resp_avg.shape[0], resp_avg.shape[0]))
"""
Nr = 500
R = sp.stats.ortho_group.rvs(Nr)
#s, v = np.linalg.eig(R)
rotated_code = R @ resp_avg[0:Nr,:]
for i in range(3):
    plt.plot(stim_vals, resp_avg[i,:], color = 'C%d' % i)
    plt.plot(stim_vals, rotated_code[i,:], '--', color = 'C%d' % i)
plt.show()
"""
# kernel regression expt
# Kernel ridge regression with `num_repeats` random training sets at each
# training-set size p, for the low- and high-frequency sign targets.
pvals = np.logspace(0.4,2, 12).astype('int')
num_repeats = 30
lamb = 12
y_easy_true = np.sign(np.cos(2*stim_vals))
y_hard_true = np.sign(np.cos(6*stim_vals))
err_easy = np.zeros((len(pvals), num_repeats))
err_hard = np.zeros((len(pvals), num_repeats))
for n in range(num_repeats):
    for i,p in enumerate(pvals):
        # Sample p training stimuli (with replacement) and form the
        # train/test and train/train kernel sub-blocks.
        rand_i = np.random.randint(0,K.shape[0],p)
        Ki = K[rand_i,:]
        Kii = Ki[:,rand_i]
        x = stim_vals[rand_i]
        y1 = np.sign(np.cos(2*x))
        y2 = np.sign(np.cos(6*x))
        #y1 = u[rand_i,0]
        #y2 = u[rand_i,5]
        # Ridge-regularized kernel regression predictor on the full grid.
        yhat1 = Ki.T @ np.linalg.inv(Kii + 1/K.shape[0]*lamb*np.eye(p)) @ y1
        yhat2 = Ki.T @ np.linalg.inv(Kii + 1/K.shape[0]*lamb*np.eye(p)) @ y2
        # Mean squared generalization error over all stimuli.
        err_easy[i,n] = np.mean( (y_easy_true - yhat1 )**2 )
        err_hard[i,n] = np.mean( (y_hard_true - yhat2 )**2 )
# --- Visualize the last predictor against the low-frequency target. ---
plt.figure(figsize=(1.8,1.5))
plt.plot(stim_vals, yhat1, label = r'Ori.', linewidth=line_width)
# NOTE(review): both 'Ori.' and 'Rot.' curves plot the same yhat1 — possibly
# deliberate (a rotated code yields the identical kernel predictor), but
# confirm this wasn't meant to plot a separately computed rotated estimate.
plt.plot(stim_vals, yhat1, '--', color = 'C2', label = r'Rot.', linewidth=line_width)
plt.plot(stim_vals, y_easy_true, '--', color = 'black', label = r'$y(x)$', linewidth=line_width)
plt.xlabel(r'$\theta$', fontsize=myaxis_font)
plt.ylabel(r'$y(\theta)$', fontsize=myaxis_font)
plt.title('Target Function',fontsize=myaxis_font)
plt.xticks([0,math.pi], [r'$0$',r'$\pi$'])
#plt.xticks([])
#plt.yticks([])
#plt.legend()
plt.tight_layout()
plt.savefig(fig_dir + 'task_visual.pdf')
plt.show()
kmax = 50
print(len(spectrum))
print(np.sum(s))
# Normalized squared eigenbasis coefficients of the two targets.
coeffs_easy = (1/K.shape[0] * u.T @ y_easy_true)**2
coeffs_hard = (1/K.shape[0] * u.T @ y_hard_true)**2
plt.figure(figsize=(1.8,1.5))
# Cumulative power C(k): fraction of target power in the first k modes.
plt.plot(np.cumsum(coeffs_easy)/np.sum(coeffs_easy), label = 'low freq.', linewidth=line_width, color = 'C0')
plt.plot(np.cumsum(coeffs_hard)/np.sum(coeffs_hard), label = 'high freq.', linewidth=line_width, color = 'C2')
plt.xlabel(r'$k$', fontsize=myaxis_font)
plt.ylabel(r'$C(k)$', fontsize=myaxis_font)
plt.title('Cumulative Power',fontsize=myaxis_font)
plt.legend()
plt.tight_layout()
plt.savefig(fig_dir + 'cumulative_sign_harmonic_mouse.pdf')
plt.show()
# --- Power-law fit to the tail of the cumulative power curve. ---
plt.figure(figsize=(2.4,2))
# 1 - C(k): fraction of target power NOT captured by the first k modes.
decay_coeff = 1-np.cumsum(coeffs_easy)/np.sum(coeffs_easy)
decay_coeff = decay_coeff[0:95]  # restrict the fit to the first 95 modes
len_i = len(decay_coeff)
# Ordinary least squares in log-log space: log(1-C(k)) ~= a*log(k) + b.
log_decay_coeff = np.log(decay_coeff)
kvals_linsp= np.log( np.linspace(1,len_i, len_i) )
# Slope from the closed-form OLS estimator.
a = (np.mean(kvals_linsp*log_decay_coeff) - np.mean(kvals_linsp)*np.mean(log_decay_coeff)) / (np.mean(kvals_linsp**2) - np.mean(kvals_linsp)**2)
# Bug fix: the intercept of a log-log regression must use the *log* of the
# decay coefficients; the original used the raw decay_coeff values. `b` is
# not used below, so no figure output changes.
b = np.mean(log_decay_coeff) - a*np.mean(kvals_linsp)
print("a val from fit %0.2f" % a)
plt.loglog(decay_coeff, label ='Orientation Task')
# Reference power law k^a anchored at the first tail value.
plt.loglog(np.linspace(1,len_i,len_i)**a * decay_coeff[0], '--', color = 'black', label = r'$k^{%.1f}$' % a)
plt.xlabel(r'$k$', fontsize=myaxis_font)
plt.ylabel(r'$1 - C(k)$', fontsize=myaxis_font)
plt.title('Cumulative Power',fontsize=myaxis_font)
plt.legend()
plt.tight_layout()
plt.savefig(fig_dir + 'cumulative_powerlaw_scaling.pdf')
plt.show()
# --- Compare the measured spectrum with synthetic power-law spectra k^-b. ---
bvals = [2,3,4]
all_s = [s] + [np.linspace(1,len(s),len(s))**(-b) for b in bvals]
plt.figure(figsize=(2.4,2))
for i,si in enumerate(all_s):
    # Entry 0 is the experimental spectrum; the rest are synthetic k^-b.
    if i == 0:
        label = 'Expt.'
    else:
        label = r'$b = %d$' % bvals[i-1]
    # Total generalization error = sum of theoretical mode errors.
    plt.loglog(pvals, power_law.mode_errs(pvals,si,coeffs_easy,10).sum(axis = 0), label = label)
plt.xlabel(r'$p$', fontsize=myaxis_font)
plt.ylabel(r'$E_g$', fontsize=myaxis_font)
plt.title('Learning Curves', fontsize=myaxis_font)
#ax.set_xscale('log')
plt.legend()
plt.tight_layout()
plt.savefig(fig_dir + 'power_law_scalings_mouse_ori.pdf')
plt.show()
# --- Plot the spectra themselves (each normalized by its top eigenvalue). ---
plt.figure(figsize=(2.4,2))
for i,si in enumerate(all_s):
    if i == 0:
        label = 'Expt.'
    else:
        label = r'$b = %d$' % bvals[i-1]
    plt.loglog(si/si[0], label = label)
plt.xlabel(r'$k$', fontsize=myaxis_font)
plt.ylabel(r'$\lambda_k$', fontsize=myaxis_font)
plt.title('Power Law Spectra', fontsize=myaxis_font)
plt.ylim([1e-5,2])
#ax.set_xscale('log')
plt.legend()
plt.tight_layout()
plt.savefig(fig_dir + 'power_law_spectra_mouse.pdf')
plt.show()
# --- Final predictor visualization and empirical vs. theoretical learning curves. ---
plt.figure(figsize=(1.8,1.5))
plt.plot(stim_vals, y_easy_true, '--', color = 'black', label = r'$y(x)$', linewidth=line_width)
# NOTE(review): yhat1 is plotted twice (solid C0 and dashed C1) — confirm
# whether the second line was meant to show a different estimate.
plt.plot(stim_vals, yhat1, color = 'C0')
plt.plot(stim_vals, yhat1, '--', color ='C1')
plt.xlabel(r'$\theta$', fontsize=myaxis_font)
plt.ylabel(r'$y(\theta)$', fontsize=myaxis_font)
plt.title('Target Function',fontsize=myaxis_font)
plt.xticks([0,math.pi], [r'$0$',r'$\pi$'])
#plt.xticks([])
#plt.yticks([])
#plt.legend()
plt.tight_layout()
plt.savefig(fig_dir + 'task_visual_powerlaw.pdf')
plt.show()
# Theoretical learning curves at the empirical sample sizes.
ptheory = pvals
theory_easy = np.sum( power_law.mode_errs(ptheory, s, coeffs_easy, lamb), axis = 0)
theory_hard = np.sum( power_law.mode_errs(ptheory, s, coeffs_hard, lamb), axis = 0)
fig = plt.figure()
ax = plt.axes()
# Empirical means over the regression repeats.
easy_mean = np.mean(err_easy, axis = 1)
hard_mean = np.mean(err_hard, axis = 1)
plt.figure(figsize=(1.8,1.5))
# Normalize both experiment and theory by their value at the smallest p.
plt.errorbar(pvals, easy_mean/easy_mean[0], np.std(err_easy, axis=1), fmt = 'o', markersize=2.5, color = 'C0', linewidth=line_width)
plt.errorbar(pvals, hard_mean/hard_mean[0], np.std(err_hard, axis = 1), fmt = 'o', markersize=2.5, color = 'C2', linewidth=line_width)
#plt.plot(pvals, me[0,:] / me[0,0], '--', color = 'C0')
#plt.plot(pvals, me[5,:] / me[5,0], '--', color = 'C1')
plt.plot(ptheory, theory_easy/theory_easy[0], '--', color = 'C0', linewidth=line_width)
plt.plot(ptheory, theory_hard/theory_hard[0], '--', color = 'C2', linewidth=line_width)
plt.xlabel(r'$p$', fontsize=myaxis_font)
plt.ylabel(r'$E_g$', fontsize=myaxis_font)
plt.title('Learning Curves', fontsize=myaxis_font)
#ax.set_xscale('log')
#plt.legend()
plt.tight_layout()
plt.savefig(fig_dir+ 'mouse_lc.pdf')
plt.show()
# Original vs. rotated comparison.
# NOTE(review): both error bars use easy_mean — likely intentional since a
# rotation leaves the kernel (and hence the learning curve) unchanged, but
# the error bars differ (err_easy vs err_hard); verify.
plt.figure(figsize=(1.8,1.5))
plt.errorbar(pvals, easy_mean/easy_mean[0], np.std(err_easy,axis=1), fmt = 'o', markersize=2, color = 'C0', label = 'Original', linewidth=line_width)
plt.errorbar(pvals, easy_mean/easy_mean[0], np.std(err_hard,axis=1), fmt = '^', markersize=2, color = 'C2', label = 'Rotated', linewidth=line_width)
plt.plot(ptheory, theory_easy/theory_easy[0], '--', color = 'black', label = 'theory', linewidth=line_width)
plt.legend()
plt.xlabel(r'$p$', fontsize=myaxis_font)
plt.ylabel(r'$E_g$', fontsize=myaxis_font)
plt.title('Learning Curves',fontsize=myaxis_font)
plt.tight_layout()
plt.savefig(fig_dir + 'mouse_rotate_lcs.pdf')
plt.show()
| [
"numpy.random.standard_normal",
"numpy.convolve",
"numpy.sqrt",
"numpy.log10",
"matplotlib.pyplot.ylabel",
"numpy.log",
"numpy.argsort",
"numpy.arange",
"matplotlib.pyplot.imshow",
"numpy.mean",
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
... | [((417, 440), 'cycler.cycler', 'cycler', ([], {'color': '"""bgrcmyk"""'}), "(color='bgrcmyk')\n", (423, 440), False, 'from cycler import cycler\n'), ((485, 527), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': myfont}"], {}), "({'font.size': myfont})\n", (504, 527), True, 'from matplotlib import pyplot as plt\n'), ((1162, 1182), 'numpy.random.seed', 'np.random.seed', (['(2021)'], {}), '(2021)\n', (1176, 1182), True, 'import numpy as np\n'), ((583, 621), 'os.path.join', 'os.path.join', (['dataroot', '"""database.npy"""'], {}), "(dataroot, 'database.npy')\n", (595, 621), False, 'import os\n'), ((1410, 1442), 'utils.compile_resp', 'utils.compile_resp', (['dat'], {'npc': 'npc'}), '(dat, npc=npc)\n', (1428, 1442), False, 'import utils\n'), ((1546, 1579), 'numpy.linspace', 'np.linspace', (['(0)', 'math.pi', 'num_stim'], {}), '(0, math.pi, num_stim)\n', (1557, 1579), True, 'import numpy as np\n'), ((1595, 1631), 'numpy.zeros', 'np.zeros', (['(sresp.shape[0], num_stim)'], {}), '((sresp.shape[0], num_stim))\n', (1603, 1631), True, 'import numpy as np\n'), ((2175, 2229), 'numpy.exp', 'np.exp', (['(-0.5 * (stim_vals - math.pi) ** 2 / sigma ** 2)'], {}), '(-0.5 * (stim_vals - math.pi) ** 2 / sigma ** 2)\n', (2181, 2229), True, 'import numpy as np\n'), ((2274, 2304), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (2284, 2304), True, 'from matplotlib import pyplot as plt\n'), ((2540, 2585), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\theta$', fontsize=myaxis_font)\n", (2550, 2585), True, 'from matplotlib import pyplot as plt\n'), ((2590, 2638), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$r(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$r(\\\\theta)$', fontsize=myaxis_font)\n", (2600, 2638), True, 'from matplotlib import pyplot as plt\n'), ((2683, 2726), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, math.pi]', "['$0$', 
'$\\\\pi$']"], {}), "([0, math.pi], ['$0$', '$\\\\pi$'])\n", (2693, 2726), True, 'from matplotlib import pyplot as plt\n'), ((2729, 2772), 'matplotlib.pyplot.title', 'plt.title', (['"""Original"""'], {'fontsize': 'myaxis_font'}), "('Original', fontsize=myaxis_font)\n", (2738, 2772), True, 'from matplotlib import pyplot as plt\n'), ((2777, 2795), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2793, 2795), True, 'from matplotlib import pyplot as plt\n'), ((2800, 2850), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'tuning_curves_mouse_r.pdf')"], {}), "(fig_dir + 'tuning_curves_mouse_r.pdf')\n", (2811, 2850), True, 'from matplotlib import pyplot as plt\n'), ((2855, 2865), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2863, 2865), True, 'from matplotlib import pyplot as plt\n'), ((2870, 2881), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2879, 2881), True, 'from matplotlib import pyplot as plt\n'), ((2935, 2947), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2945, 2947), True, 'from matplotlib import pyplot as plt\n'), ((2952, 2990), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 20}"], {}), "({'font.size': 20})\n", (2971, 2990), True, 'from matplotlib import pyplot as plt\n'), ((3212, 3229), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (3226, 3229), True, 'import numpy as np\n'), ((3833, 3858), 'matplotlib.pyplot.title', 'plt.title', (['"""Neural Space"""'], {}), "('Neural Space')\n", (3842, 3858), True, 'from matplotlib import pyplot as plt\n'), ((3863, 3875), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3873, 3875), True, 'from matplotlib import pyplot as plt\n'), ((4156, 4174), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4172, 4174), True, 'from matplotlib import pyplot as plt\n'), ((4179, 4232), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'tuning_curves_mouse_r_3d.pdf')"], {}), 
"(fig_dir + 'tuning_curves_mouse_r_3d.pdf')\n", (4190, 4232), True, 'from matplotlib import pyplot as plt\n'), ((4237, 4247), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4245, 4247), True, 'from matplotlib import pyplot as plt\n'), ((4252, 4263), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4261, 4263), True, 'from matplotlib import pyplot as plt\n'), ((4269, 4311), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': myfont}"], {}), "({'font.size': myfont})\n", (4288, 4311), True, 'from matplotlib import pyplot as plt\n'), ((4318, 4348), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (4328, 4348), True, 'from matplotlib import pyplot as plt\n'), ((4606, 4651), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\theta$', fontsize=myaxis_font)\n", (4616, 4651), True, 'from matplotlib import pyplot as plt\n'), ((4656, 4704), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$r(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$r(\\\\theta)$', fontsize=myaxis_font)\n", (4666, 4704), True, 'from matplotlib import pyplot as plt\n'), ((4803, 4821), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4819, 4821), True, 'from matplotlib import pyplot as plt\n'), ((4826, 4881), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'tuning_curves_mouse_r_many.pdf')"], {}), "(fig_dir + 'tuning_curves_mouse_r_many.pdf')\n", (4837, 4881), True, 'from matplotlib import pyplot as plt\n'), ((4886, 4896), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4894, 4896), True, 'from matplotlib import pyplot as plt\n'), ((4903, 4933), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (4913, 4933), True, 'from matplotlib import pyplot as plt\n'), ((5147, 5192), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {'fontsize': 
'myaxis_font'}), "('$\\\\theta$', fontsize=myaxis_font)\n", (5157, 5192), True, 'from matplotlib import pyplot as plt\n'), ((5197, 5254), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\tilde{r}(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\tilde{r}(\\\\theta)$', fontsize=myaxis_font)\n", (5207, 5254), True, 'from matplotlib import pyplot as plt\n'), ((5258, 5300), 'matplotlib.pyplot.title', 'plt.title', (['"""Rotated"""'], {'fontsize': 'myaxis_font'}), "('Rotated', fontsize=myaxis_font)\n", (5267, 5300), True, 'from matplotlib import pyplot as plt\n'), ((5304, 5347), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, math.pi]', "['$0$', '$\\\\pi$']"], {}), "([0, math.pi], ['$0$', '$\\\\pi$'])\n", (5314, 5347), True, 'from matplotlib import pyplot as plt\n'), ((5391, 5409), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5407, 5409), True, 'from matplotlib import pyplot as plt\n'), ((5414, 5472), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'tuning_curves_mouse_rotated_r.pdf')"], {}), "(fig_dir + 'tuning_curves_mouse_rotated_r.pdf')\n", (5425, 5472), True, 'from matplotlib import pyplot as plt\n'), ((5475, 5485), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5483, 5485), True, 'from matplotlib import pyplot as plt\n'), ((5658, 5688), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (5668, 5688), True, 'from matplotlib import pyplot as plt\n'), ((5741, 5796), 'matplotlib.pyplot.imshow', 'plt.imshow', (['K_sub'], {'cmap': '"""seismic"""', 'vmin': 'vmin', 'vmax': 'vmax'}), "(K_sub, cmap='seismic', vmin=vmin, vmax=vmax)\n", (5751, 5796), True, 'from matplotlib import pyplot as plt\n'), ((5807, 5860), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, resp_avg.shape[1]]', "['$0$', '$\\\\pi$']"], {}), "([0, resp_avg.shape[1]], ['$0$', '$\\\\pi$'])\n", (5817, 5860), True, 'from matplotlib import pyplot as plt\n'), ((5863, 5916), 'matplotlib.pyplot.yticks', 
'plt.yticks', (['[0, resp_avg.shape[1]]', "['$\\\\pi$', '$0$']"], {}), "([0, resp_avg.shape[1]], ['$\\\\pi$', '$0$'])\n", (5873, 5916), True, 'from matplotlib import pyplot as plt\n'), ((5919, 5966), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta_1$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\theta_1$', fontsize=myaxis_font)\n", (5929, 5966), True, 'from matplotlib import pyplot as plt\n'), ((5971, 6018), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\theta_2$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\theta_2$', fontsize=myaxis_font)\n", (5981, 6018), True, 'from matplotlib import pyplot as plt\n'), ((6023, 6064), 'matplotlib.pyplot.title', 'plt.title', (['"""Kernel"""'], {'fontsize': 'myaxis_font'}), "('Kernel', fontsize=myaxis_font)\n", (6032, 6064), True, 'from matplotlib import pyplot as plt\n'), ((6069, 6083), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6081, 6083), True, 'from matplotlib import pyplot as plt\n'), ((6089, 6107), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6105, 6107), True, 'from matplotlib import pyplot as plt\n'), ((6112, 6168), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'kernel_matrix_sub_no_rotate.pdf')"], {}), "(fig_dir + 'kernel_matrix_sub_no_rotate.pdf')\n", (6123, 6168), True, 'from matplotlib import pyplot as plt\n'), ((6173, 6183), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6181, 6183), True, 'from matplotlib import pyplot as plt\n'), ((7604, 7634), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (7614, 7634), True, 'from matplotlib import pyplot as plt\n'), ((7639, 7652), 'matplotlib.pyplot.imshow', 'plt.imshow', (['K'], {}), '(K)\n', (7649, 7652), True, 'from matplotlib import pyplot as plt\n'), ((7657, 7671), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (7667, 7671), True, 'from matplotlib import pyplot as plt\n'), ((7676, 7690), 'matplotlib.pyplot.yticks', 
'plt.yticks', (['[]'], {}), '([])\n', (7686, 7690), True, 'from matplotlib import pyplot as plt\n'), ((7695, 7742), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta_1$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\theta_1$', fontsize=myaxis_font)\n", (7705, 7742), True, 'from matplotlib import pyplot as plt\n'), ((7747, 7794), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\theta_2$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\theta_2$', fontsize=myaxis_font)\n", (7757, 7794), True, 'from matplotlib import pyplot as plt\n'), ((7799, 7813), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (7811, 7813), True, 'from matplotlib import pyplot as plt\n'), ((7818, 7836), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7834, 7836), True, 'from matplotlib import pyplot as plt\n'), ((7841, 7883), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'kernel_matrix.pdf')"], {}), "(fig_dir + 'kernel_matrix.pdf')\n", (7852, 7883), True, 'from matplotlib import pyplot as plt\n'), ((7888, 7898), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7896, 7898), True, 'from matplotlib import pyplot as plt\n'), ((7911, 7931), 'numpy.zeros', 'np.zeros', (['K.shape[0]'], {}), '(K.shape[0])\n', (7919, 7931), True, 'import numpy as np\n'), ((8058, 8088), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (8068, 8088), True, 'from matplotlib import pyplot as plt\n'), ((8093, 8118), 'matplotlib.pyplot.plot', 'plt.plot', (['stim_vals', 'kavg'], {}), '(stim_vals, kavg)\n', (8101, 8118), True, 'from matplotlib import pyplot as plt\n'), ((8123, 8168), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\theta$', fontsize=myaxis_font)\n", (8133, 8168), True, 'from matplotlib import pyplot as plt\n'), ((8173, 8221), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$K(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$K(\\\\theta)$', 
fontsize=myaxis_font)\n", (8183, 8221), True, 'from matplotlib import pyplot as plt\n'), ((8226, 8244), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8242, 8244), True, 'from matplotlib import pyplot as plt\n'), ((8249, 8295), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'kernel_real_space.pdf')"], {}), "(fig_dir + 'kernel_real_space.pdf')\n", (8260, 8295), True, 'from matplotlib import pyplot as plt\n'), ((8300, 8310), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8308, 8310), True, 'from matplotlib import pyplot as plt\n'), ((8322, 8342), 'numpy.zeros', 'np.zeros', (['K.shape[0]'], {}), '(K.shape[0])\n', (8330, 8342), True, 'import numpy as np\n'), ((8825, 8855), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (8835, 8855), True, 'from matplotlib import pyplot as plt\n'), ((8859, 8880), 'matplotlib.pyplot.plot', 'plt.plot', (['x_shift', 'k2'], {}), '(x_shift, k2)\n', (8867, 8880), True, 'from matplotlib import pyplot as plt\n'), ((8885, 8930), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\theta$', fontsize=myaxis_font)\n", (8895, 8930), True, 'from matplotlib import pyplot as plt\n'), ((8935, 8983), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$K(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$K(\\\\theta)$', fontsize=myaxis_font)\n", (8945, 8983), True, 'from matplotlib import pyplot as plt\n'), ((8988, 9006), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9004, 9006), True, 'from matplotlib import pyplot as plt\n'), ((9011, 9063), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'kernel_real_space_shift.pdf')"], {}), "(fig_dir + 'kernel_real_space_shift.pdf')\n", (9022, 9063), True, 'from matplotlib import pyplot as plt\n'), ((9068, 9078), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9076, 9078), True, 'from matplotlib import pyplot as plt\n'), 
((9090, 9107), 'numpy.linalg.eigh', 'np.linalg.eigh', (['K'], {}), '(K)\n', (9104, 9107), True, 'import numpy as np\n'), ((9192, 9222), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (9202, 9222), True, 'from matplotlib import pyplot as plt\n'), ((9226, 9268), 'matplotlib.pyplot.loglog', 'plt.loglog', (['(s / s[0])'], {'linewidth': 'line_width'}), '(s / s[0], linewidth=line_width)\n', (9236, 9268), True, 'from matplotlib import pyplot as plt\n'), ((9273, 9312), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$k$"""'], {'fontsize': 'myaxis_font'}), "('$k$', fontsize=myaxis_font)\n", (9283, 9312), True, 'from matplotlib import pyplot as plt\n'), ((9318, 9366), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\lambda_k$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\lambda_k$', fontsize=myaxis_font)\n", (9328, 9366), True, 'from matplotlib import pyplot as plt\n'), ((9371, 9391), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.001, 1]'], {}), '([0.001, 1])\n', (9379, 9391), True, 'from matplotlib import pyplot as plt\n'), ((9394, 9412), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9410, 9412), True, 'from matplotlib import pyplot as plt\n'), ((9417, 9477), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'spectrum_population_grating_big.pdf')"], {}), "(fig_dir + 'spectrum_population_grating_big.pdf')\n", (9428, 9477), True, 'from matplotlib import pyplot as plt\n'), ((9482, 9492), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9490, 9492), True, 'from matplotlib import pyplot as plt\n'), ((9548, 9579), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2.25, 1.5)'}), '(figsize=(2.25, 1.5))\n', (9558, 9579), True, 'from matplotlib import pyplot as plt\n'), ((9885, 9918), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1, 1)'}), '(bbox_to_anchor=(1, 1))\n', (9895, 9918), True, 'from matplotlib import pyplot as plt\n'), ((9925, 9970), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\theta$', fontsize=myaxis_font)\n", (9935, 9970), True, 'from matplotlib import pyplot as plt\n'), ((9977, 10031), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\phi_k(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\phi_k(\\\\theta)$', fontsize=myaxis_font)\n", (9987, 10031), True, 'from matplotlib import pyplot as plt\n'), ((10035, 10078), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, math.pi]', "['$0$', '$\\\\pi$']"], {}), "([0, math.pi], ['$0$', '$\\\\pi$'])\n", (10045, 10078), True, 'from matplotlib import pyplot as plt\n'), ((10122, 10140), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10138, 10140), True, 'from matplotlib import pyplot as plt\n'), ((10145, 10192), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'eigenfunctions_big.pdf')"], {}), "(fig_dir + 'eigenfunctions_big.pdf')\n", (10156, 10192), True, 'from matplotlib import pyplot as plt\n'), ((10197, 10207), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10205, 10207), True, 'from matplotlib import pyplot as plt\n'), ((10248, 10270), 'numpy.logspace', 'np.logspace', (['(0)', '(4)', '(300)'], {}), '(0, 4, 300)\n', (10259, 10270), True, 'import numpy as np\n'), ((10364, 10394), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (10374, 10394), True, 'from matplotlib import pyplot as plt\n'), ((10536, 10548), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10546, 10548), True, 'from matplotlib import pyplot as plt\n'), ((10553, 10592), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$p$"""'], {'fontsize': 'myaxis_font'}), "('$p$', fontsize=myaxis_font)\n", (10563, 10592), True, 'from matplotlib import pyplot as plt\n'), ((10600, 10641), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$E_k$"""'], {'fontsize': 'myaxis_font'}), "('$E_k$', fontsize=myaxis_font)\n", (10610, 10641), 
True, 'from matplotlib import pyplot as plt\n'), ((10647, 10693), 'matplotlib.pyplot.title', 'plt.title', (['"""Mode Errors"""'], {'fontsize': 'myaxis_font'}), "('Mode Errors', fontsize=myaxis_font)\n", (10656, 10693), True, 'from matplotlib import pyplot as plt\n'), ((10697, 10715), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10713, 10715), True, 'from matplotlib import pyplot as plt\n'), ((10720, 10783), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'mode_err_curves_population_grating.pdf')"], {}), "(fig_dir + 'mode_err_curves_population_grating.pdf')\n", (10731, 10783), True, 'from matplotlib import pyplot as plt\n'), ((10788, 10798), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10796, 10798), True, 'from matplotlib import pyplot as plt\n'), ((11036, 11066), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (11046, 11066), True, 'from matplotlib import pyplot as plt\n'), ((11071, 11175), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_space[0, inds_0_90]', 'feature_space[1, inds_0_90]'], {'s': '(1)', 'color': '"""C4"""', 'label': '"""$+1$"""'}), "(feature_space[0, inds_0_90], feature_space[1, inds_0_90], s=1,\n color='C4', label='$+1$')\n", (11082, 11175), True, 'from matplotlib import pyplot as plt\n'), ((11180, 11289), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_space[0, inds_90_180]', 'feature_space[1, inds_90_180]'], {'s': '(1)', 'color': '"""C5"""', 'label': '"""$-1$"""'}), "(feature_space[0, inds_90_180], feature_space[1, inds_90_180], s\n =1, color='C5', label='$-1$')\n", (11191, 11289), True, 'from matplotlib import pyplot as plt\n'), ((11293, 11307), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (11303, 11307), True, 'from matplotlib import pyplot as plt\n'), ((11312, 11326), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (11322, 11326), True, 'from matplotlib import pyplot as plt\n'), ((11331, 
11404), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\sqrt{\\\\lambda_1} \\\\psi_1(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\sqrt{\\\\lambda_1} \\\\psi_1(\\\\theta)$', fontsize=myaxis_font)\n", (11341, 11404), True, 'from matplotlib import pyplot as plt\n'), ((11406, 11479), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sqrt{\\\\lambda_2} \\\\psi_2(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\sqrt{\\\\lambda_2} \\\\psi_2(\\\\theta)$', fontsize=myaxis_font)\n", (11416, 11479), True, 'from matplotlib import pyplot as plt\n'), ((11483, 11532), 'matplotlib.pyplot.title', 'plt.title', (['"""Low Freq. Task"""'], {'fontsize': 'myaxis_font'}), "('Low Freq. Task', fontsize=myaxis_font)\n", (11492, 11532), True, 'from matplotlib import pyplot as plt\n'), ((11536, 11548), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11546, 11548), True, 'from matplotlib import pyplot as plt\n'), ((11553, 11571), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11569, 11571), True, 'from matplotlib import pyplot as plt\n'), ((11576, 11635), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'feature_k_space_mouse_low_freq.pdf')"], {}), "(fig_dir + 'feature_k_space_mouse_low_freq.pdf')\n", (11587, 11635), True, 'from matplotlib import pyplot as plt\n'), ((11640, 11650), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11648, 11650), True, 'from matplotlib import pyplot as plt\n'), ((11812, 11842), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (11822, 11842), True, 'from matplotlib import pyplot as plt\n'), ((11847, 11946), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_space[0, inds_0]', 'feature_space[1, inds_0]'], {'s': '(1)', 'color': '"""C4"""', 'label': '"""$+1$"""'}), "(feature_space[0, inds_0], feature_space[1, inds_0], s=1, color=\n 'C4', label='$+1$')\n", (11858, 11946), True, 'from matplotlib import pyplot as plt\n'), ((11951, 
12050), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_space[0, inds_1]', 'feature_space[1, inds_1]'], {'s': '(1)', 'color': '"""C5"""', 'label': '"""$-1$"""'}), "(feature_space[0, inds_1], feature_space[1, inds_1], s=1, color=\n 'C5', label='$-1$')\n", (11962, 12050), True, 'from matplotlib import pyplot as plt\n'), ((12054, 12068), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (12064, 12068), True, 'from matplotlib import pyplot as plt\n'), ((12073, 12087), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (12083, 12087), True, 'from matplotlib import pyplot as plt\n'), ((12092, 12165), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\sqrt{\\\\lambda_1} \\\\psi_1(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\sqrt{\\\\lambda_1} \\\\psi_1(\\\\theta)$', fontsize=myaxis_font)\n", (12102, 12165), True, 'from matplotlib import pyplot as plt\n'), ((12167, 12240), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sqrt{\\\\lambda_2} \\\\psi_2(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\sqrt{\\\\lambda_2} \\\\psi_2(\\\\theta)$', fontsize=myaxis_font)\n", (12177, 12240), True, 'from matplotlib import pyplot as plt\n'), ((12244, 12294), 'matplotlib.pyplot.title', 'plt.title', (['"""High Freq. Task"""'], {'fontsize': 'myaxis_font'}), "('High Freq. 
Task', fontsize=myaxis_font)\n", (12253, 12294), True, 'from matplotlib import pyplot as plt\n'), ((12298, 12310), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12308, 12310), True, 'from matplotlib import pyplot as plt\n'), ((12315, 12333), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12331, 12333), True, 'from matplotlib import pyplot as plt\n'), ((12338, 12398), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'feature_k_space_mouse_high_freq.pdf')"], {}), "(fig_dir + 'feature_k_space_mouse_high_freq.pdf')\n", (12349, 12398), True, 'from matplotlib import pyplot as plt\n'), ((12403, 12413), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12411, 12413), True, 'from matplotlib import pyplot as plt\n'), ((12496, 12526), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (12506, 12526), True, 'from matplotlib import pyplot as plt\n'), ((12530, 12550), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (12541, 12550), True, 'from matplotlib import pyplot as plt\n'), ((12553, 12610), 'matplotlib.pyplot.plot', 'plt.plot', (['stim_vals', 'y1'], {'linewidth': 'line_width', 'color': '"""C0"""'}), "(stim_vals, y1, linewidth=line_width, color='C0')\n", (12561, 12610), True, 'from matplotlib import pyplot as plt\n'), ((12617, 12665), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$y(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$y(\\\\theta)$', fontsize=myaxis_font)\n", (12627, 12665), True, 'from matplotlib import pyplot as plt\n'), ((12670, 12719), 'matplotlib.pyplot.title', 'plt.title', (['"""Low Freq. Task"""'], {'fontsize': 'myaxis_font'}), "('Low Freq. 
Task', fontsize=myaxis_font)\n", (12679, 12719), True, 'from matplotlib import pyplot as plt\n'), ((12724, 12744), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (12735, 12744), True, 'from matplotlib import pyplot as plt\n'), ((12747, 12804), 'matplotlib.pyplot.plot', 'plt.plot', (['stim_vals', 'y2'], {'linewidth': 'line_width', 'color': '"""C2"""'}), "(stim_vals, y2, linewidth=line_width, color='C2')\n", (12755, 12804), True, 'from matplotlib import pyplot as plt\n'), ((12829, 12874), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\theta$', fontsize=myaxis_font)\n", (12839, 12874), True, 'from matplotlib import pyplot as plt\n'), ((12879, 12927), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$y(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$y(\\\\theta)$', fontsize=myaxis_font)\n", (12889, 12927), True, 'from matplotlib import pyplot as plt\n'), ((12932, 12982), 'matplotlib.pyplot.title', 'plt.title', (['"""High Freq. Task"""'], {'fontsize': 'myaxis_font'}), "('High Freq. 
Task', fontsize=myaxis_font)\n", (12941, 12982), True, 'from matplotlib import pyplot as plt\n'), ((12986, 13029), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, math.pi]', "['$0$', '$\\\\pi$']"], {}), "([0, math.pi], ['$0$', '$\\\\pi$'])\n", (12996, 13029), True, 'from matplotlib import pyplot as plt\n'), ((13033, 13051), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13049, 13051), True, 'from matplotlib import pyplot as plt\n'), ((13056, 13107), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'low_high_target_visual.pdf')"], {}), "(fig_dir + 'low_high_target_visual.pdf')\n", (13067, 13107), True, 'from matplotlib import pyplot as plt\n'), ((13112, 13122), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13120, 13122), True, 'from matplotlib import pyplot as plt\n'), ((13310, 13340), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (13320, 13340), True, 'from matplotlib import pyplot as plt\n'), ((13345, 13449), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_space[sort1[0], inds_0_90]', 'feature_space[sort1[1], inds_0_90]'], {'color': '"""C4"""', 's': '(1)'}), "(feature_space[sort1[0], inds_0_90], feature_space[sort1[1],\n inds_0_90], color='C4', s=1)\n", (13356, 13449), True, 'from matplotlib import pyplot as plt\n'), ((13452, 13560), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_space[sort1[0], inds_90_180]', 'feature_space[sort1[1], inds_90_180]'], {'color': '"""C5"""', 's': '(1)'}), "(feature_space[sort1[0], inds_90_180], feature_space[sort1[1],\n inds_90_180], color='C5', s=1)\n", (13463, 13560), True, 'from matplotlib import pyplot as plt\n'), ((13563, 13577), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (13573, 13577), True, 'from matplotlib import pyplot as plt\n'), ((13582, 13596), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (13592, 13596), True, 'from matplotlib import pyplot as plt\n'), ((13601, 
13674), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\sqrt{\\\\lambda_1} \\\\psi_1(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\sqrt{\\\\lambda_1} \\\\psi_1(\\\\theta)$', fontsize=myaxis_font)\n", (13611, 13674), True, 'from matplotlib import pyplot as plt\n'), ((13676, 13749), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sqrt{\\\\lambda_2} \\\\psi_2(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\sqrt{\\\\lambda_2} \\\\psi_2(\\\\theta)$', fontsize=myaxis_font)\n", (13686, 13749), True, 'from matplotlib import pyplot as plt\n'), ((13753, 13771), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13769, 13771), True, 'from matplotlib import pyplot as plt\n'), ((13776, 13847), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'feature_k_space_mouse_low_freq_max_var_kpc.pdf')"], {}), "(fig_dir + 'feature_k_space_mouse_low_freq_max_var_kpc.pdf')\n", (13787, 13847), True, 'from matplotlib import pyplot as plt\n'), ((13852, 13862), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13860, 13862), True, 'from matplotlib import pyplot as plt\n'), ((14024, 14054), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (14034, 14054), True, 'from matplotlib import pyplot as plt\n'), ((14059, 14158), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_space[sort2[0], inds_0]', 'feature_space[sort2[1], inds_0]'], {'color': '"""C4"""', 's': '(1)'}), "(feature_space[sort2[0], inds_0], feature_space[sort2[1], inds_0\n ], color='C4', s=1)\n", (14070, 14158), True, 'from matplotlib import pyplot as plt\n'), ((14161, 14260), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_space[sort2[0], inds_1]', 'feature_space[sort2[1], inds_1]'], {'color': '"""C5"""', 's': '(1)'}), "(feature_space[sort2[0], inds_1], feature_space[sort2[1], inds_1\n ], color='C5', s=1)\n", (14172, 14260), True, 'from matplotlib import pyplot as plt\n'), ((14263, 14277), 
'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (14273, 14277), True, 'from matplotlib import pyplot as plt\n'), ((14282, 14296), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (14292, 14296), True, 'from matplotlib import pyplot as plt\n'), ((14301, 14354), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\sqrt{\\\\lambda_3} \\\\psi_{3}(\\\\theta)$"""'], {}), "('$\\\\sqrt{\\\\lambda_3} \\\\psi_{3}(\\\\theta)$')\n", (14311, 14354), True, 'from matplotlib import pyplot as plt\n'), ((14356, 14411), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sqrt{\\\\lambda_{3}} \\\\psi_{3}(\\\\theta)$"""'], {}), "('$\\\\sqrt{\\\\lambda_{3}} \\\\psi_{3}(\\\\theta)$')\n", (14366, 14411), True, 'from matplotlib import pyplot as plt\n'), ((14413, 14485), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'feature_k_space_mouse_high_freq_max_var_kpc.pdf')"], {}), "(fig_dir + 'feature_k_space_mouse_high_freq_max_var_kpc.pdf')\n", (14424, 14485), True, 'from matplotlib import pyplot as plt\n'), ((14490, 14500), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14498, 14500), True, 'from matplotlib import pyplot as plt\n'), ((14508, 14538), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (14518, 14538), True, 'from matplotlib import pyplot as plt\n'), ((14542, 14640), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_space[2, inds_0]', 'feature_space[3, inds_0]'], {'color': '"""C4"""', 'label': '"""$+1$"""', 's': '(1)'}), "(feature_space[2, inds_0], feature_space[3, inds_0], color='C4',\n label='$+1$', s=1)\n", (14553, 14640), True, 'from matplotlib import pyplot as plt\n'), ((14646, 14744), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_space[2, inds_1]', 'feature_space[3, inds_1]'], {'color': '"""C5"""', 'label': '"""$-1$"""', 's': '(1)'}), "(feature_space[2, inds_1], feature_space[3, inds_1], color='C5',\n label='$-1$', s=1)\n", (14657, 14744), True, 'from 
matplotlib import pyplot as plt\n'), ((14751, 14765), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (14761, 14765), True, 'from matplotlib import pyplot as plt\n'), ((14770, 14784), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (14780, 14784), True, 'from matplotlib import pyplot as plt\n'), ((14789, 14862), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\sqrt{\\\\lambda_3} \\\\psi_3(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\sqrt{\\\\lambda_3} \\\\psi_3(\\\\theta)$', fontsize=myaxis_font)\n", (14799, 14862), True, 'from matplotlib import pyplot as plt\n'), ((14864, 14937), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sqrt{\\\\lambda_4} \\\\psi_4(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\sqrt{\\\\lambda_4} \\\\psi_4(\\\\theta)$', fontsize=myaxis_font)\n", (14874, 14937), True, 'from matplotlib import pyplot as plt\n'), ((14941, 14991), 'matplotlib.pyplot.title', 'plt.title', (['"""High Freq. Task"""'], {'fontsize': 'myaxis_font'}), "('High Freq. 
Task', fontsize=myaxis_font)\n", (14950, 14991), True, 'from matplotlib import pyplot as plt\n'), ((14995, 15007), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (15005, 15007), True, 'from matplotlib import pyplot as plt\n'), ((15012, 15030), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15028, 15030), True, 'from matplotlib import pyplot as plt\n'), ((15035, 15102), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'feature_k_space_mouse_high_freq_68_kpc.pdf')"], {}), "(fig_dir + 'feature_k_space_mouse_high_freq_68_kpc.pdf')\n", (15046, 15102), True, 'from matplotlib import pyplot as plt\n'), ((15107, 15117), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15115, 15117), True, 'from matplotlib import pyplot as plt\n'), ((16465, 16495), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (16475, 16495), True, 'from matplotlib import pyplot as plt\n'), ((16499, 16561), 'matplotlib.pyplot.plot', 'plt.plot', (['stim_vals', 'yhat1'], {'label': '"""Ori."""', 'linewidth': 'line_width'}), "(stim_vals, yhat1, label='Ori.', linewidth=line_width)\n", (16507, 16561), True, 'from matplotlib import pyplot as plt\n'), ((16569, 16654), 'matplotlib.pyplot.plot', 'plt.plot', (['stim_vals', 'yhat1', '"""--"""'], {'color': '"""C2"""', 'label': '"""Rot."""', 'linewidth': 'line_width'}), "(stim_vals, yhat1, '--', color='C2', label='Rot.', linewidth=line_width\n )\n", (16577, 16654), True, 'from matplotlib import pyplot as plt\n'), ((16659, 16754), 'matplotlib.pyplot.plot', 'plt.plot', (['stim_vals', 'y_easy_true', '"""--"""'], {'color': '"""black"""', 'label': '"""$y(x)$"""', 'linewidth': 'line_width'}), "(stim_vals, y_easy_true, '--', color='black', label='$y(x)$',\n linewidth=line_width)\n", (16667, 16754), True, 'from matplotlib import pyplot as plt\n'), ((16760, 16805), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {'fontsize': 'myaxis_font'}), 
"('$\\\\theta$', fontsize=myaxis_font)\n", (16770, 16805), True, 'from matplotlib import pyplot as plt\n'), ((16810, 16858), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$y(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$y(\\\\theta)$', fontsize=myaxis_font)\n", (16820, 16858), True, 'from matplotlib import pyplot as plt\n'), ((16863, 16913), 'matplotlib.pyplot.title', 'plt.title', (['"""Target Function"""'], {'fontsize': 'myaxis_font'}), "('Target Function', fontsize=myaxis_font)\n", (16872, 16913), True, 'from matplotlib import pyplot as plt\n'), ((16917, 16960), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, math.pi]', "['$0$', '$\\\\pi$']"], {}), "([0, math.pi], ['$0$', '$\\\\pi$'])\n", (16927, 16960), True, 'from matplotlib import pyplot as plt\n'), ((17023, 17041), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17039, 17041), True, 'from matplotlib import pyplot as plt\n'), ((17046, 17086), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'task_visual.pdf')"], {}), "(fig_dir + 'task_visual.pdf')\n", (17057, 17086), True, 'from matplotlib import pyplot as plt\n'), ((17091, 17101), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17099, 17101), True, 'from matplotlib import pyplot as plt\n'), ((17283, 17313), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (17293, 17313), True, 'from matplotlib import pyplot as plt\n'), ((17546, 17585), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$k$"""'], {'fontsize': 'myaxis_font'}), "('$k$', fontsize=myaxis_font)\n", (17556, 17585), True, 'from matplotlib import pyplot as plt\n'), ((17591, 17633), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$C(k)$"""'], {'fontsize': 'myaxis_font'}), "('$C(k)$', fontsize=myaxis_font)\n", (17601, 17633), True, 'from matplotlib import pyplot as plt\n'), ((17639, 17690), 'matplotlib.pyplot.title', 'plt.title', (['"""Cumulative Power"""'], {'fontsize': 'myaxis_font'}), "('Cumulative 
Power', fontsize=myaxis_font)\n", (17648, 17690), True, 'from matplotlib import pyplot as plt\n'), ((17694, 17706), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (17704, 17706), True, 'from matplotlib import pyplot as plt\n'), ((17711, 17729), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17727, 17729), True, 'from matplotlib import pyplot as plt\n'), ((17734, 17793), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'cumulative_sign_harmonic_mouse.pdf')"], {}), "(fig_dir + 'cumulative_sign_harmonic_mouse.pdf')\n", (17745, 17793), True, 'from matplotlib import pyplot as plt\n'), ((17798, 17808), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17806, 17808), True, 'from matplotlib import pyplot as plt\n'), ((17814, 17842), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2.4, 2)'}), '(figsize=(2.4, 2))\n', (17824, 17842), True, 'from matplotlib import pyplot as plt\n'), ((17992, 18011), 'numpy.log', 'np.log', (['decay_coeff'], {}), '(decay_coeff)\n', (17998, 18011), True, 'import numpy as np\n'), ((18312, 18361), 'matplotlib.pyplot.loglog', 'plt.loglog', (['decay_coeff'], {'label': '"""Orientation Task"""'}), "(decay_coeff, label='Orientation Task')\n", (18322, 18361), True, 'from matplotlib import pyplot as plt\n'), ((18480, 18519), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$k$"""'], {'fontsize': 'myaxis_font'}), "('$k$', fontsize=myaxis_font)\n", (18490, 18519), True, 'from matplotlib import pyplot as plt\n'), ((18525, 18571), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$1 - C(k)$"""'], {'fontsize': 'myaxis_font'}), "('$1 - C(k)$', fontsize=myaxis_font)\n", (18535, 18571), True, 'from matplotlib import pyplot as plt\n'), ((18577, 18628), 'matplotlib.pyplot.title', 'plt.title', (['"""Cumulative Power"""'], {'fontsize': 'myaxis_font'}), "('Cumulative Power', fontsize=myaxis_font)\n", (18586, 18628), True, 'from matplotlib import pyplot as plt\n'), ((18632, 18644), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18642, 18644), True, 'from matplotlib import pyplot as plt\n'), ((18649, 18667), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18665, 18667), True, 'from matplotlib import pyplot as plt\n'), ((18672, 18728), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'cumulative_powerlaw_scaling.pdf')"], {}), "(fig_dir + 'cumulative_powerlaw_scaling.pdf')\n", (18683, 18728), True, 'from matplotlib import pyplot as plt\n'), ((18733, 18743), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18741, 18743), True, 'from matplotlib import pyplot as plt\n'), ((18841, 18869), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2.4, 2)'}), '(figsize=(2.4, 2))\n', (18851, 18869), True, 'from matplotlib import pyplot as plt\n'), ((19114, 19153), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$p$"""'], {'fontsize': 'myaxis_font'}), "('$p$', fontsize=myaxis_font)\n", (19124, 19153), True, 'from matplotlib import pyplot as plt\n'), ((19159, 19200), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$E_g$"""'], {'fontsize': 'myaxis_font'}), "('$E_g$', fontsize=myaxis_font)\n", (19169, 19200), True, 'from matplotlib import pyplot as plt\n'), ((19206, 19256), 'matplotlib.pyplot.title', 'plt.title', (['"""Learning Curves"""'], {'fontsize': 'myaxis_font'}), "('Learning Curves', fontsize=myaxis_font)\n", (19215, 19256), True, 'from matplotlib import pyplot as plt\n'), ((19287, 19299), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (19297, 19299), True, 'from matplotlib import pyplot as plt\n'), ((19304, 19322), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19320, 19322), True, 'from matplotlib import pyplot as plt\n'), ((19327, 19384), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'power_law_scalings_mouse_ori.pdf')"], {}), "(fig_dir + 'power_law_scalings_mouse_ori.pdf')\n", (19338, 19384), True, 'from matplotlib import pyplot as 
plt\n'), ((19389, 19399), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19397, 19399), True, 'from matplotlib import pyplot as plt\n'), ((19405, 19433), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2.4, 2)'}), '(figsize=(2.4, 2))\n', (19415, 19433), True, 'from matplotlib import pyplot as plt\n'), ((19621, 19660), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$k$"""'], {'fontsize': 'myaxis_font'}), "('$k$', fontsize=myaxis_font)\n", (19631, 19660), True, 'from matplotlib import pyplot as plt\n'), ((19666, 19714), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\lambda_k$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\lambda_k$', fontsize=myaxis_font)\n", (19676, 19714), True, 'from matplotlib import pyplot as plt\n'), ((19719, 19771), 'matplotlib.pyplot.title', 'plt.title', (['"""Power Law Spectra"""'], {'fontsize': 'myaxis_font'}), "('Power Law Spectra', fontsize=myaxis_font)\n", (19728, 19771), True, 'from matplotlib import pyplot as plt\n'), ((19776, 19796), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[1e-05, 2]'], {}), '([1e-05, 2])\n', (19784, 19796), True, 'from matplotlib import pyplot as plt\n'), ((19825, 19837), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (19835, 19837), True, 'from matplotlib import pyplot as plt\n'), ((19842, 19860), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19858, 19860), True, 'from matplotlib import pyplot as plt\n'), ((19865, 19917), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'power_law_spectra_mouse.pdf')"], {}), "(fig_dir + 'power_law_spectra_mouse.pdf')\n", (19876, 19917), True, 'from matplotlib import pyplot as plt\n'), ((19922, 19932), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19930, 19932), True, 'from matplotlib import pyplot as plt\n'), ((19939, 19969), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (19949, 19969), True, 'from matplotlib import pyplot as plt\n'), 
((19973, 20068), 'matplotlib.pyplot.plot', 'plt.plot', (['stim_vals', 'y_easy_true', '"""--"""'], {'color': '"""black"""', 'label': '"""$y(x)$"""', 'linewidth': 'line_width'}), "(stim_vals, y_easy_true, '--', color='black', label='$y(x)$',\n linewidth=line_width)\n", (19981, 20068), True, 'from matplotlib import pyplot as plt\n'), ((20074, 20112), 'matplotlib.pyplot.plot', 'plt.plot', (['stim_vals', 'yhat1'], {'color': '"""C0"""'}), "(stim_vals, yhat1, color='C0')\n", (20082, 20112), True, 'from matplotlib import pyplot as plt\n'), ((20119, 20163), 'matplotlib.pyplot.plot', 'plt.plot', (['stim_vals', 'yhat1', '"""--"""'], {'color': '"""C1"""'}), "(stim_vals, yhat1, '--', color='C1')\n", (20127, 20163), True, 'from matplotlib import pyplot as plt\n'), ((20169, 20214), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta$"""'], {'fontsize': 'myaxis_font'}), "('$\\\\theta$', fontsize=myaxis_font)\n", (20179, 20214), True, 'from matplotlib import pyplot as plt\n'), ((20219, 20267), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$y(\\\\theta)$"""'], {'fontsize': 'myaxis_font'}), "('$y(\\\\theta)$', fontsize=myaxis_font)\n", (20229, 20267), True, 'from matplotlib import pyplot as plt\n'), ((20272, 20322), 'matplotlib.pyplot.title', 'plt.title', (['"""Target Function"""'], {'fontsize': 'myaxis_font'}), "('Target Function', fontsize=myaxis_font)\n", (20281, 20322), True, 'from matplotlib import pyplot as plt\n'), ((20326, 20369), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, math.pi]', "['$0$', '$\\\\pi$']"], {}), "([0, math.pi], ['$0$', '$\\\\pi$'])\n", (20336, 20369), True, 'from matplotlib import pyplot as plt\n'), ((20431, 20449), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (20447, 20449), True, 'from matplotlib import pyplot as plt\n'), ((20454, 20503), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'task_visual_powerlaw.pdf')"], {}), "(fig_dir + 'task_visual_powerlaw.pdf')\n", (20465, 20503), True, 'from matplotlib 
import pyplot as plt\n'), ((20508, 20518), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20516, 20518), True, 'from matplotlib import pyplot as plt\n'), ((20730, 20742), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20740, 20742), True, 'from matplotlib import pyplot as plt\n'), ((20752, 20762), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (20760, 20762), True, 'from matplotlib import pyplot as plt\n'), ((20779, 20804), 'numpy.mean', 'np.mean', (['err_easy'], {'axis': '(1)'}), '(err_easy, axis=1)\n', (20786, 20804), True, 'import numpy as np\n'), ((20823, 20848), 'numpy.mean', 'np.mean', (['err_hard'], {'axis': '(1)'}), '(err_hard, axis=1)\n', (20830, 20848), True, 'import numpy as np\n'), ((20856, 20886), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (20866, 20886), True, 'from matplotlib import pyplot as plt\n'), ((21289, 21381), 'matplotlib.pyplot.plot', 'plt.plot', (['ptheory', '(theory_easy / theory_easy[0])', '"""--"""'], {'color': '"""C0"""', 'linewidth': 'line_width'}), "(ptheory, theory_easy / theory_easy[0], '--', color='C0', linewidth\n =line_width)\n", (21297, 21381), True, 'from matplotlib import pyplot as plt\n'), ((21381, 21473), 'matplotlib.pyplot.plot', 'plt.plot', (['ptheory', '(theory_hard / theory_hard[0])', '"""--"""'], {'color': '"""C2"""', 'linewidth': 'line_width'}), "(ptheory, theory_hard / theory_hard[0], '--', color='C2', linewidth\n =line_width)\n", (21389, 21473), True, 'from matplotlib import pyplot as plt\n'), ((21473, 21512), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$p$"""'], {'fontsize': 'myaxis_font'}), "('$p$', fontsize=myaxis_font)\n", (21483, 21512), True, 'from matplotlib import pyplot as plt\n'), ((21518, 21559), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$E_g$"""'], {'fontsize': 'myaxis_font'}), "('$E_g$', fontsize=myaxis_font)\n", (21528, 21559), True, 'from matplotlib import pyplot as plt\n'), ((21565, 21615), 
'matplotlib.pyplot.title', 'plt.title', (['"""Learning Curves"""'], {'fontsize': 'myaxis_font'}), "('Learning Curves', fontsize=myaxis_font)\n", (21574, 21615), True, 'from matplotlib import pyplot as plt\n'), ((21664, 21682), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21680, 21682), True, 'from matplotlib import pyplot as plt\n'), ((21687, 21724), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'mouse_lc.pdf')"], {}), "(fig_dir + 'mouse_lc.pdf')\n", (21698, 21724), True, 'from matplotlib import pyplot as plt\n'), ((21728, 21738), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21736, 21738), True, 'from matplotlib import pyplot as plt\n'), ((21745, 21775), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.8, 1.5)'}), '(figsize=(1.8, 1.5))\n', (21755, 21775), True, 'from matplotlib import pyplot as plt\n'), ((22086, 22197), 'matplotlib.pyplot.plot', 'plt.plot', (['ptheory', '(theory_easy / theory_easy[0])', '"""--"""'], {'color': '"""black"""', 'label': '"""theory"""', 'linewidth': 'line_width'}), "(ptheory, theory_easy / theory_easy[0], '--', color='black', label=\n 'theory', linewidth=line_width)\n", (22094, 22197), True, 'from matplotlib import pyplot as plt\n'), ((22199, 22211), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (22209, 22211), True, 'from matplotlib import pyplot as plt\n'), ((22216, 22255), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$p$"""'], {'fontsize': 'myaxis_font'}), "('$p$', fontsize=myaxis_font)\n", (22226, 22255), True, 'from matplotlib import pyplot as plt\n'), ((22261, 22302), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$E_g$"""'], {'fontsize': 'myaxis_font'}), "('$E_g$', fontsize=myaxis_font)\n", (22271, 22302), True, 'from matplotlib import pyplot as plt\n'), ((22308, 22358), 'matplotlib.pyplot.title', 'plt.title', (['"""Learning Curves"""'], {'fontsize': 'myaxis_font'}), "('Learning Curves', fontsize=myaxis_font)\n", (22317, 22358), True, 'from 
matplotlib import pyplot as plt\n'), ((22362, 22380), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22378, 22380), True, 'from matplotlib import pyplot as plt\n'), ((22385, 22430), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fig_dir + 'mouse_rotate_lcs.pdf')"], {}), "(fig_dir + 'mouse_rotate_lcs.pdf')\n", (22396, 22430), True, 'from matplotlib import pyplot as plt\n'), ((22435, 22445), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22443, 22445), True, 'from matplotlib import pyplot as plt\n'), ((958, 987), 'os.path.join', 'os.path.join', (['dataroot', 'fname'], {}), '(dataroot, fname)\n', (970, 987), False, 'import os\n'), ((1867, 1903), 'numpy.mean', 'np.mean', (['sresp[:, stim_inds]'], {'axis': '(1)'}), '(sresp[:, stim_inds], axis=1)\n', (1874, 1903), True, 'import numpy as np\n'), ((2070, 2109), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(1000, 1000)'], {}), '((1000, 1000))\n', (2095, 2109), True, 'import numpy as np\n'), ((2355, 2398), 'numpy.convolve', 'np.convolve', (['filter', 'resp_avg[i, :]', '"""same"""'], {}), "(filter, resp_avg[i, :], 'same')\n", (2366, 2398), True, 'import numpy as np\n'), ((2477, 2534), 'matplotlib.pyplot.plot', 'plt.plot', (['stim_vals', 'resp_avg[i, :]'], {'linewidth': 'line_width'}), '(stim_vals, resp_avg[i, :], linewidth=line_width)\n', (2485, 2534), True, 'from matplotlib import pyplot as plt\n'), ((3255, 3288), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(3, 3)'], {}), '((3, 3))\n', (3280, 3288), True, 'import numpy as np\n'), ((4391, 4434), 'numpy.convolve', 'np.convolve', (['filter', 'resp_avg[i, :]', '"""same"""'], {}), "(filter, resp_avg[i, :], 'same')\n", (4402, 4434), True, 'import numpy as np\n'), ((4543, 4600), 'matplotlib.pyplot.plot', 'plt.plot', (['stim_vals', 'resp_avg[i, :]'], {'linewidth': 'line_width'}), '(stim_vals, resp_avg[i, :], linewidth=line_width)\n', (4551, 4600), True, 'from matplotlib import pyplot as plt\n'), ((4976, 
5017), 'numpy.convolve', 'np.convolve', (['filter', 'rotate[i, :]', '"""same"""'], {}), "(filter, rotate[i, :], 'same')\n", (4987, 5017), True, 'import numpy as np\n'), ((5086, 5141), 'matplotlib.pyplot.plot', 'plt.plot', (['stim_vals', 'rotate[i, :]'], {'linewidth': 'line_width'}), '(stim_vals, rotate[i, :], linewidth=line_width)\n', (5094, 5141), True, 'from matplotlib import pyplot as plt\n'), ((9124, 9137), 'numpy.argsort', 'np.argsort', (['s'], {}), '(s)\n', (9134, 9137), True, 'import numpy as np\n'), ((9611, 9665), 'numpy.exp', 'np.exp', (['(-0.5 * (stim_vals - math.pi) ** 2 / sigma ** 2)'], {}), '(-0.5 * (stim_vals - math.pi) ** 2 / sigma ** 2)\n', (9617, 9665), True, 'import numpy as np\n'), ((9663, 9696), 'numpy.sqrt', 'np.sqrt', (['(2 * math.pi * sigma ** 2)'], {}), '(2 * math.pi * sigma ** 2)\n', (9670, 9696), True, 'import numpy as np\n'), ((9734, 9770), 'numpy.convolve', 'np.convolve', (['u[:, i]', 'filter', '"""same"""'], {}), "(u[:, i], filter, 'same')\n", (9745, 9770), True, 'import numpy as np\n'), ((10436, 10536), 'matplotlib.pyplot.loglog', 'plt.loglog', (['pvals', '(me[ind - 1, :] / me[ind - 1, 0])'], {'label': "('$k = %d$' % ind)", 'linewidth': 'line_width'}), "(pvals, me[ind - 1, :] / me[ind - 1, 0], label='$k = %d$' % ind,\n linewidth=line_width)\n", (10446, 10536), True, 'from matplotlib import pyplot as plt\n'), ((12433, 12454), 'numpy.cos', 'np.cos', (['(2 * stim_vals)'], {}), '(2 * stim_vals)\n', (12439, 12454), True, 'import numpy as np\n'), ((12471, 12492), 'numpy.cos', 'np.cos', (['(6 * stim_vals)'], {}), '(6 * stim_vals)\n', (12477, 12492), True, 'import numpy as np\n'), ((13242, 13261), 'numpy.argsort', 'np.argsort', (['coeffs1'], {}), '(coeffs1)\n', (13252, 13261), True, 'import numpy as np\n'), ((13280, 13299), 'numpy.argsort', 'np.argsort', (['coeffs2'], {}), '(coeffs2)\n', (13290, 13299), True, 'import numpy as np\n'), ((15643, 15664), 'numpy.cos', 'np.cos', (['(2 * stim_vals)'], {}), '(2 * stim_vals)\n', (15649, 15664), True, 
'import numpy as np\n'), ((15690, 15711), 'numpy.cos', 'np.cos', (['(6 * stim_vals)'], {}), '(6 * stim_vals)\n', (15696, 15711), True, 'import numpy as np\n'), ((17155, 17164), 'numpy.sum', 'np.sum', (['s'], {}), '(s)\n', (17161, 17164), True, 'import numpy as np\n'), ((18037, 18065), 'numpy.linspace', 'np.linspace', (['(1)', 'len_i', 'len_i'], {}), '(1, len_i, len_i)\n', (18048, 18065), True, 'import numpy as np\n'), ((18224, 18244), 'numpy.mean', 'np.mean', (['decay_coeff'], {}), '(decay_coeff)\n', (18231, 18244), True, 'import numpy as np\n'), ((19581, 19616), 'matplotlib.pyplot.loglog', 'plt.loglog', (['(si / si[0])'], {'label': 'label'}), '(si / si[0], label=label)\n', (19591, 19616), True, 'from matplotlib import pyplot as plt\n'), ((20568, 20618), 'power_law.mode_errs', 'power_law.mode_errs', (['ptheory', 's', 'coeffs_easy', 'lamb'], {}), '(ptheory, s, coeffs_easy, lamb)\n', (20587, 20618), False, 'import power_law\n'), ((20656, 20706), 'power_law.mode_errs', 'power_law.mode_errs', (['ptheory', 's', 'coeffs_hard', 'lamb'], {}), '(ptheory, s, coeffs_hard, lamb)\n', (20675, 20706), False, 'import power_law\n'), ((20935, 20959), 'numpy.std', 'np.std', (['err_easy'], {'axis': '(1)'}), '(err_easy, axis=1)\n', (20941, 20959), True, 'import numpy as np\n'), ((21074, 21098), 'numpy.std', 'np.std', (['err_hard'], {'axis': '(1)'}), '(err_hard, axis=1)\n', (21080, 21098), True, 'import numpy as np\n'), ((21823, 21847), 'numpy.std', 'np.std', (['err_easy'], {'axis': '(1)'}), '(err_easy, axis=1)\n', (21829, 21847), True, 'import numpy as np\n'), ((21977, 22001), 'numpy.std', 'np.std', (['err_hard'], {'axis': '(1)'}), '(err_hard, axis=1)\n', (21983, 22001), True, 'import numpy as np\n'), ((1339, 1368), 'numpy.load', 'np.load', (['f'], {'allow_pickle': '(True)'}), '(f, allow_pickle=True)\n', (1346, 1368), True, 'import numpy as np\n'), ((5699, 5712), 'numpy.abs', 'np.abs', (['K_sub'], {}), '(K_sub)\n', (5705, 5712), True, 'import numpy as np\n'), ((7987, 8008), 
'numpy.arange', 'np.arange', (['K.shape[0]'], {}), '(K.shape[0])\n', (7996, 8008), True, 'import numpy as np\n'), ((8399, 8420), 'numpy.arange', 'np.arange', (['K.shape[0]'], {}), '(K.shape[0])\n', (8408, 8420), True, 'import numpy as np\n'), ((15545, 15568), 'numpy.logspace', 'np.logspace', (['(0.4)', '(2)', '(12)'], {}), '(0.4, 2, 12)\n', (15556, 15568), True, 'import numpy as np\n'), ((15904, 15939), 'numpy.random.randint', 'np.random.randint', (['(0)', 'K.shape[0]', 'p'], {}), '(0, K.shape[0], p)\n', (15921, 15939), True, 'import numpy as np\n'), ((16358, 16393), 'numpy.mean', 'np.mean', (['((y_easy_true - yhat1) ** 2)'], {}), '((y_easy_true - yhat1) ** 2)\n', (16365, 16393), True, 'import numpy as np\n'), ((16423, 16458), 'numpy.mean', 'np.mean', (['((y_hard_true - yhat2) ** 2)'], {}), '((y_hard_true - yhat2) ** 2)\n', (16430, 16458), True, 'import numpy as np\n'), ((17326, 17348), 'numpy.cumsum', 'np.cumsum', (['coeffs_easy'], {}), '(coeffs_easy)\n', (17335, 17348), True, 'import numpy as np\n'), ((17349, 17368), 'numpy.sum', 'np.sum', (['coeffs_easy'], {}), '(coeffs_easy)\n', (17355, 17368), True, 'import numpy as np\n'), ((17440, 17462), 'numpy.cumsum', 'np.cumsum', (['coeffs_hard'], {}), '(coeffs_hard)\n', (17449, 17462), True, 'import numpy as np\n'), ((17463, 17482), 'numpy.sum', 'np.sum', (['coeffs_hard'], {}), '(coeffs_hard)\n', (17469, 17482), True, 'import numpy as np\n'), ((17862, 17884), 'numpy.cumsum', 'np.cumsum', (['coeffs_easy'], {}), '(coeffs_easy)\n', (17871, 17884), True, 'import numpy as np\n'), ((17885, 17904), 'numpy.sum', 'np.sum', (['coeffs_easy'], {}), '(coeffs_easy)\n', (17891, 17904), True, 'import numpy as np\n'), ((18076, 18114), 'numpy.mean', 'np.mean', (['(kvals_linsp * log_decay_coeff)'], {}), '(kvals_linsp * log_decay_coeff)\n', (18083, 18114), True, 'import numpy as np\n'), ((18165, 18190), 'numpy.mean', 'np.mean', (['(kvals_linsp ** 2)'], {}), '(kvals_linsp ** 2)\n', (18172, 18190), True, 'import numpy as np\n'), ((18249, 
18269), 'numpy.mean', 'np.mean', (['kvals_linsp'], {}), '(kvals_linsp)\n', (18256, 18269), True, 'import numpy as np\n'), ((6293, 6330), 'numpy.log10', 'np.log10', (['(0.8 * resp_avg.shape[0] - 1)'], {}), '(0.8 * resp_avg.shape[0] - 1)\n', (6301, 6330), True, 'import numpy as np\n'), ((10920, 10944), 'numpy.cos', 'np.cos', (['(2 * stim_vals[i])'], {}), '(2 * stim_vals[i])\n', (10926, 10944), True, 'import numpy as np\n'), ((11003, 11027), 'numpy.cos', 'np.cos', (['(2 * stim_vals[i])'], {}), '(2 * stim_vals[i])\n', (11009, 11027), True, 'import numpy as np\n'), ((11702, 11726), 'numpy.cos', 'np.cos', (['(6 * stim_vals[i])'], {}), '(6 * stim_vals[i])\n', (11708, 11726), True, 'import numpy as np\n'), ((11779, 11803), 'numpy.cos', 'np.cos', (['(6 * stim_vals[i])'], {}), '(6 * stim_vals[i])\n', (11785, 11803), True, 'import numpy as np\n'), ((13914, 13938), 'numpy.cos', 'np.cos', (['(5 * stim_vals[i])'], {}), '(5 * stim_vals[i])\n', (13920, 13938), True, 'import numpy as np\n'), ((13991, 14015), 'numpy.cos', 'np.cos', (['(5 * stim_vals[i])'], {}), '(5 * stim_vals[i])\n', (13997, 14015), True, 'import numpy as np\n'), ((16057, 16070), 'numpy.cos', 'np.cos', (['(2 * x)'], {}), '(2 * x)\n', (16063, 16070), True, 'import numpy as np\n'), ((16095, 16108), 'numpy.cos', 'np.cos', (['(6 * x)'], {}), '(6 * x)\n', (16101, 16108), True, 'import numpy as np\n'), ((18115, 18135), 'numpy.mean', 'np.mean', (['kvals_linsp'], {}), '(kvals_linsp)\n', (18122, 18135), True, 'import numpy as np\n'), ((18136, 18160), 'numpy.mean', 'np.mean', (['log_decay_coeff'], {}), '(log_decay_coeff)\n', (18143, 18160), True, 'import numpy as np\n'), ((18191, 18211), 'numpy.mean', 'np.mean', (['kvals_linsp'], {}), '(kvals_linsp)\n', (18198, 18211), True, 'import numpy as np\n'), ((18378, 18406), 'numpy.linspace', 'np.linspace', (['(1)', 'len_i', 'len_i'], {}), '(1, len_i, len_i)\n', (18389, 18406), True, 'import numpy as np\n'), ((19035, 19082), 'power_law.mode_errs', 'power_law.mode_errs', (['pvals', 
'si', 'coeffs_easy', '(10)'], {}), '(pvals, si, coeffs_easy, 10)\n', (19054, 19082), False, 'import power_law\n'), ((16233, 16242), 'numpy.eye', 'np.eye', (['p'], {}), '(p)\n', (16239, 16242), True, 'import numpy as np\n'), ((16314, 16323), 'numpy.eye', 'np.eye', (['p'], {}), '(p)\n', (16320, 16323), True, 'import numpy as np\n')] |
"""Two PCA demonstrations.

Part 1: estimate the intrinsic dimensionality of randomly generated,
randomly rotated Gaussian data.
Part 2: compress each RGB channel of an image with a truncated PCA and
display the reconstruction.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import cv2
from scipy.stats import special_ortho_group as sog

##########################################################
# Part 1: rows with growing variance, rotated by a random orthogonal
# matrix so the informative directions are not axis-aligned.
dim = 20
N = 1000
alpha_vectors = np.zeros((N, dim))
for i in range(N):
    alpha_vectors[i] = np.random.normal(0, i + 1, dim)
V = sog.rvs(dim)  # uniform random rotation from SO(dim)
alpha_v = np.matmul(alpha_vectors, V)

##########################################################
pca3 = PCA(n_components=3)
pca3.fit(alpha_v)
print(str(100 * np.sum(pca3.explained_variance_ratio_)) +
      " percent of data is preserved in 3 dimensions!")

# Smallest number of components that keeps at least 90% of the variance.
# One full fit yields every ratio at once; cum_ratio[i - 1] is the
# variance captured by the top i components.  (The original refit a new
# PCA for every candidate i, plus a dead n_components=8 fit.)
full_pca = PCA()
full_pca.fit(alpha_v)
cum_ratio = np.cumsum(full_pca.explained_variance_ratio_)
min_dim = int(np.searchsorted(cum_ratio, 0.9)) + 1
print("Almost " + str(100 * cum_ratio[min_dim - 1]) +
      " percent of data is preserved in at least " + str(min_dim) +
      " dimensions!")

##########################################################
##########################################################
image1 = cv2.imread("mona.jpg")
image = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR
dim = image.shape

R = image[:, :, 0]
G = image[:, :, 1]
B = image[:, :, 2]


def _pca_compress(channel, k):
    """Fit a k-component PCA to one 2-D channel and reconstruct it.

    Returns (reconstruction, kept) where ``kept`` is the fraction of the
    channel's variance captured by the k components.
    """
    cpca = PCA(n_components=k)
    cpca.fit(channel)
    transformed = cpca.transform(channel)
    # BUG FIX vs. the original: each channel's transform is inverted with
    # the *same* fitted PCA.  The original cross-applied gpca's
    # inverse_transform to bpca-transformed data (and vice versa),
    # scrambling the green and blue channels.
    restored = cpca.inverse_transform(transformed)
    return restored, float(np.sum(cpca.explained_variance_ratio_))


def compress_image(R, G, B, k, verbose=False):
    """Compress the three channels independently with k PCA components.

    Returns (reconstructed_image, red_channel_variance_fraction).
    """
    layers = []
    red_kept = 0.0
    for name, channel in (("Red", R), ("Green", G), ("Blue", B)):
        restored, kept = _pca_compress(channel, k)
        if name == "Red":
            red_kept = kept
        if verbose:
            print("First " + str(k) + "components of " + name +
                  " Matrix have " + str(100 * kept) + " percent of data.")
        layers.append(restored.reshape((channel.shape[0], channel.shape[1], 1)))
    stacked = np.dstack(layers)
    # Clip before the integer cast: PCA reconstruction can overshoot the
    # valid [0, 255] range, which would render as wrap-around artifacts.
    return np.clip(stacked, 0, 255).astype(int), red_kept


final_image, _ = compress_image(R, G, B, k=10, verbose=True)
print('final_image shape = ', final_image.shape)
plt.imshow(final_image)
plt.show()

##########################################################
final_image, red_kept = compress_image(R, G, B, k=5)
print('final_image shape = ', final_image.shape)
plt.imshow(final_image)
plt.show()
print(red_kept)
"matplotlib.pyplot.imshow",
"numpy.dstack",
"numpy.random.normal",
"sklearn.decomposition.PCA",
"scipy.stats.special_ortho_group.rvs",
"numpy.sum",
"numpy.zeros",
"numpy.matmul",
"cv2.cvtColor",
"cv2.imread",
"matplotlib.pyplot.show"
] | [((245, 263), 'numpy.zeros', 'np.zeros', (['(N, dim)'], {}), '((N, dim))\n', (253, 263), True, 'import numpy as np\n'), ((344, 356), 'scipy.stats.special_ortho_group.rvs', 'sog.rvs', (['dim'], {}), '(dim)\n', (351, 356), True, 'from scipy.stats import special_ortho_group as sog\n'), ((367, 394), 'numpy.matmul', 'np.matmul', (['alpha_vectors', 'V'], {}), '(alpha_vectors, V)\n', (376, 394), True, 'import numpy as np\n'), ((476, 481), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (479, 481), False, 'from sklearn.decomposition import PCA\n'), ((620, 639), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(3)'}), '(n_components=3)\n', (623, 639), False, 'from sklearn.decomposition import PCA\n'), ((784, 803), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(8)'}), '(n_components=8)\n', (787, 803), False, 'from sklearn.decomposition import PCA\n'), ((1270, 1292), 'cv2.imread', 'cv2.imread', (['"""mona.jpg"""'], {}), "('mona.jpg')\n", (1280, 1292), False, 'import cv2\n'), ((1301, 1340), 'cv2.cvtColor', 'cv2.cvtColor', (['image1', 'cv2.COLOR_BGR2RGB'], {}), '(image1, cv2.COLOR_BGR2RGB)\n', (1313, 1340), False, 'import cv2\n'), ((1718, 1737), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'k'}), '(n_components=k)\n', (1721, 1737), False, 'from sklearn.decomposition import PCA\n'), ((1747, 1766), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'k'}), '(n_components=k)\n', (1750, 1766), False, 'from sklearn.decomposition import PCA\n'), ((1776, 1795), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'k'}), '(n_components=k)\n', (1779, 1795), False, 'from sklearn.decomposition import PCA\n'), ((3387, 3431), 'numpy.dstack', 'np.dstack', (['(Reduced_R, Reduced_G, Reduced_B)'], {}), '((Reduced_R, Reduced_G, Reduced_B))\n', (3396, 3431), True, 'import numpy as np\n'), ((3521, 3544), 'matplotlib.pyplot.imshow', 'plt.imshow', (['final_image'], {}), '(final_image)\n', (3531, 3544), True, 'import matplotlib.pyplot as 
plt\n'), ((3545, 3555), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3553, 3555), True, 'import matplotlib.pyplot as plt\n'), ((3864, 3883), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'k'}), '(n_components=k)\n', (3867, 3883), False, 'from sklearn.decomposition import PCA\n'), ((3893, 3912), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'k'}), '(n_components=k)\n', (3896, 3912), False, 'from sklearn.decomposition import PCA\n'), ((3922, 3941), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'k'}), '(n_components=k)\n', (3925, 3941), False, 'from sklearn.decomposition import PCA\n'), ((4627, 4671), 'numpy.dstack', 'np.dstack', (['(Reduced_R, Reduced_G, Reduced_B)'], {}), '((Reduced_R, Reduced_G, Reduced_B))\n', (4636, 4671), True, 'import numpy as np\n'), ((4761, 4784), 'matplotlib.pyplot.imshow', 'plt.imshow', (['final_image'], {}), '(final_image)\n', (4771, 4784), True, 'import matplotlib.pyplot as plt\n'), ((4785, 4795), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4793, 4795), True, 'import matplotlib.pyplot as plt\n'), ((307, 338), 'numpy.random.normal', 'np.random.normal', (['(0)', '(i + 1)', 'dim'], {}), '(0, i + 1, dim)\n', (323, 338), True, 'import numpy as np\n'), ((857, 876), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'i'}), '(n_components=i)\n', (860, 876), False, 'from sklearn.decomposition import PCA\n'), ((4802, 4840), 'numpy.sum', 'np.sum', (['rpca.explained_variance_ratio_'], {}), '(rpca.explained_variance_ratio_)\n', (4808, 4840), True, 'import numpy as np\n'), ((908, 945), 'numpy.sum', 'np.sum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (914, 945), True, 'import numpy as np\n'), ((675, 712), 'numpy.sum', 'np.sum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (681, 712), True, 'import numpy as np\n'), ((1907, 1945), 'numpy.sum', 'np.sum', (['rpca.explained_variance_ratio_'], {}), 
'(rpca.explained_variance_ratio_)\n', (1913, 1945), True, 'import numpy as np\n'), ((2043, 2081), 'numpy.sum', 'np.sum', (['gpca.explained_variance_ratio_'], {}), '(gpca.explained_variance_ratio_)\n', (2049, 2081), True, 'import numpy as np\n'), ((2178, 2216), 'numpy.sum', 'np.sum', (['bpca.explained_variance_ratio_'], {}), '(bpca.explained_variance_ratio_)\n', (2184, 2216), True, 'import numpy as np\n'), ((1017, 1054), 'numpy.sum', 'np.sum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (1023, 1054), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the mask module.
"""
import astropy.units as u
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
import pytest
from ..bounding_box import BoundingBox
from ..circle import CircularAperture, CircularAnnulus
from ..mask import ApertureMask
from ..rectangle import RectangularAnnulus
POSITIONS = [(-20, -20), (-20, 20), (20, -20), (60, 60)]
def test_mask_input_shapes():
    """A mask whose data shape disagrees with its bounding box is rejected."""
    with pytest.raises(ValueError):
        # BoundingBox(5, 10, 5, 10) spans 5x5 pixels, not 10x10
        ApertureMask(np.ones((10, 10)), BoundingBox(5, 10, 5, 10))
def test_mask_array():
    """np.array() applied to an ApertureMask yields its underlying data."""
    mask = ApertureMask(np.ones((10, 10)), BoundingBox(5, 15, 5, 15))
    assert_allclose(np.array(mask), mask.data)
def test_mask_get_overlap_slices():
    """Check the (large-array, cutout) slice pair for a partly in-bounds mask."""
    mask = CircularAperture((5, 5), r=10.).to_mask()
    expected = ((slice(0, 16, None), slice(0, 16, None)),
                (slice(5, 21, None), slice(5, 21, None)))
    assert mask.get_overlap_slices((25, 25)) == expected
def test_mask_cutout_shape():
    """cutout() and to_image() must reject inputs of the wrong dimensionality."""
    mask = ApertureMask(np.ones((10, 10)), BoundingBox(5, 15, 5, 15))
    with pytest.raises(ValueError):
        mask.cutout(np.arange(10))  # 1D data is invalid
    with pytest.raises(ValueError):
        mask.to_image((10,))  # 1D output shape is invalid
def test_mask_cutout_copy():
    """cutout(copy=True) must detach the result from the input array."""
    data = np.ones((50, 50))
    mask = CircularAperture((25, 25), r=10.).to_mask()
    cutout = mask.cutout(data, copy=True)
    data[25, 25] = 100.
    assert cutout[10, 10] == 1.

    # the same guarantee must hold for Quantity inputs
    qdata = np.ones((50, 50)) * u.adu
    qcutout = mask.cutout(qdata, copy=True)
    assert qcutout.unit == qdata.unit
    qdata[25, 25] = 100. * u.adu
    assert qcutout[10, 10].value == 1.
@pytest.mark.parametrize('position', POSITIONS)
def test_mask_cutout_no_overlap(position):
    """All mask operations return None when the aperture misses the data."""
    data = np.ones((50, 50))
    mask = CircularAperture(position, r=10.).to_mask()
    assert mask.cutout(data) is None
    assert mask.multiply(data) is None
    assert mask.to_image(data.shape) is None
@pytest.mark.parametrize('position', POSITIONS)
def test_mask_cutout_partial_overlap(position):
    """Partially overlapping apertures still produce full-size outputs."""
    data = np.ones((50, 50))
    mask = CircularAperture(position, r=30.).to_mask()
    assert mask.cutout(data).shape == mask.shape
    assert mask.multiply(data).shape == mask.shape
    assert mask.to_image(data.shape).shape == data.shape
def test_mask_multiply():
    """multiply() weights the data by the mask and returns a copy."""
    radius = 10.
    data = np.ones((50, 50))
    mask = CircularAperture((25, 25), r=radius).to_mask()
    weighted = mask.multiply(data)
    # unit data weighted by an exact circular mask sums to the circle area
    assert_almost_equal(np.sum(weighted), np.pi * radius**2)

    # mutating the input afterwards must not affect the result
    data[25, 25] = 100.
    assert weighted[10, 10] == 1.
def test_mask_multiply_quantity():
    """multiply() preserves Quantity units and returns a copy."""
    radius = 10.
    qdata = np.ones((50, 50)) * u.adu
    mask = CircularAperture((25, 25), r=radius).to_mask()
    weighted = mask.multiply(qdata)
    assert weighted.unit == u.adu
    assert_almost_equal(np.sum(weighted.value), np.pi * radius**2)

    # mutating the input afterwards must not affect the result
    qdata[25, 25] = 100. * u.adu
    assert weighted[10, 10].value == 1.
@pytest.mark.parametrize('value', (np.nan, np.inf))
def test_mask_nonfinite_fill_value(value):
    """Non-finite fill values must survive into the cutout's excluded region."""
    aper = CircularAnnulus((0, 0), 10, 20)
    data = np.ones((101, 101)).astype(int)
    cutout = aper.to_mask().cutout(data, fill_value=value)
    # (0, 0) is outside the annulus, so it carries the fill value
    assert not np.isfinite(cutout[0, 0])
def test_mask_multiply_fill_value():
    """multiply() honors fill_value at positions outside the aperture."""
    aper = CircularAnnulus((0, 0), 10, 20)
    data = np.ones((101, 101)).astype(int)
    cutout = aper.to_mask().multiply(data, fill_value=np.nan)
    # the annulus hole and the bounding-box corners lie outside the annulus
    for x, y in ((20, 20), (5, 5), (5, 35), (35, 5), (35, 35)):
        assert np.isnan(cutout[y, x])
def test_mask_nonfinite_in_bbox():
    """
    Regression test that non-finite data values outside of the mask but
    within the bounding box are set to zero.
    """
    data = np.ones((101, 101))
    # scatter non-finite values inside the bounding boxes but outside
    # the circular masks themselves
    for (row, col), bad in (((33, 33), np.nan), ((67, 67), np.inf),
                            ((33, 67), -np.inf), ((22, 22), np.nan),
                            ((22, 23), np.inf)):
        data[row, col] = bad

    radius = 20.
    wdata1 = CircularAperture((50, 50), r=radius).to_mask(
        method='exact').multiply(data)
    assert_allclose(np.sum(wdata1), np.pi * radius**2)

    wdata2 = CircularAperture((5, 5), r=radius).to_mask(
        method='exact').multiply(data)
    assert_allclose(np.sum(wdata2), 561.6040111923013)
def test_mask_get_values():
    """get_values() returns the 1D weighted data values under each mask."""
    aper = CircularAnnulus(((0, 0), (50, 50), (100, 100)), 10, 20)
    data = np.ones((101, 101))
    values = [m.get_values(data) for m in aper.to_mask()]
    # corner annuli are clipped to a quarter of the full annulus
    assert [v.shape for v in values] == [(278,), (1068,), (278,)]
    assert_allclose([np.sum(v) for v in values],
                    (245.621534, 942.477796, 245.621534))
def test_mask_get_values_no_overlap():
    """get_values() is empty when the aperture does not overlap the data."""
    aper = CircularAperture((-100, -100), r=3)
    values = aper.to_mask().get_values(np.ones((51, 51)))
    assert values.shape == (0,)
def test_mask_get_values_mask():
    """get_values() rejects a bad mask shape and honors a boolean mask."""
    mask = CircularAperture((24.5, 24.5), r=10.).to_mask()
    data = np.ones((51, 51))
    with pytest.raises(ValueError):
        mask.get_values(data, mask=np.ones(3))  # shape mismatch

    assert_allclose(np.sum(mask.get_values(data, mask=None)), 100. * np.pi)

    # excluding the bottom half removes exactly half of the aperture area
    data_mask = np.zeros(data.shape, dtype=bool)
    data_mask[25:] = True
    assert_allclose(np.sum(mask.get_values(data, mask=data_mask)),
                    100. * np.pi / 2.)
def test_rectangular_annulus_hin():
    """A RectangularAnnulus with an explicit h_in yields the expected mask."""
    aper = RectangularAnnulus((25, 25), 2, 4, 20, h_in=18, theta=0)
    center_mask = aper.to_mask(method='center')
    assert center_mask.data.shape == (21, 5)
    # 2x20 outer minus 0x18-wide inner opening leaves 40 covered pixels
    assert np.count_nonzero(center_mask.data) == 40
| [
"numpy.ones",
"numpy.testing.assert_allclose",
"numpy.count_nonzero",
"pytest.mark.parametrize",
"numpy.array",
"numpy.zeros",
"pytest.raises",
"numpy.sum",
"numpy.isfinite",
"numpy.isnan",
"numpy.arange"
] | [((1832, 1878), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""position"""', 'POSITIONS'], {}), "('position', POSITIONS)\n", (1855, 1878), False, 'import pytest\n'), ((2221, 2267), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""position"""', 'POSITIONS'], {}), "('position', POSITIONS)\n", (2244, 2267), False, 'import pytest\n'), ((3447, 3497), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value"""', '(np.nan, np.inf)'], {}), "('value', (np.nan, np.inf))\n", (3470, 3497), False, 'import pytest\n'), ((676, 693), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (683, 693), True, 'import numpy as np\n'), ((783, 797), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (791, 797), True, 'import numpy as np\n'), ((802, 834), 'numpy.testing.assert_allclose', 'assert_allclose', (['data', 'mask.data'], {}), '(data, mask.data)\n', (817, 834), False, 'from numpy.testing import assert_allclose, assert_almost_equal\n'), ((1148, 1165), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (1155, 1165), True, 'import numpy as np\n'), ((1424, 1441), 'numpy.ones', 'np.ones', (['(50, 50)'], {}), '((50, 50))\n', (1431, 1441), True, 'import numpy as np\n'), ((1933, 1950), 'numpy.ones', 'np.ones', (['(50, 50)'], {}), '((50, 50))\n', (1940, 1950), True, 'import numpy as np\n'), ((2327, 2344), 'numpy.ones', 'np.ones', (['(50, 50)'], {}), '((50, 50))\n', (2334, 2344), True, 'import numpy as np\n'), ((2704, 2721), 'numpy.ones', 'np.ones', (['(50, 50)'], {}), '((50, 50))\n', (2711, 2721), True, 'import numpy as np\n'), ((4212, 4231), 'numpy.ones', 'np.ones', (['(101, 101)'], {}), '((101, 101))\n', (4219, 4231), True, 'import numpy as np\n'), ((4813, 4832), 'numpy.ones', 'np.ones', (['(101, 101)'], {}), '((101, 101))\n', (4820, 4832), True, 'import numpy as np\n'), ((5138, 5174), 'numpy.testing.assert_allclose', 'assert_allclose', (['sums', 'sums_expected'], {}), '(sums, sums_expected)\n', (5153, 5174), False, 'from 
numpy.testing import assert_allclose, assert_almost_equal\n'), ((5274, 5291), 'numpy.ones', 'np.ones', (['(51, 51)'], {}), '((51, 51))\n', (5281, 5291), True, 'import numpy as np\n'), ((5464, 5481), 'numpy.ones', 'np.ones', (['(51, 51)'], {}), '((51, 51))\n', (5471, 5481), True, 'import numpy as np\n'), ((5699, 5731), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'bool'}), '(data.shape, dtype=bool)\n', (5707, 5731), True, 'import numpy as np\n'), ((491, 516), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (504, 516), False, 'import pytest\n'), ((538, 555), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (545, 555), True, 'import numpy as np\n'), ((1254, 1279), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1267, 1279), False, 'import pytest\n'), ((1326, 1351), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1339, 1351), False, 'import pytest\n'), ((1649, 1666), 'numpy.ones', 'np.ones', (['(50, 50)'], {}), '((50, 50))\n', (1656, 1666), True, 'import numpy as np\n'), ((2860, 2881), 'numpy.sum', 'np.sum', (['data_weighted'], {}), '(data_weighted)\n', (2866, 2881), True, 'import numpy as np\n'), ((3073, 3090), 'numpy.ones', 'np.ones', (['(50, 50)'], {}), '((50, 50))\n', (3080, 3090), True, 'import numpy as np\n'), ((3276, 3303), 'numpy.sum', 'np.sum', (['data_weighted.value'], {}), '(data_weighted.value)\n', (3282, 3303), True, 'import numpy as np\n'), ((3698, 3723), 'numpy.isfinite', 'np.isfinite', (['cutout[0, 0]'], {}), '(cutout[0, 0])\n', (3709, 3723), True, 'import numpy as np\n'), ((4008, 4030), 'numpy.isnan', 'np.isnan', (['cutout[y, x]'], {}), '(cutout[y, x])\n', (4016, 4030), True, 'import numpy as np\n'), ((4556, 4570), 'numpy.sum', 'np.sum', (['wdata1'], {}), '(wdata1)\n', (4562, 4570), True, 'import numpy as np\n'), ((4670, 4684), 'numpy.sum', 'np.sum', (['wdata2'], {}), '(wdata2)\n', (4676, 4684), True, 'import numpy as np\n'), ((4952, 4963), 'numpy.sum', 
'np.sum', (['val'], {}), '(val)\n', (4958, 4963), True, 'import numpy as np\n'), ((5517, 5542), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5530, 5542), False, 'import pytest\n'), ((5655, 5666), 'numpy.sum', 'np.sum', (['arr'], {}), '(arr)\n', (5661, 5666), True, 'import numpy as np\n'), ((5827, 5839), 'numpy.sum', 'np.sum', (['arr2'], {}), '(arr2)\n', (5833, 5839), True, 'import numpy as np\n'), ((6056, 6083), 'numpy.count_nonzero', 'np.count_nonzero', (['mask.data'], {}), '(mask.data)\n', (6072, 6083), True, 'import numpy as np\n'), ((1301, 1314), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1310, 1314), True, 'import numpy as np\n'), ((3595, 3614), 'numpy.ones', 'np.ones', (['(101, 101)'], {}), '((101, 101))\n', (3602, 3614), True, 'import numpy as np\n'), ((3817, 3836), 'numpy.ones', 'np.ones', (['(101, 101)'], {}), '((101, 101))\n', (3824, 3836), True, 'import numpy as np\n'), ((5579, 5589), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (5586, 5589), True, 'import numpy as np\n')] |
# Thirdparty
import numpy as np
class Optimizer():
    """Abstract base class for gradient-descent update rules.

    Concrete optimizers override ``update_weights`` and ``update_bias``;
    these base implementations are no-ops that return ``None``.
    """
    def __init__(self, **kwargs):
        # Accepts arbitrary kwargs so every subclass shares one constructor signature.
        return
    def update_weights(self, *args):
        # Overridden by subclasses; intentionally a no-op here.
        return
    def update_bias(self, *args):
        # Overridden by subclasses; intentionally a no-op here.
        return
class MinibatchSgd(Optimizer):
    """Plain mini-batch SGD: step size is (lr / batch_size) * gradient."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        return
    def update_weights(self, weights, lr, batch_size, gradient):
        """Apply one in-place SGD step to ``weights`` and return them."""
        step = lr / batch_size * gradient
        weights -= step
        return weights
    def update_bias(self, bias, lr, batch_size, bias_gradient):
        """Apply one in-place SGD step to ``bias`` and return it."""
        step = lr / batch_size * bias_gradient
        bias -= step
        return bias
class Momentum(Optimizer):
    """SGD with classical momentum: a velocity term accumulates past gradients."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Separate velocity buffers for the weight and bias parameters.
        self.velocity = 0
        self.bias_velocity = 0
        return
    def update_weights(self, weights, lr, batch_size, gradient, mu=9e-1):
        """In-place momentum step; ``mu`` is the momentum coefficient."""
        step = lr / batch_size * gradient
        self.velocity = mu * self.velocity - step
        weights += self.velocity
        return weights
    def update_bias(self, bias, lr, batch_size, bias_gradient, mu=9e-1):
        """In-place momentum step for the bias term."""
        step = lr / batch_size * bias_gradient
        self.bias_velocity = mu * self.bias_velocity - step
        bias += self.bias_velocity
        return bias
class NesterovMomentum(Optimizer):
    """Nesterov accelerated gradient.

    Note: this uses the common approximation of the Nesterov update which
    is only valid for large ``mu``; see stackoverflow.com/questions/50774683.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.velocity = 0
        self.previous_velocity = 0
        self.bias_velocity = 0
        self.previous_bias_velocity = 0
        return
    def update_weights(self, weights, lr, batch_size, gradient, mu=9e-1):
        self.previous_velocity = self.velocity
        self.velocity = mu * self.velocity - lr / batch_size * gradient
        # Look-ahead correction: blend the new velocity with the previous one.
        weights += (1 + mu) * self.velocity - mu * self.previous_velocity
        return weights
    def update_bias(self, bias, lr, batch_size, bias_gradient, mu=9e-1):
        self.previous_bias_velocity = self.bias_velocity
        self.bias_velocity = mu * self.bias_velocity - lr / batch_size * bias_gradient
        bias += (1 + mu) * self.bias_velocity - mu * self.previous_bias_velocity
        return bias
class Adagrad(Optimizer):
    """Adagrad: per-parameter rates scaled by accumulated squared gradients."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Running sums of squared gradients (grow monotonically).
        self.cache = 0
        self.bias_cache = 0
        return
    def update_weights(self, weights, lr, batch_size, gradient, eps=1e-12):
        # batch_size is unused here; kept for a uniform optimizer interface.
        self.cache = self.cache + gradient ** 2
        scale = lr / (np.sqrt(self.cache) + eps)
        weights -= scale * gradient
        return weights
    def update_bias(self, bias, lr, batch_size, bias_gradient, eps=1e-12):
        self.bias_cache = self.bias_cache + bias_gradient ** 2
        scale = lr / (np.sqrt(self.bias_cache) + eps)
        bias -= scale * bias_gradient
        return bias
class Rmsprop(Optimizer):
    """RMSProp: exponentially decayed moving average of squared gradients."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.cache = 0
        self.bias_cache = 0
        return
    def update_weights(self, weights, lr, batch_size, gradient, eps=1e-12, decay_rate=0.99):
        sq_grad = gradient ** 2
        self.cache = decay_rate * self.cache + (1 - decay_rate) * sq_grad
        weights -= lr / (np.sqrt(self.cache) + eps) * gradient
        return weights
    def update_bias(self, bias, lr, batch_size, bias_gradient, eps=1e-12, decay_rate=0.99):
        sq_grad = bias_gradient ** 2
        self.bias_cache = decay_rate * self.bias_cache + (1 - decay_rate) * sq_grad
        bias -= lr / (np.sqrt(self.bias_cache) + eps) * bias_gradient
        return bias
class Adam(Optimizer):
    """Adam optimizer with bias-corrected first/second moment estimates.

    NOTE(review): only ``update_weights`` advances the timestep ``t``;
    ``update_bias`` reuses it, so within a training step it must be called
    after ``update_weights`` (calling it first would divide by zero at t=0).
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.t = 0             # shared timestep, used for bias correction
        self.cache_m = 0       # first moment (mean) of weight gradients
        self.cache_v = 0       # second moment (uncentered variance)
        self.bias_cache_m = 0
        self.bias_cache_v = 0
        return
    def update_weights(self, weights, lr, batch_size, gradient, eps=1e-12, beta1=0.9, beta2=0.999):
        self.t += 1
        # Exponential moving averages of the gradient and its square.
        self.cache_m = beta1 * self.cache_m + (1 - beta1) * gradient
        self.cache_v = beta2 * self.cache_v + (1 - beta2) * (gradient ** 2)
        # Correct the zero-initialisation bias of both moments.
        m_hat = self.cache_m / (1 - beta1 ** self.t)
        v_hat = self.cache_v / (1 - beta2 ** self.t)
        weights -= lr * m_hat / (np.sqrt(v_hat) + eps)
        return weights
    def update_bias(self, bias, lr, batch_size, bias_gradient, eps=1e-12, beta1=0.9, beta2=0.999):
        self.bias_cache_m = beta1 * self.bias_cache_m + (1 - beta1) * bias_gradient
        self.bias_cache_v = beta2 * self.bias_cache_v + (1 - beta2) * (bias_gradient ** 2)
        m_hat = self.bias_cache_m / (1 - beta1 ** self.t)
        v_hat = self.bias_cache_v / (1 - beta2 ** self.t)
        bias -= lr * m_hat / (np.sqrt(v_hat) + eps)
        return bias
"numpy.sqrt"
] | [((4167, 4187), 'numpy.sqrt', 'np.sqrt', (['v_corrected'], {}), '(v_corrected)\n', (4174, 4187), True, 'import numpy as np\n'), ((4663, 4683), 'numpy.sqrt', 'np.sqrt', (['v_corrected'], {}), '(v_corrected)\n', (4670, 4683), True, 'import numpy as np\n'), ((2524, 2543), 'numpy.sqrt', 'np.sqrt', (['self.cache'], {}), '(self.cache)\n', (2531, 2543), True, 'import numpy as np\n'), ((2729, 2753), 'numpy.sqrt', 'np.sqrt', (['self.bias_cache'], {}), '(self.bias_cache)\n', (2736, 2753), True, 'import numpy as np\n'), ((3160, 3179), 'numpy.sqrt', 'np.sqrt', (['self.cache'], {}), '(self.cache)\n', (3167, 3179), True, 'import numpy as np\n'), ((3433, 3457), 'numpy.sqrt', 'np.sqrt', (['self.bias_cache'], {}), '(self.bias_cache)\n', (3440, 3457), True, 'import numpy as np\n')] |
#!/user/bin/env python
'''tictactoe_ai.py: Implement an ai for the game of Tic-Tac-Toe.'''
################################################################################
from copy import deepcopy as copy
from numpy import random
import numpy as np
def random_ai(game):
    """Pick a uniformly random action from the game's current legal moves."""
    actions = game.get_action_list()
    return random.choice(actions)
def semi_random_ai(game):
    """Greedy-ish move selection.

    Simulates every legal action: a move that immediately wins the game is
    returned at once; otherwise moves that capture a sub-board are preferred,
    with a uniformly random fallback among all legal moves.
    """
    actions = game.get_action_list()
    player = game.get_player_turn()
    captured_before = np.sum(game.superboard[player, :, :])
    capturing_moves = []
    for action in actions:
        simulated = copy(game)
        simulated.take_action(player, action)
        # A move that ends the game in our favour is taken immediately.
        if simulated.get_status() == player:
            return action
        captured_after = np.sum(simulated.superboard[player, :, :])
        # Sanity check: one move captures at most one sub-board.
        assert(captured_after >= captured_before)
        assert(captured_after <= captured_before + 1)
        if captured_after == captured_before + 1:
            capturing_moves.append(action)
    if capturing_moves:
        return random.choice(capturing_moves)
    return random.choice(actions)
if __name__ == '__main__':
    # Interactive demo: human (or semi_random_ai on their behalf) vs random_ai.
    from ultimate_tictactoe import Ultimate_TicTacToe
    g = Ultimate_TicTacToe()
    turn = 0
    status = g.get_status()
    print('Do you want to start? [Y/n]')
    player_start = True
    answer = input()
    # Empty input counts as "yes" (default choice).
    if answer == 'n' or answer == 'N':
        player_start = False
    elif answer == 'y' or answer == 'Y' or len(answer)==0:
        player_start = True
    else:
        print('Invalid answer! Exit!')
        quit()
    player_id = 0
    if not player_start:
        player_id = 1
    # status -1 appears to mean "game still in progress" — loop until it changes.
    while status==-1:
        g.visualize()
        if turn == player_id:
            # x,y = input().split(' ')
            # act = 3*int(x)+int(y)
            # act = random_ai(g)
            act = semi_random_ai(g)
        else:
            act = random_ai(g)
        g.take_action(turn, act)
        status = g.get_status()
        turn = 1-turn
    g.visualize()
    print(status)
| [
"numpy.random.choice",
"numpy.sum",
"copy.deepcopy",
"ultimate_tictactoe.Ultimate_TicTacToe"
] | [((433, 470), 'numpy.sum', 'np.sum', (['game.superboard[p_turn, :, :]'], {}), '(game.superboard[p_turn, :, :])\n', (439, 470), True, 'import numpy as np\n'), ((1019, 1039), 'ultimate_tictactoe.Ultimate_TicTacToe', 'Ultimate_TicTacToe', ([], {}), '()\n', (1037, 1039), False, 'from ultimate_tictactoe import Ultimate_TicTacToe\n'), ((524, 534), 'copy.deepcopy', 'copy', (['game'], {}), '(game)\n', (528, 534), True, 'from copy import deepcopy as copy\n'), ((662, 697), 'numpy.sum', 'np.sum', (['co.superboard[p_turn, :, :]'], {}), '(co.superboard[p_turn, :, :])\n', (668, 697), True, 'import numpy as np\n'), ((854, 881), 'numpy.random.choice', 'random.choice', (['good_actions'], {}), '(good_actions)\n', (867, 881), False, 'from numpy import random\n'), ((907, 927), 'numpy.random.choice', 'random.choice', (['alist'], {}), '(alist)\n', (920, 927), False, 'from numpy import random\n')] |
#!/usr/bin/env python3
"""Postprocess for the example galaxy.
The PostBlobby3D class is very simple. However, it is useful for organisational
purposes of the Blobby3D output. In this script, I created a PostBlobby3D
object and plotted a handful of sample attributes.
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import dnest4 as dn4
from pyblobby3d import PostBlobby3D
from pyblobby3d import SpectralModel
# Run DNest4 postprocessing to produce the posterior sample file read below.
dn4.postprocess()
post_b3d = PostBlobby3D(
    samples_path='posterior_sample.txt',
    data_path='data.txt',
    var_path='var.txt',
    metadata_path='metadata.txt',
    nlines=2)
# choose a sample
sample = 0
# Plot maps for sample
fig, ax = plt.subplots(1, 4)
ax[0].set_title(r'H$\alpha$ Flux')
ax[0].imshow(
    np.log10(post_b3d.maps[sample, 0]),
    interpolation='nearest', origin='lower')
ax[1].set_title(r'[NII] Flux')
ax[1].imshow(
    np.log10(post_b3d.maps[sample, 1]),
    interpolation='nearest', origin='lower')
ax[2].set_title('V')
ax[2].imshow(post_b3d.maps[sample, 2], interpolation='nearest', origin='lower')
ax[3].set_title('V Disp')
ax[3].imshow(post_b3d.maps[sample, 3], interpolation='nearest', origin='lower')
fig.tight_layout()
# We can also plot the integrated flux across the wavelength axis for a sample
# and compare it to the data. The below does this for H-alpha.
fig, ax = plt.subplots(1, 3)
ax[0].set_title('Preconvolved')
ax[0].imshow(
    np.log10(post_b3d.precon_cubes[sample].sum(axis=2)),
    interpolation='nearest', origin='lower')
ax[1].set_title('Convolved')
ax[1].imshow(
    np.log10(post_b3d.con_cubes[sample].sum(axis=2)),
    interpolation='nearest', origin='lower')
ax[2].set_title('Data')
ax[2].imshow(
    np.log10(post_b3d.data.sum(axis=2)),
    interpolation='nearest', origin='lower')
fig.tight_layout()
# Similarly we can integrate the total cube and look at the flux as
# a function of wavelength. In this case lets compare all convolved samples
# to the data.
fig, ax = plt.subplots()
ax.plot(post_b3d.data.sum(axis=(0, 1)), '--k', label='Data')
ax.plot(post_b3d.con_cubes.sum(axis=(1, 2)).transpose(), '--', color='0.5')
ax.legend()
# Another interesting thing can be to compare the velocity dispersion of the
# models pre and post convolution. The post emission lines in the convolved
# are not known analytically, and thus need to be estimated. There is an
# emission line fitting procedure for this purpose in pyblobby3d. You will
# often see that a flat velocity dispersion leads to a velocity dispersion map
# with substructure.
# Two line groups (nlines=2 above); presumably H-alpha and the [NII] pair —
# the third [NII] entry looks like a fixed flux ratio. TODO confirm.
sm = SpectralModel(
    lines=[[6562.81], [6583.1, 6548.1, 0.3333]],
    lsf_fwhm=1.61,)
wave = post_b3d.metadata.get_axis_array('r')
fit, fit_err = sm.fit_cube(wave, post_b3d.data, post_b3d.var)
# NOTE(review): fit_model is computed but not used further below.
fit_model = sm.calculate_cube(wave, fit)
fig, ax = plt.subplots(1, 2)
fig.suptitle('V Disp')
ax[0].set_title('Preconvolved')
ax[0].imshow(post_b3d.maps[sample, 3], vmin=10.0, vmax=50.0)
ax[1].set_title('Convolved')
ax[1].imshow(fit[3], vmin=10.0, vmax=50.0)
fig.tight_layout()
| [
"numpy.log10",
"pyblobby3d.PostBlobby3D",
"pyblobby3d.SpectralModel",
"dnest4.postprocess",
"matplotlib.pyplot.subplots"
] | [((438, 455), 'dnest4.postprocess', 'dn4.postprocess', ([], {}), '()\n', (453, 455), True, 'import dnest4 as dn4\n'), ((468, 603), 'pyblobby3d.PostBlobby3D', 'PostBlobby3D', ([], {'samples_path': '"""posterior_sample.txt"""', 'data_path': '"""data.txt"""', 'var_path': '"""var.txt"""', 'metadata_path': '"""metadata.txt"""', 'nlines': '(2)'}), "(samples_path='posterior_sample.txt', data_path='data.txt',\n var_path='var.txt', metadata_path='metadata.txt', nlines=2)\n", (480, 603), False, 'from pyblobby3d import PostBlobby3D\n'), ((705, 723), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(4)'], {}), '(1, 4)\n', (717, 723), True, 'import matplotlib.pyplot as plt\n'), ((1372, 1390), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {}), '(1, 3)\n', (1384, 1390), True, 'import matplotlib.pyplot as plt\n'), ((2022, 2036), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2034, 2036), True, 'import matplotlib.pyplot as plt\n'), ((2595, 2668), 'pyblobby3d.SpectralModel', 'SpectralModel', ([], {'lines': '[[6562.81], [6583.1, 6548.1, 0.3333]]', 'lsf_fwhm': '(1.61)'}), '(lines=[[6562.81], [6583.1, 6548.1, 0.3333]], lsf_fwhm=1.61)\n', (2608, 2668), False, 'from pyblobby3d import SpectralModel\n'), ((2847, 2865), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (2859, 2865), True, 'import matplotlib.pyplot as plt\n'), ((778, 812), 'numpy.log10', 'np.log10', (['post_b3d.maps[sample, 0]'], {}), '(post_b3d.maps[sample, 0])\n', (786, 812), True, 'import numpy as np\n'), ((909, 943), 'numpy.log10', 'np.log10', (['post_b3d.maps[sample, 1]'], {}), '(post_b3d.maps[sample, 1])\n', (917, 943), True, 'import numpy as np\n')] |
import pickle
import re
import numpy as np
from matplotlib import pyplot as plt
from nltk import word_tokenize
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
def preprocessTweet(TweeList):
testList = []
for d in range(len(TweeList)):
testList.append([re.sub('(?<=^|(?<=[^a-zA-Z0-9-_.+A-Za-z+]))(@[A-Za-z_+A-Za-z+]+[A-Za-z0-9-_:+A-Za-z_+]+|htt.*:[//.A-Za-z0-9-_:+A-Za-z_+]+|&#[0-9]+|([^a-zA-Z0-9])|(RT))','',TweeList[d][0]),TweeList[d][1]])
return testList
def createVectorX(tweetList):
    """Return the list of tweet texts (first element of each pair)."""
    texts = []
    for entry in tweetList:
        texts.append("".join(entry[0]))
    return texts
def createVectorY(tweetList):
    """Return the list of labels (second element of each pair)."""
    return [entry[1] for entry in tweetList]
def classifyTweet(XtrainVector, YtrainVector,XtestVector,YtestVector):
    """Train two bag-of-words pipelines (Naive Bayes and SGD/SVM), print their
    test accuracy, and return the predictions plus the fitted pipelines.
    """
    # English stopwords are removed during vectorisation.
    stop_ = set(stopwords.words('english'))
    text_clf_NB = Pipeline([('vec', CountVectorizer(stop_words= stop_)), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB())])
    text_clf_NB = text_clf_NB.fit(XtrainVector, YtrainVector)
    predictedNB = text_clf_NB.predict(XtestVector)
    # Fraction of test labels predicted correctly.
    mean1 = np.mean(predictedNB == YtestVector)
    print("Prediction Accuracy for Bag of words with MultiNominal Naive Bayes:",mean1*100)
    # text_clf_SVM = Pipeline([('vec', CountVectorizer(stop_words= stop)), ('tfidf', TfidfTransformer()), ('clf', SVC(max_iter=5, probability=True,random_state=42))])
    # NOTE(review): `n_iter` and `loss='log'` were removed/renamed in newer
    # scikit-learn (now `max_iter` / `loss='log_loss'`) — verify the pinned version.
    text_clf_SVM = Pipeline([('vec', CountVectorizer(stop_words= stop_)), ('tfidf', TfidfTransformer()), ('clf', SGDClassifier(loss='log', penalty='l2',alpha=1e-3, n_iter=5, random_state=42))])
    text_clf_SVM = text_clf_SVM.fit(XtrainVector, YtrainVector)
    predictedSVM = text_clf_SVM.predict(XtestVector)
    mean2 = np.mean(predictedSVM == YtestVector)
    print("Prediction Accuracy for Bag of words with SGDClassifier(SVM):",mean2*100)
    return predictedNB, predictedSVM,text_clf_NB,text_clf_SVM
def createPlotData(predicted, text_clf, prob):
    """Split probability pairs into two scatter series by predicted label.

    Points whose prediction is 'none' go to (x0, y0); all others to (x1, y1).
    NOTE(review): ``text_clf`` is currently unused but kept for interface parity.
    """
    x0, y0 = [], []
    x1, y1 = [], []
    for idx, point in enumerate(prob):
        if predicted[idx] == 'none':
            x0.append(point[0])
            y0.append(point[1])
        else:
            x1.append(point[0])
            y1.append(point[1])
    return x0, x1, y0, y1
def BOGTweet_live(tweet, text_clf_NB, text_clf_SVM):
    """Clean a single raw tweet and classify it with both fitted pipelines.

    Returns ``(svm_prediction, nb_prediction)``.
    """
    cleaned = [re.sub('(?<=^|(?<=[^a-zA-Z0-9-_.+A-Za-z+]))(@[A-Za-z_+A-Za-z+]+[A-Za-z0-9-_:+A-Za-z_+]+|htt.*:[//.A-Za-z0-9-_:+A-Za-z_+]+|&#[0-9]+|([^a-zA-Z0-9])|(RT))','',tweet)]
    nb_prediction = text_clf_NB.predict(cleaned)
    svm_prediction = text_clf_SVM.predict(cleaned)
    return svm_prediction, nb_prediction
| [
"numpy.mean",
"sklearn.feature_extraction.text.TfidfTransformer",
"sklearn.linear_model.SGDClassifier",
"nltk.corpus.stopwords.words",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.naive_bayes.MultinomialNB",
"re.sub"
] | [((1364, 1399), 'numpy.mean', 'np.mean', (['(predictedNB == YtestVector)'], {}), '(predictedNB == YtestVector)\n', (1371, 1399), True, 'import numpy as np\n'), ((1977, 2013), 'numpy.mean', 'np.mean', (['(predictedSVM == YtestVector)'], {}), '(predictedSVM == YtestVector)\n', (1984, 2013), True, 'import numpy as np\n'), ((1080, 1106), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1095, 1106), False, 'from nltk.corpus import stopwords\n'), ((2576, 2750), 're.sub', 're.sub', (['"""(?<=^|(?<=[^a-zA-Z0-9-_.+A-Za-z+]))(@[A-Za-z_+A-Za-z+]+[A-Za-z0-9-_:+A-Za-z_+]+|htt.*:[//.A-Za-z0-9-_:+A-Za-z_+]+|&#[0-9]+|([^a-zA-Z0-9])|(RT))"""', '""""""', 'tweet'], {}), "(\n '(?<=^|(?<=[^a-zA-Z0-9-_.+A-Za-z+]))(@[A-Za-z_+A-Za-z+]+[A-Za-z0-9-_:+A-Za-z_+]+|htt.*:[//.A-Za-z0-9-_:+A-Za-z_+]+|&#[0-9]+|([^a-zA-Z0-9])|(RT))'\n , '', tweet)\n", (2582, 2750), False, 'import re\n'), ((539, 722), 're.sub', 're.sub', (['"""(?<=^|(?<=[^a-zA-Z0-9-_.+A-Za-z+]))(@[A-Za-z_+A-Za-z+]+[A-Za-z0-9-_:+A-Za-z_+]+|htt.*:[//.A-Za-z0-9-_:+A-Za-z_+]+|&#[0-9]+|([^a-zA-Z0-9])|(RT))"""', '""""""', 'TweeList[d][0]'], {}), "(\n '(?<=^|(?<=[^a-zA-Z0-9-_.+A-Za-z+]))(@[A-Za-z_+A-Za-z+]+[A-Za-z0-9-_:+A-Za-z_+]+|htt.*:[//.A-Za-z0-9-_:+A-Za-z_+]+|&#[0-9]+|([^a-zA-Z0-9])|(RT))'\n , '', TweeList[d][0])\n", (545, 722), False, 'import re\n'), ((1144, 1177), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'stop_words': 'stop_'}), '(stop_words=stop_)\n', (1159, 1177), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((1191, 1209), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (1207, 1209), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((1220, 1235), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (1233, 1235), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((1691, 1724), 'sklearn.feature_extraction.text.CountVectorizer', 
'CountVectorizer', ([], {'stop_words': 'stop_'}), '(stop_words=stop_)\n', (1706, 1724), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((1738, 1756), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (1754, 1756), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((1767, 1846), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""log"""', 'penalty': '"""l2"""', 'alpha': '(0.001)', 'n_iter': '(5)', 'random_state': '(42)'}), "(loss='log', penalty='l2', alpha=0.001, n_iter=5, random_state=42)\n", (1780, 1846), False, 'from sklearn.linear_model import SGDClassifier\n')] |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# #### Hi all. 🙋
#
# #### Nice to meet you!
#
# #### We all know that feature engineering is the key to dynamically growing a model's performance in machine learning.
#
# #### You will try a lot of thought and various methods when doing feature engineering! I'm going to suggest a lot of ways to reduce the trouble and process.
#
# #### The methods I will introduce are both known and unfamiliar methods. I hope you will refer to them when conducting competitions on Kaggle in the future! 💯
#
#
# %% _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split
import seaborn as sns
import matplotlib.pyplot as plt
# %%
train = pd.read_csv('input/train.csv')
train = train.drop(['Id'], axis=1)
pd.set_option('display.max_columns', None)
train.head()
# %%
# data segmentation
X = train.drop('SalePrice', axis=1)
y = train['SalePrice']
train_x, test_x, train_y, test_y = train_test_split(
X, y, test_size=0.2, shuffle=True, random_state=0) # train, valid 8:2 분할
# %%
# We need to duplicate the original state of our training data and test data.
train_x_saved = train_x.copy()
test_x_saved = test_x.copy()
# Functions that return training data and test data
def load_data(train_x_saved, test_x_saved):
    """Return fresh copies of the pristine train/test feature frames, so each
    transform section starts from untouched data."""
    return train_x_saved.copy(), test_x_saved.copy()
# %%
# Store the numeric variable to be converted into a list
num_cols = [
'MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond',
'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', '1stFlrSF',
'2ndFlrSF', 'GrLivArea', 'GarageYrBlt', 'GarageArea', 'WoodDeckSF'
]
# %% [markdown]
# # Linear Transform
# %% [markdown]
# <div style="background-color:rgba(0, 255, 255, 0.6);border-radius:5px;display:fill;">
# <h1><center style ="margin-left : 20px;">Standardization</center></h1>
# </div>
# %% [markdown]
# #### This is the most basic transformation method.
# #### It is a method that makes the mean 0 and the standard deviation 1 through a linear transformation!
# %% [markdown]
# 
# %%
# Load Data
train_x, test_x = load_data(train_x_saved=train_x_saved,
test_x_saved=test_x_saved)
# %%
from sklearn.preprocessing import StandardScaler
# %%
scaler = StandardScaler()
scaler.fit(train_x[num_cols])
# %%
# Permuting each column after normalization
train_x[num_cols] = scaler.transform(train_x[num_cols])
test_x[num_cols] = scaler.transform(test_x[num_cols])
# %% [markdown]
# <div style="background-color:red;border-radius:5px;display:fill;">
# <h1><center style ="margin-left : 20px;">BAD Standardization</center></h1>
# </div>
# %% [markdown]
# #### In this method, training data and test data are transformed according to the mean and standard deviation of different criteria.
#
# #### If the distribution of each data does not differ significantly from each other, it is not a problem. However, this method should not be used. 💥
# %%
train_x, test_x = load_data(train_x_saved=train_x_saved,
test_x_saved=test_x_saved)
# %%
from sklearn.preprocessing import StandardScaler
# %%
# Normalize training data and test data respectively (bad example)
scaler_train = StandardScaler()
scaler_train.fit(train_x[num_cols])
train_x[num_cols] = scaler_train.transform(train_x[num_cols])
scaler_test = StandardScaler()
scaler_test.fit(test_x[num_cols])
test_x[num_cols] = scaler_test.transform(test_x[num_cols])
# %% [markdown]
# <div style="background-color:rgba(0, 255, 255, 0.6);border-radius:5px;display:fill;">
# <h1><center style ="margin-left : 20px;">Min-Max Scaling</center></h1>
# </div>
# %% [markdown]
# #### This is a Min-Max Scaling method that converts the range taken by the variable value into a specific interval (between 0 and 1).
# %% [markdown]
# 
# %%
train_x, test_x = load_data(train_x_saved=train_x_saved,
test_x_saved=test_x_saved)
# %%
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(train_x[num_cols])
# %%
train_x[num_cols] = scaler.transform(train_x[num_cols])
test_x[num_cols] = scaler.transform(test_x[num_cols])
# %%
train_x[num_cols].describe().T.style.bar(subset=['mean'], color='#205ff2')\
.background_gradient(subset=['min'], cmap='Reds')\
.background_gradient(subset=['max'], cmap='coolwarm')
## The minimum value is 0 and the maximum value is 1.
# %% [markdown]
# # Non-linear Transformation
# %% [markdown]
# <div style="background-color:rgba(0, 255, 255, 0.6);border-radius:5px;display:fill;">
# <h1><center style ="margin-left : 20px;">Log</center></h1>
# </div>
# %% [markdown]
# #### It is recommended that the distribution of variables is not skewed to one side.
#
# #### For example, a variable representing a specific amount or number of times tends
#
# #### to have a distribution that is biased in one direction,
#
# #### so log transformation is sometimes performed. And when the value is 0,
#
# #### log(x+1) transformation is often used because it cannot take the log as it is.
# %%
train_x, test_x = load_data(train_x_saved=train_x_saved,
test_x_saved=test_x_saved)
# %%
x = train_x[num_cols]
# %%
# take log
x1 = np.log(x)
x1
# %%
# Add 1 and then take the logarithm
x2 = np.log1p(x)
x2
# %%
# After taking the logarithm of the absolute value, add the original sign
x3 = np.sign(x) * np.log(np.abs(x))
x3
# %% [markdown]
# <div style="background-color:rgba(0, 255, 255, 0.6);border-radius:5px;display:fill;">
# <h1><center style ="margin-left : 20px;">Box-Cox Transform</center></h1>
# </div>
# %% [markdown]
# #### In addition to the BOX-COX Transform, which is a generalized log transformation,
#
# #### there is also the Yeo-Johnson Transform that can be applied to variables with negative values.
# #### These transformations approximate a normal distribution after log transformation.
# %% [markdown]
# 
# %%
train_x, test_x = load_data(train_x_saved=train_x_saved,
test_x_saved=test_x_saved)
# %%
# Storing variables that take only positive integer values as conversion targets in a list
# Also, if missing values are included, be careful because (~(train_x[c] <= 0.0)).all() should be used.
pos_cols = [
c for c in num_cols
if (train_x[c] > 0.0).all() and (test_x[c] > 0.0).all()
]
## List of features with positive values
pos_cols
# %%
from sklearn.preprocessing import PowerTransformer
# %%
# Box-Cox requires strictly positive inputs; fit on the training data only.
pt = PowerTransformer(method='box-cox')
pt.fit(train_x[pos_cols])
# %%
# Replace each column with the transformed data
train_x[pos_cols] = pt.transform(train_x[pos_cols])
test_x[pos_cols] = pt.transform(test_x[pos_cols])
# %% [markdown]
# #### LotArea column before after comparison
# %%
x = train.LotArea.values
sns.kdeplot(x)
plt.title("before Box-Cox-transform")
plt.show()
# %%
x = train_x.LotArea.values
sns.kdeplot(x)
plt.title("after Box-Cox-transform")
plt.show()
## The existing data also has a form of a normal distribution,
## so there is little difference between it and after the Box-Cox transformation.
# %% [markdown]
# #### GrLivArea column before after comparison
# %%
x = train.GrLivArea.values
sns.kdeplot(x)
plt.title("before Box-Cox-transform")
plt.show()
# %%
x = train_x.GrLivArea.values
sns.kdeplot(x)
plt.title("after Box-Cox-transform")
plt.show()
## The existing data also has a form of a normal distribution,
## so there is little difference between it and after the Box-Cox transformation.
# %% [markdown]
# <div style="background-color:rgba(0, 255, 255, 0.6);border-radius:5px;display:fill;">
# <h1><center style ="margin-left : 20px;">Yeo-Johnson Transform</center></h1>
# </div>
# %% [markdown]
# #### Yeo-Johnson transform can also take negative values.
# %% [markdown]
# 
# %%
train_x, test_x = load_data(train_x_saved=train_x_saved,
test_x_saved=test_x_saved)
# %%
from sklearn.preprocessing import PowerTransformer
# %%
# Yeo-Johnson also accepts non-positive values; fit on the training data only.
pt = PowerTransformer(method='yeo-johnson')
pt.fit(train_x[num_cols])
# %%
# Replace each column with the transformed data
train_x[num_cols] = pt.transform(train_x[num_cols])
test_x[num_cols] = pt.transform(test_x[num_cols])
# %%
train_x[num_cols]
# %%
import plotly.graph_objects as go
fig = go.Figure()
fig.add_trace(
go.Histogram(
x=train.MSSubClass,
xbins=dict( # bins used for histogram
start=-100, end=200),
marker_color='blue',
opacity=1))
fig.update_layout(
title_text='MSSubClass yeo-johnson Before',
xaxis_title_text='MSSubClass',
yaxis_title_text='Value',
bargap=0.05, # gap between bars of adjacent location coordinates
xaxis={'showgrid': False},
yaxis={'showgrid': False},
template='plotly_dark')
fig.show()
# %%
import plotly.graph_objects as go
fig = go.Figure()
fig.add_trace(
go.Histogram(
x=train_x.MSSubClass,
xbins=dict( # bins used for histogram
start=0, end=200),
marker_color='blue',
opacity=1))
fig.update_layout(
title_text='MSSubClass yeo-johnson After',
xaxis_title_text='MSSubClass',
yaxis_title_text='Value',
bargap=0.05, # gap between bars of adjacent location coordinates
xaxis={'showgrid': False},
yaxis={'showgrid': False},
template='plotly_dark')
fig.show()
## The spread distribution was forced to approximate the normal distribution.
# %% [markdown]
# # Setting TransForm
# %% [markdown]
# <div style="background-color:rgba(0, 255, 255, 0.6);border-radius:5px;display:fill;">
# <h1><center style ="margin-left : 20px;">Clipping</center></h1>
# </div>
# %% [markdown]
# #### Numerical variables sometimes include outliers, but you can exclude outliers
# #### outside a certain range by setting upper and lower limits and replacing values
# #### outside the range with upper and lower limits. It is also a good idea to check the distribution first and then set the threshold.
# %%
train_x, test_x = load_data(train_x_saved=train_x_saved,
test_x_saved=test_x_saved)
# %%
# Check 1%, 99% points of training data per column
p01 = train_x[num_cols].quantile(0.01)
p99 = train_x[num_cols].quantile(0.99)
p01
p99
# %%
# Values below 1% point are clipped to 1% point, and values above 99% point are clipped to 99% point.
train_x[num_cols] = train_x[num_cols].clip(p01, p99, axis=1)
test_x[num_cols] = test_x[num_cols].clip(p01, p99, axis=1)
# %% [markdown]
# #### LotArea column before after comparison
# %%
import plotly.graph_objects as go
fig = go.Figure()
fig.add_trace(
go.Histogram(
x=train.LotArea,
xbins=dict( # bins used for histogram
start=0, end=50000, size=2),
marker_color='#e8ab60',
opacity=1))
fig.update_layout(
title_text='LotArea Clipping Before',
xaxis_title_text='LotArea',
yaxis_title_text='COUNT',
bargap=0.05, # gap between bars of adjacent location coordinates
xaxis={'showgrid': False},
yaxis={'showgrid': False},
template='plotly_dark')
fig.show()
# %%
import plotly.graph_objects as go
fig = go.Figure()
fig.add_trace(
go.Histogram(
x=train_x.LotArea,
xbins=dict( # bins used for histogram
start=0, end=50000),
marker_color='#e8ab60',
opacity=1))
fig.update_layout(
title_text='LotArea Clipping After',
xaxis_title_text='LotArea',
yaxis_title_text='COUNT',
bargap=0.05, # gap between bars of adjacent location coordinates
xaxis={'showgrid': False},
yaxis={'showgrid': False},
template='plotly_dark')
fig.show()
## Values from 0 to 80 are substituted.
# %% [markdown]
# #### RestingBP column before after comparison
# %%
fig = go.Figure()
fig.add_trace(
go.Histogram(
x=train.GrLivArea,
xbins=dict( # bins used for histogram
start=0, end=10000, size=15),
marker_color='#FE6F5E',
opacity=1))
fig.update_layout(
title_text='GrLivArea Clipping Before',
xaxis_title_text='GrLivArea',
yaxis_title_text='COUNT',
bargap=0.05, # gap between bars of adjacent location coordinates
xaxis={'showgrid': False},
yaxis={'showgrid': False},
template='plotly_dark')
fig.show()
# %%
fig = go.Figure()
fig.add_trace(
go.Histogram(
x=train_x.GrLivArea,
xbins=dict( # bins used for histogram
start=0, end=10000, size=15),
marker_color='#FE6F5E',
opacity=1))
fig.update_layout(
title_text='GrLivArea Clipping After',
xaxis_title_text='GrLivArea',
yaxis_title_text='COUNT',
bargap=0.05, # gap between bars of adjacent location coordinates
xaxis={'showgrid': False},
yaxis={'showgrid': False},
template='plotly_dark')
fig.show()
# %% [markdown]
# #### If you look at the graph, you can clearly see that the values are not spread widely but are clustered like a normal distribution.
# %% [markdown]
# <div style="background-color:rgba(0, 255, 255, 0.6);border-radius:5px;display:fill;">
# <h1><center style ="margin-left : 20px;">Rank Gauss</center></h1>
# </div>
# %% [markdown]
# #### This is a method of converting numeric variables into ranks and then semi-forced normal
# #### distributions while maintaining the order. This technique was popularized by a Kaggle
# #### Grandmaster in the first-place solution of the Porto Seguro's Safe Driver Prediction competition.
# #### In particular, it is said to have better performance than general standardization as a transformation when building a model in a neural network.
# %%
train_x, test_x = load_data(train_x_saved=train_x_saved,
test_x_saved=test_x_saved)
from sklearn.preprocessing import QuantileTransformer
# %%
transformer = QuantileTransformer(n_quantiles=100,
random_state=0,
output_distribution='normal')
transformer.fit(train_x[num_cols])
# %%
train_x[num_cols] = transformer.transform(train_x[num_cols])
test_x[num_cols] = transformer.transform(test_x[num_cols])
# %%
train_x[num_cols]
# %%
p = sns.boxplot(x=train.GarageArea, color='teal')
p.set_title("GarageArea RankGauss Before")
plt.show()
# %%
p = sns.boxplot(x=train_x.GarageArea, color='teal')
p.set_title("GarageArea RankGauss After")
plt.show()
# %% [markdown]
# #### The values were semi-forced to be normally distributed. The impact of outliers is also expected to decrease.
# %% [markdown]
# # NEXT PLAN
# %% [markdown]
# #### The following tabular data conversion will deal with numeric conversion of category types.
# #### If you are interested in my kernel, please find the next category type conversion kernel as well.
| [
"numpy.abs",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.log",
"sklearn.preprocessing.PowerTransformer",
"pandas.set_option",
"sklearn.preprocessing.StandardScaler",
"plotly.graph_objects.Figure",
"seaborn.boxplot",
"seaborn.kdeplot",
"sklearn.preprocessing.QuantileTran... | [((1139, 1169), 'pandas.read_csv', 'pd.read_csv', (['"""input/train.csv"""'], {}), "('input/train.csv')\n", (1150, 1169), True, 'import pandas as pd\n'), ((1205, 1247), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (1218, 1247), True, 'import pandas as pd\n'), ((1382, 1449), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'shuffle': '(True)', 'random_state': '(0)'}), '(X, y, test_size=0.2, shuffle=True, random_state=0)\n', (1398, 1449), False, 'from sklearn.model_selection import train_test_split\n'), ((2795, 2811), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2809, 2811), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3744, 3760), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3758, 3760), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3874, 3890), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3888, 3890), False, 'from sklearn.preprocessing import StandardScaler\n'), ((4596, 4610), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4608, 4610), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((5875, 5884), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (5881, 5884), True, 'import numpy as np\n'), ((5935, 5946), 'numpy.log1p', 'np.log1p', (['x'], {}), '(x)\n', (5943, 5946), True, 'import numpy as np\n'), ((7186, 7220), 'sklearn.preprocessing.PowerTransformer', 'PowerTransformer', ([], {'method': '"""box-cox"""'}), "(method='box-cox')\n", (7202, 7220), False, 'from sklearn.preprocessing import PowerTransformer\n'), ((7470, 7484), 'seaborn.kdeplot', 'sns.kdeplot', (['x'], {}), '(x)\n', (7481, 7484), True, 'import seaborn as sns\n'), ((7485, 7522), 'matplotlib.pyplot.title', 'plt.title', (['"""before Box-Cox-transform"""'], {}), "('before 
Box-Cox-transform')\n", (7494, 7522), True, 'import matplotlib.pyplot as plt\n'), ((7523, 7533), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7531, 7533), True, 'import matplotlib.pyplot as plt\n'), ((7567, 7581), 'seaborn.kdeplot', 'sns.kdeplot', (['x'], {}), '(x)\n', (7578, 7581), True, 'import seaborn as sns\n'), ((7582, 7618), 'matplotlib.pyplot.title', 'plt.title', (['"""after Box-Cox-transform"""'], {}), "('after Box-Cox-transform')\n", (7591, 7618), True, 'import matplotlib.pyplot as plt\n'), ((7619, 7629), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7627, 7629), True, 'import matplotlib.pyplot as plt\n'), ((7874, 7888), 'seaborn.kdeplot', 'sns.kdeplot', (['x'], {}), '(x)\n', (7885, 7888), True, 'import seaborn as sns\n'), ((7889, 7926), 'matplotlib.pyplot.title', 'plt.title', (['"""before Box-Cox-transform"""'], {}), "('before Box-Cox-transform')\n", (7898, 7926), True, 'import matplotlib.pyplot as plt\n'), ((7927, 7937), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7935, 7937), True, 'import matplotlib.pyplot as plt\n'), ((7973, 7987), 'seaborn.kdeplot', 'sns.kdeplot', (['x'], {}), '(x)\n', (7984, 7987), True, 'import seaborn as sns\n'), ((7988, 8024), 'matplotlib.pyplot.title', 'plt.title', (['"""after Box-Cox-transform"""'], {}), "('after Box-Cox-transform')\n", (7997, 8024), True, 'import matplotlib.pyplot as plt\n'), ((8025, 8035), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8033, 8035), True, 'import matplotlib.pyplot as plt\n'), ((8725, 8763), 'sklearn.preprocessing.PowerTransformer', 'PowerTransformer', ([], {'method': '"""yeo-johnson"""'}), "(method='yeo-johnson')\n", (8741, 8763), False, 'from sklearn.preprocessing import PowerTransformer\n'), ((8990, 9001), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (8999, 9001), True, 'import plotly.graph_objects as go\n'), ((9545, 9556), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (9554, 9556), True, 'import 
plotly.graph_objects as go\n'), ((11281, 11292), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (11290, 11292), True, 'import plotly.graph_objects as go\n'), ((11834, 11845), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (11843, 11845), True, 'import plotly.graph_objects as go\n'), ((12453, 12464), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (12462, 12464), True, 'import plotly.graph_objects as go\n'), ((12978, 12989), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (12987, 12989), True, 'import plotly.graph_objects as go\n'), ((14480, 14567), 'sklearn.preprocessing.QuantileTransformer', 'QuantileTransformer', ([], {'n_quantiles': '(100)', 'random_state': '(0)', 'output_distribution': '"""normal"""'}), "(n_quantiles=100, random_state=0, output_distribution=\n 'normal')\n", (14499, 14567), False, 'from sklearn.preprocessing import QuantileTransformer\n'), ((14826, 14871), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': 'train.GarageArea', 'color': '"""teal"""'}), "(x=train.GarageArea, color='teal')\n", (14837, 14871), True, 'import seaborn as sns\n'), ((14915, 14925), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14923, 14925), True, 'import matplotlib.pyplot as plt\n'), ((14936, 14983), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': 'train_x.GarageArea', 'color': '"""teal"""'}), "(x=train_x.GarageArea, color='teal')\n", (14947, 14983), True, 'import seaborn as sns\n'), ((15026, 15036), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15034, 15036), True, 'import matplotlib.pyplot as plt\n'), ((6035, 6045), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (6042, 6045), True, 'import numpy as np\n'), ((6055, 6064), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (6061, 6064), True, 'import numpy as np\n')] |
import sys
import cv2
import numpy as np
from PyQt4 import QtGui, QtCore, Qt
from mainWindow_view import Ui_FaceDetector
from settings_view import Ui_Settings
import facenet
class Video():
    """Wraps an OpenCV capture source and runs frames through a facenet recognizer.

    Frames are converted to Qt pixmaps so the GUI can display them.
    """
    def __init__(self, capture, facenet):
        self.capture = capture
        self.currentFrame = np.array([])
        self.facenet = facenet
        # Intended to throttle how often recognition runs; see captureFrame.
        self.fps_delay = 1000
        self.frms_count = 1

    def captureFrame(self):
        """Capture one frame; run recognition only every fps_delay-th frame."""
        ret, readFrame = self.capture.read()
        if self.frms_count % self.fps_delay == 0:
            newframe = self.facenet.recognize_still_image(readFrame)
        else:
            newframe = readFrame
        return newframe

    def captureNextFrame(self):
        """Capture a frame, run recognition and store the RGB result in currentFrame.

        FIX: the original called recognize_still_image() before checking `ret`,
        so a failed read passed an invalid frame to the recognizer. Recognition
        now runs only when the read succeeded.
        """
        ret, readFrame = self.capture.read()
        if ret:
            newframe = self.facenet.recognize_still_image(readFrame)
            self.currentFrame = cv2.cvtColor(newframe, cv2.COLOR_BGR2RGB)

    def convertFrame(self):
        """Convert currentFrame to a QPixmap; return None if conversion fails."""
        try:
            height, width = self.currentFrame.shape[:2]
            img = QtGui.QImage(self.currentFrame,
                         width,
                         height,
                         QtGui.QImage.Format_RGB888)
            img = QtGui.QPixmap.fromImage(img)
            self.previousFrame = self.currentFrame
            return img
        except Exception:  # narrowed from a bare except
            return None

    def convertSpecifiedFrame(self, frame):
        """Convert an arbitrary RGB frame to a QPixmap; return None on failure.

        FIX: the original signature omitted `self`, so calling this as an
        instance method raised TypeError (the instance was bound to `frame`).
        """
        try:
            height, width = frame.shape[:2]
            img = QtGui.QImage(frame,
                         width,
                         height,
                         QtGui.QImage.Format_RGB888)
            img = QtGui.QPixmap.fromImage(img)
            return img
        except Exception:  # narrowed from a bare except
            return None

    def getImage(self):
        """Read and return the static test image from disk."""
        return cv2.imread("test.jpg")
class Gui(QtGui.QMainWindow):
    """Main application window: displays the video feed and wires up menu actions."""

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_FaceDetector()
        self.ui.setupUi(self)
        self.filename = ""
        self.detector = facenet.Facenet.Detectors.YOLO
        self.facenet = facenet.Facenet(self.detector)
        self.video = Video(cv2.VideoCapture(0), self.facenet)
        self.settings = Settings()
        self.initui()
        # Poll for a new frame roughly every 27 ms (~37 fps).
        self._timer = QtCore.QTimer(self)
        self._timer.timeout.connect(self.play)
        self._timer.start(27)
        self.update()
        self.ret, self.capturedFrame = self.video.capture.read()

    def initui(self):
        """Connect menu actions and the settings dialog's widgets."""
        self.ui.actionVideo.setShortcut("Ctrl+O")
        self.ui.actionVideo.setStatusTip("Open a video")
        self.ui.actionVideo.triggered.connect(self.openFile)
        self.ui.actionCamera.setShortcut("Ctrl+C")
        self.ui.actionCamera.setStatusTip("Open the Camera")
        self.ui.actionCamera.triggered.connect(self.openCamera)
        # FIX: these two lines previously re-configured actionCamera, clobbering
        # its Ctrl+C shortcut; the shortcut/status tip belong to actionSettings.
        self.ui.actionSettings.setShortcut("Ctrl+S")
        self.ui.actionSettings.setStatusTip("Settings")
        self.ui.actionSettings.triggered.connect(self.openSettings)
        self.settings.ui.comboBox.activated[str].connect(self.select_detector)
        self.settings.ui.pushButton.clicked.connect(self.openModel_path)

    def select_detector(self, text):
        """Switch to the face-detector backend chosen in the settings combo box."""
        print('detector changed to --', text)
        self.detector = text
        self.facenet.change_detector(self.detector)

    def openSettings(self):
        """Show the settings dialog."""
        self.settings.show()

    def openFile(self):
        """Let the user pick a video file and start playing it."""
        self.filename = QtGui.QFileDialog.getOpenFileName(self, 'Open File')
        if self.filename:
            self.video = Video(cv2.VideoCapture(self.filename), self.facenet)

    def openModel_path(self):
        """Let the user pick a model file for the recognizer."""
        text = QtGui.QFileDialog.getOpenFileName(self, 'Open File')
        if text:
            self.model_path = text
            self.facenet.change_model_path(self.model_path)
            # FIX: was print(self.model) — an attribute that never exists.
            print(self.model_path)

    def openCamera(self):
        """Switch the video source back to the default camera."""
        # FIX: the original omitted the required facenet argument to Video().
        self.video = Video(cv2.VideoCapture(0), self.facenet)

    def play(self):
        """Timer slot: fetch, convert and display the next frame."""
        try:
            self.video.captureNextFrame()
            self.ui.videoFrame.setPixmap(self.video.convertFrame())
            self.ui.videoFrame.setScaledContents(True)
        except TypeError:
            print("No frame")

    # Qt close event: also close the settings dialog so the app fully exits.
    def closeEvent(self, event):
        self.settings.close()
class Settings(QtGui.QMainWindow):
    """Settings dialog window built from the generated Ui_Settings form."""
    def __init__(self,parent=None):
        QtGui.QWidget.__init__(self,parent)
        self.ui = Ui_Settings()
        self.ui.setupUi(self)
def main():
    """Create the Qt application, show the main window and run the event loop."""
    app = QtGui.QApplication(sys.argv)
    ex = Gui()
    ex.show()
    # exec_() blocks until the window closes; its return code becomes the exit status.
    sys.exit(app.exec_())

if __name__ == '__main__':
    main()
"PyQt4.QtGui.QImage",
"PyQt4.QtGui.QApplication",
"facenet.Facenet",
"PyQt4.QtCore.QTimer",
"PyQt4.QtGui.QFileDialog.getOpenFileName",
"numpy.array",
"PyQt4.QtGui.QPixmap.fromImage",
"PyQt4.QtGui.QWidget.__init__",
"cv2.VideoCapture",
"cv2.cvtColor",
"mainWindow_view.Ui_FaceDetector",
"cv2.imr... | [((4996, 5024), 'PyQt4.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (5014, 5024), False, 'from PyQt4 import QtGui, QtCore, Qt\n'), ((288, 300), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (296, 300), True, 'import numpy as np\n'), ((2194, 2216), 'cv2.imread', 'cv2.imread', (['"""test.jpg"""'], {}), "('test.jpg')\n", (2204, 2216), False, 'import cv2\n'), ((2293, 2329), 'PyQt4.QtGui.QWidget.__init__', 'QtGui.QWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (2315, 2329), False, 'from PyQt4 import QtGui, QtCore, Qt\n'), ((2347, 2364), 'mainWindow_view.Ui_FaceDetector', 'Ui_FaceDetector', ([], {}), '()\n', (2362, 2364), False, 'from mainWindow_view import Ui_FaceDetector\n'), ((2500, 2530), 'facenet.Facenet', 'facenet.Facenet', (['self.detector'], {}), '(self.detector)\n', (2515, 2530), False, 'import facenet\n'), ((2672, 2691), 'PyQt4.QtCore.QTimer', 'QtCore.QTimer', (['self'], {}), '(self)\n', (2685, 2691), False, 'from PyQt4 import QtGui, QtCore, Qt\n'), ((3879, 3931), 'PyQt4.QtGui.QFileDialog.getOpenFileName', 'QtGui.QFileDialog.getOpenFileName', (['self', '"""Open File"""'], {}), "(self, 'Open File')\n", (3912, 3931), False, 'from PyQt4 import QtGui, QtCore, Qt\n'), ((4083, 4135), 'PyQt4.QtGui.QFileDialog.getOpenFileName', 'QtGui.QFileDialog.getOpenFileName', (['self', '"""Open File"""'], {}), "(self, 'Open File')\n", (4116, 4135), False, 'from PyQt4 import QtGui, QtCore, Qt\n'), ((4875, 4911), 'PyQt4.QtGui.QWidget.__init__', 'QtGui.QWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (4897, 4911), False, 'from PyQt4 import QtGui, QtCore, Qt\n'), ((4929, 4942), 'settings_view.Ui_Settings', 'Ui_Settings', ([], {}), '()\n', (4940, 4942), False, 'from settings_view import Ui_Settings\n'), ((1156, 1197), 'cv2.cvtColor', 'cv2.cvtColor', (['newframe', 'cv2.COLOR_BGR2RGB'], {}), '(newframe, cv2.COLOR_BGR2RGB)\n', (1168, 1197), False, 'import cv2\n'), ((1385, 1459), 'PyQt4.QtGui.QImage', 
'QtGui.QImage', (['self.currentFrame', 'width', 'height', 'QtGui.QImage.Format_RGB888'], {}), '(self.currentFrame, width, height, QtGui.QImage.Format_RGB888)\n', (1397, 1459), False, 'from PyQt4 import QtGui, QtCore, Qt\n'), ((1566, 1594), 'PyQt4.QtGui.QPixmap.fromImage', 'QtGui.QPixmap.fromImage', (['img'], {}), '(img)\n', (1589, 1594), False, 'from PyQt4 import QtGui, QtCore, Qt\n'), ((1893, 1955), 'PyQt4.QtGui.QImage', 'QtGui.QImage', (['frame', 'width', 'height', 'QtGui.QImage.Format_RGB888'], {}), '(frame, width, height, QtGui.QImage.Format_RGB888)\n', (1905, 1955), False, 'from PyQt4 import QtGui, QtCore, Qt\n'), ((2062, 2090), 'PyQt4.QtGui.QPixmap.fromImage', 'QtGui.QPixmap.fromImage', (['img'], {}), '(img)\n', (2085, 2090), False, 'from PyQt4 import QtGui, QtCore, Qt\n'), ((2558, 2577), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (2574, 2577), False, 'import cv2\n'), ((4329, 4348), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (4345, 4348), False, 'import cv2\n'), ((3990, 4021), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.filename'], {}), '(self.filename)\n', (4006, 4021), False, 'import cv2\n')] |
#!/usr/bin/env python
# Copyright (c) 2014-2018 <NAME>, Ph.D.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Input/output: default units are METERS and DEGREES.
boolean deg=True means degrees
For most functions you can input Numpy arrays of any shape, except as noted in the functions
see tests/Test.py for example uses.
"""
from __future__ import division
from copy import deepcopy
from six import string_types,PY2
from datetime import datetime
try:
import numpy
from numpy import sin, cos, tan, sqrt, radians, arctan2, hypot, degrees
except ImportError:
numpy = None
from math import sin, cos, tan, sqrt, radians, hypot, degrees
from math import atan2 as arctan2
try:
from astropy.time import Time
from astropy import units as u
from astropy.coordinates import Angle,SkyCoord, EarthLocation, AltAz, ICRS
except ImportError:
Time = None
#
from .vallado import vazel2radec, vradec2azel
from .timeconv import str2dt
class EarthEllipsoid:
    """Reference ellipsoid parameters.

    Attributes
    ----------
    a : float  semi-major (equatorial) axis [m]
    f : float  flattening (dimensionless)
    b : float  semi-minor (polar) axis [m], derived as a*(1-f)

    Supported models: 'wgs84' (default) and 'grs80'.
    """
    def __init__(self, model='wgs84'):
        if model == 'wgs84':
            # https://en.wikipedia.org/wiki/World_Geodetic_System#WGS84
            self.a = 6378137.  # semi-major axis [m]
            self.f = 1 / 298.2572235630  # flattening
            self.b = self.a * (1 - self.f)  # semi-minor axis
        elif model == 'grs80':
            # https://en.wikipedia.org/wiki/GRS_80
            self.a = 6378137.  # semi-major axis [m]
            self.f = 1 / 298.257222100882711243  # flattening
            self.b = self.a * (1 - self.f)  # semi-minor axis
        else:
            # FIX: the original silently returned an object with no attributes
            # for unknown models, deferring the failure to a confusing
            # AttributeError later.  Fail fast instead.
            raise ValueError('unknown reference ellipsoid model: %s' % model)
#%% to AER (azimuth, elevation, range)
def ecef2aer(x, y, z, lat0, lon0, h0, ell=None, deg=True):
    """Azimuth/elevation/slant-range of an ECEF target as seen by an observer.

    Parameters
    ----------
    x, y, z : target ECEF coordinates [m]
    lat0, lon0 : observer geodetic coordinates (degrees if deg=True, else radians)
    h0 : observer altitude above the ellipsoid [m]
    ell : reference ellipsoid (default WGS-84)
    deg : True for degrees in/out, False for radians

    Returns
    -------
    az, el, srange : azimuth [0,360), elevation [0,90], slant range [m]
    """
    east, north, up = ecef2enu(x, y, z, lat0, lon0, h0, ell, deg=deg)
    return enu2aer(east, north, up, deg=deg)
def eci2aer(eci, lat0, lon0, h0, t):
    """Azimuth/elevation/slant-range of ECI targets as seen by one observer.

    Parameters
    ----------
    eci : Nx3 array of target ECI positions (x, y, z) [m]
    lat0, lon0 : observer geodetic coordinates [degrees]
    h0 : observer altitude above the ellipsoid [m]
    t : observation time(s) (datetime, UTC)

    Returns
    -------
    az, el, srange : azimuth [0,360) deg, elevation [0,90] deg, slant range [m]
    """
    pos_ecef = eci2ecef(eci, t)
    return ecef2aer(pos_ecef[:, 0], pos_ecef[:, 1], pos_ecef[:, 2],
                    lat0, lon0, h0)
def enu2aer(e, n, u, deg=True):
    """Convert local ENU coordinates to azimuth, elevation and slant range.

    Parameters
    ----------
    e, n, u : East, North, Up components [m]
    deg : True for degree output, False for radians

    Returns
    -------
    az : azimuth clockwise from north, wrapped to [0, 2*pi) / [0, 360)
    el : elevation above the horizon
    srange : slant range [m]
    """
    horiz = hypot(e, n)
    srange = hypot(horiz, u)
    elevation = arctan2(u, horiz)
    two_pi = 2 * arctan2(0, -1)  # 2*pi without needing a pi import
    azimuth = arctan2(e, n) % two_pi
    if not deg:
        return azimuth, elevation, srange
    return degrees(azimuth), degrees(elevation), srange
def geodetic2aer(lat, lon, h, lat0, lon0, h0, ell=None, deg=True):
    """Azimuth/elevation/slant-range of a geodetic target from a geodetic observer.

    Parameters
    ----------
    lat, lon, h : target geodetic coordinates and altitude [m]
    lat0, lon0, h0 : observer geodetic coordinates and altitude [m]
    ell : reference ellipsoid (default WGS-84)
    deg : True for degrees in/out, False for radians

    Returns
    -------
    az, el, srange : azimuth, elevation, slant range [m]
    """
    east, north, up = geodetic2enu(lat, lon, h, lat0, lon0, h0, ell, deg=deg)
    return enu2aer(east, north, up, deg=deg)
def ned2aer(n, e, d, deg=True):
    """Convert local NED coordinates to azimuth, elevation and slant range.

    NED differs from ENU only by swapping the horizontal axes and negating
    the vertical component, so delegate to enu2aer.

    Parameters
    ----------
    n, e, d : North, East, Down components [m]
    deg : True for degree output, False for radians
    """
    up = -d
    return enu2aer(e, n, up, deg=deg)
#%% to ECEF
def aer2ecef(az, el, srange, lat0, lon0, alt0, ell=None, deg=True):
    """ECEF position of a target given its AER look angles from an observer.

    Parameters
    ----------
    az, el : azimuth [0,360), elevation [0,90] (degrees if deg=True)
    srange : slant range [m]; a NaN slant range propagates NaN into z
    lat0, lon0, alt0 : observer geodetic position (altitude in meters)
    ell : reference ellipsoid (default WGS-84)
    deg : True for degrees in/out, False for radians

    Returns
    -------
    x, y, z : target ECEF coordinates [m]
    """
    # Observer position in geocentric (ECEF) coordinates.
    ox, oy, oz = geodetic2ecef(lat0, lon0, alt0, ell, deg=deg)
    # Spherical AER -> local ENU, then rotate ENU into the ECEF frame.
    east, north, up = aer2enu(az, el, srange, deg=deg)
    du, dv, dw = _enu2uvw(east, north, up, lat0, lon0, deg=deg)
    # Observer origin plus the rotated offset.
    return ox + du, oy + dv, oz + dw
def eci2ecef(eci, t):
    """
    Rotate ECI positions into the ECEF frame at time(s) t.

    input
    -----
    eci [meters]     Nx3 target ECI locations (x,y,z)
    t                time of observation (UTC): datetime, parseable string,
                     or Greenwich apparent sidereal angle in radians (float)

    output
    ------
    ecef             Nx3 target ECEF locations [meters]

    NOTE(review): conversion is idealized (rotation by GST only); it ignores
    nutation/polar motion, matching the module docstring's scope.
    """
    if numpy is None or Time is None:
        raise ImportError('eci2ecef requires Numpy and AstroPy')
    t = numpy.atleast_1d(t)
    if isinstance(t[0], string_types): # strings are parsed; floats fall through below
        t = str2dt(t)
    if isinstance(t[0], datetime):
        # Greenwich apparent sidereal time, in radians, via AstroPy.
        gst = Time(t).sidereal_time('apparent', 'greenwich').radian
    elif isinstance(t[0],float):
        gst = t
    else:
        raise TypeError('eci2ecef: time must be datetime or radian float')
    assert isinstance(gst[0], float) # must be in radians!
    eci = numpy.atleast_2d(eci)
    N, trip = eci.shape
    if eci.ndim > 2 or trip != 3:
        raise ValueError('eci triplets must be shape (N,3)')
    """ported from:
    https://github.com/dinkelk/astrodynamics/blob/master/rot3.m
    """
    ecef = numpy.empty_like(eci)
    for i in range(N):
        # rotate each row about the z-axis by its sidereal angle
        ecef[i, :] = _rottrip(gst[i]).dot(eci[i, :])
    return ecef
def enu2ecef(e1, n1, u1, lat0, lon0, h0, ell=None, deg=True):
    """ECEF position of a point given its local ENU offset from an observer.

    Parameters
    ----------
    e1, n1, u1 : East, North, Up offsets from the observer [m]
    lat0, lon0, h0 : observer geodetic position (altitude in meters)
    ell : reference ellipsoid (default WGS-84)
    deg : True for degrees in/out, False for radians

    Returns
    -------
    x, y, z : target ECEF coordinates [m]
    """
    ox, oy, oz = geodetic2ecef(lat0, lon0, h0, ell, deg=deg)
    du, dv, dw = _enu2uvw(e1, n1, u1, lat0, lon0, deg=deg)
    return ox + du, oy + dv, oz + dw
def geodetic2ecef(lat, lon, alt, ell=None, deg=True):
    """Convert geodetic coordinates to geocentric (ECEF) x, y, z.

    Parameters
    ----------
    lat, lon : geodetic latitude/longitude (degrees if deg=True, else radians)
    alt : altitude above the ellipsoid [m]
    ell : reference ellipsoid (default WGS-84)
    deg : True for degree input, False for radians

    Returns
    -------
    x, y, z : ECEF coordinates [m]
    """
    if ell is None:
        ell = EarthEllipsoid()
    if deg:
        lat = radians(lat)
        lon = radians(lon)
    # Radius of curvature in the prime vertical at this latitude.
    N = get_radius_normal(lat, ell)
    x = (N + alt) * cos(lat) * cos(lon)
    y = (N + alt) * cos(lat) * sin(lon)
    # The (b/a)**2 factor accounts for the ellipsoid's polar flattening.
    z = (N * (ell.b / ell.a)**2 + alt) * sin(lat)
    return x, y, z
def ned2ecef(n, e, d, lat0, lon0, h0, ell=None, deg=True):
    """ECEF position of a point given its local NED offset from an observer.

    Parameters
    ----------
    n, e, d : North, East, Down offsets [m]
    lat0, lon0, h0 : observer geodetic position (altitude in meters)
    ell : reference ellipsoid (default WGS-84)
    deg : True for degrees in/out, False for radians
    """
    up = -d
    return enu2ecef(e, n, up, lat0, lon0, h0, ell, deg=deg)
#%% to ECI
def aer2eci(az, el, srange, lat0, lon0, h0, t, ell=None, deg=True):
    """ECI position of a target given its AER look angles and observation time.

    Parameters
    ----------
    az, el : azimuth [0,360), elevation [0,90]
    srange : slant range [m]
    lat0, lon0, h0 : observer geodetic position (altitude in meters)
    t : observation time (datetime.datetime)
    ell : reference ellipsoid (default WGS-84)
    deg : True for degrees in/out, False for radians

    Returns
    -------
    eci x, y, z [m]
    """
    if numpy is None:
        raise ImportError('aer2eci requires Numpy')
    ex, ey, ez = aer2ecef(az, el, srange, lat0, lon0, h0, ell, deg)
    return ecef2eci(numpy.column_stack((ex, ey, ez)), t)
def ecef2eci(ecef, t):
    """
    Rotate ECEF positions into the ECI frame at time(s) t (inverse of eci2ecef).

    input
    -----
    ecef: Nx3 x,y,z (meters)
    t:    time of observation (UTC): datetime, parseable string,
          or Greenwich apparent sidereal angle in radians (float)

    output
    ------
    eci x,y,z (meters)
    """
    if Time is None or numpy is None:
        raise ImportError('ecef2eci requires Numpy and AstroPy')
    t = numpy.atleast_1d(t)
    if isinstance(t[0], string_types): # strings are parsed; floats fall through below
        t = str2dt(t)
    if isinstance(t[0], datetime):
        # Greenwich apparent sidereal time, in radians, via AstroPy.
        gst = Time(t).sidereal_time('apparent', 'greenwich').radian
    elif isinstance(t[0],float):
        gst = t
    else:
        raise TypeError('eci2ecef: time must be datetime or radian float')
    assert isinstance(gst[0], float) # must be in radians!
    ecef = numpy.atleast_2d(ecef)
    N, trip = ecef.shape
    if ecef.ndim > 2 or trip != 3:
        raise TypeError('ecef triplets must be shape (N,3)')
    """ported from:
    https://github.com/dinkelk/astrodynamics/blob/master/rot3.m
    """
    eci = numpy.empty_like(ecef)
    for i in range(N):
        eci[i, :] = _rottrip(gst[i]).T.dot(ecef[i, :]) # this one is transposed
    return eci
#%% to ENU
def aer2enu(az, el, srange, deg=True):
    """Convert azimuth, elevation and slant range to local ENU coordinates.

    Parameters
    ----------
    az, el : azimuth [0,360), elevation [0,90] (degrees if deg=True)
    srange : slant range [m]
    deg : True for degree input, False for radians

    Returns
    -------
    e, n, u : East, North, Up [m]
    """
    if deg:
        az = radians(az)
        el = radians(el)
    # Project the slant range onto the horizontal plane, then split by azimuth.
    horiz = srange * cos(el)
    east = horiz * sin(az)
    north = horiz * cos(az)
    up = srange * sin(el)
    return east, north, up
def ecef2enu(x, y, z, lat0, lon0, h0, ell=None, deg=True):
    """Local ENU coordinates of an ECEF target relative to an observer.

    Parameters
    ----------
    x, y, z : target ECEF coordinates [m]
    lat0, lon0, h0 : observer geodetic position (altitude in meters)
    ell : reference ellipsoid (default WGS-84)
    deg : True for degrees in/out, False for radians

    Returns
    -------
    e, n, u : East, North, Up [m]
    """
    ox, oy, oz = geodetic2ecef(lat0, lon0, h0, ell, deg=deg)
    return _uvw2enu(x - ox, y - oy, z - oz, lat0, lon0, deg=deg)
def ecef2enuv(u, v, w, lat0, lon0, deg=True):
    """Rotate an ECEF *vector* (between two points) into local ENU components.

    Parameters
    ----------
    u, v, w : vector components in the ECEF frame [m]
    lat0, lon0 : location defining the local tangent plane
    deg : True for degree input, False for radians

    Returns
    -------
    uEast, vNorth, wUp : vector components in the ENU frame [m]
    """
    if deg:
        lat0 = radians(lat0)
        lon0 = radians(lon0)
    sin_lat, cos_lat = sin(lat0), cos(lat0)
    sin_lon, cos_lon = sin(lon0), cos(lon0)
    t = cos_lon * u + sin_lon * v
    east = cos_lon * v - sin_lon * u
    up = cos_lat * t + sin_lat * w
    north = cos_lat * w - sin_lat * t
    return east, north, up
def geodetic2enu(lat, lon, h, lat0, lon0, h0, ell=None, deg=True):
    """Local ENU coordinates of a geodetic target relative to a geodetic observer.

    Parameters
    ----------
    lat, lon, h : target geodetic coordinates and altitude [m]
    lat0, lon0, h0 : observer geodetic coordinates and altitude [m]
    ell : reference ellipsoid (default WGS-84)
    deg : True for degrees in/out, False for radians

    Returns
    -------
    e, n, u : East, North, Up [m]
    """
    tx, ty, tz = geodetic2ecef(lat, lon, h, ell, deg=deg)
    ox, oy, oz = geodetic2ecef(lat0, lon0, h0, ell, deg=deg)
    return _uvw2enu(tx - ox, ty - oy, tz - oz, lat0, lon0, deg=deg)
#%% to geodetic
def aer2geodetic(az, el, srange, lat0, lon0, h0, deg=True):
    """Geodetic position of a target given its AER look angles from an observer.

    Parameters
    ----------
    az, el : azimuth, elevation (degrees if deg=True)
    srange : slant range [m]
    lat0, lon0, h0 : observer geodetic position (altitude in meters)
    deg : True for degrees in/out, False for radians

    Returns
    -------
    lat, lon : WGS-84 geodetic coordinates; alt : height above ellipsoid [m]
    """
    ex, ey, ez = aer2ecef(az, el, srange, lat0, lon0, h0, deg=deg)
    return ecef2geodetic(ex, ey, ez, deg=deg)
def ecef2geodetic(x, y, z, ell=None, deg=True):
    """
    Convert ECEF (meters) to geodetic coordinates.

    input
    -----
    x,y,z  [meters] target ECEF location  [0,Infinity)
    ell    reference ellipsoid
    deg    degrees input/output  (False: radians in/out)

    output
    ------
    lat,lon   (degrees/radians)
    alt  (meters)

    Algorithm is based on
    http://www.astro.uni.torun.pl/~kb/Papers/geod/Geod-BG.htm
    It iterates Newton's method on the latitude equation in terms of the
    parametric (reduced) latitude v, which avoids dividing by cos(phi) or
    sin(phi) and so behaves uniformly at all latitudes.
    """
    if ell is None:
        ell = EarthEllipsoid()
    ea = ell.a
    eb = ell.b
    # distance from the rotation axis
    rad = hypot(x, y)
    # Constant required for the latitude equation
    rho = arctan2(eb * z, ea * rad)
    # Constant required for the latitude equation
    c = (ea**2 - eb**2) / hypot(ea * rad, eb * z)
    # Starting guess for the Newton iteration
    vnew = arctan2(ea * z, eb * rad)
    # Initializing the parametric latitude
    v = 0
    # at most 5 Newton steps; converges much sooner in practice
    for _ in range (5):
        v = deepcopy(vnew)
        # one Newton step on the latitude equation
        vnew = v - ((2 * sin(v - rho) - c * sin(2 * v)) /
                    (2 * (cos(v - rho) - c * cos(2 * v))))
        if allclose(v,vnew):
            break
    # latitude from the root of the latitude equation
    lat = arctan2(ea * tan(vnew), eb)
    # longitude follows directly from x,y
    lon = arctan2(y, x)
    alt = (((rad - ea * cos(vnew)) * cos(lat)) +
           ((z - eb * sin(vnew)) * sin(lat)))
    if deg:
        return degrees(lat), degrees(lon), alt
    else:
        return lat, lon, alt # radians
"""
this is from PySatel and gives same result to EIGHT decimal places
def cbrt(x):
if x >= 0:
return pow(x, 1.0/3.0)
else:
return -pow(abs(x), 1.0/3.0)
def ecef2geodetic(x, y, z, ell=EarthEllipsoid(),deg=True):
a = ell.a; b = ell.b
esq = 6.69437999014*0.001
e1sq = 6.73949674228*0.001
r = hypot(x,y)
Esq = a**2 - b**2
F = 54 * b**2 * z**2
G = r**2 + (1 - esq)* z**2 - esq*Esq
C = (esq**2 *F* r**2)/(pow(G, 3))
S = cbrt(1 + C + sqrt(C**2 + 2*C))
P = F/(3* pow((S + 1/S + 1), 2)*G**2)
Q = sqrt(1 + 2* esq**2 *P)
r_0 = -(P*esq*r)/(1 + Q) + sqrt(0.5* a**2 *(1 + 1.0/Q) - \
P*(1 - esq)*z**2/(Q*(1 + Q)) - 0.5*P* r**2)
U = sqrt(pow((r - esq*r_0), 2) + z**2)
V = sqrt(pow((r - esq*r_0), 2) + (1 - esq)* z**2)
Z_0 = b**2 *z/(a*V)
alt = U*(1 - b**2/(a*V))
lat = arctan((z + e1sq*Z_0)/r)
lon = arctan2(y, x)
if deg:
return degrees(lat),degrees(lon),alt
else:
return lat, lon, alt #radians
"""
def eci2geodetic(eci, t):
    """Geodetic coordinates of ECI positions at time(s) t (a.k.a. eci2lla()).

    Parameters
    ----------
    eci : Nx3 x,y,z triplets in the ECI frame [m]
    t : length-N vector of datetimes OR Greenwich sidereal angles [radians]

    Returns
    -------
    lat, lon (degrees) and alt (meters)

    Note: conversion is idealized — nutations/perturbations are not modeled.
    """
    pos_ecef = eci2ecef(eci, t)
    return ecef2geodetic(pos_ecef[:, 0], pos_ecef[:, 1], pos_ecef[:, 2])
def enu2geodetic(e, n, u, lat0, lon0, h0, ell=None, deg=True):
    """Geodetic position of a point given its local ENU offset from an observer.

    Parameters
    ----------
    e, n, u : East, North, Up offsets [m]
    lat0, lon0, h0 : observer geodetic position (altitude in meters)
    ell : reference ellipsoid (default WGS-84)
    deg : True for degrees in/out, False for radians
    """
    ex, ey, ez = enu2ecef(e, n, u, lat0, lon0, h0, ell, deg=deg)
    return ecef2geodetic(ex, ey, ez, ell, deg=deg)
def ned2geodetic(n, e, d, lat0, lon0, h0, ell=None, deg=True):
    """Geodetic position of a point given its local NED offset from an observer.

    Parameters
    ----------
    n, e, d : North, East, Down offsets [m]
    lat0, lon0, h0 : observer geodetic position (altitude in meters)
    ell : reference ellipsoid (default WGS-84)
    deg : True for degrees in/out, False for radians
    """
    ex, ey, ez = enu2ecef(e, n, -d, lat0, lon0, h0, ell, deg=deg)
    return ecef2geodetic(ex, ey, ez, ell, deg=deg)
# %% to NED
def aer2ned(az, elev, slantRange, deg=True):
    """Convert azimuth, elevation and slant range to local NED coordinates.

    Parameters
    ----------
    az, elev : azimuth [0,360), elevation [0,90]
    slantRange : slant range [m]
    deg : True for degree input, False for radians
    """
    east, north, up = aer2enu(az, elev, slantRange, deg=deg)
    return north, east, -up
def ecef2ned(x, y, z, lat0, lon0, h0, ell=None, deg=True):
    """Local NED coordinates of an ECEF target relative to an observer.

    Parameters
    ----------
    x, y, z : target ECEF coordinates [m]
    lat0, lon0, h0 : observer geodetic position (altitude in meters)
    ell : reference ellipsoid (default WGS-84)
    deg : True for degrees in/out, False for radians
    """
    east, north, up = ecef2enu(x, y, z, lat0, lon0, h0, ell, deg=deg)
    return north, east, -up
def ecef2nedv(u, v, w, lat0, lon0, deg=True):
    """Rotate an ECEF *vector* (between two points) into local NED components."""
    east, north, up = ecef2enuv(u, v, w, lat0, lon0, deg=deg)
    return north, east, -up
def geodetic2ned(lat, lon, h, lat0, lon0, h0, ell=None, deg=True):
    """Local NED coordinates of a geodetic target relative to a geodetic observer.

    Parameters
    ----------
    lat, lon, h : target geodetic coordinates and altitude [m]
    lat0, lon0, h0 : observer geodetic coordinates and altitude [m]
    ell : reference ellipsoid (default WGS-84)
    deg : True for degrees in/out, False for radians
    """
    east, north, up = geodetic2enu(lat, lon, h, lat0, lon0, h0, ell, deg=deg)
    return north, east, -up
#%% shared functions
def get_radius_normal(lat_radians, ell):
    """Radius of curvature in the prime vertical at a geodetic latitude [radians]."""
    if ell is None:
        ell = EarthEllipsoid()
    a = ell.a
    b = ell.b
    c = cos(lat_radians)
    s = sin(lat_radians)
    return a**2 / sqrt(a**2 * c**2 + b**2 * s**2)
#%% internal use
def _rottrip(ang):
ang = ang.squeeze()
if ang.size > 1:
raise ValueError('only one angle allowed at a time')
"""ported from:
https://github.com/dinkelk/astrodynamics/blob/master/rot3.m
"""
return numpy.array([[cos(ang), sin(ang), 0],
[-sin(ang), cos(ang), 0],
[0, 0, 1]])
def _enu2uvw(east, north, up, lat0, lon0, deg=True):
if deg:
lat0 = radians(lat0)
lon0 = radians(lon0)
t = cos(lat0) * up - sin(lat0) * north
w = sin(lat0) * up + cos(lat0) * north
u = cos(lon0) * t - sin(lon0) * east
v = sin(lon0) * t + cos(lon0) * east
return u, v, w
def _uvw2enu(u, v, w, lat0, lon0, deg):
if deg:
lat0 = radians(lat0)
lon0 = radians(lon0)
t = cos(lon0) * u + sin(lon0) * v
East = -sin(lon0) * u + cos(lon0) * v
Up = cos(lat0) * t + sin(lat0) * w
North = -sin(lat0) * t + cos(lat0) * w
return East, North, Up
# %% azel radec
def azel2radec(az_deg, el_deg, lat_deg, lon_deg, t):
    """Convert astronomical target horizontal azimuth/elevation to right
    ascension/declination (all in degrees), for an observer at
    (lat_deg, lon_deg) at time t (UTC)."""
    if PY2 or Time is None: # non-AstroPy fallback path, less accurate
        return vazel2radec(az_deg, el_deg, lat_deg, lon_deg, t)
    t = str2dt(t)
    obs = EarthLocation(lat=lat_deg * u.deg, lon=lon_deg * u.deg)
    direc = AltAz(location=obs, obstime=Time(t),
                  az=az_deg * u.deg, alt=el_deg * u.deg)
    # transform the horizontal-frame direction into the ICRS (RA/Dec) frame
    sky = SkyCoord(direc.transform_to(ICRS()))
    return sky.ra.deg, sky.dec.deg
def radec2azel(ra_deg, dec_deg, lat_deg, lon_deg, t):
    """Convert astronomical target right ascension/declination to horizontal
    azimuth/elevation (all in degrees), for one observer at
    (lat_deg, lon_deg) at time t (UTC)."""
    if numpy is None:
        raise ImportError('radec2azel requires Numpy')
    if Time is None:
        # non-AstroPy fallback path, less accurate
        return vradec2azel(ra_deg, dec_deg, lat_deg, lon_deg, t)
    #%% input trapping
    t = str2dt(t)
    lat_deg = numpy.atleast_1d(lat_deg)
    lon_deg = numpy.atleast_1d(lon_deg)
    ra_deg = numpy.atleast_1d(ra_deg)
    dec_deg = numpy.atleast_1d(dec_deg)
    # FIX: the original read "lat_deg.size == 1 & lon_deg.size == 1"; `&` binds
    # tighter than `==`, so the check never tested lon_deg.size as intended.
    assert lat_deg.size == 1 and lon_deg.size == 1, 'radec2azel is designed for one observer and one or more points (ra,dec).'
    assert ra_deg.shape == dec_deg.shape, 'ra and dec must be the same shape ndarray'
    obs = EarthLocation(lat=lat_deg * u.deg,
                        lon=lon_deg * u.deg)
    points = SkyCoord(Angle(ra_deg, unit=u.deg),
                      Angle(dec_deg, unit=u.deg),
                      equinox='J2000.0')
    altaz = points.transform_to(AltAz(location=obs, obstime=Time(t)))
    return altaz.az.degree, altaz.alt.degree
# %%
def isclose(actual, desired, rtol=1e-7, atol=0):
    """
    Rigorously evaluate closeness of two scalar values.
    https://www.python.org/dev/peps/pep-0485/#proposed-implementation
    """
    return abs(actual-desired) <= max(rtol * max(abs(actual), abs(desired)), atol)

def allclose(actual, desired, rtol=1e-7, atol=0):
    """1-D only version of numpy.testing.assert_allclose.

    Scalars are compared directly; iterables are compared element-wise.
    FIX: the original returned after testing only the FIRST pair of elements,
    so mismatches beyond index 0 went undetected.
    """
    try:
        pairs = zip(actual, desired)
    except TypeError:
        # scalar inputs (e.g. the ecef2geodetic Newton loop) fall back here
        return isclose(actual, desired, rtol, atol)
    return all(isclose(a, d, rtol, atol) for a, d in pairs)
"numpy.atleast_2d",
"astropy.coordinates.ICRS",
"astropy.coordinates.EarthLocation",
"math.tan",
"astropy.coordinates.Angle",
"math.degrees",
"numpy.column_stack",
"math.radians",
"math.cos",
"astropy.time.Time",
"numpy.empty_like",
"math.atan2",
"copy.deepcopy",
"math.hypot",
"math.sin"... | [((4744, 4755), 'math.hypot', 'hypot', (['e', 'n'], {}), '(e, n)\n', (4749, 4755), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((4773, 4784), 'math.hypot', 'hypot', (['r', 'u'], {}), '(r, u)\n', (4778, 4784), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((4796, 4809), 'math.atan2', 'arctan2', (['u', 'r'], {}), '(u, r)\n', (4803, 4809), True, 'from math import atan2 as arctan2\n'), ((7420, 7439), 'numpy.atleast_1d', 'numpy.atleast_1d', (['t'], {}), '(t)\n', (7436, 7439), False, 'import numpy\n'), ((7849, 7870), 'numpy.atleast_2d', 'numpy.atleast_2d', (['eci'], {}), '(eci)\n', (7865, 7870), False, 'import numpy\n'), ((8093, 8114), 'numpy.empty_like', 'numpy.empty_like', (['eci'], {}), '(eci)\n', (8109, 8114), False, 'import numpy\n'), ((11072, 11091), 'numpy.atleast_1d', 'numpy.atleast_1d', (['t'], {}), '(t)\n', (11088, 11091), False, 'import numpy\n'), ((11502, 11524), 'numpy.atleast_2d', 'numpy.atleast_2d', (['ecef'], {}), '(ecef)\n', (11518, 11524), False, 'import numpy\n'), ((11748, 11770), 'numpy.empty_like', 'numpy.empty_like', (['ecef'], {}), '(ecef)\n', (11764, 11770), False, 'import numpy\n'), ((15290, 15301), 'math.hypot', 'hypot', (['x', 'y'], {}), '(x, y)\n', (15295, 15301), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((15354, 15379), 'math.atan2', 'arctan2', (['(eb * z)', '(ea * rad)'], {}), '(eb * z, ea * rad)\n', (15361, 15379), True, 'from math import atan2 as arctan2\n'), ((15526, 15551), 'math.atan2', 'arctan2', (['(ea * z)', '(eb * rad)'], {}), '(ea * z, eb * rad)\n', (15533, 15551), True, 'from math import atan2 as arctan2\n'), ((15991, 16004), 'math.atan2', 'arctan2', (['y', 'x'], {}), '(y, x)\n', (15998, 16004), True, 'from math import atan2 as arctan2\n'), ((22000, 22055), 'astropy.coordinates.EarthLocation', 'EarthLocation', ([], {'lat': '(lat_deg * u.deg)', 'lon': '(lon_deg * u.deg)'}), '(lat=lat_deg * u.deg, lon=lon_deg * u.deg)\n', 
(22013, 22055), False, 'from astropy.coordinates import Angle, SkyCoord, EarthLocation, AltAz, ICRS\n'), ((22637, 22662), 'numpy.atleast_1d', 'numpy.atleast_1d', (['lat_deg'], {}), '(lat_deg)\n', (22653, 22662), False, 'import numpy\n'), ((22677, 22702), 'numpy.atleast_1d', 'numpy.atleast_1d', (['lon_deg'], {}), '(lon_deg)\n', (22693, 22702), False, 'import numpy\n'), ((22716, 22740), 'numpy.atleast_1d', 'numpy.atleast_1d', (['ra_deg'], {}), '(ra_deg)\n', (22732, 22740), False, 'import numpy\n'), ((22755, 22780), 'numpy.atleast_1d', 'numpy.atleast_1d', (['dec_deg'], {}), '(dec_deg)\n', (22771, 22780), False, 'import numpy\n'), ((23004, 23059), 'astropy.coordinates.EarthLocation', 'EarthLocation', ([], {'lat': '(lat_deg * u.deg)', 'lon': '(lon_deg * u.deg)'}), '(lat=lat_deg * u.deg, lon=lon_deg * u.deg)\n', (23017, 23059), False, 'from astropy.coordinates import Angle, SkyCoord, EarthLocation, AltAz, ICRS\n'), ((4820, 4833), 'math.atan2', 'arctan2', (['e', 'n'], {}), '(e, n)\n', (4827, 4833), True, 'from math import atan2 as arctan2\n'), ((9246, 9258), 'math.radians', 'radians', (['lat'], {}), '(lat)\n', (9253, 9258), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((9273, 9285), 'math.radians', 'radians', (['lon'], {}), '(lon)\n', (9280, 9285), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((9507, 9515), 'math.cos', 'cos', (['lon'], {}), '(lon)\n', (9510, 9515), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((9547, 9555), 'math.sin', 'sin', (['lon'], {}), '(lon)\n', (9550, 9555), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((9597, 9605), 'math.sin', 'sin', (['lat'], {}), '(lat)\n', (9600, 9605), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((10739, 10768), 'numpy.column_stack', 'numpy.column_stack', (['(x, y, z)'], {}), '((x, y, z))\n', (10757, 10768), False, 'import numpy\n'), ((12340, 12351), 'math.radians', 'radians', 
(['el'], {}), '(el)\n', (12347, 12351), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((12365, 12376), 'math.radians', 'radians', (['az'], {}), '(az)\n', (12372, 12376), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((12395, 12402), 'math.cos', 'cos', (['el'], {}), '(el)\n', (12398, 12402), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((13193, 13206), 'math.radians', 'radians', (['lat0'], {}), '(lat0)\n', (13200, 13206), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((13222, 13235), 'math.radians', 'radians', (['lon0'], {}), '(lon0)\n', (13229, 13235), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((15448, 15471), 'math.hypot', 'hypot', (['(ea * rad)', '(eb * z)'], {}), '(ea * rad, eb * z)\n', (15453, 15471), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((15637, 15651), 'copy.deepcopy', 'deepcopy', (['vnew'], {}), '(vnew)\n', (15645, 15651), False, 'from copy import deepcopy\n'), ((21118, 21131), 'math.radians', 'radians', (['lat0'], {}), '(lat0)\n', (21125, 21131), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21147, 21160), 'math.radians', 'radians', (['lon0'], {}), '(lon0)\n', (21154, 21160), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21419, 21432), 'math.radians', 'radians', (['lat0'], {}), '(lat0)\n', (21426, 21432), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21448, 21461), 'math.radians', 'radians', (['lon0'], {}), '(lon0)\n', (21455, 21461), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((23107, 23132), 'astropy.coordinates.Angle', 'Angle', (['ra_deg'], {'unit': 'u.deg'}), '(ra_deg, unit=u.deg)\n', (23112, 23132), False, 'from astropy.coordinates import Angle, SkyCoord, EarthLocation, AltAz, ICRS\n'), ((23157, 23183), 'astropy.coordinates.Angle', 'Angle', 
(['dec_deg'], {'unit': 'u.deg'}), '(dec_deg, unit=u.deg)\n', (23162, 23183), False, 'from astropy.coordinates import Angle, SkyCoord, EarthLocation, AltAz, ICRS\n'), ((4841, 4855), 'math.atan2', 'arctan2', (['(0)', '(-1)'], {}), '(0, -1)\n', (4848, 4855), True, 'from math import atan2 as arctan2\n'), ((4884, 4895), 'math.degrees', 'degrees', (['az'], {}), '(az)\n', (4891, 4895), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((4897, 4910), 'math.degrees', 'degrees', (['elev'], {}), '(elev)\n', (4904, 4910), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((9496, 9504), 'math.cos', 'cos', (['lat'], {}), '(lat)\n', (9499, 9504), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((9536, 9544), 'math.cos', 'cos', (['lat'], {}), '(lat)\n', (9539, 9544), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((12419, 12426), 'math.sin', 'sin', (['az'], {}), '(az)\n', (12422, 12426), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((12432, 12439), 'math.cos', 'cos', (['az'], {}), '(az)\n', (12435, 12439), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((12450, 12457), 'math.sin', 'sin', (['el'], {}), '(el)\n', (12453, 12457), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((13251, 13260), 'math.cos', 'cos', (['lon0'], {}), '(lon0)\n', (13254, 13260), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((13267, 13276), 'math.sin', 'sin', (['lon0'], {}), '(lon0)\n', (13270, 13276), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((13311, 13320), 'math.cos', 'cos', (['lon0'], {}), '(lon0)\n', (13314, 13320), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((13339, 13348), 'math.cos', 'cos', (['lat0'], {}), '(lat0)\n', (13342, 13348), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((13355, 
13364), 'math.sin', 'sin', (['lat0'], {}), '(lat0)\n', (13358, 13364), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((13399, 13408), 'math.cos', 'cos', (['lat0'], {}), '(lat0)\n', (13402, 13408), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((15946, 15955), 'math.tan', 'tan', (['vnew'], {}), '(vnew)\n', (15949, 15955), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((16043, 16051), 'math.cos', 'cos', (['lat'], {}), '(lat)\n', (16046, 16051), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((16090, 16098), 'math.sin', 'sin', (['lat'], {}), '(lat)\n', (16093, 16098), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((16129, 16141), 'math.degrees', 'degrees', (['lat'], {}), '(lat)\n', (16136, 16141), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((16143, 16155), 'math.degrees', 'degrees', (['lon'], {}), '(lon)\n', (16150, 16155), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21169, 21178), 'math.cos', 'cos', (['lat0'], {}), '(lat0)\n', (21172, 21178), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21186, 21195), 'math.sin', 'sin', (['lat0'], {}), '(lat0)\n', (21189, 21195), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21212, 21221), 'math.sin', 'sin', (['lat0'], {}), '(lat0)\n', (21215, 21221), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21229, 21238), 'math.cos', 'cos', (['lat0'], {}), '(lat0)\n', (21232, 21238), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21256, 21265), 'math.cos', 'cos', (['lon0'], {}), '(lon0)\n', (21259, 21265), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21272, 21281), 'math.sin', 'sin', (['lon0'], {}), '(lon0)\n', (21275, 21281), False, 'from math import sin, cos, tan, sqrt, radians, 
hypot, degrees\n'), ((21297, 21306), 'math.sin', 'sin', (['lon0'], {}), '(lon0)\n', (21300, 21306), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21313, 21322), 'math.cos', 'cos', (['lon0'], {}), '(lon0)\n', (21316, 21322), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21470, 21479), 'math.cos', 'cos', (['lon0'], {}), '(lon0)\n', (21473, 21479), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21486, 21495), 'math.sin', 'sin', (['lon0'], {}), '(lon0)\n', (21489, 21495), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21528, 21537), 'math.cos', 'cos', (['lon0'], {}), '(lon0)\n', (21531, 21537), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21551, 21560), 'math.cos', 'cos', (['lat0'], {}), '(lat0)\n', (21554, 21560), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21567, 21576), 'math.sin', 'sin', (['lat0'], {}), '(lat0)\n', (21570, 21576), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21610, 21619), 'math.cos', 'cos', (['lat0'], {}), '(lat0)\n', (21613, 21619), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((22097, 22104), 'astropy.time.Time', 'Time', (['t'], {}), '(t)\n', (22101, 22104), False, 'from astropy.time import Time\n'), ((22202, 22208), 'astropy.coordinates.ICRS', 'ICRS', ([], {}), '()\n', (22206, 22208), False, 'from astropy.coordinates import Angle, SkyCoord, EarthLocation, AltAz, ICRS\n'), ((13295, 13304), 'math.sin', 'sin', (['lon0'], {}), '(lon0)\n', (13298, 13304), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((13383, 13392), 'math.sin', 'sin', (['lat0'], {}), '(lat0)\n', (13386, 13392), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((20916, 20924), 'math.cos', 'cos', (['ang'], {}), '(ang)\n', (20919, 20924), False, 'from math import sin, cos, tan, sqrt, 
radians, hypot, degrees\n'), ((20927, 20935), 'math.sin', 'sin', (['ang'], {}), '(ang)\n', (20930, 20935), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((20974, 20982), 'math.cos', 'cos', (['ang'], {}), '(ang)\n', (20977, 20982), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21512, 21521), 'math.sin', 'sin', (['lon0'], {}), '(lon0)\n', (21515, 21521), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((21594, 21603), 'math.sin', 'sin', (['lat0'], {}), '(lat0)\n', (21597, 21603), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((23287, 23294), 'astropy.time.Time', 'Time', (['t'], {}), '(t)\n', (23291, 23294), False, 'from astropy.time import Time\n'), ((7589, 7596), 'astropy.time.Time', 'Time', (['t'], {}), '(t)\n', (7593, 7596), False, 'from astropy.time import Time\n'), ((11241, 11248), 'astropy.time.Time', 'Time', (['t'], {}), '(t)\n', (11245, 11248), False, 'from astropy.time import Time\n'), ((16030, 16039), 'math.cos', 'cos', (['vnew'], {}), '(vnew)\n', (16033, 16039), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((16077, 16086), 'math.sin', 'sin', (['vnew'], {}), '(vnew)\n', (16080, 16086), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((20964, 20972), 'math.sin', 'sin', (['ang'], {}), '(ang)\n', (20967, 20972), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((15721, 15733), 'math.sin', 'sin', (['(v - rho)'], {}), '(v - rho)\n', (15724, 15733), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((15740, 15750), 'math.sin', 'sin', (['(2 * v)'], {}), '(2 * v)\n', (15743, 15750), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((15780, 15792), 'math.cos', 'cos', (['(v - rho)'], {}), '(v - rho)\n', (15783, 15792), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((20596, 20612), 'math.cos', 
'cos', (['lat_radians'], {}), '(lat_radians)\n', (20599, 20612), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((20635, 20651), 'math.sin', 'sin', (['lat_radians'], {}), '(lat_radians)\n', (20638, 20651), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n'), ((15799, 15809), 'math.cos', 'cos', (['(2 * v)'], {}), '(2 * v)\n', (15802, 15809), False, 'from math import sin, cos, tan, sqrt, radians, hypot, degrees\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions to calculate frequency spectra."""
import copy
import warnings
import contextlib
import os
from stingray.gti import cross_gtis
from stingray.crossspectrum import AveragedCrossspectrum
from stingray.powerspectrum import AveragedPowerspectrum
from stingray.utils import show_progress
from stingray.gti import time_intervals_from_gtis
from stingray.events import EventList
import numpy as np
from astropy import log
from astropy.logger import AstropyUserWarning
from .base import (
hen_root,
common_name,
_assign_value_if_none,
interpret_bintime,
)
from .io import sort_files, save_pds, load_data
from .io import HEN_FILE_EXTENSION, get_file_type
def average_periodograms(fspec_iterable, total=None):
    """Sum a list (or iterable) of power density spectra.

    The result is the average of the input spectra, weighted by the number
    of averaged segments ``m`` of each one; errors are propagated in
    quadrature. All spectra must share the same frequency grid,
    normalization and rebinning.

    Parameters
    ----------
    fspec_iterable : iterable
        Iterable of power spectra (e.g. ``AveragedPowerspectrum`` objects),
        each with ``freq``, ``power``, ``power_err``, ``m``, ``norm`` and
        ``fftlen`` attributes.

    Other Parameters
    ----------------
    total : int, default None
        Total number of spectra; only used for the progress bar.

    Returns
    -------
    tot_contents : same type as the input spectra
        The averaged spectrum, with ``m`` set to the total segment count.

    Raises
    ------
    ValueError
        If ``fspec_iterable`` yields no spectra.

    Examples
    --------
    >>> pds = AveragedPowerspectrum()
    >>> pds.freq = np.asarray([1, 2, 3])
    >>> pds.power = np.asarray([3, 3, 3])
    >>> pds.power_err = np.asarray([0.1, 0.1, 0.1])
    >>> pds.m = 1
    >>> pds.fftlen = 128
    >>> pds1 = copy.deepcopy(pds)
    >>> pds1.m = 2
    >>> tot_pds = average_periodograms([pds, pds1])
    >>> np.allclose(tot_pds.power, pds.power)
    True
    >>> np.allclose(tot_pds.power_err, pds.power_err / np.sqrt(3))
    True
    >>> tot_pds.m
    3
    """
    tot_contents = None
    for i, contents in enumerate(show_progress(fspec_iterable, total=total)):
        freq = contents.freq
        pds = contents.power
        epds = contents.power_err
        nchunks = contents.m
        rebin = 1
        norm = contents.norm
        fftlen = contents.fftlen

        if i == 0:
            rebin0, norm0, freq0 = rebin, norm, freq
            # Accumulate m-weighted powers; errors are summed in quadrature.
            tot_pds = pds * nchunks
            tot_epds = epds ** 2 * nchunks
            tot_npds = nchunks
            tot_contents = copy.copy(contents)
        else:
            assert np.all(
                rebin == rebin0
            ), "Files must be rebinned in the same way"
            np.testing.assert_array_almost_equal(
                freq,
                freq0,
                decimal=int(-np.log10(1 / fftlen) + 2),
                err_msg="Frequencies must coincide",
            )
            assert norm == norm0, "Files must have the same normalization"
            tot_pds += pds * nchunks
            tot_epds += epds ** 2 * nchunks
            tot_npds += nchunks

    if tot_contents is None:
        # Fail loudly instead of raising an obscure NameError below.
        raise ValueError("The input iterable contains no spectra")

    tot_contents.power = tot_pds / tot_npds
    tot_contents.power_err = np.sqrt(tot_epds) / tot_npds
    tot_contents.m = tot_npds

    return tot_contents
def _wrap_fun_cpds(arglist):
    """Unpack a ``[file1, file2, outname, kwargs]`` list and run `calc_cpds`."""
    fname1, fname2, out, kw = arglist
    return calc_cpds(fname1, fname2, outname=out, **kw)
def _wrap_fun_pds(argdict):
    """Pop the file name from ``argdict`` and run `calc_pds` with the rest.

    Note: ``argdict`` is modified in place (the ``"fname"`` key is removed),
    matching the original behavior.
    """
    # A single dict.pop replaces the lookup + separate pop of the original.
    fname = argdict.pop("fname")
    return calc_pds(fname, **argdict)
def sync_gtis(lc1, lc2):
    """Sync gtis between light curves or event lists.

    Both inputs get the intersection of their GTIs, and the GTIs are
    applied to the data. Works with both new and old versions of stingray.

    Examples
    --------
    >>> from stingray.events import EventList
    >>> from stingray.lightcurve import Lightcurve
    >>> ev1 = EventList(
    ...     time=np.sort(np.random.uniform(1, 10, 3)), gti=[[1, 10]])
    >>> ev2 = EventList(time=np.sort(np.random.uniform(0, 9, 4)), gti=[[0, 9]])
    >>> e1, e2 = sync_gtis(ev1, ev2)
    >>> np.allclose(e1.gti, [[1, 9]])
    True
    >>> np.allclose(e2.gti, [[1, 9]])
    True
    >>> lc1 = Lightcurve(
    ...     time=[0.5, 1.5, 2.5], counts=[2, 2, 3], dt=1, gti=[[0, 3]])
    >>> lc2 = Lightcurve(
    ...     time=[1.5, 2.5, 3.5, 4.5], counts=[2, 2, 3, 3], dt=1, gti=[[1, 5]])
    >>> lc1._apply_gtis = lc1.apply_gtis
    >>> lc2._apply_gtis = lc2.apply_gtis
    >>> l1, l2 = sync_gtis(lc1, lc2)
    >>> np.allclose(l1.gti, [[1, 3]])
    True
    >>> np.allclose(l2.gti, [[1, 3]])
    True
    """
    common_gti = cross_gtis([lc1.gti, lc2.gti])
    lc1.gti = common_gti
    lc2.gti = common_gti

    if hasattr(lc1, "_apply_gtis"):
        # Compatibility with old versions of stingray
        lc1.apply_gtis = lc1._apply_gtis
        lc2.apply_gtis = lc2._apply_gtis

    if hasattr(lc1, "apply_gtis"):
        lc1.apply_gtis()
        lc2.apply_gtis()

    # Compatibility with old versions of stingray: keep tseg consistent.
    # Both objects share the same gti array, so a single span works for both.
    if hasattr(lc1, "tseg") and lc1.tseg != lc2.tseg:
        span = np.max(lc1.gti) - np.min(lc1.gti)
        lc1.tseg = span
        lc2.tseg = span

    return lc1, lc2
def _format_lc_data(data, type, fftlen=512.0, bintime=1.0):
    """Prepare a light curve or event list for spectral analysis.

    For event lists, GTIs shorter than ``fftlen`` are discarded and the
    events are turned into a list of light curves with bin time ``bintime``.
    For light curves, the curve is rebinned to ``bintime`` if coarser than
    the native bin time, and counts are cast to float.
    """
    if type == "events":
        events = data
        durations = events.gti[:, 1] - events.gti[:, 0]
        # Drop GTIs too short to host a single FFT segment.
        events.gti = events.gti[durations >= fftlen]
        return list(events.to_lc_list(dt=bintime))

    lc = data
    if bintime > lc.dt:
        lcrebin = np.rint(bintime / lc.dt)
        log.info("Rebinning lcs by a factor %d" % lcrebin)
        lc = lc.rebin(bintime)
    # To fix problem with float128
    lc.counts = lc.counts.astype(float)
    return lc
def _distribute_events(events, chunk_length):
    """Split event list in chunks.

    Yields one :class:`EventList` per ``chunk_length``-long interval, with
    per-event attributes (same size as ``time``) sliced accordingly and all
    other attributes copied as-is.

    Examples
    --------
    >>> ev = EventList([1, 2, 3, 4, 5, 6], gti=[[0.5, 6.5]])
    >>> ev.pi = np.ones_like(ev.time)
    >>> ev.mjdref = 56780.
    >>> ev_lists = list(_distribute_events(ev, 2))
    >>> np.allclose(ev_lists[0].time, [1, 2])
    True
    >>> np.allclose(ev_lists[1].time, [3, 4])
    True
    >>> np.allclose(ev_lists[2].time, [5, 6])
    True
    >>> np.allclose(ev_lists[0].gti, [[0.5, 2.5]])
    True
    >>> ev_lists[0].mjdref == ev.mjdref
    True
    >>> ev_lists[2].mjdref == ev.mjdref
    True
    >>> np.allclose(ev_lists[1].pi, [1, 1])
    True
    """
    starts, stops = time_intervals_from_gtis(events.gti, chunk_length)
    n_events = np.size(events.time)

    for t0, t1 in zip(starts, stops):
        idx0, idx1 = np.searchsorted(events.time, [t0, t1])
        new_ev = EventList(
            events.time[idx0:idx1], gti=np.asarray([[t0, t1]])
        )
        for attr, val in events.__dict__.items():
            if attr == "gti":
                continue
            # Attributes with one entry per event get sliced; scalars and
            # everything else are copied unchanged.
            if np.size(val) == n_events:
                val = val[idx0:idx1]
            setattr(new_ev, attr, val)
        yield new_ev
def _provide_periodograms(events, fftlen, dt, norm):
    """Yield one averaged PDS per ``fftlen``-long chunk of ``events``."""
    for ev_chunk in _distribute_events(events, fftlen):
        # Hack: epsilon slightly below zero, to allow for a GTI to be
        # recognized as such
        ev_chunk.gti[:, 1] += dt / 10
        periodogram = AveragedPowerspectrum(
            ev_chunk, dt=dt, segment_size=fftlen, norm=norm, silent=True
        )
        periodogram.fftlen = fftlen
        yield periodogram
def _provide_cross_periodograms(events1, events2, fftlen, dt, norm):
    """Yield one cross spectrum per pair of ``fftlen``-long event chunks.

    Parameters
    ----------
    events1, events2 : event lists
        The two event lists, with compatible GTIs.
    fftlen : float
        Length of the chunks, in seconds.
    dt : float
        Bin time of the light curves used for the FFT.
    norm : str
        Normalization accepted by ``stingray``.
    """
    ev1_iter = _distribute_events(events1, fftlen)
    ev2_iter = _distribute_events(events2, fftlen)
    for new_ev1, new_ev2 in zip(ev1_iter, ev2_iter):
        # Hack: push the GTI end slightly forward so the interval is
        # recognized as a valid GTI.
        new_ev1.gti[:, 1] += dt / 10
        new_ev2.gti[:, 1] += dt / 10
        # Use a context manager for the null device: the original opened
        # os.devnull without ever closing it, leaking one file handle per
        # chunk.
        with open(os.devnull, "w") as devnull:
            with contextlib.redirect_stdout(devnull):
                pds = AveragedCrossspectrum(
                    new_ev1,
                    new_ev2,
                    dt=dt,
                    segment_size=fftlen,
                    norm=norm,
                    silent=True,
                )
        pds.fftlen = fftlen
        yield pds
def calc_pds(
    lcfile,
    fftlen,
    save_dyn=False,
    bintime=1,
    pdsrebin=1,
    normalization="leahy",
    back_ctrate=0.0,
    noclobber=False,
    outname=None,
    save_all=False,
    test=False,
):
    """Calculate the PDS from an input light curve file.

    Parameters
    ----------
    lcfile : str
        The light curve file
    fftlen : float
        The length of the chunks over which FFTs will be calculated, in seconds

    Other Parameters
    ----------------
    save_dyn : bool
        If True, save the dynamical power spectrum
    bintime : float
        The bin time. If different from that of the light curve, a rebinning is
        performed
    pdsrebin : int
        Rebin the PDS of this factor.
    normalization: str
        'Leahy', 'frac', 'rms', or any normalization accepted by ``stingray``.
        Default 'Leahy'
    back_ctrate : float
        The non-source count rate
    noclobber : bool
        If True, do not overwrite existing files
    outname : str
        If speficied, output file name. If not specified or None, the new file
        will have the same root as the input light curve and the '_pds' suffix
    save_all : bool
        If True, save all information contained in the spectrum.
    test : bool
        Only used in testing: force the split analysis on short data sets.

    Returns
    -------
    str
        The name of the output file, or None if it existed and
        ``noclobber`` was set.
    """
    root = hen_root(lcfile)
    if outname is None:
        outname = root + "_pds" + HEN_FILE_EXTENSION

    if noclobber and os.path.exists(outname):
        warnings.warn("File exists, and noclobber option used. Skipping")
        return

    ftype, data = get_file_type(lcfile)
    mjdref = data.mjdref
    instr = data.instr

    # Never use a bin time finer than the native one.
    if hasattr(data, "dt"):
        bintime = max(data.dt, bintime)

    length = data.gti[-1, 1] - data.gti[0, 0]
    nbins = int(length / bintime)

    if ftype == "events" and (test or nbins > 10 ** 7):
        # Too many bins to FFT in one go: analyze chunk by chunk and
        # average the resulting periodograms.
        print("Long observation. Using split analysis")
        total = int(length / fftlen)
        pds = average_periodograms(
            _provide_periodograms(
                data, fftlen, bintime, norm=normalization.lower()
            ),
            total=total,
        )
    else:
        lc_data = _format_lc_data(data, ftype, bintime=bintime, fftlen=fftlen)
        pds = AveragedPowerspectrum(
            lc_data, segment_size=fftlen, norm=normalization.lower()
        )

    if pdsrebin is not None and pdsrebin != 1:
        pds = pds.rebin(pdsrebin)

    pds.instr = instr
    pds.fftlen = fftlen
    pds.back_phots = back_ctrate * fftlen
    pds.mjdref = mjdref

    log.info("Saving PDS to %s" % outname)
    save_pds(pds, outname, save_all=save_all)
    return outname
def calc_cpds(
    lcfile1,
    lcfile2,
    fftlen,
    save_dyn=False,
    bintime=1,
    pdsrebin=1,
    outname="cpds" + HEN_FILE_EXTENSION,
    normalization="leahy",
    back_ctrate=0.0,
    noclobber=False,
    save_all=False,
    test=False,
):
    """Calculate the CPDS from a pair of input light curve files.

    Parameters
    ----------
    lcfile1 : str
        The first light curve file
    lcfile2 : str
        The second light curve file
    fftlen : float
        The length of the chunks over which FFTs will be calculated, in seconds

    Other Parameters
    ----------------
    save_dyn : bool
        If True, save the dynamical power spectrum
    bintime : float
        The bin time. If different from that of the light curve, a rebinning is
        performed
    pdsrebin : int
        Rebin the PDS of this factor.
    normalization : str
        'Leahy', 'frac', 'rms', or any normalization accepted by ``stingray``.
        Default 'Leahy'
    back_ctrate : float
        The non-source count rate
    noclobber : bool
        If True, do not overwrite existing files
    outname : str
        Output file name for the cpds. Default: cpds.[nc|p]
    save_all : bool
        If True, save all information contained in the spectrum.
    test : bool
        Only used in testing: force the split analysis on short data sets.

    Returns
    -------
    str
        The name of the output file, or None if it existed and
        ``noclobber`` was set.
    """
    if noclobber and os.path.exists(outname):
        warnings.warn("File exists, and noclobber option used. Skipping")
        return

    log.info("Loading file %s..." % lcfile1)
    ftype1, lc1 = get_file_type(lcfile1)
    log.info("Loading file %s..." % lcfile2)
    ftype2, lc2 = get_file_type(lcfile2)
    instr1 = lc1.instr
    instr2 = lc2.instr
    if ftype1 != ftype2:
        raise ValueError(
            "Please use similar data files for the two time "
            "series (e.g. both events or both light curves)"
        )
    if hasattr(lc1, "dt"):
        assert lc1.dt == lc2.dt, "Light curves are sampled differently"

    lc1, lc2 = sync_gtis(lc1, lc2)

    # Put both series on the same time reference
    if lc1.mjdref != lc2.mjdref:
        lc2 = lc2.change_mjdref(lc1.mjdref)
    mjdref = lc1.mjdref

    length = lc1.gti[-1, 1] - lc1.gti[0, 0]
    if hasattr(lc1, "dt"):
        bintime = max(lc1.dt, bintime)
    nbins = int(length / bintime)

    if ftype1 == "events" and (test or nbins > 10 ** 7):
        print("Long observation. Using split analysis")
        total = int(length / fftlen)
        cpds = average_periodograms(
            _provide_cross_periodograms(
                lc1, lc2, fftlen, bintime, norm=normalization.lower()
            ),
            total=total,
        )
    else:
        lc1 = _format_lc_data(lc1, ftype1, fftlen=fftlen, bintime=bintime)
        lc2 = _format_lc_data(lc2, ftype2, fftlen=fftlen, bintime=bintime)
        cpds = AveragedCrossspectrum(
            lc1, lc2, segment_size=fftlen, norm=normalization.lower()
        )

    if pdsrebin is not None and pdsrebin != 1:
        cpds = cpds.rebin(pdsrebin)

    cpds.instrs = instr1 + "," + instr2
    cpds.fftlen = fftlen
    cpds.back_phots = back_ctrate * fftlen
    cpds.mjdref = mjdref

    lags, lags_err = cpds.time_lag()
    cpds.lag = lags
    # Bug fix: the original assigned the lag values themselves to lag_err.
    cpds.lag_err = lags_err

    log.info("Saving CPDS to %s" % outname)
    save_pds(cpds, outname, save_all=save_all)
    return outname
def calc_fspec(
    files,
    fftlen,
    do_calc_pds=True,
    do_calc_cpds=True,
    do_calc_cospectrum=True,
    do_calc_lags=True,
    save_dyn=False,
    bintime=1,
    pdsrebin=1,
    outroot=None,
    normalization="leahy",
    nproc=1,
    back_ctrate=0.0,
    noclobber=False,
    ignore_instr=False,
    save_all=False,
    test=False,
):
    r"""Calculate the frequency spectra: the PDS, the cospectrum, ...

    Parameters
    ----------
    files : list of str
        List of input file names
    fftlen : float
        length of chunks to perform the FFT on.

    Other Parameters
    ----------------
    save_dyn : bool
        If True, save the dynamical power spectrum
    bintime : float
        The bin time. If different from that of the light curve, a rebinning is
        performed
    pdsrebin : int
        Rebin the PDS of this factor.
    normalization : str
        'Leahy' [3] or 'rms' [4] [5]. Default 'Leahy'.
    back_ctrate : float
        The non-source count rate
    noclobber : bool
        If True, do not overwrite existing files
    outroot : str
        Output file name root
    nproc : int
        Number of processors to use to parallelize the processing of multiple
        files
    ignore_instr : bool
        Ignore instruments; files are alternated in the two channels

    References
    ----------
    [3] Leahy et al. 1983, ApJ, 266, 160.

    [4] Belloni & Hasinger 1990, A&A, 230, 103

    [5] Miyamoto et al. 1991, ApJ, 383, 784

    """
    log.info("Using %s normalization" % normalization)
    log.info("Using %s processors" % nproc)

    # Keyword arguments shared by all PDS and CPDS calculations.
    common_kwargs = dict(
        fftlen=fftlen,
        save_dyn=save_dyn,
        bintime=bintime,
        pdsrebin=pdsrebin,
        normalization=normalization.lower(),
        back_ctrate=back_ctrate,
        noclobber=noclobber,
        save_all=save_all,
        test=test,
    )

    if do_calc_pds:
        for fname in files:
            wfd = dict(common_kwargs)
            wfd["fname"] = fname
            _wrap_fun_pds(wfd)

    if not do_calc_cpds or len(files) < 2:
        return

    if ignore_instr:
        # Alternate the files between the two channels.
        files1 = files[0::2]
        files2 = files[1::2]
    else:
        log.info("Sorting file list")
        sorted_files = sort_files(files)
        warnings.warn(
            "Beware! For cpds and derivatives, I assume that the "
            "files are from only two instruments and in pairs "
            "(even in random order)"
        )
        instrs = list(sorted_files.keys())
        files1 = sorted_files[instrs[0]]
        files2 = sorted_files[instrs[1]]

    assert len(files1) == len(files2), "An even number of files is needed"

    for i_f, (f1, f2) in enumerate(zip(files1, files2)):
        # Write the CPDS next to the first file of each pair.
        outdir = os.path.dirname(f1) or os.getcwd()
        outr = _assign_value_if_none(
            outroot, common_name(f1, f2, default="%d" % i_f)
        )
        outname = os.path.join(
            outdir,
            outr.replace(HEN_FILE_EXTENSION, "")
            + "_cpds"
            + HEN_FILE_EXTENSION,
        )
        _wrap_fun_cpds([f1, f2, outname, common_kwargs])
def _normalize(array, ref=0):
    """Normalize array in terms of standard deviation.

    Values not larger than ``ref`` are set to zero; the others are shifted
    by ``ref`` and divided by the standard deviation of the whole input.

    Examples
    --------
    >>> n = 10000
    >>> array1 = np.random.normal(0, 1, n)
    >>> array2 = np.random.normal(0, 1, n)
    >>> array = array1 ** 2 + array2 ** 2
    >>> newarr = _normalize(array)
    >>> np.isclose(np.std(newarr), 1, atol=0.0001)
    True
    """
    std = np.std(array)
    normalized = np.zeros_like(array)
    above = array > ref
    normalized[above] = (array[above] - ref) / std
    return normalized
def dumpdyn(fname, plot=False):
    """Placeholder for the dynamical spectrum dump (not implemented yet)."""
    message = (
        "Dynamical power spectrum is being refactored. "
        "Sorry for the inconvenience. In the meantime, "
        "you can load the data into Stingray using "
        "`cs = hendrics.io.load_pds(fname)` and find "
        "the dynamical PDS/CPDS in cs.cs_all"
    )
    raise NotImplementedError(message)
def dumpdyn_main(args=None):
    """Main function called by the `HENdumpdyn` command line script."""
    import argparse

    description = (
        "Dump dynamical (cross) power spectra. "
        "This script is being reimplemented. Please be "
        "patient :)"
    )
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "files",
        help=("List of files in any valid HENDRICS " "format for PDS or CPDS"),
        nargs="+",
    )
    parser.add_argument(
        "--noplot", help="plot results", default=False, action="store_true"
    )
    parsed = parser.parse_args(args)

    for fname in parsed.files:
        dumpdyn(fname, plot=not parsed.noplot)
def main(args=None):
    """Main function called by the `HENfspec` command line script.

    Parses the command line, validates the normalization, and dispatches
    the requested spectra (PDS, CPDS, cospectrum, lags) to `calc_fspec`.
    """
    import argparse
    from .base import _add_default_args, check_negative_numbers_in_args

    description = (
        "Create frequency spectra (PDS, CPDS, cospectrum) "
        "starting from well-defined input ligthcurves"
    )
    parser = argparse.ArgumentParser(description=description)

    parser.add_argument("files", help="List of light curve files", nargs="+")
    parser.add_argument(
        "-b",
        "--bintime",
        type=float,
        default=1 / 4096,
        help="Light curve bin time; if negative, interpreted"
        + " as negative power of 2."
        + " Default: 2^-10, or keep input lc bin time"
        + " (whatever is larger)",
    )
    parser.add_argument(
        "-r",
        "--rebin",
        type=int,
        default=1,
        help="(C)PDS rebinning to apply. Default: none",
    )
    parser.add_argument(
        "-f",
        "--fftlen",
        type=float,
        default=512,
        help="Length of FFTs. Default: 512 s",
    )
    parser.add_argument(
        "-k",
        "--kind",
        type=str,
        default="PDS,CPDS,cos",
        help="Spectra to calculate, as comma-separated list"
        + " (Accepted: PDS and CPDS;"
        + ' Default: "PDS,CPDS")',
    )
    parser.add_argument(
        "--norm",
        type=str,
        default="leahy",
        help="Normalization to use"
        + " (Accepted: leahy and rms;"
        + ' Default: "leahy")',
    )
    parser.add_argument(
        "--noclobber",
        help="Do not overwrite existing files",
        default=False,
        action="store_true",
    )
    parser.add_argument(
        "-o",
        "--outroot",
        type=str,
        default=None,
        help="Root of output file names for CPDS only",
    )
    parser.add_argument(
        "--back",
        help=("Estimated background (non-source) count rate"),
        default=0.0,
        type=float,
    )
    parser.add_argument(
        "--save-dyn",
        help="save dynamical power spectrum",
        default=False,
        action="store_true",
    )
    parser.add_argument(
        "--ignore-instr",
        help="Ignore instrument names in channels",
        default=False,
        action="store_true",
    )
    parser.add_argument(
        "--save-all",
        help="Save all information contained in spectra,"
        " including single pdss and light curves.",
        default=False,
        action="store_true",
    )
    parser.add_argument(
        "--test",
        help="Only to be used in testing",
        default=False,
        action="store_true",
    )
    _add_default_args(parser, ["loglevel", "debug"])

    args = check_negative_numbers_in_args(args)
    args = parser.parse_args(args)

    if args.debug:
        args.loglevel = "DEBUG"

    log.setLevel(args.loglevel)
    with log.log_to_file("HENfspec.log"):
        bintime = np.longdouble(interpret_bintime(args.bintime))

        fftlen = args.fftlen
        pdsrebin = args.rebin
        normalization = args.norm

        if normalization.lower() not in [
            "frac",
            "abs",
            "leahy",
            "none",
            "rms",
        ]:
            warnings.warn("Beware! Unknown normalization!", AstropyUserWarning)
            normalization = "leahy"

        # Bug fix: compare case-insensitively, so that e.g. "RMS" (which
        # passes the validity check above) is also translated to the
        # "frac" normalization understood by stingray.
        if normalization.lower() == "rms":
            normalization = "frac"

        do_cpds = do_pds = do_cos = do_lag = False
        kinds = args.kind.split(",")
        for k in kinds:
            if k == "PDS":
                do_pds = True
            elif k == "CPDS":
                do_cpds = True
            elif k == "cos" or k == "cospectrum":
                do_cos = True
                do_cpds = True
            elif k == "lag":
                do_lag = True
                do_cpds = True

        calc_fspec(
            args.files,
            fftlen,
            do_calc_pds=do_pds,
            do_calc_cpds=do_cpds,
            do_calc_cospectrum=do_cos,
            do_calc_lags=do_lag,
            save_dyn=args.save_dyn,
            bintime=bintime,
            pdsrebin=pdsrebin,
            outroot=args.outroot,
            normalization=normalization,
            nproc=1,
            back_ctrate=args.back,
            noclobber=args.noclobber,
            ignore_instr=args.ignore_instr,
            save_all=args.save_all,
            test=args.test,
        )
| [
"numpy.log10",
"numpy.sqrt",
"stingray.utils.show_progress",
"copy.copy",
"os.path.exists",
"astropy.log.setLevel",
"argparse.ArgumentParser",
"numpy.searchsorted",
"numpy.asarray",
"numpy.max",
"stingray.gti.cross_gtis",
"numpy.rint",
"numpy.min",
"warnings.warn",
"numpy.size",
"os.pa... | [((3815, 3845), 'stingray.gti.cross_gtis', 'cross_gtis', (['[lc1.gti, lc2.gti]'], {}), '([lc1.gti, lc2.gti])\n', (3825, 3845), False, 'from stingray.gti import cross_gtis\n'), ((5697, 5740), 'stingray.gti.time_intervals_from_gtis', 'time_intervals_from_gtis', (['gti', 'chunk_length'], {}), '(gti, chunk_length)\n', (5721, 5740), False, 'from stingray.gti import time_intervals_from_gtis\n'), ((9842, 9880), 'astropy.log.info', 'log.info', (["('Saving PDS to %s' % outname)"], {}), "('Saving PDS to %s' % outname)\n", (9850, 9880), False, 'from astropy import log\n'), ((11274, 11314), 'astropy.log.info', 'log.info', (["('Loading file %s...' % lcfile1)"], {}), "('Loading file %s...' % lcfile1)\n", (11282, 11314), False, 'from astropy import log\n'), ((11360, 11400), 'astropy.log.info', 'log.info', (["('Loading file %s...' % lcfile2)"], {}), "('Loading file %s...' % lcfile2)\n", (11368, 11400), False, 'from astropy import log\n'), ((13035, 13074), 'astropy.log.info', 'log.info', (["('Saving CPDS to %s' % outname)"], {}), "('Saving CPDS to %s' % outname)\n", (13043, 13074), False, 'from astropy import log\n'), ((14643, 14693), 'astropy.log.info', 'log.info', (["('Using %s normalization' % normalization)"], {}), "('Using %s normalization' % normalization)\n", (14651, 14693), False, 'from astropy import log\n'), ((14698, 14737), 'astropy.log.info', 'log.info', (["('Using %s processors' % nproc)"], {}), "('Using %s processors' % nproc)\n", (14706, 14737), False, 'from astropy import log\n'), ((17169, 17182), 'numpy.std', 'np.std', (['array'], {}), '(array)\n', (17175, 17182), True, 'import numpy as np\n'), ((17196, 17216), 'numpy.zeros_like', 'np.zeros_like', (['array'], {}), '(array)\n', (17209, 17216), True, 'import numpy as np\n'), ((17930, 17978), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description'}), '(description=description)\n', (17953, 17978), False, 'import argparse\n'), ((18698, 18746), 'argparse.ArgumentParser', 
'argparse.ArgumentParser', ([], {'description': 'description'}), '(description=description)\n', (18721, 18746), False, 'import argparse\n'), ((21218, 21245), 'astropy.log.setLevel', 'log.setLevel', (['args.loglevel'], {}), '(args.loglevel)\n', (21230, 21245), False, 'from astropy import log\n'), ((1397, 1439), 'stingray.utils.show_progress', 'show_progress', (['fspec_iterable'], {'total': 'total'}), '(fspec_iterable, total=total)\n', (1410, 1439), False, 'from stingray.utils import show_progress\n'), ((2482, 2499), 'numpy.sqrt', 'np.sqrt', (['tot_epds'], {}), '(tot_epds)\n', (2489, 2499), True, 'import numpy as np\n'), ((5815, 5857), 'numpy.searchsorted', 'np.searchsorted', (['events.time', '[start, end]'], {}), '(events.time, [start, end])\n', (5830, 5857), True, 'import numpy as np\n'), ((6502, 6588), 'stingray.powerspectrum.AveragedPowerspectrum', 'AveragedPowerspectrum', (['new_ev'], {'dt': 'dt', 'segment_size': 'fftlen', 'norm': 'norm', 'silent': '(True)'}), '(new_ev, dt=dt, segment_size=fftlen, norm=norm, silent\n =True)\n', (6523, 6588), False, 'from stingray.powerspectrum import AveragedPowerspectrum\n'), ((8696, 8719), 'os.path.exists', 'os.path.exists', (['outname'], {}), '(outname)\n', (8710, 8719), False, 'import os\n'), ((8729, 8794), 'warnings.warn', 'warnings.warn', (['"""File exists, and noclobber option used. Skipping"""'], {}), "('File exists, and noclobber option used. Skipping')\n", (8742, 8794), False, 'import warnings\n'), ((11155, 11178), 'os.path.exists', 'os.path.exists', (['outname'], {}), '(outname)\n', (11169, 11178), False, 'import os\n'), ((11188, 11253), 'warnings.warn', 'warnings.warn', (['"""File exists, and noclobber option used. Skipping"""'], {}), "('File exists, and noclobber option used. 
Skipping')\n", (11201, 11253), False, 'import warnings\n'), ((15465, 15494), 'astropy.log.info', 'log.info', (['"""Sorting file list"""'], {}), "('Sorting file list')\n", (15473, 15494), False, 'from astropy import log\n'), ((15545, 15695), 'warnings.warn', 'warnings.warn', (['"""Beware! For cpds and derivatives, I assume that the files are from only two instruments and in pairs (even in random order)"""'], {}), "(\n 'Beware! For cpds and derivatives, I assume that the files are from only two instruments and in pairs (even in random order)'\n )\n", (15558, 15695), False, 'import warnings\n'), ((16330, 16349), 'os.path.dirname', 'os.path.dirname', (['f1'], {}), '(f1)\n', (16345, 16349), False, 'import os\n'), ((21256, 21287), 'astropy.log.log_to_file', 'log.log_to_file', (['"""HENfspec.log"""'], {}), "('HENfspec.log')\n", (21271, 21287), False, 'from astropy import log\n'), ((1852, 1871), 'copy.copy', 'copy.copy', (['contents'], {}), '(contents)\n', (1861, 1871), False, 'import copy\n'), ((1905, 1928), 'numpy.all', 'np.all', (['(rebin == rebin0)'], {}), '(rebin == rebin0)\n', (1911, 1928), True, 'import numpy as np\n'), ((4264, 4279), 'numpy.max', 'np.max', (['lc1.gti'], {}), '(lc1.gti)\n', (4270, 4279), True, 'import numpy as np\n'), ((4282, 4297), 'numpy.min', 'np.min', (['lc1.gti'], {}), '(lc1.gti)\n', (4288, 4297), True, 'import numpy as np\n'), ((4317, 4332), 'numpy.max', 'np.max', (['lc1.gti'], {}), '(lc1.gti)\n', (4323, 4332), True, 'import numpy as np\n'), ((4335, 4350), 'numpy.min', 'np.min', (['lc1.gti'], {}), '(lc1.gti)\n', (4341, 4350), True, 'import numpy as np\n'), ((4721, 4745), 'numpy.rint', 'np.rint', (['(bintime / lc.dt)'], {}), '(bintime / lc.dt)\n', (4728, 4745), True, 'import numpy as np\n'), ((4758, 4808), 'astropy.log.info', 'log.info', (["('Rebinning lcs by a factor %d' % lcrebin)"], {}), "('Rebinning lcs by a factor %d' % lcrebin)\n", (4766, 4808), False, 'from astropy import log\n'), ((7144, 7240), 
'stingray.crossspectrum.AveragedCrossspectrum', 'AveragedCrossspectrum', (['new_ev1', 'new_ev2'], {'dt': 'dt', 'segment_size': 'fftlen', 'norm': 'norm', 'silent': '(True)'}), '(new_ev1, new_ev2, dt=dt, segment_size=fftlen, norm=\n norm, silent=True)\n', (7165, 7240), False, 'from stingray.crossspectrum import AveragedCrossspectrum\n'), ((16396, 16407), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16405, 16407), False, 'import os\n'), ((21612, 21679), 'warnings.warn', 'warnings.warn', (['"""Beware! Unknown normalization!"""', 'AstropyUserWarning'], {}), "('Beware! Unknown normalization!', AstropyUserWarning)\n", (21625, 21679), False, 'import warnings\n'), ((5927, 5953), 'numpy.asarray', 'np.asarray', (['[[start, end]]'], {}), '([[start, end]])\n', (5937, 5953), True, 'import numpy as np\n'), ((6118, 6130), 'numpy.size', 'np.size', (['val'], {}), '(val)\n', (6125, 6130), True, 'import numpy as np\n'), ((6134, 6154), 'numpy.size', 'np.size', (['events.time'], {}), '(events.time)\n', (6141, 6154), True, 'import numpy as np\n'), ((2125, 2145), 'numpy.log10', 'np.log10', (['(1 / fftlen)'], {}), '(1 / fftlen)\n', (2133, 2145), True, 'import numpy as np\n')] |
import theano
import theano.tensor as tensor
from util import ortho_weight, norm_weight, tanh, rectifier, linear
import numpy
from utils import _p
# LSTM layer
def param_init_lstm(options, params, prefix='lstm', nin=None, dim=None):
    """Initialise LSTM parameters: input weights W, recurrent weights U, bias b.

    The four per-gate matrices are stacked side by side so that a single
    dot product later computes all gate pre-activations at once.
    Missing dimensions default to options['dim_proj'].
    """
    if nin is None:
        nin = options['dim_proj']
    if dim is None:
        dim = options['dim_proj']
    # Four input-to-hidden matrices (one per gate), concatenated column-wise.
    W = numpy.concatenate([norm_weight(nin, dim) for _ in range(4)], axis=1)
    params[_p(prefix, 'W')] = W
    # Four hidden-to-hidden matrices, orthogonally initialised.
    U = numpy.concatenate([ortho_weight(dim) for _ in range(4)], axis=1)
    params[_p(prefix, 'U')] = U
    # One stacked bias vector covering all four gates.
    params[_p(prefix, 'b')] = numpy.zeros((4 * dim,)).astype('float32')
    return params
# This function implements the lstm forward propagation
def lstm_layer(tparams, state_below, options, prefix='lstm', mask=None, **kwargs):
    """LSTM forward propagation over *state_below* via theano.scan.

    state_below: (nsteps, n_samples, nin) for mini-batches, or (nsteps, nin)
    during sampling (ndim == 2).
    Returns the scan outputs [h, c, i, f, o, preact] over all time steps.
    """
    nsteps = state_below.shape[0]
    dim = tparams[_p(prefix,'U')].shape[0]
    # if we are dealing with a mini-batch
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
        init_state = tensor.alloc(0., n_samples, dim)
        init_memory = tensor.alloc(0., n_samples, dim)
    # during sampling
    else:
        n_samples = 1
        init_state = tensor.alloc(0., dim)
        init_memory = tensor.alloc(0., dim)
    # If we have no mask, we assume all the inputs are valid.
    # `is None` instead of `== None`: identity test per PEP 8, and it cannot
    # misfire if a numpy array is ever passed as the mask.
    if mask is None:
        mask = tensor.alloc(1., state_below.shape[0], 1)
    # use the slice to calculate all the different gates
    def _slice(_x, n, dim):
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        elif _x.ndim == 2:
            return _x[:, n*dim:(n+1)*dim]
        return _x[n*dim:(n+1)*dim]
    # one time step of the lstm
    # NOTE(review): the mask slice m_ is received but never applied to h/c —
    # masked steps are not frozen. Confirm this is intentional.
    def _step(m_, x_, h_, c_):
        preact = tensor.dot(h_, tparams[_p(prefix, 'U')])
        preact += x_
        i = tensor.nnet.sigmoid(_slice(preact, 0, dim))
        f = tensor.nnet.sigmoid(_slice(preact, 1, dim))
        o = tensor.nnet.sigmoid(_slice(preact, 2, dim))
        c = tensor.tanh(_slice(preact, 3, dim))
        c = f * c_ + i * c
        h = o * tensor.tanh(c)
        return h, c, i, f, o, preact
    # Precompute the input contribution for all steps in one big dot product.
    state_below = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
    rval, updates = theano.scan(_step,
                                sequences=[mask, state_below],
                                outputs_info=[init_state, init_memory, None, None, None, None],
                                name=_p(prefix, '_layers'),
                                n_steps=nsteps, profile=False)
    return rval
| [
"util.norm_weight",
"numpy.zeros",
"theano.tensor.alloc",
"utils._p",
"theano.tensor.tanh",
"util.ortho_weight"
] | [((701, 716), 'utils._p', '_p', (['prefix', '"""W"""'], {}), "(prefix, 'W')\n", (703, 716), False, 'from utils import _p\n'), ((966, 981), 'utils._p', '_p', (['prefix', '"""U"""'], {}), "(prefix, 'U')\n", (968, 981), False, 'from utils import _p\n'), ((997, 1012), 'utils._p', '_p', (['prefix', '"""b"""'], {}), "(prefix, 'b')\n", (999, 1012), False, 'from utils import _p\n'), ((1428, 1461), 'theano.tensor.alloc', 'tensor.alloc', (['(0.0)', 'n_samples', 'dim'], {}), '(0.0, n_samples, dim)\n', (1440, 1461), True, 'import theano.tensor as tensor\n'), ((1483, 1516), 'theano.tensor.alloc', 'tensor.alloc', (['(0.0)', 'n_samples', 'dim'], {}), '(0.0, n_samples, dim)\n', (1495, 1516), True, 'import theano.tensor as tensor\n'), ((1591, 1613), 'theano.tensor.alloc', 'tensor.alloc', (['(0.0)', 'dim'], {}), '(0.0, dim)\n', (1603, 1613), True, 'import theano.tensor as tensor\n'), ((1635, 1657), 'theano.tensor.alloc', 'tensor.alloc', (['(0.0)', 'dim'], {}), '(0.0, dim)\n', (1647, 1657), True, 'import theano.tensor as tensor\n'), ((1755, 1797), 'theano.tensor.alloc', 'tensor.alloc', (['(1.0)', 'state_below.shape[0]', '(1)'], {}), '(1.0, state_below.shape[0], 1)\n', (1767, 1797), True, 'import theano.tensor as tensor\n'), ((512, 533), 'util.norm_weight', 'norm_weight', (['nin', 'dim'], {}), '(nin, dim)\n', (523, 533), False, 'from util import ortho_weight, norm_weight, tanh, rectifier, linear\n'), ((561, 582), 'util.norm_weight', 'norm_weight', (['nin', 'dim'], {}), '(nin, dim)\n', (572, 582), False, 'from util import ortho_weight, norm_weight, tanh, rectifier, linear\n'), ((610, 631), 'util.norm_weight', 'norm_weight', (['nin', 'dim'], {}), '(nin, dim)\n', (621, 631), False, 'from util import ortho_weight, norm_weight, tanh, rectifier, linear\n'), ((659, 680), 'util.norm_weight', 'norm_weight', (['nin', 'dim'], {}), '(nin, dim)\n', (670, 680), False, 'from util import ortho_weight, norm_weight, tanh, rectifier, linear\n'), ((789, 806), 'util.ortho_weight', 'ortho_weight', 
(['dim'], {}), '(dim)\n', (801, 806), False, 'from util import ortho_weight, norm_weight, tanh, rectifier, linear\n'), ((835, 852), 'util.ortho_weight', 'ortho_weight', (['dim'], {}), '(dim)\n', (847, 852), False, 'from util import ortho_weight, norm_weight, tanh, rectifier, linear\n'), ((881, 898), 'util.ortho_weight', 'ortho_weight', (['dim'], {}), '(dim)\n', (893, 898), False, 'from util import ortho_weight, norm_weight, tanh, rectifier, linear\n'), ((927, 944), 'util.ortho_weight', 'ortho_weight', (['dim'], {}), '(dim)\n', (939, 944), False, 'from util import ortho_weight, norm_weight, tanh, rectifier, linear\n'), ((1015, 1038), 'numpy.zeros', 'numpy.zeros', (['(4 * dim,)'], {}), '((4 * dim,))\n', (1026, 1038), False, 'import numpy\n'), ((2461, 2475), 'theano.tensor.tanh', 'tensor.tanh', (['c'], {}), '(c)\n', (2472, 2475), True, 'import theano.tensor as tensor\n'), ((2593, 2608), 'utils._p', '_p', (['prefix', '"""b"""'], {}), "(prefix, 'b')\n", (2595, 2608), False, 'from utils import _p\n'), ((2846, 2867), 'utils._p', '_p', (['prefix', '"""_layers"""'], {}), "(prefix, '_layers')\n", (2848, 2867), False, 'from utils import _p\n'), ((1268, 1283), 'utils._p', '_p', (['prefix', '"""U"""'], {}), "(prefix, 'U')\n", (1270, 1283), False, 'from utils import _p\n'), ((2161, 2176), 'utils._p', '_p', (['prefix', '"""U"""'], {}), "(prefix, 'U')\n", (2163, 2176), False, 'from utils import _p\n'), ((2565, 2580), 'utils._p', '_p', (['prefix', '"""W"""'], {}), "(prefix, 'W')\n", (2567, 2580), False, 'from utils import _p\n')] |
# @author: <NAME>
import math
import numpy as np
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
class OpenGLManager:
    """Pygame/PyOpenGL helper: owns the window, lighting, camera state and
    a collection of 2D/3D drawing routines (spheres, cubes, vectors, text,
    simple 2D graphs)."""
    """ General parameters """
    display_size = (1400, 800)  # Size of the window to open
    sphere_slices = 10  # Sphere subdivisions (higher -> better quality)
    text_pos = (10, 750)  # Initial position of the caption text
    text_dP = 175  # Distance between text lines
    D_RENDER_DISTANCE = 100  # Maximum render distance
    STROKE_W = 2.  # Line width
    """ Ilumination parameters """
    # Position of the light source
    LIGHT_ZERO_POSITION = [0, 0, 24, .5]
    # Color of the light source
    LIGHT_ZERO_COLOR = [1., 1., 1., 1.]
    # Ambient component of the light source (RGBA)
    LIGHT_ZERO_AMBIENT = [1., 1., 1., .1]
    # Specular component of the light source (RGBA)
    LIGHT_ZERO_SPECULAR = [1., 1., 1., .5]
    """ Camera parameters """
    cam_pos = [0, 0, 0]  # Camera (view) position
    cam_rot = [0, 0, 0]  # Camera rotation
    # Camera rotation around the x axis (view)
    camera_rot_x = 30.
    # Camera rotation around the y axis (view)
    camera_rot_y = 30.
    """ Colors """
    COLOR_BLACK = [0., 0., 0., 1.]
    COLOR_WHITE = [1., 1., 1., 1.]
    cube_color = [1., 0., 0., 1.]  # Color of the cube wireframe
    vector_color = [1, 1, 1, 1]  # Sphere color
    # Shadow (diffuse) color of the sphere
    sphere_diffuse_color = [.01, .01, .01, 1.]
    sphere_ambient_color = [.1, .1, .1, .1]  # Sphere (ambient) color
    # Reflection (specular) color of the sphere
    sphere_specular_color = [.01, .01, .01, 1.1]
    sphere_collision_diffuse_color = [1., .0, 0., 1.]  # Color of the balls
    sphere_collision_ambient_color = [.5, .0,
                                      0., .1]  # Ambient(?) color of the balls
    def __init__(self, display_title, bg_color=COLOR_WHITE):
        """Store window settings and derive foreground colors; the window
        itself is only opened by init_display()."""
        self.running = False
        self.display_title = display_title
        self.render_distance = OpenGLManager.D_RENDER_DISTANCE
        self.bg_color = np.array(bg_color)
        self.captions = []  # text lines rendered by draw_captions()
        self.init_colors()
    def init_colors(self):
        """Pick text/vector/point colors that contrast with the background."""
        self.vector_color = self.text_color = self.p_color = np.array([1., 1., 1., 2.]) - self.bg_color
    def init_display(self):
        """Open the pygame/OpenGL window and configure lighting and camera.

        Returns True; returns immediately if the display is already running.
        """
        if self.running:
            return True
        # Init Window
        pygame.init()
        pygame.display.set_mode(
            self.display_size, pygame.DOUBLEBUF | OPENGL)
        pygame.display.set_caption(self.display_title)
        pygame.display.gl_set_attribute(GL_ACCELERATED_VISUAL, True)
        # Config window
        glClearColor(self.bg_color[0], self.bg_color[1],
                     self.bg_color[2], self.bg_color[3])
        glShadeModel(GL_SMOOTH)
        glEnable(GL_CULL_FACE)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_LIGHTING)
        glCullFace(GL_BACK)
        # Init light
        glLightfv(GL_LIGHT0, GL_AMBIENT, self.LIGHT_ZERO_AMBIENT)
        glLightfv(GL_LIGHT0, GL_POSITION, self.LIGHT_ZERO_POSITION)
        glLightfv(GL_LIGHT0, GL_DIFFUSE, self.LIGHT_ZERO_COLOR)
        glLightfv(GL_LIGHT0, GL_SPECULAR, self.LIGHT_ZERO_SPECULAR)
        glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, 0.5)
        glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0.01)
        glEnable(GL_LIGHT0)
        # Init Camera
        glMatrixMode(GL_PROJECTION)
        gluPerspective(
            45, (self.display_size[0] / self.display_size[1]), 0.1, self.render_distance)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        # Pos Camera
        self.set_cam_pos(self.cam_pos)
        self.rotate_cam(self.cam_rot)
        self.running = True
        return True
    def set_cam_pos(self, cam_pos):
        """Place the camera at *cam_pos* and cache the resulting modelview matrix."""
        self.cam_pos = cam_pos
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
        glPushMatrix()
        glTranslatef(cam_pos[0], cam_pos[1], cam_pos[2])
        self.viewMatrix = glGetFloatv(GL_MODELVIEW_MATRIX)
    def move_cam(self, cam_move):
        """Translate the camera by *cam_move*, preserving the current rotation."""
        # init model view matrix
        glLoadIdentity()
        # init the view matrix
        glPushMatrix()
        glLoadIdentity()
        glTranslatef(cam_move[0], cam_move[1], cam_move[2])
        # apply rotation
        glRotatef(self.cam_rot[0], 1., 0., 0.)
        glRotatef(self.cam_rot[1], 0., 1., 0.)
        # multiply by the stored view matrix and keep the combined result
        glMultMatrixf(self.viewMatrix)
        self.viewMatrix = glGetFloatv(GL_MODELVIEW_MATRIX)
        # apply view matrix
        glPopMatrix()
        glMultMatrixf(self.viewMatrix)
    def rotate_cam(self, cam_rot):
        """Set the camera rotation (degrees about x and y) and update the view matrix."""
        self.cam_rot = cam_rot
        # init model view matrix
        glLoadIdentity()
        # apply the look up and down
        glRotatef(cam_rot[0], 1., 0., 0.)
        glRotatef(cam_rot[1], 0., 1., 0.)
        # multiply by the stored view matrix and keep the combined result
        glMultMatrixf(self.viewMatrix)
        self.viewMatrix = glGetFloatv(GL_MODELVIEW_MATRIX)
    def clear_buffer(self):
        """Clear the color and depth buffers."""
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    def swap_buffers(self):
        """Present the rendered frame (GLUT swap + pygame double-buffer flip)."""
        glutSwapBuffers()
        pygame.display.flip()
    def wait(self, time):
        """Sleep for *time* milliseconds (fractions are truncated)."""
        pygame.time.wait(int(time))
    def draw_captions(self):
        """Render self.captions as stroked text lines in a temporary 2D projection."""
        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        gluOrtho2D(0.0, self.display_size[0], 0.0, self.display_size[1])
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glEnable(GL_BLEND)
        glEnable(GL_LINE_SMOOTH)
        glLineWidth(OpenGLManager.STROKE_W)
        glTranslatef(self.text_pos[0], self.text_pos[1], 0)
        glScalef(0.1, 0.1, 0.1)
        glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, self.text_color)
        # One line of stroked characters per caption, stepping down by text_dP.
        for i, caption in enumerate(self.captions):
            glPushMatrix()
            glTranslatef(0,
                         - i*self.text_dP, 0)
            for j in range(len(caption)):
                glutStrokeCharacter(GLUT_STROKE_MONO_ROMAN,
                                    ord(caption[j]))
            glPopMatrix()
        """ Making sure we can render 3d again """
        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
    def draw_solid_sphere(self, sphere_pos, sphere_radius, sphere_ambient_color=sphere_ambient_color):
        """Draw a shaded sphere at *sphere_pos* with the given radius."""
        glPushMatrix()
        K = 1.5  # diffuse color = K * ambient color (alpha fixed at 1)
        glMaterialfv(GL_FRONT, GL_AMBIENT, sphere_ambient_color)
        glMaterialfv(GL_FRONT, GL_DIFFUSE, [
            sphere_ambient_color[0]*K,
            sphere_ambient_color[1]*K,
            sphere_ambient_color[2]*K,
            1])
        glMaterialfv(GL_FRONT, GL_SHININESS, 5)
        glMaterialfv(GL_FRONT, GL_SPECULAR, self.sphere_specular_color)
        glTranslatef(sphere_pos[0], sphere_pos[1], sphere_pos[2])
        glutSolidSphere(sphere_radius, self.sphere_slices, self.sphere_slices)
        glPopMatrix()
    def draw_cube_frame(self, cube_length, cube_color=cube_color):
        """Draw a wireframe cube of side *cube_length* centered at the origin."""
        glPushMatrix()
        glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, cube_color)
        glTranslatef(0, 0, 0)
        glutWireCube(cube_length)
        glPopMatrix()
    def draw_vector(self, p_0, p_1, vector_color = None):
        """Draw the vector *p_1* anchored at point *p_0*, with a small sphere at the tip."""
        if vector_color is None:
            vector_color = self.p_color
        glPushMatrix()
        # Config stroke
        glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, vector_color)
        glEnable(GL_BLEND)
        glEnable(GL_LINE_SMOOTH)
        glLineWidth(OpenGLManager.STROKE_W)
        # Draw Stroke
        glBegin(GL_LINES)
        glVertex3f(p_0[0], p_0[1], p_0[2])
        # glVertex3f(p_1[0], p_1[1], p_1[2])
        glVertex3f(p_0[0]+p_1[0], p_0[1]+p_1[1], p_0[2]+p_1[2])
        glEnd()
        # Draw Point
        glTranslatef(p_0[0]+p_1[0], p_0[1]+p_1[1], p_0[2]+p_1[2])
        glutSolidSphere(.02, 6, 6)
        glPopMatrix()
    def draw_line(self, points, line_width=0, color=None):
        """Draw a polyline through *points*. NOTE(review): line_width is unused;
        the stroke always uses STROKE_W — confirm intent."""
        if color is None:
            color = self.p_color
        glPushMatrix()
        # Config stroke
        glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, color)
        glEnable(GL_BLEND)
        glEnable(GL_LINE_SMOOTH)
        glLineWidth(OpenGLManager.STROKE_W)
        # Draw Stroke
        glBegin(GL_LINE_STRIP_ADJACENCY)
        for point in points:
            glVertex3f(point[0], point[1], point[2])
        glEnd()
        glPopMatrix()
    def draw_2d_graph(self, g_pos, g_size, g_scale, g_min, g_points, caption):
        """Draw a framed 2D graph of *g_points* with a stroked *caption*,
        inside a temporary orthographic (2D) projection.

        g_pos: lower-left corner in pixels; g_size: box size in pixels;
        each point is mapped as (value - g_min) * g_scale per axis.
        """
        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        gluOrtho2D(0.0, self.display_size[0], 0.0, self.display_size[1])
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glEnable(GL_BLEND)
        glEnable(GL_LINE_SMOOTH)
        glLineWidth(OpenGLManager.STROKE_W)
        # Draw Graphs points
        glPushMatrix()
        glTranslatef(g_pos[0], g_pos[1], 0)
        glScalef(1, 1, 1)
        glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, self.text_color)
        glBegin(GL_LINE_STRIP)
        for point in g_points:
            glVertex2d((point[0] - g_min[0])*g_scale[0], (point[1] - g_min[1])*g_scale[1])
        glEnd()
        glPopMatrix()
        # Draw Graph Box
        glPushMatrix()
        glLineWidth(OpenGLManager.STROKE_W*2)
        glBegin(GL_LINE_STRIP)
        glVertex2f(g_pos[0], g_pos[1])
        glVertex2f(g_pos[0] + g_size[0], g_pos[1])
        glVertex2f(g_pos[0] + g_size[0], g_pos[1] + g_size[1])
        glVertex2f(g_pos[0], g_pos[1] + g_size[1])
        glVertex2f(g_pos[0], g_pos[1])
        glEnd()
        glPopMatrix()
        # Draw caption
        glPushMatrix()
        glLineWidth(OpenGLManager.STROKE_W)
        glTranslatef(g_pos[0], g_pos[1] + g_size[1] + 5, 0)
        glScalef(0.1, 0.1, 0.1)
        for j in range(len(caption)):
            glutStrokeCharacter(GLUT_STROKE_MONO_ROMAN, ord(caption[j]))
        glPopMatrix()
        """ Making sure we can render 3d again """
        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
| [
"pygame.init",
"pygame.display.set_mode",
"pygame.display.flip",
"numpy.array",
"pygame.display.gl_set_attribute",
"pygame.display.set_caption"
] | [((2087, 2105), 'numpy.array', 'np.array', (['bg_color'], {}), '(bg_color)\n', (2095, 2105), True, 'import numpy as np\n'), ((2410, 2423), 'pygame.init', 'pygame.init', ([], {}), '()\n', (2421, 2423), False, 'import pygame\n'), ((2432, 2501), 'pygame.display.set_mode', 'pygame.display.set_mode', (['self.display_size', '(pygame.DOUBLEBUF | OPENGL)'], {}), '(self.display_size, pygame.DOUBLEBUF | OPENGL)\n', (2455, 2501), False, 'import pygame\n'), ((2523, 2569), 'pygame.display.set_caption', 'pygame.display.set_caption', (['self.display_title'], {}), '(self.display_title)\n', (2549, 2569), False, 'import pygame\n'), ((2578, 2638), 'pygame.display.gl_set_attribute', 'pygame.display.gl_set_attribute', (['GL_ACCELERATED_VISUAL', '(True)'], {}), '(GL_ACCELERATED_VISUAL, True)\n', (2609, 2638), False, 'import pygame\n'), ((5245, 5266), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (5264, 5266), False, 'import pygame\n'), ((2249, 2279), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 2.0]'], {}), '([1.0, 1.0, 1.0, 2.0])\n', (2257, 2279), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from uncertainties import ufloat
import uncertainties
from uncertainties.unumpy import uarray
from scipy.optimize import curve_fit
import os
# print("Cwd:", os.getcwd())
# print("Using matplotlibrc from ", mpl.matplotlib_fname())
# Module-level figure and axes shared by autoplot(); created once at import time.
fig = plt.figure()
clear = plt.close()  # NOTE(review): plt.close() returns None, so `clear` is always None — confirm intent
ax = fig.add_subplot(111)
def gimmeTHATcolumn(array, k):
    """Extract the k-th column of a 2D array-like.

    :param array: sequence of rows (list of lists, 2D ndarray, ...).
    :param k: column index.
    :return: list with the column elements, one per row.
    """
    # Comprehension replaces the former range(len(...)) + append loop.
    return [row[k] for row in array]
def meanDistance(x):
    """Accumulate the consecutive-pair differences of *x*, each divided by
    len(x), then divide the total by len(x) again.

    NOTE(review): the result is divided by len(x) twice overall, which looks
    suspicious for a "mean distance"; behavior preserved as-is.
    """
    pts = np.array(x)
    n = len(pts)
    acc = 0
    for left, right in zip(pts, pts[1:]):
        acc += (right - left) / n
    return acc / n
def autoplot(xValues, yValues, xLabel, yLabel, plotLabel="", errorbars=True, plotStyle='ro', errorStyle='g,', yScale='linear', **furtherPlotArgs):
    """Plot yValues against xValues on the module-level axes and return the figure.

    ufloat inputs are split into nominal values and standard deviations,
    which are then drawn as error bars.

    :param errorbars=True: Plots error bars when true (requires ufloat input).
    :param yScale: matplotlib y-axis scale, e.g. 'log', 'linear'.
    :raises ValueError: if errorbars is requested but neither axis carries
        uncertainties.
    """
    xValues = np.array(xValues)
    yValues = np.array(yValues)
    errX = None
    errY = None
    # Split ufloats into nominal values + standard deviations.
    if type(xValues[0]) == uncertainties.Variable or type(xValues[0]) == uncertainties.AffineScalarFunc:
        x = [item.nominal_value for item in xValues]
        errX = [item.std_dev for item in xValues]
    else:
        x = xValues
    if type(yValues[0]) == uncertainties.Variable or type(yValues[0]) == uncertainties.AffineScalarFunc:
        y = [item.nominal_value for item in yValues]
        errY = [item.std_dev for item in yValues]
    else:
        y = yValues
    ax.set_yscale(yScale)
    # Pad the axis limits by 1.5% of the data range.
    x_offset = (max(x) - min(x)) * 0.015
    ax.set_xlim(min(x) - x_offset, max(x) + x_offset)
    if yScale != 'log':
        y_offset = (max(y) - min(y)) * 0.015
        ax.set_ylim(min(y) - y_offset, max(y) + y_offset)
    ax.set_xlabel(xLabel)
    ax.set_ylabel(yLabel)
    ax.legend(loc='best')
    if errorbars:
        # `is not None` instead of `!= None` (PEP 8); also removed a leftover
        # debug print of errY.
        if errX is not None and errY is not None:
            plt.errorbar(x, y, xerr=errX, yerr=errY, fmt=errorStyle)
        elif errY is not None:
            plt.errorbar(x, y, yerr=errY, fmt=errorStyle)
        elif errX is not None:
            plt.errorbar(x, y, xerr=errX, fmt=errorStyle)
        else:
            # Was `raise "..."` — raising a string is a TypeError in Python 3.
            raise ValueError("Should draw errorbars, but x, y are not ufloats!")
    ax.plot(x, y, plotStyle, label=plotLabel, **furtherPlotArgs)
    fig.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
    return fig
def linearFit(x, a, b):
    """Straight-line model a*x + b, in the signature expected by curve_fit."""
    return a * x + b
def isUfloat(var):
    """True when *var* is an uncertainties scalar (Variable or AffineScalarFunc)."""
    ufloat_types = (uncertainties.core.Variable, uncertainties.core.AffineScalarFunc)
    return type(var) in ufloat_types
# Generous iteration cap forwarded to scipy.optimize.curve_fit.
maxfev = 1000000
def autofit(x, y, fitFunction, p0=None):
    """Fit *fitFunction* to the data and return its parameters as ufloats.

    If *y* holds ufloats, their std_devs are used as absolute sigmas in a
    weighted fit; otherwise an unweighted fit is performed.

    :param p0: optional initial parameter guess forwarded to curve_fit.
    :return: uarray pairing each fitted parameter with its 1-sigma error
        (square roots of the covariance diagonal).
    """
    if isUfloat(y[0]):
        ny = [i.nominal_value for i in y]
        dy = [i.std_dev for i in y]
        params, covariance = curve_fit(fitFunction, x, ny, sigma=dy, absolute_sigma=True,
                                       p0=p0, maxfev=maxfev)
    else:
        params, covariance = curve_fit(fitFunction, x, y, p0=p0, maxfev=maxfev)
    errors = np.sqrt(np.diag(covariance))
    return uarray(params, errors)
def array(values, offset, magnitude):
    """Return numpy array
    offset: is added to all items
    magnitude: all items are multiplied by 10^magnitude

    NOTE(review): this name is shadowed by the later `from numpy import ... array`.
    """
    shifted = np.asarray(values, dtype=float) + offset
    return shifted * 10 ** magnitude
def mean(values):
    """Arithmetic mean of *values*."""
    data = np.array(values)
    return sum(data) / len(data)
def stdDev(values):
    """Estimated (sample) standard deviation, normalised by n - 1."""
    data = np.array(values)
    centre = mean(data)
    # Sum of squared deviations from the mean (same left-to-right order
    # as the original accumulation loop).
    ssd = sum((point - centre) ** 2 for point in data)
    return np.sqrt(1 / (len(data) - 1) * ssd)
def stdDevOfMean(values):
    """Estimated standard deviation of the mean (the important one!):
    stdDev(values) / sqrt(n)."""
    sample_size = len(values)
    return stdDev(values) / np.sqrt(sample_size)
def errorString(value):
    """Render a ufloat-like value as '<nominal>±<std_dev>'."""
    pieces = [str(value.nominal_value), str(value.std_dev)]
    return "±".join(pieces)
def abweichung(value, lit):
    """Percentage deviation of an experimental *value* (ufloat-like) from the
    literature value *lit*, formatted with three decimals and a '%' sign."""
    rel_dev_percent = (lit - value.nominal_value) / lit * 100
    return '{:.3f}'.format(rel_dev_percent) + "%"
def modifiyItems(dic, keyFunction, valueFunction):
    """Build a new dict applying keyFunction(key, value) / valueFunction(key, value)
    to every item of *dic*."""
    result = {}
    for key, value in dic.items():
        new_key = keyFunction(key, value)
        result[new_key] = valueFunction(key, value)
    return result
# find peaks
import sys
from numpy import NaN, Inf, arange, isscalar, asarray, array
def peakdet(v, delta, x=None):
    """Detect local maxima and minima ("peaks") in the vector *v*.

    Converted from the MATLAB script at http://billauer.co.il/peakdet.html
    (released to the public domain by its author; any use is allowed).

    A point is a maximum peak if it holds the maximal value seen so far and
    is later followed by a value lower by at least *delta* (minima are
    detected symmetrically).

    :param v: sequence of values to scan.
    :param delta: positive scalar; required drop/rise to confirm a peak.
    :param x: optional positions; defaults to the indices of *v*.
    :return: (maxtab, mintab) — two arrays of (position, value) rows.
    """
    peaks_max = []
    peaks_min = []
    if x is None:
        x = arange(len(v))
    v = asarray(v)
    # Argument sanity checks (kept as sys.exit, matching the original port).
    if len(v) != len(x):
        sys.exit('Input vectors v and x must have same length')
    if not isscalar(delta):
        sys.exit('Input argument delta must be a scalar')
    if delta <= 0:
        sys.exit('Input argument delta must be positive')
    cur_min, cur_max = Inf, -Inf
    pos_min, pos_max = NaN, NaN
    hunting_max = True
    for i in arange(len(v)):
        value = v[i]
        if value > cur_max:
            cur_max, pos_max = value, x[i]
        if value < cur_min:
            cur_min, pos_min = value, x[i]
        if hunting_max and value < cur_max - delta:
            # Dropped far enough below the running maximum: confirm it.
            peaks_max.append((pos_max, cur_max))
            cur_min, pos_min = value, x[i]
            hunting_max = False
        elif not hunting_max and value > cur_min + delta:
            # Rose far enough above the running minimum: confirm it.
            peaks_min.append((pos_min, cur_min))
            cur_max, pos_max = value, x[i]
            hunting_max = True
    return array(peaks_max), array(peaks_min)
# if __name__=="__main__":
# from matplotlib.pyplot import plot, scatter, show
# series = [0,0,0,2,0,0,0,-2,0,0,0,2,0,0,0,-2,0]
# maxtab, mintab = peakdet(series,.3)
# plot(series)
# scatter(array(maxtab)[:,0], array(maxtab)[:,1], color='blue')
# scatter(array(mintab)[:,0], array(mintab)[:,1], color='red')
# show()
def getPeakVal(peaksmax):
    """Split a list of (x, y) peak pairs into separate x and y lists.

    :param peaksmax: sequence of (position, value) pairs, e.g. from peakdet().
    :return: (positions, values) as two lists.
    """
    # Comprehensions replace the two former range(len(...)) + append loops.
    peakst = [pair[0] for pair in peaksmax]
    peaksT = [pair[1] for pair in peaksmax]
    return peakst, peaksT
def get_noms(values):
    """Nominal values of a sequence of ufloat-like items, as a NumPy array."""
    nominal_values = [item.nominal_value for item in values]
    return array(nominal_values)
def get_std_dev(values):
    """Standard deviations of a sequence of ufloat-like items, as a NumPy array."""
    deviations = [item.std_dev for item in values]
    return array(deviations)
| [
"scipy.optimize.curve_fit",
"numpy.isscalar",
"matplotlib.pyplot.errorbar",
"numpy.asarray",
"numpy.diag",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"sys.exit",
"uncertainties.unumpy.uarray"
] | [((315, 327), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (325, 327), True, 'import matplotlib.pyplot as plt\n'), ((336, 347), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (345, 347), True, 'import matplotlib.pyplot as plt\n'), ((633, 644), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (641, 644), True, 'import numpy as np\n'), ((1040, 1057), 'numpy.array', 'np.array', (['xValues'], {}), '(xValues)\n', (1048, 1057), True, 'import numpy as np\n'), ((1072, 1089), 'numpy.array', 'np.array', (['yValues'], {}), '(yValues)\n', (1080, 1089), True, 'import numpy as np\n'), ((3147, 3169), 'uncertainties.unumpy.uarray', 'uarray', (['params', 'errors'], {}), '(params, errors)\n', (3153, 3169), False, 'from uncertainties.unumpy import uarray\n'), ((3491, 3507), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (3499, 3507), True, 'import numpy as np\n'), ((3626, 3642), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (3634, 3642), True, 'import numpy as np\n'), ((5590, 5600), 'numpy.asarray', 'asarray', (['v'], {}), '(v)\n', (5597, 5600), False, 'from numpy import NaN, Inf, arange, isscalar, asarray, array\n'), ((7197, 7237), 'numpy.array', 'array', (['[i.nominal_value for i in values]'], {}), '([i.nominal_value for i in values])\n', (7202, 7237), False, 'from numpy import NaN, Inf, arange, isscalar, asarray, array\n'), ((7276, 7310), 'numpy.array', 'array', (['[i.std_dev for i in values]'], {}), '([i.std_dev for i in values])\n', (7281, 7310), False, 'from numpy import NaN, Inf, arange, isscalar, asarray, array\n'), ((2882, 2969), 'scipy.optimize.curve_fit', 'curve_fit', (['fitFunction', 'x', 'ny'], {'sigma': 'dy', 'absolute_sigma': '(True)', 'p0': 'p0', 'maxfev': 'maxfev'}), '(fitFunction, x, ny, sigma=dy, absolute_sigma=True, p0=p0, maxfev=\n maxfev)\n', (2891, 2969), False, 'from scipy.optimize import curve_fit\n'), ((3043, 3093), 'scipy.optimize.curve_fit', 'curve_fit', (['fitFunction', 'x', 'y'], {'p0': 'p0', 
'maxfev': 'maxfev'}), '(fitFunction, x, y, p0=p0, maxfev=maxfev)\n', (3052, 3093), False, 'from scipy.optimize import curve_fit\n'), ((3115, 3134), 'numpy.diag', 'np.diag', (['covariance'], {}), '(covariance)\n', (3122, 3134), True, 'import numpy as np\n'), ((5635, 5690), 'sys.exit', 'sys.exit', (['"""Input vectors v and x must have same length"""'], {}), "('Input vectors v and x must have same length')\n", (5643, 5690), False, 'import sys\n'), ((5703, 5718), 'numpy.isscalar', 'isscalar', (['delta'], {}), '(delta)\n', (5711, 5718), False, 'from numpy import NaN, Inf, arange, isscalar, asarray, array\n'), ((5728, 5777), 'sys.exit', 'sys.exit', (['"""Input argument delta must be a scalar"""'], {}), "('Input argument delta must be a scalar')\n", (5736, 5777), False, 'import sys\n'), ((5806, 5855), 'sys.exit', 'sys.exit', (['"""Input argument delta must be positive"""'], {}), "('Input argument delta must be positive')\n", (5814, 5855), False, 'import sys\n'), ((6502, 6515), 'numpy.array', 'array', (['maxtab'], {}), '(maxtab)\n', (6507, 6515), False, 'from numpy import NaN, Inf, arange, isscalar, asarray, array\n'), ((6517, 6530), 'numpy.array', 'array', (['mintab'], {}), '(mintab)\n', (6522, 6530), False, 'from numpy import NaN, Inf, arange, isscalar, asarray, array\n'), ((2000, 2056), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'y'], {'xerr': 'errX', 'yerr': 'errY', 'fmt': 'errorStyle'}), '(x, y, xerr=errX, yerr=errY, fmt=errorStyle)\n', (2012, 2056), True, 'import matplotlib.pyplot as plt\n'), ((2096, 2141), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'y'], {'yerr': 'errY', 'fmt': 'errorStyle'}), '(x, y, yerr=errY, fmt=errorStyle)\n', (2108, 2141), True, 'import matplotlib.pyplot as plt\n'), ((3340, 3356), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (3348, 3356), True, 'import numpy as np\n'), ((2205, 2250), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'y'], {'xerr': 'errX', 'fmt': 'errorStyle'}), '(x, y, xerr=errX, 
fmt=errorStyle)\n', (2217, 2250), True, 'import matplotlib.pyplot as plt\n')] |
'''
Define the function related with the Markov Chain Monter Carlo (MCMC) process.
'''
import numpy as np
import emcee
import time
import os
import git
# Repository root (string path) and its parent directory; the latter is used
# as the default location for the chain output folder.
path_git = git.Repo('.', search_parent_directories=True).working_tree_dir
path_datos_global = os.path.dirname(path_git)
def MCMC_sampler(log_probability, initial_values,
                filename = "default.h5",
                witness_file = 'witness.txt',
                max_samples = 10000,
                witness_freq = 100,
                tolerance = 0.01,
                save_path = path_datos_global+'/Resultados_cadenas/'):
    '''
    Sample a posterior with emcee, writing the chain to an HDF5 backend and
    periodic progress reports to a plain-text witness file.

    log_probability: logarithm of the posterior distribution that will be sampled.
    initial_values: array of shape (nwalkers, ndim) with the walkers' starting points.
    filename: name of the h5 file that contains the chains information.
    witness_file: name of the witness file.
    max_samples: maximum number of samples, in case the chains do not converge.
    witness_freq: frequency (in iterations) used to print the state of the
    calculation in the witness file and to check convergence.
    tolerance: tolerance parameter of the convergence criterion.
    save_path: directory in which the outputs are stored. Changing this attribute
    in the configuration file is recommended.
    '''
    nwalkers, ndim = initial_values.shape
    # Set up the backend
    os.chdir(save_path)
    backend = emcee.backends.HDFBackend(filename)
    backend.reset(nwalkers, ndim) # Don't forget to clear it in case the file already exists
    # Truncate (create) the witness file before sampling starts.
    textfile_witness = open(witness_file,'w+')
    textfile_witness.close()
    #%%
    #Initialize the sampler
    sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability, backend=backend)
    #sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability, backend=backend,
    #                        moves=[(emcee.moves.DEMove(), 0.4), (emcee.moves.DESnookerMove(), 0.3)
    #                        , (emcee.moves.KDEMove(), 0.3)])
    # This will be useful to testing convergence
    old_tau = np.inf
    t1 = time.time()
    # Now we'll sample for up to max_samples steps
    for sample in sampler.sample(initial_values, iterations=max_samples, progress=True):
        # Only check convergence every 'witness_freq' steps
        if sampler.iteration % witness_freq: # 'witness_freq' sets how often convergence is checked
            continue
        # Overwrite the witness file with iteration number and elapsed time.
        # (The report strings themselves are in Spanish and left untouched.)
        os.chdir(save_path)
        textfile_witness = open(witness_file,'w')
        textfile_witness.write('Número de iteración: {} \t'.format(sampler.iteration))
        t2 = time.time()
        textfile_witness.write('Duración {} minutos y {} segundos'.format(int((t2-t1)/60),
                int((t2-t1) - 60*int((t2-t1)/60))))
        textfile_witness.close()
        # Compute the autocorrelation time so far
        # Using tol=0 means that we'll always get an estimate even
        # if it isn't trustworthy
        tau = sampler.get_autocorr_time(tol=0)
        # Check convergence
        converged = np.all(tau * 100 < sampler.iteration) # 100 is the convergence threshold
        # Also require tau to stay relatively constant between checks:
        converged &= np.all((np.abs(old_tau - tau) / tau) < tolerance)
        if converged:
            textfile_witness = open(witness_file,'a')
            textfile_witness.write('\n Convergió!')
            textfile_witness.close()
            break
        old_tau = tau
| [
"numpy.abs",
"emcee.EnsembleSampler",
"os.chdir",
"os.path.dirname",
"emcee.backends.HDFBackend",
"git.Repo",
"numpy.all",
"time.time"
] | [((248, 273), 'os.path.dirname', 'os.path.dirname', (['path_git'], {}), '(path_git)\n', (263, 273), False, 'import os\n'), ((165, 210), 'git.Repo', 'git.Repo', (['"""."""'], {'search_parent_directories': '(True)'}), "('.', search_parent_directories=True)\n", (173, 210), False, 'import git\n'), ((1281, 1300), 'os.chdir', 'os.chdir', (['save_path'], {}), '(save_path)\n', (1289, 1300), False, 'import os\n'), ((1312, 1347), 'emcee.backends.HDFBackend', 'emcee.backends.HDFBackend', (['filename'], {}), '(filename)\n', (1337, 1347), False, 'import emcee\n'), ((1549, 1620), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['nwalkers', 'ndim', 'log_probability'], {'backend': 'backend'}), '(nwalkers, ndim, log_probability, backend=backend)\n', (1570, 1620), False, 'import emcee\n'), ((1901, 1912), 'time.time', 'time.time', ([], {}), '()\n', (1910, 1912), False, 'import time\n'), ((2210, 2229), 'os.chdir', 'os.chdir', (['save_path'], {}), '(save_path)\n', (2218, 2229), False, 'import os\n'), ((2363, 2374), 'time.time', 'time.time', ([], {}), '()\n', (2372, 2374), False, 'import time\n'), ((2740, 2777), 'numpy.all', 'np.all', (['(tau * 100 < sampler.iteration)'], {}), '(tau * 100 < sampler.iteration)\n', (2746, 2777), True, 'import numpy as np\n'), ((2899, 2920), 'numpy.abs', 'np.abs', (['(old_tau - tau)'], {}), '(old_tau - tau)\n', (2905, 2920), True, 'import numpy as np\n')] |
import cv2
import numpy as np
def get_center_of_poly(pts):
    """Return the centroid ``(x, y)`` of a polygon given as a sequence of points.

    Uses image moments: centroid = (m10/m00, m01/m00).

    Note: raises ZeroDivisionError when the contour has zero area (m00 == 0),
    e.g. for degenerate/empty point sets.
    """
    moments = cv2.moments(np.array([pts]))
    center_x = int(moments["m10"] / moments["m00"])
    center_y = int(moments["m01"] / moments["m00"])
    return (center_x, center_y)
"numpy.array"
] | [((151, 166), 'numpy.array', 'np.array', (['[pts]'], {}), '([pts])\n', (159, 166), True, 'import numpy as np\n')] |
# ------------------------------------------------------------------------------
# Copyright (c) ETRI. All rights reserved.
# Licensed under the BSD 3-Clause License.
# This file is part of Youtube-Gesture-Dataset, a sub-project of AIR(AI for Robots) project.
# You can refer to details of AIR project at https://aiforrobots.github.io
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from scipy.signal import savgol_filter
import numpy as np
from scipy.stats import circvar
def normalize_skeleton(data, resize_factor=None):
    """Translate a flat ``[x0, y0, x1, y1, ...]`` skeleton so the neck joint
    (joint 1) becomes the origin, and scale it by a body-size factor.

    When ``resize_factor`` is None it is estimated from the neck height and
    the two neck-to-shoulder distances. Returns ``(normalized_data,
    resize_factor)`` so the same factor can be reused for later frames.
    """
    neck_x, neck_y = data[2], data[3]  # joint 1 (neck) is the anchor point

    if resize_factor is None:
        def _dist(ax, ay, bx, by):
            return np.sqrt((ax - bx) ** 2 + (ay - by) ** 2)

        # Body-size estimate: neck height relative to total shoulder span.
        neck_height = float(abs(data[1] - data[3]))
        shoulder_length = (_dist(data[2], data[3], data[4], data[5]) +
                           _dist(data[2], data[3], data[10], data[11]))
        resized_neck_height = neck_height / float(shoulder_length)
        if resized_neck_height > 0.6:
            resize_factor = shoulder_length * resized_neck_height / 0.6
        else:
            resize_factor = shoulder_length

    normalized_data = data.copy()
    for j in range(0, len(data), 2):
        normalized_data[j] = (data[j] - neck_x) / resize_factor
        normalized_data[j + 1] = (data[j + 1] - neck_y) / resize_factor

    return normalized_data, resize_factor
class MotionPreprocessor:
    """Filter and normalize a sequence of 2-D pose skeletons.

    Skeletons are assumed to be flat per-frame arrays of (x, y, confidence)
    triplets — presumably OpenPose-style output; TODO confirm with caller.
    """

    def __init__(self, skeletons):
        # skeletons: one row per frame, flattened joint values.
        self.skeletons = np.array(skeletons)
        self.filtering_message = "PASS"

    def get(self):
        """Run the full filter/normalize pipeline.

        Returns (skeletons, filtering_message); skeletons is [] when the
        sequence was rejected by any filter.
        """
        assert (self.skeletons is not None)

        # NOTE(review): `self.skeletons != []` compares an ndarray to an
        # empty list; NumPy currently returns a truthy scalar here, but this
        # pattern is fragile across NumPy versions — verify.
        # filtering
        if self.has_missing_frames():
            self.skeletons = []
            self.filtering_message = "too many missing frames"

        # fill missing joints
        if self.skeletons != []:
            self.fill_missing_joints()
            if self.skeletons is None or np.isnan(self.skeletons).any():
                self.filtering_message = "failed to fill missing joints"
                self.skeletons = []

        # filtering
        if self.skeletons != []:
            if self.is_static():
                self.skeletons = []
                self.filtering_message = "static motion"
            elif self.has_jumping_joint():
                self.skeletons = []
                self.filtering_message = "jumping joint"

        # preprocessing
        if self.skeletons != []:
            self.smooth_motion()

            is_side_view = False
            self.skeletons = self.skeletons.tolist()
            for i, frame in enumerate(self.skeletons):
                del frame[2::3]  # remove confidence values
                self.skeletons[i], _ = normalize_skeleton(frame)  # translate and scale

                # assertion: missing joints
                assert not np.isnan(self.skeletons[i]).any()

                # side view check: the nose x (index 0) should lie between
                # the two shoulder x coordinates (joints 2 and 5).
                if (self.skeletons[i][0] < min(self.skeletons[i][2 * 2],
                                               self.skeletons[i][5 * 2]) or
                        self.skeletons[i][0] > max(self.skeletons[i][2 * 2],
                                                   self.skeletons[i][5 * 2])):
                    is_side_view = True
                    break

            if len(self.skeletons) == 0 or is_side_view:
                self.filtering_message = "sideview"
                self.skeletons = []

        return self.skeletons, self.filtering_message

    def is_static(self, verbose=False):
        """Return True when both arm-joint angle variances fall below a threshold."""

        def joint_angle(p1, p2, p3):
            # Angle (degrees, in [0, 360)) at p2 formed by segments p2->p1 and p2->p3.
            v1 = p1 - p2
            v2 = p3 - p2
            ang1 = np.arctan2(*v1[::-1])
            ang2 = np.arctan2(*v2[::-1])
            return np.rad2deg((ang1 - ang2) % (2 * np.pi))

        def get_joint_variance(skeleton, index1, index2, index3):
            # Circular variance of the angle at joint index2 across all frames.
            angles = []
            for i in range(skeleton.shape[0]):
                x1, y1 = skeleton[i, index1 * 3], skeleton[i, index1 * 3 + 1]
                x2, y2 = skeleton[i, index2 * 3], skeleton[i, index2 * 3 + 1]
                x3, y3 = skeleton[i, index3 * 3], skeleton[i, index3 * 3 + 1]
                angle = joint_angle(np.array([x1, y1]), np.array([x2, y2]), np.array([x3, y3]))
                angles.append(angle)

            variance = circvar(angles, low=0, high=360)
            return variance

        # Elbow angles: left arm (joints 2-3-4), right arm (joints 5-6-7).
        left_arm_var = get_joint_variance(self.skeletons, 2, 3, 4)
        right_arm_var = get_joint_variance(self.skeletons, 5, 6, 7)

        th = 150  # variance threshold below which motion is considered static
        if left_arm_var < th and right_arm_var < th:
            print('too static - left var {}, right var {}'.format(left_arm_var, right_arm_var))
            return True
        else:
            if verbose:
                print('not static - left var {}, right var {}'.format(left_arm_var, right_arm_var))
            return False

    def has_jumping_joint(self, verbose=False):
        """Return True when any joint moves more than half a body width between frames."""
        # Frame-to-frame differences over the first 24 values (upper-body joints).
        frame_diff = np.squeeze(self.skeletons[1:, :24] - self.skeletons[:-1, :24])
        diffs = abs(frame_diff.flatten())
        # Body width from the x coordinates (every 3rd value) of the first frame.
        width = max(self.skeletons[0, :24:3]) - min(self.skeletons[0, :24:3])
        if max(diffs) > width / 2.0:
            print('jumping joint - diff {}, width {}'.format(max(diffs), width))
            return True
        else:
            if verbose:
                print('no jumping joint - diff {}, width {}'.format(max(diffs), width))
            return False

    def has_missing_frames(self):
        """Return True when more than 10% of the frames are all-zero (missing)."""
        n_empty_frames = 0
        n_frames = self.skeletons.shape[0]
        for i in range(n_frames):
            if np.sum(self.skeletons[i]) == 0:
                n_empty_frames += 1

        ret = n_empty_frames > n_frames * 0.1
        if ret:
            print('missing frames - {} / {}'.format(n_empty_frames, n_frames))
        return ret

    def smooth_motion(self):
        """Smooth each of the first 24 joint channels over time (Savitzky-Golay)."""
        for i in range(24):
            self.skeletons[:, i] = savgol_filter(self.skeletons[:, i], 5, 2)

    def fill_missing_joints(self):
        """Interpolate zero-valued (missing) joint coordinates over time.

        Mutates self.skeletons in place; returns None (by setting the local
        alias) when more than half of any joint's samples are missing.
        """
        skeletons = self.skeletons
        n_joints = 8  # only upper body

        def nan_helper(y):
            # Mask of NaNs plus a mask->indices converter, for np.interp.
            return np.isnan(y), lambda z: z.nonzero()[0]

        for i in range(n_joints):
            xs, ys = skeletons[:, i * 3], skeletons[:, i * 3 + 1]
            # Zero coordinates mark missing detections; convert to NaN.
            xs[xs == 0] = np.nan
            ys[ys == 0] = np.nan

            # Give up when a joint is missing in over half of the frames.
            if sum(np.isnan(xs)) > len(xs) / 2:
                skeletons = None
                break

            if sum(np.isnan(ys)) > len(ys) / 2:
                skeletons = None
                break

            if np.isnan(xs).any():
                nans, t = nan_helper(xs)
                xs[nans] = np.interp(t(nans), t(~nans), xs[~nans])
                skeletons[:, i * 3] = xs

            if np.isnan(ys).any():
                nans, t = nan_helper(ys)
                ys[nans] = np.interp(t(nans), t(~nans), ys[~nans])
                skeletons[:, i * 3 + 1] = ys

        return skeletons
| [
"numpy.sqrt",
"scipy.signal.savgol_filter",
"numpy.squeeze",
"numpy.array",
"numpy.sum",
"numpy.arctan2",
"numpy.isnan",
"numpy.rad2deg",
"scipy.stats.circvar"
] | [((638, 678), 'numpy.sqrt', 'np.sqrt', (['((x1 - x2) ** 2 + (y1 - y2) ** 2)'], {}), '((x1 - x2) ** 2 + (y1 - y2) ** 2)\n', (645, 678), True, 'import numpy as np\n'), ((1605, 1624), 'numpy.array', 'np.array', (['skeletons'], {}), '(skeletons)\n', (1613, 1624), True, 'import numpy as np\n'), ((4991, 5053), 'numpy.squeeze', 'np.squeeze', (['(self.skeletons[1:, :24] - self.skeletons[:-1, :24])'], {}), '(self.skeletons[1:, :24] - self.skeletons[:-1, :24])\n', (5001, 5053), True, 'import numpy as np\n'), ((3718, 3739), 'numpy.arctan2', 'np.arctan2', (['*v1[::-1]'], {}), '(*v1[::-1])\n', (3728, 3739), True, 'import numpy as np\n'), ((3759, 3780), 'numpy.arctan2', 'np.arctan2', (['*v2[::-1]'], {}), '(*v2[::-1])\n', (3769, 3780), True, 'import numpy as np\n'), ((3800, 3839), 'numpy.rad2deg', 'np.rad2deg', (['((ang1 - ang2) % (2 * np.pi))'], {}), '((ang1 - ang2) % (2 * np.pi))\n', (3810, 3839), True, 'import numpy as np\n'), ((4370, 4402), 'scipy.stats.circvar', 'circvar', (['angles'], {'low': '(0)', 'high': '(360)'}), '(angles, low=0, high=360)\n', (4377, 4402), False, 'from scipy.stats import circvar\n'), ((5944, 5985), 'scipy.signal.savgol_filter', 'savgol_filter', (['self.skeletons[:, i]', '(5)', '(2)'], {}), '(self.skeletons[:, i], 5, 2)\n', (5957, 5985), False, 'from scipy.signal import savgol_filter\n'), ((5622, 5647), 'numpy.sum', 'np.sum', (['self.skeletons[i]'], {}), '(self.skeletons[i])\n', (5628, 5647), True, 'import numpy as np\n'), ((6144, 6155), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (6152, 6155), True, 'import numpy as np\n'), ((4249, 4267), 'numpy.array', 'np.array', (['[x1, y1]'], {}), '([x1, y1])\n', (4257, 4267), True, 'import numpy as np\n'), ((4269, 4287), 'numpy.array', 'np.array', (['[x2, y2]'], {}), '([x2, y2])\n', (4277, 4287), True, 'import numpy as np\n'), ((4289, 4307), 'numpy.array', 'np.array', (['[x3, y3]'], {}), '([x3, y3])\n', (4297, 4307), True, 'import numpy as np\n'), ((6369, 6381), 'numpy.isnan', 'np.isnan', (['xs'], {}), 
'(xs)\n', (6377, 6381), True, 'import numpy as np\n'), ((6473, 6485), 'numpy.isnan', 'np.isnan', (['ys'], {}), '(ys)\n', (6481, 6485), True, 'import numpy as np\n'), ((6573, 6585), 'numpy.isnan', 'np.isnan', (['xs'], {}), '(xs)\n', (6581, 6585), True, 'import numpy as np\n'), ((6758, 6770), 'numpy.isnan', 'np.isnan', (['ys'], {}), '(ys)\n', (6766, 6770), True, 'import numpy as np\n'), ((2027, 2051), 'numpy.isnan', 'np.isnan', (['self.skeletons'], {}), '(self.skeletons)\n', (2035, 2051), True, 'import numpy as np\n'), ((2938, 2965), 'numpy.isnan', 'np.isnan', (['self.skeletons[i]'], {}), '(self.skeletons[i])\n', (2946, 2965), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 28 11:04:08 2018
@author: antony
"""
import pandas as pd
import phenograph
import collections
import os
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import libcluster
import libsparse
from libsparse import SparseDataFrame
# t-SNE hyper-parameters, as per the 10x defaults.
TSNE_PERPLEXITY = 30
TSNE_MAX_ITER = 1000
TSNE_LEARNING = 200  # learning rate
TSNE_RANDOM_STATE = 0  # fixed seed for reproducible embeddings
TSNE_METRIC = 'correlation'  # distance metric (alternative: 'euclidean')
TSNE_INIT = 'pca'
def get_cluster_file(dir, name, tpmmode=True, logmode=True):
    """Return the path of the cluster-assignment file for run ``name``.

    ``tpmmode``/``logmode`` are retained for backward compatibility; they
    no longer affect the file name (the old suffix logic was dead code).
    """
    if dir.endswith('/'):
        dir = dir[:-1]
    return '{}/clusters_{}.txt'.format(dir, name)
def get_kmeans_file(dir, name, clusters):
    """Return the path of the k-means cluster file for ``clusters`` clusters."""
    base = dir[:-1] if dir.endswith('/') else dir
    return '{}/clusters_kmeans_{}_{}.txt'.format(base, clusters, name)
def get_pca_file(dir, name, tpmmode=True, logmode=True):
    """Return the path of the PCA matrix file (tpmmode/logmode are unused)."""
    base = dir[:-1] if dir.endswith('/') else dir
    return '{}/pca_data_{}.txt'.format(base, name)
def get_pca_var_file(name, tpmmode=True, logmode=True):
    """Return the file name of the explained-variance table for a PCA run."""
    return f'pca_var_{name}.txt'
def get_dim_file(dir, name, mode='tsne'):
    """Return the path of a 2-D embedding file for the given ``mode``."""
    base = dir[:-1] if dir.endswith('/') else dir
    return '{}/{}_data_{}.txt'.format(base, mode, name)
def get_tsne_file(dir, name, tpmmode=True, logmode=True):
    """Return the path of the t-SNE coordinates file (tpmmode/logmode unused)."""
    return get_dim_file(dir, name, 'tsne')
def write_clusters(headers, labels, name, tpmmode=True, logmode=True, dir='.'):
    """Write a tab-separated Barcode -> Cluster table for run ``name``."""
    out_path = get_cluster_file(dir, name, tpmmode=tpmmode, logmode=logmode)
    print('Writing clusters to {}...'.format(out_path))

    # Accept either a list of barcodes or a DataFrame whose first column holds them.
    if type(headers) is pd.core.frame.DataFrame:
        headers = headers.iloc[:, 0].tolist()

    # 'cluster_one_based' is built for user convenience, but only the
    # zero-based 'Cluster' column is persisted.
    table = pd.DataFrame({'Barcode': headers,
                          'Cluster': labels,
                          'cluster_one_based': (labels + 1)})
    table = table[['Barcode', 'Cluster']].set_index('Barcode')
    table.to_csv(out_path, sep='\t', header=True, index=True)
def write_kmeans_clusters(name, clusters, headers, labels, dir='.'):
    """Write a tab-separated Barcode -> Cluster table for a k-means run."""
    out_path = get_kmeans_file(dir, name, clusters)
    print('Writing k-means clusters to {}...'.format(out_path))

    # Accept either a list of barcodes or a DataFrame whose first column holds them.
    if type(headers) is pd.core.frame.DataFrame:
        headers = headers.iloc[:, 0].tolist()

    table = pd.DataFrame({'Barcode': headers, 'Cluster': labels})
    table = table[['Barcode', 'Cluster']].set_index('Barcode')
    table.to_csv(out_path, sep='\t', header=True, index=True)
def read_clusters(file):
    """Load a Barcode/Cluster table written by write_clusters().

    Returns
    -------
    (cluster_map, data) : a ``defaultdict(int)`` mapping barcode -> cluster
    (0 for unknown barcodes) and the underlying DataFrame.
    """
    print('Reading clusters from {}...'.format(file))
    data = pd.read_csv(file, sep='\t', header=0, index_col=0)

    cluster_map = collections.defaultdict(int)
    # Use explicit positional access: `data['Cluster'][i]` relied on the
    # deprecated integer-position fallback and breaks with integer barcodes
    # (and in pandas >= 3.0).
    for i in range(data.shape[0]):
        cluster_map[data.index[i]] = data['Cluster'].iloc[i]
    return cluster_map, data
def load_phenograph_clusters(pca,
                             name,
                             cache=True,
                             neighbors=20,
                             dir='.'):
    """
    Cluster cells with PhenoGraph on a PCA matrix, caching the result on disk.

    Parameters
    ----------
    pca : DataFrame, shape (n_samples, n_pca)
    name : str
        Name of run.
    cache : bool, optional
        Reuse an existing cluster file when present. Default is True.
    neighbors : int, optional
        Nearest-neighbor count for PhenoGraph (capped at n_samples - 2).
    dir : str, optional
        Directory holding the cluster file.

    Returns
    -------
    DataFrame with a 'Cluster' column indexed by barcode.
    """
    file = get_cluster_file(dir, name)

    if not os.path.isfile(file) or not cache:
        print('{} was not found, creating it with...'.format(file))
        k = min(pca.shape[0] - 2, neighbors)

        # Find the interesting clusters
        labels, graph, Q = phenograph.cluster(pca, k=k)

        # PhenoGraph labels noise points -1; move them into their own
        # cluster and shift everything to one-based ids.
        if min(labels) == -1:
            labels[np.where(labels == -1)] = 100
        labels += 1

        # BUG FIX: pass dir through so the cache is written where it will be
        # read from (previously it was always written to '.').
        write_clusters(pca.index.tolist(), labels, name, dir=dir)

    _, data = read_clusters(file)
    return data
def load_kmeans_clusters(pca, name, clusters=10, cache=True, dir='.'):
    """
    Cluster cells with k-means on a PCA matrix, caching the result on disk.

    Parameters
    ----------
    pca : DataFrame, shape (n_samples, n_pca)
    name : str
        Name of run.
    clusters : int, optional
        Number of k-means clusters. Default is 10.
    cache : bool, optional
        Reuse an existing cluster file when present. Default is True.
    dir : str, optional
        Directory holding the cluster file.

    Returns
    -------
    DataFrame with a 'Cluster' column indexed by barcode.
    """
    file = get_kmeans_file(dir, name, clusters)

    if not os.path.isfile(file) or not cache:
        print('{} was not found, creating it with...'.format(file))
        labels = KMeans(n_clusters=clusters).fit_predict(pca)

        # Keep label post-processing consistent with load_phenograph_clusters:
        # fold any -1 labels into cluster 100, then shift to one-based ids.
        if min(labels) == -1:
            labels[np.where(labels == -1)] = 100
        labels += 1

        write_kmeans_clusters(name, clusters, pca.index.tolist(), labels, dir=dir)

    _, data = read_clusters(file)
    return data
def read_pca(file, dir='.'):
    """Load a PCA matrix; ``file`` may be a path or a run name to resolve."""
    path = file if os.path.isfile(file) else get_pca_file(dir, file)

    print('Reading pca from {}...'.format(path))
    return pd.read_csv(path, sep='\t', header=0, index_col=0)
def load_pca(data,
             name,
             n=50,
             mode='random',
             tpmmode=True,
             logmode=True,
             exclude=[],
             cache=True,
             dir='.'):
    """
    Compute (or load a cached) PCA of ``data`` and return it as a DataFrame.

    When the PCA is (re)computed, the explained-variance table is also
    written via get_pca_var_file().
    """
    pca_file = get_pca_file(dir, name, tpmmode=tpmmode, logmode=logmode)

    if not os.path.isfile(pca_file) or not cache:
        print('{} was not found, creating it with n={}...'.format(pca_file, n))

        p, pca = libcluster.pca(data, n=n, mode=mode, exclude=exclude)
        pc_names = ['PC-{}'.format(i + 1) for i in range(0, pca.shape[1])]

        # Explained variance per component.
        var_file = get_pca_var_file(name, tpmmode=tpmmode, logmode=logmode)
        var_table = pd.DataFrame(p.explained_variance_ratio_, index=pc_names, columns=['Variance'])
        var_table.to_csv(var_file, sep='\t', header=True, index=True)

        # PCA coordinates, one row per sample (samples are the columns of `data`).
        coords = pd.DataFrame(pca, index=data.columns, columns=pc_names)
        coords.to_csv(pca_file, sep='\t', header=True, index=True)

    return read_pca(pca_file)
def read_tsne(file):
    """Load a t-SNE coordinate table (Barcode index, TSNE-1/TSNE-2 columns)."""
    print('Reading clusters from {}...'.format(file))
    table = pd.read_csv(file, sep='\t', header=0, index_col=0)
    return table
def new_tsne():
    """Build a TSNE instance configured with the module-level defaults."""
    params = dict(n_components=2,
                  verbose=1,
                  perplexity=TSNE_PERPLEXITY,
                  learning_rate=TSNE_LEARNING,
                  n_iter=TSNE_MAX_ITER,
                  method='barnes_hut',
                  random_state=TSNE_RANDOM_STATE,
                  init=TSNE_INIT,
                  metric=TSNE_METRIC)
    return TSNE(**params)
def load_tsne(data, name, n=50, tpmmode=True, logmode=True, exclude=[], dir='.'):
    """
    Run t-SNE on ``data`` (via an intermediate PCA), caching the coordinates.

    Parameters
    ----------
    data : array, shape (n_samples, n_features)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row.
    name : str
        Name of run.
    n : int
        Number of PCA components computed before t-SNE.
    dir : str, optional
        Directory for the cached coordinate file. Default '.'.

    Returns
    -------
    tsne : DataFrame, shape (n_samples, 2)
        The tsne coordinates for each sample
    """
    # BUG FIX: get_tsne_file() takes (dir, name, ...); the old call passed
    # only `name`, which raised a TypeError. A `dir` parameter (default '.')
    # is now threaded through, matching load_pca_tsne().
    file = get_tsne_file(dir, name, tpmmode=tpmmode, logmode=logmode)

    if not os.path.isfile(file):
        print('{} was not found, creating it with n={}...'.format(file, n))
        _, pca = libcluster.pca(data, n=n, exclude=exclude)

        tsne = new_tsne()
        tsne_results = tsne.fit_transform(pca)

        coords = pd.DataFrame({'Barcode': data.index,
                               'TSNE-1': tsne_results[:, 0],
                               'TSNE-2': tsne_results[:, 1]})
        coords = coords[['Barcode', 'TSNE-1', 'TSNE-2']].set_index('Barcode')
        coords.to_csv(file, sep='\t', header=True, index=True)

    return read_tsne(file)
def load_pca_tsne(pca, name, tpmmode=True, logmode=True, exclude=[], cache=True, dir='.'):
    """
    Run t-SNE on an existing PCA matrix, caching the coordinates on disk.

    Parameters
    ----------
    pca : array, shape (n_samples, n_pca)
        PCA matrix (dense DataFrame or SparseDataFrame).
    name : str
        Name of the PCA run; determines the cache file name.

    Returns
    -------
    DataFrame, shape (n_samples, 2) with TSNE-1/TSNE-2 columns.
    """
    cache_file = get_tsne_file(dir, name, tpmmode=tpmmode, logmode=logmode)

    if not os.path.isfile(cache_file) or not cache:
        print('{} was not found, creating it...'.format(cache_file))

        embedder = new_tsne()
        if isinstance(pca, SparseDataFrame):
            coords = SparseDataFrame(embedder.fit_transform(pca.data), pca.index, pca.columns)
        else:
            coords = embedder.fit_transform(pca)

        table = pd.DataFrame({'Barcode': pca.index,
                              'TSNE-1': coords[:, 0],
                              'TSNE-2': coords[:, 1]})
        table = table[['Barcode', 'TSNE-1', 'TSNE-2']].set_index('Barcode')
        table.to_csv(cache_file, sep='\t', header=True)

    return read_tsne(cache_file)
def lz_dz_diversity_plot(diversity, x, y, lz_indices, dz_indices, ax, cmap, norm):
    """Scatter LZ cells (triangles) and DZ cells (circles) coloured by diversity.

    Within each zone the points are drawn in order of increasing diversity so
    the most diverse cells end up on top. Returns the last scatter artist
    created (or None when both index lists are empty), suitable as a
    colorbar mappable.
    """
    def _plot_zone(indices, marker):
        # Select the zone's points and sort them by diversity before plotting.
        div = np.take(diversity, indices)
        xs = np.take(x, indices)
        ys = np.take(y, indices)
        order = np.argsort(div)
        return ax.scatter(np.take(xs, order), np.take(ys, order),
                          c=np.take(div, order), cmap=cmap, norm=norm,
                          s=libcluster.MARKER_SIZE, alpha=0.8, marker=marker)

    ret = None
    if len(lz_indices) > 0:
        ret = _plot_zone(lz_indices, '^')
    if len(dz_indices) > 0:
        ret = _plot_zone(dz_indices, 'o')
    return ret
def diversity_plot(pca_results, diversity, lz_indices, dz_indices, prefix, diversity_type):
    """Plot a t-SNE embedding coloured by Shannon diversity and save it as PDF."""
    fig, ax = libcluster.make_figure()

    cmap = plt.cm.plasma
    # Colour scale from 0 to the rounded maximum diversity, at least 1.
    norm = matplotlib.colors.Normalize(vmin=0, vmax=max(1, round(max(diversity))))

    x = pca_results['tsne-1'].tolist()
    y = pca_results['tsne-2'].tolist()
    lz_dz_diversity_plot(diversity, x, y, lz_indices, dz_indices, ax, cmap, norm)
    libcluster.format_simple_axes(ax, title="t-SNE")

    # Horizontal colorbar in the lower-right corner of the figure.
    cax = fig.add_axes([0.8, 0.05, 0.15, 0.02])
    matplotlib.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm, ticks=[0.0, 1.0], orientation='horizontal')
    ax.set_title('Shannon Diversity Index')

    libcluster.save_plot(fig, 'tsne_{}_{}_diversity.pdf'.format(prefix, diversity_type))
def tsne_legend(ax, labels, colors):
    """Add a proxy legend entry (colour + cell count) for each cluster."""
    # The number of legend entries is capped by the number of colours available.
    n_shown = min(len(colors), np.max(labels) + 1)

    for cluster_id in range(0, n_shown):
        cluster_size = len(np.where(labels == cluster_id)[0])
        ax.scatter([], [], marker='o', color=colors[cluster_id], alpha=0.9,
                   s=libcluster.MARKER_SIZE,
                   label="Cluster {} ({})".format(cluster_id + 1, cluster_size))
def get_tsne_plot_name(name, t1=1, t2=2):
    """Return the output file name for a t-SNE plot (t1/t2 are unused)."""
    return f'tsne_{name}.pdf'
def format_simple_axes(ax, title="t-SNE", dim1=1, dim2=2, subtitle1="", subtitle2=""):
    """Hide the axes frame and draw two small origin arrows with dimension labels."""
    libcluster.invisible_axes(ax)

    # Short arrows from the plot origin along each axis direction.
    ax.annotate('', xy=(40, 0), xytext=(-2, 0),
                xycoords='axes pixels', textcoords='axes pixels',
                arrowprops=dict(arrowstyle='->', facecolor='red'))
    ax.annotate('', xy=(0, 40), xytext=(0, -2),
                xycoords='axes pixels', textcoords='axes pixels',
                arrowprops=dict(arrowstyle='->', facecolor='black'))

    # Axis labels, optionally with a parenthesized subtitle.
    if subtitle1 != "":
        label1 = '{} {} ({})'.format(title, dim1, subtitle1)
    else:
        label1 = '{} {}'.format(title, dim1)
    if subtitle2 != "":
        label2 = '{} {} ({})'.format(title, dim2, subtitle2)
    else:
        label2 = '{} {}'.format(title, dim2)

    ax.text(0, -0.04, label1, transform=ax.transAxes)
    ax.text(-0.04, 0, label2, va='bottom', transform=ax.transAxes, rotation=90)
| [
"sklearn.cluster.KMeans",
"libcluster.make_figure",
"phenograph.cluster",
"pandas.read_csv",
"numpy.where",
"libcluster.format_simple_axes",
"matplotlib.colorbar.ColorbarBase",
"libcluster.invisible_axes",
"sklearn.manifold.TSNE",
"numpy.max",
"os.path.isfile",
"numpy.take",
"numpy.argsort",... | [((2005, 2096), 'pandas.DataFrame', 'pd.DataFrame', (["{'Barcode': headers, 'Cluster': labels, 'cluster_one_based': labels + 1}"], {}), "({'Barcode': headers, 'Cluster': labels, 'cluster_one_based': \n labels + 1})\n", (2017, 2096), True, 'import pandas as pd\n'), ((2607, 2660), 'pandas.DataFrame', 'pd.DataFrame', (["{'Barcode': headers, 'Cluster': labels}"], {}), "({'Barcode': headers, 'Cluster': labels})\n", (2619, 2660), True, 'import pandas as pd\n'), ((2886, 2936), 'pandas.read_csv', 'pd.read_csv', (['file'], {'sep': '"""\t"""', 'header': '(0)', 'index_col': '(0)'}), "(file, sep='\\t', header=0, index_col=0)\n", (2897, 2936), True, 'import pandas as pd\n'), ((2956, 2984), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (2979, 2984), False, 'import collections\n'), ((5571, 5621), 'pandas.read_csv', 'pd.read_csv', (['file'], {'sep': '"""\t"""', 'header': '(0)', 'index_col': '(0)'}), "(file, sep='\\t', header=0, index_col=0)\n", (5582, 5621), True, 'import pandas as pd\n'), ((6635, 6685), 'pandas.read_csv', 'pd.read_csv', (['file'], {'sep': '"""\t"""', 'header': '(0)', 'index_col': '(0)'}), "(file, sep='\\t', header=0, index_col=0)\n", (6646, 6685), True, 'import pandas as pd\n'), ((6714, 6923), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'verbose': '(1)', 'perplexity': 'TSNE_PERPLEXITY', 'learning_rate': 'TSNE_LEARNING', 'n_iter': 'TSNE_MAX_ITER', 'method': '"""barnes_hut"""', 'random_state': 'TSNE_RANDOM_STATE', 'init': 'TSNE_INIT', 'metric': 'TSNE_METRIC'}), "(n_components=2, verbose=1, perplexity=TSNE_PERPLEXITY, learning_rate=\n TSNE_LEARNING, n_iter=TSNE_MAX_ITER, method='barnes_hut', random_state=\n TSNE_RANDOM_STATE, init=TSNE_INIT, metric=TSNE_METRIC)\n", (6718, 6923), False, 'from sklearn.manifold import TSNE\n'), ((10482, 10506), 'libcluster.make_figure', 'libcluster.make_figure', ([], {}), '()\n', (10504, 10506), False, 'import libcluster\n'), ((10773, 10821), 
'libcluster.format_simple_axes', 'libcluster.format_simple_axes', (['ax'], {'title': '"""t-SNE"""'}), "(ax, title='t-SNE')\n", (10802, 10821), False, 'import libcluster\n'), ((11023, 11131), 'matplotlib.colorbar.ColorbarBase', 'matplotlib.colorbar.ColorbarBase', (['cax'], {'cmap': 'cmap', 'norm': 'norm', 'ticks': '[0.0, 1.0]', 'orientation': '"""horizontal"""'}), "(cax, cmap=cmap, norm=norm, ticks=[0.0, 1.0\n ], orientation='horizontal')\n", (11055, 11131), False, 'import matplotlib\n'), ((11774, 11803), 'libcluster.invisible_axes', 'libcluster.invisible_axes', (['ax'], {}), '(ax)\n', (11799, 11803), False, 'import libcluster\n'), ((3980, 4008), 'phenograph.cluster', 'phenograph.cluster', (['pca'], {'k': 'k'}), '(pca, k=k)\n', (3998, 4008), False, 'import phenograph\n'), ((5442, 5462), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (5456, 5462), False, 'import os\n'), ((6048, 6101), 'libcluster.pca', 'libcluster.pca', (['data'], {'n': 'n', 'mode': 'mode', 'exclude': 'exclude'}), '(data, n=n, mode=mode, exclude=exclude)\n', (6062, 6101), False, 'import libcluster\n'), ((6262, 6339), 'pandas.DataFrame', 'pd.DataFrame', (['p.explained_variance_ratio_'], {'index': 'labels', 'columns': "['Variance']"}), "(p.explained_variance_ratio_, index=labels, columns=['Variance'])\n", (6274, 6339), True, 'import pandas as pd\n'), ((6413, 6466), 'pandas.DataFrame', 'pd.DataFrame', (['pca'], {'index': 'data.columns', 'columns': 'labels'}), '(pca, index=data.columns, columns=labels)\n', (6425, 6466), True, 'import pandas as pd\n'), ((7609, 7629), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (7623, 7629), False, 'import os\n'), ((7729, 7771), 'libcluster.pca', 'libcluster.pca', (['data'], {'n': 'n', 'exclude': 'exclude'}), '(data, n=n, exclude=exclude)\n', (7743, 7771), False, 'import libcluster\n'), ((7877, 7978), 'pandas.DataFrame', 'pd.DataFrame', (["{'Barcode': data.index, 'TSNE-1': tsne_results[:, 0], 'TSNE-2':\n tsne_results[:, 1]}"], {}), 
"({'Barcode': data.index, 'TSNE-1': tsne_results[:, 0], 'TSNE-2':\n tsne_results[:, 1]})\n", (7889, 7978), True, 'import pandas as pd\n'), ((9070, 9170), 'pandas.DataFrame', 'pd.DataFrame', (["{'Barcode': pca.index, 'TSNE-1': tsne_results[:, 0], 'TSNE-2': tsne_results\n [:, 1]}"], {}), "({'Barcode': pca.index, 'TSNE-1': tsne_results[:, 0], 'TSNE-2':\n tsne_results[:, 1]})\n", (9082, 9170), True, 'import pandas as pd\n'), ((9671, 9701), 'numpy.take', 'np.take', (['diversity', 'lz_indices'], {}), '(diversity, lz_indices)\n', (9678, 9701), True, 'import numpy as np\n'), ((9711, 9733), 'numpy.take', 'np.take', (['x', 'lz_indices'], {}), '(x, lz_indices)\n', (9718, 9733), True, 'import numpy as np\n'), ((9743, 9765), 'numpy.take', 'np.take', (['y', 'lz_indices'], {}), '(y, lz_indices)\n', (9750, 9765), True, 'import numpy as np\n'), ((9780, 9795), 'numpy.argsort', 'np.argsort', (['div'], {}), '(div)\n', (9790, 9795), True, 'import numpy as np\n'), ((9806, 9827), 'numpy.take', 'np.take', (['div', 'indices'], {}), '(div, indices)\n', (9813, 9827), True, 'import numpy as np\n'), ((9837, 9857), 'numpy.take', 'np.take', (['x1', 'indices'], {}), '(x1, indices)\n', (9844, 9857), True, 'import numpy as np\n'), ((9867, 9887), 'numpy.take', 'np.take', (['y1', 'indices'], {}), '(y1, indices)\n', (9874, 9887), True, 'import numpy as np\n'), ((10036, 10066), 'numpy.take', 'np.take', (['diversity', 'dz_indices'], {}), '(diversity, dz_indices)\n', (10043, 10066), True, 'import numpy as np\n'), ((10076, 10098), 'numpy.take', 'np.take', (['x', 'dz_indices'], {}), '(x, dz_indices)\n', (10083, 10098), True, 'import numpy as np\n'), ((10108, 10130), 'numpy.take', 'np.take', (['y', 'dz_indices'], {}), '(y, dz_indices)\n', (10115, 10130), True, 'import numpy as np\n'), ((10145, 10160), 'numpy.argsort', 'np.argsort', (['div'], {}), '(div)\n', (10155, 10160), True, 'import numpy as np\n'), ((10171, 10192), 'numpy.take', 'np.take', (['div', 'indices'], {}), '(div, indices)\n', (10178, 10192), 
True, 'import numpy as np\n'), ((10202, 10222), 'numpy.take', 'np.take', (['x1', 'indices'], {}), '(x1, indices)\n', (10209, 10222), True, 'import numpy as np\n'), ((10232, 10252), 'numpy.take', 'np.take', (['y1', 'indices'], {}), '(y1, indices)\n', (10239, 10252), True, 'import numpy as np\n'), ((3747, 3767), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3761, 3767), False, 'import os\n'), ((4880, 4900), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (4894, 4900), False, 'import os\n'), ((5923, 5943), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (5937, 5943), False, 'import os\n'), ((8633, 8653), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (8647, 8653), False, 'import os\n'), ((11330, 11344), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (11336, 11344), True, 'import numpy as np\n'), ((4091, 4113), 'numpy.where', 'np.where', (['(labels == -1)'], {}), '(labels == -1)\n', (4099, 4113), True, 'import numpy as np\n'), ((5009, 5036), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'clusters'}), '(n_clusters=clusters)\n', (5015, 5036), False, 'from sklearn.cluster import KMeans\n'), ((5136, 5158), 'numpy.where', 'np.where', (['(labels == -1)'], {}), '(labels == -1)\n', (5144, 5158), True, 'import numpy as np\n'), ((11424, 11445), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (11432, 11445), True, 'import numpy as np\n')] |
import sys
import numpy as np
import theano.tensor as T
from keras.layers import Input, Conv2D, Activation, Lambda, UpSampling2D, merge
from keras.models import Model
from keras.engine.topology import Layer
from neural_style.utils import floatX
class InstanceNormalization(Layer):
    """Per-channel instance normalization with learnable scale and shift.

    Normalizes each (sample, channel) feature map over its spatial (H, W)
    dimensions; input layout is channels-first (N, C, H, W).
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def build(self, input_shape):
        # One scale and one shift parameter per channel (input_shape[1]).
        self.scale = self.add_weight(shape=(input_shape[1],), initializer="uniform", trainable=True)
        self.shift = self.add_weight(shape=(input_shape[1],), initializer="zero", trainable=True)
        super().build(input_shape)

    def call(self, x, mask=None):
        # Number of spatial positions per feature map (H * W).
        hw = T.cast(x.shape[2] * x.shape[3], floatX)
        # Per-(sample, channel) spatial mean, broadcast back to (N, C, 1, 1).
        mu = x.sum(axis=-1).sum(axis=-1) / hw
        mu_vec = mu.dimshuffle(0, 1, "x", "x")
        # Per-(sample, channel) spatial variance; 1e-5 guards against division by zero.
        sig2 = T.square(x - mu_vec).sum(axis=-1).sum(axis=-1) / hw
        y = (x - mu_vec) / T.sqrt(sig2.dimshuffle(0, 1, "x", "x") + 1e-5)
        # Apply the learned per-channel affine transform.
        return self.scale.dimshuffle("x", 0, "x", "x") * y + self.shift.dimshuffle("x", 0, "x", "x")
class ReflectPadding2D(Layer):
    """Reflection-pad a channels-first (N, C, H, W) tensor by (p0, p1).

    Edges and corners are filled by mirroring the interior values (excluding
    the border row/column itself), implemented with Theano set_subtensor.
    """

    def __init__(self, padding=(1, 1), **kwargs):
        # padding = (rows, cols) added on each side.
        self.padding = padding
        super().__init__(**kwargs)

    def build(self, input_shape):
        super().build(input_shape)

    def call(self, x, mask=None):
        p0, p1 = self.padding[0], self.padding[1]
        # Allocate the padded output, then fill it region by region.
        y = T.zeros((x.shape[0], x.shape[1], x.shape[2]+(2*p0), x.shape[3]+(2*p1)), dtype=floatX)
        # Interior: the original tensor.
        y = T.set_subtensor(y[:, :, p0:-p0, p1:-p1], x)
        # Top/bottom edges: rows mirrored about the first/last row.
        y = T.set_subtensor(y[:, :, :p0, p1:-p1], x[:, :, p0:0:-1, :])
        y = T.set_subtensor(y[:, :, -p0:, p1:-p1], x[:, :, -2:-2-p0:-1])
        # Left/right edges: columns mirrored about the first/last column.
        y = T.set_subtensor(y[:, :, p0:-p0, :p1], x[:, :, :, p1:0:-1])
        y = T.set_subtensor(y[:, :, p0:-p0, -p1:], x[:, :, :, -2:-2-p1:-1])
        # Four corners: mirrored in both dimensions.
        y = T.set_subtensor(y[:, :, :p0, :p1], x[:, :, p0:0:-1, p1:0:-1])
        y = T.set_subtensor(y[:, :, -p0:, :p1], x[:, :, -2:-2-p0:-1, p1:0:-1])
        y = T.set_subtensor(y[:, :, :p0, -p1:], x[:, :, p0:0:-1, -2:-2-p1:-1])
        y = T.set_subtensor(y[:, :, -p0:, -p1:], x[:, :, -2:-2-p0:-1, -2:-2-p1:-1])
        return y

    def get_output_shape_for(self, input_shape):
        # Spatial dimensions grow by 2 * padding on each axis.
        return (input_shape[0], input_shape[1], input_shape[2]+(2*self.padding[0]), input_shape[3]+(2*self.padding[1]))
def conv_layer(in_, nb_filter, filter_length, subsample=1, upsample=1, only_conv=False):
    """Optional upsampling -> reflect padding -> conv -> instance-norm + ReLU.

    With ``only_conv=True`` the normalization and activation are skipped.
    The reflect padding of floor(filter_length / 2) keeps the spatial size
    unchanged under the 'valid' convolution for odd filter sizes.
    """
    x = UpSampling2D(size=(upsample, upsample))(in_) if upsample != 1 else in_
    pad = int(np.floor(filter_length / 2))
    x = ReflectPadding2D((pad, pad))(x)
    # NOTE(review): Keras 1 style call (nb_filter, rows, cols, subsample,
    # border_mode) — confirm the installed Keras version supports it.
    x = Conv2D(nb_filter, filter_length, filter_length, subsample=(subsample, subsample), border_mode="valid")(x)
    if only_conv:
        return x
    x = InstanceNormalization()(x)
    return Activation("relu")(x)
def residual_block(in_):
    """Two 3x3/128-filter conv layers with an identity skip connection."""
    branch = conv_layer(in_, 128, 3)
    branch = conv_layer(branch, 128, 3, only_conv=True)
    return merge([branch, in_], mode="sum")
def get_transformer_net(X, weights=None):
    """Build the image-transformation network of the style-transfer model.

    Architecture: 3 downsampling conv layers, 5 residual blocks, 2
    upsampling conv layers, a final conv and a scaled tanh output.

    Args:
        X: input tensor (3 x 256 x 256 image batch).
        weights: optional path to a weights file; the process exits with
            status 1 if loading fails.

    Returns:
        The compiled keras ``Model``.
    """
    input_ = Input(tensor=X, shape=(3, 256, 256))
    y = conv_layer(input_, 32, 9)
    y = conv_layer(y, 64, 3, subsample=2)
    y = conv_layer(y, 128, 3, subsample=2)
    for _ in range(5):
        y = residual_block(y)
    y = conv_layer(y, 64, 3, upsample=2)
    y = conv_layer(y, 32, 3, upsample=2)
    y = conv_layer(y, 3, 9, only_conv=True)
    y = Activation("tanh")(y)
    # Scale tanh output from [-1, 1] into roughly pixel-value range.
    y = Lambda(lambda x: x * 150, output_shape=(3, None, None))(y)
    net = Model(input=input_, output=y)
    if weights is not None:
        try:
            net.load_weights(weights)
        except OSError as e:
            # BUG FIX: `sys` was never imported in this module, so the
            # original error path raised NameError instead of exiting.
            import sys
            print(e)
            sys.exit(1)
    return net
| [
"keras.layers.Conv2D",
"sys.exit",
"keras.layers.UpSampling2D",
"keras.layers.Lambda",
"keras.layers.merge",
"numpy.floor",
"theano.tensor.cast",
"keras.layers.Input",
"theano.tensor.zeros",
"keras.models.Model",
"theano.tensor.set_subtensor",
"keras.layers.Activation",
"theano.tensor.square... | [((2957, 2986), 'keras.layers.merge', 'merge', (['[out, in_]'], {'mode': '"""sum"""'}), "([out, in_], mode='sum')\n", (2962, 2986), False, 'from keras.layers import Input, Conv2D, Activation, Lambda, UpSampling2D, merge\n'), ((3044, 3080), 'keras.layers.Input', 'Input', ([], {'tensor': 'X', 'shape': '(3, 256, 256)'}), '(tensor=X, shape=(3, 256, 256))\n', (3049, 3080), False, 'from keras.layers import Input, Conv2D, Activation, Lambda, UpSampling2D, merge\n'), ((3564, 3593), 'keras.models.Model', 'Model', ([], {'input': 'input_', 'output': 'y'}), '(input=input_, output=y)\n', (3569, 3593), False, 'from keras.models import Model\n'), ((672, 711), 'theano.tensor.cast', 'T.cast', (['(x.shape[2] * x.shape[3])', 'floatX'], {}), '(x.shape[2] * x.shape[3], floatX)\n', (678, 711), True, 'import theano.tensor as T\n'), ((1364, 1457), 'theano.tensor.zeros', 'T.zeros', (['(x.shape[0], x.shape[1], x.shape[2] + 2 * p0, x.shape[3] + 2 * p1)'], {'dtype': 'floatX'}), '((x.shape[0], x.shape[1], x.shape[2] + 2 * p0, x.shape[3] + 2 * p1),\n dtype=floatX)\n', (1371, 1457), True, 'import theano.tensor as T\n'), ((1462, 1505), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['y[:, :, p0:-p0, p1:-p1]', 'x'], {}), '(y[:, :, p0:-p0, p1:-p1], x)\n', (1477, 1505), True, 'import theano.tensor as T\n'), ((1518, 1576), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['y[:, :, :p0, p1:-p1]', 'x[:, :, p0:0:-1, :]'], {}), '(y[:, :, :p0, p1:-p1], x[:, :, p0:0:-1, :])\n', (1533, 1576), True, 'import theano.tensor as T\n'), ((1589, 1651), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['y[:, :, -p0:, p1:-p1]', 'x[:, :, -2:-2 - p0:-1]'], {}), '(y[:, :, -p0:, p1:-p1], x[:, :, -2:-2 - p0:-1])\n', (1604, 1651), True, 'import theano.tensor as T\n'), ((1662, 1720), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['y[:, :, p0:-p0, :p1]', 'x[:, :, :, p1:0:-1]'], {}), '(y[:, :, p0:-p0, :p1], x[:, :, :, p1:0:-1])\n', (1677, 1720), True, 'import theano.tensor as T\n'), 
((1733, 1798), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['y[:, :, p0:-p0, -p1:]', 'x[:, :, :, -2:-2 - p1:-1]'], {}), '(y[:, :, p0:-p0, -p1:], x[:, :, :, -2:-2 - p1:-1])\n', (1748, 1798), True, 'import theano.tensor as T\n'), ((1809, 1870), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['y[:, :, :p0, :p1]', 'x[:, :, p0:0:-1, p1:0:-1]'], {}), '(y[:, :, :p0, :p1], x[:, :, p0:0:-1, p1:0:-1])\n', (1824, 1870), True, 'import theano.tensor as T\n'), ((1883, 1951), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['y[:, :, -p0:, :p1]', 'x[:, :, -2:-2 - p0:-1, p1:0:-1]'], {}), '(y[:, :, -p0:, :p1], x[:, :, -2:-2 - p0:-1, p1:0:-1])\n', (1898, 1951), True, 'import theano.tensor as T\n'), ((1962, 2030), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['y[:, :, :p0, -p1:]', 'x[:, :, p0:0:-1, -2:-2 - p1:-1]'], {}), '(y[:, :, :p0, -p1:], x[:, :, p0:0:-1, -2:-2 - p1:-1])\n', (1977, 2030), True, 'import theano.tensor as T\n'), ((2041, 2116), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['y[:, :, -p0:, -p1:]', 'x[:, :, -2:-2 - p0:-1, -2:-2 - p1:-1]'], {}), '(y[:, :, -p0:, -p1:], x[:, :, -2:-2 - p0:-1, -2:-2 - p1:-1])\n', (2056, 2116), True, 'import theano.tensor as T\n'), ((2518, 2545), 'numpy.floor', 'np.floor', (['(filter_length / 2)'], {}), '(filter_length / 2)\n', (2526, 2545), True, 'import numpy as np\n'), ((2609, 2715), 'keras.layers.Conv2D', 'Conv2D', (['nb_filter', 'filter_length', 'filter_length'], {'subsample': '(subsample, subsample)', 'border_mode': '"""valid"""'}), "(nb_filter, filter_length, filter_length, subsample=(subsample,\n subsample), border_mode='valid')\n", (2615, 2715), False, 'from keras.layers import Input, Conv2D, Activation, Lambda, UpSampling2D, merge\n'), ((3464, 3482), 'keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (3474, 3482), False, 'from keras.layers import Input, Conv2D, Activation, Lambda, UpSampling2D, merge\n'), ((3494, 3549), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x * 150)'], 
{'output_shape': '(3, None, None)'}), '(lambda x: x * 150, output_shape=(3, None, None))\n', (3500, 3549), False, 'from keras.layers import Input, Conv2D, Activation, Lambda, UpSampling2D, merge\n'), ((2427, 2466), 'keras.layers.UpSampling2D', 'UpSampling2D', ([], {'size': '(upsample, upsample)'}), '(size=(upsample, upsample))\n', (2439, 2466), False, 'from keras.layers import Input, Conv2D, Activation, Lambda, UpSampling2D, merge\n'), ((2796, 2814), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2806, 2814), False, 'from keras.layers import Input, Conv2D, Activation, Lambda, UpSampling2D, merge\n'), ((3735, 3746), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3743, 3746), False, 'import sys\n'), ((820, 840), 'theano.tensor.square', 'T.square', (['(x - mu_vec)'], {}), '(x - mu_vec)\n', (828, 840), True, 'import theano.tensor as T\n')] |
import numpy as np
# With a Q-learning algorithm returns how good is each response.
def player0(data, Q, player, valid, learning_rate, feedback):
    """Q-learning helper: score the four actions, or apply a feedback update.

    When ``feedback`` is 0, returns the estimated value of each of the
    four possible actions for the current state.  Otherwise performs the
    Q-learning update on the current state's entry (in place) and
    returns zeros.
    """
    state = data[player]
    bet_idx = int(np.log2(state[4]))
    # Innermost row of the Q table for the current state; kept as a
    # reference so the feedback branch can mutate it in place.
    row = Q[state[0]][state[1]][state[2] - 1][state[3]]
    actual = row[bet_idx]  # how much the current state is worth
    scores = [0, 0, 0, 0]
    if feedback == 0:
        # Action 1 is only worth something when the move is illegal
        # (valid == 0); action 3 always stays at 0.
        if valid == 0:
            scores[1] = actual
        # Action 0 (raise points_hand): illegal once the stake reached 8.
        # NOTE(review): this reads data[0][4] rather than state[4] — for
        # player != 0 that looks inconsistent; confirm it is intentional.
        if data[0][4] != 8:
            scores[0] = row[bet_idx + 1]
        # Action 2: value of the state reached by adding the stake to the
        # points; states past the table boundary are worth 0.
        try:
            scores[2] = Q[state[0]][state[1] + state[4]][state[2] - 1][state[3]][bet_idx]
        except:
            scores[2] = 0
    else:
        # Temporal-difference update of the current state's value.
        row[bet_idx] += learning_rate * (feedback - actual)
    return scores
# Taking the probability of the 4 possible inputs, returns the final response.
def players(probability):
    """Sample one of the four actions in proportion to its score.

    ``probability`` holds the four (unnormalized) action scores.  A
    uniform draw is mapped onto the cumulative distribution of the
    normalized scores and the chosen action index (0-3) is returned.
    """
    total = probability[0] + probability[1] + probability[2] + probability[3]
    chance = np.array([probability[0], probability[1], probability[2], probability[3]]) / total
    draw = np.random.random()
    threshold = chance[0]
    if draw < threshold:
        return 0
    threshold = threshold + chance[1]
    if draw < threshold:
        return 1
    threshold = threshold + chance[2]
    if draw < threshold:
        return 2
    return 3
| [
"numpy.random.random",
"numpy.array",
"numpy.log2"
] | [((1902, 1920), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1918, 1920), True, 'import numpy as np\n'), ((1847, 1873), 'numpy.array', 'np.array', (['[p0, p1, p2, p3]'], {}), '([p0, p1, p2, p3])\n', (1855, 1873), True, 'import numpy as np\n'), ((248, 272), 'numpy.log2', 'np.log2', (['data[player][4]'], {}), '(data[player][4])\n', (255, 272), True, 'import numpy as np\n'), ((1500, 1524), 'numpy.log2', 'np.log2', (['data[player][4]'], {}), '(data[player][4])\n', (1507, 1524), True, 'import numpy as np\n'), ((1262, 1286), 'numpy.log2', 'np.log2', (['data[player][4]'], {}), '(data[player][4])\n', (1269, 1286), True, 'import numpy as np\n'), ((996, 1020), 'numpy.log2', 'np.log2', (['data[player][4]'], {}), '(data[player][4])\n', (1003, 1020), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plot
import matplotlib.dates as md
from matplotlib.dates import date2num
import datetime
# from pylab import *
from numpy import polyfit
import numpy as np
# Read timestamped deviation values from CSV (skipping the header row),
# then plot them together with a linear least-squares trend line.
values = []
timestamps = []
with open("deviations.csv") as f:  # close the file deterministically
    for i, line in enumerate(f):
        if i >= 1:  # skip header row
            fields = line.split(",")
            date = datetime.datetime.strptime(fields[0], '%Y-%m-%d %H:%M:%S')
            timestamps.append(date2num(date))
            # BUG FIX: values were kept as strings, which broke polyfit
            # and made matplotlib treat them as categories.
            values.append(float(fields[1].strip()))
        if i > 100000:  # cap the number of rows read
            break
plot.subplots_adjust(bottom=0.2)
plot.xticks(rotation=25)
ax = plot.gca()
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
# BUG FIX: np.array(map(...)) yields a 0-d object array under Python 3;
# build a float array directly instead.
floatValues = np.asarray(values, dtype=float)
# Degree-1 (linear) least-squares fit of value against time.
fit = polyfit(timestamps, floatValues, 1)
fit_fn = np.poly1d(fit)  # fit_fn(x) evaluates the fitted line at x
plot.plot(timestamps, floatValues, timestamps, fit_fn(timestamps), '--k')
plot.show()
| [
"matplotlib.dates.date2num",
"matplotlib.pyplot.xticks",
"numpy.polyfit",
"datetime.datetime.strptime",
"matplotlib.pyplot.gca",
"matplotlib.dates.DateFormatter",
"numpy.poly1d",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
] | [((541, 573), 'matplotlib.pyplot.subplots_adjust', 'plot.subplots_adjust', ([], {'bottom': '(0.2)'}), '(bottom=0.2)\n', (561, 573), True, 'import matplotlib.pyplot as plot\n'), ((574, 598), 'matplotlib.pyplot.xticks', 'plot.xticks', ([], {'rotation': '(25)'}), '(rotation=25)\n', (585, 598), True, 'import matplotlib.pyplot as plot\n'), ((604, 614), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (612, 614), True, 'import matplotlib.pyplot as plot\n'), ((622, 659), 'matplotlib.dates.DateFormatter', 'md.DateFormatter', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (638, 659), True, 'import matplotlib.dates as md\n'), ((792, 827), 'numpy.polyfit', 'polyfit', (['timestamps', 'floatValues', '(1)'], {}), '(timestamps, floatValues, 1)\n', (799, 827), False, 'from numpy import polyfit\n'), ((835, 849), 'numpy.poly1d', 'np.poly1d', (['fit'], {}), '(fit)\n', (844, 849), True, 'import numpy as np\n'), ((1065, 1076), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (1074, 1076), True, 'import matplotlib.pyplot as plot\n'), ((338, 399), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['lineArray[0]', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(lineArray[0], '%Y-%m-%d %H:%M:%S')\n", (364, 399), False, 'import datetime\n'), ((426, 440), 'matplotlib.dates.date2num', 'date2num', (['date'], {}), '(date)\n', (434, 440), False, 'from matplotlib.dates import date2num\n')] |
import abc
import typing
import numpy as np
class IResidualCalculator(metaclass=abc.ABCMeta):
    """Interface for computing the residual vector of a model fit."""
    @abc.abstractmethod
    def calc(self, x: np.ndarray, y: np.ndarray, beta: np.ndarray) -> np.ndarray:
        """Return the residuals for data (x, y) at parameters ``beta``."""
        pass
class IJacobiMatElemCalculator(metaclass=abc.ABCMeta):
    """Interface for one Jacobian row: the model's partial derivatives
    with respect to a single parameter, evaluated over the data."""
    @abc.abstractmethod
    def calc(self, x: np.ndarray, y: np.ndarray, beta: np.ndarray) -> np.ndarray:
        """Return this element's Jacobian entries for data (x, y) at ``beta``."""
        pass
class IInitialEstimateBetaCalculator(metaclass=abc.ABCMeta):
    """Interface providing an initial parameter guess from the data alone."""
    @abc.abstractmethod
    def calc(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:
        """Return an initial estimate of the parameter vector ``beta``."""
        pass
class ICurveFunc(metaclass=abc.ABCMeta):
    """A fitted curve: forward evaluation, inverse, and its parameters."""
    @abc.abstractmethod
    def __call__(self, x: np.ndarray) -> np.ndarray:
        """Evaluate the fitted curve at ``x``."""
        pass
    @abc.abstractmethod
    def inverse(self, y: np.ndarray) -> np.ndarray:
        """Evaluate the inverse of the fitted curve at ``y``."""
        pass
    @abc.abstractmethod
    def get_beta(self) -> np.ndarray:
        """Return the parameter vector the curve was built with."""
        pass
class IModelFunc(metaclass=abc.ABCMeta):
    """A parametric model plus factories for the calculators that the
    Levenberg-Marquardt ``Solver`` below requires."""
    @abc.abstractmethod
    def __call__(self, x: np.ndarray, beta: np.ndarray) -> np.ndarray:
        """Evaluate the model at ``x`` with parameters ``beta``."""
        pass
    @abc.abstractmethod
    def get_residual_calculator(self) -> IResidualCalculator:
        """Return the residual calculator for this model."""
        pass
    @abc.abstractmethod
    def get_jacobi_mat_elem_calculator(self) -> typing.Tuple[IJacobiMatElemCalculator, ...]:
        """Return one Jacobian-element calculator per model parameter."""
        pass
    @abc.abstractmethod
    def get_initial_estimate_beta_calculator(self) -> IInitialEstimateBetaCalculator:
        """Return the initial-guess calculator for this model."""
        pass
    @abc.abstractmethod
    def create_curve_func(self, beta: np.ndarray) -> ICurveFunc:
        """Build a curve function bound to the fitted parameters ``beta``."""
        pass
class Solver():
    """Levenberg-Marquardt least-squares fitter for an ``IModelFunc``.

    The damping factor ``lambda_`` is adapted every outer iteration: a
    step with ``lambda_/nu`` is tried first, then ``lambda_`` itself,
    and on repeated failure ``lambda_`` is inflated by powers of ``nu``.
    """
    def __init__(self, model_func: IModelFunc) -> None:
        # Cache the model and the calculators it exposes.
        self.__model_func: IModelFunc = model_func
        self.__residual_calculator: IResidualCalculator = \
            model_func.get_residual_calculator()
        # One Jacobian-element calculator per model parameter.
        self.__jacobi_mat_elem_calculator: typing.Tuple[IJacobiMatElemCalculator, ...] = \
            model_func.get_jacobi_mat_elem_calculator()
        self.__initial_estimate_beta_calculator: IInitialEstimateBetaCalculator = \
            model_func.get_initial_estimate_beta_calculator()
    def get_curve_func(self, x: np.ndarray, y: np.ndarray,
                       lambda_: float = 0.001, nu: float = 1.1,
                       rss_convergence: float = 0.000_001, max_iteration: int = 5000) -> ICurveFunc:
        """Fit the model to (x, y) and return the resulting curve function.

        Args:
            x, y: one-dimensional data arrays of equal length.
            lambda_: initial damping factor (> 0).
            nu: damping adaptation factor (> 1).
            rss_convergence: stop once the RSS improvement falls below this.
            max_iteration: cap on both the outer and the inner loop.

        Raises:
            ValueError: on malformed input arrays or invalid hyper-parameters.
        """
        # --- input validation -------------------------------------------
        if x.ndim != 1:
            raise ValueError('x is not a one-dimensional array.')
        if y.ndim != 1:
            raise ValueError('y is not a one-dimensional array.')
        if x.shape != y.shape:
            raise ValueError('The number of data for x and y do not match.')
        if len(self.__jacobi_mat_elem_calculator) > x.shape[0] or \
                len(self.__jacobi_mat_elem_calculator) > y.shape[0]:
            raise ValueError('The number of x and y data is not enough.')
        if lambda_ <= 0.:
            raise ValueError('lambda is 0 or less.')
        if nu <= 1.:
            raise ValueError('nu is 1 or less.')
        if rss_convergence <= 0.:
            raise ValueError('rss_convergence is 0 or less.')
        if max_iteration <= 0:
            raise ValueError('max_iteration is 0 or less.')
        # --- initial guess ----------------------------------------------
        beta: np.ndarray = self.__initial_estimate_beta_calculator.calc(x, y)
        if beta.ndim != 1:
            raise ValueError('beta is not a one-dimensional array.')
        if len(self.__jacobi_mat_elem_calculator) > beta.shape[0]:
            raise ValueError('The number of beta parameters is not enough.')
        is_converge: bool = False
        # Residual sum of squares for a candidate parameter vector.
        def calc_rss(beta: np.ndarray) -> float:
            return np.sum(np.square(self.__residual_calculator.calc(x, y, beta)))
        rss: float = calc_rss(beta)
        for _ in range(max_iteration):
            # Build the transposed Jacobian: one stacked row per parameter.
            jacobi_mat_t: np.ndarray = None
            for elem_calculator in self.__jacobi_mat_elem_calculator:
                if jacobi_mat_t is None:
                    jacobi_mat_t = elem_calculator.calc(x, y, beta)
                else:
                    jacobi_mat_t = np.vstack(
                        (jacobi_mat_t, elem_calculator.calc(x, y, beta)))
            jacobi_mat: np.ndarray = jacobi_mat_t.T
            # Gauss-Newton approximation of the Hessian: J^T J.
            approx_hesse_mat: np.ndarray = np.matmul(jacobi_mat_t, jacobi_mat)
            residual: np.ndarray = self.__residual_calculator.calc(x, y, beta)
            # Damped Gauss-Newton step: beta - (J^T J + lambda I)^+ J^T r.
            def calc_next_beta(lambda_: float) -> np.ndarray:
                return beta - np.matmul(np.matmul(np.linalg.pinv(
                    approx_hesse_mat + (lambda_ * np.identity(beta.shape[0]))), jacobi_mat_t), residual)
            for k in range(max_iteration):
                # First try the less-damped step (lambda / nu).
                beta_lambda_div_nu: np.ndarray = calc_next_beta(lambda_ / nu)
                rss_lambda_div_nu: float = calc_rss(beta_lambda_div_nu)
                if rss_lambda_div_nu <= rss:
                    if rss - rss_lambda_div_nu < rss_convergence:
                        is_converge = True
                    lambda_ = lambda_ / nu
                    beta = beta_lambda_div_nu
                    rss = rss_lambda_div_nu
                    break
                else:
                    # Fall back to the current damping level.
                    beta_lambda: np.ndarray = calc_next_beta(lambda_)
                    rss_lambda: float = calc_rss(beta_lambda)
                    if rss_lambda <= rss:
                        if rss - rss_lambda < rss_convergence:
                            is_converge = True
                        beta = beta_lambda
                        rss = rss_lambda
                        break
                    else:
                        # Both steps failed: inflate the damping and retry.
                        lambda_ = lambda_ * (nu ** (k + 2))
            if is_converge == True:
                break
        return self.__model_func.create_curve_func(beta)
| [
"numpy.identity",
"numpy.matmul"
] | [((4077, 4112), 'numpy.matmul', 'np.matmul', (['jacobi_mat_t', 'jacobi_mat'], {}), '(jacobi_mat_t, jacobi_mat)\n', (4086, 4112), True, 'import numpy as np\n'), ((4371, 4397), 'numpy.identity', 'np.identity', (['beta.shape[0]'], {}), '(beta.shape[0])\n', (4382, 4397), True, 'import numpy as np\n')] |
from datetime import datetime, timedelta
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
import sys
from os.path import dirname, abspath
sys.path.insert(0,dirname(dirname(dirname(abspath(__file__)))))
from fleet_request import FleetRequest
from grid_info import GridInfo
from fleets.electric_vehicles_fleet.electric_vehicles_fleet import ElectricVehiclesFleet
def main(ts, grid):
    """Run a 24-hour hourly forecast for an EV fleet and unpack the results.

    Args:
        ts: simulation start timestamp (datetime).
        grid: a GridInfo instance describing the grid.

    Returns:
        A tuple of eight numpy arrays, one entry per hour: charge and
        discharge efficiencies, energy, capacity, min/max service power,
        and max/min to-grid power.
    """
    # Instantiate the fleet model for this grid and start time.
    fleet_test = ElectricVehiclesFleet(grid, ts)
    dt = 3600 * 1  # time step (seconds)
    sim_step = timedelta(seconds=dt)
    seconds_of_simulation = 24 * 3600  # one day
    local_time = fleet_test.get_time_of_the_day(ts)
    t = np.arange(local_time, local_time + seconds_of_simulation, dt)  # seconds of day
    # Power requested (kW): test configuration.
    fleet_test.is_autonomous = False
    fleet_test.is_P_priority = True
    # One request per hourly step of the simulation window.
    requests = [FleetRequest(ts + i * sim_step, sim_step, ts, None, 0.)
                for i in range(len(t))]
    FORECAST = fleet_test.forecast(requests)
    # Unpack every forecast attribute into its own hourly array.
    n = len(t)
    eff_charging = np.array([FORECAST[i].Eff_charge for i in range(n)], dtype=float)
    eff_discharging = np.array([FORECAST[i].Eff_discharge for i in range(n)], dtype=float)
    energy = np.array([FORECAST[i].E for i in range(n)], dtype=float)
    capacity = np.array([FORECAST[i].C for i in range(n)], dtype=float)
    min_power_service = np.array([FORECAST[i].P_service_min for i in range(n)], dtype=float)
    max_power_service = np.array([FORECAST[i].P_service_max for i in range(n)], dtype=float)
    min_power_togrid = np.array([FORECAST[i].P_togrid_min for i in range(n)], dtype=float)
    max_power_togrid = np.array([FORECAST[i].P_togrid_max for i in range(n)], dtype=float)
    return (eff_charging, eff_discharging, energy, capacity,
            min_power_service, max_power_service,
            max_power_togrid, min_power_togrid)
if __name__ == "__main__":
    # BUG FIX: a dedicated name for the script directory, so we no longer
    # shadow the `dirname` function imported from os.path above.
    script_dir = os.path.dirname(__file__)
    # Time stamp at which the simulation starts.
    ts = datetime(2018, 9, 20, 0, 0, 0, 0)
    grid = GridInfo('Grid_Info_DATA_2.csv')
    e_in, e_out, e, c, p_serv_min, p_serv_max, p_min, p_max = main(ts, grid)
    # Roundtrip efficiency (%) for every (charge hour, discharge hour) pair;
    # the diagonal (same hour in and out) is zeroed.
    rt = np.multiply.outer(e_in * 0.01, e_out * 0.01) * 100
    np.fill_diagonal(rt, 0)
    # Persist the roundtrip efficiency matrix next to this script.
    np.savetxt(os.path.join(script_dir, 'data/roundtrip_efficiency_electric_vehicle.csv'), rt, delimiter=",")
    fig, ax = plt.subplots()
    ax = sns.heatmap(rt, annot=False, linewidths=.5)
    ax.set_title('Roundtrip efficiency matrix for EVs')
    ax.set_xlabel(r'$t_i$ (hr)')
    ax.set_ylabel(r'$t_j$ (hr)')
| [
"datetime.datetime",
"os.path.join",
"numpy.fill_diagonal",
"seaborn.heatmap",
"numpy.multiply.outer",
"os.path.dirname",
"fleets.electric_vehicles_fleet.electric_vehicles_fleet.ElectricVehiclesFleet",
"fleet_request.FleetRequest",
"grid_info.GridInfo",
"datetime.timedelta",
"os.path.abspath",
... | [((509, 540), 'fleets.electric_vehicles_fleet.electric_vehicles_fleet.ElectricVehiclesFleet', 'ElectricVehiclesFleet', (['grid', 'ts'], {}), '(grid, ts)\n', (530, 540), False, 'from fleets.electric_vehicles_fleet.electric_vehicles_fleet import ElectricVehiclesFleet\n'), ((598, 619), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'dt'}), '(seconds=dt)\n', (607, 619), False, 'from datetime import datetime, timedelta\n'), ((733, 794), 'numpy.arange', 'np.arange', (['local_time', '(local_time + seconds_of_simulation)', 'dt'], {}), '(local_time, local_time + seconds_of_simulation, dt)\n', (742, 794), True, 'import numpy as np\n'), ((2141, 2166), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2156, 2166), False, 'import os\n'), ((2217, 2250), 'datetime.datetime', 'datetime', (['(2018)', '(9)', '(20)', '(0)', '(0)', '(0)', '(0)'], {}), '(2018, 9, 20, 0, 0, 0, 0)\n', (2225, 2250), False, 'from datetime import datetime, timedelta\n'), ((2273, 2305), 'grid_info.GridInfo', 'GridInfo', (['"""Grid_Info_DATA_2.csv"""'], {}), "('Grid_Info_DATA_2.csv')\n", (2281, 2305), False, 'from grid_info import GridInfo\n'), ((2497, 2520), 'numpy.fill_diagonal', 'np.fill_diagonal', (['rt', '(0)'], {}), '(rt, 0)\n', (2513, 2520), True, 'import numpy as np\n'), ((2689, 2703), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2701, 2703), True, 'import matplotlib.pyplot as plt\n'), ((2713, 2757), 'seaborn.heatmap', 'sns.heatmap', (['rt'], {'annot': '(False)', 'linewidths': '(0.5)'}), '(rt, annot=False, linewidths=0.5)\n', (2724, 2757), True, 'import seaborn as sns\n'), ((1014, 1070), 'fleet_request.FleetRequest', 'FleetRequest', (['(ts + i * sim_step)', 'sim_step', 'ts', 'None', '(0.0)'], {}), '(ts + i * sim_step, sim_step, ts, None, 0.0)\n', (1026, 1070), False, 'from fleet_request import FleetRequest\n'), ((2448, 2492), 'numpy.multiply.outer', 'np.multiply.outer', (['(e_in * 0.01)', '(e_out * 0.01)'], {}), '(e_in * 0.01, e_out * 0.01)\n', 
(2465, 2492), True, 'import numpy as np\n'), ((2579, 2650), 'os.path.join', 'os.path.join', (['dirname', '"""data/roundtrip_efficiency_electric_vehicle.csv"""'], {}), "(dirname, 'data/roundtrip_efficiency_electric_vehicle.csv')\n", (2591, 2650), False, 'import os\n'), ((215, 232), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (222, 232), False, 'from os.path import dirname, abspath\n')] |
# in shell
import os, sys
simfempypath = os.path.abspath(os.path.join(__file__, os.path.pardir, os.path.pardir, os.path.pardir, os.path.pardir,'simfempy'))
sys.path.insert(0,simfempypath)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pygmsh
from simfempy.applications.stokes import Stokes
from simfempy.applications.navierstokes import NavierStokes
from simfempy.applications.problemdata import ProblemData
from simfempy.meshes.simplexmesh import SimplexMesh
from simfempy.meshes import plotmesh
from simfempy.tools import timer
# ================================================================c#
def main(**kwargs):
    """Build a test-case mesh, solve a (Navier-)Stokes problem, plot results.

    Keyword arguments consumed here: ``testcase`` (name of a mesh/data
    factory defined in this module), ``model`` ('Stokes' or
    'NavierStokes'), ``bdryplot`` (only show the mesh boundaries and
    return), ``plot`` (3-D only: open the written vtu file in pyvista),
    ``modelargs`` (extra kwargs for the model constructor).  All remaining
    kwargs are forwarded to the test-case factory.
    """
    modelargs = kwargs.pop('modelargs', {})
    testcases = ['drivenCavity', 'backwardFacingStep', 'poiseuille', 'schaeferTurek']
    testcase = kwargs.pop('testcase', testcases[0])
    model = kwargs.pop('model', 'NavierStokes')
    bdryplot = kwargs.pop('bdryplot', False)
    plot = kwargs.pop('plot', False)
    # create mesh and data
    t = timer.Timer("mesh")
    # HARDENING: look the factory up in the module namespace instead of
    # eval()-ing an arbitrary string, and fail with a clear message.
    try:
        factory = globals()[testcase]
    except KeyError:
        raise ValueError(f"unknown testcase {testcase!r}")
    mesh, data = factory(**kwargs)
    t.add('pygmsh')
    mesh = SimplexMesh(mesh)
    t.add('SimplexMesh')
    print(t)
    print(f"{mesh=}")
    if bdryplot:
        plotmesh.meshWithBoundaries(mesh)
        plt.show()
        return
    # create application
    if model == "Stokes":
        model = Stokes(mesh=mesh, problemdata=data, **modelargs)
    else:
        model = NavierStokes(mesh=mesh, problemdata=data, **modelargs)
    result = model.solve()
    print(f"{result.info['timer']}")
    print("postproc:")
    for k, v in result.data['global'].items(): print(f"{k}: {v}")
    if mesh.dimension == 2:
        fig = plt.figure(figsize=(10, 8))
        outer = gridspec.GridSpec(1, 2, wspace=0.2, hspace=0.2)
        plotmesh.meshWithData(mesh, data=result.data, title="Stokes", fig=fig, outer=outer[0])
        plotmesh.meshWithData(mesh, title="Stokes", fig=fig, outer=outer[1],
                              quiver_data={"V": list(result.data['point'].values())})
        plt.show()
    else:
        # 3-D results go to a vtu file; optionally view them in pyvista.
        filename = testcase + '.vtu'
        mesh.write(filename, data=result.data)
        if plot:
            import pyvista as pv
            mesh = pv.read(filename)
            cpos = mesh.plot()
# ================================================================ #
def poiseuille2d(h=0.1, mu=0.1):
    """2-D Poiseuille channel-flow test case (returns mesh + problem data)."""
    # Mesh: a 4x1 rectangle; surface labelled 100, the four sides 1000-1003.
    with pygmsh.geo.Geometry() as geom:
        rect = geom.add_rectangle(xmin=0, xmax=4, ymin=0, ymax=1, z=0, mesh_size=h)
        geom.add_physical(rect.surface, label="100")
        for k, side in enumerate(rect.lines):
            geom.add_physical(side, label=f"{1000 + k}")
        mesh = geom.generate_mesh()
    data = ProblemData()
    # Boundary conditions: parabolic velocity prescribed on 1003,
    # pressure condition on 1001, Dirichlet on the remaining sides.
    data.bdrycond.set("Dirichlet", [1002, 1000, 1003])
    data.bdrycond.set("Neumann", [])
    data.bdrycond.set("Navier", [])
    data.bdrycond.set("Pressure", [1001])
    data.bdrycond.fct[1003] = [lambda x, y, z: 4*y*(1-y), lambda x, y, z: 0]
    # Physical parameters.
    data.params.scal_glob["mu"] = mu
    data.params.scal_glob["navier"] = 1.01
    # TODO pass ncomp with mesh ?!
    data.ncomp = 2
    return mesh, data
# ================================================================ #
def poiseuille3d(h= 0.1, mu=0.1):
    """3-D Poiseuille duct-flow test case (returns mesh + problem data)."""
    with pygmsh.geo.Geometry() as geom:
        p = geom.add_rectangle(xmin=0, xmax=4, ymin=0, ymax=1, z=0, mesh_size=h)
        # Extrude the rectangle one unit in z to obtain the duct volume.
        axis = [0, 0, 1]
        top, vol, lat = geom.extrude(p.surface, axis)
        # Walls (100); face 103 gets a velocity profile and 101 a Neumann
        # condition below.
        geom.add_physical([top, p.surface, lat[0], lat[2]], label="100")
        geom.add_physical(lat[1], label="101")
        geom.add_physical(lat[3], label="103")
        geom.add_physical(vol, label="10")
        mesh = geom.generate_mesh()
    data = ProblemData()
    # boundary conditions
    data.bdrycond.set("Dirichlet", [100, 103])
    data.bdrycond.set("Neumann", [101])
    # Parabolic profile over the (y, z) cross-section on face 103.
    data.bdrycond.fct[103] = [lambda x, y, z: 16*y*(1-y)*z*(1-z), lambda x, y, z: 0, lambda x, y, z: 0]
    # parameters
    data.params.scal_glob["mu"] = mu
    data.ncomp = 3
    return mesh, data
# ================================================================c#
def drivenCavity2d(h=0.1, mu=0.01):
    """2-D lid-driven cavity test case (returns mesh + problem data)."""
    with pygmsh.geo.Geometry() as geom:
        # Per-corner mesh sizes: the last two corners are meshed finer.
        corner_sizes = [h * 1., h * 1., h * 0.2, h * 0.2]
        square = geom.add_rectangle(xmin=0, xmax=1, ymin=0, ymax=1, z=0, mesh_size=corner_sizes)
        geom.add_physical(square.surface, label="100")
        # The lid (line 2) gets its own label; the other three walls
        # share one.
        geom.add_physical(square.lines[2], label="1002")
        geom.add_physical([square.lines[0], square.lines[1], square.lines[3]], label="1000")
        mesh = geom.generate_mesh()
    data = ProblemData()
    # Fixed walls (1000) and unit tangential velocity on the lid (1002).
    data.bdrycond.set("Dirichlet", [1000, 1002])
    data.bdrycond.fct[1002] = [lambda x, y, z: 1, lambda x, y, z: 0]
    # Physical parameters.
    data.params.scal_glob["mu"] = mu
    data.params.scal_glob["navier"] = mu
    data.ncomp = 2
    return mesh, data
# ================================================================c#
def drivenCavity3d(h=0.1, mu=0.01):
    """3-D lid-driven cavity test case (returns mesh + problem data)."""
    with pygmsh.geo.Geometry() as geom:
        p = geom.add_rectangle(xmin=0, xmax=1, ymin=0, ymax=1, z=0, mesh_size=h)
        # Extrude the unit square one unit in z to get the unit cube.
        axis = [0, 0, 1]
        top, vol, lat = geom.extrude(p.surface, axis)
        # Moving lid (top face, 102) versus the five fixed walls (100).
        geom.add_physical(top, label="102")
        geom.add_physical([p.surface, lat[0], lat[1], lat[2], lat[3]], label="100")
        geom.add_physical(vol, label="10")
        mesh = geom.generate_mesh()
    data = ProblemData()
    # boundary conditions
    data.bdrycond.set("Dirichlet", [100, 102])
    # Unit velocity in x on the lid, zero in the other components.
    data.bdrycond.fct[102] = [lambda x, y, z: 1, lambda x, y, z: 0, lambda x, y, z: 0]
    # parameters
    data.params.scal_glob["mu"] = mu
    data.params.scal_glob["navier"] = mu
    data.ncomp = 3
    return mesh, data
# ================================================================ #
def backwardFacingStep2d(h=0.2, mu=0.02):
    """2-D backward-facing step test case (returns mesh + problem data)."""
    # L-shaped domain: inlet channel y in [0, 1], step down to y = -1 at x = 0.
    corners = [
        [-1.0, 1.0],
        [-1.0, 0.0],
        [0.0, 0.0],
        [0.0, -1.0],
        [3.0, -1.0],
        [3.0, 1.0],
    ]
    with pygmsh.geo.Geometry() as geom:
        poly = geom.add_polygon(points=np.insert(np.array(corners), 2, 0, axis=1), mesh_size=h)
        geom.add_physical(poly.surface, label="100")
        # Line 0 gets a velocity profile (1000) and line 4 a pressure
        # condition (1004) below; everything else is a wall (1002).
        walls = [seg for i, seg in enumerate(poly.lines) if i not in (0, 4)]
        geom.add_physical(walls, "1002")
        geom.add_physical(poly.lines[0], "1000")
        geom.add_physical(poly.lines[4], "1004")
        mesh = geom.generate_mesh()
    data = ProblemData()
    # Parabolic velocity on 1000, pressure on 1004, Dirichlet walls 1002.
    data.bdrycond.set("Dirichlet", [1000, 1002])
    data.bdrycond.set("Pressure", [1004])
    data.bdrycond.fct[1000] = [lambda x, y, z: y*(1-y), lambda x, y, z: 0]
    # Physical parameters.
    data.params.scal_glob["mu"] = mu
    data.ncomp = 2
    return mesh, data
# ================================================================ #
def backwardFacingStep3d(h=0.2, mu=0.02):
    """3-D backward-facing step test case (returns mesh + problem data)."""
    with pygmsh.geo.Geometry() as geom:
        # L-shaped cross-section of the step; extruded in z below.
        X = []
        X.append([-1.0, 1.0])
        X.append([-1.0, 0.0])
        X.append([0.0, 0.0])
        X.append([0.0, -1.0])
        X.append([3.0, -1.0])
        X.append([3.0, 1.0])
        p = geom.add_polygon(points=np.insert(np.array(X), 2, 0, axis=1), mesh_size=h)
        axis = [0, 0, 1]
        top, vol, lat = geom.extrude(p.surface, axis)
        # Walls (100): all lateral faces except lat[0] (102, velocity
        # prescribed below) and lat[4] (104, pressure condition), plus the
        # two faces created by the extrusion (p.surface and top).
        dirf = [lat[i] for i in range(1,6) if i!=4 ]
        dirf.extend([p.surface, top])
        geom.add_physical(dirf, label="100")
        geom.add_physical(lat[0], label="102")
        geom.add_physical(lat[4], label="104")
        geom.add_physical(vol, label="10")
        mesh = geom.generate_mesh()
    data = ProblemData()
    # boundary conditions
    data.bdrycond.set("Dirichlet", [100, 102])
    data.bdrycond.set("Pressure", [104])
    # Parabolic profile over the (y, z) cross-section of face 102.
    data.bdrycond.fct[102] = [lambda x, y, z: y*(1-y)*z*(1-z), lambda x, y, z: 0, lambda x, y, z: 0]
    # parameters
    data.params.scal_glob["mu"] = mu
    data.ncomp = 3
    return mesh, data
# ================================================================ #
def schaeferTurek2d(h= 0.5, hcircle=None):
    """Schaefer-Turek 2-D flow-around-a-cylinder benchmark.

    Channel of size 11 x 4.1 with a cylinder of radius 0.5 centred at
    (2, 2); postprocessing turns the boundary flux on the cylinder into
    drag/lift values and their deviation from hard-coded reference values.
    """
    if hcircle is None: hcircle = 0.2*h
    with pygmsh.geo.Geometry() as geom:
        circle = geom.add_circle(x0=[2,2], radius=0.5, mesh_size=hcircle, num_sections=10, make_surface=False)
        geom.add_physical(circle.curve_loop.curves, label="3000")
        p = geom.add_rectangle(xmin=0, xmax=11, ymin=0, ymax=4.1, z=0, mesh_size=h, holes=[circle])
        geom.add_physical(p.surface, label="100")
        for i in range(len(p.lines)): geom.add_physical(p.lines[i], label=f"{1000 + i}")
        mesh = geom.generate_mesh()
    data = ProblemData()
    # boundary conditions
    data.bdrycond.set("Dirichlet", [1002,1000,1003,3000])
    data.bdrycond.set("Neumann", [1001])
    # Parabolic profile on 1003 with peak velocity 0.3 at mid-height.
    data.bdrycond.fct[1003] = [lambda x, y, z: 0.3*y*(4.1-y)/2.05**2, lambda x, y, z: 0]
    data.params.scal_glob["mu"] = 0.01
    # Integrate the normal flux over the cylinder boundary (label 3000).
    data.postproc.set(name='bdrynflux', type='bdry_nflux', colors=3000)
    def changepostproc(info):
        # Scale the two flux components into drag/lift and report their
        # deviation from the hard-coded reference values.
        bdrynflux = info.pop('bdrynflux')
        info['drag'] = -50*bdrynflux[0]
        info['lift'] = -50*bdrynflux[1]
        info['err_drag'] = 5.57953523384+50*bdrynflux[0]
        info['err_lift'] = 0.010618937712+50*bdrynflux[1]
    data.postproc.changepostproc = changepostproc
    data.ncomp = 2
    return mesh, data
# ================================================================ #
def schaeferTurek3d(h= 1, hcircle=None):
    """Schaefer-Turek 3-D flow-around-a-cylinder benchmark.

    A cylinder of radius 0.5 (circle at x=5, y=2, extruded in z) inside a
    25 x 4.1 x 4.1 channel; the normal flux over the cylinder surface
    (label 300) and the mean velocity over faces 101/103 are reported.
    """
    if hcircle is None: hcircle = 0.25*h
    with pygmsh.geo.Geometry() as geom:
        circle = geom.add_circle(x0=[5,2], radius=0.5, mesh_size=hcircle, num_sections=8, make_surface=False)
        p = geom.add_rectangle(xmin=0, xmax=25, ymin=0, ymax=4.1, z=0, mesh_size=h, holes=[circle])
        axis = [0, 0, 4.1]
        top, vol, lat = geom.extrude(p.surface, axis)
        # Channel walls (100); 103 gets the velocity profile below, 101 a
        # Neumann condition; the cylinder faces are collected under 300.
        geom.add_physical([top,p.surface, lat[0], lat[2]], label="100")
        geom.add_physical(lat[1], label="101")
        geom.add_physical(lat[3], label="103")
        geom.add_physical(lat[4:], label="300")
        geom.add_physical(vol, label="10")
        mesh = geom.generate_mesh()
    data = ProblemData()
    # boundary conditions
    data.bdrycond.set("Dirichlet", [100,103,300])
    data.bdrycond.set("Neumann", [101])
    # Parabolic profile with peak velocity 0.45 at the channel centre.
    data.bdrycond.fct[103] = [lambda x, y, z: 0.45*y*(4.1-y)*z*(4.1-z)/2.05**4, lambda x, y, z: 0, lambda x, y, z: 0]
    data.params.scal_glob["mu"] = 0.01
    # Normal flux over the cylinder and mean velocity over faces 101/103.
    data.postproc.set(name='bdrynflux', type='bdry_nflux', colors=300)
    data.postproc.set(name='mean', type='bdry_vmean', colors=[101,103])
    data.ncomp = 3
    return mesh, data
#================================================================#
if __name__ == '__main__':
    # Run one benchmark configuration; the commented-out calls below are
    # alternative configurations kept for quick switching.
    # main(testcase='poiseuille2d', h=0.2, mu=1e-6, modelargs={'convmethod':'lps', 'divdivparam':1., 'hdivpenalty':0.1, 'precond_v':'spsolve'})
    # main(testcase='drivenCavity2d', h=1, mu=3e-2, precond_p='schur')
    # main(testcase='backwardFacingStep2d', mu=2e-3)
    main(testcase='backwardFacingStep3d', mu=2e-2)
    # main(testcase='schaeferTurek2d')
    # main(testcase='poiseuille3d', h=0.2, mu=1e-3)
    # main(testcase='drivenCavity3d', mu=0.001, precond_p='schur')
    # main(testcase='schaeferTurek3d', h=0.5, bdryplot=False, model='Stokes', plot=False)
    # main(testcase='schaeferTurek3d', h=0.5, bdryplot=False, linearsolver='gcrotmk_1', model='Stokes', plot=False)
    # main(testcase='schaeferTurek3d', h=0.95, bdryplot=False, linearsolver='spsolve', model='Stokes', plot=False)
| [
"pygmsh.geo.Geometry",
"sys.path.insert",
"simfempy.applications.navierstokes.NavierStokes",
"simfempy.applications.problemdata.ProblemData",
"os.path.join",
"simfempy.meshes.plotmesh.meshWithBoundaries",
"simfempy.meshes.plotmesh.meshWithData",
"simfempy.applications.stokes.Stokes",
"matplotlib.pyp... | [((156, 188), 'sys.path.insert', 'sys.path.insert', (['(0)', 'simfempypath'], {}), '(0, simfempypath)\n', (171, 188), False, 'import os, sys\n'), ((57, 160), 'os.path.join', 'os.path.join', (['__file__', 'os.path.pardir', 'os.path.pardir', 'os.path.pardir', 'os.path.pardir', '"""simfempy"""'], {}), "(__file__, os.path.pardir, os.path.pardir, os.path.pardir, os.\n path.pardir, 'simfempy')\n", (69, 160), False, 'import os, sys\n'), ((1129, 1148), 'simfempy.tools.timer.Timer', 'timer.Timer', (['"""mesh"""'], {}), "('mesh')\n", (1140, 1148), False, 'from simfempy.tools import timer\n'), ((1222, 1239), 'simfempy.meshes.simplexmesh.SimplexMesh', 'SimplexMesh', (['mesh'], {}), '(mesh)\n', (1233, 1239), False, 'from simfempy.meshes.simplexmesh import SimplexMesh\n'), ((2850, 2863), 'simfempy.applications.problemdata.ProblemData', 'ProblemData', ([], {}), '()\n', (2861, 2863), False, 'from simfempy.applications.problemdata import ProblemData\n'), ((3867, 3880), 'simfempy.applications.problemdata.ProblemData', 'ProblemData', ([], {}), '()\n', (3878, 3880), False, 'from simfempy.applications.problemdata import ProblemData\n'), ((4691, 4704), 'simfempy.applications.problemdata.ProblemData', 'ProblemData', ([], {}), '()\n', (4702, 4704), False, 'from simfempy.applications.problemdata import ProblemData\n'), ((5542, 5555), 'simfempy.applications.problemdata.ProblemData', 'ProblemData', ([], {}), '()\n', (5553, 5555), False, 'from simfempy.applications.problemdata import ProblemData\n'), ((6590, 6603), 'simfempy.applications.problemdata.ProblemData', 'ProblemData', ([], {}), '()\n', (6601, 6603), False, 'from simfempy.applications.problemdata import ProblemData\n'), ((8086, 8099), 'simfempy.applications.problemdata.ProblemData', 'ProblemData', ([], {}), '()\n', (8097, 8099), False, 'from simfempy.applications.problemdata import ProblemData\n'), ((9065, 9078), 'simfempy.applications.problemdata.ProblemData', 'ProblemData', ([], {}), '()\n', (9076, 9078), 
False, 'from simfempy.applications.problemdata import ProblemData\n'), ((10554, 10567), 'simfempy.applications.problemdata.ProblemData', 'ProblemData', ([], {}), '()\n', (10565, 10567), False, 'from simfempy.applications.problemdata import ProblemData\n'), ((1326, 1359), 'simfempy.meshes.plotmesh.meshWithBoundaries', 'plotmesh.meshWithBoundaries', (['mesh'], {}), '(mesh)\n', (1353, 1359), False, 'from simfempy.meshes import plotmesh\n'), ((1368, 1378), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1376, 1378), True, 'import matplotlib.pyplot as plt\n'), ((1461, 1509), 'simfempy.applications.stokes.Stokes', 'Stokes', ([], {'mesh': 'mesh', 'problemdata': 'data'}), '(mesh=mesh, problemdata=data, **modelargs)\n', (1467, 1509), False, 'from simfempy.applications.stokes import Stokes\n'), ((1612, 1666), 'simfempy.applications.navierstokes.NavierStokes', 'NavierStokes', ([], {'mesh': 'mesh', 'problemdata': 'data'}), '(mesh=mesh, problemdata=data, **modelargs)\n', (1624, 1666), False, 'from simfempy.applications.navierstokes import NavierStokes\n'), ((1862, 1889), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (1872, 1889), True, 'import matplotlib.pyplot as plt\n'), ((1906, 1953), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {'wspace': '(0.2)', 'hspace': '(0.2)'}), '(1, 2, wspace=0.2, hspace=0.2)\n', (1923, 1953), True, 'import matplotlib.gridspec as gridspec\n'), ((1962, 2052), 'simfempy.meshes.plotmesh.meshWithData', 'plotmesh.meshWithData', (['mesh'], {'data': 'result.data', 'title': '"""Stokes"""', 'fig': 'fig', 'outer': 'outer[0]'}), "(mesh, data=result.data, title='Stokes', fig=fig,\n outer=outer[0])\n", (1983, 2052), False, 'from simfempy.meshes import plotmesh\n'), ((2217, 2227), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2225, 2227), True, 'import matplotlib.pyplot as plt\n'), ((2552, 2573), 'pygmsh.geo.Geometry', 'pygmsh.geo.Geometry', ([], {}), '()\n', (2571, 
2573), False, 'import pygmsh\n'), ((3419, 3440), 'pygmsh.geo.Geometry', 'pygmsh.geo.Geometry', ([], {}), '()\n', (3438, 3440), False, 'import pygmsh\n'), ((4307, 4328), 'pygmsh.geo.Geometry', 'pygmsh.geo.Geometry', ([], {}), '()\n', (4326, 4328), False, 'import pygmsh\n'), ((5133, 5154), 'pygmsh.geo.Geometry', 'pygmsh.geo.Geometry', ([], {}), '()\n', (5152, 5154), False, 'import pygmsh\n'), ((5972, 5993), 'pygmsh.geo.Geometry', 'pygmsh.geo.Geometry', ([], {}), '()\n', (5991, 5993), False, 'import pygmsh\n'), ((7012, 7033), 'pygmsh.geo.Geometry', 'pygmsh.geo.Geometry', ([], {}), '()\n', (7031, 7033), False, 'import pygmsh\n'), ((8571, 8592), 'pygmsh.geo.Geometry', 'pygmsh.geo.Geometry', ([], {}), '()\n', (8590, 8592), False, 'import pygmsh\n'), ((9928, 9949), 'pygmsh.geo.Geometry', 'pygmsh.geo.Geometry', ([], {}), '()\n', (9947, 9949), False, 'import pygmsh\n'), ((2389, 2406), 'pyvista.read', 'pv.read', (['filename'], {}), '(filename)\n', (2396, 2406), True, 'import pyvista as pv\n'), ((6242, 6253), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (6250, 6253), True, 'import numpy as np\n'), ((7282, 7293), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (7290, 7293), True, 'import numpy as np\n')] |
"""Module that contains the command line app."""
import click
import importlib
import numpy as np
import toml
from astropy.units import Quantity
from pathlib import Path
from rich import box
from rich.console import Console
from rich.panel import Panel
from rich.rule import Rule
from rich.table import Table
from time import time
import hmf
from hmf.helpers.functional import get_hmf
from .helpers.cfg_utils import framework_to_dict
# Shared Rich console for all CLI output; a fixed width keeps panels/tables stable.
console = Console(width=100)
def _get_config(config=None):
if config is None:
return {}
with open(config, "r") as fl:
cfg = toml.load(fl)
# Import an actual framework.
fmwk = cfg.get("framework", None)
if fmwk:
mod, cls = fmwk.rsplit(".", maxsplit=1)
cfg["framework"] = getattr(importlib.import_module(mod), cls)
return cfg
def _ctx_to_dct(args):
dct = {}
j = 0
while j < len(args):
arg = args[j]
if "=" in arg:
a = arg.split("=")
k = a[0].replace("--", "")
v = a[-1]
j += 1
else:
k = arg.replace("--", "")
v = args[j + 1]
j += 2
try:
# For most arguments, this will convert it to the right type.
v = eval(v)
except NameError:
# If it's supposed to be a string, but quotes weren't supplied.
v = eval('"' + v + '"')
dct[k] = v
return dct
def _process_dct(dct):
out = {}
for k, v in dct.items():
if isinstance(v, dict):
if set(v.keys()) == {"unit", "value"}:
v = Quantity(v["value"], v["unit"])
else:
v = _process_dct(v)
out[k] = v
return out
# Root click command group; subcommands (e.g. ``run`` below) attach to it.
main = click.Group()
@main.command(
    context_settings={  # Doing this allows arbitrary options to override config
        "ignore_unknown_options": True,
        "allow_extra_args": True,
    }
)
@click.option(
    "-i",
    "--config",
    type=click.Path(exists=True, dir_okay=False),
    default=None,
)
@click.option(
    "-o",
    "--outdir",
    type=click.Path(exists=True, dir_okay=True, file_okay=True),
    default=".",
)
@click.option(
    "-l",
    "--label",
    type=str,
    default="hmf",
)
@click.pass_context
def run(ctx, config, outdir, label):
    """Calculate quantities using hmf and output to a file.

    Parameters
    ----------
    ctx :
        A parameter from the parent CLI function to be able to override config.
    config : str
        Path to the configuration file.
    outdir : str
        Directory the output files are written to (must already exist).
    label : str
        Fallback base name for the output files.
    """
    run_cli(config, "hmf", ctx.args, outdir, label, [hmf], hmf.MassFunction)
def run_cli(config, pkg_name, args, outdir, label, pkgs, default_framework):
    """Run the CLI command.

    Parameters
    ----------
    config : str or None
        Path to a TOML configuration file (see ``_get_config``).
    pkg_name : str
        Display name used in the welcome banner.
    args : list of str
        Extra CLI arguments overriding config parameters (``--key=value``).
    outdir : str
        Directory that quantity and config files are written into.
    label : str
        Fallback base name for output files when a run yields no label.
    pkgs : list
        Modules whose name/version are reported at startup.
    default_framework :
        Framework class used when the config does not specify one.
    """
    console.print(
        Panel(f"Welcome to {pkg_name}!", box=box.DOUBLE_EDGE),
        style="bold",
        justify="center",
    )
    console.print()
    for pkg in pkgs:
        console.print(
            f"Using {pkg.__name__} version [blue]{pkg.__version__}[/blue]",
            style="strong",
        )
    cfg = _get_config(config)
    # Update the file-based config with options given on the CLI.
    if "params" not in cfg:
        cfg["params"] = {}
    if args:
        cfg["params"].update(_ctx_to_dct(args))
    cfg["params"] = _process_dct(cfg["params"])
    console.print()
    console.print("You set the following parameters explicitly:", style="bold")
    for k, v in cfg.get("params", {}).items():
        console.print(f"  {k}: {v}")
    quantities = cfg.get("quantities", ["m", "dndm"])
    # get_hmf yields (quantity-values, framework-instance, label) per run.
    out = get_hmf(
        quantities,
        framework=cfg.get("framework", default_framework),
        get_label=True,
        label_kind="filename",
        **cfg.get("params", {}),
    )
    outdir = Path(outdir)
    console.print()
    console.print("Quantities to be obtained: ", style="bold")
    for q in quantities:
        console.print(f" - {q}", style="dim grey53")
    console.print()
    console.print(Rule("Starting Calculations", style="grey53"))
    t = time()
    for quants, obj, lab in out:
        lab = lab or label
        # Two-column grid: what was calculated | how long it took.
        table = Table.grid()
        table.expand = True
        table.add_column(style="bold", justify="left")
        table.add_column(style="blue", justify="right")
        table.add_row(f"Calculated {lab}:", f"[[{time() - t:.2f} sec]]")
        console.print(table)
        t = time()
        # Write out quantities
        for qname, q in zip(quantities, quants):
            np.savetxt(outdir / f"{lab}_{qname}.txt", q)
        console.print(
            f"  Writing quantities to [cyan]{outdir}/{lab}_<quantity>.txt[/cyan]."
        )
        # Write out parameters
        dct = framework_to_dict(obj)
        dct["quantities"] = quantities
        with open(outdir / f"{lab}_cfg.toml", "w") as fl:
            toml.dump(dct, fl, encoder=toml.TomlNumpyEncoder())
        console.print(
            f"  Writing full config to [cyan]{outdir}/{lab}_cfg.toml[/cyan]."
        )
    console.print()
    console.print(Rule("Finished!", style="grey53"), style="bold green")
| [
"rich.table.Table.grid",
"importlib.import_module",
"pathlib.Path",
"click.option",
"rich.panel.Panel",
"rich.rule.Rule",
"rich.console.Console",
"click.Path",
"toml.load",
"numpy.savetxt",
"toml.TomlNumpyEncoder",
"click.Group",
"time.time",
"astropy.units.Quantity"
] | [((447, 465), 'rich.console.Console', 'Console', ([], {'width': '(100)'}), '(width=100)\n', (454, 465), False, 'from rich.console import Console\n'), ((1738, 1751), 'click.Group', 'click.Group', ([], {}), '()\n', (1749, 1751), False, 'import click\n'), ((2169, 2223), 'click.option', 'click.option', (['"""-l"""', '"""--label"""'], {'type': 'str', 'default': '"""hmf"""'}), "('-l', '--label', type=str, default='hmf')\n", (2181, 2223), False, 'import click\n'), ((3759, 3771), 'pathlib.Path', 'Path', (['outdir'], {}), '(outdir)\n', (3763, 3771), False, 'from pathlib import Path\n'), ((4029, 4035), 'time.time', 'time', ([], {}), '()\n', (4033, 4035), False, 'from time import time\n'), ((588, 601), 'toml.load', 'toml.load', (['fl'], {}), '(fl)\n', (597, 601), False, 'import toml\n'), ((1982, 2021), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'dir_okay': '(False)'}), '(exists=True, dir_okay=False)\n', (1992, 2021), False, 'import click\n'), ((2093, 2147), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'dir_okay': '(True)', 'file_okay': '(True)'}), '(exists=True, dir_okay=True, file_okay=True)\n', (2103, 2147), False, 'import click\n'), ((2760, 2813), 'rich.panel.Panel', 'Panel', (['f"""Welcome to {pkg_name}!"""'], {'box': 'box.DOUBLE_EDGE'}), "(f'Welcome to {pkg_name}!', box=box.DOUBLE_EDGE)\n", (2765, 2813), False, 'from rich.panel import Panel\n'), ((3974, 4019), 'rich.rule.Rule', 'Rule', (['"""Starting Calculations"""'], {'style': '"""grey53"""'}), "('Starting Calculations', style='grey53')\n", (3978, 4019), False, 'from rich.rule import Rule\n'), ((4114, 4126), 'rich.table.Table.grid', 'Table.grid', ([], {}), '()\n', (4124, 4126), False, 'from rich.table import Table\n'), ((4381, 4387), 'time.time', 'time', ([], {}), '()\n', (4385, 4387), False, 'from time import time\n'), ((5030, 5063), 'rich.rule.Rule', 'Rule', (['"""Finished!"""'], {'style': '"""grey53"""'}), "('Finished!', style='grey53')\n", (5034, 5063), False, 'from rich.rule import 
Rule\n'), ((772, 800), 'importlib.import_module', 'importlib.import_module', (['mod'], {}), '(mod)\n', (795, 800), False, 'import importlib\n'), ((4481, 4525), 'numpy.savetxt', 'np.savetxt', (["(outdir / f'{lab}_{qname}.txt')", 'q'], {}), "(outdir / f'{lab}_{qname}.txt', q)\n", (4491, 4525), True, 'import numpy as np\n'), ((1607, 1638), 'astropy.units.Quantity', 'Quantity', (["v['value']", "v['unit']"], {}), "(v['value'], v['unit'])\n", (1615, 1638), False, 'from astropy.units import Quantity\n'), ((4849, 4872), 'toml.TomlNumpyEncoder', 'toml.TomlNumpyEncoder', ([], {}), '()\n', (4870, 4872), False, 'import toml\n'), ((4315, 4321), 'time.time', 'time', ([], {}), '()\n', (4319, 4321), False, 'from time import time\n')] |
import random
import unittest
import numpy as np
import torch
from code_soup.common.utils import Seeding
class TestSeeding(unittest.TestCase):
    """Tests for the ``Seeding`` utility."""

    def test_seed(self):
        """``Seeding.seed(42)`` must match ``random.seed(42)`` and seed numpy/torch."""
        # Capture the state the stdlib RNG reaches after seeding with 42 ...
        random.seed(42)
        expected_state = random.getstate()
        # ... then verify that Seeding.seed drives it to the very same state.
        Seeding.seed(42)
        observed_state = random.getstate()
        self.assertEqual(expected_state, observed_state)
        # The checks below rely on numpy/torch internals: the seed appearing
        # at the head of the RNG state vectors — implementation details of
        # the current backends, asserted as-is by the original test.
        self.assertEqual(np.random.get_state()[1][0], 42)
        self.assertEqual(torch.get_rng_state().tolist()[0], 42)
| [
"numpy.random.get_state",
"random.seed",
"random.getstate",
"torch.get_rng_state",
"code_soup.common.utils.Seeding.seed"
] | [((256, 271), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (267, 271), False, 'import random\n'), ((296, 313), 'random.getstate', 'random.getstate', ([], {}), '()\n', (311, 313), False, 'import random\n'), ((322, 338), 'code_soup.common.utils.Seeding.seed', 'Seeding.seed', (['(42)'], {}), '(42)\n', (334, 338), False, 'from code_soup.common.utils import Seeding\n'), ((361, 378), 'random.getstate', 'random.getstate', ([], {}), '()\n', (376, 378), False, 'import random\n'), ((457, 478), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (476, 478), True, 'import numpy as np\n'), ((515, 536), 'torch.get_rng_state', 'torch.get_rng_state', ([], {}), '()\n', (534, 536), False, 'import torch\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.